input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- coding: utf-8 -*-
""":py:mod:`unittest`-based classes and accompanying functions to
create some types of ferenda-specific tests easier."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from difflib import unified_diff
from io import BytesIO
import builtins
import codecs
import logging
import filecmp
import inspect
import json
import os
import re
import shutil
import sys
import tempfile
import time
import unicodedata
from urllib.parse import unquote
from ferenda.compat import unittest
import rdflib
from rdflib.compare import graph_diff
from rdflib.util import guess_format
from lxml import etree
import requests.exceptions
import responses
from ferenda import DocumentRepository, TextReader
from ferenda import elements, util
from ferenda.errors import ExternalCommandError, MaxDownloadsReached
class FerendaTestCase(object):
"""Convenience class with extra AssertEqual methods. Note that even
though this method provides :py:class:`unittest.TestCase`-like
assert methods, it does not derive from
:py:class:`~unittest.TestCase`. When creating a test case that
makes use of these methods, you need to inherit from both
:py:class:`~unittest.TestCase` and this class, ie::
class MyTestcase(unittest.TestCase, ferenda.testutil.FerendaTestCase):
def test_simple(self):
self.assertEqualXML("<foo arg1='x' arg2='y'/>",
"<foo arg2='y' arg1='x'></foo>")
"""
# FIXME: Some of these should (at least optionally) be registered
# with TestCase.assertEqual through .addTypeEqualityFunc, but some
# (eg. assertEqualDirs) have non-unique types
def assertEqualGraphs(self, want, got, exact=True):
"""Assert that two RDF graphs are identical (isomorphic).
:param want: The graph as expected, as an
:py:class:`~rdflib.graph.Graph` object or the filename
of a serialized graph
:param got: The actual graph, as an :py:class:`~rdflib.graph.Graph`
object or the filename of a serialized graph
:param exact: Whether to require that the graphs are exactly alike
(True) or only if all triples in `want` exists in `got`
(False)
:type exact: bool
"""
def _loadgraph(filename):
g = rdflib.Graph()
# we must read the data ourself, providing a non-ascii
# filename to Graph.parse fails deep in rdflib internals
format = guess_format(filename)
if format == "nt":
data = util.readfile(filename, "r", encoding="utf-8")
else:
data = util.readfile(filename, "rb")
g.parse(data=data, format=format)
return g
if not isinstance(want, rdflib.Graph):
want = _loadgraph(want)
if not isinstance(got, rdflib.Graph):
got = _loadgraph(got)
(in_both, in_first, in_second) = graph_diff(want, got)
msg = ""
if in_first:
for (s, p, o) in sorted(in_first, key=lambda t: (t[0], t[1], t[2])):
msg += "- %s %s %s\n" % (s.n3(), p.n3(), o.n3())
if (exact and in_second) or in_first:
for (s, p, o) in sorted(in_second, key=lambda t: (t[0], t[1], t[2])):
msg += "+ %s %s %s\n" % (s.n3(), p.n3(), o.n3())
if ((len(in_first) > 0) or (len(in_second) > 0 and exact)):
if len(in_first) > 0:
msg = "%s expected triples were not found\n" % len(in_first) + msg
if len(in_second) > 0:
msg = "%s unexpected triples were found\n" % len(in_second) + msg
ntdiff = True
if ntdiff:
msg = "%r != %r\n" % (want, got) + msg
else:
import difflib
d = difflib.unified_diff(want.serialize(format="turtle").decode("utf-8").split("\n"),
got.serialize(format="turtle").decode("utf-8").split("\n"), n=10000)
msg = msg + "\n".join(d)
# print("=======WANT=======")
# print(want.serialize(format="n3"))
# print("=======GOT========")
# print(got.serialize(format="n3"))
# sys.exit(0)
return self.fail(msg)
def assertAlmostEqualDatetime(self, datetime1, datetime2, delta=1):
"""Assert that two datetime objects are reasonably equal.
:param datetime1: The first datetime to compare
:type datetime1: datetime
:param datetime2: The second datetime to compare
:type datetime2: datetime
:param delta: How much the datetimes are allowed to differ, in seconds.
:type delta: int
"""
# if the datetimes differ with max 1 second, they're almost
# equal)
time1 = time.mktime(datetime1.timetuple())
time2 = time.mktime(datetime2.timetuple())
absdiff = abs(time1 - time2)
# FIXME: The "return" is due to a badly written testcase
return self.assertLessEqual(absdiff, delta, "Difference between %s and %s "
"is %s seconds which is NOT almost equal" %
(datetime1.isoformat(), datetime2.isoformat(),
absdiff))
    def assertEqualXML(self, want, got, namespace_aware=True, tidy_xhtml=False):
        """Assert that two xml trees are canonically identical.
        :param want: The XML document as expected, as a string, byte string or ElementTree element
        :param got: The actual XML document, as a string, byte string or ElementTree element
        :param namespace_aware: Whether tags are compared by their full
            namespaced name (True) or by local name only (False)
        :param tidy_xhtml: Whether to run both documents through the external
            ``tidy`` program before producing the failure diff
        """
        # Adapted from formencode, https://bitbucket.org/ianb/formencode/
        # Recursively compare two elements; on the first difference, report a
        # human-readable explanation through `reporter` and return False.
        def xml_compare(want, got, reporter):
            if namespace_aware:
                wanttag = want.tag
                gottag = got.tag
            else:
                # Compare local names only, stripping any "{namespace}" prefix.
                wanttag = want.tag.rsplit("}")[-1]
                gottag = got.tag.rsplit("}")[-1]
            if wanttag != gottag:
                reporter("Tags do not match: 'want': %s, 'got': %s" % (wanttag, gottag))
                return False
            # Attributes must agree in both directions: every expected
            # attribute has the expected value, and 'got' has no extras.
            for name, value in want.attrib.items():
                if got.attrib.get(name) != value:
                    reporter("Attributes do not match: 'want': %s=%r, 'got': %s=%r"
                             % (name, value, name, got.attrib.get(name)))
                    return False
            for name in got.attrib.keys():
                if name not in want.attrib:
                    reporter("'got' has an attribute 'want' is missing: %s"
                             % name)
                    return False
            if not text_compare(want.text, got.text):
                reporter("text: 'want': %r != 'got': %r" % (want.text, got.text))
                return False
            if not text_compare(want.tail, got.tail):
                reporter("tail: 'want': %r != 'got': %r" % (want.tail, got.tail))
                return False
            # NOTE(review): getchildren() is deprecated in modern
            # lxml/ElementTree; list(element) is the equivalent.
            cl1 = want.getchildren()
            cl2 = got.getchildren()
            if len(cl1) != len(cl2):
                reporter("children length differs, 'want': %i, 'got': %i"
                         % (len(cl1), len(cl2)))
                return False
            i = 0
            for c1, c2 in zip(cl1, cl2):
                i += 1
                if not xml_compare(c1, c2, reporter=reporter):
                    reporter('children %i do not match: %s'
                             % (i, c1.tag))
                    return False
            return True
        # Whitespace-insensitive text comparison; None and "" are equivalent.
        def text_compare(want, got):
            if not want and not got:
                return True
            return (want or '').strip() == (got or '').strip()
        # Normalize str/bytes/Element input into an lxml ElementTree.
        def treeify(something):
            if isinstance(something, str):
                fp = BytesIO(something.encode('utf-8'))
                # return etree.fromstring(something)
                return etree.parse(fp)
            elif isinstance(something, bytes):
                fp = BytesIO(something)
                # return etree.parse(fp).getroot()
                return etree.parse(fp)
            elif isinstance(something, etree._Element):
                return etree.ElementTree(something)
            else:
                raise ValueError("Can't convert a %s into an ElementTree" % type(something))
        # Run the serialized tree through the external "tidy" program and
        # parse the cleaned-up result; raises ExternalCommandError if every
        # candidate command line fails.
        def tidy(tree):
            import subprocess
            # without the "--drop-empty-elements no" argument, empty
            # span tags (containing RDFa data) will be
            # dropped. Unfortunately this seems to be a new argument
            # only available in the tidy-html5 branch. Assume that we
            # have the new version, and fall back to the old, worse,
            # version otherwise:
            for cmdline in ("tidy -i -q -asxhtml -w 100 -utf8 --drop-empty-elements no",
                            "tidy -i -q -asxhtml -w 100 -utf8"):
                p = subprocess.Popen(cmdline,
                                     shell=True,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                treestr = etree.tostring(tree, encoding="utf-8")
                stdout, stderr = p.communicate(treestr)
                if stdout.strip():
                    # NOTE(review): rawres is assigned but never used.
                    rawres = stdout
                    # NOTE(review): the first .replace() argument looks like
                    # it should be a non-breaking space (U+00A0) being
                    # normalized to a regular space -- confirm it survived
                    # source-file encoding.
                    cookedres = stdout.decode(
                        "utf-8").replace(" ", " ").encode("utf-8")
                    newtree = etree.parse(BytesIO(cookedres))
                    return newtree
                elif p.returncode and stderr:
                    log = logging.getLogger("tidy")
                    log.warning("'%s' failed: %s" % (cmdline, stderr.decode().split("\n")[0]))
            # if we reach this point, all command lines have failed
            raise ExternalCommandError(stderr)
        # Serialize a tree to canonical XML (C14N) so the diff is stable
        # across attribute ordering and similar cosmetic differences.
        def c14nize(tree):
            tmp = BytesIO()
            tree.write_c14n(tmp)
            return tmp.getvalue().decode('utf-8')
        errors = []
        want_tree = treeify(want)
        got_tree = treeify(got)
        # example on how to filter attributes from one of the trees to
        # compare. In this case, b/c we want to test without some
        # attributes that a newly added change added to all
        # 'pagebreak' spans (to see if the change broke anything
        # else).
        # for tree in (got_tree, want_tree):
        #     for pagebreak in tree.findall("//{http://www.w3.org/1999/xhtml}span"):
        #         if pagebreak.get("id") and pagebreak.get("id").startswith("sid"):
        #             if "src" in pagebreak.attrib:
        #                 del pagebreak.attrib["src"]
        #                 del pagebreak.attrib["width"]
        #                 del pagebreak.attrib["height"]
        xml_compare(want_tree.getroot(),
                    got_tree.getroot(),
                    errors.append)
        if errors:
            if tidy_xhtml:
                want_tree = tidy(want_tree)
                got_tree = tidy(got_tree)
            want_lines = [x + "\n" for x in c14nize(want_tree).split("\n")]
            got_lines = [x + "\n" for x in c14nize(got_tree).split("\n")]
            diff = unified_diff(want_lines, got_lines, "want.xml", "got.xml")
            # convert '@@ -1,1 +1,1 @@' (which py26 difflib produces)
            # to '@@ -1 +1 @@' (wich later versions produces)
            diff = [re.sub(r"@@ -(\d+),\1 \+(\d+),\2 @@", r"@@ -\1 +\2 @@", x)
                    for x in diff]
            # remove trailing space for other control lines (py26...)
            diff = [re.sub(r"((?:\+\+\+|\-\-\- ).*) $", r"\1", x)
                    for x in diff]
            msg = "".join(diff) + "\n\nERRORS:" + "\n".join(errors)
            return self.fail(msg)
def assertEqualDirs(self, want, got, suffix=None, subset=False, filterdir="entries"):
"""Assert that two directory trees contains identical files
:param want: The expected directory tree
:type want: str
:param got: The actual directory tree
:type got: str
:param suffix: If given, only check files ending in suffix (otherwise check all the files
:type suffix: str
:param subset: If True, require only that files in want is a subset of files in got (otherwise require that the sets are identical)
:type subset: bool
:param filterdir: If given, don't compare the parts of the tree that starts with filterdir
:type suffix: str
"""
wantfiles = [x[len(want) + 1:]
for x in util.list_dirs(want, suffix) if not x.startswith(want + os.sep + filterdir)]
gotfiles = [x[len(got) + 1:]
for x in util.list_dirs(got, suffix) if not x.startswith(got + os.sep + filterdir)]
self.maxDiff = None
if subset:
self.assertTrue(set(wantfiles).issubset(set(gotfiles)))
else:
self.assertEqual(wantfiles, gotfiles) # or assertIn?
for f in wantfiles:
if not filecmp.cmp(os.path.join(want, f),
os.path.join(got, f),
shallow=False):
self.assertEqual(util.readfile(os.path.join(want, f), mode="rb"),
| |
#!/usr/bin/python3.6
from __future__ import print_function
from googleapiclient.discovery import build
from googleapiclient import errors
from httplib2 import Http
from oauth2client import file, client, tools
from pytz import timezone
from datetime import datetime
from re import match
from netmiko import ConnectHandler, ssh_exception
from email.mime.text import MIMEText
import base64
import time
import threading
import queue
# If modifying these scopes, delete the file token.json.
SCOPES_SHEETS = 'https://www.googleapis.com/auth/spreadsheets'
SCOPES_GMAIL = 'https://www.googleapis.com/auth/gmail.send'
# The ID and range of the VLAN Change Request Form results spreadsheet.
# NOTE(review): 'yourSheetID' is a placeholder -- set the real sheet ID
# before deploying.
SPREADSHEET_ID = 'yourSheetID'
RANGE_NAME = 'A2:I'
# List of people to receive notifications for failed attempts
# NOTE(review): '<EMAIL>' is a scrubbed placeholder address.
EMAILS = ['<EMAIL>']
# Maximum number of connections allowed at one time
MAX_THREADS = 5
# Job queue for new submissions (not a constant)
# main() rebinds this with a fresh Queue on every polling pass;
# worker() reads it as a module-level global.
q = queue.Queue()
# Sleep time in seconds
# (polling interval used by main() when no new submissions are found)
seconds = 10
# TODO: Edit with appropriate VLAN IDs
# Maps the human-readable VLAN name from the form to its numeric VLAN ID.
VLAN_DICT = {
    'Management': 1,
    'Food Services': 2,
    'UPS': 3,
    'Copiers': 4,
    'Printers': 5,
    'Cameras': 6,
    'Air Con': 7,
    'DATA': 8,
    'Administrator': 9,
    'vBrick': 10,
    'Servers': 11,
    'Wireless': 12,
}
def main():
    """
    Reads through submitted responses making changes on new entries.

    Polls the spreadsheet forever: each pass re-reads every row, queues the
    rows that still need work (no status yet, or a retriable failure), and
    runs a bounded pool of worker threads until the queue drains.
    :return: None
    """
    while True:
        service_sheets = get_google_service('sheets', 'v4', 'token_sheets.json', SCOPES_SHEETS)
        result = service_sheets.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
                                                            range=RANGE_NAME).execute()
        values = result.get('values', [])
        new_data_found = False
        if values:
            # Sheet data starts on row 2 (RANGE_NAME is 'A2:I').
            current_row = 2
            # Rebind the module-level queue so workers started on this pass
            # only ever see this pass's jobs.
            global q
            q = queue.Queue()
            threads = []
            for data in values:
                try:
                    # Column G (index 6) holds the processing status; rows
                    # whose previous attempt failed are retried.
                    status = data[6]
                    if status == 'Authentication Failure' \
                            or status == 'Connection Timeout':
                        new_data_found = True
                        q.put((data, current_row))
                except IndexError:
                    # No status cell yet: this row has never been processed.
                    new_data_found = True
                    q.put((data, current_row))
                current_row += 1
            if new_data_found:
                # Never start more workers than there are queued jobs.
                num_workers = q.qsize() if q.qsize() < MAX_THREADS else MAX_THREADS
                print ('num workers: ', num_workers)
                for i in range(num_workers):
                    thread_name = 'thread {}'.format(i)
                    t = threading.Thread(target=worker, name=thread_name, daemon=True)
                    t.start()
                    threads.append(t)
                print('workers created')
                print('queue size: ', q.qsize())
                # Wait until every queued job has been task_done()...
                q.join()
                # ...then push one None sentinel per worker to stop them.
                for i in range(num_workers):
                    q.put(None)
                for t in threads:
                    t.join()
                print('workers killed')
                print('queue size: ', q.qsize())
        if not new_data_found:
            print('sleeping')
            time.sleep(seconds)
def worker():
    """
    Creates a worker/thread. Changes vlans until no jobs are left in the queue.

    Jobs arrive on the module-level queue ``q`` as (form_row, sheet_row)
    tuples; a ``None`` sentinel (pushed by main() after q.join()) tells the
    worker to exit.
    :return: None
    """
    # Each worker builds its own service handles rather than sharing one.
    # NOTE(review): presumably because googleapiclient resources are not
    # thread-safe -- confirm against the googleapiclient docs.
    service_gmail = get_google_service('gmail', 'v1', 'token_gmail.json', SCOPES_GMAIL)
    service_sheets = get_google_service('sheets', 'v4', 'token_sheets.json', SCOPES_SHEETS)
    while True:
        data = q.get()
        if data is None:
            # Sentinel: main() has drained the queue; stop this thread.
            print('Worker freed')
            break
        print('Worker assigned new job')
        # data[0] is the form row; data[0][3] is the device IP column.
        device_type = get_system_type(ip=data[0][3])
        current_row = data[1]
        change_vlan(data[0], device_type, current_row, service_sheets, service_gmail)
        q.task_done()
        print('worker finished job')
def get_google_service(type, version, token, scope):
    """
    Builds and returns an authorized Google API service.

    :param type: String - Type of service to build, either 'gmail' or 'sheets'
        (name shadows the builtin ``type`` but is kept for backward
        compatibility with keyword callers)
    :param version: String - Service version e.g. 'v1' 'v4'
    :param token: String - Name of token file to create
    :param scope: String - Scope of the service i.e. permissions
    :return: googleapiclient.discovery.Resource
    """
    credential_store = file.Storage(token)
    credentials = credential_store.get()
    # Only run the interactive OAuth flow when no valid cached credential
    # exists in the token file.
    if not (credentials and not credentials.invalid):
        oauth_flow = client.flow_from_clientsecrets('credentials.json', scope)
        credentials = tools.run_flow(oauth_flow, credential_store)
    return build(type, version, http=credentials.authorize(Http()))
def create_message(form_data, status, to=None, subject='VLAN change fail'):
    """
    Creates and returns a Gmail API message dict.

    :param form_data: List - Submitted responses
    :param status: String - Status to include in the message body
    :param to: String - Comma-separated list of recipients; defaults to the
        configured notification list EMAILS
    :param subject: String - Subject of the email
    :return: Dict - Message body suitable for users().messages().send()
    """
    # Resolve the default recipient list lazily.  Previously the 'to'
    # header was computed as `to if len(EMAILS) > 1 else EMAILS[0]`, so with
    # a one-element EMAILS list an explicitly passed `to` (e.g. the
    # requester's address on success) was silently replaced by EMAILS[0].
    if to is None:
        to = ', '.join(EMAILS)
    message_text = str(form_data) + '\n\nStatus: ' + status
    message = MIMEText(message_text)
    message['to'] = to
    message['from'] = ''
    message['subject'] = subject
    # The Gmail API expects the RFC 2822 message base64url-encoded.
    raw = base64.urlsafe_b64encode(message.as_bytes())
    raw = raw.decode()
    message = {'raw': raw}
    return message
def send_email(service_gmail, message, user_id='me'):
    """
    Sends an email through the Gmail API.

    :param service_gmail: Authorized Gmail API service instance.
    :param message: Message to be sent
    :param user_id: User's email address. The special value "me"
        can be used to indicate the authenticated user.
    :return: The sent message dict on success, None on HttpError
    """
    try:
        request = service_gmail.users().messages().send(userId=user_id,
                                                        body=message)
        return request.execute()
    except errors.HttpError as error:
        # Best-effort notification: log the failure and carry on.
        print('An error occurred: %s' % error)
def update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan="\' \'"):
    """
    Updates the status, change timestamp and vlan change details on the sheet.

    :param range_to_edit: String - Range of cells to be updated e.g. 'G2:I2'
    :param status: String - New status e.g. 'Successful'
    :param service_sheets: googleapiclient.discovery.Resource
    :param desired_vlan: String - Desired VLAN according to submitted response
    :param current_vlan: String - Current VLAN 'name, id'
    :return: None
    """
    # e.g. "DATA, 8 >> Printers, 5"
    change_info = '%s >> %s, %s' % (current_vlan, desired_vlan,
                                    VLAN_DICT.get(desired_vlan))
    payload = {
        'range': range_to_edit,
        'majorDimension': 'ROWS',
        'values': [[status, get_time(), change_info]],
    }
    service_sheets.spreadsheets().values().update(
        spreadsheetId=SPREADSHEET_ID, range=range_to_edit,
        valueInputOption='USER_ENTERED', body=payload).execute()
def get_time():
    """
    Returns a String representation of the current time in America/Chicago.

    :return: String - Month/Day/Year HH:MM:SS (month and day without
        zero-padding, e.g. '3/7/2021 09:05:01')
    """
    now = datetime.now(timezone('America/Chicago'))
    # Format the components directly instead of string-splitting
    # str(datetime): the old approach split on '.', which breaks when
    # microsecond == 0 because str() then omits the '.ffffff' part and the
    # UTC offset ends up glued to the seconds field.
    return '%d/%d/%d %02d:%02d:%02d' % (now.month, now.day, now.year,
                                        now.hour, now.minute, now.second)
def get_system_type(ip):
    """
    FOR FUTURE USE.
    Looks up the operating system of the device at *ip*.  Ideally this
    would read from a file mapping IPs to OS types; the current deployment
    is homogeneous, so every device reports the same OS.
    :param ip: String - IP address of the device
    :return: String - OS of the device
    """
    # Single-OS environment: everything runs Cisco IOS for now.
    device_os = 'cisco_ios'
    return device_os
def change_vlan(form_data, device_type, current_row, service_sheets, service_gmail):
    """
    Dispatches the VLAN change for one sheet row to the handler for the
    device's operating system.

    :param form_data: List of data of the current row in the sheet
    :param device_type: OS of the device e.g. cisco_ios
    :param current_row: row in the google sheet
    :param service_sheets: googleapiclient.discovery.Resource
    :param service_gmail: googleapiclient.discovery.Resource
    :return: None
    """
    # Status/timestamp/details columns for this row, e.g. "G2:I2".
    range_to_edit = 'G{0}:I{0}'.format(current_row)
    desired_vlan = form_data[1]
    # Mark the row as in-progress before attempting the connection.
    update_sheet(range_to_edit, 'Connecting...', service_sheets, desired_vlan)
    if device_type == 'cisco_ios':
        cisco_ios_change(form_data, range_to_edit, service_sheets, service_gmail)
    elif device_type == 'some HP':
        # TODO: some HP function, Extreme etc
        print()
def cisco_ios_change(form_data, range_to_edit, service_sheets, service_gmail):
    """
    Attempts an SSH connection to the switch.
    Exits if connection fails to be established.
    Changes VLAN as needed, only saving if change is validated successfully.
    :param form_data: List of data of the current row in the sheet
    :param range_to_edit: String - Range of cells to be updated e.g. "G2:I2"
    :param service_sheets: googleapiclient.discovery.Resource
    :param service_gmail: googleapiclient.discovery.Resource
    :return: None
    """
    ip_address = form_data[3]
    port = str(form_data[4]).replace(' ', '')
    desired_vlan = form_data[1]
    # Abbreviate the interface name to the short form IOS accepts:
    # 'GigabitEthernet0/1' -> 'Gi0/1' (drops 13 middle chars after 'Gi'),
    # 'FastEthernet0/1' -> 'Fa0/1'.
    # NOTE(review): assumes the form always submits the fully spelled-out
    # interface name -- confirm against the form's input validation.
    if port.upper()[0] == 'G':
        port = port[:2] + port[15:]
    else:
        port = port[:2] + port[12:]
    # adjust user/pass accordingly
    # NOTE(review): '<PASSWORD>' is a scrubbed placeholder; credentials
    # should come from a secrets store rather than source code.
    cisco_ios_switch = {
        'device_type': 'cisco_ios',
        'ip': ip_address,
        'username': 'oscar',
        'password': '<PASSWORD>',
        'port': 22,  # optional, defaults to 22
        'secret': '',  # optional, defaults to ''
        'verbose': False  # optional, default to False
    }
    net_connect, status = get_connection(cisco_ios_switch)
    update_sheet(range_to_edit, status, service_sheets, desired_vlan)
    # Stops if a connection fails to be established
    if status != 'Connection Established':
        return send_email(service_gmail, message=create_message(form_data, status))
    output = net_connect.send_command('show vlan br')
    # current_vlan becomes a "NAME, ID" string, e.g. "DATA, 8".
    current_vlan = get_current_cisco_vlan(output, port)
    current_vlan = current_vlan[0] + ', ' + current_vlan[1]
    status = 'Attempting change...'
    update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
    # Stops if port is not access or port is not found (Should probably never happen)
    if current_vlan == 'None, -1':
        if is_trunk(net_connect.send_command('show int trunk'), port):
            status = 'Port is trunk'
        else:
            status = 'Port not found'
        update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
        return send_email(service_gmail, message=create_message(form_data, status))
    interface_cmd = 'interface ' + port
    vlan_cmd = 'switchport access vlan ' + str(VLAN_DICT.get(desired_vlan))
    net_connect.send_config_set([interface_cmd, vlan_cmd])
    # Re-read the VLAN table; only persist the running config if the change
    # actually took effect.
    output = net_connect.send_command('show vlan br')
    if validate_cisco(output, port, desired_vlan):
        net_connect.save_config()
        status = 'Successful'
        # On success, notify the requester (form_data[5]) instead of admins.
        send_email(service_gmail, message=create_message(form_data, status, to=form_data[5], subject='VLAN Changed'))
    else:
        status = 'Failed'
        send_email(service_gmail, message=create_message(form_data, status))
    net_connect.cleanup()
    net_connect.disconnect()
    update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
def get_connection(device):
    """
    Returns a Connection to the device and a status message.

    :param device: Dict - holds device information
    :return: Connection, String - Netmiko Connection if possible (an empty
        string when the connection fails), Status
    """
    # Callers decide success by inspecting the status string, so the
    # connection slot stays '' on failure (kept for compatibility).
    connection = ''
    status = 'Connection Established'
    try:
        connection = ConnectHandler(**device)
    except ssh_exception.NetMikoAuthenticationException:
        status = 'Authentication Failure'
    except ssh_exception.NetMikoTimeoutException:
        status = 'Connection Timeout'
    return connection, status
def get_current_cisco_vlan(output, port):
"""
Returns the current VLAN assignment for a given interface.
Reads the output of "show vlan brief" line by line storing the last read
vlan ID and name. When the interface is found, the current stored
ID and name are returned. If no match is found, ('None', '0') is returned.
:param output: String - Output of "show vlan brief"
:param port: String - Interface to be modified (e.g. Gi0/1 Fa0/1)
:return: String Tuple - (vlan NAME, vlan ID) e.g. (DATA, 8)
"""
for lines in output.strip().splitlines():
if match(r'\d{1,4}', lines[0:3].strip()):
vlan_id = lines[0:3].strip()
vlan_name = lines[5:37].strip()
if port in lines:
if vlan_name.upper() != 'VOICE':
return vlan_name, vlan_id
return 'None', | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Wed Feb 24 19:49:28 2021 by generateDS.py version 2.37.16.
# Python 3.8.6 (v3.8.6:db455296be, Sep 23 2020, 13:31:39) [Clang 6.0 (clang-600.0.57)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', './tnt_lib/label_common_definitions.py')
#
# Command line arguments:
# ./schemas/label_common_definitions.xsd
#
# Command line:
# /Users/danielkobina/Workspace/project/purplship-carriers/.venv/purplship-carriers/bin/generateDS --no-namespace-defs -o "./tnt_lib/label_common_definitions.py" ./schemas/label_common_definitions.xsd
#
# Current working directory (os.getcwd()):
# tnt
#
import sys
try:
ModulenotfoundExp_ = ModuleNotFoundError
except NameError:
ModulenotfoundExp_ = ImportError
from six.moves import zip_longest
import os
import re as re_
import base64
import datetime as datetime_
import decimal as decimal_
try:
from lxml import etree as etree_
except ModulenotfoundExp_ :
from xml.etree import ElementTree as etree_
# Module-level switches consumed by the generated element classes.
Validate_simpletypes_ = True
SaveElementTreeNode = True
# Py2/py3 string base-type shim used by the gds_* list formatters.
if sys.version_info.major == 2:
    BaseStrType_ = basestring
else:
    BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse an XML document from *infile* and return the ElementTree."""
    if parser is None:
        # Prefer the lxml ElementTree compatible parser so that, e.g.,
        # comments are ignored; fall back to plain xml.etree otherwise.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    # Convert path-like objects to plain path strings where the Python
    # version supports os.PathLike (3.6+).
    if getattr(os, 'PathLike', None) is not None and isinstance(infile, os.PathLike):
        infile = os.path.join(infile)
    return etree_.parse(infile, parser=parser, **kwargs)
def parsexmlstring_(instring, parser=None, **kwargs):
    """Parse an XML document from the string *instring*; return the root element."""
    if parser is None:
        # Prefer the lxml ElementTree compatible parser so that, e.g.,
        # comments are ignored; fall back to plain xml.etree otherwise.
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    return etree_.fromstring(instring, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ModulenotfoundExp_ :
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ModulenotfoundExp_ :
GenerateDSNamespaceTypePrefixes_ = {}
#
# You can replace the following class definition by defining an
# importable module named "generatedscollector" containing a class
# named "GdsCollector". See the default class definition below for
# clues about the possible content of that class.
#
try:
    from generatedscollector import GdsCollector as GdsCollector_
except ModulenotfoundExp_:
    class GdsCollector_(object):
        """Fallback collector that accumulates parse/validation warnings."""

        def __init__(self, messages=None):
            # Keep a reference to the caller's list (when given) so the
            # caller observes messages as they are added.
            self.messages = [] if messages is None else messages

        def add_message(self, msg):
            self.messages.append(msg)

        def get_messages(self):
            return self.messages

        def clear_messages(self):
            self.messages = []

        def print_messages(self):
            for msg in self.messages:
                print("Warning: {}".format(msg))

        def write_messages(self, outstream):
            for msg in self.messages:
                outstream.write("Warning: {}\n".format(msg))
#
# The super-class for enum types
#
try:
from enum import Enum
except ModulenotfoundExp_ :
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ModulenotfoundExp_ as exp:
class GeneratedsSuper(object):
__hash__ = object.__hash__
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            """Minimal tzinfo with a constant UTC offset and unknown DST."""
            def __init__(self, offset, name):
                # `offset` is in minutes east of UTC.
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                # DST is not derivable from a fixed offset.
                return None
        # Identity formatter: xs:string values pass through unchanged.
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_parse_string(self, input_data, node=None, input_name=''):
            return input_data
        # Normalizes falsy values (None, '') to the empty string.
        def gds_validate_string(self, input_data, node=None, input_name=''):
            if not input_data:
                return ''
            else:
                return input_data
        # Encodes raw bytes as base64 for xs:base64Binary output.
        def gds_format_base64(self, input_data, input_name=''):
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node=None, input_name=''):
            return input_data
        # xs:integer helpers.  NOTE(review): raise_parse_error is defined
        # elsewhere in this generated module and is presumed to raise, so the
        # `return` after an except block is never reached on bad input.
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_parse_integer(self, input_data, node=None, input_name=''):
            try:
                ival = int(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires integer value: %s' % exp)
            return ival
        def gds_validate_integer(self, input_data, node=None, input_name=''):
            try:
                value = int(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires integer value')
            return value
        # Space-separated xs:integer list formatting/validation.
        def gds_format_integer_list(self, input_data, input_name=''):
            if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
                input_data = [str(s) for s in input_data]
            return '%s' % ' '.join(input_data)
        def gds_validate_integer_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    int(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integer values')
            return values
        # xs:float helpers.
        # NOTE(review): rstrip('0') leaves a trailing '.' for whole numbers
        # (e.g. 1.0 -> '1.'), which is still a valid XSD float lexical form.
        def gds_format_float(self, input_data, input_name=''):
            return ('%.15f' % input_data).rstrip('0')
        def gds_parse_float(self, input_data, node=None, input_name=''):
            try:
                fval_ = float(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires float or double value: %s' % exp)
            return fval_
        def gds_validate_float(self, input_data, node=None, input_name=''):
            try:
                value = float(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires float value')
            return value
        # Space-separated xs:float list formatting/validation.
        def gds_format_float_list(self, input_data, input_name=''):
            if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
                input_data = [str(s) for s in input_data]
            return '%s' % ' '.join(input_data)
        def gds_validate_float_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of float values')
            return values
        # xs:decimal helpers backed by decimal.Decimal for exactness.
        def gds_format_decimal(self, input_data, input_name=''):
            return_value = '%s' % input_data
            if '.' in return_value:
                # Trim trailing zeros, then a dangling decimal point
                # ('1.500' -> '1.5', '2.000' -> '2').
                return_value = return_value.rstrip('0')
                if return_value.endswith('.'):
                    return_value = return_value.rstrip('.')
            return return_value
        def gds_parse_decimal(self, input_data, node=None, input_name=''):
            try:
                decimal_value = decimal_.Decimal(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires decimal value')
            return decimal_value
        def gds_validate_decimal(self, input_data, node=None, input_name=''):
            try:
                value = decimal_.Decimal(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires decimal value')
            return value
        # Space-separated xs:decimal list formatting/validation.
        def gds_format_decimal_list(self, input_data, input_name=''):
            if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
                input_data = [str(s) for s in input_data]
            return ' '.join([self.gds_format_decimal(item) for item in input_data])
        def gds_validate_decimal_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    decimal_.Decimal(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of decimal values')
            return values
        # xs:double helpers (formatted via Python's default float repr).
        def gds_format_double(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_parse_double(self, input_data, node=None, input_name=''):
            try:
                fval_ = float(input_data)
            except (TypeError, ValueError) as exp:
                raise_parse_error(node, 'Requires double or float value: %s' % exp)
            return fval_
        def gds_validate_double(self, input_data, node=None, input_name=''):
            try:
                value = float(input_data)
            except (TypeError, ValueError):
                raise_parse_error(node, 'Requires double or float value')
            return value
        # Space-separated xs:double list formatting/validation.
        def gds_format_double_list(self, input_data, input_name=''):
            if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
                input_data = [str(s) for s in input_data]
            return '%s' % ' '.join(input_data)
        def gds_validate_double_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(
                        node, 'Requires sequence of double or float values')
            return values
        # xs:boolean helpers ('true'/'false' lexical forms).
        def gds_format_boolean(self, input_data, input_name=''):
            return ('%s' % input_data).lower()
        def gds_parse_boolean(self, input_data, node=None, input_name=''):
            if input_data in ('true', '1'):
                bval = True
            elif input_data in ('false', '0'):
                bval = False
            else:
                raise_parse_error(node, 'Requires boolean value')
            return bval
        def gds_validate_boolean(self, input_data, node=None, input_name=''):
            if input_data not in (True, 1, False, 0, ):
                raise_parse_error(
                    node,
                    'Requires boolean value '
                    '(one of True, 1, False, 0)')
            return input_data
        # Space-separated xs:boolean list formatting/validation.
        def gds_format_boolean_list(self, input_data, input_name=''):
            if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
                input_data = [str(s) for s in input_data]
            return '%s' % ' '.join(input_data)
        def gds_validate_boolean_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            # NOTE(review): split() yields strings, but the membership test
            # compares against True/1/False/0, which no string equals -- so
            # every non-empty list fails validation.  Known quirk of this
            # generateDS version; left unchanged in generated code.
            for value in values:
                if value not in (True, 1, False, 0, ):
                    raise_parse_error(
                        node,
                        'Requires sequence of boolean values '
                        '(one of True, 1, False, 0)')
            return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
    def gds_format_datetime(self, input_data, input_name=''):
        """Format a ``datetime.datetime`` as an XML Schema ``xs:dateTime``
        string: ``YYYY-MM-DDThh:mm:ss[.ffffff][Z|+hh:mm|-hh:mm]``.

        The fractional-second part is omitted when microsecond == 0; the
        timezone suffix is appended only when the value is timezone-aware.
        """
        if input_data.microsecond == 0:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
            )
        else:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
                # '%f' of microsecond/1e6 yields '0.xxxxxx'; [2:] drops '0.'
                ('%f' % (float(input_data.microsecond) / 1000000))[2:],
            )
        if input_data.tzinfo is not None:
            tzoff = input_data.tzinfo.utcoffset(input_data)
            if tzoff is not None:
                total_seconds = tzoff.seconds + (86400 * tzoff.days)
                if total_seconds == 0:
                    # UTC is written as the canonical 'Z' suffix
                    _svalue += 'Z'
                else:
                    if total_seconds < 0:
                        _svalue += '-'
                        total_seconds *= -1
                    else:
                        _svalue += '+'
                    hours = total_seconds // 3600
                    minutes = (total_seconds - (hours * 3600)) // 60
                    _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
        return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == | |
# <reponame>jaib1/yass  <gh_stars>10-100
"""
Functions for dimensionality reduction
"""
try:
from pathlib2 import Path
except Exception:
from pathlib import Path
from functools import reduce
import logging
import numpy as np
from yass.batch import BatchProcessor
from yass.util import check_for_files, LoadFile, save_numpy_object
logger = logging.getLogger(__name__)
@check_for_files(filenames=[LoadFile('scores_filename'),
                            LoadFile('spike_index_clear_filename'),
                            LoadFile('rotation_matrix_filename')],
                 mode='extract', relative_to='output_path')
def pca(path_to_data, dtype, n_channels, data_order, spike_index,
        spike_size, temporal_features, neighbors_matrix, channel_index,
        max_memory, output_path=None, scores_filename='scores.npy',
        rotation_matrix_filename='rotation.npy',
        spike_index_clear_filename='spike_index_clear_pca.npy',
        if_file_exists='skip'):
    """Apply PCA in batches

    Parameters
    ----------
    path_to_data: str
        Path to recordings in binary format

    dtype: str
        Recordings dtype

    n_channels: int
        Number of channels in the recordings

    data_order: str
        Recordings order, one of ('channels', 'samples'). In a dataset with k
        observations per channel and j channels: 'channels' means first k
        contiguous observations come from channel 0, then channel 1, and so
        on. 'sample' means first j contiguous data are the first observations
        from all channels, then the second observations from all channels and
        so on

    spike_index: numpy.ndarray
        A 2D numpy array, first column is spike time, second column is main
        channel (the channel where spike has the biggest amplitude)

    spike_size: int
        Spike size

    temporal_features: numpy.ndarray
        Number of output features

    neighbors_matrix: numpy.ndarray (n_channels, n_channels)
        Boolean numpy 2-D array where a i, j entry is True if i is considered
        neighbor of j

    channel_index: np.array (n_channels, n_neigh)
        Each row indexes its neighboring channels.
        For example, channel_index[c] is the index of
        neighboring channels (including itself)
        If any value is equal to n_channels, it is nothing but
        a space holder in a case that a channel has less than
        n_neigh neighboring channels

    max_memory:
        Max memory to use in each batch (e.g. 100MB, 1GB)

    output_path: str, optional
        Directory to store the scores and rotation matrix, if None, previous
        results on disk are ignored, operations are computed and results
        aren't saved to disk

    scores_filename: str, optional
        File name for the scores, if False, does not save data

    rotation_matrix_filename: str, optional
        File name for the rotation matrix, if False, does not save data

    spike_index_clear_filename: str, optional
        File name for spike index clear

    if_file_exists:
        What to do if there is already a file in the rotation matrix and/or
        scores location. One of 'overwrite', 'abort', 'skip'. If 'overwrite'
        it replaces the file if it exists, if 'abort' it raises a ValueError
        exception if the file exists, if 'skip' it skips the operation if the
        file exists

    Returns
    -------
    scores: numpy.ndarray
        Numpy 3D array  of size (n_waveforms, n_reduced_features,
        n_neighboring_channels) Scores for every waveform, second dimension in
        the array is reduced from n_temporal_features to n_reduced_features,
        third dimension depends on the number of neighboring channels

    rotation_matrix: numpy.ndarray
        3D array (window_size, n_features, n_channels)
    """

    ###########################
    # compute rotation matrix #
    ###########################

    bp = BatchProcessor(path_to_data, dtype, n_channels, data_order,
                        max_memory, buffer_size=spike_size)

    # compute PCA sufficient statistics
    logger.info('Computing PCA sufficient statistics...')
    stats = bp.multi_channel_apply(suff_stat, mode='memory',
                                   spike_index=spike_index,
                                   spike_size=spike_size)
    # sum the per-batch (SS matrix, spike count) pairs across all batches
    suff_stats = reduce(lambda x, y: np.add(x, y), [e[0] for e in stats])
    spikes_per_channel = reduce(lambda x, y: np.add(x, y),
                                [e[1] for e in stats])

    # compute PCA projection matrix
    logger.info('Computing PCA projection matrix...')
    rotation = project(suff_stats, spikes_per_channel, temporal_features,
                       neighbors_matrix)

    #####################################
    # waveform dimensionality reduction #
    #####################################

    logger.info('Reducing spikes dimensionality with PCA matrix...')
    res = bp.multi_channel_apply(score,
                                 mode='memory',
                                 pass_batch_info=True,
                                 rot=rotation,
                                 channel_index=channel_index,
                                 spike_index=spike_index)

    # stack per-batch results back into single arrays
    scores = np.concatenate([element[0] for element in res], axis=0)
    spike_index = np.concatenate([element[1] for element in res], axis=0)

    # save scores
    if output_path and scores_filename:
        path_to_score = Path(output_path) / scores_filename
        save_numpy_object(scores, path_to_score,
                          if_file_exists=if_file_exists,
                          name='scores')

    if output_path and spike_index_clear_filename:
        path_to_spike_index = Path(output_path) / spike_index_clear_filename
        save_numpy_object(spike_index, path_to_spike_index,
                          if_file_exists=if_file_exists,
                          name='Spike index PCA')

    if output_path and rotation_matrix_filename:
        path_to_rotation = Path(output_path) / rotation_matrix_filename
        save_numpy_object(rotation, path_to_rotation,
                          if_file_exists=if_file_exists,
                          name='rotation matrix')

    return scores, spike_index, rotation
def suff_stat(recordings, spike_index, spike_size):
    """
    Get PCA SS matrix per recording channel

    Parameters
    ----------
    recordings: np.ndarray (n_observations, n_channels)
        Multi-channel recordings
    spike_index: np.ndarray (number of spikes, 2)
        Spike indexes as returned from the threshold detector
    spike_size: int
        Spike size

    Returns
    -------
    numpy.ndarray
        3D array (window_size, window_size, n_channels) with the per-channel
        waveform outer-product sums
    numpy.ndarray
        1D array, with n_channels entries, the ith entry contains the number
        of spikes found in the ith channel
    """
    # column ids for the spike index matrix
    TIME_COL, CHANNEL_COL = 0, 1

    n_obs, n_channels = recordings.shape
    offsets = np.arange(-spike_size, spike_size + 1)
    window_size = len(offsets)

    ss_per_channel = np.zeros((window_size, window_size, n_channels))
    counts = np.zeros(n_channels, 'int32')

    for chan in range(n_channels):
        # spike times whose main channel is this channel, restricted to
        # those far enough from the recording edges to extract a full window
        times = spike_index[spike_index[:, CHANNEL_COL] == chan, TIME_COL]
        in_bounds = np.logical_and(times > spike_size,
                                   times < n_obs - spike_size - 1)
        times = times[in_bounds]

        # one column per spike, one row per window offset
        waveforms = np.zeros((window_size, len(times)))
        for row, offset in enumerate(offsets):
            waveforms[row, :] = recordings[times + offset, chan]

        ss_per_channel[:, :, chan] = np.matmul(waveforms, waveforms.T)
        counts[chan] = len(times)

    return ss_per_channel, counts
def _top_eigvecs(matrix, n_features):
    """Return the eigenvectors of square *matrix* belonging to its
    *n_features* largest eigenvalues, as columns ordered by decreasing
    eigenvalue."""
    size = matrix.shape[0]
    w, v = np.linalg.eig(matrix)
    # argsort is ascending; the reversed slice starting past the end picks
    # the top n_features indices in descending-eigenvalue order
    return v[:, np.argsort(w)[size:(size - n_features - 1):-1]]


def project(ss, spikes_per_channel, n_features, neighbors):
    """
    Get PCA projection matrix per channel

    Parameters
    ----------
    ss: matrix
        SS matrix as returned from get_pca_suff_stat
    spikes_per_channel: array
        Number of spikes per channel
    n_features: int
        Number of features
    neighbors: matrix
        Boolean neighbors matrix

    Returns
    -------
    numpy.ndarray
        3D array (window_size, n_features, n_channels)
    """
    window_size, _, n_channels = ss.shape
    # allocate rotation matrix for each channel
    rot = np.zeros((window_size, n_features, n_channels))

    # eigenvectors of the pooled (all-channel) statistics, used as a
    # fallback for channels whose neighborhood has too few spikes
    rot_all = _top_eigvecs(np.sum(ss, 2), n_features)

    for c in range(n_channels):
        if spikes_per_channel[c] <= window_size:
            # too few spikes on this channel: borrow statistics from its
            # neighborhood, or from all channels if the neighborhood is
            # also too sparse
            if np.sum(spikes_per_channel[neighbors[c, :]]) <= window_size:
                rot[:, :, c] = rot_all
            else:
                rot[:, :, c] = _top_eigvecs(
                    np.sum(ss[:, :, neighbors[c, :]], 2), n_features)
        else:
            rot[:, :, c] = _top_eigvecs(ss[:, :, c], n_features)

    return rot
def score(recording, idx_local, idx, rot, channel_index, spike_index):
"""
Reduce waveform dimensionality using a rotation matrix. Optionally
return scores only for neighboring channels instead of all channels
Parameters
----------
recordings: np.ndarray (n_observations, n_channels)
Multi-channel recordings
rot: numpy.ndarray
Rotation matrix. Array with dimensions (n_temporal_features,
n_features, n_channels) for PCA matrix or (n_temporal_features,
n_features) for autoencoder matrix
channel_index: np.array (n_channels, n_neigh)
Each row indexes its neighboring channels.
For example, channel_index[c] is the index of
neighboring channels (including itself)
If any value is equal to n_channels, it is nothing but
a space holder in a case that a channel has less than
n_neigh neighboring channels
spike_index: np.array (n_spikes, 2)
contains spike information, the first column is the
spike time and the second column is the main channel
Returns
-------
scores: np.array (n_spikes, n_features, n_neighboring_channels)
Scores for every waveform, second dimension in the array is reduced
from n_temporal_features to n_features, third dimension
is number of neighboring channels.
"""
data_start = idx[0].start
data_end = idx[0].stop
# get offset that will be applied
offset = idx_local[0].start
spike_time = spike_index[:, 0]
spike_index = spike_index[np.logical_and(spike_time >= data_start,
spike_time < data_end)]
spike_index[:, 0] = spike_index[:, 0] - data_start + offset
# obtain shape information
n_observations, n_channels = recording.shape
n_data = spike_index.shape[0]
n_neigh = channel_index.shape[1]
# if rot has two dimension, rotation matrix is used for every
# channels, if it is three, the third dimension has to match
# the number of channels
if rot.ndim == 2:
# neural net case
n_temporal_features, n_reduced_features = rot.shape
# copy rotation matrix to all channels
rot = np.tile(rot[:, :, np.newaxis], [1, 1, n_channels])
elif rot.ndim == 3:
# pca case
n_temporal_features, n_features, n_channels_ = rot.shape
if n_channels != n_channels_:
raise ValueError('n_channels does not match between '
'recording ({}) and the rotation matrix ({})'
.format(n_channels,
n_channels_))
else:
raise ValueError('rot must have 2 or 3 dimensions (has {})'.format(
rot.ndim))
# n_temporal_features has to be an odd number
if n_temporal_features % 2 != 1:
raise ValueError('waveform length needs to be'
'an odd number (has {})'.format(
n_temporal_features))
R = int((n_temporal_features-1)/2)
rot = np.transpose(rot, [2, 1, 0])
scores = np.zeros((n_data, n_features, n_neigh))
for channel in range(n_channels):
# get neighboring channel information
ch_idx = channel_index[channel][
channel_index[channel] < n_channels]
# get spikes whose main channel is equal to channel
idx_c = spike_index[:, 1] == channel
# get waveforms
spt_c = spike_index[idx_c, 0]
waveforms = np.zeros((spt_c.shape[0], ch_idx.shape[0],
n_temporal_features))
for j in range(spt_c.shape[0]):
waveforms[j] | |
#!/usr/bin/env python
#make thumbnails centered at list of ra/dec's
#imname='../data/new/111-18/g/15/Skymapper_805310385_00000_2011-04-12T19:16:27_15.fits'
#ras=[168.98898]
#decs=[-18.014603]
#outdir='.'
import pyfits
import numpy as np
from math import pi
import subprocess
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import cm
import os
import sys
import ephem
def STAP_make_thumbs_xy(imname,xs,ys,box=120,outdir=None,usesubid=None,objnames=None,suffix='',outputnames=None):
    """Make PNG thumbnails of size *box* pixels centered at the 1-based
    pixel coordinates (xs, ys) of FITS image *imname* (falls back to
    imname+'.gz').  Thumbnails are zscale-stretched, reoriented using the
    header CD matrix, and drawn with a cross hair at the target position.

    Python 2 code.  Returns 1 on error; None on success.
    """
    if len(xs) != len(ys):
        print "input xs/ys arrays don't match"
        return(1)
    if outputnames:
        if len(xs) != len(outputnames):
            print "length of output stamp names do not match length of coordinates arrays"
            return(1)
    if objnames:
        if len(xs) != len(objnames):
            print "length of object names do not match length of coordinates arrays"
            return(1)
    # fall back to the gzipped file if the plain image is missing
    if not os.path.exists(imname):
        imname=imname+'.gz'
        if not os.path.exists(imname):
            print "can't find image %s"%imname
            return(1)
    try:
        fullimdata=pyfits.getdata(imname)
        imheader=pyfits.getheader(imname)
    except:
        print "can't read image %s"%imname
        return(1)
    if not outputnames:
        if usesubid is None:
            # default stamp basename: first four '_'-separated fields of the image name
            imnamebase=os.path.splitext(os.path.basename(imname))[0]
            imnamebase='_'.join(imnamebase.split('_')[0:4])
        else:
            imnamebase=usesubid
    if outdir==None:
        outdir=os.path.dirname(imname)
        # NOTE: `outdir in ''` is only True when outdir == ''
        if outdir in '':
            outdir='.'
    [imy,imx]=fullimdata.shape
    # one zscale stretch for the whole image, shared by all stamps
    slow,shigh=zscale(fullimdata)
    nstamps=len(xs)
    for ind in range(nstamps):
        # convert 1-based FITS pixel coordinates to 0-based array indices
        xct=xs[ind]-1.
        yct=ys[ind]-1.
        xll=xct-box/2
        xhh=xct+box/2
        yll=yct-box/2
        yhh=yct+box/2
        #not really necessary, always xll <xhh, yll<yhh
        xlow=np.floor(np.min([xll,xhh]))
        xhigh=np.ceil(np.max([xll,xhh]))
        ylow=np.floor(np.min([yll,yhh]))
        yhigh=np.ceil(np.max([yll,yhh]))
        # clip the stamp to the image boundary
        if xlow <0:xlow=0
        if ylow <0:ylow=0
        if xhigh >imx-1: xhigh=imx-1
        if yhigh >imy-1: yhigh=imy-1
        if yhigh<=ylow or xhigh<=xlow:
            if objnames:
                print "Stamp size less than 1 for %s"%objnames[ind]
            else:
                print "Stamp size less than 1 for %f/%f"%(xs[ind],ys[ind])
            continue
        # NOTE(review): xlow/ylow etc. are numpy floats used as slice
        # indices; recent numpy versions reject non-integer indices -- confirm
        imdata=fullimdata[ylow:yhigh+1,xlow:xhigh+1]
        xrel=xct-xlow
        yrel=yct-ylow
        #figure out rotation angle
        #from header
        if ('CD1_1' in imheader) and ('CD2_2' in imheader):
            # flip/rotate so the stamp is displayed North-up/East-left,
            # adjusting the cross-hair position accordingly
            cd1_1=imheader['CD1_1']
            cd2_2=imheader['CD2_2']
            if cd1_1<0 and cd2_2>0: rotated=imdata
            if cd1_1<0 and cd2_2<0:
                rotated=np.fliplr(np.rot90(imdata,k=2))
                yrel=yhigh-yct
            if cd1_1>0 and cd2_2>0:
                rotated=np.fliplr(imdata)
                xrel=xhigh-xct
            if cd1_1>0 and cd2_2<0:
                rotated=np.rot90(imdata,k=2)
                xrel=xhigh-xct
                yrel=yhigh-yct
        else:
            print "CD1_1 and/or CD2_2 are not found in header, thumb orientation might be incorrect"
            #flip left/right by default
            rotated=np.fliplr(imdata)
        #scale the data
        #slow,shigh=zscale(rotated)
        rotated=rescale(rotated,slow,shigh)
        #output file name
        if outputnames:
            stampname=outdir + '/%s'%outputnames[ind]
        else:
            if objnames:
                stampname=outdir+'/%s_%s%s.png'%(objnames[ind],imnamebase,suffix)
            else:
                stampname=outdir+'/'+imnamebase+'_%04.0f_%04.0f%s.png'%(xs[ind],ys[ind],suffix)
        #plot data
        pyplot.figure(figsize=(4,4))
        ax=pyplot.axes([0,0,1,1],frameon=False)
        ax.set_axis_off()
        pyplot.imshow(rotated,cmap=cm.gray,origin='lower',interpolation='nearest')
        #add cross hair
        xsize=xhigh-xlow
        ysize=yhigh-ylow
        ssize=np.min([xsize,ysize])
        # cross-hair segment endpoints in axes-fraction coordinates
        hline=np.array([xrel-ssize/3.,xrel-ssize/10.,xrel+ssize/10.,xrel+ssize/3.])/xsize
        vline=np.array([yrel-ssize/3.,yrel-ssize/10.,yrel+ssize/10.,yrel+ssize/3.])/ysize
        pyplot.axhline(y=yrel,xmin=hline[0],xmax=hline[1],c='1')
        pyplot.axhline(y=yrel,xmin=hline[2],xmax=hline[3],c='1')
        pyplot.axvline(x=xrel,ymin=vline[0],ymax=vline[1],c='1')
        pyplot.axvline(x=xrel,ymin=vline[2],ymax=vline[3],c='1')
        pyplot.savefig(stampname)
        pyplot.close()
def STAP_make_thumbs(imname,ras,decs,box=1.,pbox=None,outdir=None,usesubid=None,objnames=None,suffix=''):
if len(ras) != len(decs):
print "input ra/dec arrays don't match"
return(1)
if not objnames is None:
if len(ras) != len(objnames):
print "length of object names do not match length of coordinates arrays"
return(1)
if not os.path.exists(imname):
imname=imname+'.gz'
if not os.path.exists(imname):
print "can't find image %s"%imname
return(1)
nstamps=len(ras)
[xs,ys]=sky2xy(imname,ras,decs)
if not pbox:
#figure out box size in pixel
header=pyfits.gethead(imname)
if "CD1_1" in header:
scale=header["CD1_1"]
pbox=round(box/60./scale/2.)*2.
else:
ras=np.array(ras)
decs=np.array(decs)
hboxra=box/2./60./np.cos(decs*pi/180.)
hboxdec=box/2./60.
ralimls=ras-hboxra
ralimhs=ras+hboxra
declimls=decs-hboxdec
declimhs=decs+hboxdec
[xlls,ylls]=sky2xy(imname,ralimls,declimls)
[xhhs,yhhs]=sky2xy(imname,ralimhs,declimhs)
pbox=np.mean(xhhs-xlls)
pbox2=np.mean(yhhs-ylls)
if pbox2 >pbox: pbox=pbox2
pbox=round(pbox/2.)*2.
if not objnames:
objnames=[]
for ind in range(nstamps):
radecstr=radec2str(ras[ind],decs[ind])
objnames.append('J'+radecstr)
STAP_make_thumbs_xy(imname,xs,ys,box=pbox,outdir=outdir,usesubid=usesubid,objnames=objnames,suffix=suffix)
def radec2str(rad,decd):
    """Format RA/Dec in degrees as a compact HHMMSSss+DDMMSSs-style
    coordinate string (no delimiters, decimal points removed)."""
    ra_part = sixtystr(rad/15., decimal=2, delim='').replace('.', '')
    sign = '-' if decd < 0 else '+'
    dec_part = sixtystr(abs(decd), decimal=1, delim='').replace('.', '')
    return ra_part + sign + dec_part
def sixtystr(x,decimal=0,delim=':'):
    """Convert decimal hours/degrees *x* to a sexagesimal string with
    *decimal* fractional digits on the seconds field, joined by *delim*."""
    xh=np.floor(x)
    xm=np.floor((x-xh)*60.)
    xs=(x-xh-xm/60.)*3600.
    xs=np.round(xs*10**decimal)/10**decimal
    if decimal==0:
        # NOTE(review): '%02f' formats with six decimal places; '%02.0f'
        # was probably intended -- kept as-is to preserve existing output
        xsstr='%02f'%xs
    else:
        # Build the format string (e.g. '%05.2f') directly instead of via
        # eval() on a constructed expression -- same result, no eval.
        xsstr=('%%0%d.%df'%(3+decimal,decimal))%xs
    return delim.join(('%02d'%xh,'%02d'%xm,xsstr))
def sky2xy(imname,ras,decs):
    """Convert sky coordinates (degrees) to image pixel coordinates by
    shelling out to the external ``sky2xy`` program (presumably WCSTools).

    Returns [x, y] lists of floats; on any failure both lists are filled
    with -999.  Python 2 code.
    """
    cmd="sky2xy %s"%(imname)
    nobj=len(ras)
    for ind in range(nobj):
        cmd="%s %f %f"%(cmd,ras[ind],decs[ind])
    cmd_orig = cmd
    cmd = cmd.split(" ")
    process=subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    (stdoutstr,stderrstr) = process.communicate()
    status = process.returncode
    if status != 0:
        print cmd_orig, "failed with the following message:"
        print stderrstr
        print "Couldn't convert ra/dec to image coordinates"
        return [[-999]*nobj,[-999]*nobj]
    else:
        # one output line per input coordinate pair
        lines=stdoutstr.strip().split('\n')
        if len(lines) !=nobj:
            print "Output does not match input number of objects"
            return [[-999]*nobj,[-999]*nobj]
        else:
            x=[]
            y=[]
            for ind in range(nobj):
                # columns 4 and 5 of each output line hold pixel x and y
                x.append(float(lines[ind].split()[4]))
                y.append(float(lines[ind].split()[5]))
            return [x,y]
def xy2sky(imname,xs,ys):
cmd="xy2sky %s"%(imname)
nobj=len(xs)
for ind in range(nobj):
cmd="%s %f %f"%(cmd,xs[ind]+1.,ys[ind]+1.)
cmd = cmd.split(" ")
process=subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
(stdoutstr,stderrstr) = process.communicate()
status = process.returncode
if status != 0:
print "Couldn't convert x/y to sky coordinates"
return [[-999]*nobj,[-999]*nobj]
else:
lines=stdoutstr.strip().split('\n')
if len(lines) !=nobj:
print "Output does not match input number of objects"
return [[-999]*nobj,[-999]*nobj]
else:
ras=[]
decs=[]
for ind in range(nobj):
ras.append(ephem.hours(stdoutstr.split()[0])*180./pi)
decs.append(ephem.degrees(stdoutstr.split()[1])*180./pi)
return [ras,decs]
def rescale(im,z1,z2):
    """Linearly map *im* from the range [z1, z2] onto [0, 1], clipping
    out-of-range values; a zero-width range is treated as width 1."""
    span = z2 - z1
    if span == 0:
        span = 1.
    return np.clip((im - z1) / span, 0, 1)
def zscale (image, nsamples=1000, contrast=0.25, bpmask=None, zmask=None):
    """Implement IRAF zscale algorithm
    nsamples=1000 and contrast=0.25 are the IRAF display task defaults
    bpmask and zmask not implemented yet
    image is a 2-d numpy array
    returns (z1, z2) display limits for a linear stretch
    """
    MAX_REJECT = 0.5
    MIN_NPIXELS = 5
    KREJ = 2.5
    MAX_ITERATIONS = 5

    # Sample the image
    samples = zsc_sample (image, nsamples, bpmask, zmask)
    npix = len(samples)
    samples.sort()
    zmin = samples[0]
    zmax = samples[-1]
    # For a zero-indexed array
    center_pixel = (npix - 1) // 2
    if npix%2 == 1:
        median = samples[center_pixel]
    else:
        median = 0.5 * (samples[center_pixel] + samples[center_pixel + 1])
    #
    # Fit a line to the sorted array of samples
    minpix = max(MIN_NPIXELS, int(npix * MAX_REJECT))
    ngrow = max (1, int (npix * 0.01))
    ngoodpix, zstart, zslope = zsc_fit_line (samples, npix, KREJ, ngrow,
                                             MAX_ITERATIONS)
    if ngoodpix < minpix:
        # too many samples rejected: fall back to the full data range
        z1 = zmin
        z2 = zmax
    else:
        # steepen the fitted slope by 1/contrast and expand about the median
        if contrast > 0: zslope = zslope / contrast
        z1 = max (zmin, median - (center_pixel - 1) * zslope)
        z2 = min (zmax, median + (npix - center_pixel) * zslope)
    return z1, z2
def zsc_sample (image, maxpix, bpmask=None, zmask=None):
    """Return up to *maxpix* pixels from *image*, sampled on a regular
    square grid (flattened to 1-D).  The bpmask/zmask arguments are
    accepted for interface compatibility but not yet used."""
    n_rows = image.shape[0]
    n_cols = image.shape[1]
    # grid stride chosen so roughly maxpix samples are produced
    stride = int(max(1.0, np.sqrt((n_rows - 1) * (n_cols - 1) / float(maxpix))))
    return image[::stride, ::stride].flatten()[:maxpix]
def zsc_fit_line (samples, npix, krej, ngrow, maxiter):
    """Fit a straight line to the sorted *samples* with iterative k-sigma
    rejection (IRAF zscale helper).

    Returns (ngoodpix, zstart, zslope): the number of unrejected samples
    and the fitted intercept/slope mapped back to the pixel-index range
    [0, npix-1].
    """
    MAX_REJECT = 0.5
    MIN_NPIXELS = 5
    GOOD_PIXEL = 0
    BAD_PIXEL = 1
    #
    # First re-map indices from -1.0 to 1.0
    xscale = 2.0 / (npix - 1)
    xnorm = np.arange(npix)
    xnorm = xnorm * xscale - 1.0

    ngoodpix = npix
    minpix = max (MIN_NPIXELS, int (npix*MAX_REJECT))
    last_ngoodpix = npix + 1

    # This is the mask used in k-sigma clipping.  0 is good, 1 is bad
    badpix = np.zeros(npix, dtype="int32")

    #
    #  Iterate

    for niter in range(maxiter):

        if (ngoodpix >= last_ngoodpix) or (ngoodpix < minpix):
            break
        # BUG FIX: remember the previous iteration's good-pixel count so the
        # convergence test above can fire; it was never updated before, so
        # the loop always ran for the full maxiter iterations.
        last_ngoodpix = ngoodpix

        # Accumulate sums to calculate straight line fit
        goodpixels = np.where(badpix == GOOD_PIXEL)
        sumx = xnorm[goodpixels].sum()
        sumxx = (xnorm[goodpixels]*xnorm[goodpixels]).sum()
        sumxy = (xnorm[goodpixels]*samples[goodpixels]).sum()
        sumy = samples[goodpixels].sum()
        npts = len(goodpixels[0])  # renamed from `sum` (shadowed the builtin)

        delta = npts * sumxx - sumx * sumx
        # Slope and intercept
        intercept = (sumxx * sumy - sumx * sumxy) / delta
        slope = (npts * sumxy - sumx * sumy) / delta

        # Subtract fitted line from the data array
        fitted = xnorm*slope + intercept
        flat = samples - fitted

        # Compute the k-sigma rejection threshold
        ngoodpix, mean, sigma = zsc_compute_sigma (flat, badpix, npix)

        threshold = sigma * krej

        # Detect and reject pixels further than k*sigma from the fitted line
        lcut = -threshold
        hcut = threshold
        below = np.where(flat < lcut)
        above = np.where(flat > hcut)

        badpix[below] = BAD_PIXEL
        badpix[above] = BAD_PIXEL

        # Convolve with a kernel of length ngrow to also reject neighbors
        # of rejected pixels
        kernel = np.ones(ngrow,dtype="int32")
        badpix = np.convolve(badpix, kernel, mode='same')

        ngoodpix = len(np.where(badpix == GOOD_PIXEL)[0])

    # Transform the line coefficients back to the X range [0:npix-1]
    zstart = intercept - slope
    zslope = slope * xscale

    return ngoodpix, zstart, zslope
def zsc_compute_sigma (flat, badpix, npix):
GOOD_PIXEL = 0
# Compute the rms deviation from the mean of a flattened array.
# Ignore rejected pixels
# Accumulate sum and sum of squares
goodpixels = np.where(badpix == GOOD_PIXEL)
sumz = flat[goodpixels].sum()
sumsq = (flat[goodpixels]*flat[goodpixels]).sum()
ngoodpix = len(goodpixels[0]) | |
doc_type_stats = []
for (
tag,
max_depth,
max_repets,
min_repets,
avg_Repets,
) in self.pm.get_single_xml_type_stats(xml_type):
doc_type_stats.append(
[tag, max_depth, max_repets, min_repets, avg_Repets]
)
doc_types[xml_type] = {
"no_of_docs": no_of_docs,
"doc_type_stats": doc_type_stats,
}
return doc_types
def get_processed_file_overview_with_samples(self):
doc_data = {}
doc_samples = self.inspect_samples()
file_stats = self.get_processed_file_overview()
for doc_type in file_stats:
doc_data[doc_type] = {
"sample_data": doc_samples[doc_type],
"no_of_docs": file_stats[doc_type]["no_of_docs"],
"tag_stats": file_stats[doc_type]["doc_type_stats"],
}
return doc_data
def info(self):
out = (
f"File name: {self.file_name_root}",
f"No of lines in file: {self.line_count}",
f"No of valid documents in file: {self.no_of_docs_in_file}",
)
return out
    def debug_info(self):
        """Return a dict of internal state useful when debugging file parsing."""
        out = {
            "No of valid documents in file": self.no_of_docs_in_file,
            "Tags to ignore": self.tags_to_ignore,
            "Document delimiters found": self.doc_delimiters,
            # "Documents in file": self.xml_source,
            # "Parsed file content": self.doc_types,
        }
        return out
def get_process_log(self, log_level: LogLevel = None, doc_id: int = None):
log_lvl = {
0: "INFO",
1: "WARNING",
2: "ERROR",
}
if not log_level:
log_level = LogLevel.ALL
if doc_id:
return self.pm.get_process_log(log_level, doc_id)
else:
for row in self.pm.get_process_log(log_level):
doc_id = row["DocID"]
log_lev = log_lvl[row["LogLevel"]]
log_entry = row["LogEntry"]
row_out = (doc_id, log_lev, log_entry)
yield row_out
def get_xml_docs_valid(self):
for row in self.pm.get_all_docs(DocValidity.VALID):
row_out = (row["DocID"], row["DocText"])
yield row_out
# @profile
def to_excel(self, out_path=None, attrs_to_split_on=None):
# create the output
cnt_files = 0
dt_now = dt.datetime.now()
timestamp = dt_now.strftime("%Y%m%d_%H%M%S")
if out_path:
file_out_path = out_path
else:
file_out_path = self._data_path
file_out_name = f"{self.file_name_root}_{timestamp}.xlsx"
file_out = os.path.join(file_out_path, file_out_name)
# if we have output to produce, loop through the dict holding all document type lists
# and create an Excel file for each output type
# if len(self.doc_types) > 0:
# # create the excel writer object
# with pd.ExcelWriter(
# file_out, engine="xlsxwriter"
# ) as writer: # pylint: disable=abstract-class-instantiated
# for item in self.doc_types:
# df = pd.DataFrame(self.doc_types[item])
# df.to_excel(writer, index=False, sheet_name=item)
# cnt_files += 1
# # return f"{cnt_files} data tabs created in output file!"
# return file_out_name
# else:
# return "No output data extracted - Excel file not created!"
# Create file
workbook = xlsxwriter.Workbook(file_out)
for doc_type in self.pm.get_xml_types():
try:
# Sheet names in excel can have up to 31 chars
worksheet = workbook.add_worksheet(name=doc_type[0:31])
except:
pass
# debug...
# print(f"{datetime.now():%Y-%m-%d %H:%M:%S}: creating output for {doc_type}")
# create the output table for each document type
if attrs_to_split_on:
# print(f"this is the data received from frontend: {attrs_to_split_on}")
if doc_type in attrs_to_split_on:
create_record_on = set(attrs_to_split_on[doc_type])
# print(
# f"passing on the following split on attrs for type {doc_type}: {create_record_on}"
# )
self.pm.create_output_by_xml_type(
doc_type=doc_type, create_record_on=create_record_on
)
else:
self.pm.create_output_by_xml_type(doc_type=doc_type)
else:
self.pm.create_output_by_xml_type(doc_type=doc_type)
# write the header to the output sheet
header = (row["Tag"] for row in self.pm.get_xml_tags_by_type(doc_type))
worksheet.write_row(0, 0, header)
# write the data row by row to the output sheet
for row_number, row in enumerate(
self.pm.get_output_by_xml_type(doc_type), start=1
):
worksheet.write_row(row_number, 0, tuple(row))
# Saves the new document
workbook.close()
self.generated_excel_name = file_out_name
self.generated_excel = True
return file_out_name
# ------------------
# Internal Functions
# ------------------
    def _split_into_documents(self):
        """Split ``self.file_content`` into individual XML documents.

        Each fragment is validated and stored (valid or invalid) through the
        persistence manager; the delimiters encountered are collected in
        ``self.doc_delimiters``.  Two paths exist: the built-in delimiter
        regexes (one capturing group each) and a single custom separator.

        Returns True when every document validated, False otherwise.
        """
        out = True
        print(f"Splitting File with delim: {self.re_split}")
        patts = set()
        idx_start = 0
        if not self.custom_separator:
            # every regex has to be a single capturing group
            num_groups = len(self.re_split) + 1
            # check if the file contains any known delimiter
            if (
                not "<xml" in self.file_content.lower()
                and not "<document" in self.file_content.lower()
            ):
                self.pm.log_error(
                    0,
                    "Error during splitting of document: None of the known delimiters <xml*> and/or <document*> found",
                )
                self.pm.commit_writes()
                print("No delimiter found!!!")
                out = False
                return out
            # regex.split returns a list containing the resulting substrings.
            # If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list.
            # These groups are returned as elements *before* the actual resulting substring. I.e. if 4 capturing groups have been defined,
            # 4 items representing matches of these groups will be returned and the actual substring itself will be returned as the 5th item in the list.
            # the list can start with an empty string.
            list_raw = self.comp_re_split.split(self.file_content)
            if list_raw[0] is not None:
                if len(list_raw[0]) == 0:
                    # ignore 1st emptyp string
                    idx_start = 1
            docs_to_process = len(list_raw)
            doc_idx = 0
            for idx, item in enumerate(list_raw[idx_start:], start=1):
                if idx % 20_000 == 0:
                    # commit every 20'000 records
                    self.pm.commit_writes()
                progress_pct = round((idx * 100) / docs_to_process, 2)
                if (progress_pct).is_integer():
                    print(
                        f"Processing split file is at {progress_pct:3.0f}%...", end="\r"
                    )
                # the actual processing
                if item:
                    # every num_groups-th element is the document body; the
                    # elements before it are the captured delimiter groups
                    if idx % num_groups == 0:
                        # this is the actual content we want
                        validation_result = self._validate_document(item)
                        doc_idx += 1
                        if validation_result.valid:
                            # store the valid item
                            # self.xml_source.append(item.strip())
                            self.pm.store_doc(doc_idx, DocValidity.VALID, item.strip())
                        else:
                            out = False
                            # store the invalid item
                            # self.xml_source_invalid.append(
                            #     {
                            #         "Document": item.strip(),
                            #         "Validation Result": validation_result.output,
                            #     }
                            # )
                            self.pm.store_doc(
                                doc_idx,
                                DocValidity.INVALID,
                                item.strip(),
                                validation_result.output,
                            )
                    else:
                        patts.add(item)
            self.doc_delimiters = list(patts)
        else:
            # the custom delimiter has only one group, so add 1 + 1
            num_groups = 2
            if not self.comp_re_split.search(self.file_content):
                self.pm.log_error(
                    0,
                    "Error during splitting of document: None of the known delimiters <xml*> and/or <document*> found",
                )
                print("No delimiter found!!!")
                self.pm.commit_writes()
                out = False
                return out
            # regex.split returns a list containing the resulting substrings.
            # If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list.
            # These groups are returned as elements *before* the actual resulting substring. I.e. if 4 capturing groups have been defined,
            # 4 items representing matches of these groups will be returned and the actual substring itself will be returned as the 5th item in the list.
            # the list can start with an empty string.
            list_raw = self.comp_re_split.split(self.file_content)
            if list_raw[0] is not None:
                if len(list_raw[0]) == 0:
                    # ignore 1st emptyp string
                    idx_start = 1
            docs_to_process = len(list_raw)
            doc_idx = 0
            for idx, item in enumerate(list_raw[idx_start:], start=1):
                if idx % 20_000 == 0:
                    # commit every 20'000 records
                    self.pm.commit_writes()
                progress_pct = round((idx * 100) / docs_to_process, 2)
                if (progress_pct).is_integer():
                    print(
                        f"Processing split file is at {progress_pct:3.0f}%...", end="\r"
                    )
                # the actual processing
                if item:
                    if self.comp_re_split.fullmatch(item):
                        # this is the delimiter
                        patts.add(item)
                    else:
                        # this is the actual content we want
                        validation_result = self._validate_document(item)
                        doc_idx += 1
                        if validation_result.valid:
                            # store the valid item
                            # self.xml_source.append(item.strip())
                            self.pm.store_doc(doc_idx, DocValidity.VALID, item.strip())
                        else:
                            out = False
                            # store the invalid item
                            # self.xml_source_invalid.append(
                            #     {
                            #         "Document": item.strip(),
                            #         "Validation Result": validation_result.output,
                            #     }
                            # )
                            self.pm.store_doc(
                                doc_idx,
                                DocValidity.INVALID,
                                item.strip(),
                                validation_result.output,
                            )
            self.doc_delimiters = list(patts)
        # once all data is processed and stored, create the indices
        self.pm.commit_writes()
        self.pm.create_indices(IndexGroup.DOC_STORE)
        if out == False:
            self.pm.log_warning(
                0,
                "File successfully split into single documents with invalid docutments found.",
            )
        else:
            self.pm.log_success(
                0, "File successfully split into single documents without any errors."
            )
        self.pm.commit_writes()
        # print(f"out={out}")
        return out
# @profile
def _validate_document(self, xml_document):
# initiate the validator object
validator = XmlValidator()
# validating the basic well-formedness of the xml document
validation_result = validator.validate_doc(xml_document)
return validation_result
# -------------------------------------------------------------------------------
# General Service Functions
# ------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# The script entry point
# -------------------------------------------------------------------------------
if __name__ == "__main__":
    from pprint import pprint

    # Manual smoke test: load a sample file, process it, and time both steps.
    path_name = "P:/Programming/Python/xml_examples/"
    # file_name = "xml_test_data_small_no_prolog.xml"
    # file_name = "pain.008.sepa.duplicates_and_invalid.xml"
    file_name = "mixed.pain.camt.xml"
    # file_name = "camt.054_sweden.xml"
    # file_name = "pain.008.sepa.xml"
    # file_name = "xml_test_data_small_2k.xml"
    # file_name = "xml_test_data_large.xml"
    # file_name = "books.xml"
    file_in = path_name + file_name
    db_path = "C:/Users/Dani/AppData/Local/Temp"
    # db_path = "P:/Programming/Python"
    # load the file into the file processor
    stopwatch = StopWatch(run_label="processing file")
    fp = FileProcessor(source_file=file_in, data_directory=db_path)
    print(stopwatch.time_run())
    # get the basic information for the file
    # print(f"FileProcessor object:\n{fp}")
    # get the full debug information
    # print(f"Debug Info:\n {fp.debug_info()}\n")
    # parse the documents within the file
    # this is where the actual XML processing happens
    stopwatch = StopWatch(run_label="parsing documents")
    fp.process_file()
    print(stopwatch.time_run())
    # print(f"pf doc types:\n{fp.doc_types}")
    # get the samples - this returns a dict with
    # the 1st document of each document tpye in the file
    # print(f"Inspect Sample:\n{fp.inspect_samples()}")
    # export the parsed documents to the excel file
    # stopwatch = StopWatch(run_label="creating excel")
    # fp.to_excel()
    # print(stopwatch.time_run())
# mycdo/__init__.py
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 04, 2015
@author: <NAME>
This module is the python interface to the command line tool of Climate Data Operators (CDO). Each CDO operator is wrapped into a string in a list, and the chain of CDO operators is realized by first adding these individual lists together to form a long list and then transforming the long list into a CDO command string. For example,
get_ifile('data.nc') --> ['data.nc']
sel(name='sst') --> ['-selname,sst']
stat('tim','mean') --> ['-timmean']
and
get_cdocmd( get_ifile('data.nc') + sel(name='sst') + stat('tim','mean'), ofile1='sst_timmean.nc' )
-->
'cdo -timmean -selname,sst data.nc sst_timmean.nc'
Now we can use the os.system function to run the CDO command:
os.system('cdo -timmean -selname,sst data.nc sst_timmean.nc')
The module is designed such that it can take full advantage of the CDO chain operators.
The module requires the CDO command tool as well as python module Numpy,netCDF4 and Pandas. The module also has the capability to manipulate data on a remote server as long as:
1) (required) The remote server has installed the CDO tools (required).
2) (optional) The remote server has the mycdo python module (e.g. in /home/wenchay/mypython/mycdo.py)
3) (optional) The remote server user home path has a python file named .cdo_dump.py with content as:
import sys
sys.path.append(mycdo_module_dir)
import mycdo as mc
mc.dump('tmp.nc')
"""
from __future__ import print_function
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
import os, os.path
import tempfile
from netCDF4 import Dataset
import pandas as pd
import numpy as np
import shutil
import glob
import subprocess
# parameters
# Remote host spec used by the ssh/scp helpers below (value is a redacted
# placeholder); also compared against os.uname()[1] in at_local().
_server='<EMAIL>'
# Home directory on the remote server; prepended to bare relative paths
# by get_ifile() when the file does not exist locally.
_userHome='/home/wenchay/'
#
# ######## functions that wrap cdo operators into strings in a list
# ---- input file
def get_ifile(ifile):
    '''Return a list with the ifile as the element. The ifile can be a string or list.'''
    # Already a chain fragment: pass it through untouched.
    if isinstance(ifile, list):
        return ifile
    fname = ifile
    # A relative path that does not exist locally is assumed to live in the
    # remote user's home directory.
    if not os.path.exists(fname) and not fname.startswith('/'):
        fname = _userHome + fname
    return [fname]
def get_ifiles(ifile):
    '''Same as get_ifile except that the file name may include unix-style wildcards, hence the shell quoting.'''
    fname = get_ifile(ifile)[0]
    return ["'" + fname + "'"]
def at_local(ifile):
    '''Condition on whether the ifile is on local machine or on the remote server.'''
    if isinstance(ifile, str):
        fname = ifile
    else:
        fname = ifile[0]
    # strip the shell quoting added by get_ifiles()
    if fname.startswith("'") and fname.endswith("'"):
        fname = fname[1:-1]
    # Local if the pattern matches something here, or if this host *is* the
    # remote server (hostname appears in the server spec).
    if glob.glob(fname) or os.uname()[1] in _server:
        return True
    else:
        return False
# ---- file operations
# ---- selection
def get_sel_param_name_list():
    '''Get a list of parameter names used for the sel* operator.'''
    variable_keys = ['name', 'stdname', 'param', 'code']
    level_grid_keys = ['level', 'levidx', 'grid', 'zaxis', 'ltype', 'tabnum']
    time_keys = ['timestep', 'year', 'seas', 'mon', 'day', 'hour', 'time',
                 'date', 'smon']
    region_keys = ['lonlatbox', 'indexbox']
    return variable_keys + level_grid_keys + time_keys + region_keys
def get_select_param_name_list():
    '''Get a list of parameter names used for the select operator.'''
    return (['name', 'param', 'code']
            + ['level', 'levidx', 'ltype']
            + ['minute', 'hour', 'day', 'month', 'year', 'timestep',
               'timestep_of_year'])
def sel(**kwargs):
    '''CDO sel* operators. Input arguments are key-value pairs with keys from the return of function get_sel_param_name_list(), and values are all string types with format similar to the CDO command.'''
    valid_keys = get_sel_param_name_list()
    chain = ['-sel' + key + ',' + value
             for key, value in kwargs.items() if key in valid_keys]
    if not chain:
        # nothing matched: tell the caller which keys are accepted
        print ('Please choose proper input arguments with keys from:\n',
            get_sel_param_name_list()
        )
        return
    return chain
def select(**kwargs):
    '''CDO select operators. Input arguments are key-value pairs with keys from the return of function get_sel_param_name_list(), and values are all string types with format similar to the CDO command.\nFunction select can manipulate multiple files while sel* can only operate on a single file.'''
    select_keys = get_select_param_name_list()
    sel_keys = get_sel_param_name_list()
    # fold every select-able key=value pair into a single -select command
    cmd = '-select'
    for key in [k for k in kwargs.keys() if k in select_keys]:
        cmd += ',' + key + '=' + kwargs[key]
    if cmd == '-select':
        print ('''Please choose proper input arguments listed in the return of function get_select_param_name_list():\n''',
               get_select_param_name_list())
        return
    chain = [cmd]
    # remaining keys only known to sel* become individual -sel<key> commands
    for key in [k for k in kwargs.keys()
                if k not in select_keys and k in sel_keys]:
        chain += ['-sel' + key + ',' + kwargs[key]]
    return chain
# ---- conditional selection
# ---- comparison
# ---- modification
def get_change_param_name_list():
    '''Get a list of parameter names used for the ch* operator'''
    id_keys = ['code', 'param']
    meta_keys = ['name', 'unit', 'level', 'levelc', 'levelv']
    return id_keys + meta_keys
def get_set_param_name_list():
    '''Get a list of parameter names used for the set* operator.'''
    # BUG FIX 1: a missing comma after 'calendar' used to fuse it with 'grid'
    # into the bogus entry 'calendargrid', hiding both keys from set().
    # BUG FIX 2: 'parttabp' corrected to 'partabp' (CDO operator setpartabp).
    return ['partabp', 'partabn', 'partab', 'code', 'param',
            'name', 'unit', 'level', 'ltype',
            'date', 'time', 'day', 'mon', 'year', 'tunits',
            'taxis', 'treftime', 'calendar',
            'grid', 'gridtype', 'gridarea',
            'zaxis',
            'gatt', 'gatts',
            'clonlatbox', 'cindexbox',
            'missval', 'ctomiss', 'misstoc', 'rtomiss', 'vrange']
def change(param, old, new):
    '''Wrap a CDO ch* operator, e.g. ch<param>,<old>,<new>.'''
    if param not in get_change_param_name_list():
        print ('Please choose proper input parameters from the return of function get_change_param_name_list():\n',get_change_param_name_list())
        return None
    return ['-ch' + param + ',' + old + ',' + new]
def enlarge(grid):
    '''Wrap the CDO enlarge operator for the given target *grid*.'''
    return ['-enlarge,' + grid]
def invertlat():
    '''Wrap the CDO invertlat operator (flip the latitude axis).'''
    return ['-invertlat']
def invertlev():
    '''Wrap the CDO invertlev operator (flip the vertical axis).'''
    return ['-invertlev']
def set(**kwargs):
    '''CDO set* operators. Input arguments are key-value pairs with keys from the return of function get_set_param_name_list().

    NOTE: this deliberately shadows the builtin ``set`` to mirror the CDO
    operator name; callers inside this module must use ``{...}`` literals.'''
    chain = list()
    for key in [key for key in kwargs.keys() if key in get_set_param_name_list()]:
        chain += [ '-set'+key+','+kwargs[key] ]
    if not chain:
        # BUG FIX: the error message used to print the *sel* parameter list,
        # which does not describe what set() actually accepts.
        print ('Please choose proper input arguments with keys from:\n' \
            ,get_set_param_name_list())
        return
    return chain
def shifttime(timeString):
    '''Wrap the CDO shifttime operator, e.g. shifttime,-1day.'''
    return ['-shifttime,' + timeString]
# ---- arithmetic
def arith(operator, ifile1=None, ifile2=None):
    '''Wrap a CDO arithmetic operator (add, sub, mul, div, ...); optional input files are prepended to the chain.'''
    chain = ['-' + operator]
    for ifile in (ifile1, ifile2):
        if ifile is not None:
            chain = get_ifile(ifile) + chain
    return chain
def expr(expression):
    '''Wrap the CDO expr operator; the expression is single-quoted for the shell.'''
    return ["-expr," + "'" + expression + "'"]
# ---- statistics
def get_stat_param_name_list():
    '''return the list of list of dimension names; the first entry of each inner list is the canonical CDO prefix.'''
    dims = []
    dims.append(['zon', 'zonal', 'lon', 'longitude', 'longitudinal', 'x'])
    dims.append(['mer', 'meridional', 'lat', 'latitude', 'latitudinal', 'y'])
    dims.append(['fld', 'field'])
    dims.append(['vert', 'vertical'])
    dims.append(['tim', 'time'])
    dims.append(['year', 'yearly'])
    dims.append(['seas', 'season', 'seasonal'])
    dims.append(['mon', 'month', 'monthly'])
    dims.append(['day', 'daily'])
    dims.append(['hour', 'hourly'])
    dims.append(['ymon', 'multi-year monthly'])
    dims.append(['yday', 'multi-year daily'])
    dims.append(['run', 'running'])
    return dims
def stat(overDimension='time', statName='mean', N=None, ifile=None):
    '''Wrap a CDO statistics operator such as timmean or runmean,N.'''
    target = overDimension.lower()
    # map any alias (e.g. 'time', 'longitudinal') onto its canonical prefix
    dimension = [aliases for aliases in get_stat_param_name_list()
                 if target in aliases][0][0]
    stat_name = 'pctl' if statName.lower() == 'percentile' else statName
    cmd = dimension + stat_name
    # running statistics carry the number of timesteps as a parameter
    if dimension == 'run':
        cmd += ',' + str(N)
    chain = ['-' + cmd]
    if ifile is not None:
        chain = get_ifile(ifile) + chain
    return chain
def stat_pctl(ifile, N, overDimension='time'):
    '''percentile over time or similar dimension that needs three ifiles'''
    chain = []
    # cdo <dim>pctl needs the data file plus its max and min as extra inputs
    for stat_name in ('max', 'min', 'pctl,' + str(N)):
        chain += get_ifile(ifile) + stat(overDimension, stat_name)
    return chain
# ---- correlation
def cor(dim='tim'):
    '''Correlation coefficients. Dimension can be tim or fld.'''
    return ['-' + dim + 'cor']
def covar(dim='tim'):
    '''Covariance. Dimension can be tim or fld.'''
    return ['-' + dim + 'covar']
# ---- regression
def regress():
    '''Wrap the CDO regres operator (linear regression over time).'''
    return ['-regres']
def detrend():
    '''Wrap the CDO detrend operator (remove the linear trend).'''
    return ['-detrend']
# ---- EOFs
# ---- interpolation
# ---- transformation
# ---- import/export
# ---- miscellaneous
# ---- climate indices
#
# ######## low-level functions
# ---- convert the chain of cdo operators to a command string that can be executed in shell
def get_cdocmd(chain, ofile1=None, ofile2=None):
    '''Transforms the chain of operators into a string that is executable in Shell.\n\nchain is a list representing chains of operators.'''
    ofile = '' if ofile1 is None else ofile1
    if ofile2 is not None:
        ofile += ' ' + ofile2
    if len(chain) == 1:
        # a bare input file: just describe its parameters
        return 'cdo pardes ' + chain[0]
    # operators were appended left-to-right; cdo expects them reversed
    return 'cdo ' + ' '.join(reversed(chain)) + ' ' + ofile
# ---- run system commands
def run_command(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
    '''Run *cmd* (a shell-like string or an argv list), echo its captured
    output (stderr on failure, stdout on success) and return the exit code.'''
    if isinstance(cmd, str):
        # naive whitespace split; quoted arguments are not supported here
        cmd = cmd.split()
    p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
    out, err = p.communicate()
    exitCode = p.returncode
    if exitCode:
        print(err)
    else:
        print(out)
    return exitCode
# ---- communicate with the remote server
def run_cdo_remote(cdocmd=None, server=_server, otxtfile='.tmp.txt'):
    '''Run *cdocmd* on the remote *server* via ssh, then scp the captured text output back and show it locally.'''
    sshcmd = "ssh -t " + server + ' "' + cdocmd + " > " + otxtfile + '" '
    print ('\n','-'*10, 'Connecting to server ...\n',sshcmd)
    if os.system(sshcmd) != 0:
        # ssh failed: nothing to download
        return None
    with tempfile.NamedTemporaryFile(suffix='.txt') as tmp:
        copycmd = "scp " + server + ":~/" + otxtfile + ' ' + tmp.name
        print ('\n','-'*10,'Download and show what has been shown on remote server screen ...\n',copycmd)
        if os.system(copycmd) == 0:  # scp the result file to the local temp file
            return os.system('cat ' + tmp.name)
def download_datafile(datafile_remote='.tmp.nc', server=_server):
    '''scp a data file from the remote server into a local temporary file and return the (still open) file object.'''
    local_tmp = tempfile.NamedTemporaryFile(suffix='.nc')
    copy_cmd = "scp " + server + ":~/" + datafile_remote + ' ' + local_tmp.name
    print ('\n','-'*10,'Download data file on remote server ...\n',copy_cmd)
    os.system(copy_cmd)
    return local_tmp
# ---- convert the chain of cdo operators into python file objects
def get_data_file_obj(chain):
'''Generates a temporary file object pointing to the output netcdf file. \n\nchain can be a list of cdo commands or a string of input file name.'''
if type(chain) is str:
datafile = chain
if at_local(datafile): # data is at local
tmp = open(datafile)
else: # data | |
# Michal-Gagala/sympy -- sympy/solvers/bivariate.py
from sympy.core.add import Add
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand_log, _mexpand
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly, factor
from sympy.simplify.simplify import separatevars
from sympy.simplify.radsimp import collect
from sympy.simplify.simplify import powsimp
from sympy.solvers.solvers import solve, _invert
from sympy.utilities.iterables import uniq
def _filtered_gens(poly, symbol):
    """process the generators of ``poly``, returning the set of generators that
    have ``symbol``. If there are two generators that are inverses of each other,
    prefer the one that has no denominator.
    Examples
    ========
    >>> from sympy.solvers.bivariate import _filtered_gens
    >>> from sympy import Poly, exp
    >>> from sympy.abc import x
    >>> _filtered_gens(Poly(x + 1/x + exp(x)), x)
    {x, exp(x)}
    """
    # keep only the generators that actually involve ``symbol``
    gens = {g for g in poly.gens if symbol in g.free_symbols}
    for g in list(gens):
        ag = 1/g
        # ``g`` may already have been discarded in an earlier iteration of the
        # snapshot list, hence the membership re-check against the live set
        if g in gens and ag in gens:
            # both g and 1/g are generators: drop the one with a denominator
            if ag.as_numer_denom()[1] is not S.One:
                g = ag
            gens.remove(g)
    return gens
def _mostfunc(lhs, func, X=None):
    """Returns the term in lhs which contains the most of the
    func-type things e.g. log(log(x)) wins over log(x) if both terms appear.
    ``func`` can be a function (exp, log, etc...) or any other SymPy object,
    like Pow.
    If ``X`` is not ``None``, then the function returns the term composed with the
    most ``func`` having the specified variable.
    Examples
    ========
    >>> from sympy.solvers.bivariate import _mostfunc
    >>> from sympy import exp
    >>> from sympy.abc import x, y
    >>> _mostfunc(exp(x) + exp(exp(x) + 2), exp)
    exp(exp(x) + 2)
    >>> _mostfunc(exp(x) + exp(exp(y) + 2), exp)
    exp(exp(y) + 2)
    >>> _mostfunc(exp(x) + exp(exp(y) + 2), exp, x)
    exp(x)
    >>> _mostfunc(x, exp, x) is None
    True
    >>> _mostfunc(exp(x) + exp(x*y), exp, x)
    exp(x)
    """
    # candidate func-instances, optionally filtered to those involving X
    # (free-symbol test when X is a Symbol, structural .has() otherwise)
    fterms = [tmp for tmp in lhs.atoms(func) if (not X or
        X.is_Symbol and X in tmp.free_symbols or
        not X.is_Symbol and tmp.has(X))]
    if len(fterms) == 1:
        return fterms[0]
    elif fterms:
        # break ties deterministically: canonical ordering before max()
        return max(list(ordered(fterms)), key=lambda x: x.count(func))
    return None
def _linab(arg, symbol):
    """Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``
    where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are
    independent of ``symbol``.
    Examples
    ========
    >>> from sympy.solvers.bivariate import _linab
    >>> from sympy.abc import x, y
    >>> from sympy import exp, S
    >>> _linab(S(2), x)
    (2, 0, 1)
    >>> _linab(2*x, x)
    (2, 0, x)
    >>> _linab(y + y*x + 2*x, x)
    (y + 2, y, x)
    >>> _linab(3 + 2*exp(x), x)
    (2, 3, exp(x))
    """
    arg = factor_terms(arg.expand())
    # split into the symbol-free part (ind) and the dependent part (dep)
    ind, dep = arg.as_independent(symbol)
    if arg.is_Mul and dep.is_Add:
        # ind*(sum): recurse on the sum and scale both coefficients by ind
        a, b, x = _linab(dep, symbol)
        return ind*a, ind*b, x
    if not arg.is_Add:
        # pure product: there is no constant term
        b = 0
        a, x = ind, dep
    else:
        b = ind
        a, x = separatevars(dep).as_independent(symbol, as_Add=False)
    # normalize so that X does not carry a leading minus sign
    if x.could_extract_minus_sign():
        a = -a
        x = -x
    return a, b, x
def _lambert(eq, x):
    """
    Given an expression assumed to be in the form
    ``F(X, a..f) = a*log(b*X + c) + d*X + f = 0``
    where X = g(x) and x = g^-1(X), return the Lambert solution,
    ``x = g^-1(-c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(-f/a)))``.
    """
    eq = _mexpand(expand_log(eq))
    # the most deeply nested log term in x drives the Lambert rewrite
    mainlog = _mostfunc(eq, log, x)
    if not mainlog:
        return []  # violated assumptions
    other = eq.subs(mainlog, 0)
    if isinstance(-other, log):
        # the remainder is itself a -log term: peel one level of log
        # from both sides before matching the pattern
        eq = (eq - other).subs(mainlog, mainlog.args[0])
        mainlog = mainlog.args[0]
        if not isinstance(mainlog, log):
            return []  # violated assumptions
        other = -(-other).args[0]
        eq += other
    if x not in other.free_symbols:
        return []  # violated assumptions
    # match the linear part: other == d*X2 + f
    d, f, X2 = _linab(other, x)
    # match the log part: eq - other == a*log(b*X1 + c)
    logterm = collect(eq - other, mainlog)
    a = logterm.as_coefficient(mainlog)
    if a is None or x in a.free_symbols:
        return []  # violated assumptions
    logarg = mainlog.args[0]
    b, c, X1 = _linab(logarg, x)
    if X1 != X2:
        return []  # violated assumptions
    # invert the generator X1 so we have x(u)
    u = Dummy('rhs')
    xusolns = solve(X1 - u, x)
    # There are infinitely many branches for LambertW
    # but only branches for k = -1 and 0 might be real. The k = 0
    # branch is real and the k = -1 branch is real if the LambertW argument
    # is in range [-1/e, 0]. Since `solve` does not return infinite
    # solutions we will only include the -1 branch if it tests as real.
    # Otherwise, inclusion of any LambertW in the solution indicates to
    # the user that there are imaginary solutions corresponding to
    # different k values.
    lambert_real_branches = [-1, 0]
    sol = []
    # solution of the given Lambert equation is like
    # sol = -c/b + (a/d)*LambertW(arg, k),
    # where arg = d/(a*b)*exp((c*d-b*f)/a/b) and k in lambert_real_branches.
    # Instead of considering the single arg, `d/(a*b)*exp((c*d-b*f)/a/b)`,
    # the individual `p` roots obtained when writing `exp((c*d-b*f)/a/b)`
    # as `exp(A/p) = exp(A)**(1/p)`, where `p` is an Integer, are used.
    # calculating args for LambertW
    num, den = ((c*d-b*f)/a/b).as_numer_denom()
    p, den = den.as_coeff_Mul()
    e = exp(num/den)
    t = Dummy('t')
    args = [d/(a*b)*t for t in roots(t**p - e, t).keys()]
    # calculating solutions from args
    for arg in args:
        for k in lambert_real_branches:
            w = LambertW(arg, k)
            # skip the k = -1 branch unless it is provably real
            if k and not w.is_real:
                continue
            rhs = -c/b + (a/d)*w
            for xu in xusolns:
                sol.append(xu.subs(u, rhs))
    return sol
def _solve_lambert(f, symbol, gens):
"""Return solution to ``f`` if it is a Lambert-type expression
else raise NotImplementedError.
For ``f(X, a..f) = a*log(b*X + c) + d*X - f = 0`` the solution
for ``X`` is ``X = -c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(f/a))``.
There are a variety of forms for `f(X, a..f)` as enumerated below:
1a1)
if B**B = R for R not in [0, 1] (since those cases would already
be solved before getting here) then log of both sides gives
log(B) + log(log(B)) = log(log(R)) and
X = log(B), a = 1, b = 1, c = 0, d = 1, f = log(log(R))
1a2)
if B*(b*log(B) + c)**a = R then log of both sides gives
log(B) + a*log(b*log(B) + c) = log(R) and
X = log(B), d=1, f=log(R)
1b)
if a*log(b*B + c) + d*B = R and
X = B, f = R
2a)
if (b*B + c)*exp(d*B + g) = R then log of both sides gives
log(b*B + c) + d*B + g = log(R) and
X = B, a = 1, f = log(R) - g
2b)
if g*exp(d*B + h) - b*B = c then the log form is
log(g) + d*B + h - log(b*B + c) = 0 and
X = B, a = -1, f = -h - log(g)
3)
if d*p**(a*B + g) - b*B = c then the log form is
log(d) + (a*B + g)*log(p) - log(b*B + c) = 0 and
X = B, a = -1, d = a*log(p), f = -log(d) - g*log(p)
"""
def _solve_even_degree_expr(expr, t, symbol):
"""Return the unique solutions of equations derived from
``expr`` by replacing ``t`` with ``+/- symbol``.
Parameters
==========
expr : Expr
The expression which includes a dummy variable t to be
replaced with +symbol and -symbol.
symbol : Symbol
The symbol for which a solution is being sought.
Returns
=======
List of unique solution of the two equations generated by
replacing ``t`` with positive and negative ``symbol``.
Notes
=====
If ``expr = 2*log(t) + x/2` then solutions for
``2*log(x) + x/2 = 0`` and ``2*log(-x) + x/2 = 0`` are
returned by this function. Though this may seem
counter-intuitive, one must note that the ``expr`` being
solved here has been derived from a different expression. For
an expression like ``eq = x**2*g(x) = 1``, if we take the
log of both sides we obtain ``log(x**2) + log(g(x)) = 0``. If
x is positive then this simplifies to
``2*log(x) + log(g(x)) = 0``; the Lambert-solving routines will
return solutions for this, but we must also consider the
solutions for ``2*log(-x) + log(g(x))`` since those must also
be a solution of ``eq`` which has the same value when the ``x``
in ``x**2`` is negated. If `g(x)` | |
# gh_stars: 0
"""
Used for pytest fixtures and anything else test setup/teardown related.
"""
import copy
import datetime
import os
import sys
import aiofiles
import aiomock
import mock
import pytest
try:
import uvloop
LOOP = uvloop.new_event_loop
L_SHOW = LOOP()
L_SHOW.set_debug(True)
print("Test loop policy:", str(L_SHOW))
del L_SHOW
except ImportError:
print("Missing: uvloop")
sys.exit(1)
import cog.util
import cogdb
import cogdb.query
from cogdb.schema import (DiscordUser, FortSystem, FortPrep, FortDrop, FortUser, FortOrder,
UMSystem, UMExpand, UMOppose, UMUser, UMHold, EUMSheet, KOS,
AdminPerm, ChannelPerm, RolePerm,
TrackSystem, TrackSystemCached, TrackByID,
OCRTracker, OCRTrigger, OCRPrep, Global, Vote, EVoteType,
Consolidation)
from tests.data import CELLS_FORT, CELLS_FORT_FMT, CELLS_UM
@pytest.fixture(scope='function', autouse=True)
def around_all_tests(session):
    """
    Executes before and after EVERY test.
    Can be helpful for tracking bugs, like dirty database after test.
    Disabled unless needed. Non-trivial overhead.
    """
    start = datetime.datetime.utcnow()
    yield
    print(" Time", datetime.datetime.utcnow() - start, end="")

    # after the test: every table must have been left empty
    for cls in (DiscordUser, FortUser, FortSystem, FortDrop, FortOrder,
                UMSystem, UMUser, UMHold,
                KOS, AdminPerm, ChannelPerm, RolePerm,
                TrackSystem, TrackSystemCached, TrackByID,
                OCRTracker, OCRTrigger, OCRPrep, Global):
        assert not session.query(cls).all()
# Skip reason shown for tests gated behind the ALL_TESTS environment flag.
REASON_SLOW = 'Slow as blocking to sheet. To enable, ensure os.environ ALL_TESTS=True'
# Marker for slow sheet-bound tests; they run only when ALL_TESTS is set.
SHEET_TEST = pytest.mark.skipif(not os.environ.get('ALL_TESTS'), reason=REASON_SLOW)
# Process-spawning tests share the same opt-in gate.
PROC_TEST = SHEET_TEST
@pytest.fixture
def event_loop():
    """
    Provide a fresh debug-enabled event loop (uvloop-backed) per test.

    To test either:
        1) Mark with pytest.mark.asyncio
        2) event_loop.run_until_complete(asyncio.gather(futures))
    """
    test_loop = LOOP()
    test_loop.set_debug(True)

    yield test_loop

    test_loop.close()
@pytest.fixture
def session():
    """Yield a main-database session, closed automatically on teardown."""
    with cogdb.session_scope(cogdb.Session) as the_session:
        yield the_session
@pytest.fixture
def side_session():
    """Yield a side-database session, closed automatically on teardown."""
    with cogdb.session_scope(cogdb.SideSession) as the_session:
        yield the_session
@pytest.fixture
def eddb_session():
    """Yield an EDDB-database session, closed automatically on teardown."""
    with cogdb.session_scope(cogdb.EDDBSession) as the_session:
        yield the_session
@pytest.fixture
def db_cleanup(session):
    """
    Clean the whole database. Guarantee it is empty.
    Used when tests don't use a fixture.
    """
    yield

    cogdb.schema.empty_tables(session, perm=True)
    # verify every table really is empty after the purge
    for table_cls in (DiscordUser, FortUser, FortSystem, FortDrop, FortOrder, UMUser,
                      UMSystem, UMHold, KOS, TrackSystem, TrackSystemCached, TrackByID,
                      AdminPerm, ChannelPerm, RolePerm, OCRTracker, OCRTrigger,
                      OCRPrep, Global):
        assert session.query(table_cls).all() == []
@pytest.fixture
def f_dusers(session):
    """
    Fixture to insert some test DiscordUsers.
    """
    dusers = tuple(DiscordUser(id=num, display_name='User{}'.format(num),
                               pref_name='User{}'.format(num))
                   for num in range(1, 4))
    session.add_all(dusers)
    session.commit()

    yield dusers

    # teardown: discard pending changes, then purge the table
    session.rollback()
    session.query(DiscordUser).delete()
    session.commit()
@pytest.fixture
def f_dusers_many(session):
    """
    Fixture to insert many DiscordUsers to cover key constraints with scanners.
    """
    # ids 1..200; display and preferred names mirror each other
    dusers = [DiscordUser(id=ind, display_name="User{}".format(ind),
                          pref_name="User{}".format(ind))
              for ind in range(1, 201)]
    session.add_all(dusers)
    session.commit()

    yield dusers

    # teardown: discard pending changes, then purge the table
    session.rollback()
    session.query(DiscordUser).delete()
    session.commit()
@pytest.fixture
def f_fort_testbed(session):
    """
    Fixture to insert some test SheetRows.
    Returns: (users, systems, drops)

    Requires f_dusers to have populated DiscordUser first (asserted below).
    """
    dusers = session.query(DiscordUser).all()
    assert dusers
    # one FortUser per discord user, rows 15-17 of the fort sheet
    users = (
        FortUser(id=dusers[0].id, name=dusers[0].pref_name, row=15, cry='User1 are forting late!'),
        FortUser(id=dusers[1].id, name=dusers[1].pref_name, row=16, cry=''),
        FortUser(id=dusers[2].id, name=dusers[2].pref_name, row=17, cry='User3 is the boss'),
    )
    # mix of regular systems and two preps (one complete, one in progress)
    systems = (
        FortSystem(id=1, name='Frey', fort_status=4910, trigger=4910, fort_override=0.7, um_status=0, undermine=0.0, distance=116.99, notes='', sheet_col='G', sheet_order=1),
        FortSystem(id=2, name='Nurundere', fort_status=5422, trigger=8425, fort_override=0.6, um_status=0, undermine=0.0, distance=99.51, notes='', sheet_col='H', sheet_order=2),
        FortSystem(id=3, name='LHS 3749', fort_status=1850, trigger=5974, um_status=0, undermine=0.0, distance=55.72, notes='', sheet_col='I', sheet_order=3),
        FortSystem(id=4, name='Sol', fort_status=2500, trigger=5211, um_status=2250, undermine=0.0, distance=28.94, notes='Leave For Grinders', sheet_col='J', sheet_order=4),
        FortSystem(id=5, name='Dongkum', fort_status=7000, trigger=7239, um_status=0, undermine=0.0, distance=81.54, notes='', sheet_col='K', sheet_order=5),
        FortSystem(id=6, name='<NAME>', fort_status=0, trigger=6476, um_status=0, undermine=0.0, distance=67.27, notes='', sheet_col='L', sheet_order=6),
        FortSystem(id=7, name='<NAME>', fort_status=0, trigger=7968, um_status=0, undermine=0.0, distance=93.02, notes='Skip it now', sheet_col='M', sheet_order=7),
        FortSystem(id=8, name='Othime', fort_status=0, trigger=7367, um_status=0, undermine=0.0, distance=83.68, notes='Priority for S/M ships (no L pads)', sheet_col='AF', sheet_order=26),
        FortSystem(id=9, name='<NAME>', fort_status=0, trigger=8563, um_status=0, undermine=1.2, distance=101.38, notes='', sheet_col='BK', sheet_order=57),
        FortSystem(id=10, name='LPM 229', fort_status=0, trigger=9479, um_status=0, undermine=1.0, distance=112.98, notes='', sheet_col='BL', sheet_order=58),
        FortPrep(id=1000, name='Rhea', trigger=10000, fort_status=5100, um_status=0, undermine=0.0, distance=65.55, notes='Atropos', sheet_col='D', sheet_order=0),
        FortPrep(id=1001, name='PrepDone', trigger=10000, fort_status=12500, um_status=0, undermine=0.0, distance=65.55, notes='Atropos', sheet_col='E', sheet_order=0),
    )
    # drops reference the users/systems above via user_id/system_id
    drops = (
        FortDrop(id=1, amount=700, user_id=users[0].id, system_id=systems[0].id),
        FortDrop(id=2, amount=400, user_id=users[0].id, system_id=systems[1].id),
        FortDrop(id=3, amount=1200, user_id=users[1].id, system_id=systems[0].id),
        FortDrop(id=4, amount=1800, user_id=users[2].id, system_id=systems[0].id),
        FortDrop(id=5, amount=800, user_id=users[1].id, system_id=systems[1].id),
    )
    session.add_all(users + systems)
    # flush so the user/system rows are written before the drops that reference them
    session.flush()
    session.add_all(drops)
    session.commit()

    yield users, systems, drops

    # teardown: delete children before parents to respect foreign keys
    session.rollback()
    for cls in (FortDrop, FortSystem, FortUser):
        session.query(cls).delete()
    session.commit()
@pytest.fixture
def f_um_testbed(session):
    """
    Fixture to insert some test Systems.
    Returns: (users, systems, holds)

    Requires f_dusers to have populated DiscordUser first (asserted below).
    """
    dusers = session.query(DiscordUser).all()
    assert dusers
    # third user lives on the snipe sheet, the others on the main UM sheet
    users = (
        UMUser(id=dusers[0].id, name=dusers[0].pref_name, row=18, cry='We go pew pew!'),
        UMUser(id=dusers[1].id, name=dusers[1].pref_name, row=19, cry='Shooting time'),
        UMUser(id=dusers[2].id, name=dusers[2].pref_name, sheet_src=EUMSheet.snipe, row=18, cry='Sniping away'),
    )
    # covers the UMSystem subtypes: plain, expansion, opposition and snipe
    systems = (
        UMSystem(id=1, name='Cemplangpa', sheet_col='D', goal=14878, security='Medium', notes='',
                 progress_us=15000, progress_them=1.0, close_control='Sol', priority='Medium',
                 map_offset=1380),
        UMSystem(id=2, name='Pequen', sheet_col='F', goal=12500, security='Anarchy', notes='',
                 progress_us=10500, progress_them=0.5, close_control='Atropos', priority='Low',
                 map_offset=0),
        UMExpand(id=3, name='Burr', sheet_col='H', goal=364298, security='Low', notes='',
                 progress_us=161630, progress_them=35.0, close_control='Dongkum', priority='Medium',
                 map_offset=76548),
        UMOppose(id=4, name='<NAME>', sheet_col='J', goal=59877, security='Low', notes='',
                 progress_us=47739, progress_them=1.69, close_control='Atropos', priority='low',
                 map_offset=23960),
        UMSystem(id=5, name='Empty', sheet_col='K', goal=10000, security='Medium', notes='',
                 progress_us=0, progress_them=0.0, close_control='Rana', priority='Low',
                 map_offset=0),
        UMSystem(id=6, name='LeaveIt', sheet_col='L', goal=10000, security='Medium', notes='',
                 progress_us=9000, progress_them=0.0, close_control='Rana', priority='Leave For Now',
                 map_offset=0, sheet_src=EUMSheet.main),
        UMSystem(id=10007, name='ToSnipe', sheet_col='D', goal=100000, security='Medium', notes='',
                 progress_us=0, progress_them=0.0, close_control='Rana', priority='Low',
                 map_offset=0, sheet_src=EUMSheet.snipe),
    )
    # holds reference the users/systems above via user_id/system_id
    holds = (
        UMHold(id=1, held=0, redeemed=4000, user_id=dusers[0].id, system_id=systems[0].id),
        UMHold(id=2, held=400, redeemed=1550, user_id=dusers[0].id, system_id=systems[1].id),
        UMHold(id=3, held=2200, redeemed=5800, user_id=dusers[0].id, system_id=systems[2].id),
        UMHold(id=4, held=450, redeemed=2000, user_id=dusers[1].id, system_id=systems[0].id),
        UMHold(id=5, held=2400, redeemed=0, user_id=dusers[1].id, system_id=systems[1].id),
        UMHold(id=6, held=0, redeemed=1200, user_id=dusers[1].id, system_id=systems[2].id),
        UMHold(id=7, sheet_src=EUMSheet.snipe, held=5000, redeemed=1200, user_id=dusers[2].id, system_id=systems[-1].id),
    )
    session.add_all(users + systems)
    # flush so the user/system rows are written before the holds that reference them
    session.flush()
    session.add_all(holds)
    session.commit()

    yield users, systems, holds

    # teardown: delete children before parents to respect foreign keys
    session.rollback()
    for cls in (UMHold, UMSystem, UMUser):
        session.query(cls).delete()
    session.commit()
@pytest.fixture
def f_admins(session):
    """
    Fixture to insert some test admins.
    Depends on: f_dusers
    """
    admins = (
        AdminPerm(date=datetime.datetime(2017, 9, 26, 13, 34, 39, 721018), id=1),
        AdminPerm(date=datetime.datetime(2017, 9, 26, 13, 34, 48, 327031), id=2),
    )
    session.add_all(admins)
    session.commit()

    yield admins

    # teardown: drop every admin row inserted above
    session.rollback()
    session.query(AdminPerm).delete()
    session.commit()
@pytest.fixture
def f_cperms(session):
    """ Channel perms fixture. """
    cperm = ChannelPerm(cmd="drop", guild_id=10, channel_id=2001)
    session.add(cperm)
    session.commit()

    yield (cperm,)

    session.rollback()
    session.query(ChannelPerm).delete()
    session.commit()
@pytest.fixture
def f_rperms(session):
    """ Role perms fixture. """
    rperm = RolePerm(cmd="drop", guild_id=10, role_id=3001)
    session.add(rperm)
    session.commit()

    yield (rperm,)

    session.rollback()
    session.query(RolePerm).delete()
    session.commit()
@pytest.fixture
def f_fortorders(session):
    """ Fort order fixture. """
    systems = tuple(FortOrder(order=pos, system_name=name)
                    for pos, name in enumerate(('Sol', 'LPM 229', 'Othime'), start=1))
    session.add_all(systems)
    session.commit()

    yield systems

    session.rollback()
    session.query(FortOrder).delete()
    session.commit()
@pytest.fixture
def f_kos(session):
    """
    Fixture to insert some test SheetRows.
    """
    kos_rows = (
        KOS(id=1, cmdr='good_guy', squad="Hudson", reason="Very good", is_friendly=1),
        KOS(id=2, cmdr='good_guy_pvp', squad="Hudson", reason="Very good pvp", is_friendly=1),
        KOS(id=3, cmdr='bad_guy', squad="Hudson", reason="Pretty bad guy", is_friendly=0),
    )
    session.add_all(kos_rows)
    session.commit()

    yield kos_rows

    # teardown: remove the KOS rows inserted above
    session.rollback()
    session.query(KOS).delete()
    session.commit()
@pytest.fixture
def f_testbed(f_dusers, f_fort_testbed, f_um_testbed):
    """Bundle the discord-user, fort and undermining testbeds into one fixture."""
    yield [f_dusers, f_fort_testbed, f_um_testbed]
@pytest.fixture()
def mock_fortsheet(db_cleanup):
    """A mocked fort sheet that serves canned cell data."""
    sheet = mock.Mock()
    sheet.whole_sheet.return_value = CELLS_FORT
    sheet.get_with_formatting.return_value = copy.deepcopy(CELLS_FORT_FMT)

    yield sheet
@pytest.fixture()
def mock_umsheet(db_cleanup):
    """A mocked undermining sheet that serves canned cell data."""
    fake_sheet = mock.Mock()
    fake_sheet.whole_sheet.return_value = CELLS_UM
    # yield (not return) for consistency with mock_fortsheet above
    yield fake_sheet
# Fake objects look like discord data classes
class FakeObject():
    """
    A fake class to impersonate Data Classes from discord.py
    """
    oid = 0  # class-wide counter feeding the auto-generated ids

    @classmethod
    def next_id(cls):
        """Produce a sequential id of the form 'ClassName-N'."""
        cls.oid += 1
        return '{}-{}'.format(cls.__name__, cls.oid)

    def __init__(self, name, id=None):
        # falsy id => allocate an automatic one
        self.id = id if id else self.__class__.next_id()
        self.name = name

    def __repr__(self):
        return "{}: {} {}".format(self.__class__.__name__, self.id, self.name)

    def __str__(self):
        return "{}: {}".format(self.__class__.__name__, self.name)
# TODO: Rename Guild.
class Guild(FakeObject):
    """Impersonate a discord.Guild with a fixed set of members."""
    def __init__(self, name, id=None):
        super().__init__(name, id)
        self.channels = []
        self.roles = []
        self.emojis = []
        self.mapped = {
            1: Member('User1', [Role('FRC Recruit'), Role("Test")]),
            2: Member('User2', [Role('FRC Member'), Role("Nothing")]),
            3: Member('User3', [Role('FRC Recruit'), Role("Test")]),
        }

    def add(self, channel):
        """Register a channel on this guild."""
        self.channels.append(channel)

    def get_member(self, id):
        return self.mapped[id]

    @property
    def members(self):
        return list(self.mapped.values())

    def get_channel(self, channel_id):
        matches = [chan for chan in self.channels if chan.id == channel_id]
        return matches[0]

    def get_role(self, role_id):
        matches = [a_role for a_role in self.roles if a_role.id == role_id]
        return matches[0]

    def get_member_named(self, nick):
        """
        Find name of user by string.
        Returns: A member if found at least one (first one) else return None.
        """
        found = [member for member in self.mapped.values() if nick in member.display_name]
        return found[0] if found else None
class Emoji(FakeObject):
    """Fake emoji; renders as its name wrapped in square brackets."""
    def __init__(self, name, id=None):
        super().__init__(name, id)

    def __str__(self):
        return "[{}]".format(self.name)
class Channel(FakeObject):
    """Fake text channel; remembers every message deleted through it."""
    def __init__(self, name, *, srv=None, id=None):
        super().__init__(name, id)
        self.guild = srv
        self.all_delete_messages = []
        self.send_messages = None

    @property
    def mention(self):
        return self.name

    async def delete_messages(self, messages):
        """Flag each message as deleted and record the batch."""
        for msg in messages:
            msg.is_deleted = True
        self.all_delete_messages.extend(messages)

    async def send(self, embed, **kwargs):
        return Message(embed, None, self.guild, None)
class Member(FakeObject):
    """Fake guild member with a fixed discriminator and a role list."""
    def __init__(self, name, roles, *, id=None):
        super().__init__(name, id)
        self.discriminator = '12345'
        self.display_name = self.name
        self.roles = roles

    @property
    def mention(self):
        return self.display_name
class Role(FakeObject):
    """Fake role, optionally bound to a guild."""
    def __init__(self, name, srv=None, *, id=None):
        super().__init__(name, id)
        self.guild = srv
class Message(FakeObject):
def __init__(self, content, author, srv, channel, *, id=None, mentions=None, channel_mentions=None, role_mentions=None):
super().__init__(None, id)
self.author = author
self.channel = channel
self.content = content
self.mentions = mentions
self.channel_mentions = channel_mentions
self.role_mentions = role_mentions
self.guild = srv
self.is_deleted = | |
# compression.py
"""
toy compression algorithm with an interface matching stdlib modules.
class StatesError
class StatesCompressor
class StatesDecompressor
class StatesFile
def compress
def decompress
def open
"""
import io
import itertools
import os
import pathlib
import struct
import typing
from pprint import pprint
# Public API, mirroring the interface of stdlib codecs such as bz2/lzma.
__all__ = [
    'StatesError',
    'StatesCompressor',
    'StatesDecompressor',
    'StatesFile',
    'compress',
    'decompress',
    'open']
# Magic bytes prepended to every compressed stream (base64 of 'adam').
SIGNATURE = b'YWRhbQ'
# Type aliases shared by the compression/decompression pipeline.
MetaData = typing.Dict[bytes, int]  # noqa
MetaKeys = typing.MutableSet[bytes]  # noqa
MetaValues = typing.MutableSet[int]  # noqa
OptMetaData = typing.Optional[MetaData]
MetaTree = typing.Dict[int, typing.Union['MetaTree', int]]  # noqa
FlatFmt = typing.List[typing.Optional[int]]  # noqa
ByteLines = typing.List[bytes]  # noqa
ByteLinesIter = typing.Iterable[bytes]  # noqa
Files = typing.Union['StatesFile', typing.TextIO]
# Generic parameters: D = raw-data side, M = analysed/meta side.
D = typing.TypeVar('D')
M = typing.TypeVar('M')
class StatesError(Exception):
    """Raised when data cannot be analysed or restored by this codec."""
class Reversible(typing.Generic[D, M]):
    """
    Lazy two-way container pairing raw data with its analysed meta form.

    Subclasses implement ``_analyse`` (data -> meta) and ``_restore``
    (meta -> data); each direction is computed on first access and cached.
    """

    @classmethod
    def from_data(cls, data: D) -> 'Reversible':
        """Build an instance from the raw-data side."""
        return cls(data=data)

    @classmethod
    def from_meta(cls, meta: M) -> 'Reversible':
        """Build an instance from the meta side."""
        return cls(meta=meta)

    def __init__(self, *, data: typing.Optional[D] = None,
                 meta: typing.Optional[M] = None) -> None:
        self._data = data
        self._meta = meta

    def _restore(self) -> D:
        """Rebuild raw data from meta; must be overridden by subclasses."""
        raise NotImplementedError

    def _analyse(self) -> M:
        """Derive meta from raw data; must be overridden by subclasses."""
        raise NotImplementedError

    @property
    def data(self) -> D:
        """Raw data, restored from meta on first access and cached."""
        if self._data is None:
            self._data = self._restore()
        return self._data

    @property
    def meta(self) -> M:
        """Meta form, analysed from data on first access and cached."""
        if self._meta is None:
            self._meta = self._analyse()
        return self._meta
class Followers(Reversible[bytes, MetaData]):
    """
    Maps each fixed-length reversed context (the bytes just before a
    position) to the unique byte that follows it; -1 marks end-of-stream.
    """

    def _restore(self) -> bytes:
        """restore: replay the follower map byte by byte until -1."""
        # All keys share one length: the context size chosen by _analyse.
        count = len(next(iter(self._meta.keys())))
        # Seed with zero bytes, matching the ljust padding in __analyse.
        data = b'\x00' * count
        while True:
            # Keys store the context reversed (most recent byte first).
            key = bytes(reversed(data[-count:]))
            value = self._meta[key]
            if value == -1:
                break
            data += bytes((value,))
        # Strip the artificial zero-byte seed.
        return data[count:]

    def __analyse(self, count: int) -> OptMetaData:
        """map subsequences to next byte if unique."""
        # Sentinel: the final context maps to -1 (end of data).
        flat_node = {bytes(reversed(self._data[-count:])): -1}
        for end in range(len(self._data)):
            start = max(end - count, 0)
            # Pad short (near-start) contexts with zeros on the right.
            key = bytes(reversed(self._data[start:end])).ljust(count, b'\x00')
            value = self._data[end]
            # setdefault keeps an existing value: a mismatch means this
            # context size is ambiguous for the data.
            if flat_node.setdefault(key, value) != value:
                return None
        return flat_node

    def _analyse(self) -> MetaData:
        """find shortest size that uniquely maps to next byte."""
        for count in range(len(self._data) + 1):
            flat_node = self.__analyse(count)
            if flat_node:
                return flat_node
        raise StatesError
class KeyTrunc(Reversible[MetaData, MetaData]):
    """
    Shortens follower-map keys to their minimal unambiguous prefixes,
    and inflates the full-width map back from them.
    """

    def _restore(self) -> MetaData:
        """restore."""
        inflate = {}  # type: Dict[bytes, int]
        # The longest surviving key tells us the original context width.
        unique_size = max(len(key) for key in self._meta.keys())
        next_key = b'\x00' * unique_size
        while True:
            # Try the longest prefix first, falling back to shorter ones.
            for count in range(unique_size + 1, 0, -1):
                value = self._meta.get(next_key[:count], None)
                if value is not None:
                    inflate[next_key] = value
                    if value == -1:
                        # End-of-stream marker: the map is fully rebuilt.
                        return inflate
                    # Shift the emitted byte into the front of the context.
                    next_key = (bytes([value]) + next_key)[:unique_size]
                    break
                elif count == 1:
                    # No prefix of any length matched: corrupt meta.
                    raise StatesError

    def _analyse(self) -> MetaData:
        """get shortest sequence to match each next."""
        flat_node = dict(self._data.items())
        condense = {}  # type: Dict[bytes, int]
        unique_size = len(next(iter(flat_node.keys()))) + 1
        repeats = iter(range(1, unique_size))
        while flat_node:
            count = next(repeats)
            # Group the remaining keys by their length-`count` prefix.
            for start in set(key[:count] for key in flat_node.keys()):
                possible = set(
                    flat_node.get(key)
                    for key in flat_node.keys() if key.startswith(start))
                if len(possible) == 1:
                    # Prefix is unambiguous: keep it, drop the long keys.
                    condense[start] = possible.pop()
                    for key in tuple(flat_node.keys()):
                        if key.startswith(start):
                            del flat_node[key]
        return condense
class Reshape(Reversible[MetaData, MetaTree]):
    """
    Converts the flat prefix->value map into a nested lookback tree
    (one level per byte) and back.
    """

    def _restore(self) -> MetaData:
        """restore."""
        flat_node = {}  # type: Dict[bytes, int]
        tree_root = dict(self._meta.items())
        # Breadth-first walk, concatenating byte keys along each path.
        while tree_root:
            for key, value in tuple(tree_root.items()):
                if isinstance(key, int):
                    bytes_key = bytes((key,))
                else:
                    bytes_key = key
                del tree_root[key]
                if isinstance(value, int):
                    # Leaf: full path -> next-byte value.
                    flat_node[bytes_key] = value
                    continue
                # Branch: queue children with their paths extended.
                for child_key, child_value in value.items():
                    if isinstance(child_key, int):
                        child_key = bytes((child_key,))
                    tree_root[bytes_key + child_key] = child_value
        return flat_node

    def _analyse(self) -> MetaTree:
        """convert meta mapping to tree format."""
        tree_root = {}
        for key, value in self._data.items():
            ref = tree_root
            # Descend/create one tree level per lookback byte.
            for lookback in key[:-1]:
                ref = ref.setdefault(lookback, {})
            ref[key[-1]] = value
        return tree_root
class Serialize(Reversible[MetaTree, FlatFmt]):
    """
    Turns the lookback tree into a flat list of indexes (wire layout).

    NOTE(review): the helpers below take no ``self`` yet ``_analyse``
    calls them via ``self.`` — as plain instance methods that raises
    TypeError. They are presumably meant to be @staticmethod (or
    module-level); confirm against the original file's indentation.
    """

    def _restore(self) -> MetaTree:
        """restore.

        NOTE(review): this body is identical to Reshape._restore and
        returns a flat map rather than a MetaTree, so it does not look
        like the inverse of _analyse; States._restore bypasses it and
        calls deserialize_wire directly. Confirm intent.
        """
        flat_node = {}  # type: Dict[bytes, int]
        tree_root = dict(self._meta.items())
        while tree_root:
            for key, value in tuple(tree_root.items()):
                if isinstance(key, int):
                    bytes_key = bytes((key,))
                else:
                    bytes_key = key
                del tree_root[key]
                if isinstance(value, int):
                    flat_node[bytes_key] = value
                    continue
                for child_key, child_value in value.items():
                    if isinstance(child_key, int):
                        child_key = bytes((child_key,))
                    tree_root[bytes_key + child_key] = child_value
        return flat_node

    def freeze_tree(tree_root: MetaTree, found) -> MetaTree:
        """freeze_tree: recursively convert the tree to hashable
        frozensets, sharing identical subtrees via `found`."""
        root = {}
        for key, value in tree_root.items():
            if isinstance(value, int):
                root[key] = value
                continue
            frozen = freeze_tree(value, found)
            if frozen in found:
                # Reuse the previously-seen identical subtree object.
                root[key] = next(sub for sub in found if sub == frozen)
                continue
            root[key] = frozen
            found.add(frozen)
        return frozenset(root.items())

    def extract_max(tree_root: MetaTree) -> int:
        """extract_max: largest key in a frozen tree level."""
        return max(key for key, _ in tree_root)

    def extract_min(tree_root: MetaTree) -> int:
        """extract_min: smallest key in a frozen tree level."""
        return min(key for key, _ in tree_root)

    def serialize_branch(
            tree_root: MetaTree, found, base_idx: int=0) -> FlatFmt:
        """convert branch to wire format."""
        # Slot 0 stores the bias so keys can be offset to indexes >= 1.
        small = extract_min(tree_root) - 1
        flat = [None] * (extract_max(tree_root) - small + 1)
        flat[0] = small
        next_idx = base_idx + len(flat)
        for key, value in tree_root:
            key -= small
            if isinstance(value, int):
                flat[key] = value
                continue
            if value in found:
                # Shared subtree: point at its existing serialization.
                flat[key] = found[value]
                continue
            seri = serialize_branch(value, found, next_idx)
            flat[key] = next_idx
            found[value] = next_idx
            next_idx += len(seri)
            flat += seri
        return flat

    def _analyse(self) -> FlatFmt:
        """convert mapping to wire format."""
        frozen = self.freeze_tree(self._data, set())
        top = self.serialize_branch(frozen, dict())
        return [self.extract_max(frozen)] + top
def pack_format(flat: FlatFmt) -> bytes:
    """pack collection of indexes into a tagged byte format.

    Layout: SIGNATURE + struct format string (NUL-terminated for the
    single-byte format) + each index packed with that format.
    """
    # Pick the narrowest unsigned format wide enough for every index.
    # None entries (holes) count as 0 for sizing; the original
    # max(flat, key=...) could return None itself and crash on the
    # comparison below.
    max_idx = max((idx or 0) for idx in flat)
    if max_idx > 0xFFFF:
        format_str = '>L'
    elif max_idx > 0xFF:
        format_str = '>H'
    else:
        format_str = '>B'
    packer = struct.Struct(format_str)
    # The all-ones value of the chosen width marks a missing (None) index.
    fail_idx = packer.unpack(b'\xFF' * packer.size)[0]
    # One-past-the-end marks EOF (any negative index encodes it).
    eof_idx = len(flat)

    def real_idx(idx: typing.Optional[int]) -> int:
        """handle special fake indexes (fixed annotation: returns int)."""
        if idx is None:
            return fail_idx
        elif idx < 0:
            return eof_idx
        return idx

    if packer.size == 1:
        # Pad so the format marker still ends with the NUL terminator
        # that deserialize_wire locates via data.index(0).
        format_str = format_str.ljust(len(format_str) + 1, '\x00')
    format_str = SIGNATURE + format_str.encode()
    return format_str + b''.join(packer.pack(real_idx(idx)) for idx in flat)
def make_queue(tree_root: MetaTree) -> MetaKeys:
    """Collect the keys whose values are still subtrees (not leaf ints)."""
    return {
        key
        for key, value in tree_root.items() if not isinstance(value, int)}
def mergable_tree(left: MetaTree, right: MetaTree) -> bool:
    """Two trees can merge when every shared key carries an equal value."""
    shared = set(left.keys()).intersection(right.keys())
    return all(left[key] == right[key] for key in shared)
def flatten_tree(tree_root: MetaTree) -> MetaTree:
    """flatten_tree.

    NOTE(review): the immediate ``return tree_root`` below makes this a
    no-op; the flattening pass after it is currently dead code,
    presumably disabled on purpose — confirm before re-enabling.
    """
    return tree_root
    root = {}
    for key, value in tree_root.items():
        if isinstance(value, int):
            root[key] = value
        else:
            root[key] = flatten_tree(value)
    queue = make_queue(root)
    while queue:
        key = queue.pop()
        value = root[key]
        # Hoist a child subtree into its parent when no keys conflict.
        if mergable_tree(root, value):
            root.pop(key)
            root.update(value.items())
            queue.update(make_queue(value))
    return root
def merge_tree(tree_root: MetaTree) -> MetaTree:
    """merge_tree.

    NOTE(review): the immediate ``return tree_root`` below makes this a
    no-op; the subtree-sharing pass after it is currently dead code,
    presumably disabled on purpose — confirm before re-enabling.
    """
    return tree_root
    root = {}
    for key, value in tree_root.items():
        if isinstance(value, int):
            root[key] = value
        else:
            root[key] = merge_tree(value)
    queue = make_queue(root)
    while queue:
        key = queue.pop()
        value = root[key]
        # Alias compatible sibling subtrees to a single shared object.
        for sibling_key in make_queue(root).difference({key}):
            sibling_value = root[sibling_key]
            if mergable_tree(sibling_value, value):
                value.update(sibling_value)
                root[sibling_key] = value
    return root
class States(Reversible):
    """Top-level codec: raw bytes <-> packed wire-format bytes."""

    def _restore(self) -> bytes:
        """restore: decode the wire format directly."""
        return deserialize_wire(self._meta)

    def _analyse(self) -> bytes:
        """analyse.

        Pipeline: follower map -> truncated keys -> lookback tree ->
        merge/flatten (currently no-ops, see their early returns) ->
        serialized indexes -> packed bytes.
        """
        node = Followers.from_data(self._data)
        node = KeyTrunc.from_data(node.meta)
        node = Reshape.from_data(node.meta)
        tree_root = merge_tree(flatten_tree(node.meta))
        node = Serialize.from_data(tree_root)
        return pack_format(node.meta)
class StatesCompressor:
    """Incremental compressor: buffers input, packs everything on flush."""

    def __init__(self) -> None:
        """Start with an empty input buffer."""
        self._data = io.BytesIO()

    def compress(self, data: bytes) -> bytes:
        """Buffer partial input; no output is produced until flush()."""
        self._data.write(data)
        return b''

    def flush(self) -> bytes:
        """Finish the stream and return its compressed representation."""
        return States.from_data(self._data.getvalue()).meta
def deserialize_wire(data: bytes) -> bytes:
    """get collection of indexes from wire format.

    Walks the packed state table, emitting one byte per leaf state,
    until an EOF or fail index is reached. Returns b'' when the
    signature does not match.
    """
    signature = data[:len(SIGNATURE)]
    if signature != SIGNATURE:
        return b''
    # The struct format string ends at the first NUL byte.
    tag_idx = data.index(0)
    unpacker = struct.Struct(data[len(SIGNATURE):tag_idx])
    if unpacker.size == 1:
        # Single-byte formats are NUL-padded by pack_format; skip the pad.
        tag_idx += 1
    states_data = data[tag_idx:]
    indexes = unpacker.iter_unpack(states_data)
    # The first packed value is the largest literal byte value (leaf cutoff).
    max_key = next(indexes)[0]
    flat = list(itertools.chain.from_iterable(indexes))
    max_idx = len(flat) + 1
    # BUG FIX: struct.unpack() returns a tuple, so the original
    # `flat_offset == fail_idx` could never be true; take element [0],
    # mirroring pack_format's fail_idx computation.
    fail_idx = unpacker.unpack(b'\xFF' * unpacker.size)[0]
    output = io.BytesIO()
    index = -1
    flat_offset = 0
    while True:
        # Look back over already-emitted output; zero-pad past the start.
        if index < -len(output.getvalue()):
            lookback = 0
        else:
            lookback = output.getvalue()[index]
        index -= 1
        flat_offset = flat[flat_offset + lookback - flat[flat_offset]]
        if flat_offset == max_idx:
            break
        elif flat_offset <= max_key:
            # Leaf reached: emit the byte and restart from the table root.
            output.write(bytes((flat_offset,)))
            index = -1
            flat_offset = 0
        elif flat_offset == fail_idx:
            break
    return output.getvalue()
class StatesDecompressor:
    """decompression class for incremental unpacking.
    decompression class for incremental unpacking
    """

    def __init__(self) -> None:
        """decompression class for incremental unpacking."""
        self._data = io.BytesIO()  # compressed bytes received so far
        self._eof = False  # NOTE(review): never set True anywhere in view
        self._needs_input = True  # NOTE(review): never updated
        self._unused_data = io.BytesIO()

    def decompress(self, data: bytes, max_length: int=-1) -> bytes:
        """get partial reconstruction of stream.

        NOTE(review): when 0 < max_length this returns the raw
        compressed buffer, not decompressed output, and ignores
        max_length entirely — looks unfinished; confirm intent.
        """
        self._data.write(data)
        if max_length < 0:
            return deserialize_wire(self._data.getvalue())
        elif max_length == 0:
            return b''
        data = self._data.getvalue()
        return self._data.getvalue()

    @property
    def eof(self) -> bool:
        """get is file end reached."""
        return self._eof

    @property
    def needs_input(self) -> bool:
        """Whether more input is required before output can be produced."""
        return self._needs_input

    @property
    def unused_data(self) -> bytes:
        """Bytes received after the end of the compressed stream."""
        return self._unused_data.getvalue()
class StatesFile(io.BufferedIOBase, typing.BinaryIO):
"""wrapper for transparent file compression.
wrapper for transparent file compression
"""
    def __init__(self, filename, mode='r') -> None:
        """wrapper for transparent file compression.

        Opens the underlying file in binary mode: any mode containing
        'r' opens for reading, everything else for writing.
        """
        super(StatesFile, self).__init__()
        if 'r' in mode:
            self._file = io.FileIO(filename, 'rb')
        else:
            self._file = io.FileIO(filename, 'wb')
def __enter__(self) -> 'StatesFile':
"""context manager."""
return | |
- EncryptedPrivateKeyInfo (RSA/DSA/EC - PKCS#8)
- Encrypted RSAPrivateKey (PEM only, OpenSSL)
- Encrypted DSAPrivateKey (PEM only, OpenSSL)
- Encrypted ECPrivateKey (PEM only, OpenSSL)
:param data:
A byte string to load the private key from
:param password:
The password to unencrypt the private key
:raises:
ValueError - when the data does not appear to contain a private key, or the password is invalid
:return:
An asn1crypto.keys.PrivateKeyInfo object
"""
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if password is not None:
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
else:
password = b''
# Appears to be PEM formatted
if re.match(b'\\s*-----', data) is not None:
key_type, _, data = _unarmor_pem(data, password)
if key_type == 'public key':
raise ValueError(pretty_message(
'''
The data specified does not appear to be a private key, but
rather a public key
'''
))
if key_type == 'certificate':
raise ValueError(pretty_message(
'''
The data specified does not appear to be a private key, but
rather a certificate
'''
))
try:
pki = PrivateKeyInfo.load(data)
# Call .native to fully parse since asn1crypto is lazy
pki.native
return pki
except (ValueError):
pass # Data was not PrivateKeyInfo
try:
parsed_wrapper = EncryptedPrivateKeyInfo.load(data)
encryption_algorithm_info = parsed_wrapper['encryption_algorithm']
encrypted_data = parsed_wrapper['encrypted_data'].native
decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)
pki = PrivateKeyInfo.load(decrypted_data)
# Call .native to fully parse since asn1crypto is lazy
pki.native
return pki
except (ValueError):
pass # Data was not EncryptedPrivateKeyInfo
try:
parsed = RSAPrivateKey.load(data)
# Call .native to fully parse since asn1crypto is lazy
parsed.native
return PrivateKeyInfo.wrap(parsed, 'rsa')
except (ValueError):
pass # Data was not an RSAPrivateKey
try:
parsed = DSAPrivateKey.load(data)
# Call .native to fully parse since asn1crypto is lazy
parsed.native
return PrivateKeyInfo.wrap(parsed, 'dsa')
except (ValueError):
pass # Data was not a DSAPrivateKey
try:
parsed = ECPrivateKey.load(data)
# Call .native to fully parse since asn1crypto is lazy
parsed.native
return PrivateKeyInfo.wrap(parsed, 'ec')
except (ValueError):
pass # Data was not an ECPrivateKey
raise ValueError(pretty_message(
'''
The data specified does not appear to be a known private key format
'''
))
def _unarmor_pem(data, password=None):
    """
    Removes PEM-encoding from a public key, private key or certificate. If the
    private key is encrypted, the password will be used to decrypt it.
    :param data:
        A byte string of the PEM-encoded data
    :param password:
        A byte string of the encryption password, or None
    :return:
        A 3-element tuple in the format: (key_type, algorithm, der_bytes). The
        key_type will be a unicode string of "public key", "private key" or
        "certificate". The algorithm will be a unicode string of "rsa", "dsa"
        or "ec".
    """
    object_type, headers, der_bytes = unarmor(data)
    # Group 1 captures the full header, group 2 the algorithm (if legacy).
    type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'
    armor_type = re.match(type_regex, object_type)
    if not armor_type:
        raise ValueError(pretty_message(
            '''
            data does not seem to contain a PEM-encoded certificate, private
            key or public key
            '''
        ))
    pem_header = armor_type.group(1)
    data = data.strip()
    # RSA private keys are encrypted after being DER-encoded, but before base64
    # encoding, so they need to be handled specially
    if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):
        algo = armor_type.group(2).lower()
        return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))
    key_type = pem_header.lower()
    algo = None
    if key_type == 'encrypted private key':
        # PKCS#8 encrypted keys are decrypted later by the caller.
        key_type = 'private key'
    elif key_type == 'rsa public key':
        key_type = 'public key'
        algo = 'rsa'
    return (key_type, algo, der_bytes)
def _unarmor_pem_openssl_private(headers, data, password):
    """
    Parses a PKCS#1 private key, or encrypted private key
    :param headers:
        A dict of "Name: Value" lines from right after the PEM header
    :param data:
        A byte string of the DER-encoded PKCS#1 private key
    :param password:
        A byte string of the password to use if the private key is encrypted
    :return:
        A byte string of the DER-encoded private key
    """
    enc_algo = None
    enc_iv_hex = None
    enc_iv = None
    if 'DEK-Info' in headers:
        params = headers['DEK-Info']
        if params.find(',') != -1:
            enc_algo, enc_iv_hex = params.strip().split(',')
        else:
            # No IV present in the header: legacy RC4 encryption.
            # NOTE(review): on this path enc_iv stays None, yet
            # enc_iv[0:8] is sliced below when deriving the key —
            # this would raise TypeError; confirm upstream behavior.
            enc_algo = 'RC4'
    if not enc_algo:
        # No DEK-Info header: the key is unencrypted.
        return data
    if enc_iv_hex:
        enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))
    enc_algo = enc_algo.lower()
    # Key length in bytes for each OpenSSL cipher name.
    enc_key_length = {
        'aes-128-cbc': 16,
        'aes-128': 16,
        'aes-192-cbc': 24,
        'aes-192': 24,
        'aes-256-cbc': 32,
        'aes-256': 32,
        'rc4': 16,
        'rc4-64': 8,
        'rc4-40': 5,
        'rc2-64-cbc': 8,
        'rc2-40-cbc': 5,
        'rc2-cbc': 16,
        'rc2': 16,
        'des-ede3-cbc': 24,
        'des-ede3': 24,
        'des3': 24,
        'des-ede-cbc': 16,
        'des-cbc': 8,
        'des': 8,
    }[enc_algo]
    # OpenSSL's EVP_BytesToKey derivation: iterated MD5 over
    # password + first 8 bytes of the IV until enough key material exists.
    enc_key = hashlib.md5(password + enc_iv[0:8]).digest()
    while enc_key_length > len(enc_key):
        enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()
    enc_key = enc_key[0:enc_key_length]
    # Map each OpenSSL cipher name to the generic cipher family.
    enc_algo_name = {
        'aes-128-cbc': 'aes',
        'aes-128': 'aes',
        'aes-192-cbc': 'aes',
        'aes-192': 'aes',
        'aes-256-cbc': 'aes',
        'aes-256': 'aes',
        'rc4': 'rc4',
        'rc4-64': 'rc4',
        'rc4-40': 'rc4',
        'rc2-64-cbc': 'rc2',
        'rc2-40-cbc': 'rc2',
        'rc2-cbc': 'rc2',
        'rc2': 'rc2',
        'des-ede3-cbc': 'tripledes',
        'des-ede3': 'tripledes',
        'des3': 'tripledes',
        'des-ede-cbc': 'tripledes',
        'des-cbc': 'des',
        'des': 'des',
    }[enc_algo]
    decrypt_func = crypto_funcs[enc_algo_name]
    if enc_algo_name == 'rc4':
        # Stream cipher: no IV argument.
        return decrypt_func(enc_key, data)
    return decrypt_func(enc_key, data, enc_iv)
def _parse_pkcs12(data, password, load_private_key):
    """
    Parses a PKCS#12 ANS.1 DER-encoded structure and extracts certs and keys
    :param data:
        A byte string of a DER-encoded PKCS#12 file
    :param password:
        A byte string of the password to any encrypted data
    :param load_private_key:
        A callable that will accept a byte string and return an
        oscrypto.asymmetric.PrivateKey object
    :raises:
        ValueError - when any of the parameters are of the wrong type or value
        OSError - when an error is returned by one of the OS decryption functions
    :return:
        A three-element tuple of:
        1. An asn1crypto.keys.PrivateKeyInfo object
        2. An asn1crypto.x509.Certificate object
        3. A list of zero or more asn1crypto.x509.Certificate objects that are
        "extra" certificates, possibly intermediates from the cert chain
    """
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))
    if password is not None:
        if not isinstance(password, byte_cls):
            raise TypeError(pretty_message(
                '''
                password must be a byte string, not %s
                ''',
                type_name(password)
            ))
    else:
        password = b''
    # Both maps are keyed by public-key fingerprint so keys can be
    # paired with their matching certificates below.
    certs = {}
    private_keys = {}
    pfx = Pfx.load(data)
    auth_safe = pfx['auth_safe']
    if auth_safe['content_type'].native != 'data':
        raise ValueError(pretty_message(
            '''
            Only password-protected PKCS12 files are currently supported
            '''
        ))
    authenticated_safe = pfx.authenticated_safe
    mac_data = pfx['mac_data']
    if mac_data:
        # Verify the file-level HMAC before trusting any contents.
        mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native
        key_length = {
            'sha1': 20,
            'sha224': 28,
            'sha256': 32,
            'sha384': 48,
            'sha512': 64,
            'sha512_224': 28,
            'sha512_256': 32,
        }[mac_algo]
        mac_key = pkcs12_kdf(
            mac_algo,
            password,
            mac_data['mac_salt'].native,
            mac_data['iterations'].native,
            key_length,
            3  # ID 3 is for generating an HMAC key
        )
        hash_mod = getattr(hashlib, mac_algo)
        computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()
        stored_hmac = mac_data['mac']['digest'].native
        # constant_compare avoids leaking the mismatch position (timing).
        if not constant_compare(computed_hmac, stored_hmac):
            raise ValueError('Password provided is invalid')
    for content_info in authenticated_safe:
        content = content_info['content']
        if isinstance(content, OctetString):
            # Unencrypted SafeContents.
            _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)
        elif isinstance(content, EncryptedData):
            # Password-encrypted SafeContents: decrypt, then parse.
            encrypted_content_info = content['encrypted_content_info']
            encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']
            encrypted_content = encrypted_content_info['encrypted_content'].native
            decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)
            _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)
        else:
            raise ValueError(pretty_message(
                '''
                Public-key-based PKCS12 files are not currently supported
                '''
            ))
    # Prefer a key/cert pair that share a public-key fingerprint.
    key_fingerprints = set(private_keys.keys())
    cert_fingerprints = set(certs.keys())
    common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))
    key = None
    cert = None
    other_certs = []
    if len(common_fingerprints) >= 1:
        fingerprint = common_fingerprints[0]
        key = private_keys[fingerprint]
        cert = certs[fingerprint]
        other_certs = [certs[f] for f in certs if f != fingerprint]
        return (key, cert, other_certs)
    # No matching pair: fall back to the first key and/or cert found.
    if len(private_keys) > 0:
        first_key = sorted(list(private_keys.keys()))[0]
        key = private_keys[first_key]
    if len(certs) > 0:
        first_key = sorted(list(certs.keys()))[0]
        cert = certs[first_key]
        del certs[first_key]
    if len(certs) > 0:
        other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)
    return (key, cert, other_certs)
def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key):
"""
Parses a SafeContents PKCS#12 ANS.1 structure and extracts certs and keys
:param safe_contents:
A byte string of ber-encoded SafeContents, or a asn1crypto.pkcs12.SafeContents
parsed object
:param certs:
A dict to store certificates in
:param keys:
A dict to store keys in
:param password:
A byte string of the password to any encrypted data
:param load_private_key:
A callable that will accept a byte string and return an
oscrypto.asymmetric.PrivateKey object
"""
if isinstance(safe_contents, byte_cls):
safe_contents = SafeContents.load(safe_contents)
for safe_bag in safe_contents:
bag_value = safe_bag['bag_value']
if isinstance(bag_value, CertBag):
if bag_value['cert_id'].native == 'x509':
cert = bag_value['cert_value'].parsed
public_key_info = cert['tbs_certificate']['subject_public_key_info']
certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed
elif isinstance(bag_value, PrivateKeyInfo):
private_keys[_fingerprint(bag_value, load_private_key)] = bag_value
elif isinstance(bag_value, EncryptedPrivateKeyInfo):
encryption_algorithm_info = bag_value['encryption_algorithm']
| |
"""
Asset compilation and collection.
"""
import argparse
import glob
import json
import os
import traceback
from datetime import datetime
from functools import wraps
from threading import Timer
from paver import tasks
from paver.easy import call_task, cmdopts, consume_args, needs, no_help, path, sh, task
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT
from openedx.core.djangoapps.theming.paver_helpers import get_theme_paths
from .utils.cmd import cmd, django_cmd
from .utils.envs import Env
from .utils.process import run_background_process
from .utils.timer import timed
# setup baseline paths
ALL_SYSTEMS = ['lms', 'studio']
LMS = 'lms'
CMS = 'cms'
# Maps user-facing system names to their on-disk directory names
# ('studio' is an alias for the 'cms' directory).
SYSTEMS = {
    'lms': LMS,
    'cms': CMS,
    'studio': CMS
}
# Common lookup paths that are added to the lookup paths for all sass compilations
COMMON_LOOKUP_PATHS = [
    path("common/static"),
    path("common/static/sass"),
    path('node_modules/@edx'),
    path('node_modules'),
]
# A list of NPM installed libraries that should be copied into the common
# static directory.
# If string ends with '/' then all file in the directory will be copied.
NPM_INSTALLED_LIBRARIES = [
    'backbone.paginator/lib/backbone.paginator.js',
    'backbone/backbone.js',
    'bootstrap/dist/js/bootstrap.bundle.js',
    'hls.js/dist/hls.js',
    'jquery-migrate/dist/jquery-migrate.js',
    'jquery.scrollto/jquery.scrollTo.js',
    'jquery/dist/jquery.js',
    'moment-timezone/builds/moment-timezone-with-data.js',
    'moment/min/moment-with-locales.js',
    'picturefill/dist/picturefill.js',
    'requirejs/require.js',
    'underscore.string/dist/underscore.string.js',
    'underscore/underscore.js',
    '@edx/studio-frontend/dist/',
    'which-country/index.js'
]
# A list of NPM installed developer libraries that should be copied into the common
# static directory only in development mode.
NPM_INSTALLED_DEVELOPER_LIBRARIES = [
    'sinon/pkg/sinon.js',
    'squirejs/src/Squire.js',
]
# Directory to install static vendor files
NPM_JS_VENDOR_DIRECTORY = path('common/static/common/js/vendor')
NPM_CSS_VENDOR_DIRECTORY = path("common/static/common/css/vendor")
NPM_CSS_DIRECTORY = path("common/static/common/css")
# system specific lookup path additions, add sass dirs if one system depends on the sass files for other systems
SASS_LOOKUP_DEPENDENCIES = {
    'cms': [path('lms') / 'static' / 'sass' / 'partials', ],
}
# Collectstatic log directory setting
COLLECTSTATIC_LOG_DIR_ARG = 'collect_log_dir'
# Webpack command
WEBPACK_COMMAND = 'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'
def get_sass_directories(system, theme_dir=None):
    """
    Determine the set of SASS directories to be compiled for the specified
    system (and, optionally, theme) and return them as a list of dicts:
    {
        "sass_source_dir": "",       # directory holding the source sass files
        "css_destination_dir": "",   # destination for the compiled css
        "lookup_paths": [],          # lookup paths for @import resolution
    }
    If theme_dir is empty or None, only the system's own directories are
    returned (i.e. lms or cms).
    :param system: name of the system for which to compile sass e.g. 'lms', 'cms'
    :param theme_dir: absolute path of theme for which to compile sass files.
    """
    if system not in SYSTEMS:
        raise ValueError("'system' must be one of ({allowed_values})".format(
            allowed_values=', '.join(list(SYSTEMS.keys())))
        )
    system = SYSTEMS[system]
    if theme_dir:
        # Theme compilation covers both the system's sass and the theme's.
        return list(get_theme_sass_dirs(system, theme_dir))
    return list(get_system_sass_dirs(system))
def get_common_sass_directories():
    """
    Return the common SASS directories compiled for every system and theme.
    Each entry is a dict with "sass_source_dir", "css_destination_dir" and
    "lookup_paths" keys.
    """
    return [{
        "sass_source_dir": path("common/static/sass"),
        "css_destination_dir": path("common/static/css"),
        "lookup_paths": COMMON_LOOKUP_PATHS,
    }]
def get_theme_sass_dirs(system, theme_dir):
    """
    Return list of sass dirs that need to be compiled for the given theme.
    :param system: name of the system for which to compile sass e.g. 'lms', 'cms'
    :param theme_dir: absolute path of theme for which to compile sass files.
    """
    if system not in ('lms', 'cms'):
        raise ValueError('"system" must either be "lms" or "cms"')
    dirs = []
    system_sass_dir = path(system) / "static" / "sass"
    sass_dir = theme_dir / system / "static" / "sass"
    css_dir = theme_dir / system / "static" / "css"
    dependencies = SASS_LOOKUP_DEPENDENCIES.get(system, [])
    if sass_dir.isdir():
        css_dir.mkdir_p()
        shared_lookups = dependencies + [
            sass_dir / "partials",
            system_sass_dir / "partials",
            system_sass_dir,
        ]
        # First compile the system's own sass into the theme's css dir,
        # then compile the theme's sass so it overrides those css files.
        dirs.append({
            "sass_source_dir": system_sass_dir,
            "css_destination_dir": css_dir,
            "lookup_paths": list(shared_lookups),
        })
        dirs.append({
            "sass_source_dir": sass_dir,
            "css_destination_dir": css_dir,
            "lookup_paths": list(shared_lookups),
        })
        # Certificates sass exists only for the lms system.
        if system == 'lms':
            certs_base = theme_dir / system / "static" / "certificates"
            dirs.append({
                "sass_source_dir": certs_base / "sass",
                "css_destination_dir": certs_base / "css",
                "lookup_paths": [
                    sass_dir / "partials",
                    sass_dir
                ],
            })
    return dirs
def get_system_sass_dirs(system):
    """
    Return list of sass dirs that need to be compiled for the given system.
    :param system: name of the system for which to compile sass e.g. 'lms', 'cms'
    """
    if system not in ('lms', 'cms'):
        raise ValueError('"system" must either be "lms" or "cms"')
    sass_dir = path(system) / "static" / "sass"
    css_dir = path(system) / "static" / "css"
    dirs = [{
        "sass_source_dir": sass_dir,
        "css_destination_dir": css_dir,
        "lookup_paths": SASS_LOOKUP_DEPENDENCIES.get(system, []) + [
            sass_dir / "partials",
            sass_dir,
        ],
    }]
    # Certificates sass exists only for the lms system.
    if system == 'lms':
        certs_base = path(system) / "static" / "certificates"
        dirs.append({
            "sass_source_dir": certs_base / "sass",
            "css_destination_dir": certs_base / "css",
            "lookup_paths": [
                sass_dir / "partials",
                sass_dir
            ],
        })
    return dirs
def get_watcher_dirs(theme_dirs=None, themes=None):
    """
    Return sass directories that need to be added to sass watcher.
    Example:
        >> get_watcher_dirs('/edx/app/edx-platform/themes', ['red-theme'])
        [
            'common/static',
            'common/static/sass',
            'lms/static/sass',
            'lms/static/sass/partials',
            '/edx/app/edxapp/edx-platform/themes/red-theme/lms/static/sass',
            '/edx/app/edxapp/edx-platform/themes/red-theme/lms/static/sass/partials',
            'cms/static/sass',
            'cms/static/sass/partials',
            '/edx/app/edxapp/edx-platform/themes/red-theme/cms/static/sass/partials',
        ]
    Parameters:
        theme_dirs (list): list of theme base directories.
        themes (list): list containing names of themes
    Returns:
        (list): dirs that need to be added to sass watchers.
    """
    watch_dirs = list(COMMON_LOOKUP_PATHS)
    if theme_dirs and themes:
        # Register sass watchers for all the given themes.
        for theme in get_theme_paths(themes=themes, theme_dirs=theme_dirs):
            for entry in get_sass_directories('lms', theme) + get_sass_directories('cms', theme):
                watch_dirs.append(entry['sass_source_dir'])
                watch_dirs.extend(entry['lookup_paths'])
    # Register sass watchers for lms and cms themselves.
    for entry in get_sass_directories('lms') + get_sass_directories('cms') + get_common_sass_directories():
        watch_dirs.append(entry['sass_source_dir'])
        watch_dirs.extend(entry['lookup_paths'])
    # De-duplicate; ordering is not significant for watchers.
    return list(set(watch_dirs))
def debounce(seconds=1):
    """
    Decorator factory that delays calls to the wrapped function until
    `seconds` seconds pass with no new call; only the most recent call's
    arguments are then used.
    """
    def decorator(func):
        func.timer = None  # pending Timer, stored on the wrapped function

        @wraps(func)
        def wrapper(*args, **kwargs):
            def fire():
                func(*args, **kwargs)
                func.timer = None

            # A new call cancels any still-pending invocation.
            if func.timer:
                func.timer.cancel()
            func.timer = Timer(seconds, fire)
            func.timer.start()
        return wrapper
    return decorator
class SassWatcher(PatternMatchingEventHandler):
    """
    Watchdog event handler that recompiles sass whenever a *.scss file
    changes (xmodule sass is handled by a separate watcher).
    """
    ignore_directories = True
    patterns = ['*.scss']
    ignore_patterns = ['common/static/xmodule/*']

    def register(self, observer, directories):
        """
        Register files with observer.
        Arguments:
            observer (watchdog.observers.Observer): sass file observer
            directories (list): list of directories to be registered for
                the sass watcher. Entries may contain glob wildcards.
        """
        for dirname in directories:
            # Expand glob patterns to concrete paths; plain names pass through.
            expanded = glob.glob(dirname) if '*' in dirname else [dirname]
            for obs_dirname in expanded:
                observer.schedule(self, obs_dirname, recursive=True)

    @debounce()
    def on_any_event(self, event):
        """Recompile sass once a burst of file-change events settles."""
        print('\tCHANGED:', event.src_path)
        try:
            compile_sass()      # pylint: disable=no-value-for-parameter
        except Exception:       # pylint: disable=broad-except
            # A broken compile must not kill the watcher loop.
            traceback.print_exc()
class XModuleSassWatcher(SassWatcher):
    """
    Variant of SassWatcher for xmodule sass: same file patterns, but no
    ignores, and changes trigger xmodule asset processing instead of a
    full sass compile.
    """
    ignore_directories = True
    ignore_patterns = []

    @debounce()
    def on_any_event(self, event):
        """Reprocess xmodule assets once a burst of changes settles."""
        print('\tCHANGED:', event.src_path)
        try:
            process_xmodule_assets()
        except Exception:       # pylint: disable=broad-except
            # Keep the watcher alive even if asset processing fails.
            traceback.print_exc()
class XModuleAssetsWatcher(PatternMatchingEventHandler):
    """
    Watchdog event handler that reprocesses xmodule assets (and restarts
    the Django servers) whenever a css or js file under the xmodule tree
    changes.
    """
    ignore_directories = True
    patterns = ['*.css', '*.js']

    def register(self, observer):
        """
        Register the xmodule asset tree with the given watchdog observer.
        """
        observer.schedule(self, 'common/lib/xmodule/', recursive=True)

    @debounce()
    def on_any_event(self, event):
        """Rebuild assets once a burst of changes settles."""
        print('\tCHANGED:', event.src_path)
        try:
            process_xmodule_assets()
        except Exception:       # pylint: disable=broad-except
            # Keep the watcher alive even if asset processing fails.
            traceback.print_exc()
        # To refresh the hash values of static xmodule content
        restart_django_servers()
@task
@no_help
@cmdopts([
('system=', 's', 'The system to compile sass for (defaults to all)'),
('theme-dirs=', '-td', 'Theme dirs containing all themes (defaults to None)'),
('themes=', '-t', 'The theme to compile sass for (defaults to None)'),
('debug', 'd', 'Debug mode'),
('force', '', 'Force full compilation'),
])
@timed
def compile_sass(options):
"""
Compile Sass to CSS. If command is called without any arguments, it will
only compile lms, cms sass for the open source theme. And none of the comprehensive theme's sass would be compiled.
If you want to compile sass for all comprehensive themes you will have to run compile_sass
specifying all the themes that need to be compiled..
The following is a list of some possible ways to use this command.
Command:
paver compile_sass
Description:
compile sass files for both lms and cms. If command is called like above (i.e. without any arguments) it will
only compile lms, cms sass for the open source theme. None of the theme's sass will be compiled.
| |
<gh_stars>10-100
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The api_external module is part of the nmeta suite, but is run
separately
This module runs a class and methods for an API that
exposes an interface into nmeta MongoDB collections.
It leverages the Eve Python REST API Framework
"""
#*** Python 3 style division results as floating point:
from __future__ import division
import os
#*** Import Eve for REST API Framework:
from eve import Eve
#*** Inherit logging etc:
from baseclass import BaseClass
#*** mongodb Database Import:
from pymongo import MongoClient
#*** nmeta imports
import config
#*** import from api_definitions subdirectory:
from api_definitions import switches_api
from api_definitions import pi_rate
from api_definitions import pi_time
from api_definitions import controller_summary
from api_definitions import identities_api
from api_definitions import identities_ui
from api_definitions import flows_api
from api_definitions import flows_removed_api
from api_definitions import flows_ui
from api_definitions import flow_mods_api
from api_definitions import classifications_api
#*** For timestamps:
import datetime
#*** To get request parameters:
from flask import request
#*** Amount of time (seconds) to go back for to calculate Packet-In rate:
#*** Amount of time (seconds) to go back for to calculate Packet-In rate:
PACKET_IN_RATE_INTERVAL = 10
#*** Amount of time (seconds) of history used when calculating packet
#*** processing-time statistics (min/avg/max):
PACKET_TIME_PERIOD = 10
#*** Used for WebUI:
FLOW_SEARCH_LIMIT = 600
FLOW_RESULT_LIMIT = 100
#*** FlowUI attributes to match against for different filter types
FLOW_FILTER_ANY = ['src', 'src_hover', 'dst', 'dst_hover', 'proto',
                   'proto_hover']
FLOW_FILTER_SRC = ['src', 'src_hover']
FLOW_FILTER_DST = ['dst', 'dst_hover']
FLOW_FILTER_SRC_OR_DST = ['src', 'src_hover', 'dst', 'dst_hover']
#*** Number of previous IP identity records to search for a hostname before
#*** giving up. Used for augmenting flows with identity metadata:
HOST_LIMIT = 2000
SERVICE_LIMIT = 250
#*** How far back in time to go back looking for packets in flow:
FLOW_TIME_LIMIT = datetime.timedelta(seconds=3600)
FLOW_REM_TIME_LIMIT = datetime.timedelta(seconds=3600)
CLASSIFICATION_TIME_LIMIT = datetime.timedelta(seconds=4000)
#*** Enumerate some proto numbers, someone's probably already done this...
#*** Ethertype values (decimal) to human-readable names:
ETH_TYPES = {
    2048: 'IPv4',
    2054: 'ARP',
    34525: 'IPv6',
    35020: 'LLDP'
}
#*** IP protocol numbers to human-readable names:
IP_PROTOS = {
    1: 'ICMP',
    2: 'IGMP',
    6: 'TCP',
    17: 'UDP',
    41: 'IPv6'
}
class ExternalAPI(BaseClass):
"""
This class provides methods for the External API
"""
def __init__(self, config):
"""
Initialise the ExternalAPI class
"""
self.config = config
#*** Set up Logging with inherited base class method:
self.configure_logging(__name__, "api_external_logging_level_s",
"api_external_logging_level_c")
#*** MongoDB Setup:
#*** Get database parameters from config:
mongo_addr = self.config.get_value("mongo_addr")
mongo_port = self.config.get_value("mongo_port")
mongo_dbname = self.config.get_value("mongo_dbname")
self.logger.info("Connecting to the %s MongoDB database on %s %s",
mongo_addr, mongo_port, mongo_dbname)
#*** Use Pymongo to connect to the nmeta MongoDB database:
mongo_client = MongoClient(mongo_addr, mongo_port)
#*** Connect to MongoDB nmeta database:
db_nmeta = mongo_client[mongo_dbname]
#*** Variables for MongoDB Collections:
self.packet_ins = db_nmeta.packet_ins
self.identities = db_nmeta.identities
self.classifications = db_nmeta.classifications
self.flow_rems = db_nmeta.flow_rems
self.db_pi_time = db_nmeta.pi_time
self.switches_col = db_nmeta.switches_col
class FlowUI(object):
"""
An object that represents a flow record to be sent in response
to the WebUI. Features:
- Flow direction normalised to direction of
first packet in flow
- Src and Dst are IP or Layer 2 to optimise screen space
- Extra data included for hover-over tips
Note that there should not be any display-specific data (i.e. don't
send any HTML, leave this to the client code)
"""
def __init__(self):
#*** Initialise flow variables:
self.flow_hash = ""
self.timestamp = ""
self.src_location_logical = ""
self.src = ""
self.src_hover = ""
self.dst = ""
self.dst_hover = ""
self.proto = ""
self.proto_hover = ""
self.tp_src = ""
self.tp_src_hover = ""
self.tp_dst = ""
self.tp_dst_hover = ""
self.classification = ""
self.classification_hover = ""
self.actions = ""
self.actions_hover = ""
self.data_sent = ""
self.data_sent_hover = ""
self.data_received = ""
self.data_received_hover = ""
def response(self):
"""
Return a dictionary object of flow parameters
for sending in response
"""
return self.__dict__
def run(self):
"""
Run the External API instance
Note that API definitions are from previously imported
files from api_definitions subdirectory
"""
#*** Eve Domain for the whole API:
eve_domain = {
'pi_rate': pi_rate.pi_rate_settings,
'pi_time': pi_time.pi_time_settings,
'controller_summary': controller_summary.controller_summary_settings,
'switches_col': switches_api.switches_settings,
'switches_count_col': switches_api.switches_count_settings,
'identities': identities_api.identities_settings,
'identities_ui': identities_ui.identities_ui_settings,
'flows': flows_api.flows_settings,
'flows_removed': flows_removed_api.flows_removed_settings,
'flows_removed_stats_count': flows_removed_api.flows_removed_stats_count_settings,
'flows_removed_src_bytes_sent': flows_removed_api.flows_removed_src_bytes_sent_settings,
'flows_removed_src_bytes_received': flows_removed_api.flows_removed_src_bytes_received_settings,
'flows_removed_dst_bytes_sent': flows_removed_api.flows_removed_dst_bytes_sent_settings,
'flows_removed_dst_bytes_received': flows_removed_api.flows_removed_dst_bytes_received_settings,
'flows_ui': flows_ui.flows_ui_settings,
'flow_mods': flow_mods_api.flow_mods_settings,
'classifications': classifications_api.classifications_settings
}
#*** Set up a settings dictionary for starting Eve app:datasource
eve_settings = {}
eve_settings['HATEOAS'] = self.config.get_value('external_api_hateoas')
eve_settings['MONGO_HOST'] = \
self.config.get_value('mongo_addr')
eve_settings['MONGO_PORT'] = \
self.config.get_value('mongo_port')
eve_settings['MONGO_DBNAME'] = \
self.config.get_value('mongo_dbname')
#*** Version, used in URL:
eve_settings['API_VERSION'] = \
self.config.get_value('external_api_version')
eve_settings['DOMAIN'] = eve_domain
#*** Allowed Eve methods:
eve_settings['RESOURCE_METHODS'] = ['GET']
eve_settings['ITEM_METHODS'] = ['GET']
#*** Set format of datetime as it appears to API consumers:
eve_settings['DATE_FORMAT'] = '%H:%M:%S.%f'
#*** TBD - set up username/password into MongoDB
#*** Set up static content location:
file_dir = os.path.dirname(os.path.realpath(__file__))
static_folder = os.path.join(file_dir, 'webUI')
#*** Set up Eve:
self.logger.info("Configuring Eve Python REST API Framework")
self.app = Eve(settings=eve_settings, static_folder=static_folder)
self.logger.debug("static_folder=%s", static_folder)
#*** Hook for adding pi_rate to returned resource:
self.app.on_fetched_resource_pi_rate += self.response_pi_rate
#*** Hook for adding pi_time to returned resource:
self.app.on_fetched_resource_pi_time += self.response_pi_time
#*** Hook for adding controller_summary to returned resource:
self.app.on_fetched_resource_controller_summary += \
self.response_controller_summary
#*** Hook for filtered identities response:
self.app.on_fetched_resource_identities_ui += \
self.response_identities_ui
#*** Hook for flows removed stats count response:
self.app.on_fetched_resource_flows_removed_stats_count += \
self.response_flows_removed_stats_count
#*** Hook for flows removed src bytes sent response:
self.app.on_fetched_resource_flows_removed_src_bytes_sent += \
self.response_flows_removed_src_bytes_sent
#*** Hook for flows removed src bytes received response:
self.app.on_fetched_resource_flows_removed_src_bytes_received += \
self.response_flows_removed_src_bytes_received
#*** Hook for flows removed dst bytes sent response:
self.app.on_fetched_resource_flows_removed_dst_bytes_sent += \
self.response_flows_removed_dst_bytes_sent
#*** Hook for flows removed dst bytes received response:
self.app.on_fetched_resource_flows_removed_dst_bytes_received += \
self.response_flows_removed_dst_bytes_received
#*** Hook for filtered flows response:
self.app.on_fetched_resource_flows_ui += \
self.response_flows_ui
#*** Hook for switch count:
self.app.on_fetched_resource_switches_count_col += \
self.response_switches_count
#*** Get necessary parameters from config:
eve_port = self.config.get_value('external_api_port')
eve_debug = self.config.get_value('external_api_debug')
eve_host = self.config.get_value('external_api_host')
#*** Run Eve:
self.logger.info("Starting Eve Python REST API Framework")
self.app.run(port=eve_port, debug=eve_debug, host=eve_host)
@self.app.route('/')
def serve_static():
"""
Serve static content for WebUI
"""
return 1
def response_pi_rate(self, items):
"""
Update the response with the packet_in rate.
Hooked from on_fetched_resource_pi_rate
Returns key/values for packet-in processing time in API response:
- timestamp
- pi_rate
"""
self.logger.debug("Hooked on_fetched_resource items=%s ", items)
#*** Get rid of superfluous keys in response:
if '_items' in items:
del items['_items']
if '_meta' in items:
del items['_meta']
items['timestamp'] = datetime.datetime.now().strftime("%H:%M:%S")
items['pi_rate'] = self.get_pi_rate()
def response_pi_time(self, items):
"""
Update the response with the packet_time min, avg and max.
Hooked from on_fetched_resource_pi_time
Returns key/values for packet-in processing time in API response:
- timestamp
- ryu_time_max
- ryu_time_min
- ryu_time_avg
- ryu_time_period
- ryu_time_records
- pi_time_max
- pi_time_min
- pi_time_avg
- pi_time_period
- pi_time_records
If no data found within time period then returns without
key/values
"""
#*** Get rid of superfluous keys in response:
if '_items' in items:
del items['_items']
if '_meta' in items:
del items['_meta']
results = self.get_pi_time()
if results:
#*** Set values in API response:
items['timestamp'] = results['timestamp']
items['ryu_time_max'] = results['ryu_time_max']
items['ryu_time_min'] = results['ryu_time_min']
items['ryu_time_avg'] = results['ryu_time_avg']
items['ryu_time_period'] = results['ryu_time_period']
items['ryu_time_records'] = results['ryu_time_records']
items['pi_time_max'] = results['pi_time_max']
items['pi_time_min'] = results['pi_time_min']
items['pi_time_avg'] = results['pi_time_avg']
items['pi_time_period'] = results['pi_time_period']
items['pi_time_records'] = results['pi_time_records']
def response_controller_summary(self, items):
"""
Update the response with the packet_in rate, packet processing
time stats
Hooked from on_fetched_resource_controller_summary
Rounds seconds results
"""
self.logger.debug("Hooked on_fetched_resource items=%s ", items)
#*** Number of decimal places to round seconds results to:
places = 3
#*** Get rid of superfluous _items key in response:
if '_items' in items:
del items['_items']
#*** pi_rate:
items['pi_rate'] = self.get_pi_rate()
#*** pi_time:
results = self.get_pi_time()
if results:
#*** Set values in API response:
items['timestamp'] = results['timestamp']
items['ryu_time_max'] = round(results['ryu_time_max'], places)
items['ryu_time_min'] = round(results['ryu_time_min'], places)
items['ryu_time_avg'] = round(results['ryu_time_avg'], places)
items['ryu_time_period'] = results['ryu_time_period']
items['ryu_time_records'] = results['ryu_time_records']
items['pi_time_max'] = round(results['pi_time_max'], places)
items['pi_time_min'] = round(results['pi_time_min'], places)
items['pi_time_avg'] = round(results['pi_time_avg'], places)
items['pi_time_period'] = results['pi_time_period']
items['pi_time_records'] = results['pi_time_records']
else:
items['timestamp'] = 'unknown'
items['ryu_time_max'] = 'unknown'
items['ryu_time_min'] = 'unknown'
items['ryu_time_avg'] = 'unknown'
items['ryu_time_period'] = 'unknown'
items['ryu_time_records'] = 'unknown'
items['pi_time_max'] = 'unknown'
items['pi_time_min'] = 'unknown'
items['pi_time_avg'] = 'unknown'
items['pi_time_period'] = 'unknown'
items['pi_time_records'] = 'unknown'
def response_identities_ui(self, items):
"""
Populate the response with identities that are filtered:
- Reverse sort | |
<reponame>marcellogoccia/deep-value-investing
import traceback
import time
import datetime
import sys
from decimal import Decimal
from utilities import log
from urllib.request import URLError
# Fallback value for float fields when no value can be parsed.
default_float = None
# Strings that the scraped data sources use to mean "not available".
NA = ['-', '--', 'N/A', 'NA', ' ']
def parse(string, dictionary):
    """Return dictionary[string] if the key is present, else ''.

    Uses dict.get with a default instead of the previous
    contains-check followed by a second lookup.
    """
    return dictionary.get(string, '')
# Magnitude-suffix to power-of-ten exponent mapping, used by
# Methods.text_to_num to expand values like "1.5M" or "2B".
d = {
    'K': 3,
    'k': 3,
    'M': 6,
    'm': 6,
    'B': 9,
    'b': 9,
    'T': 12,
    't': 12
}
class Methods:
@staticmethod
def perform_until_succeed(max_timeout, function, *args):
"""
@function perform_until_succeed
The method loops several times trying to running the function passed as input with its own arguments.
It the function is performed without raising an exception, the performance is considered succeeded, otherwise
it keeps on looping until the timout is reached.
@param max_timeout the time spent before exiting without success.
@param function the function to execute
@args the parameters of the function.
"""
try:
timeout = time.time() + max_timeout # 1 minutes from now
while True:
time.sleep(1)
if time.time() > timeout:
raise Exception(f"Cannot execute the requested function {function}!!!: {e}\n{getDebugInfo()}")
try:
fun_output = function(*args)
return fun_output
except Exception as e:
print(f"Debug:\t\tTrying running the function {function} ")
continue
except URLError as e:
msg = "Cannot open url."
log.error(msg)
raise Exception(msg)
except Exception as e:
msg = f"Cannot execute the requested function {function}!!!: {e}\n{getDebugInfo()}"
log.error(msg)
raise Exception(msg)
@staticmethod
def text_to_num(text):
"""
@function text_to_num
Used to convert text number with B or M or K into numbers
:param text:
:return:
"""
try:
if text[-1] in d:
num, magnitude = text[:-1], text[-1]
return Decimal(num) * 10 ** d[magnitude]
else:
return Decimal(text)
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def convert_to_float(value):
"""
@function convert_to_float
Used to convert text number with some particular keywords (like M,B or %) into a floating point number
"""
try:
if value is None:
return None
assert(isinstance(value, str))
# remove the comma from large numbers.
value = value.replace(',', '')
if "%" in value:
value = value.replace("%", u'')
value = float(value) / 100 # get a percentage
elif "T" in value or "t" in value or "B" in value or "b" in value or \
"M" in value or "m" in value or "K" in value or "k" in value:
value = Methods.text_to_num(value)
try:
value = float(value)
except ValueError:
raise Exception(f"{value} cannot be converted to float")
return value
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def get_valid_value(value):
"""
@function get_valid_value
Removes not needed characters
Still returns a string.
"""
try:
if value is None:
return None
if value == "NULL":
return None
assert(isinstance(value, str))
value = value.replace(u'\xa0', u' ')
value = value.replace(u'\n', u'')
value = value.replace(u'\t', u'')
if value in NA:
return None
value = value.replace('- ', '-')
return value
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def remove_character(string, to_remove):
try:
if string is None:
return None
return string.replace(to_remove, '')
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def from_percent_to_decimal(value):
try:
if value is None:
return None
try:
value = float(value)
except ValueError:
raise Exception(f"{value} cannot be converted to float")
if isinstance(value, float):
value = value / 100
return value
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def truncate(f, n):
"""
Truncates/pads a float f to n decimal places without rounding
"""
try:
s = '{}'.format(f)
if 'e' in s or 'E' in s:
return '{0:.{1}f}'.format(f, n)
i, p, d = s.partition('.')
return '.'.join([i, (d+'0'*n)[:n]])
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def validate(value):
"""
@function get_valid_value
This method check if the variable in input is None, if it is, it return zero, otherwise
it returns the variable itself.
"""
try:
if value is None:
return 0
else:
return value
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
raise
@staticmethod
def get_today_date():
try:
return datetime.datetime.today().strftime('%Y-%m-%d')
except:
print("Cannot define the current date.")
return None
@staticmethod
def get_date_days_ago(number_of_days):
try:
date_days_ago = datetime.datetime.now() - datetime.timedelta(days=number_of_days)
return date_days_ago.strftime('%Y-%m-%d')
except:
print("Cannot define the current date.")
return None
@staticmethod
def get_date_weeks_ago(number_of_weeks):
try:
date_weeks_ago = datetime.datetime.now() - datetime.timedelta(weeks=number_of_weeks)
return date_weeks_ago.strftime('%Y-%m-%d')
except:
print("Cannot define the current date.")
return None
@staticmethod
def get_last_year():
try:
now = datetime.datetime.now()
return now.year - 1
except:
print("Cannot define last year number")
return None
@staticmethod
def get_last_month():
try:
now = datetime.datetime.now()
return now.month - 1 if now.month > 1 else 12
except:
print("Cannot define last month number")
return None
@staticmethod
def forward_days(inut_time, days=0):
try:
return inut_time + datetime.timedelta(days=days)
except:
print("Cannot generate a data with one year foward")
return None
@staticmethod
def backward_days(inut_time, days=0):
try:
return inut_time - datetime.timedelta(days=days)
except:
print("Cannot generate a data with one year foward")
return None
@staticmethod
def trunc_date(a_date):
return a_date.replace(day=1)
@staticmethod
def get_prices_in_range_of_dates(equity, dates):
"""
Given the equity in input it returns the prices for the date in the range of the input variable dates..
@param equity the equity we are interested in the dates
@param dates, the dates among which the prices will be scraped (dates is a dictionary(
@return the vector with the prices, empty if it was not successful
"""
# sort the dates of the prices.
try:
equity.prices.sort(key=lambda x: x.day)
prices_in_range = []
for price in equity.prices:
if dates['start_date'] <= price.day <= dates['end_date']:
prices_in_range.append(price)
return prices_in_range
except Exception as e:
log.error(f"There is a problem with the equity {equity.exchange}:{equity.symbol_1} : {e}\n{getDebugInfo()}")
return []
@staticmethod
def get_prices_in_range_of_dates_from_file(input_path, dates=None):
"""
Given the equity in input it returns the prices for the date in the range of the input variable dates..
@param equity the equity we are interested in the dates
@param dates, the dates among which the prices will be scraped (dates is a dictionary(
@return the vector with the prices, empty if it was not successful
"""
try:
import csv
data_prices = dict()
with open(input_path, newline='') as csvfile:
data_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
header = next(iter(data_reader))
for row in data_reader:
# Casting the two values
day = datetime.datetime.strptime(row[0], '%b %d, %Y').date()
price_str = row[1].replace(',', '')
price = float(price_str)
data_prices[day] = price
sorted_dictionary = dict()
for key in sorted(data_prices):
sorted_dictionary[key] = data_prices[key]
selected_dates = dict()
for key, value in sorted_dictionary.items():
if not dates:
selected_dates[key] = value
elif dates['start_date'] <= key <= dates['end_date']:
selected_dates[key] = value
# return a list of prices (from the class Price)
prices = []
for key, value in selected_dates.items():
prices.append(Price(day=key, close=value))
return prices
# header = next(iter(data_reader))
# # The following will read all the prices csv file and place in a complete dictionary
# for value in header:
# data_prices[value] = []
#
# for row in data_reader:
# for key, value in zip(header, row):
# if "Date" in key:
# value = datetime.datetime.strptime(value, '%b %d, %Y').date()
# data_prices[key].append(value)
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
return None
@staticmethod
def sort_getter(item):
value = item[0]
return value
@staticmethod
def are_variations_in_purchasing_prices_unlikely(equity, dates):
"""
The methods check if the prices of the equity in the wanted range are possible or not.
We mean that if between two near samples there is a very high variations, for example
higher that 50% or 100% it is possible that it is only a mistake of the website where the data was collected.
@return True if variations are unlikely, False if they are not.
"""
try:
prices = Methods.get_prices_in_range_of_dates(equity, dates)
higher_threshold = 5
lower_threshold = 0.1
for price_t0, price_t1 in zip(prices, prices[1:]):
try:
price_ratio = price_t1.close / price_t0.close
except ZeroDivisionError:
price_ratio = 10000
if lower_threshold < price_ratio < higher_threshold:
continue # False in this case is good
else:
log.error(f"Something wrong about the price of equity {equity.exchange}:{equity.symbol_1}. "
f"The price ratio is {price_ratio}")
return True # True in this case is not good
return False # False in this case is good
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
return False
@staticmethod
def nth_root(num, root):
#** means square
answer | |
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': self.EXP_ID,
'new_value': None
})], 'Removed exploration.')
# Suggestion should be rejected after exploration is removed from the
# story.
suggestions = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])
self.assertEqual(len(suggestions), 1)
self.assertEqual(
suggestions[0].status, suggestion_models.STATUS_REJECTED)
class UserContributionProficiencyUnitTests(test_utils.GenericTestBase):
    """Tests for querying and reading user contribution proficiency."""

    def setUp(self):
        super(UserContributionProficiencyUnitTests, self).setUp()
        self.signup('<EMAIL>', 'user1')
        self.signup('<EMAIL>', 'user2')
        self.user_1_id = self.get_user_id_from_email('<EMAIL>')
        self.user_2_id = self.get_user_id_from_email('<EMAIL>')

    def test_get_all_user_ids_who_are_allowed_to_review(self):
        # Only user 1 reaches the minimum score, and only in category2.
        for user_id, category, score in (
                (self.user_1_id, 'category1', 0),
                (self.user_1_id, 'category2',
                 feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW),
                (self.user_2_id, 'category1', 0),
                (self.user_2_id, 'category2', 0)):
            user_models.UserContributionProficiencyModel.create(
                user_id, category, score)

        self.assertEqual(
            suggestion_services.get_all_user_ids_who_are_allowed_to_review(
                'category1'),
            [])
        self.assertEqual(
            suggestion_services.get_all_user_ids_who_are_allowed_to_review(
                'category2'),
            [self.user_1_id])

        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_1_id, 'category1'))
        self.assertTrue(suggestion_services.can_user_review_category(
            self.user_1_id, 'category2'))
        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_2_id, 'category1'))
        self.assertFalse(suggestion_services.can_user_review_category(
            self.user_2_id, 'category1'))

    def test_get_all_scores_of_the_user_with_multiple_scores(self):
        for index in python_utils.RANGE(1, 4):
            user_models.UserContributionProficiencyModel.create(
                self.user_1_id,
                'category%s' % python_utils.UNICODE(index), index)
        expected_scores_dict = {
            'category%s' % python_utils.UNICODE(index): index
            for index in python_utils.RANGE(1, 4)
        }

        scores_dict = suggestion_services.get_all_scores_of_user(
            self.user_1_id)

        self.assertEqual(len(scores_dict), 3)
        self.assertDictEqual(scores_dict, expected_scores_dict)

    def test_get_all_scores_of_the_user_when_no_scores_exist(self):
        scores_dict = suggestion_services.get_all_scores_of_user(
            self.user_1_id)

        self.assertEqual(len(scores_dict), 0)
        self.assertDictEqual(scores_dict, {})
class VoiceoverApplicationServiceUnitTest(test_utils.GenericTestBase):
    """Tests for the ExplorationVoiceoverApplication class."""

    def setUp(self):
        super(VoiceoverApplicationServiceUnitTest, self).setUp()
        self.signup('<EMAIL>', 'author')
        self.author_id = self.get_user_id_from_email('<EMAIL>')

        # Store one in-review voiceover application to drive the tests.
        voiceover_model = suggestion_models.GeneralVoiceoverApplicationModel(
            id='application_id',
            target_type='exploration',
            target_id='0',
            status='review',
            author_id=self.author_id,
            final_reviewer_id=None,
            language_code='en',
            filename='filename.mp3',
            content='<p>content</p>',
            rejection_message=None)
        voiceover_model.put()
        self.voiceover_application_model = (
            suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
                'application_id'))

    def test_get_voiceover_application_from_model_with_invalid_type_raise_error(
            self):
        # A valid target type loads without error.
        suggestion_services.get_voiceover_application(
            self.voiceover_application_model.id)

        # An unrecognised target type must raise.
        self.voiceover_application_model.target_type = 'invalid_type'
        with self.assertRaisesRegexp(
            Exception,
            'Invalid target type for voiceover application: invalid_type'):
            suggestion_services.get_voiceover_application(
                self.voiceover_application_model.id)
class ReviewableSuggestionEmailInfoUnitTests(
        test_utils.GenericTestBase):
    """Tests the methods related to the ReviewableSuggestionEmailInfo class.
    """

    # Target exploration and skill ids used by the suggestion factories below.
    target_id = 'exp1'
    skill_id = 'skill1'
    # Default language for translation/question suggestions.
    language_code = 'en'
    AUTHOR_EMAIL = '<EMAIL>'
    REVIEWER_EMAIL = '<EMAIL>'
    COMMIT_MESSAGE = 'commit message'
def _create_translation_suggestion_with_translation_html(
self, translation_html):
"""Creates a translation suggestion with the given translation_html."""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': self.language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': translation_html
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_translation_change_dict,
'test description'
)
def _create_question_suggestion_with_question_html_content(
self, question_html_content):
"""Creates a question suggestion with the html content used for the
question in the question suggestion.
"""
with self.swap(
feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html_content):
add_question_change_dict = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': self.language_code,
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': self.skill_id,
'skill_difficulty': 0.3
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
suggestion_models.TARGET_TYPE_SKILL,
self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_question_change_dict,
'test description'
)
def _create_edit_state_content_suggestion(self):
"""Creates an "edit state content" suggestion."""
edit_state_content_change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': 'new html content'
},
'old_value': {
'content_id': 'content',
'html': 'old html content'
}
}
return suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, edit_state_content_change_dict,
'test description')
def _assert_reviewable_suggestion_email_infos_are_equal(
self, reviewable_suggestion_email_info,
expected_reviewable_suggestion_email_info):
"""Asserts that the reviewable suggestion email info is equal to the
expected reviewable suggestion email info.
"""
self.assertEqual(
reviewable_suggestion_email_info.suggestion_type,
expected_reviewable_suggestion_email_info.suggestion_type)
self.assertEqual(
reviewable_suggestion_email_info.language_code,
expected_reviewable_suggestion_email_info.language_code)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_content,
expected_reviewable_suggestion_email_info.suggestion_content)
self.assertEqual(
reviewable_suggestion_email_info.submission_datetime,
expected_reviewable_suggestion_email_info.submission_datetime)
    def setUp(self):
        """Registers an author and a reviewer user and creates the target
        exploration that the suggestions in these tests are made against.
        """
        super(
            ReviewableSuggestionEmailInfoUnitTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(
            self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_EMAIL, 'reviewer')
        self.reviewer_id = self.get_user_id_from_email(
            self.REVIEWER_EMAIL)
        self.save_new_valid_exploration(self.target_id, self.author_id)
    def test_create_raises_for_suggestion_type_not_on_contributor_dashboard(
            self):
        """Creating email info for a suggestion type that is not offered on
        the Contributor Dashboard must raise an exception.
        """
        edit_state_content_suggestion = (
            self._create_edit_state_content_suggestion())
        # Mocking the SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS dict in
        # suggestion services so that this test still passes if the
        # "edit state content" suggestion type is added to the Contributor
        # Dashboard in the future.
        suggestion_emphasized_text_getter_functions_mock = {}
        with self.swap(
            suggestion_services, 'SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS',
            suggestion_emphasized_text_getter_functions_mock):
            # NOTE: assertRaisesRegexp is the deprecated Python-2 alias of
            # assertRaisesRegex; kept for consistency with this codebase.
            with self.assertRaisesRegexp(
                Exception,
                'Expected suggestion type to be offered on the Contributor '
                'Dashboard, received: %s.' % (
                    suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT)):
                (
                    suggestion_services
                    .create_reviewable_suggestion_email_info_from_suggestion(
                        edit_state_content_suggestion)
                )
    def test_contributor_suggestion_types_are_in_suggestion_text_getter_dict(
            self):
        """Every Contributor Dashboard suggestion type must have an
        emphasized-text getter registered in suggestion services.
        """
        # This test will fail if a new suggestion type is added to the
        # Contributor Dashboard but hasn't been added to
        # SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS.
        sorted_text_getter_dict_suggestion_types = sorted(
            suggestion_services
            .SUGGESTION_EMPHASIZED_TEXT_GETTER_FUNCTIONS.keys())
        sorted_contributor_dashboard_suggestion_types = sorted(
            suggestion_models.CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES)
        self.assertListEqual(
            sorted_text_getter_dict_suggestion_types,
            sorted_contributor_dashboard_suggestion_types)
    def test_create_from_suggestion_returns_info_for_question_suggestion(self):
        """The email info content is the question HTML stripped of tags."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p>default question content</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'default question content',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_for_translation_suggestion(
            self):
        """The email info content is the translation HTML stripped of tags."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>default translation content</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'default translation content',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_for_empty_html(self):
        """An empty translation yields empty email info content."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                ''))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code, '',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_from_suggestion_returns_info_with_no_trailing_whitespace(
            self):
        """Surrounding whitespace and tags are stripped from the content."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                ' <p> test whitespace </p> '))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'test whitespace',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_math_rte(
            self):
        """An oppia-noninteractive-math element is rendered as '[Math]'."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>translation with rte'
                '<oppia-noninteractive-math></oppia-noninteractive-math></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Math]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_image_rte(
            self):
        """An oppia-noninteractive-image element is rendered as '[Image]'."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p>translation with rte'
                '<oppia-noninteractive-image></oppia-noninteractive-image>'
                '</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Image]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_link_rte(
            self):
        """An oppia-noninteractive-link element is rendered as '[Link]'."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_rte_repeats(
            self):
        """Each occurrence of an RTE element gets its own placeholder."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-link></oppia-noninteractive-link>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link] [Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_multi_rte(
            self):
        """Different RTE elements each produce their own placeholder."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p> translation with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-math></oppia-noninteractive-math>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                'translation with rte [Link] [Math]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_translation_suggestion_if_html_rte_value(
            self):
        """RTE attribute values do not leak into the placeholder text."""
        translation_suggestion = (
            self._create_translation_suggestion_with_translation_html(
                '<p><oppia-noninteractive-link text-with-value="&amp;quot;Test '
                'a tag&amp;quot;" url-with-value="&amp;quot;somelink&amp;'
                'quot;"></oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                translation_suggestion.suggestion_type,
                translation_suggestion.language_code,
                '[Link]',
                translation_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                translation_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_math_rte(
            self):
        """Math RTE in question HTML is rendered as '[Math]'."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-math></oppia-noninteractive-math> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Math]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_image_rte(
            self):
        """Image RTE in question HTML is rendered as '[Image]'."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-image></oppia-noninteractive-image>'
                '</p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Image]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info)
    def test_create_returns_info_for_question_suggestion_if_html_has_link_rte(
            self):
        """Link RTE in question HTML is rendered as '[Link]'."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link> </p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_repeat_rte(
            self):
        """Repeated RTE elements in question HTML each get a placeholder."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-link></oppia-noninteractive-link>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link] [Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_multi_rte(
            self):
        """Mixed RTE elements in question HTML each get a placeholder."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p> question with rte'
                '<oppia-noninteractive-link></oppia-noninteractive-link>'
                '</p><oppia-noninteractive-math></oppia-noninteractive-math>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                'question with rte [Link] [Math]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_question_suggestion_if_html_has_rte_value(
            self):
        """RTE attribute values in question HTML do not leak into the
        placeholder text.
        """
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&amp;quot;Test '
                'a tag&amp;quot;" url-with-value="&amp;quot;somelink&amp;'
                'quot;"></oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_text(
            self):
        """Text nested inside an RTE element is dropped from the content."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&amp;quot;Test '
                'a tag&amp;quot;" url-with-value="&amp;quot;somelink&amp;'
                'quot;">text</oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_html(
            self):
        """Markup nested inside an RTE element is dropped from the content."""
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link text-with-value="&amp;quot;Test '
                'a tag&amp;quot;" url-with-value="&amp;quot;somelink&amp;'
                'quot;"><p>text</p></oppia-noninteractive-link></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
    def test_create_returns_info_for_suggestion_if_html_has_rte_with_multi_word(
            self):
        """A multi-word RTE name ('link-test') is title-cased in the
        placeholder ('[Link Test]').
        """
        question_suggestion = (
            self._create_question_suggestion_with_question_html_content(
                '<p><oppia-noninteractive-link-test text-with-value='
                '"&amp;quot;Test a tag&amp;quot;" url-with-value="&amp;quot;'
                'somelink&amp;quot;"><p>text</p>'
                '</oppia-noninteractive-link-test></p>'))
        expected_reviewable_suggestion_email_info = (
            suggestion_registry.ReviewableSuggestionEmailInfo(
                question_suggestion.suggestion_type,
                question_suggestion.language_code,
                '[Link Test]',
                question_suggestion.last_updated
            ))
        reviewable_suggestion_email_info = (
            suggestion_services
            .create_reviewable_suggestion_email_info_from_suggestion(
                question_suggestion)
        )
        self._assert_reviewable_suggestion_email_infos_are_equal(
            reviewable_suggestion_email_info,
            expected_reviewable_suggestion_email_info
        )
class GetSuggestionsWaitingForReviewInfoToNotifyReviewersUnitTests(
test_utils.GenericTestBase):
"""Test the ability of the
get_suggestions_waitng_for_review_info_to_notify_reviewers method
in suggestion services, which is used to retrieve the information required
to notify reviewers that there are suggestions that need review.
"""
target_id = 'exp1'
language_code = 'en'
AUTHOR_EMAIL = '<EMAIL>'
REVIEWER_1_EMAIL = '<EMAIL>'
REVIEWER_2_EMAIL = '<EMAIL>'
COMMIT_MESSAGE = 'commit message'
def _create_translation_suggestion_with_language_code_and_author(
self, language_code, author_id):
"""Creates a translation suggestion in the given language_code with the
given author id.
"""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': '<p>This is the translated | |
#!/usr/bin/python
'''
Variable
Expression = Variable + Variable
Assignment(Expresssion)
'''
# Active Environment instance; client code must create and assign one before
# building any Typ/Var/Expression objects (they all register through it).
env = None
# Built-in type singletons, populated by Environment.initDefaults().
_float = None
_int = None
_string = None
#============================
class NameGen:
    """Descriptor that derives an attribute value by prefixing the owning
    instance's ``id`` attribute with a fixed string."""

    def __init__(self, pre):
        # Prefix prepended to the instance id on every attribute access.
        self.pre = pre

    def __get__(self, instance, owner):
        # Recomputed on each access so it always reflects instance.id.
        return self.pre + instance.id
#============================
class Typ:
    """A named type with optional single-inheritance parent."""
    def __init__(self,name, parent=None):
        self.name = name
        # BUGFIX: 'parent' was accepted but never stored, so isSubType()
        # crashed with AttributeError on the missing self.parent.
        self.parent = parent
        # Register with the current environment scope (env assigns the id).
        self.id = env.addType(self)
    def __str__(self):
        return self.name
    def isSubType(self, t ):
        """Return True when t appears anywhere on this type's parent chain
        (a type is considered a subtype of itself)."""
        myt = self
        while myt is not None:
            if t == myt: return True
            myt = myt.parent
        return False
#============================
def defaultTypFunc(typ1,typ2):
    """Default type-congruence rule for binary operators: identical types
    propagate unchanged, otherwise the more general (super-) type wins."""
    if typ1==typ2:
        return typ1
    elif typ1.isSubType(typ2): return typ2
    # BUGFIX: the original repeated the typ1.isSubType(typ2) branch; the
    # symmetric case must check typ2 against typ1 and yield typ1.
    elif typ2.isSubType(typ1): return typ1
    else: raise Exception( "ERROR : no default congruence between types "+str(typ1)+","+str(typ2) )
class Operator():
    """A named binary operator registered with the active environment."""

    def __init__(self, name):
        self.name = name
        # Register in the current scope; env supplies the operator id.
        self.id = env.addOperator(self)
        # Result-type strategy; callers may override it per operator.
        self.typFunc = defaultTypFunc

    def __str__(self):
        return self.name

    def propagateType(self, typ1, typ2):
        """Compute the result type of applying this operator to the two
        operand types, delegating to the configured typFunc."""
        return self.typFunc(typ1, typ2)
#============================
class Constant:
    """A literal value wrapped as a typed leaf node of an expression tree."""

    # Python type -> language type name, checked in order. Note that bool is
    # a subclass of int and therefore maps to "int" here, as before.
    _TYPE_TABLE = ((int, "int"), (float, "float"), (str, "string"))

    def __init__(self, val):
        self.val = val
        for pytype, typname in self._TYPE_TABLE:
            if isinstance(val, pytype):
                self.typ = env.types[typname]
                break
        else:
            raise Exception( "ERROR Constant(x) not defined for type of x == ", type(val) )
        self.name = str(val)
class Var:
    """A typed, optionally named variable registered in the current scope.

    Unnamed variables receive an auto-generated name built from
    ``env.var_prefix`` and their scope id.
    """

    def __init__(self, typ=None, name=None, val=None):
        self.val = val
        # Accept either a Typ instance or the name of a registered type.
        # NOTE(review): any other typ (including the default None) leaves
        # self.typ unset -- presumably relied on by the Expression subclass,
        # which assigns typ itself; confirm.
        if isinstance(typ, Typ):
            self.typ = typ
        elif isinstance(typ, str):
            self.typ = env.types[typ]
        self.name = name
        self.id = env.scope.add_var(self)
        if name is None:
            self.name = env.var_prefix + str(self.id)
            self.named = False
        elif isinstance(name, str):
            self.name = name
            self.named = True
        else:
            raise Exception('ERROR in Var() : variable name must be string ' )

    # Arithmetic operators build Expression nodes through the environment's
    # registered operator table.
    def __add__(self, b): return Expression(env.operators['+'], self, b)
    def __sub__(self, b): return Expression(env.operators['-'], self, b)
    def __mul__(self, b): return Expression(env.operators['*'], self, b)
    def __div__(self, b): return Expression(env.operators['/'], self, b)  # Python-2 division hook
    def __pow__(self, b): return Expression(env.operators['**'], self, b)

    def str_def(self):
        """Render a declaration-style string, e.g. ``v1 : int``."""
        return self.name + " : " + self.typ.name
#============================
def checkArg(arg):
    """Coerce arg into an expression-tree node: Var and Expression instances
    pass through unchanged, anything else is wrapped in a Constant."""
    if isinstance(arg, (Var, Expression)):
        return arg
    return Constant(arg)
def checkOperator(op):
    """Pass Operator/Function instances through; otherwise treat op as an
    operator name and register a fresh Operator for it."""
    if isinstance(op, (Operator, Function)):
        return op
    return Operator(op)
def checkStrUnfold(a):
    """Render a node for unfolded output: Expressions recurse into their
    operands, leaf nodes are shown by name."""
    return a.str_unfolded() if isinstance(a, Expression) else a.name
class Expression(Var):
    """A node combining operands with an Operator, or applying a Function.

    TODO: Var should be derived from Expression, not Expression from Var.
    """
    def __init__(self,op, arg1, arg2=None, name=None ):
        self.op = op
        if isinstance(self.op, Function):
            # Function application: arg1 is the tuple of call arguments.
            self.op = op
            if isinstance(arg1, tuple):
                self.arg1 = arg1
            else:
                raise Exception( "ERROR Expression(op): Function argument-list must be tuple of types", type(arg1) )
            self.arg2 = None
            self.typ = op.ret.typ
        else:
            # Binary operator: coerce operator and both operands to nodes.
            self.op = checkOperator( op )
            self.arg1 = checkArg( arg1 )
            self.arg2 = checkArg( arg2 )
            # BUGFIX: was op.propagateType(arg1.typ, arg1.typ) -- the second
            # operand's type was ignored, and the raw (unchecked) operands
            # were used, which crashed for plain literals. Propagate from
            # the coerced nodes via the coerced operator instead.
            self.typ = self.op.propagateType(self.arg1.typ, self.arg2.typ)
        self.id = env.scope.add_expr(self)
        if name is None :
            self.name = env.expr_prefix + str(self.id)
            self.named = False
        else:
            self.name = name
            self.named = True
    def str_assignment(self,folded=True):
        """Render '<name> : <rhs>'; folded shows child names, unfolded
        recurses into sub-expressions."""
        if folded: return self.name+" : "+self.str_folded()
        # BUGFIX: the unfolded branch previously also called str_folded().
        else: return self.name+" : "+self.str_unfolded()
    def str_folded(self):
        if isinstance(self.op, Operator):
            return "("+" ".join([self.arg1.name,str(self.op),self.arg2.name])+")"
        elif isinstance(self.op, Function):
            return self.op.str_call(self.arg1)
    def str_unfolded(self):
        s1 = checkStrUnfold(self.arg1)
        s2 = checkStrUnfold(self.arg2)
        # BUGFIX: referenced the nonexistent attribute self.operator.
        return "("+" ".join([ s1, str(self.op), s2 ])+")"
#============================
def strNone(o):
    """str() that renders None as the empty string."""
    return "" if o is None else str(o)
class Scope(Expression):
def __init__(self,parent,head=None):
self.head = head
self.parent = parent
parent.childs.append(self)
self.childs = []
self.var_id0 = parent.var_id0 + len(parent.variables )
self.expr_id0 = parent.expr_id0 + len(parent.expressions)
self.variables = []
self.expressions = [] # should we? expression are not named - we cannot refer to them
self.operators = {}
self.types = {}
def add_var(self,a):
self.variables.append(a)
return self.var_id0 + len(self.variables)
def add_expr(self,a):
self.expressions.append(a)
return self.expr_id0 + len(self.expressions)
def add_type(self,a):
self.types.append(a)
return self.types_id0 + len(self.types)
def str_head(self):
s_indent = ' '*(self.level)
if self.level==0:
return s_indent+"GLOBAL_SCOPE"
else:
return s_indent+strNone(self.head)
#def __str__(self):
# print self.level
# #if env.fold_expressions: return self.str_folded ()
# #else: return self.str_unfolded()
def str_assignment(self,folded=True):
if folded: str_folded(self)
else: str_unfolded(self)
def str_folded(self):
s_indent = ' '*self.level
return s_indent+str(head)
def str_unfolded(self):
print ">>> BEGIN str_unfolded for Scope ", self.str_head() , self
s_indent = ' '*(self.level+1)
ls = [ self.str_head()+"{" ]
ls.append(" // --- Scope "+str(self) )
#s = [ "for %s in [%i..%i\%%i]{\n" %( self.i.name, self.i0.name, self.n.name, self.di.name) ]
for v in self.variables:
ls.append( s_indent + v.str_def() )
for e in self.expressions:
if isinstance(e, Scope):
#ls.append(" // Scope ")
ls.append( e.str_unfolded() )
elif isinstance(e, Expression):
#ls.append(" // Expression ")
ls.append( s_indent + e.str_assignment() )
else:
#ls.append(" // else ")
ls.append( s_indent + str(e) )
ls.append( ' '*(self.level) + "}")
print "<<< END str_unfolded for Scope ", self.str_head(), self
return "\n".join(ls)
def close(self):
if self != env.scope:
raise Exception( "ERROR closing scope which is not current", self, env.scope )
else:
env.endScope()
#============================
class Range:
    """Loop construct 'for i in [i0..imax%di]'.

    Iterating it opens a new environment scope on the first next() call and
    closes that scope when iteration stops (Python-2 iterator protocol).
    """
    def __init__(self,imax,i='i',i0=0,di=1):
        self.imax = checkArg(imax)
        self.i0 = checkArg(i0)
        self.di = checkArg(di)
        # Loop variable: create a fresh int Var when a name is given.
        if isinstance(i, str): self.i = Var(_int,i)
        # BUGFIX: a pre-built loop variable was silently replaced by the
        # start value (checkArg(i0)); coerce the variable itself instead.
        else: self.i = checkArg(i)
        self.bFirstIter = True
    def __iter__(self):
        self.bFirstIter = True
        return self
    def __str__(self):
        return "for %s in [%s..%s%%%s]" %( self.i.name, self.i0.name, self.imax.name, self.di.name)
    def next(self):
        # First call: enter the loop scope and hand out the loop variable;
        # second call: leave the scope and finish iteration.
        if self.bFirstIter:
            self.bFirstIter = False
            env.beginScope(self)
            return self.i
        else:
            env.endScope()
            raise StopIteration
#============================
class Function:
    """A named function with typed arguments and (optionally) a return type.

    Iterating a Function opens a scope for its body (Python-2 iterator
    protocol), yielding the argument tuple once.
    """
    def __init__(self,name,argTypes=None,ret=None, argNames=None, args=None):
        self.name = name
        self.ret = ret
        # Derive argument types from prototype Vars when not given directly.
        if argTypes is None:
            if args is not None:
                argTypes = [ a.typ for a in args ]
        self.argTypes = argTypes
        if args is not None:
            self.args = [ a.name for a in args]
        elif argNames is not None:
            self.args = argNames
        else:
            # NOTE(review): this branch requires argTypes; passing neither
            # argTypes nor args/argNames raises TypeError here -- confirm
            # whether an explicit error is wanted.
            self.args = tuple( Var(t) for t in self.argTypes )
    def __iter__(self):
        self.bFirstIter = True
        return self
    def __str__(self):
        ls = [ self.name+"(" ]
        for i,(arg,typ) in enumerate(zip(self.args,self.argTypes)):
            if i>0 :
                ls.append(",")
            ls.append( str(arg) )
        ls.append( ")"+str(self.ret) )
        return "".join(ls)
    def next(self):
        # First call: enter the function-body scope and yield the args;
        # second call: close the scope and stop.
        if self.bFirstIter:
            self.bFirstIter = False
            env.beginScope(self)
            return self.args
        else:
            env.endScope()
            raise StopIteration
    def __call__(self,*argv):
        """Build a call-expression node applying this function to argv."""
        return Expression(self, argv )
    def str_call(self,args):
        """Render 'name(a,b,...)' using node names where available."""
        ls = [ self.name+"(" ]
        for i,arg in enumerate(args):
            if i>0 :
                ls.append(",")
            # BUGFIX: the Expression check previously tested the loop index
            # 'i' instead of 'arg', so Expression args fell into str(arg).
            if isinstance(arg, (Var, Expression)):
                ls.append( arg.name )
            else:
                ls.append( str(arg) )
        return "".join(ls) + ")"
    def _return(self,e):
        # Placeholder for recording the function's return expression.
        pass
#============================
class Case:
    """Matcher yielded by Switch; comparison operators are intended to
    record case branches on the current scope.

    NOTE(review): only __eq__ registers anything (and it passes the bare
    string '==' to add_expr); the remaining comparisons are debug stubs
    that just print -- confirm intended.
    """
    def __init__(self):
        pass
    def __eq__(self,val):
        env.scope.add_expr('==')
        print "Case .eq.",val
    def __ne__(self,val):
        print "Case .ne.",val
    def __gt__(self,val):
        print "Case .gt.",val
    def __ge__(self,val):
        print "Case .ge.",val
    def __lt__(self,val):
        print "Case .lt.",val
    def __le__(self,val):
        print "Case .le.",val
    # __lt__, __le__, __gt__, __ge__, __eq__ and __ne__
#============================
class Switch:
    """Switch construct over a variable; iterating it yields a single Case
    matcher inside a fresh scope (Python-2 iterator protocol)."""

    def __init__(self, var):
        # Guard clause: only real Vars can be switched on.
        if not isinstance(var, Var):
            raise Exception( "ERROR Switch(var): var is not a Variable", type(var) )
        self.var = var

    def __iter__(self):
        self.bFirstIter = True
        return self

    def next(self):
        # Second call closes the scope opened by the first and stops.
        if not self.bFirstIter:
            env.endScope()
            raise StopIteration
        self.bFirstIter = False
        env.beginScope(self)
        return Case()

    def __str__(self):
        return "Switch: "
#============================
class Environment(Scope):
var_prefix = "v"
expr_prefix = "e"
def __init__(self):
self.childs = [] # from scope
self.scopeSeq = []
#self.next_var_id = 0
#self.next_expr_id = 0
self.fold_expressions = False
self.variables = []
self.expressions = []
self.operators = {}
self.types = {}
self.rootScope = self
self.scope = self.rootScope
self.scopeSeq.append(self.scope)
self.level = 0
self.var_id0 = 0
self.expr_id0 = 0
def initDefaults(self):
global _int, _float, _string
#for s in ["int","string","float"]:
# Typ(s)
#_int = self.types["int"]
#_float = self.types["float"]
#_string = self.types["string"]
#_number = Typ("number")
_float = Typ ( "float" )
_int = Typ ( "int" , _float )
_string = Typ( "string" )
for s in ["+","-","*","/","**", "=="]:
Operator(s)
def addOperator(self, op ):
self.scope.operators[op.name] = op
def addType(self, typ ):
self.scope.types[typ.name] = typ
def beginScope(self,head=None):
s = Scope(self.scope,head)
s.level = 1 + self.scope.level
self.scope = s
self.scopeSeq.append(s)
self.expressions.append(s)
return s
def endScope(self):
if self.scope == self.rootScope:
print "ERROR : cannot escape root scope "
else:
self.scope = self.scope.parent
#============================
# https://docs.python.org/2/library/inspect.html
import inspect
def makeFunc( func ):
    """Build a Function from a plain Python function whose argument default
    values carry the argument types; the body is recorded in its own scope
    by calling the function once with prototype Vars.

    Returns the constructed Function (with ret set from the body's result).
    """
    argSpec = inspect.getargspec(func)
    # Renamed loop variables so the 'type' builtin is no longer shadowed.
    args = [ Var(argtyp, argname)
             for argname, argtyp in zip(argSpec.args, argSpec.defaults) ]
    f = Function( func.__name__, argTypes=argSpec.defaults, argNames=argSpec.args )
    s = env.beginScope(f)
    ret = func(*args)
    s.close()
    f.ret = ret.typ
    # BUGFIX: the constructed Function was dropped; hand it to the caller.
    return f
def makeMethod( func, obj ):
argSpec = inspect.getargspec(func)
print " !!!! argSpec ", argSpec
args = []
for i,arg in enumerate(argSpec.args):
if arg != 'self':
typ = argSpec.defaults[i-1]
| |
== date.today()
assert occurrences.outdated is True
assert occurrences.tags == ['b', 'c']
assert occurrences.locations == ['B', 'C']
occurrences = occurrences.for_filter(start=date(2010, 5, 1))
assert occurrences.range is None
assert occurrences.start == date(2010, 5, 1)
assert occurrences.end == date.today()
assert occurrences.outdated is True
assert occurrences.tags == ['b', 'c']
assert occurrences.locations == ['B', 'C']
occurrences = occurrences.for_filter(range='-', end=date(2010, 5, 1))
assert occurrences.range is None
assert occurrences.start == date(2010, 5, 1)
assert occurrences.end == date(2010, 5, 1)
assert occurrences.outdated is True
assert occurrences.tags == ['b', 'c']
assert occurrences.locations == ['B', 'C']
def test_occurrence_collection_outdated(session):
    """Publishes one event per year (last/this/next) and checks that past
    occurrences only show up when outdated=True is requested."""
    today = date.today()
    for year in (today.year - 1, today.year, today.year + 1):
        event = EventCollection(session).add(
            title='Event {0}-{1}'.format(year, today.month),
            start=datetime(year, today.month, today.day, 0, 0),
            end=datetime(year, today.month, today.day, 23, 59),
            timezone='US/Eastern'
        )
        event.submit()
        event.publish()

    def count(**kwargs):
        # Shorthand: number of occurrences matching the given filters.
        return OccurrenceCollection(session, **kwargs).query().count()

    assert count(outdated=False) == 2
    assert count(outdated=True) == 3
    assert count(start=date(today.year - 1, 1, 1), outdated=False) == 2
    assert count(start=date(today.year - 1, 1, 1), outdated=True) == 3
    assert count(end=date.today(), outdated=False) == 1
    assert count(end=date.today(), outdated=True) == 2
def test_occurrence_collection_range_to_dates():
    """Symbolic ranges resolve to concrete (start, end) date pairs."""

    def to_dates(range):
        return OccurrenceCollection(None).range_to_dates(range)

    # frozen "now" -> {symbolic range -> expected (start, end)}
    expectations = {
        "2018-12-03": {  # Monday
            'today': (date(2018, 12, 3), date(2018, 12, 3)),
            'tomorrow': (date(2018, 12, 4), date(2018, 12, 4)),
            'weekend': (date(2018, 12, 7), date(2018, 12, 9)),
            'week': (date(2018, 12, 3), date(2018, 12, 9)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2018-12-06": {  # Thursday
            'today': (date(2018, 12, 6), date(2018, 12, 6)),
            'tomorrow': (date(2018, 12, 7), date(2018, 12, 7)),
            'weekend': (date(2018, 12, 7), date(2018, 12, 9)),
            'week': (date(2018, 12, 3), date(2018, 12, 9)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2018-12-07": {  # Friday: the weekend still starts today
            'today': (date(2018, 12, 7), date(2018, 12, 7)),
            'tomorrow': (date(2018, 12, 8), date(2018, 12, 8)),
            'weekend': (date(2018, 12, 7), date(2018, 12, 9)),
            'week': (date(2018, 12, 3), date(2018, 12, 9)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2018-12-08": {  # Saturday
            'today': (date(2018, 12, 8), date(2018, 12, 8)),
            'tomorrow': (date(2018, 12, 9), date(2018, 12, 9)),
            'weekend': (date(2018, 12, 7), date(2018, 12, 9)),
            'week': (date(2018, 12, 3), date(2018, 12, 9)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2018-12-09": {  # Sunday: last day of "week" and "weekend"
            'today': (date(2018, 12, 9), date(2018, 12, 9)),
            'tomorrow': (date(2018, 12, 10), date(2018, 12, 10)),
            'weekend': (date(2018, 12, 7), date(2018, 12, 9)),
            'week': (date(2018, 12, 3), date(2018, 12, 9)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2018-12-10": {  # Monday again: week/weekend roll forward
            'today': (date(2018, 12, 10), date(2018, 12, 10)),
            'tomorrow': (date(2018, 12, 11), date(2018, 12, 11)),
            'weekend': (date(2018, 12, 14), date(2018, 12, 16)),
            'week': (date(2018, 12, 10), date(2018, 12, 16)),
            'month': (date(2018, 12, 1), date(2018, 12, 31)),
        },
        "2019-01-31": {  # month and year boundaries
            'today': (date(2019, 1, 31), date(2019, 1, 31)),
            'tomorrow': (date(2019, 2, 1), date(2019, 2, 1)),
            'weekend': (date(2019, 2, 1), date(2019, 2, 3)),
            'week': (date(2019, 1, 28), date(2019, 2, 3)),
            'month': (date(2019, 1, 1), date(2019, 1, 31)),
        },
    }
    for now, cases in expectations.items():
        with freeze_time(now):
            for symbolic, dates in cases.items():
                assert to_dates(symbolic) == dates

    # Unknown or non-string ranges resolve to no dates at all.
    for invalid in (None, '', 1, 'never'):
        assert to_dates(invalid) == (None, None)

    # With an unknown range, explicit start/end pass through unchanged.
    assert OccurrenceCollection(None).range_to_dates(
        '', start=date(2019, 1, 1), end=date(2019, 1, 31)
    ) == (date(2019, 1, 1), date(2019, 1, 31))
def test_unique_names(session):
    """Slugs are deduplicated with numeric suffixes and reused on delete."""
    events = EventCollection(session)
    added = [
        events.add(
            title='Squirrel Park Visit',
            start=datetime(2015, 6, 16, 9, 30),
            end=datetime(2015, 6, 16, 18, 00),
            timezone='US/Eastern'
        ) for _ in range(11)
    ]
    # The first event gets the bare slug; duplicates receive -1 .. -10.
    assert added[0].name == 'squirrel-park-visit'
    for number in range(1, 11):
        assert added[number].name == 'squirrel-park-visit-{}'.format(number)

    # Deleting an event frees its suffix for the next duplicate title.
    events.delete(added[6])
    event = events.add(
        title='Squirrel Park Visit',
        start=datetime(2015, 6, 16, 9, 30),
        end=datetime(2015, 6, 16, 18, 00),
        timezone='US/Eastern',
        recurrence=(
            'RRULE:FREQ=WEEKLY;'
            'BYDAY=MO,TU,WE,TH,FR,SA,SU;'
            'UNTIL=20150620T220000Z'
        )
    )
    assert event.name == 'squirrel-park-visit-6'

    event.submit()
    event.publish()
    # Occurrence names append the occurrence date to the event slug.
    for index, day in enumerate(range(16, 21)):
        assert event.occurrences[index].name == (
            'squirrel-park-visit-6-2015-06-{}'.format(day)
        )

    assert events.by_name('test') is None
    assert events.by_name('squirrel-park-visit-6') == event

    occurrences = OccurrenceCollection(session)
    assert occurrences.by_name('test') is None
    assert occurrences.by_name(
        'squirrel-park-visit-6-2015-06-20'
    ) == event.occurrences[4]
def test_unicode(session):
    # Non-ASCII titles, locations and tags must round-trip through the
    # collection, tag aggregation and tag filtering unchanged.
    event = EventCollection(session).add(
        title='Salon du mieux-vivre, 16e édition',
        start=datetime(2015, 6, 16, 9, 30),
        end=datetime(2015, 6, 16, 18, 00),
        timezone='Europe/Zurich',
        content={
            'description': 'Rendez-vous automnal des médecines.'
        },
        location='Salon du mieux-vivre à Saignelégier',
        tags=['salons', 'congrès']
    )
    event.submit()
    event.publish()
    event = EventCollection(session).add(
        title='Témoins de Jéhovah',
        start=datetime(2015, 6, 18, 14, 00),
        end=datetime(2015, 6, 18, 16, 00),
        timezone='Europe/Zurich',
        content={
            'description': 'Congrès en français et espagnol.'
        },
        location='Salon du mieux-vivre à Saignelégier',
        tags=['témoins']
    )
    event.submit()
    event.publish()
    session.flush()

    # outdated=True: the 2015 occurrences are in the past relative to now.
    occurrences = OccurrenceCollection(session, outdated=True)
    # Tags are aggregated across all events with unicode intact.
    assert sorted(occurrences.used_tags) == ['congrès', 'salons', 'témoins']
    assert occurrences.query().count() == 2

    # Filtering on a unicode tag selects exactly the matching occurrence.
    occurrences = occurrences.for_filter(tags=['congrès'])
    occurrence = occurrences.query().one()
    assert occurrence.title == 'Salon du mieux-vivre, 16e édition'
    assert occurrence.location == 'Salon du mieux-vivre à Saignelégier'
    assert sorted(occurrence.tags) == ['congrès', 'salons']
    assert occurrence.event.description \
        == 'Rendez-vous automnal des médecines.'
    occurrences = occurrences.for_filter(tags=['témoins'])
    occurrence = occurrences.query().one()
    assert occurrence.title == 'Témoins de Jéhovah'
    assert occurrence.location == 'Salon du mieux-vivre à Saignelégier'
    assert occurrence.tags == ['témoins']
    assert occurrence.event.description == 'Congrès en français et espagnol.'
def test_as_ical(session):
    """The iCal export renders one VEVENT per occurrence passing the filters.

    Bug fix: the UID of the second expected VEVENT ended in ``@<EMAIL>``
    (a redaction artifact) instead of ``@onegov.event``, which is the domain
    every other UID in this test uses.
    """

    def as_ical(occurrences):
        # Render the collection and split into comparable lines.
        result = occurrences.as_ical(DummyRequest())
        result = result.decode().strip().splitlines()
        return result

    # A calendar with no matching occurrences still has a valid envelope.
    empty_calendar = [
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//OneGov//onegov.event//',
        'END:VCALENDAR',
    ]

    occurrences = OccurrenceCollection(session)
    assert sorted(as_ical(occurrences)) == sorted(empty_calendar)

    events = EventCollection(session)
    # Freeze creation time so DTSTAMP/LAST-MODIFIED are predictable.
    with freeze_time("2014-01-01"):
        event = events.add(
            title='Squirrel Park Visit',
            start=datetime(2015, 6, 16, 9, 30),
            end=datetime(2015, 6, 16, 18, 00),
            timezone='US/Eastern',
            content={
                'description': '<em>Furry</em> things will happen!'
            },
            location='Squirrel Park',
            tags=['fun', 'animals'],
            recurrence=(
                'RRULE:FREQ=WEEKLY;'
                'BYDAY=MO,TU,WE,TH,FR,SA,SU;'
                'UNTIL=20150616T220000Z'
            ),
            coordinates=Coordinates(47.051752750515746, 8.305739625357093)
        )
        event.submit()
        event.publish()
        event = events.add(
            title='History of the Squirrel Park',
            start=datetime(2015, 6, 18, 14, 00),
            end=datetime(2015, 6, 18, 16, 00),
            timezone='US/Eastern',
            content={
                'description': 'Learn how the Park got so <em>furry</em>!'
            },
            location='Squirrel Park',
            tags=['history'],
            coordinates=Coordinates(47.051752750515746, 8.305739625357093)
        )
        event.submit()
        event.publish()
    session.flush()

    # Default filter (outdated=False) hides the 2015 occurrences entirely.
    occurrences = OccurrenceCollection(session)
    assert sorted(as_ical(occurrences)) == sorted(empty_calendar)

    occurrences = occurrences.for_filter(start=date(2015, 6, 1))
    assert sorted(as_ical(occurrences)) == sorted(empty_calendar)

    # Including outdated occurrences exposes both events.
    occurrences = occurrences.for_filter(outdated=True)
    assert sorted(as_ical(occurrences)) == sorted([
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//OneGov//onegov.event//',
        'BEGIN:VEVENT',
        'SUMMARY:Squirrel Park Visit',
        'UID:squirrel-park-visit-2015-06-16@onegov.event',
        'DTSTART;VALUE=DATE-TIME:20150616T133000Z',
        'DTEND;VALUE=DATE-TIME:20150616T220000Z',
        'DTSTAMP;VALUE=DATE-TIME:20140101T000000Z',
        'RRULE:FREQ=WEEKLY;UNTIL=20150616T220000Z;BYDAY=MO,TU,WE,TH,FR,SA,SU',
        'DESCRIPTION:<em>Furry</em> things will happen!',
        'CATEGORIES:fun,animals',
        'LAST-MODIFIED;VALUE=DATE-TIME:20140101T000000Z',
        'LOCATION:Squirrel Park',
        'GEO:47.051752750515746;8.305739625357093',
        'URL:https://example.org/event/squirrel-park-visit',
        'END:VEVENT',
        'BEGIN:VEVENT',
        # fixed: was '...@<EMAIL>', inconsistent with every other UID
        'UID:history-of-the-squirrel-park-2015-06-18@onegov.event',
        'SUMMARY:History of the Squirrel Park',
        'DTSTART;VALUE=DATE-TIME:20150618T180000Z',
        'DTEND;VALUE=DATE-TIME:20150618T200000Z',
        'DTSTAMP;VALUE=DATE-TIME:20140101T000000Z',
        'DESCRIPTION:Learn how the Park got so <em>furry</em>!',
        'CATEGORIES:history',
        'LAST-MODIFIED;VALUE=DATE-TIME:20140101T000000Z',
        'LOCATION:Squirrel Park',
        'GEO:47.051752750515746;8.305739625357093',
        'URL:https://example.org/event/history-of-the-squirrel-park',
        'END:VEVENT',
        'END:VCALENDAR'
    ])

    # Narrowing by date range and tag keeps only the history event.
    occurrences = occurrences.for_filter(
        start=date(2015, 6, 18),
        end=date(2018, 6, 18),
        tags=['history']
    )
    assert sorted(as_ical(occurrences)) == sorted([
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//OneGov//onegov.event//',
        'BEGIN:VEVENT',
        'UID:history-of-the-squirrel-park-2015-06-18@onegov.event',
        'SUMMARY:History of the Squirrel Park',
        'DTSTART;VALUE=DATE-TIME:20150618T180000Z',
        'DTEND;VALUE=DATE-TIME:20150618T200000Z',
        'DTSTAMP;VALUE=DATE-TIME:20140101T000000Z',
        'DESCRIPTION:Learn how the Park got so <em>furry</em>!',
        'CATEGORIES:history',
        'LAST-MODIFIED;VALUE=DATE-TIME:20140101T000000Z',
        'LOCATION:Squirrel Park',
        'GEO:47.051752750515746;8.305739625357093',
        'URL:https://example.org/event/history-of-the-squirrel-park',
        'END:VEVENT',
        'END:VCALENDAR'
    ])
def test_from_import(session):
events = EventCollection(session)
added, updated, purged = events.from_import([
EventImportItem(
event=Event(
state='initiated',
title='Title A',
location='Location A',
tags=['Tag A.1', 'Tag A.2'],
start=tzdatetime(2015, 6, 16, 9, 30, 'US/Eastern'),
end=tzdatetime(2015, 6, 16, 18, 00, 'US/Eastern'),
timezone='US/Eastern',
description='Description A',
organizer='Organizer A',
recurrence=(
'RRULE:FREQ=WEEKLY;'
'UNTIL=20150616T220000Z;'
'BYDAY=MO,TU,WE,TH,FR,SA,SU'
),
coordinates=Coordinates(48.051752750515746, 9.305739625357093),
source='import-1-A'
),
image=None,
image_filename=None,
pdf=None,
pdf_filename=None,
),
EventImportItem(
event=Event(
state='initiated',
title='Title B',
location='Location B',
tags=['Tag B.1', 'Tag B.2'],
start=tzdatetime(2015, 6, 16, 9, 30, 'US/Eastern'),
end=tzdatetime(2015, 6, 16, 18, 00, 'US/Eastern'),
timezone='US/Eastern',
description='Description B',
organizer='Organizer B',
recurrence=(
'RRULE:FREQ=WEEKLY;'
'UNTIL=20150616T220000Z;'
'BYDAY=MO,TU,WE,TH,FR,SA,SU'
),
coordinates=Coordinates(48.051752750515746, 9.305739625357093),
source='import-1-B'
),
image=None,
image_filename=None,
pdf=None,
pdf_filename=None,
)
])
assert (len(added), len(updated), len(purged)) == (2, 0, 0)
def items():
yield EventImportItem(
event=Event(
state='initiated',
title='Title C',
location='Location C',
tags=['Tag C.1', 'Tag C.2'],
start=tzdatetime(2015, 6, 16, 9, 30, 'US/Eastern'),
end=tzdatetime(2015, 6, 16, 18, 00, 'US/Eastern'),
timezone='US/Eastern',
description='Description C',
organizer='Organizer C',
recurrence=(
'RRULE:FREQ=WEEKLY;'
'UNTIL=20150616T220000Z;'
'BYDAY=MO,TU,WE,TH,FR,SA,SU'
),
coordinates=Coordinates(48.051752750515746, 9.305739625357093),
source='import-2-C'
),
image=None,
image_filename=None,
pdf=None,
pdf_filename=None,
)
added, updated, purged = events.from_import(items())
assert (len(added), len(updated), len(purged)) == (1, 0, 0)
# Already imported
assert events.from_import([
EventImportItem(
event=Event(
state='initiated',
title='Title C',
location='Location C',
tags=['Tag C.1', 'Tag C.2'],
start=tzdatetime(2015, 6, 16, 9, 30, 'US/Eastern'),
end=tzdatetime(2015, 6, 16, 18, 00, 'US/Eastern'),
timezone='US/Eastern',
description='Description C',
organizer='Organizer C',
recurrence=(
'RRULE:FREQ=WEEKLY;'
'UNTIL=20150616T220000Z;'
'BYDAY=MO,TU,WE,TH,FR,SA,SU'
),
coordinates=Coordinates(48.051752750515746, 9.305739625357093),
source='import-2-C'
),
image=None,
image_filename=None,
pdf=None,
pdf_filename=None,
)
]) == ([], [], [])
# Update and purge
a, u, p = events.from_import([
EventImportItem(
| |
# <gh_stars>0  -- repository-metadata artifact left by the extraction tool; not Python code
"""
====================================================================================================
Parse a PhysiCell configuration file (XML) and generate two Jupyter (Python) modules:
user_params.py - containing widgets for user parameters.
microenv_params.py - containing widgets for microenvironment parameters.
====================================================================================================
Inputs - takes none, 1, 2, 3, or 4 arguments
------
config filename (str, optional): the PhysiCell configuration file (.xml) (Default = config.xml)
GUI module (str, optional): the primary GUI for the Jupyter notebook
colorname1, colorname2 (str, optional): the colors to use for the alternating rows of widgets
(Defaults: lightgreen, tan)
Examples (with 0,1,2,3,4 args):
--------
python xml2jupyter.py
python xml2jupyter.py config_heterogeneity.xml
python xml2jupyter.py config_heterogeneity.xml mygui.py
python xml2jupyter.py config_biorobots.xml lightblue tan
python xml2jupyter.py config_biorobots.xml mygui.py lightblue tan
Outputs
-------
user_params.py: Python module used to create/edit custom user parameters (--> "User Params" GUI tab)
microenv_params.py: Python module used to create/edit custom user parameters (--> "User Params" GUI tab)
Authors:
<NAME> (<EMAIL>)
<NAME>, <NAME>, <NAME> (undergrad students in Intelligent Systems Engineering, IU)
Dr. <NAME> (<EMAIL>)
--- History ---
v2 - also generate the microenv_params.py
v1 - generate the user_params.py
"""
import sys
import os
import math
import xml.etree.ElementTree as ET
# Defaults
config_file = "config.xml"
colorname1 = 'lightgreen'
colorname2 = 'tan'

# Command line: [config.xml] [gui.py] [color1 color2] -- see module docstring.
num_args = len(sys.argv)
print("num_args=",num_args)
if (num_args < 2):
    print()
    print("*** NOTE: using config.xml ***")
    print()
else:
    config_file = sys.argv[1]

# Bail out early if the chosen configuration file is missing.
if not os.path.isfile(config_file):
    print(config_file, "does not exist")
    print("Usage: python " + sys.argv[0] + " <config-file.xml> [<gui-file.py>] [<colorname1> <colorname2>]")
    sys.exit(1)

# Positional dispatch: 3 args -> gui file; 4 -> two colors; 5 -> gui + colors.
if (num_args == 3):
    gui_file = sys.argv[2]
elif (num_args == 4):
    colorname1 = sys.argv[2]
    colorname2 = sys.argv[3]
elif (num_args == 5):
    gui_file = sys.argv[2]
    colorname1 = sys.argv[3]
    colorname2 = sys.argv[4]
elif (num_args > 5):
    print("Usage: python " + sys.argv[0] + " <config-file.xml> [<gui-file.py>] [<colorname1> <colorname2>]")
    sys.exit(1)

print()
print("config_file = ",config_file)
print("colorname1 = ",colorname1)
print("colorname2 = ",colorname2)
print()

# When a GUI module was given, patch its 'main_xml_filename = ...' line
# in place so the notebook loads the selected config file.
if (num_args == 3):
    with open(gui_file) as f:   # e.g., "mygui.py"
        # newText = f.read().replace('myconfig.xml', config_file)  # rwh todo: don't assume this string; find line
        file_str = f.read()
        idx = file_str.find('main_xml_filename')  # verify > -1
        file_pre = file_str[:idx]
        idx2 = file_str[idx:].find('\n')
        file_post = file_str[idx+idx2:]
    with open(gui_file, "w") as f:
        f.write(file_pre)
        f.write("main_xml_filename = '" + config_file + "'")
        f.write(file_post)
#---------------------------------------------------------------------------------------------------
# Templates for the generated user_params.py / microenv_params.py modules.
# NOTE(review): the leading whitespace inside these triple-quoted template
# strings appears to have been stripped in transit -- restore the original
# indentation from upstream before regenerating code with this script.
user_tab_header = """
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box
class UserTab(object):
def __init__(self):
micron_units = Label('micron') # use "option m" (Mac, for micro symbol)
constWidth = '180px'
tab_height = '500px'
stepsize = 10
#style = {'description_width': '250px'}
style = {'description_width': '25%'}
layout = {'width': '400px'}
name_button_layout={'width':'25%'}
widget_layout = {'width': '15%'}
units_button_layout ={'width':'15%'}
desc_button_layout={'width':'45%'}
divider_button_layout={'width':'40%'}
"""
# Reference example of the widget code this script emits (kept as a no-op
# string literal for documentation purposes only).
"""
self.therapy_activation_time = BoundedFloatText(
min=0.,
max=100000000,
step=stepsize,
description='therapy_activation_time',
style=style, layout=layout,
# layout=Layout(width=constWidth),
)
self.save_interval_after_therapy_start = BoundedFloatText(
min=0.,
max=100000000,
step=stepsize,
description='save_interval_after_therapy_start',
style=style, layout=layout,
)
label_blankline = Label('')
self.tab = VBox([HBox([self.therapy_activation_time, Label('min')]),
HBox([self.save_interval_after_therapy_start, Label('min')]),
])
"""
# Template for the generated fill_gui method (XML -> widgets).
fill_gui_str= """
# Populate the GUI widgets with values from the XML
def fill_gui(self, xml_root):
uep = xml_root.find('.//microenvironment_setup') # find unique entry point
vp = [] # pointers to <variable> nodes
if uep:
for var in uep.findall('variable'):
vp.append(var)
"""
# Template for the generated fill_xml method (widgets -> XML).
fill_xml_str= """
# Read values from the GUI widgets to enable editing XML
def fill_xml(self, xml_root):
uep = xml_root.find('.//microenvironment_setup') # find unique entry point
vp = [] # pointers to <variable> nodes
if uep:
for var in uep.findall('variable'):
vp.append(var)
"""
def get_float_stepsize(val_str):
    """Return a widget step size roughly one order of magnitude below |val_str|.

    Bug fix: the original computed ``fval_abs = abs(float(val_str))`` but then
    passed the *global* ``ppchild.text`` to both ``math.log10`` calls, so the
    step size was derived from whatever XML node the enclosing loop had last
    visited rather than from the argument.

    Parameters
    ----------
    val_str : str
        Decimal string representation of the parameter's current value.

    Returns
    -------
    float or int
        A power of ten suitable as a spinner step; 0.01 when the value is 0.
    """
    fval_abs = abs(float(val_str))
    if (fval_abs > 0.0):
        if (fval_abs > 1.0):  # crop: step below the integer magnitude
            delta_val = pow(10, int(math.log10(fval_abs)) - 1)
        else:  # round: step below the nearest power of ten
            delta_val = pow(10, round(math.log10(fval_abs)) - 1)
    else:
        delta_val = 0.01  # if initial value=0.0, we're totally guessing at what a good delta is
    return delta_val
# Now parse a configuration file (.xml) and map the user parameters into GUI widgets
#tree = ET.parse('../config/PhysiCell_settings.xml')
try:
    tree = ET.parse(config_file)
except:  # bare except: any parse failure exits with a short message
    print("Cannot parse",config_file, "- check it's XML syntax.")
    sys.exit(1)
root = tree.getroot()

# Indentation used when emitting generated Python source.
# NOTE(review): the widths of these strings may have been lost when this
# file was transcribed -- confirm against the generated .py output.
indent = " "
indent2 = " "

# Map XML 'type' attributes to ipywidgets classes / Python casts.
widgets = {"double":"FloatText", "int":"IntText", "bool":"Checkbox", "string":"Text", "divider":""}
#widgets = {"double":"FloatText", "int":"IntText", "bool":"Checkbox", "string":"Text"}
type_cast = {"double":"float", "int":"int", "bool":"bool", "string":"", "divider":"Text"}

# Accumulators for the generated source text.
vbox_str = "\n" + indent + "self.tab = VBox([\n"
#param_desc_buttons_str = "\n"
#name_buttons_str = "\n"
units_buttons_str = "\n"
desc_buttons_str = "\n"
header_buttons_str = "\n"
row_str = "\n"
box_str = "\n" + indent + "box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%')\n"
row_header_str = "\n"
box_header_str = "\n"
# box1 = Box(children=row1, layout=box_layout)\n"

# Counters that number the generated widget variables.
menv_var_count = 0   # micronenv
param_count = 0
divider_count = 0
color_count = 0
#param_desc_count = 0
name_count = 0
units_count = 0

#---------- custom user_parameters --------------------
# TODO: cast attributes to lower case before doing equality tests; perform more testing!
uep = root.find('.//user_parameters')  # find unique entry point (uep) to user params
fill_gui_str += indent + "uep = xml_root.find('.//user_parameters')  # find unique entry point\n"
fill_xml_str += indent + "uep = xml_root.find('.//user_parameters')  # find unique entry point\n"
# param_count = 0
#param_desc_count = 0
# name_count = 0
# units_count = 0
print_vars = True          # echo each parsed element
print_var_types = False
tag_list = []              # tags seen so far, to detect duplicates
# function to process a "divider" type element
def handle_divider(child):
    """Emit a disabled, full-width Button acting as a section divider row.

    Appends the generated code to the module-level accumulator strings, so
    this must run inside the main <user_parameters> loop.
    """
    global divider_count, user_tab_header, indent, indent2, vbox_str
    divider_count += 1
    print('-----------> handler_divider: ',divider_count)
    row_name = "div_row" + str(divider_count)
    user_tab_header += "\n" + indent + row_name + " = " + "Button(description='" + child.attrib['description'] + "', disabled=True, layout=divider_button_layout)\n"
    vbox_str += indent2 + row_name + ",\n"
#=========== main loop ===================
# NOTE: we assume a simple "children-only" hierarchy in <user_parameters>
for child in uep: # uep = "unique entry point" for <user_parameters> (from above)
if print_vars:
print(child.tag, child.attrib)
divider_flag = False
if child.attrib['type'].lower() == 'divider':
divider_flag = True
else:
param_count += 1
# we allow the divider elements to have the same name, but not other elements
if (child.tag in tag_list) and (not divider_flag):
print("-------> Warning: duplicate tag! ", child.tag)
continue
else:
tag_list.append(child.tag)
units_str = ""
describe_str = ""
if 'hidden' in child.attrib.keys() and (child.attrib['hidden'].lower() == "true"): # do we want to hide this from the user?
print(" HIDE this parameter from the GUI: ", child.tag)
continue
# names_str = ''
# units_str = ''
# describe_str = ''
# desc_row_name = None
desc_row_name = ''
units_btn_name = ''
if not divider_flag:
if 'description' in child.attrib.keys():
describe_str = child.attrib['description']
else:
describe_str = ""
desc_row_name = "desc_button" + str(param_count)
desc_buttons_str += indent + desc_row_name + " = " + "Button(description='" + describe_str + "', disabled=True, layout=desc_button_layout) \n"
# print("--- debug: " + desc_row_name + " --> " + describe_str) #rwh debug
if (param_count % 2):
desc_buttons_str += indent + desc_row_name + ".style.button_color = '" + colorname1 + "'\n"
else: # rf. https://www.w3schools.com/colors/colors_names.asp
desc_buttons_str += indent + desc_row_name + ".style.button_color = '" + colorname2 + "'\n"
if 'units' in child.attrib.keys():
if child.attrib['units'] != "dimensionless" and child.attrib['units'] != "none":
# units_str = child.attrib['units']
units_count += 1
units_btn_name = "units_button" + str(units_count)
units_buttons_str += indent + units_btn_name + " = " + "Button(description='" + child.attrib['units'] + "', disabled=True, layout=units_button_layout) \n"
if (param_count % 2):
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname1 + "'\n"
else: # rf. https://www.w3schools.com/colors/colors_names.asp
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname2 + "'\n"
else:
units_count += 1
units_btn_name = "units_button" + str(units_count)
units_buttons_str += indent + units_btn_name + " = " + "Button(description='" + "', disabled=True, layout=units_button_layout) \n"
if (param_count % 2):
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname1 + "'\n"
else: # rf. https://www.w3schools.com/colors/colors_names.asp
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname2 + "'\n"
else:
units_count += 1
units_btn_name = "units_button" + str(units_count)
units_buttons_str += indent + units_btn_name + " = " + "Button(description='" + "', disabled=True, layout=units_button_layout) \n"
if (param_count % 2):
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname1 + "'\n"
else: # rf. https://www.w3schools.com/colors/colors_names.asp
units_buttons_str += indent + units_btn_name + ".style.button_color = '" + colorname2 + "'\n"
if 'type' in child.attrib.keys():
# self.therapy_activation_time = BoundedFloatText(
# min=0., max=100000000, step=stepsize,
full_name = "self." + child.tag
# name_count += 1
if child.attrib['type'] not in widgets.keys():
print(" *** Error - Invalid type: " + child.attrib['type'])
sys.exit(1)
else:
# The "divider" type elements are unique; let's handle them in their own function
if divider_flag:
handle_divider(child)
continue
name_count += 1
param_name_button = "param_name" + str(name_count)
user_tab_header += "\n" + indent + param_name_button + " = " + "Button(description='" + child.tag + "', disabled=True, layout=name_button_layout)\n"
if (param_count % 2):
user_tab_header += indent + param_name_button + ".style.button_color = '" | |
# -*- coding: utf-8 -*-
"""The xonsh built-ins.
Note that this module is named 'built_ins' so as not to be confused with the
special Python builtins module.
"""
import os
import re
import sys
import types
import signal
import atexit
import pathlib
import inspect
import warnings
import builtins
import itertools
import contextlib
import collections.abc as cabc
from xonsh.ast import AST
from xonsh.lazyasd import LazyObject, lazyobject
from xonsh.inspectors import Inspector
from xonsh.aliases import Aliases, make_default_aliases
from xonsh.environ import Env, default_env
from xonsh.platform import ON_POSIX, ON_WINDOWS
from xonsh.tools import (
expand_path,
globpath,
XonshError,
XonshCalledProcessError,
print_color,
)
from xonsh.commands_cache import CommandsCache
from xonsh.events import events
import xonsh.procs.specs
import xonsh.completers.init
# Module-level flag; presumably toggled by the (unseen) load/unload
# routines once xonsh's builtins have been installed -- confirm elsewhere.
BUILTINS_LOADED = False
# Inspector is constructed lazily on first use via LazyObject.
INSPECTOR = LazyObject(Inspector, globals(), "INSPECTOR")
# Show each DeprecationWarning only once per emitting location.
warnings.filterwarnings("once", category=DeprecationWarning)
@lazyobject
def AT_EXIT_SIGNALS():
    """Tuple of termination-related signals; evaluated lazily on first use.

    The POSIX-only members are appended only where they exist (they are
    unavailable on Windows).
    """
    sigs = (
        signal.SIGABRT,
        signal.SIGFPE,
        signal.SIGILL,
        signal.SIGSEGV,
        signal.SIGTERM,
    )
    if ON_POSIX:
        sigs += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP)
    return sigs
def resetting_signal_handle(sig, f):
    """Sets a new signal handle that will automatically restore the old value
    once the new handle is finished.
    """
    oldh = signal.getsignal(sig)

    def newh(s=None, frame=None):
        f(s, frame)
        # Restore the previous handler so this wrapper fires only once.
        signal.signal(sig, oldh)
        if sig != 0:
            # Propagate the signal number as the exit status (0 acts as a
            # "do not exit" sentinel here).
            sys.exit(sig)

    signal.signal(sig, newh)
def helper(x, name=""):
    """Prints help about, and then returns that variable."""
    # detail_level=0: summary info only (see superhelper for verbose output).
    INSPECTOR.pinfo(x, oname=name, detail_level=0)
    return x
def superhelper(x, name=""):
    """Prints help about, and then returns that variable."""
    # detail_level=1: verbose info (source, docstring, etc.).
    INSPECTOR.pinfo(x, oname=name, detail_level=1)
    return x
def reglob(path, parts=None, i=None):
    """Regular expression-based globbing."""
    if parts is None:
        # First call: split the path into per-directory regex components
        # and restart from the drive root (or "." for relative paths).
        path = os.path.normpath(path)
        drive, tail = os.path.splitdrive(path)
        parts = tail.split(os.sep)
        d = os.sep if os.path.isabs(path) else "."
        d = os.path.join(drive, d)
        return reglob(d, parts, i=0)
    base = subdir = path
    if i == 0:
        if not os.path.isabs(base):
            base = ""
        elif len(parts) > 1:
            # Absolute path: parts[0] is the empty pre-root component.
            i += 1
    regex = os.path.join(base, parts[i])
    if ON_WINDOWS:
        # currently unable to access regex backslash sequences
        # on Windows due to paths using \.
        regex = regex.replace("\\", "\\\\")
    regex = re.compile(regex)
    files = os.listdir(subdir)
    files.sort()
    paths = []
    i1 = i + 1
    if i1 == len(parts):
        # Last component: collect every entry whose full (joined) path
        # matches the accumulated regex.
        for f in files:
            p = os.path.join(base, f)
            if regex.fullmatch(p) is not None:
                paths.append(p)
    else:
        # Intermediate component: recurse into matching directories only.
        for f in files:
            p = os.path.join(base, f)
            if regex.fullmatch(p) is None or not os.path.isdir(p):
                continue
            paths += reglob(p, parts=parts, i=i1)
    return paths
def path_literal(s):
    """Expand user/env references in *s* and wrap it in a pathlib.Path."""
    return pathlib.Path(expand_path(s))
def regexsearch(s):
    """Expand user/env references in *s*, then run regex-based globbing."""
    return reglob(expand_path(s))
def globsearch(s):
    """Glob-expand *s*, honoring the session's environment flags."""
    # Behavior toggles come from the active xonsh environment.
    csc = builtins.__xonsh__.env.get("CASE_SENSITIVE_COMPLETIONS")
    glob_sorted = builtins.__xonsh__.env.get("GLOB_SORTED")
    dotglob = builtins.__xonsh__.env.get("DOTGLOB")
    return globpath(
        s,
        ignore_case=(not csc),
        return_empty=True,
        sort_result=glob_sorted,
        include_dotfiles=dotglob,
    )
def pathsearch(func, s, pymode=False, pathobj=False):
    """
    Takes a string and returns a list of file paths that match (regex, glob,
    or arbitrary search function). If pathobj=True, the return is a list of
    pathlib.Path objects instead of strings.
    """
    # The search function must be a callable of exactly one argument.
    is_unary = callable(func) and len(inspect.signature(func).parameters) == 1
    if not is_unary:
        raise XonshError("%r is not a known path search function" % func)
    matches = func(s)
    if pathobj and pymode:
        matches = [pathlib.Path(m) for m in matches]
    if matches:
        return matches
    # In subprocess mode an empty result falls back to the literal string.
    return [] if pymode else [s]
def subproc_captured_stdout(*cmds, envs=None):
    """Runs a subprocess, capturing the output. Returns the stdout
    that was produced as a str.
    """
    # Thin wrapper: captured="stdout" selects string capture in run_subproc.
    return xonsh.procs.specs.run_subproc(cmds, captured="stdout", envs=envs)
def subproc_captured_inject(*cmds, envs=None):
    """Runs a subprocess, capturing the output. Returns a list of
    whitespace-separated strings of the stdout that was produced.
    The string is split using xonsh's lexer, rather than Python's str.split()
    or shlex.split().
    """
    o = xonsh.procs.specs.run_subproc(cmds, captured="object", envs=envs)
    # o.end() presumably finalizes the pipeline before iteration -- confirm
    # against CommandPipeline.
    o.end()
    toks = []
    for line in o:
        # Drop only the trailing newline; interior whitespace is handled
        # by the lexer split below.
        line = line.rstrip(os.linesep)
        toks.extend(builtins.__xonsh__.execer.parser.lexer.split(line))
    return toks
def subproc_captured_object(*cmds, envs=None):
    """
    Runs a subprocess, capturing the output. Returns an instance of
    CommandPipeline representing the completed command.
    """
    return xonsh.procs.specs.run_subproc(cmds, captured="object", envs=envs)
def subproc_captured_hiddenobject(*cmds, envs=None):
    """Runs a subprocess, capturing the output. Returns an instance of
    HiddenCommandPipeline representing the completed command.
    """
    return xonsh.procs.specs.run_subproc(cmds, captured="hiddenobject", envs=envs)
def subproc_uncaptured(*cmds, envs=None):
    """Runs a subprocess without capturing its output.

    NOTE(review): the original docstring claimed this "Returns the stdout
    that was produced as a str", but ``captured=False`` performs no capture;
    the return value is whatever ``run_subproc`` yields in uncaptured mode --
    confirm against xonsh.procs.specs.
    """
    return xonsh.procs.specs.run_subproc(cmds, captured=False, envs=envs)
def ensure_list_of_strs(x):
    """Ensures that x is a list of strings."""
    # A plain string is atomic here -- wrap it rather than iterate it.
    if isinstance(x, str):
        return [x]
    # Other sequences: stringify each non-str element.
    if isinstance(x, cabc.Sequence):
        return [item if isinstance(item, str) else str(item) for item in x]
    # Scalars and non-sequence objects become a one-element list.
    return [str(x)]
def ensure_str_or_callable(x):
    """Ensures that x is single string or function."""
    if callable(x) or isinstance(x, str):
        return x
    if isinstance(x, bytes):
        # ``os.fsdecode`` decodes using "surrogateescape" on linux and
        # "strict" on windows, matching how command-line arguments are
        # decoded for the OS. See https://www.python.org/dev/peps/pep-0383/
        return os.fsdecode(x)
    return str(x)
def list_of_strs_or_callables(x):
    """
    Ensures that x is a list of strings or functions.
    This is called when using the ``@()`` operator to expand it's content.
    """
    # Strings, bytes and callables are atomic: wrap them, don't iterate.
    if callable(x) or isinstance(x, (str, bytes)):
        return [ensure_str_or_callable(x)]
    if isinstance(x, cabc.Iterable):
        return [ensure_str_or_callable(item) for item in x]
    return [ensure_str_or_callable(x)]
def list_of_list_of_strs_outer_product(x):
    """Takes an outer product of a list of strings"""
    # Normalize every element of x into a list of strings, then join one
    # string from each list for every combination.
    lolos = map(ensure_list_of_strs, x)
    rtn = []
    for los in itertools.product(*lolos):
        s = "".join(los)
        if "*" in s:
            # Combinations containing a glob wildcard expand to matches.
            rtn.extend(builtins.__xonsh__.glob(s))
        else:
            rtn.append(builtins.__xonsh__.expand_path(s))
    return rtn
def eval_fstring_field(field):
    """Evaluates the argument in Xonsh context."""
    # field is a (source, filename) pair -- presumably produced by the
    # f-string machinery; confirm at the call site. Evaluation runs against
    # the session context as locals.
    res = __xonsh__.execer.eval(
        field[0].strip(), glbs=globals(), locs=builtins.__xonsh__.ctx, filename=field[1]
    )
    return res
@lazyobject
def MACRO_FLAG_KINDS():
    """Maps macro kind flags (short and long spellings) to converter objects.

    Values are consumed by ``convert_macro_arg``: real types (str, AST,
    CodeType, type) or the builtins ``eval``/``exec`` used as sentinels.
    """
    return {
        "s": str,
        "str": str,
        "string": str,
        "a": AST,
        "ast": AST,
        "c": types.CodeType,
        "code": types.CodeType,
        "compile": types.CodeType,
        "v": eval,
        "eval": eval,
        "x": exec,
        "exec": exec,
        "t": type,
        "type": type,
    }
def _convert_kind_flag(x):
    """Puts a kind flag (string) a canonical form."""
    x = x.lower()
    if x not in MACRO_FLAG_KINDS:
        raise TypeError("{0!r} not a recognized macro type.".format(x))
    return MACRO_FLAG_KINDS[x]
def convert_macro_arg(raw_arg, kind, glbs, locs, *, name="<arg>", macroname="<macro>"):
    """Converts a string macro argument based on the requested kind.

    Parameters
    ----------
    raw_arg : str
        The str representation of the macro argument.
    kind : object
        A flag or type representing how to convert the argument.
    glbs : Mapping
        The globals from the call site.
    locs : Mapping or None
        The locals from the call site.
    name : str, optional
        The macro argument name.
    macroname : str, optional
        The name of the macro itself.

    Returns
    -------
    The converted argument.
    """
    # munge kind and mode to start
    mode = None
    if isinstance(kind, cabc.Sequence) and not isinstance(kind, str):
        # have (kind, mode) tuple
        kind, mode = kind
    if isinstance(kind, str):
        # Canonicalize flag strings like "s"/"ast" to converter objects.
        kind = _convert_kind_flag(kind)
    if kind is str or kind is None:
        return raw_arg  # short circuit since there is nothing else to do
    # select from kind and convert
    execer = builtins.__xonsh__.execer
    # Synthetic filename identifies the macro and argument in tracebacks.
    filename = macroname + "(" + name + ")"
    if kind is AST:
        # The parser needs every name already in scope at the call site.
        ctx = set(dir(builtins)) | set(glbs.keys())
        if locs is not None:
            ctx |= set(locs.keys())
        mode = mode or "eval"
        if mode != "eval" and not raw_arg.endswith("\n"):
            raw_arg += "\n"
        arg = execer.parse(raw_arg, ctx, mode=mode, filename=filename)
    elif kind is types.CodeType or kind is compile:  # NOQA
        mode = mode or "eval"
        arg = execer.compile(
            raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename
        )
    elif kind is eval:
        arg = execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename)
    elif kind is exec:
        mode = mode or "exec"
        if not raw_arg.endswith("\n"):
            raw_arg += "\n"
        arg = execer.exec(raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename)
    elif kind is type:
        # "type" kind: evaluate the argument and return the type of the result.
        arg = type(execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename))
    else:
        msg = "kind={0!r} and mode={1!r} was not recognized for macro " "argument {2!r}"
        raise TypeError(msg.format(kind, mode, name))
    return arg
@contextlib.contextmanager
def in_macro_call(f, glbs, locs):
    """Attaches macro globals and locals temporarily to function as a
    context manager.

    Parameters
    ----------
    f : callable object
        The function that is called as ``f(*args)``.
    glbs : Mapping
        The globals from the call site.
    locs : Mapping or None
        The locals from the call site.
    """
    prev_glbs = getattr(f, "macro_globals", None)
    prev_locs = getattr(f, "macro_locals", None)
    f.macro_globals = glbs
    f.macro_locals = locs
    try:
        yield
    finally:
        # Restore (or remove) the attributes even when the body raises, so a
        # failing macro call cannot leave stale macro state attached to ``f``.
        if prev_glbs is None:
            del f.macro_globals
        else:
            f.macro_globals = prev_glbs
        if prev_locs is None:
            del f.macro_locals
        else:
            f.macro_locals = prev_locs
def call_macro(f, raw_args, glbs, locs):
"""Calls a function as a macro, returning its result.
Parameters
----------
f : callable object
The function that is called as ``f(*args)``.
raw_args : tuple of str
The str representation of arguments of that were passed into the
macro. These strings will be parsed, compiled, evaled, or left as
a string depending on the annotations of f.
glbs : Mapping
The globals from the call site.
locs : Mapping or None
The locals from the call site.
"""
sig = inspect.signature(f)
empty = inspect.Parameter.empty
macroname = f.__name__
i = 0
args = []
| |
<filename>fos/lib/pyglet/media/avbin.py
# ----------------------------------------------------------------------------
# fos.lib.pyglet
# Copyright (c) 2006-2008 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of fos.lib.pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Use avbin to decode audio and video media.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import threading
import time
import fos.lib.pyglet
from fos.lib.pyglet import gl
from fos.lib.pyglet.gl import gl_info
from fos.lib.pyglet import image
import fos.lib.pyglet.lib
from fos.lib.pyglet.media import \
MediaFormatException, StreamingSource, VideoFormat, AudioFormat, \
AudioData, MediaEvent, WorkerThread, SourceInfo
# AVbin is loaded as a shared library via ctypes; the darwin= argument
# supplies the default install path on macOS.
av = fos.lib.pyglet.lib.load_library('avbin',
                                     darwin='/usr/local/lib/libavbin.dylib')

# Result codes returned by AVbin entry points.
AVBIN_RESULT_ERROR = -1
AVBIN_RESULT_OK = 0
AVbinResult = ctypes.c_int

# Stream type discriminators (see AVbinStreamInfo8.type).
AVBIN_STREAM_TYPE_UNKNOWN = 0
AVBIN_STREAM_TYPE_VIDEO = 1
AVBIN_STREAM_TYPE_AUDIO = 2
AVbinStreamType = ctypes.c_int

# Audio sample formats reported by AVbin.
AVBIN_SAMPLE_FORMAT_U8 = 0
AVBIN_SAMPLE_FORMAT_S16 = 1
AVBIN_SAMPLE_FORMAT_S24 = 2
AVBIN_SAMPLE_FORMAT_S32 = 3
AVBIN_SAMPLE_FORMAT_FLOAT = 4
AVbinSampleFormat = ctypes.c_int

# Log verbosity levels accepted by avbin_set_log_level.
AVBIN_LOG_QUIET = -8
AVBIN_LOG_PANIC = 0
AVBIN_LOG_FATAL = 8
AVBIN_LOG_ERROR = 16
AVBIN_LOG_WARNING = 24
AVBIN_LOG_INFO = 32
AVBIN_LOG_VERBOSE = 40
AVBIN_LOG_DEBUG = 48
AVbinLogLevel = ctypes.c_int

# Opaque handles and the 64-bit timestamp type used throughout the API.
AVbinFileP = ctypes.c_void_p
AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
class AVbinFileInfo(ctypes.Structure):
    """ctypes mirror of AVbin's file-level metadata structure.

    ``structure_size`` must be set to ``ctypes.sizeof(self)`` before the
    struct is passed to ``avbin_file_info`` (see AVbinSource.__init__).
    """
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('n_streams', ctypes.c_int),
        ('start_time', Timestamp),
        ('duration', Timestamp),
        ('title', ctypes.c_char * 512),
        ('author', ctypes.c_char * 512),
        ('copyright', ctypes.c_char * 512),
        ('comment', ctypes.c_char * 512),
        ('album', ctypes.c_char * 512),
        ('year', ctypes.c_int),
        ('track', ctypes.c_int),
        ('genre', ctypes.c_char * 32),
    ]
class _AVbinStreamInfoVideo8(ctypes.Structure):
    """Video half of the AVbinStreamInfo8 union: frame geometry, pixel
    aspect ratio, and frame rate, each as a numerator/denominator pair."""
    _fields_ = [
        ('width', ctypes.c_uint),
        ('height', ctypes.c_uint),
        ('sample_aspect_num', ctypes.c_uint),
        ('sample_aspect_den', ctypes.c_uint),
        ('frame_rate_num', ctypes.c_uint),
        ('frame_rate_den', ctypes.c_uint),
    ]
class _AVbinStreamInfoAudio8(ctypes.Structure):
    """Audio half of the AVbinStreamInfo8 union: sample format/rate/width
    and channel count."""
    _fields_ = [
        ('sample_format', ctypes.c_int),
        ('sample_rate', ctypes.c_uint),
        ('sample_bits', ctypes.c_uint),
        ('channels', ctypes.c_uint),
    ]
class _AVbinStreamInfoUnion8(ctypes.Union):
    """Union of the per-stream-type info structs; which member is valid is
    determined by AVbinStreamInfo8.type."""
    _fields_ = [
        ('video', _AVbinStreamInfoVideo8),
        ('audio', _AVbinStreamInfoAudio8),
    ]
class AVbinStreamInfo8(ctypes.Structure):
    """Per-stream metadata returned by ``avbin_stream_info``.

    ``structure_size`` must be filled in with ``ctypes.sizeof`` before the
    call; ``type`` selects the valid member of the ``u`` union.
    """
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('type', ctypes.c_int),
        ('u', _AVbinStreamInfoUnion8)
    ]
class AVbinPacket(ctypes.Structure):
    """A demuxed packet returned by ``avbin_read``: timestamp, owning
    stream index, and a pointer/size pair for the encoded payload."""
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('timestamp', Timestamp),
        ('stream_index', ctypes.c_int),
        ('data', ctypes.POINTER(ctypes.c_uint8)),
        ('size', ctypes.c_size_t),
    ]
# Log callback signature: (module_name, level, message) -> None.
AVbinLogCallback = ctypes.CFUNCTYPE(None,
                                    ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p)

# Declare restype/argtypes for every AVbin entry point used by this module
# so ctypes performs the proper conversions.
av.avbin_get_version.restype = ctypes.c_int
av.avbin_get_ffmpeg_revision.restype = ctypes.c_int
av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t
av.avbin_have_feature.restype = ctypes.c_int
av.avbin_have_feature.argtypes = [ctypes.c_char_p]
av.avbin_init.restype = AVbinResult
av.avbin_set_log_level.restype = AVbinResult
av.avbin_set_log_level.argtypes = [AVbinLogLevel]
av.avbin_set_log_callback.argtypes = [AVbinLogCallback]
av.avbin_open_filename.restype = AVbinFileP
av.avbin_open_filename.argtypes = [ctypes.c_char_p]
av.avbin_close_file.argtypes = [AVbinFileP]
av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp]
av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)]
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
                                 ctypes.POINTER(AVbinStreamInfo8)]
av.avbin_open_stream.restype = ctypes.c_void_p
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)]
av.avbin_read.restype = AVbinResult
av.avbin_decode_audio.restype = ctypes.c_int
av.avbin_decode_audio.argtypes = [AVbinStreamP,
                                  ctypes.c_void_p, ctypes.c_size_t,
                                  ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]
av.avbin_decode_video.restype = ctypes.c_int
av.avbin_decode_video.argtypes = [AVbinStreamP,
                                  ctypes.c_void_p, ctypes.c_size_t,
                                  ctypes.c_void_p]
if True:
    # XXX lock all avbin calls. not clear from ffmpeg documentation if this
    # is necessary. leaving it on while debugging to rule out the possibility
    # of a problem.
    def synchronize(func, lock):
        """Wrap *func* so every call runs while holding *lock*."""
        def f(*args):
            # Use the lock as a context manager so it is released even when
            # the wrapped avbin call raises; the original acquire/release
            # pair leaked the lock on any exception, deadlocking later calls.
            with lock:
                return func(*args)
        return f

    _avbin_lock = threading.Lock()
    # Replace every avbin_* entry point with a lock-guarded wrapper.
    for name in dir(av):
        if name.startswith('avbin_'):
            setattr(av, name, synchronize(getattr(av, name), _avbin_lock))
def get_version():
    """Return the integer version number of the loaded AVbin library."""
    return av.avbin_get_version()
class AVbinException(MediaFormatException):
    """Raised when AVbin cannot open or decode a media file."""
    pass
def timestamp_from_avbin(timestamp):
    """Convert an AVbin timestamp (integer microseconds) to float seconds."""
    seconds = float(timestamp) / 1000000.0
    return seconds
def timestamp_to_avbin(timestamp):
    """Convert float seconds to an AVbin timestamp (integer microseconds)."""
    microseconds = timestamp * 1000000
    return int(microseconds)
class VideoPacket(object):
    """A single encoded video packet copied out of an AVbinPacket.

    The payload is copied into Python-owned memory via ``ctypes.memmove``;
    NOTE(review): presumably because AVbin reuses its packet buffer on the
    next read — confirm against the AVbin API.
    """
    # Class-wide counter used to hand out unique packet ids.
    _next_id = 0

    def __init__(self, packet):
        # Convert from AVbin microseconds to float seconds.
        self.timestamp = timestamp_from_avbin(packet.timestamp)
        self.data = (ctypes.c_uint8 * packet.size)()
        self.size = packet.size
        ctypes.memmove(self.data, packet.data, self.size)
        # Decoded image. 0 == not decoded yet; None == Error or discarded
        self.image = 0
        self.id = self._next_id
        self.__class__._next_id += 1
class AVbinSource(StreamingSource):
def __init__(self, filename, file=None):
    """Open *filename* with AVbin and select the first usable streams.

    :raises NotImplementedError: if *file* (a stream object) is given.
    :raises AVbinException: if the file cannot be opened.
    """
    if file is not None:
        raise NotImplementedError('TODO: Load from file stream')
    self._file = av.avbin_open_filename(filename)
    if not self._file:
        raise AVbinException('Could not open "%s"' % filename)
    self._video_stream = None
    self._video_stream_index = -1
    self._audio_stream = None
    self._audio_stream_index = -1
    file_info = AVbinFileInfo()
    file_info.structure_size = ctypes.sizeof(file_info)
    av.avbin_file_info(self._file, ctypes.byref(file_info))
    self._duration = timestamp_from_avbin(file_info.duration)
    # Copy container metadata into the pyglet SourceInfo.
    self.info = SourceInfo()
    self.info.title = file_info.title
    self.info.author = file_info.author
    self.info.copyright = file_info.copyright
    self.info.comment = file_info.comment
    self.info.album = file_info.album
    self.info.year = file_info.year
    self.info.track = file_info.track
    self.info.genre = file_info.genre
    # Pick the first video and audio streams found, ignore others.
    for i in range(file_info.n_streams):
        info = AVbinStreamInfo8()
        info.structure_size = ctypes.sizeof(info)
        av.avbin_stream_info(self._file, i, info)
        if (info.type == AVBIN_STREAM_TYPE_VIDEO and
                not self._video_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue
            self.video_format = VideoFormat(
                width=info.u.video.width,
                height=info.u.video.height)
            if info.u.video.sample_aspect_num != 0:
                self.video_format.sample_aspect = (
                    float(info.u.video.sample_aspect_num) /
                    info.u.video.sample_aspect_den)
            # NOTE(review): _have_frame_rate is defined outside this view;
            # presumably an AVbin feature flag set at module import — confirm.
            if _have_frame_rate:
                self.video_format.frame_rate = (
                    float(info.u.video.frame_rate_num) /
                    info.u.video.frame_rate_den)
            self._video_stream = stream
            self._video_stream_index = i
        elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
                info.u.audio.sample_bits in (8, 16) and
                info.u.audio.channels in (1, 2) and
                not self._audio_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue
            self.audio_format = AudioFormat(
                channels=info.u.audio.channels,
                sample_size=info.u.audio.sample_bits,
                sample_rate=info.u.audio.sample_rate)
            self._audio_stream = stream
            self._audio_stream_index = i
    self._packet = AVbinPacket()
    self._packet.structure_size = ctypes.sizeof(self._packet)
    self._packet.stream_index = -1
    self._events = []
    # Timestamp of last video packet added to decoder queue.
    self._video_timestamp = 0
    self._buffered_audio_data = []
    if self.audio_format:
        self._audio_buffer = \
            (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()
    if self.video_format:
        self._video_packets = []
        self._decode_thread = WorkerThread()
        self._decode_thread.start()
        self._condition = threading.Condition()
def __del__(self):
    if _debug:
        print 'del avbin source'
    try:
        # Close streams and the file. Failures are swallowed because this
        # may run during interpreter shutdown, when module globals (av,
        # etc.) can already be gone.
        if self._video_stream:
            av.avbin_close_stream(self._video_stream)
        if self._audio_stream:
            av.avbin_close_stream(self._audio_stream)
        av.avbin_close_file(self._file)
    except:
        pass
# XXX TODO call this / add to source api
def delete(self):
    """Stop the decoder worker thread (only started when there is video)."""
    if self.video_format:
        self._decode_thread.stop()
def seek(self, timestamp):
    """Seek the container to *timestamp* (seconds) and drop buffered data."""
    if _debug:
        print 'AVbin seek', timestamp
    av.avbin_seek_file(self._file, timestamp_to_avbin(timestamp))
    self._audio_packet_size = 0
    del self._events[:]
    del self._buffered_audio_data[:]
    if self.video_format:
        self._video_timestamp = 0
        # Mark queued-but-undecoded frames as discarded (image = None) and
        # notify any waiter on the condition before clearing the queue.
        self._condition.acquire()
        for packet in self._video_packets:
            packet.image = None
        self._condition.notify()
        self._condition.release()
        del self._video_packets[:]
        self._decode_thread.clear_jobs()
def _get_packet(self):
    """Read the next packet into ``self._packet``.

    Returns True if a packet was read, False when the stream is exhausted.
    """
    return av.avbin_read(self._file, self._packet) == AVBIN_RESULT_OK
def _process_packet(self):
    # Returns (packet_type, packet), where packet_type = 'video' or
    # 'audio'; and packet is VideoPacket or AudioData. In either case,
    # packet is buffered or queued for decoding; no further action is
    # necessary. Returns (None, None) if packet was neither type.
    if self._packet.stream_index == self._video_stream_index:
        if self._packet.timestamp < 0:
            # XXX TODO
            # AVbin needs hack to decode timestamp for B frames in
            # some containers (OGG?). See
            # http://www.dranger.com/ffmpeg/tutorial05.html
            # For now we just drop these frames.
            return None, None
        video_packet = VideoPacket(self._packet)
        if _debug:
            print 'Created and queued frame %d (%f)' % \
                (video_packet.id, video_packet.timestamp)
        # Track the newest queued video timestamp; get_audio_data uses it
        # to know how far video has been enqueued for decoding.
        self._video_timestamp = max(self._video_timestamp,
                                    video_packet.timestamp)
        self._video_packets.append(video_packet)
        self._decode_thread.put_job(
            lambda: self._decode_video_packet(video_packet))
        return 'video', video_packet
    elif self._packet.stream_index == self._audio_stream_index:
        audio_data = self._decode_audio_packet()
        if audio_data:
            if _debug:
                print 'Got an audio packet at', audio_data.timestamp
            self._buffered_audio_data.append(audio_data)
            return 'audio', audio_data
    return None, None
def get_audio_data(self, bytes):
    """Return the next buffered AudioData chunk, or None at end of stream.

    NOTE(review): the ``bytes`` parameter shadows the builtin and is never
    used by this implementation (presumably a requested byte count from
    the Source API — confirm).
    """
    try:
        audio_data = self._buffered_audio_data.pop(0)
        audio_data_timeend = audio_data.timestamp + audio_data.duration
    except IndexError:
        audio_data = None
        audio_data_timeend = self._video_timestamp + 1
    if _debug:
        print 'get_audio_data'
    have_video_work = False
    # Keep reading packets until we have an audio packet and all the
    # associated video packets have been enqueued on the decoder thread.
    while not audio_data or (
            self._video_stream and self._video_timestamp < audio_data_timeend):
        if not self._get_packet():
            break
        packet_type, packet = self._process_packet()
        if packet_type == 'video':
            have_video_work = True
        elif not audio_data and packet_type == 'audio':
            audio_data = self._buffered_audio_data.pop(0)
            if _debug:
                print 'Got requested audio packet at', audio_data.timestamp
            audio_data_timeend = audio_data.timestamp + audio_data.duration
    if have_video_work:
        # Give decoder thread a chance to run before we return this audio
        # data.
        time.sleep(0)
    if not audio_data:
        if _debug:
            print 'get_audio_data returning None'
        return None
    # Attach pending events that fall within this chunk, rebasing their
    # timestamps relative to the chunk start where applicable.
    while self._events and self._events[0].timestamp <= audio_data_timeend:
        event = self._events.pop(0)
        if event.timestamp >= audio_data.timestamp:
            event.timestamp -= audio_data.timestamp
        audio_data.events.append(event)
    if _debug:
        print 'get_audio_data returning ts %f with events' % \
            audio_data.timestamp, audio_data.events
        print 'remaining events are', self._events
    return audio_data
def _decode_audio_packet(self):
packet = self._packet
size_out = ctypes.c_int(len(self._audio_buffer))
while True:
audio_packet_ptr = ctypes.cast(packet.data, ctypes.c_void_p)
audio_packet_size = packet.size
used = av.avbin_decode_audio(self._audio_stream,
audio_packet_ptr, audio_packet_size,
self._audio_buffer, size_out)
if used < 0:
self._audio_packet_size = 0
break
audio_packet_ptr.value += | |
x in remote_states])
comm_queues = ray.get([x.get_comm_queue.remote() for x in remote_states])
aggregators = get_aggregators(comm_queues)
[x.set_comm_queues.remote(comm_queues, control_queues) for x in remote_states]
if not USE_RAY_CALLS:
retval_queue = ramba_queue.Queue()
[x.rpc_serve.remote(retval_queue) for x in remote_states]
def print_comm_stats():
    """Print aggregated communication statistics across all workers.

    Only meaningful for the ZMQ transport; a no-op otherwise.
    """
    if not USE_ZMQ:
        return
    per_worker = remote_call_all("get_comm_stats")
    bytes_sent = {}
    pack_time = {}
    unpack_time = {}
    for widx, entries in enumerate(per_worker):
        # Source IP is taken from this worker's own entry (index widx);
        # NOTE(review): confirm stats layout matches this assumption.
        src = entries[widx][0]
        for dst, _, _, sent, unpick, pick in entries:
            key = (src, dst)
            bytes_sent[key] = bytes_sent.get(key, 0) + sent
            pack_time[key] = pack_time.get(key, 0.0) + pick
            unpack_time[src] = unpack_time.get(src, 0.0) + unpick
    print(bytes_sent)
    print(pack_time)
    print(unpack_time)
## track live ndarray gids. map gid to [ref_count, remote_constructed flag, weakset of ndarray references]
# ndarray_gids = {}
def find_index(distribution, index):
    """Return the index of the distribution segment containing *index*.

    *index* may be an int (treated as a 1-tuple) or a tuple of per-axis
    coordinates. Returns None when no segment contains the point.
    """
    if isinstance(index, int):
        index = (index,)
    dprint(3, "find_index:", distribution, index)
    dshape = distribution.shape
    assert dshape[2] == len(index)
    for seg in range(dshape[0]):
        # A segment contains the point when every coordinate lies within
        # the segment's [low, high] bounds.
        inside = all(
            distribution[seg][0][d] <= index[d] <= distribution[seg][1][d]
            for d in range(dshape[2])
        )
        if inside:
            return seg
# Registry of custom implementations dispatched for this array type; empty
# at import time and presumably populated elsewhere in the module — confirm.
HANDLED_FUNCTIONS = {}
class ndarray:
def __init__(
    self,
    size,
    gid=None,
    distribution=None,
    local_border=0,
    dtype=None,
    flex_dist=True,
    readonly=False,
    **kwargs
):
    """Create a distributed ndarray facade over a backing bdarray.

    Parameters
    ----------
    size : tuple
        Global shape of the array.
    gid : optional
        Global id of an existing backing array; None allocates a new one.
    distribution : optional
        Explicit shard distribution; only honored together with *gid*.
    local_border : int, optional
        Width of the per-worker border region (border exchange maps are
        computed when > 0).
    dtype : optional
        Element type.
    flex_dist : bool, optional
        Whether the distribution may still change before remote construction.
    readonly : bool, optional
        Mark the array read-only (used e.g. by broadcast views).
    **kwargs
        Extra distribution hints forwarded to ``bdarray.assign_bdarray``.
    """
    t0 = timer()
    self.bdarray = bdarray.assign_bdarray(
        self, size, gid, distribution, local_border, flex_dist, dtype, **kwargs
    )  # extra options for distribution hints
    # self.gid = self.bdarray.gid
    self.size = size
    # Use the caller-supplied distribution only when attaching to an
    # existing backing array; otherwise take the one bdarray chose.
    self.distribution = (
        distribution
        if ((distribution is not None) and (gid is not None))
        else self.bdarray.distribution
    )
    self.local_border = local_border
    self.getitem_cache = {}
    # TODO: move to_border, from_border out of ndarray; put into bdarray, or just compute when needed to construct Local_NDarray on remotes
    if local_border > 0:
        self.from_border, self.to_border = shardview.compute_from_border(
            size, self.distribution, local_border
        )
    else:
        self.from_border = None
        self.to_border = None
    # self.order = order
    # #self.remote_constructed = False
    # if gid in ndarray_gids:
    #     ndarray_gids[gid][0]+=1 # increment refcount
    #     ndarray_gids[gid][2].add(self) # add to weakref set
    # else:
    #     # initially: refcount=1, remote_constructed=False, only this array in set
    #     ndarray_gids[gid] = [1, False, weakref.WeakSet([self])]
    # self.broadcasted_dims = broadcasted_dims
    self.readonly = readonly
    t1 = timer()
    dprint(2, "Created ndarray", self.gid, "size", size, "time", (t1 - t0) * 1000)
# TODO: should consider using a class rather than tuple; alternative -- use weak ref to ndarray
def get_details(self):
    """Return a descriptive tuple: (size, distribution, local_border,
    from_border, to_border, dtype)."""
    details = (
        self.size,
        self.distribution,
        self.local_border,
        self.from_border,
        self.to_border,
        self.dtype,
    )
    return details
@property
def gid(self):
    # The global id is owned by the backing distributed array (bdarray).
    return self.bdarray.gid

@property
def shape(self):
    # NumPy-style alias for the global size tuple.
    return self.size

@property
def ndim(self):
    # Number of dimensions of the global array.
    return len(self.size)

@property
def dtype(self):
    # Normalize the stored dtype through np.dtype for NumPy compatibility.
    return np.dtype(self.bdarray.dtype)
def transpose(self, *args):
    """Return a view of the array with axes permuted.

    With no arguments, reverses the axes (NumPy semantics). Otherwise
    accepts either separate axis arguments ``transpose(1, 0)`` or a single
    tuple/list of axes ``transpose((1, 0))``.

    Raises
    ------
    AssertionError
        If the axes are not a permutation of ``range(ndim)``.
    """
    ndims = len(self.size)
    if len(args) == 0:
        # Default: reverse axis order.
        return self.remapped_axis([ndims - i - 1 for i in range(ndims)])
    if len(args) == 1 and isinstance(args[0], (tuple, list)):
        # Single sequence of axes. BUG FIX: previously only a tuple was
        # recognized, so a single list argument (or a single int) left
        # ``axes`` unbound and raised UnboundLocalError.
        axes = tuple(args[0])
    else:
        axes = args
    # Each axis must be used exactly once (the original code noted its
    # range-only check was insufficient — this also rejects duplicates).
    assert sorted(axes) == list(range(ndims))
    return self.remapped_axis(axes)
@property
def T(self):
    """Transposed view of the array (axes reversed), like NumPy's ``.T``."""
    return self.transpose()

# Earlier 2-D-only implementation of T, kept commented out for reference.
# @property
# def T(self):
#     assert(len(self.size) == 2)
#     divs = shardview.distribution_to_divisions(self.distribution)
#     outdiv = np.flip(divs, axis=2)
#     rev_base_offsets = [np.flip(x.base_offset) for x in self.distribution]
#     return ndarray((self.size[1], self.size[0]), gid=self.gid, distribution=shardview.divisions_to_distribution(outdiv, base_offset=rev_base_offsets), order=("C" if self.order == "F" else "F"), broadcasted_dims=(None if self.broadcasted_dims is None else self.broadcasted_dims[::-1]))

# Disabled __del__ below is an inert string literal (never executed).
"""
def __del__(self):
    #ndarray_gids[self.gid][0]-=1
    dprint(2, "Deleting ndarray",self.gid, self)
    #if ndarray_gids[self.gid][0] <=0:
    #    if ndarray_gids[self.gid][1]: # check remote constructed flag
    #        deferred_op.del_remote_array(self.gid)
    #    del ndarray_gids[self.gid]
"""
def __str__(self):
    """Short human-readable summary: gid, global size, and border width."""
    parts = [str(self.gid), str(self.size), str(self.local_border)]
    return " ".join(parts)
def remapped_axis(self, newmap):
    """Return a new ndarray sharing this array's data with axes remapped
    per *newmap* (see ``shardview.remap_axis`` for the mapping semantics)."""
    # make sure array distribution can't change (ie, not flexible or is already constructed)
    if self.bdarray.flex_dist or not self.bdarray.remote_constructed:
        deferred_op.do_ops()
    newsize, newdist = shardview.remap_axis(self.size, self.distribution, newmap)
    return ndarray(newsize, self.gid, newdist, readonly=self.readonly)
def asarray(self):
    """Gather the distributed data and return it as a local numpy array.

    For 0-d arrays the internal scalar value (stored in ``distribution``)
    is returned directly.
    """
    if self.size == ():
        return self.distribution
    # Flush pending deferred ops so remote shards are up to date.
    deferred_op.do_ops()
    ret = np.empty(self.size, dtype=self.dtype)
    # dist_shape = self.distribution.shape
    # topleft = tuple([self.distribution[0][0][j] for j in range(dist_shape[2])])
    dprint(2, "asarray:", self.distribution, self.size)
    dprint(2, "asarray:", shardview.distribution_to_divisions(self.distribution))
    # shards = ray.get([remote_states[i].get_array.remote(self.gid) for i in range(dist_shape[0])])
    # shards = ray.get([remote_states[i].get_partial_array_global.remote(self.gid,
    #     tuple([slice(self.distribution[i][0][j], self.distribution[i][1][j] + 1) for j in range(dist_shape[2])]) ) for i in range(dist_shape[0])])
    # shards = ray.get([remote_states[i].get_view.remote(self.gid, self.distribution[i]) for i in range(num_workers)])
    # Fetch each worker's view of its shard.
    shards = get_results(
        [
            remote_async_call(i, "get_view", self.gid, self.distribution[i])
            for i in range(num_workers)
        ]
    )
    # print("shards:", shards)
    # for i in range(dist_shape[0]):
    # Copy each shard into its global slice of the result.
    for i in range(num_workers):
        # dprint(2, "for:", i, dist_shape[2])
        # gindex = tuple([slice(self.distribution[i][0][j], self.distribution[i][1][j] + 1) for j in range(dist_shape[2])])
        # rindex = slice_minus_offset(gindex, topleft)
        # dprint(3, "gindex:", gindex, rindex, shards[i].shape, shards[i].size)
        dprint(
            3,
            "gslice:",
            shardview.to_slice(self.distribution[i]),
            "bslice:",
            shardview.to_base_slice(self.distribution[i]),
        )
        # ret[rindex] = shards[i]
        ret[shardview.to_slice(self.distribution[i])] = shards[i]
    return ret
def array_unaryop(
    self, op, optext, reduction=False, imports=[], dtype=None, axis=None
):
    """Apply a unary operation to the array.

    Parameters
    ----------
    op : str
        Method name of the operation (e.g. ``"sum"``) used for reductions.
    optext : str
        Source-text form of the operation used when building deferred ops.
    reduction : bool, optional
        When True, reduce over the whole array (``axis=None``) or along
        *axis*; otherwise build a deferred elementwise op into a new array.
    imports : list, optional
        Imports needed by the generated deferred op.
        NOTE(review): mutable default — safe only if callees never mutate
        it; confirm ``deferred_op.add_op`` does not.
    dtype : optional
        Result dtype; ``"float"`` selects float32/float64 based on self.
    axis : int or None, optional
        Reduction axis.
    """
    if dtype is None:
        dtype = self.dtype
    elif dtype == "float":
        dtype = np.float32 if self.dtype == np.float32 else np.float64
    if reduction:
        # TODO: should see if this can be converted into a deferred op
        deferred_op.do_ops()
        if axis is None or (axis == 0 and self.ndim == 1):
            # v = [remote_async_call(i, "array_unaryop", self.gid, self.distribution[i], None, op, axis, dtype) for i in range(num_workers)]
            # g1 = get_results(v)
            # Full reduction: gather per-worker partials, reduce locally.
            g1 = remote_call_all(
                "array_unaryop",
                self.gid,
                None,
                self.distribution,
                None,
                op,
                None,
                dtype,
            )
            v = np.array(g1)
            uop = getattr(v, op)
            ret = uop(dtype=dtype)
        else:
            # Axis reduction: reduce within each shard remotely, then
            # combine the k per-segment slices if needed.
            dsz, dist = shardview.reduce_axis(self.shape, self.distribution, axis)
            k = dsz[axis]
            red_arr = empty(
                dsz, dtype=dtype, distribution=dist
            )  # should create immediately
            remote_exec_all(
                "array_unaryop",
                self.gid,
                red_arr.gid,
                self.distribution,
                dist,
                op,
                axis,
                dtype,
            )
            sl = tuple(0 if i == axis else slice(None) for i in range(red_arr.ndim))
            if k == 1:  # done, just get the slice with axis removed
                ret = red_arr[sl]
            else:
                # need global reduction
                arr = empty_like(red_arr[sl])
                code = ["", arr, " = " + optext + "( np.array([", red_arr[sl]]
                for j in range(1, k):
                    sl = tuple(
                        j if i == axis else slice(None) for i in range(red_arr.ndim)
                    )
                    code += [", ", red_arr[sl]]
                code.append("]) )")
                deferred_op.add_op(code, arr, imports=imports)
                ret = arr
        return ret
    else:
        # Elementwise: build a deferred op writing into a new array with
        # the same distribution.
        new_ndarray = create_array_with_divisions(
            self.size,
            self.distribution,
            local_border=self.local_border,
            dtype=dtype,
        )
        deferred_op.add_op(
            ["", new_ndarray, " = " + optext + "(", self, ")"],
            new_ndarray,
            imports=imports,
        )
        return new_ndarray
def broadcast_to(self, size):
    """Return a read-only view of this array broadcast to *size*
    following NumPy broadcasting rules (may prepend new dimensions).

    Raises
    ------
    ValueError
        If the shapes are not broadcast-compatible.
    """
    dprint(4, "broadcast_to:", self.size, size)
    new_dims = len(size) - len(self.size)
    dprint(4, "new_dims:", new_dims)
    # Trailing dimensions must be compatible: equal, or one of them 1.
    sslice = size[-len(self.size):]
    dprint(4, "sslice:", sslice)
    z1 = zip(sslice, self.size)
    dprint(4, "zip check:", z1)
    if any([a > 1 and b > 1 and a != b for a, b in z1]):
        raise ValueError("Non-broadcastable.")
    # A dimension counts as broadcast if it is newly prepended or
    # stretched from size 1.
    bd = [
        i < new_dims or (size[i] != 1 and self.size[i - new_dims] == 1)
        for i in range(len(size))
    ]
    dprint(4, "broadcasted_dims:", bd)
    # make sure array distribution can't change (ie, not flexible or is already constructed)
    if self.bdarray.flex_dist or not self.bdarray.remote_constructed:
        deferred_op.do_ops()
    return ndarray(
        size,
        gid=self.gid,
        distribution=shardview.broadcast(self.distribution, bd, size),
        local_border=0,
        readonly=True,
    )
@classmethod
def broadcast(cls, a, b):
    """Broadcast *a* and *b* to a common size.

    Returns ``(new_size, a_view, b_view)`` where each view is the 0-d
    internal value, the original object, or a broadcast view as needed.
    """
    new_array_size = numpy_broadcast_size(a, b)

    def _view(arr):
        # Check for 0d case first. If so then return the internal value
        # (stored in distribution).
        if isinstance(arr, ndarray) and arr.shape == ():
            return arr.distribution
        # Non-ndarrays and already-correctly-sized arrays pass through.
        if not isinstance(arr, ndarray) or new_array_size == arr.size:
            return arr
        return arr.broadcast_to(new_array_size)

    return (new_array_size, _view(a), _view(b))
def astype(self, dtype, copy=True):
    """Return the array cast to *dtype*.

    Parameters
    ----------
    dtype : dtype
        Target element type.
    copy : bool, optional
        When True (default) always return a new array; when False, return
        ``self`` if the dtype already matches.

    Raises
    ------
    ValueError
        If ``copy=False`` and a dtype change would be required.
    """
    dprint(3, "astype:", self.dtype, type(self.dtype), dtype, type(dtype), copy)
    if dtype == self.dtype and not copy:
        return self
    if not copy:
        raise ValueError("Non-copy version of astype not implemented.")
    # BUG FIX: the original called ``copy(self)`` when the dtype matched,
    # but ``copy`` is the boolean parameter (shadowing any module-level
    # copy helper), so that path always raised TypeError. A deferred
    # elementwise assignment performs the copy — and the cast when the
    # dtypes differ.
    new_ndarray = create_array_with_divisions(
        self.size, self.distribution, dtype=dtype
    )
    deferred_op.add_op(["", new_ndarray, " = ", self], new_ndarray)
    return new_ndarray
def array_binop(
self, rhs, op, optext, inplace=False, reverse=False, imports=[], dtype=None
):
t0 = timer()
if isinstance(rhs, np.ndarray):
rhs = fromarray(rhs)
if inplace:
sz, selfview, rhsview = ndarray.broadcast(self, rhs)
assert self.size == sz
if not isinstance(selfview, ndarray) and not isinstance(rhsview, ndarray):
getattr(selfview, op)(rhsview)
return self
deferred_op.add_op(["", self, optext, rhsview], self, imports=imports)
t1 = timer()
dprint(4, "BINARY_OP:", optext, "time", (t1 - t0) * 1000)
return self
else:
lb = max(
self.local_border, rhs.local_border if isinstance(rhs, ndarray) else 0
)
new_array_size, selfview, rhsview = ndarray.broadcast(self, rhs)
if hasattr(rhs, | |
key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the key portion of the pair, with multiple possible values such as “sales,” “legal,” and “administration.”
- **Value** *(string) --*
The second part of a key-value pair that forms a tag associated with a given resource. For instance, if you want to show which resources are used by which departments, you might use “Department” as the initial (key) portion of the pair, with a value of “sales” to indicate the sales department.
:type InputDataConfig: dict
:param InputDataConfig: **[REQUIRED]**
Specifies the format and location of the input data. The S3 bucket containing the input data must be located in the same region as the entity recognizer being created.
- **EntityTypes** *(list) --* **[REQUIRED]**
The entity types in the input data for an entity recognizer.
- *(dict) --*
Information about an individual item on a list of entity types.
- **Type** *(string) --* **[REQUIRED]**
Entity type of an item on an entity type list.
- **Documents** *(dict) --* **[REQUIRED]**
S3 location of the documents folder for an entity recognizer
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the training documents for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **Annotations** *(dict) --*
S3 location of the annotations file for an entity recognizer.
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the annotations for an entity recognizer are located. The URI must be in the same region as the API endpoint that you are calling.
- **EntityList** *(dict) --*
S3 location of the entity list for an entity recognizer.
- **S3Uri** *(string) --* **[REQUIRED]**
Specifies the Amazon S3 location where the entity list is located. The URI must be in the same region as the API endpoint that you are calling.
:type ClientRequestToken: string
:param ClientRequestToken:
A unique identifier for the request. If you don\'t set the client request token, Amazon Comprehend generates one.
This field is autopopulated if not provided.
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. All documents must be in the same language. Only English (\"en\") is currently supported.
:type VolumeKmsKeyId: string
:param VolumeKmsKeyId:
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt data on the storage volume attached to the ML compute instance(s) that process the analysis job. The VolumeKmsKeyId can be either of the following formats:
* KMS Key ID: ``\"<KEY>``
* Amazon Resource Name (ARN) of a KMS Key: ``\"arn:aws:kms:us-west-2:111122223333:key/<KEY>``
:rtype: dict
:returns:
"""
pass
def delete_document_classifier(self, DocumentClassifierArn: str) -> Dict:
    """
    Deletes a previously created document classifier.

    Only those classifiers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ``ResourceInUseException`` will be returned.
    This is an asynchronous action that puts the classifier into a DELETING state, and it is then removed by a background job. Once removed, the classifier disappears from your account and is no longer available for use.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DeleteDocumentClassifier>`_

    **Request Syntax**
    ::

        response = client.delete_document_classifier(
            DocumentClassifierArn='string'
        )

    **Response Syntax**
    ::

        {}

    **Response Structure**
    - *(dict) --*

    :type DocumentClassifierArn: string
    :param DocumentClassifierArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) that identifies the document classifier.
    :rtype: dict
    :returns: An empty dictionary (see Response Syntax above).
    """
    pass
def delete_entity_recognizer(self, EntityRecognizerArn: str) -> Dict:
    """
    Deletes an entity recognizer.

    Only those recognizers that are in terminated states (IN_ERROR, TRAINED) will be deleted. If an active inference job is using the model, a ``ResourceInUseException`` will be returned.
    This is an asynchronous action that puts the recognizer into a DELETING state, and it is then removed by a background job. Once removed, the recognizer disappears from your account and is no longer available for use.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DeleteEntityRecognizer>`_

    **Request Syntax**
    ::

        response = client.delete_entity_recognizer(
            EntityRecognizerArn='string'
        )

    **Response Syntax**
    ::

        {}

    **Response Structure**
    - *(dict) --*

    :type EntityRecognizerArn: string
    :param EntityRecognizerArn: **[REQUIRED]**
        The Amazon Resource Name (ARN) that identifies the entity recognizer.
    :rtype: dict
    :returns: An empty dictionary (see Response Syntax above).
    """
    pass
def describe_document_classification_job(self, JobId: str) -> Dict:
"""
Gets the properties associated with a document classification job. Use this operation to get the status of a classification job.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/DescribeDocumentClassificationJob>`_
**Request Syntax**
::
response = client.describe_document_classification_job(
JobId='string'
)
**Response Syntax**
::
{
'DocumentClassificationJobProperties': {
'JobId': 'string',
'JobName': 'string',
'JobStatus': 'SUBMITTED'|'IN_PROGRESS'|'COMPLETED'|'FAILED'|'STOP_REQUESTED'|'STOPPED',
'Message': 'string',
'SubmitTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1),
'DocumentClassifierArn': 'string',
'InputDataConfig': {
'S3Uri': 'string',
'InputFormat': 'ONE_DOC_PER_FILE'|'ONE_DOC_PER_LINE'
},
'OutputDataConfig': {
'S3Uri': 'string',
'KmsKeyId': 'string'
},
'DataAccessRoleArn': 'string',
'VolumeKmsKeyId': 'string'
}
}
**Response Structure**
- *(dict) --*
- **DocumentClassificationJobProperties** *(dict) --*
An object that describes the properties associated with the document classification job.
- **JobId** *(string) --*
The identifier assigned to the document classification job.
- **JobName** *(string) --*
The name that you assigned to the document classification job.
- **JobStatus** *(string) --*
The current status of the document classification job. If the status is ``FAILED`` , the ``Message`` field shows the reason for the failure.
- **Message** *(string) --*
A description of the status of the job.
- **SubmitTime** *(datetime) --*
The time that the document classification job was submitted for processing.
- **EndTime** *(datetime) --*
The time that the document classification job completed.
- **DocumentClassifierArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the document classifier.
- **InputDataConfig** *(dict) --*
The input data configuration that you supplied when you created the document classification job.
- **S3Uri** *(string) --*
The Amazon S3 URI for the input data. The URI must be in same region as the API endpoint that you are calling. The URI can point to a single input file or it can provide the prefix for a collection of data files.
For example, if you use the URI ``S3://bucketName/prefix`` , if the prefix is a single file, Amazon Comprehend uses that file as input. If more than one file begins with the prefix, Amazon Comprehend uses all of them as input.
- **InputFormat** *(string) --*
Specifies how the text in an input file should be processed:
* ``ONE_DOC_PER_FILE`` - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.
* ``ONE_DOC_PER_LINE`` - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
- **OutputDataConfig** *(dict) --*
The output data configuration that you supplied when you created the document classification job.
- **S3Uri** *(string) --*
When you use the ``OutputDataConfig`` object with asynchronous operations, you specify the Amazon S3 location where you want to write the output data. The URI must be in the same region as the API endpoint that you are calling. The location is used as the prefix for the actual location of the output file.
When the topic detection job is finished, the service creates an output file in a directory specific to the job. The ``S3Uri`` field contains the location of the output file, called ``output.tar.gz`` . It is a compressed archive that contains the ouput of the operation.
- **KmsKeyId** *(string) --*
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the output results from an analysis job. The KmsKeyId can be one of the following formats:
* KMS Key ID: ``"<KEY>"``
* Amazon Resource Name (ARN) of a KMS Key: ``"arn:aws:kms:us-west-2:111122223333:key/<KEY>"``
| |
"4363 4468 4537",
51051: "4363 4468 4538",
51052: "4363 4468 4539",
51053: "4363 4468 4540",
51054: "4363 4468 4541",
51055: "4363 4468 4542",
51056: "4363 4468 4543",
51057: "4363 4468 4544",
51058: "4363 4468 4545",
51059: "4363 4468 4546",
51060: "4363 4469",
51061: "4363 4469 4520",
51062: "4363 4469 4521",
51063: "4363 4469 4522",
51064: "4363 4469 4523",
51065: "4363 4469 4524",
51066: "4363 4469 4525",
51067: "4363 4469 4526",
51068: "4363 4469 4527",
51069: "4363 4469 4528",
51070: "4363 4469 4529",
51071: "4363 4469 4530",
51072: "4363 4469 4531",
51073: "4363 4469 4532",
51074: "4363 4469 4533",
51075: "4363 4469 4534",
51076: "4363 4469 4535",
51077: "4363 4469 4536",
51078: "4363 4469 4537",
51079: "4363 4469 4538",
51080: "4363 4469 4539",
51081: "4363 4469 4540",
51082: "4363 4469 4541",
51083: "4363 4469 4542",
51084: "4363 4469 4543",
51085: "4363 4469 4544",
51086: "4363 4469 4545",
51087: "4363 4469 4546",
51088: "4364 4449",
51089: "4364 4449 4520",
51090: "4364 4449 4521",
51091: "4364 4449 4522",
51092: "4364 4449 4523",
51093: "4364 4449 4524",
51094: "4364 4449 4525",
51095: "4364 4449 4526",
51096: "4364 4449 4527",
51097: "4364 4449 4528",
51098: "4364 4449 4529",
51099: "4364 4449 4530",
51100: "4364 4449 4531",
51101: "4364 4449 4532",
51102: "4364 4449 4533",
51103: "4364 4449 4534",
51104: "4364 4449 4535",
51105: "4364 4449 4536",
51106: "4364 4449 4537",
51107: "4364 4449 4538",
51108: "4364 4449 4539",
51109: "4364 4449 4540",
51110: "4364 4449 4541",
51111: "4364 4449 4542",
51112: "4364 4449 4543",
51113: "4364 4449 4544",
51114: "4364 4449 4545",
51115: "4364 4449 4546",
51116: "4364 4450",
51117: "4364 4450 4520",
51118: "4364 4450 4521",
51119: "4364 4450 4522",
51120: "4364 4450 4523",
51121: "4364 4450 4524",
51122: "4364 4450 4525",
51123: "4364 4450 4526",
51124: "4364 4450 4527",
51125: "4364 4450 4528",
51126: "4364 4450 4529",
51127: "4364 4450 4530",
51128: "4364 4450 4531",
51129: "4364 4450 4532",
51130: "4364 4450 4533",
51131: "4364 4450 4534",
51132: "4364 4450 4535",
51133: "4364 4450 4536",
51134: "4364 4450 4537",
51135: "4364 4450 4538",
51136: "4364 4450 4539",
51137: "4364 4450 4540",
51138: "4364 4450 4541",
51139: "4364 4450 4542",
51140: "4364 4450 4543",
51141: "4364 4450 4544",
51142: "4364 4450 4545",
51143: "4364 4450 4546",
51144: "4364 4451",
51145: "4364 4451 4520",
51146: "4364 4451 4521",
51147: "4364 4451 4522",
51148: "4364 4451 4523",
51149: "4364 4451 4524",
51150: "4364 4451 4525",
51151: "4364 4451 4526",
51152: "4364 4451 4527",
51153: "4364 4451 4528",
51154: "4364 4451 4529",
51155: "4364 4451 4530",
51156: "4364 4451 4531",
51157: "4364 4451 4532",
51158: "4364 4451 4533",
51159: "4364 4451 4534",
51160: "4364 4451 4535",
51161: "4364 4451 4536",
51162: "4364 4451 4537",
51163: "4364 4451 4538",
51164: "4364 4451 4539",
51165: "4364 4451 4540",
51166: "4364 4451 4541",
51167: "4364 4451 4542",
51168: "4364 4451 4543",
51169: "4364 4451 4544",
51170: "4364 4451 4545",
51171: "4364 4451 4546",
51172: "4364 4452",
51173: "4364 4452 4520",
51174: "4364 4452 4521",
51175: "4364 4452 4522",
51176: "4364 4452 4523",
51177: "4364 4452 4524",
51178: "4364 4452 4525",
51179: "4364 4452 4526",
51180: "4364 4452 4527",
51181: "4364 4452 4528",
51182: "4364 4452 4529",
51183: "4364 4452 4530",
51184: "4364 4452 4531",
51185: "4364 4452 4532",
51186: "4364 4452 4533",
51187: "4364 4452 4534",
51188: "4364 4452 4535",
51189: "4364 4452 4536",
51190: "4364 4452 4537",
51191: "4364 4452 4538",
51192: "4364 4452 4539",
51193: "4364 4452 4540",
51194: "4364 4452 4541",
51195: "4364 4452 4542",
51196: "4364 4452 4543",
51197: "4364 4452 4544",
51198: "4364 4452 4545",
51199: "4364 4452 4546",
51200: "4364 4453",
51201: "4364 4453 4520",
51202: "4364 4453 4521",
51203: "4364 4453 4522",
51204: "4364 4453 4523",
51205: "4364 4453 4524",
51206: "4364 4453 4525",
51207: "4364 4453 4526",
51208: "4364 4453 4527",
51209: "4364 4453 4528",
51210: "4364 4453 4529",
51211: "4364 4453 4530",
51212: "4364 4453 4531",
51213: "4364 4453 4532",
51214: "4364 4453 4533",
51215: "4364 4453 4534",
51216: "4364 4453 4535",
51217: "4364 4453 4536",
51218: "4364 4453 4537",
51219: "4364 4453 4538",
51220: "4364 4453 4539",
51221: "4364 4453 4540",
51222: "4364 4453 4541",
51223: "4364 4453 4542",
51224: "4364 4453 4543",
51225: "4364 4453 4544",
51226: "4364 4453 4545",
51227: "4364 4453 4546",
51228: "4364 4454",
51229: "4364 4454 4520",
51230: "4364 4454 4521",
51231: "4364 4454 4522",
51232: "4364 4454 4523",
51233: "4364 4454 4524",
51234: "4364 4454 4525",
51235: "4364 4454 4526",
51236: "4364 4454 4527",
51237: "4364 4454 4528",
51238: "4364 4454 4529",
51239: "4364 4454 4530",
51240: "4364 4454 4531",
51241: "4364 4454 4532",
51242: "4364 4454 4533",
51243: "4364 4454 4534",
51244: "4364 4454 4535",
51245: "4364 4454 4536",
51246: "4364 4454 4537",
51247: "4364 4454 4538",
51248: "4364 4454 4539",
51249: "4364 4454 4540",
51250: "4364 4454 4541",
51251: "4364 4454 4542",
51252: "4364 4454 4543",
51253: "4364 4454 4544",
51254: "4364 4454 4545",
51255: "4364 4454 4546",
51256: "4364 4455",
51257: "4364 4455 4520",
51258: "4364 4455 4521",
51259: "4364 4455 4522",
51260: "4364 4455 4523",
51261: "4364 4455 4524",
51262: "4364 4455 4525",
51263: "4364 4455 4526",
51264: "4364 4455 4527",
51265: "4364 4455 4528",
51266: "4364 4455 4529",
51267: "4364 4455 4530",
51268: "4364 4455 4531",
51269: "4364 4455 4532",
51270: "4364 4455 4533",
51271: "4364 4455 4534",
51272: "4364 4455 4535",
51273: "4364 4455 4536",
51274: "4364 4455 4537",
51275: "4364 4455 4538",
51276: "4364 4455 4539",
51277: "4364 4455 4540",
51278: "4364 4455 4541",
51279: "4364 4455 4542",
51280: "4364 4455 4543",
51281: "4364 4455 4544",
51282: "4364 4455 4545",
51283: "4364 4455 4546",
51284: "4364 4456",
51285: "4364 4456 4520",
51286: "4364 4456 4521",
51287: "4364 4456 4522",
51288: "4364 4456 4523",
51289: "4364 4456 4524",
51290: "4364 4456 4525",
51291: "4364 4456 4526",
51292: "4364 4456 4527",
51293: "4364 4456 4528",
51294: "4364 4456 4529",
51295: "4364 4456 4530",
51296: "4364 4456 4531",
51297: "4364 4456 4532",
51298: "4364 4456 4533",
51299: "4364 4456 4534",
51300: "4364 4456 4535",
51301: "4364 4456 4536",
51302: "4364 4456 4537",
51303: "4364 4456 4538",
51304: "4364 4456 4539",
51305: "4364 4456 4540",
51306: "4364 4456 4541",
51307: "4364 4456 4542",
51308: "4364 4456 4543",
51309: "4364 4456 4544",
51310: "4364 4456 4545",
51311: "4364 4456 4546",
51312: "4364 4457",
51313: "4364 4457 4520",
51314: "4364 4457 4521",
51315: "4364 4457 4522",
51316: "4364 4457 4523",
51317: "4364 4457 4524",
51318: "4364 4457 4525",
51319: "4364 4457 4526",
51320: "4364 4457 4527",
51321: "4364 4457 4528",
51322: "4364 4457 4529",
51323: "4364 4457 4530",
51324: "4364 4457 4531",
51325: "4364 4457 4532",
51326: "4364 4457 4533",
51327: "4364 4457 4534",
51328: "4364 4457 4535",
51329: "4364 4457 4536",
51330: "4364 4457 4537",
51331: "4364 4457 4538",
51332: "4364 4457 4539",
51333: "4364 4457 4540",
51334: "4364 4457 4541",
51335: "4364 4457 4542",
51336: "4364 4457 4543",
51337: "4364 4457 4544",
51338: "4364 4457 4545",
51339: "4364 4457 4546",
51340: "4364 4458",
51341: "4364 4458 4520",
51342: "4364 4458 4521",
51343: "4364 4458 4522",
51344: "4364 4458 4523",
51345: "4364 4458 4524",
51346: "4364 4458 4525",
51347: "4364 4458 4526",
51348: "4364 4458 4527",
51349: "4364 4458 4528",
51350: "4364 4458 4529",
51351: "4364 4458 4530",
51352: "4364 4458 4531",
51353: "4364 4458 4532",
51354: "4364 4458 4533",
51355: "4364 4458 4534",
51356: "4364 4458 4535",
51357: "4364 4458 4536",
51358: "4364 4458 4537",
51359: "4364 4458 4538",
51360: "4364 4458 4539",
51361: "4364 4458 4540",
51362: "4364 4458 4541",
51363: "4364 4458 4542",
51364: "4364 4458 4543",
51365: "4364 4458 4544",
51366: "4364 4458 4545",
51367: "4364 4458 4546",
51368: "4364 4459",
51369: "4364 4459 4520",
51370: "4364 4459 4521",
51371: "4364 4459 4522",
51372: "4364 4459 4523",
51373: | |
<reponame>amcclead7336/Enterprise_Data_Science_Final
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class JasmineOperations(object):
"""JasmineOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Bind the service client, configuration and (de)serializers.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # The four dependencies are independent; store them all.
    self.config = config
    self._deserialize = deserializer
    self._serialize = serializer
    self._client = client
def get_curated_environment(
        self, subscription_id, resource_group_name, workspace_name, experiment_id, auto_ml_curated_environment_input_dto=None, custom_headers=None, raw=False, **operation_config):
    """POST to the getCuratedEnvironment endpoint for an experiment.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which
     the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id: Experiment Id.
    :type experiment_id: str
    :param auto_ml_curated_environment_input_dto:
    :type auto_ml_curated_environment_input_dto:
     ~_restclient.models.AutoMLCuratedEnvInput
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AutoMLCuratedEnvOutput or ClientRawResponse if raw=true
    :rtype: ~_restclient.models.AutoMLCuratedEnvOutput or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<_restclient.models.ErrorResponseException>`
    """
    # Expand the URL template with the serialized path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'experimentId': self._serialize.url("experiment_id", experiment_id, 'str')
    }
    url = self._client.format_url(self.get_curated_environment.metadata['url'], **path_format_arguments)
    # No query parameters for this operation.
    query_parameters = {}
    # JSON request headers, plus any caller-supplied extras.
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Serialize the optional request body.
    body_content = None
    if auto_ml_curated_environment_input_dto is not None:
        body_content = self._serialize.body(auto_ml_curated_environment_input_dto, 'AutoMLCuratedEnvInput')
    # Issue the POST and validate the status code.
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    deserialized = self._deserialize('AutoMLCuratedEnvOutput', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_curated_environment.metadata = {'url': '/jasmine/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/getCuratedEnvironment'}
def submit_remote_snapshot_run(
        self, subscription_id, resource_group_name, workspace_name, experiment_id, parent_run_id, run_definition=None, snapshot_id=None, custom_headers=None, raw=False, **operation_config):
    """POST a snapshot run submission under a parent run.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which
     the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id: Experiment Id.
    :type experiment_id: str
    :param parent_run_id:
    :type parent_run_id: str
    :param run_definition:
    :type run_definition: ~_restclient.models.RunDefinition
    :param snapshot_id:
    :type snapshot_id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: StartRunResult or ClientRawResponse if raw=true
    :rtype: ~_restclient.models.StartRunResult or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<_restclient.models.ErrorResponseException>`
    """
    # Expand the URL template with the serialized path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'experimentId': self._serialize.url("experiment_id", experiment_id, 'str'),
        'parentRunId': self._serialize.url("parent_run_id", parent_run_id, 'str')
    }
    url = self._client.format_url(self.submit_remote_snapshot_run.metadata['url'], **path_format_arguments)
    # Optional snapshotId query parameter.
    query_parameters = {}
    if snapshot_id is not None:
        query_parameters['snapshotId'] = self._serialize.query("snapshot_id", snapshot_id, 'str')
    # JSON request headers, plus any caller-supplied extras.
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Serialize the optional request body.
    body_content = None
    if run_definition is not None:
        body_content = self._serialize.body(run_definition, 'RunDefinition')
    # Issue the POST and validate the status code.
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    deserialized = self._deserialize('StartRunResult', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
submit_remote_snapshot_run.metadata = {'url': '/jasmine/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{parentRunId}/submitSnapshotRun'}
def continue_run(
        self, subscription_id, resource_group_name, workspace_name, experiment_id, parent_run_id, updated_iterations=None, updated_time=-1, updated_exit_score=None, custom_headers=None, raw=False, **operation_config):
    """Continue a parent run with updated iteration/time/exit-score limits.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which
     the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id: Experiment Id.
    :type experiment_id: str
    :param parent_run_id:
    :type parent_run_id: str
    :param updated_iterations:
    :type updated_iterations: int
    :param updated_time: defaults to -1, so updatedTime is sent unless
     the caller explicitly passes None
    :type updated_time: int
    :param updated_exit_score:
    :type updated_exit_score: float
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<_restclient.models.ErrorResponseException>`
    """
    # Construct URL
    url = self.continue_run.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'experimentId': self._serialize.url("experiment_id", experiment_id, 'str'),
        'parentRunId': self._serialize.url("parent_run_id", parent_run_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    if updated_iterations is not None:
        query_parameters['updatedIterations'] = self._serialize.query("updated_iterations", updated_iterations, 'int')
    if updated_time is not None:
        query_parameters['updatedTime'] = self._serialize.query("updated_time", updated_time, 'int')
    if updated_exit_score is not None:
        query_parameters['updatedExitScore'] = self._serialize.query("updated_exit_score", updated_exit_score, 'float')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Construct and send request (no body for this operation)
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
# BUGFIX: the URL template previously used '{parentrunId}' (lowercase 'r'),
# which does not match the 'parentRunId' key passed to format_url, so the
# placeholder could never be substituted. Fixed to '{parentRunId}'.
continue_run.metadata = {'url': '/jasmine/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{parentRunId}/continueRun'}
def create_parent_run_method(
        self, subscription_id, resource_group_name, workspace_name, experiment_id, create_parent_run_dto=None, custom_headers=None, raw=False, **operation_config):
    """POST a parent-run creation request and return the new run id string.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which
     the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id: Experiment Id.
    :type experiment_id: str
    :param create_parent_run_dto:
    :type create_parent_run_dto: ~_restclient.models.CreateParentRun
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: str or ClientRawResponse if raw=true
    :rtype: str or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<_restclient.models.ErrorResponseException>`
    """
    # Expand the URL template with the serialized path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'experimentId': self._serialize.url("experiment_id", experiment_id, 'str')
    }
    url = self._client.format_url(self.create_parent_run_method.metadata['url'], **path_format_arguments)
    # No query parameters for this operation.
    query_parameters = {}
    # JSON request headers, plus any caller-supplied extras.
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Serialize the optional request body.
    body_content = None
    if create_parent_run_dto is not None:
        body_content = self._serialize.body(create_parent_run_dto, 'CreateParentRun')
    # Issue the POST and validate the status code.
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    deserialized = self._deserialize('str', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
create_parent_run_method.metadata = {'url': '/jasmine/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/run'}
def local_run_get_next_task(
        self, subscription_id, resource_group_name, workspace_name, experiment_id, parent_run_id, start_child_run=None, local_run_get_next_task_input=None, custom_headers=None, raw=False, **operation_config):
    """POST to fetch the next iteration task for a local parent run.

    :param subscription_id: The Azure Subscription ID.
    :type subscription_id: str
    :param resource_group_name: The Name of the resource group in which
     the workspace is located.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param experiment_id: Experiment Id.
    :type experiment_id: str
    :param parent_run_id:
    :type parent_run_id: str
    :param start_child_run:
    :type start_child_run: bool
    :param local_run_get_next_task_input:
    :type local_run_get_next_task_input:
     ~_restclient.models.LocalRunGetNextTaskInput
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: IterationTask or ClientRawResponse if raw=true
    :rtype: ~_restclient.models.IterationTask or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<_restclient.models.ErrorResponseException>`
    """
    # Expand the URL template with the serialized path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'experimentId': self._serialize.url("experiment_id", experiment_id, 'str'),
        'parentRunId': self._serialize.url("parent_run_id", parent_run_id, 'str')
    }
    url = self._client.format_url(self.local_run_get_next_task.metadata['url'], **path_format_arguments)
    # Optional startChildRun query parameter.
    query_parameters = {}
    if start_child_run is not None:
        query_parameters['startChildRun'] = self._serialize.query("start_child_run", start_child_run, 'bool')
    # JSON request headers, plus any caller-supplied extras.
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    # Serialize the optional request body.
    body_content = None
    if local_run_get_next_task_input is not None:
        body_content = self._serialize.body(local_run_get_next_task_input, 'LocalRunGetNextTaskInput')
    # Issue the POST and validate the status code.
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)
    deserialized = self._deserialize('IterationTask', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
local_run_get_next_task.metadata = {'url': '/jasmine/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/experimentids/{experimentId}/runs/{parentRunId}/next'}
def local_run_get_next_task_batch(
self, subscription_id, resource_group_name, workspace_name, experiment_id, parent_run_id, start_child_runs=None, local_run_get_next_task_batch_input=None, custom_headers=None, raw=False, **operation_config):
"""
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the | |
αρτόδεντρο αρφάνια αρχάγγελος αρχάνθρωπος αρχές αρχέτυπο αρχέτυπον αρχή
αρχίγραμμα αρχίδι αρχίνημα αρχίνισμα αρχαΐζουσα αρχαΐστρια αρχαία
αρχαίος αρχαγγελικός αρχαιγόνιο αρχαιοβακτήριο αρχαιοβοτανική
αρχαιογνωσία αρχαιογνωστικός αρχαιογνώστης αρχαιοδίφης αρχαιοδιφικός
αρχαιοκάπηλος αρχαιοκαπηλία αρχαιοκύτταρο αρχαιολάτρης αρχαιολάτρισσα
αρχαιολογία αρχαιολόγος αρχαιομάθεια αρχαιομανία αρχαιομετρία αρχαιονετρίνο
αρχαιοπληξία αρχαιοπρέπεια αρχαιοπώλης αρχαιοπώλισσα αρχαιοσυλία αρχαιοφιλία
αρχαιρεσίες αρχαιότητα αρχαιόφιλος αρχαϊκότητα αρχαϊσμός αρχαϊστής αρχαϊστικός
αρχείον αρχεγονία αρχεγονιάτες αρχειοδίφης αρχειοδιφικός αρχειοθέτης
αρχειοθέτρια αρχειοθήκη αρχειοφυλάκιο αρχειοφυλακείο αρχειοφύλακας αρχειοφύλαξ
αρχηγία αρχηγίνα αρχηγίς αρχηγίσκος αρχηγείο αρχηγείον αρχηγισμός αρχηγός
αρχιγουναραίος αρχιγραμματέας αρχιγραμματεία αρχιγραμματεύς αρχιδιά
αρχιδιάκος αρχιδικαστής αρχιδούκας αρχιδούκισσα αρχιεπίσκοπος αρχιεπισκοπή
αρχιεργάτης αρχιεργάτισσα αρχιεργάτρια αρχιεροσύνη αρχιθαλαμηπόλος
αρχιθησαυροφύλακας αρχιθύτης αρχικελευστής αρχικλέφτης αρχικλέφτρα
αρχικουμούνι αρχικτηνίατρος αρχιλήσταρχος αρχιλακές αρχιληστής αρχιλογίστρια
αρχιλοχίας αρχιμάστορας αρχιμανδρίτης αρχιμηνιά αρχιμηχανικός αρχιμουσικός
αρχινοσοκόμα αρχινοσοκόμος αρχιπέλαγος αρχιπλοίαρχος αρχιστράτηγος
αρχισυμμορίτης αρχισυντάκτης αρχισυντάκτρια αρχισυντάχτης αρχισυντάχτρια
αρχιτέκτονας αρχιτέκτων αρχιτεκτονική αρχιτεκτόνημα αρχιτεκτόνισσα αρχιτελώνης
αρχιτεμπέλαρος αρχιτεμπέλης αρχιτεχνίτης αρχιτεχνίτις αρχιτεχνίτισσα
αρχιφύλακας αρχιφύλαξ αρχιχρονιά αρχιψεύταρος αρχιψεύτης αρχιψεύτρα
αρχολίπαρος αρχολιπαρία αρχομανία αρχονετρίνο αρχονουκλεοσύνθεση
αρχοντάρης αρχονταρίκι αρχονταριό αρχοντιά αρχοντικό αρχοντογυναίκα αρχοντολόι
αρχοντοχωριάτης αρχοντοχωριάτισσα αρχοντοχωριατιά αρχοντοχωριατισμός
αρχοντόπουλο αρχοντόσπιτο αρχοσπόριο αρχοφωτόνια αρχοφωτόνιο αρχόντισσα
αρωγή αρωγός αρωδαμός αρωματοποιία αρωματοποιείο αρωματοποιός αρωματοπωλείο
αρωματοπώλις αρωματοπώλισσα αρωμουνικά αρόσιμος αρύταινα ασάφεια ασέβεια
ασέλγεια ασέξουαλ ασέξουαλς ασήμι ασήμια ασήμωμα ασαμικά ασανσέρ ασανσεριτζής
ασβέστι ασβέστιο ασβέστωμα ασβέστωση ασβακάνδη ασβεστάδικο ασβεστάς ασβεστίτης
ασβεστοκάμινο ασβεστοκάμινος ασβεστοκονίαμα ασβεστού ασβεστόγαλα ασβεστόλιθος
ασβόλη ασβός ασεξουαλικότητα ασετιλίνη ασετυλίνη ασετόν ασημί ασημαντότης
ασημικά ασημικό ασημοκάντηλο ασημόσκονη ασημότης ασημότητα ασηψία ασθένεια
ασθενικότης ασθενικότητα ασθενοφόρο ασιάτης ασιανολογία ασιανολόγος ασιανός
ασινόφιδο ασιτία ασκέρι ασκήμια ασκήτρια ασκί ασκαλώνιο ασκαψία ασκημάδα
ασκητήριο ασκητής ασκηταριό ασκητεία ασκητισμός ασκιανάδα ασκιανός ασκληπιείο
ασκορδούλακας ασκός ασλάνι ασματογράφος ασπάλαθος ασπάλακας ασπάλαξ ασπίδα
ασπαραγίνη ασπασμός ασπεργκερικός ασπιρίνη ασπλάχνια ασπλαχνία ασπλαχνιά
ασπράδι ασπρίλα ασπρικά ασπριστής ασπριτζής ασπροδέλφινο ασπροθαλασσίτης
ασπρολούλουδο ασπροπάρης ασπροπάρι ασπροπυργιώτης ασπρορουχάδικο ασπρορουχάς
ασπροφρύδα ασπροφρύδης ασπρόξυλο ασπρόρουχα ασπρόρουχο ασπρόχωμα ασπόνδυλα
ασσίτης ασσασίνος ασσυριακά ασσυριολογία ασσυριολόγος αστάθεια αστάρι αστάρωμα
αστέρας αστέρι αστή αστήθι αστήρ αστήρανθος αστίατρος αστακοδεξαμενή
αστακολίβαδο αστακοουρά αστακοτροφία αστακοτροφείο αστακός αστακόσουπα αστείο
αστειότητα αστεράκι αστερίας αστερίσκος αστερισμός αστεροειδής αστεροσκοπείο
αστερόεσσα αστεϊσμός αστιγμία αστιγματισμός αστικοποίηση αστικοποίησις αστικό
αστισμός αστοργία αστουριανά αστοχία αστοχασιά αστράβη αστράγαλος αστράκι
αστρέχα αστρί αστρίτης αστραγαλιά αστρακιά αστραπάρι αστραπή αστραποβρόντι
αστραποφεγγιά αστραπόβροντο αστραπόφεγγο αστραπόφιδο αστραχιά αστραψιά
αστροβιογένεση αστροβιολογία αστροβολίδα αστρογεωλογία αστροκουάρκ αστρολάβος
αστρολούλουδο αστρολόγος αστρομαντεία αστρομαντική αστρομετρία αστροναυτική
αστροναύτισσα αστρονομία αστρονόμος αστροπαλίτης αστροπελέκι αστροφεγγιά
αστρόφεγγο αστυθύρεος αστυκτηνίατρος αστυλογία αστυνομία αστυνομικά
αστυνομικός αστυνομοκρατία αστυνόμευση αστυνόμος αστυσία αστυφιλία αστυφυλακή
αστός ασυγυρισιά ασυδοσία ασυλία ασυλλογισιά ασυμβατότητα ασυμμετρία ασυμφωνία
ασυνέχεια ασυναισθησία ασυναρτησία ασυνείδητο ασυνειδησία ασυνεννοησία
ασυρματίστρια ασυρματιστής ασφάλακας ασφάλεια ασφάλιση ασφάλισις ασφάλισμα
ασφάλτωση ασφάλτωσις ασφέρδουκλας ασφακόμελο ασφαλίτης ασφαλίτισσα
ασφαλισιμότητα ασφαλιστήριο ασφαλιστής ασφαλτολίμνη ασφαλτόπανο ασφαλτόστρωμα
ασφαλτόστρωσις ασφοδέλι ασφοδήλι ασφοδίλι ασφοδελίνη ασφοντύλι ασφυγμία
ασφόδελος ασφόντυλος ασχήμια ασχετίλα ασχετοσύνη ασχημάδα ασχημάνθρωπος
ασχημόπαπο ασχολία ασωματίδιο ασωτία ασύρματος ατάκα ατέλεια ατίμασμα ατίμωση
αταβισμός αταξία αταραξία ατασθαλία ατεκνία ατελιέ ατεχνία ατζάρδος ατζέντα
ατζί ατζαμοσύνη ατζούγα ατζούγια ατημέλεια ατημελησία ατημελησιά ατθιδογράφος
ατιμασμός ατιμαστής ατιμωρησία ατλάζι ατμάκατος ατμάμαξα ατμιστής ατμοβαρίδα
ατμοδρόμωνας ατμοκίνηση ατμοκαθαριστήρας ατμοκαθαριστής ατμοκινητήρας
ατμολέβητας ατμομάγειρας ατμομηχανή ατμομηχανικός ατμοπαγίδα ατμοπλοΐα
ατμοποίησις ατμοσίδερο ατμοστρόβιλος ατμοσφαίρα ατμοτουρμπίνα ατμόιππος
ατμόπλοιο ατμός ατμόσφαιρα ατολμία ατομίκευση ατομίκευσις ατομίστρια ατομιζέρ
ατομικισμός ατομικιστής ατομικότης ατομικότητα ατομισμός ατομιστής ατομοκράτης
ατονία ατονικότης ατονικότητα ατοπία ατοπικός ατού ατρακτίδιο ατραξιόν ατραπός
ατροπίνη ατροφία ατροφικότητα ατρωσία ατσάλι ατσάλωμα ατσάλωση ατσέλεγος
ατσίδα ατσίδας ατσαλάκωτος ατσαλίνα ατσαλιά ατσαλοσύνη ατσαλόπροκα ατσελεράντο
ατταπουλγίτης αττικίζουσα αττικίστρια αττικισμός αττικιστής ατυχία ατόλη
ατόνησις ατόπημα ατύχημα αυγή αυγίτης αυγινή αυγοθήκη αυγολέμονο αυγομαντεία
αυγοσαλάτα αυγοτάραχο αυγουλάδικο αυγουλάκι αυγουλάς αυγουλομάτης αυγουστιά
αυγούλι αυγό αυγότσουφλο αυθάδεια αυθέντης αυθαίρετο αυθαιρεσία αυθεντία
αυθεντικότητα αυθορμησία αυθορμητισμός αυθυπαρξία αυθυποβολή αυθυποστασία
αυθυπόστατο αυλάκι αυλάκιασμα αυλάκισμα αυλάκωμα αυλάκωση αυλάρχης αυλή
αυλαία αυλακιά αυλαρχία αυλαρχείο αυλητής αυλητρίδα αυλιδιώτης αυλικός
αυλοκόλακας αυλωθητήρας αυλωνίτης αυλόγυρος αυλόθυρα αυλόπορτα αυλός
αυνανισμός αυξίνη αυξομείωση αυξορρύθμιση αυξότητα αυριανά αυριανισμός
αυστηρότητα αυστραλέζος αυστραλοπίθηκος αυστραλός αυτάδελφος αυτάρκεια αυτί
αυταδέλφισσα αυτανάφλεξη αυταξιολόγηση αυταπάρνηση αυταπάτη αυταρέσκεια
αυταρχικότης αυταρχικότητα αυταρχισμός αυτασφάλεια αυτασφάλιση αυτεγκλωβισμός
αυτεμβόλιο αυτεμπλοκή αυτενέργεια αυτενδοσκόπηση αυτεξουσιότητα αυτεπίγνωση
αυτεπιστασία αυτερωτισμός αυτηκοΐα αυτισμός αυτιστικός αυτοΐαση αυτοάμυνα
αυτοέπαινος αυτοαμφισβήτηση αυτοανάδειξη αυτοαναίρεση αυτοανοσία
αυτοαντίληψη αυτοαξιολόγηση αυτοαποτίμηση αυτοαποτελεσματικότητα
αυτοβιογένεση αυτοβιογράφημα αυτοβιογράφηση αυτοβιογράφος αυτοβιογραφία
αυτοβουλία αυτογένεση αυτογαμία αυτογνωσία αυτογνώστης αυτογονιμοποίηση
αυτοδέσμευση αυτοδιάγνωση αυτοδιάθεση αυτοδιάλυση αυτοδιάψευση αυτοδιέγερση
αυτοδιαφήμιση αυτοδιαχείριση αυτοδιαχειρίζομαι αυτοδικία αυτοδικαίωση
αυτοδιορισμός αυτοδιπλασιασμός αυτοδραστικότητα αυτοδυναμία αυτοεγκλωβισμός
αυτοειρωνεία αυτοεκσπλαχνισμός αυτοεκτίμηση αυτοεμπιστοσύνη αυτοεμπλοκή
αυτοεξαίρεση αυτοεξορία αυτοεξυπηρέτηση αυτοεξόφληση αυτοεπίγνωση
αυτοεπιδιόρθωση αυτοεπικονίαση αυτοεποπτεία αυτοερωτισμός αυτοθέσμιση
αυτοθεραπεία αυτοθυσία αυτοκάθαρση αυτοκέφαλο αυτοκίνητο αυτοκαθαρισμός
αυτοκαθορισμός αυτοκαλλιέργεια αυτοκατάργηση αυτοκαταδίκη αυτοκατανάλωση
αυτοκατεύθυνση αυτοκεφαλία αυτοκινητάδα αυτοκινητάκι αυτοκινητάμαξα
αυτοκινητισμός αυτοκινητιστής αυτοκινητοβιομηχανία αυτοκινητοδρομία
αυτοκινητοθυρίδα αυτοκινητοπομπή αυτοκινητοτράπεζα αυτοκινητόδρομος
αυτοκολλητάκι αυτοκράτειρα αυτοκράτορας αυτοκράτωρ αυτοκρατία αυτοκρατορία
αυτοκριτική αυτοκτονία αυτοκτόνος αυτοκυβέρνηση αυτοκυριαρχία αυτοκόλλητο
αυτολογοκρισία αυτοματισμός αυτοματοποίηση αυτομείωση αυτομελέτη αυτομετάγγιση
αυτομόληση αυτομόλυνση αυτομόρφωση αυτονομία αυτονομίστρια αυτονομιστής
αυτοοικολογία αυτοπάθεια αυτοπαγίδευση αυτοπαλίνδρομος αυτοπαλινδρόμηση
αυτοπαρακίνηση αυτοπαρηγορία αυτοπαρηγοριά αυτοπαρουσίαση αυτοπειθάρχηση
αυτοπεποίθηση αυτοπερίπλεξη αυτοπεριορισμός αυτοπεριπλοκή αυτοπορτρέτο
αυτοπροβολή αυτοπρομηθευτής αυτοπροσδιορισμός αυτοπροστασία αυτοπροσωπογράφος
αυτοπροώθηση αυτοπυρπόληση αυτοραδιογράφημα αυτορρύθμιση αυτοσαρκασμός
αυτοσεβασμός αυτοσκοπός αυτοστοχασμός αυτοσυγκέντρωση αυτοσυγκράτημα
αυτοσυνείδηση αυτοσυνειδησία αυτοσυντήρηση αυτοσυντηρησία αυτοσυσχέτιση
αυτοσχεδιάστρια αυτοσχεδιασμός αυτοσχεδιαστής αυτοσχεδιαστικός αυτοσύμπλεξη
αυτοτέλεια αυτοτελείωση αυτοτελειοποίηση αυτοτιμωρία αυτοτομία αυτοτραυματίας
αυτοτροφοδότηση αυτουργία αυτουργός αυτοφαγία αυτοφθορισμός αυτοφωράκιας
αυτοχαρακτηρισμός αυτοχειρία αυτοχειριασμός αυτοχθονισμός αυτοχρηματοδότηση
αυτοϊκανοποίηση αυτοϋπέρβαση αυτοϋπονόμευση αυτόγραφο αυτόγυρο αυτόκαυστο
αυτόματο αυτόμολος αυτόπτης αυτόπτις αυτότητα αυτόφωρο αυτόχειρ αυτόχειρας
αυτόχθων αυτώνυμο αυχένας αφάλι αφάνα αφάνεια αφάνισμα αφέλεια αφέλειες
αφέντισσα αφέντρα αφέτης αφέψημα αφή αφήγημα αφήγηση αφήλιο αφήνιασμα αφίδα
αφίππευση αφίσα αφαίμαξη αφαίρεση αφαγία αφαγιά αφαιμαξομετάγγιση αφαιρέτης
αφαιρετική αφαλάτωση αφαλατώνω αφαλός αφανισμός αφανιστής αφασία αφγανός
αφελληνισμός αφεντάνθρωπος αφεντιά αφεντικίνα αφεντικό αφεντικός
αφεντόπαιδο αφεντόπουλο αφερεγγυότητα αφερμάτιση αφερμάτισμα αφερματισμός
αφηγήτρια αφηγηματικότητα αφηγητής αφηνίαση αφηνίασμα αφηνιασμός αφηρημάδα
αφθαρσία αφθονία αφιέρωμα αφιέρωση αφιαπωνισμός αφιδνιώτης αφιερωτής
αφιλία αφιλανθρωπία αφιλοκέρδεια αφιλοκαλία αφιλομουσία αφιλοξενία αφιλοπατρία
αφιλοτιμία αφιλοτιμιά αφιλοχρηματία αφιονισμός αφισοκολλήτρια αφισοκολλητής
αφισορύπανση αφισούλα αφιόνι αφιόνισμα αφλογιστία αφοβία αφοβιά αφοδευτήριο
αφοπλισμός αφορία αφορδακός αφορεσμός αφορισμός αφορμή αφοσίωση αφουγκράστρα
αφούγκρασμα αφραγκία αφριά αφρικάανς αφρικανολλανδικά αφρισμός αφροέλληνας
αφροδίσιο αφροδισία αφροδισιακό αφροδισιασμός αφροδισιαστής αφροδισιολογία
αφρολέξ αφρολλανδικά αφρομηλιά αφροντισία αφροντισιά αφροξυλιά αφροσύνη
αφρόγαλο αφρόκρεμα αφρόλουτρο αφρόξυλο αφρός αφρόστοκος αφρόψαρο αφτί αφυΐα
αφυδάτωση αφυδρογόνωση αφυλαξία αφυπηρέτηση αφωνία αφόδευμα αφόδευση αφόπλιση
αφόρμισμα αφύγρανση αφύπνιση αχάμνια αχάνεια αχάτης αχέπανς
αχαμνά αχαμνάδα αχανές αχαριστία αχείλι αχερώνα αχερώνας αχεσιά αχηβάδα
αχθοφόρος αχιβάδα αχιλλαία αχινιός αχινός αχινόσουπα αχιουρές αχλάδα αχλάδι
αχλαδιά αχλαδομηλιά αχλαδόμηλο αχλαδόσχημος αχλύς αχμάκης αχνάδα αχνάρι
αχνοφεγγιά αχνός αχνόφεγγο αχολογή αχολόγημα αχολόι αχονδροπλασία αχορταγιά
αχορτασιά αχούλ αχούρι αχρήστευση αχρήστευσις αχρειολογία αχρειολόγος
αχρειότης αχρειότητα αχρηματία αχρησία αχρηστία αχρωμία αχρωματοψία
αχτίδα αχτίνα αχταρμάς αχυράνθρωπος αχυροκαλύβα αχυροσκεπή αχυρόστρωμα αχυρώνα
αχωνεψιά αχός αψάδα αψέντι αψήφιση αψίδα αψίδωμα αψίδωση αψίνθιο αψίς αψηφισιά
αψιθιά αψιθυμία αψιλία αψιμαχία αψινθιά αϊβαλιώτης αϊμάρα αϊνσταΐνιο αϊράνι
αϊτινός αϊτονύχης αϊτονύχισσα αϊτόπουλο αϊτός αϋλισμός αϋπνία αόριστος αύλακα
αύλαξ αύξηση αύρα αύριο αἰώρα αἴγειρος αὐγόν β-λακτάμες βάβισμα βάβω βάγιο
βάδιση βάδισμα βάζελος βάζο βάθεμα βάθος βάθρακας βάθρο βάθυνση βάι βάιο
βάκλα βάκτρο βάκτρον βάλανος βάλσαμο βάλσιμο βάλτος βάλτωμα βάμβαξ βάμμα
βάνα βάνδαλος βάπτιση βάπτισις βάπτισμα βάραθρο βάραθρον βάρβαρος βάρβιτος
βάρδος βάρδουλο βάρεμα βάριο βάριον βάρκα βάρνα βάρος βάρσαμο βάρσαμος βάσανο
βάσανος βάση βάσις βάσκαμα βάσταγμα βάτα βάτεμα βάτευμα βάτο βάτος βάτραχος
βάφλα βάφτιση βάφτισμα βάψη βάψιμο βάψις βέγγε βέγκε βέδες βέλασμα βέλγος βέλο
βέμβικας βένγκε βένδα βένθος βέντο βέξιμον βέρα βέργα βέσπα βέτο βήμα βήξιμο
βήτα βήχας βήχιο βία βίβλος βίβλος βίγλα βίδα βίδρα βίδωμα βίζα βίζιτα βίκι
βίλα βίλλος βίντεο βίντζι βίντσι βίος βίπερ βίρα βίσονας βίσων βίτζι βίτσα
βίωμα βίωση βίωσις βαένι βαβά βαβά βαβεσίωση βαβουίνος βαβουκλί βαβούλι
βαβυλωνία βαγένι βαγαποντιά βαγγέλιο βαγενάρης βαγενάς βαγεναρείο βαγεναριό
βαγιοβδομάδα βαγιόκλαδο βαγιόκλαρο βαγκνεριστής βαγκόν-λι βαγκόν-ρεστοράν
βαγόνι βαδιστής βαζάκι βαζελίνη βαζιβουζούκος βαθμίδα βαθμοθέτης βαθμοθέτηση
βαθμοθηρία βαθμολογία βαθμολογητής βαθμολόγηση βαθμολόγιο βαθμονομία
βαθμονόμηση βαθμονόμος βαθμοφόρος βαθμωτό βαθμός βαθομέτρηση βαθούλωμα
βαθυμέτρηση βαθυμετρία βαθυσκάφος βαθυτυπία βαθόμετρο βαθύμετρο βαθύνοια
βαθύτητα βαθύχορδο βακέσιο βακέτα βακαλάος βακελίτης βακούφι βακούφιο βακτήριο
βακτηρίαση βακτηρίδιο βακτηρίωση βακτηριαιμία βακτηριολογία βακτηριοσίνη
βακτηριοχλωροφύλλη βακτηριόσταση βακχεία βακχευτής βακχεύτρια βακχιστόρημα
βαλάντωμα βαλέρ βαλές βαλής βαλίνη βαλίτζα βαλίτσα βαλαάς βαλανίδι βαλανίτιδα
βαλανιδιά βαλανιδόψωμο βαλανόστρακο βαλβίδα βαλβιδοπάθεια βαλβιδοπλαστική
βαλβολίνη βαλελίκι βαλεριάνα βαλιδέ βαλιντέ βαλιτσάκι βαλιτσάρα βαλιτσούλα
βαλκανιονίκης βαλκανολογία βαλκανολόγος βαλκανοποίηση βαλλίστρα βαλλισμός
βαλμάς βαλμαδιό βαλμαριό βαλονικά βαλς βαλσάκι βαλσάμωμα βαλσάμωση βαλσαμέλαιο
βαλτονέρι βαλτοποταμίδα βαλτοτόπι βαλτόμπουφος βαλτόνερο βαλτότοπος βαμβάκι
βαμβακέμπορος βαμβακίαση βαμβακιά βαμβακοπαραγωγή βαμβακοπαραγωγός
βαμβακουργία βαμβακουργείο βαμβακοφυτεία βαμβακούλα βαμβακόμελο βαμβακόπιτα
βαμβακόσχοινο βαμβακώνας βαμπάκι βαμπίρ βανάδιο βανίλια βαναυσούργημα
βαναυσότητα βανγκαρντισμός βανγκαρντιστής βανδαλισμός βανιλίνη βανοστάσιο
βαπέρ βαποράκι βαποράρα βαποριά βαποριζατέρ βαπτίσια βαπτισμός βαπτιστής
βαράθρωση βαράθρωσις βαρέλα βαρέλι βαρίδι βαρίδιο βαρίδιον βαρίτης βαρβαρισμός
βαρβαρότητα βαρβατίλα βαρβατιά βαρβατότητα βαρβιτουρικά βαρδάρης βαρδαβέλα
βαρδατέντα βαρδιάνος βαρδιάτορας βαρεία βαρελάδικο βαρελάκι βαρελάς
βαρελοποιός βαρελοσάνιδο βαρελοσανίδα βαρελοσανίς βαρελοστεφάνη βαρελότο
βαρεμένη βαρεμός βαρηκοΐα βαρηκούω βαριά βαριάντ βαριάντα βαριαναστεναγμός
βαριεμάρα | |
# Digital Object Identifier (DOI)
#if headerCol == "D2": paperIn[""] = col # Book Digital Object Identifier (DOI)
if headerCol == "PG": paperIn["pageCount"] = col # Page Count
#if headerCol == "WC": paperIn["subject"] = col # Web of Science Categories
if headerCol == "SC": paperIn["subject"] = col # Research Areas
#if headerCol == "GA": paperIn[""] = col #Document Delivery Number
if headerCol == "UT": paperIn["eid"] = col # Accession Number
if headerCol == "PM": paperIn["pubMedId"] = col # PubMed ID
#if headerCol == "OA": paperIn[""] = col # Open Access Indicator
#if headerCol == "HC": paperIn[""] = col # ESI Highly Cited Paper. Note that this field is valued only for ESI subscribers.
#if headerCol == "HP": paperIn[""] = col # ESI Hot Paper. Note that this field is valued only for ESI subscribers.
#if headerCol == "DA": paperIn[""] = col # Date this report was generated.
# Own fields
if headerCol == "Subject": paperIn["subject"] = col
if headerCol == "duplicatedIn": paperIn["duplicatedIn"] = col.split(";")
if headerCol == "country": paperIn["country"] = col
if headerCol == "institution": paperIn["institution"] = col
if headerCol == "institutionWithCountry": paperIn["institutionWithCountry"] = col
if headerCol == "bothKeywords": paperIn["bothKeywords"] = col
if headerCol == "emailHost": paperIn["emailHost"] = col
if headerCol == "authorFull": paperIn["authorFull"] = col
colnum += 1
# Omit papers with invalid year
if not paperIn["year"].isdigit():
continue
# Omit papers without title
if paperIn["title"] == "":
continue
# Put the database ussing eid
if paperIn["dataBase"] == "":
if paperIn["eid"].startswith("WOS"):
paperIn["dataBase"] = "WoS"
paperIn["source"] = "WoS"
elif paperIn["eid"].startswith("2-"):
paperIn["dataBase"] = "Scopus"
# If not valid eid
else:
continue
# If cited by is emtpy add 0
if paperIn["citedBy"] == "":
paperIn["citedBy"] = "0"
# Change to false to not preprocess author
if True:
if paperIn["dataBase"] == "WoS":
paperIn["author"] = paperIn["author"].replace(";", ",")
# Remove accents in author
paperIn["author"] = unidecode.unidecode(paperIn["author"])
paperIn["authorFull"] = unidecode.unidecode(paperIn["authorFull"])
# Put a dot after the name initials in uppercase
author = []
for i in range(len(paperIn["author"])):
author.append(paperIn["author"][i])
# if the last character and uppercase, put the dot
if i == len(paperIn["author"]) - 1:
if paperIn["author"][i].isupper():
author.append('.')
break
# if upper and the next is upper or "," put the dot
if paperIn["author"][i].isupper() and \
(paperIn["author"][i + 1].isupper() or paperIn["author"][i + 1] == ','):
author.append('.')
paperIn["author"] = ''.join(author)
# Remove the "-" that is before an initial, to have coherence between WoS and Scoups
paperIn["author"] = paperIn["author"].replace(".-", ".")
# Replace authors separater ".," with ";" for scientopy analysis, put it back in paperSave
paperIn["author"] = paperIn["author"].replace(".,", ".;")
# Extract country, institution and institutionWithCountry from affilation
if paperIn["country"] == "" or paperIn["institution"] == "" or paperIn["institutionWithCountry"] == "":
# Get each author affiliations
affiliations = re.split("; (?=[^\]]*(?:\[|$))", paperIn["affiliations"])
countries = []
institutions = []
institutionsWithCoutnry = []
# For each affiliation
for affiliation in affiliations:
# Divide affiliation in sections by ",", but not consider "," inside "[]"
afSections = re.split(", (?=[^\]]*(?:\[|$))|]", affiliation)
# The last item in affiliation list is the country
country = afSections[-1].strip()
# Remove dots in country
country = country.replace(".", "")
if "BOSNIA & HERCEG".upper() == country.upper():
country = "Bosnia and Herzegovina"
if "CHINA".upper() in country.upper():
country = "China"
if "ENGLAND".upper() in country.upper():
country = "United Kingdom"
if "SCOTLAND".upper() in country.upper():
country = "United Kingdom"
if "WALES".upper() in country.upper():
country = "United Kingdom"
if "UK".upper() == country.upper():
country = "United Kingdom"
if "KINGDOM OF SAUDI ARABIA".upper() == country.upper():
country = "Saudi Arabia"
if "RUSSIA".upper() in country.upper():
country = "Russian Federation"
if "TRINID & TOBAGO".upper() == country.upper():
country = "Trinidad and Tobago"
if "U ARAB EMIRATES".upper() == country.upper():
country = "United Arab Emirates"
if "USA".upper() in country.upper():
country = "United States"
if "VIET NAM".upper() == country.upper():
country = "Vietnam"
# To do not duplicate countries in country field
if country.upper() not in [x.upper() for x in countries]:
countries.append(country)
# Get institution
institution = ""
if paperIn["dataBase"] == "WoS" and affiliations != "":
# Extract institution as the second element in affiliation sections
if len(afSections) >= 2:
institution = afSections[1].strip()
if institution.upper() not in [x.upper() for x in institutions]:
institutions.append(institution)
institutionWithCoutnry = ""
if institution != "":
institutionWithCoutnry = ("%s, %s" % (institution.replace(",", ""), country.replace(",", "")))
if institutionWithCoutnry.upper() not in [x.upper() for x in institutionsWithCoutnry]:
institutionsWithCoutnry.append(institutionWithCoutnry)
#print("\nOriginal: %s" % affiliation)
#print("afSections: %s" % str(afSections))
#print("country: %s" % country)
#print("institution: %s" % institution)
#print("institutionWithCoutnry: %s" % institutionWithCoutnry)
paperIn["country"] = ";".join(countries)
paperIn["institution"] = ";".join(institutions)
paperIn["institutionWithCountry"] = ";".join(institutionsWithCoutnry)
# Get email host
if paperIn["emailHost"] == "":
splited1 = paperIn["correspondenceAddress"].split("@")
if len(splited1) > 1:
splited2 = splited1[1].split(";")
paperIn["emailHost"] = splited2[0]
else:
paperIn["emailHost"] = "No email"
# Both keywords
if paperIn["bothKeywords"] == "":
bothKeywords = []
for keyword in paperIn["authorKeywords"].split(";"):
keywordStriped = keyword.strip()
if keywordStriped == "":
continue
if keywordStriped.upper() not in [x.upper() for x in bothKeywords]:
bothKeywords.append(keywordStriped)
for keyword in paperIn["indexKeywords"].split(";"):
keywordStriped = keyword.strip()
if keywordStriped == "":
continue
if keywordStriped.upper() not in [x.upper() for x in bothKeywords]:
bothKeywords.append(keywordStriped)
paperIn["bothKeywords"] = ";".join(bothKeywords)
# printPaper(paperIn)
globalVar.loadedPapers += 1
# Filter papers that are not in document tipe list
if any(pType.upper() in paperIn["documentType"].upper().split("; ") \
for pType in globalVar.INCLUDED_TYPES):
papersDict.append(paperIn)
if paperIn["dataBase"] == "WoS":
globalVar.papersWoS += 1
if paperIn["dataBase"] == "Scopus":
globalVar.papersScopus += 1
else:
globalVar.omitedPapers += 1
rownum += 1
ifile.close()
def getPapersLinkFromFile(ifile, papersDict):
    """Collect Scopus record links from a CSV/TSV export file.

    For every data row that contains a column starting with
    ``https://www.scopus.com``, append ``{"Link": url}`` to ``papersDict``
    (if several columns match, the last one wins).  The delimiter is sniffed
    from the first line (tab-separated WoS exports vs. comma-separated Scopus
    exports) and the header row is skipped.  ``ifile`` is closed on return.

    :param ifile: open text-mode file object positioned at the start
    :param papersDict: list that receives the ``{"Link": url}`` dicts
    """
    # Peek at the first line to pick the delimiter, then rewind so the csv
    # reader still sees every row including the header.
    start_pos = ifile.tell()
    first_line = ifile.readline()
    ifile.seek(start_pos)
    delimiter = '\t' if '\t' in first_line else ','
    reader = csv.reader(ifile, delimiter=delimiter)
    for rownum, row in enumerate(reader):
        # Skip the header row.
        if rownum == 0:
            continue
        link = ""
        for col in row:
            if col.startswith("https://www.scopus.com"):
                link = col  # last matching column wins, as before
        if link != "":
            papersDict.append({"Link": link})
    ifile.close()
def printPaper(paper):
    """Dump the main fields of a paper record to stdout for debugging."""
    # Simple one-line fields, printed in the historical order.
    simple_fields = [
        ('Authors: %s', "author"),
        ('Title: %s', "title"),
        ('Year: %s', "year"),
        ('Source: %s', "source"),
        ('DOI: %s', "doi"),
        ('Author Key: %s', "authorKeywords"),
        ('Index Key: %s', "indexKeywords"),
        ('eid: %s', "eid"),
        ('Data base: %s', "dataBase"),
    ]
    for template, key in simple_fields:
        print(template % (paper[key]))
    # Affiliations are split on "; " separators that sit outside brackets.
    print('Affilations:')
    for af in re.split("; (?=[^\]]*(?:\[|$))", paper["affiliations"]):
        print("- " + af)
    print('Country: %s' % (paper["country"]))
    print('Document type: %s' % (paper["documentType"]))
    print('Cited by: %s' % (paper["citedBy"]))
    print('\n')
def removeDuplicates(paperDict, logWriter=None, preProcessBrief=None):
duplicatedPapersCount = 0
removedPapersScopus = 0
removedPapersWoS = 0
duplicatedWithDifferentCitedBy = 0
noAuthors = 0
# Remove part of the title inside parentisis or square brakets
# Some journals put this the original language tile in the brakets
# Remove whitespace at the end and start of the tile
for paper in paperDict:
paper["titleB"] = unidecode.unidecode(paper["title"])
paper["titleB"] = re.sub("[\(\[].*?[\)\]]", "", paper["titleB"].upper()).strip()
paper["titleB"] = re.sub("[^a-zA-Z0-9]+", "", paper["titleB"])
paper["firstAuthorLastName"] = unidecode.unidecode(paper["author"])
paper["firstAuthorLastName"] = paper["firstAuthorLastName"].upper().strip()
paper["firstAuthorLastName"] = re.sub(";|\.|,", " ", paper["firstAuthorLastName"]).split(" ")[0]
paper["firstAuthorLastName"] = re.sub("[^a-zA-Z]+", "", paper["firstAuthorLastName"])
# Short by database, to put WoS first over Scopus, reverse True
paperDict = sorted(paperDict, key=lambda x: x["dataBase"], reverse=True)
paperDict = sorted(paperDict, key=lambda x: x["titleB"])
print("Removing duplicates...")
globalVar.progressText = 'Removing duplicates'
countMatch2 = 0
progressPerPrev = 0
# Run on paper list
for i in range(0, len(paperDict)):
match = True
while(match):
# If we are on the last paper in the list
if i >= (len(paperDict) - 1):
match = False
continue
# Compare first author last name and titleB in uppercase
match = (paperDict[i]["firstAuthorLastName"] == paperDict[i+1]["firstAuthorLastName"])
match &= (paperDict[i]["titleB"] == paperDict[i+1]["titleB"])
if(paperDict[i]["doi"] != ""):
match |= (paperDict[i]["doi"] == paperDict[i + 1]["doi"])
match2 = (paperDict[i]["year"] != paperDict[i + 1]["year"]) & match
if (match2 == True):
countMatch2 += 1
#print(countMatch2)
# If the criterion match
if(match == True):
#print("\nPaper %s duplicated with %s" % (i, i+1))
#print("Dup A: %s, %s" % (paperDict[i]["title"], paperDict[i]["year"]))
#print("Authors: %s, Database: %s, Cited by: %s" %
#(paperDict[i]["author"], paperDict[i]["dataBase"], paperDict[i]["citedBy"]))
#print("Dup B: %s, %s" % (paperDict[i+1]["title"], paperDict[i+1]["year"]))
#print("Authors: %s, Database: %s, Cited by: %s" %
#(paperDict[i+1]["author"], paperDict[i+1]["dataBase"], | |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class used to build new loggers."""
import argparse
import functools
import operator
from abc import ABC, abstractmethod
from argparse import Namespace
from functools import wraps
from typing import Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
from weakref import ReferenceType
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
def rank_zero_experiment(fn: Callable) -> Callable:
    """Decorate an ``experiment`` accessor so only rank 0 gets the real one.

    On rank 0 the wrapped function's (truthy) result is returned; on every
    other rank the rank-guarded call yields nothing and a ``DummyExperiment``
    is handed back instead.
    """

    @wraps(fn)
    def experiment(self):
        # ``rank_zero_only`` suppresses the call on non-zero ranks, so the
        # guarded helper returns a falsy value there and the fallback fires.
        @rank_zero_only
        def _materialize():
            return fn(self)

        real_experiment = _materialize()
        return real_experiment or DummyExperiment()

    return experiment
class LightningLoggerBase(ABC):
"""
Base class for experiment loggers.
Args:
agg_key_funcs:
Dictionary which maps a metric name to a function, which will
aggregate the metric values for the same steps.
agg_default_func:
Default function to aggregate metric values. If some metric name
is not presented in the `agg_key_funcs` dictionary, then the
`agg_default_func` will be used for aggregation.
Note:
The `agg_key_funcs` and `agg_default_func` arguments are used only when
one logs metrics with the :meth:`~LightningLoggerBase.agg_and_log_metrics` method.
"""
def __init__(
self,
agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None,
agg_default_func: Callable[[Sequence[float]], float] = np.mean,
):
self._prev_step: int = -1
self._metrics_to_agg: List[Dict[str, float]] = []
self._agg_key_funcs = agg_key_funcs if agg_key_funcs else {}
self._agg_default_func = agg_default_func
    def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None:
        """
        Called after the model checkpoint callback saves a new checkpoint.
        The base implementation is a no-op; subclasses may override.
        Args:
            checkpoint_callback: the model checkpoint callback instance
        """
        pass
def update_agg_funcs(
self,
agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None,
agg_default_func: Callable[[Sequence[float]], float] = np.mean,
):
"""
Update aggregation methods.
Args:
agg_key_funcs:
Dictionary which maps a metric name to a function, which will
aggregate the metric values for the same steps.
agg_default_func:
Default function to aggregate metric values. If some metric name
is not presented in the `agg_key_funcs` dictionary, then the
`agg_default_func` will be used for aggregation.
"""
if agg_key_funcs:
self._agg_key_funcs.update(agg_key_funcs)
if agg_default_func:
self._agg_default_func = agg_default_func
    @property
    @abstractmethod
    def experiment(self) -> Any:
        """Return the experiment object associated with this logger.

        Concrete logger subclasses must implement this property.
        """
def _aggregate_metrics(
self, metrics: Dict[str, float], step: Optional[int] = None
) -> Tuple[int, Optional[Dict[str, float]]]:
"""
Aggregates metrics.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
step: Step number at which the metrics should be recorded
Returns:
Step and aggregated metrics. The return value could be ``None``. In such case, metrics
are added to the aggregation list, but not aggregated yet.
"""
# if you still receiving metric from the same step, just accumulate it
if step == self._prev_step:
self._metrics_to_agg.append(metrics)
return step, None
# compute the metrics
agg_step, agg_mets = self._reduce_agg_metrics()
# as new step received reset accumulator
self._metrics_to_agg = [metrics]
self._prev_step = step
return agg_step, agg_mets
def _reduce_agg_metrics(self):
"""Aggregate accumulated metrics."""
# compute the metrics
if not self._metrics_to_agg:
agg_mets = None
elif len(self._metrics_to_agg) == 1:
agg_mets = self._metrics_to_agg[0]
else:
agg_mets = merge_dicts(self._metrics_to_agg, self._agg_key_funcs, self._agg_default_func)
return self._prev_step, agg_mets
def _finalize_agg_metrics(self):
"""This shall be called before save/close."""
agg_step, metrics_to_log = self._reduce_agg_metrics()
self._metrics_to_agg = []
if metrics_to_log is not None:
self.log_metrics(metrics=metrics_to_log, step=agg_step)
def agg_and_log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None):
"""
Aggregates and records metrics.
This method doesn't log the passed metrics instantaneously, but instead
it aggregates them and logs only if metrics are ready to be logged.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
step: Step number at which the metrics should be recorded
"""
agg_step, metrics_to_log = self._aggregate_metrics(metrics=metrics, step=step)
if metrics_to_log:
self.log_metrics(metrics=metrics_to_log, step=agg_step)
    @abstractmethod
    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None):
        """
        Records metrics.
        This method logs metrics as soon as it receives them. If you want to aggregate
        metrics for one specific `step`, use the
        :meth:`~pytorch_lightning.loggers.base.LightningLoggerBase.agg_and_log_metrics` method.
        Args:
            metrics: Dictionary with metric names as keys and measured quantities as values
            step: Step number at which the metrics should be recorded
        """
        pass
@staticmethod
def _convert_params(params: Union[Dict[str, Any], Namespace]) -> Dict[str, Any]:
# in case converting from namespace
if isinstance(params, Namespace):
params = vars(params)
if params is None:
params = {}
return params
@staticmethod
def _sanitize_callable_params(params: Dict[str, Any]) -> Dict[str, Any]:
"""
Sanitize callable params dict, e.g. ``{'a': <function_**** at 0x****>} -> {'a': 'function_****'}``.
Args:
params: Dictionary containing the hyperparameters
Returns:
dictionary with all callables sanitized
"""
def _sanitize_callable(val):
# Give them one chance to return a value. Don't go rabbit hole of recursive call
if isinstance(val, Callable):
try:
_val = val()
if isinstance(_val, Callable):
return val.__name__
return _val
# todo: specify the possible exception
except Exception:
return getattr(val, "__name__", None)
return val
return {key: _sanitize_callable(val) for key, val in params.items()}
@staticmethod
def _flatten_dict(params: Dict[Any, Any], delimiter: str = "/") -> Dict[str, Any]:
"""
Flatten hierarchical dict, e.g. ``{'a': {'b': 'c'}} -> {'a/b': 'c'}``.
Args:
params: Dictionary containing the hyperparameters
delimiter: Delimiter to express the hierarchy. Defaults to ``'/'``.
Returns:
Flattened dict.
Examples:
>>> LightningLoggerBase._flatten_dict({'a': {'b': 'c'}})
{'a/b': 'c'}
>>> LightningLoggerBase._flatten_dict({'a': {'b': 123}})
{'a/b': 123}
>>> LightningLoggerBase._flatten_dict({5: {'a': 123}})
{'5/a': 123}
"""
def _dict_generator(input_dict, prefixes=None):
prefixes = prefixes[:] if prefixes else []
if isinstance(input_dict, MutableMapping):
for key, value in input_dict.items():
key = str(key)
if isinstance(value, (MutableMapping, Namespace)):
value = vars(value) if isinstance(value, Namespace) else value
yield from _dict_generator(value, prefixes + [key])
else:
yield prefixes + [key, value if value is not None else str(None)]
else:
yield prefixes + [input_dict if input_dict is None else str(input_dict)]
return {delimiter.join(keys): val for *keys, val in _dict_generator(params)}
@staticmethod
def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns params with non-primitvies converted to strings for logging.
>>> params = {"float": 0.3,
... "int": 1,
... "string": "abc",
... "bool": True,
... "list": [1, 2, 3],
... "namespace": Namespace(foo=3),
... "layer": torch.nn.BatchNorm1d}
>>> import pprint
>>> pprint.pprint(LightningLoggerBase._sanitize_params(params)) # doctest: +NORMALIZE_WHITESPACE
{'bool': True,
'float': 0.3,
'int': 1,
'layer': "<class 'torch.nn.modules.batchnorm.BatchNorm1d'>",
'list': '[1, 2, 3]',
'namespace': 'Namespace(foo=3)',
'string': 'abc'}
"""
for k in params.keys():
# convert relevant np scalars to python types first (instead of str)
if isinstance(params[k], (np.bool_, np.integer, np.floating)):
params[k] = params[k].item()
elif type(params[k]) not in [bool, int, float, str, torch.Tensor]:
params[k] = str(params[k])
return params
@abstractmethod
def log_hyperparams(self, params: argparse.Namespace, *args, **kwargs):
    """
    Record hyperparameters. Must be implemented by concrete loggers.

    Args:
        params: :class:`~argparse.Namespace` containing the hyperparameters
        args: Optional positional arguments, depends on the specific logger being used
        kwargs: Optional keyword arguments, depends on the specific logger being used
    """
def log_graph(self, model: "pl.LightningModule", input_array=None) -> None:
    """
    Record the model's computational graph. No-op in this base class;
    loggers that support graph logging override it.

    Args:
        model: the LightningModule whose graph should be recorded
        input_array: example input passed to ``model.forward`` to trace the graph
    """
    pass
def save(self) -> None:
    """Save log data.

    The base implementation only flushes any pending aggregated metrics.
    """
    self._finalize_agg_metrics()
def finalize(self, status: str) -> None:
    """
    Do any processing that is necessary to finalize an experiment.
    The base implementation simply saves pending log data.

    Args:
        status: Status that the experiment finished with (e.g. success, failed, aborted)
    """
    self.save()
def close(self) -> None:
    """Do any cleanup that is necessary to close an experiment.

    The base implementation just saves pending log data.
    """
    self.save()
@property
def save_dir(self) -> Optional[str]:
    """
    Return the root directory where experiment logs get saved, or `None` if the logger does not
    save data locally.
    """
    # Base implementation: this logger keeps nothing on disk.
    return None
@property
@abstractmethod
def name(self) -> str:
    """Return the experiment name. Must be implemented by concrete loggers."""
@property
@abstractmethod
def version(self) -> Union[int, str]:
    """Return the experiment version. Must be implemented by concrete loggers."""
def _add_prefix(self, metrics: Dict[str, float]):
if self._prefix:
metrics = {f"{self._prefix}{self.LOGGER_JOIN_CHAR}{k}": v for k, v in metrics.items()}
return metrics
class LoggerCollection(LightningLoggerBase):
"""
The :class:`LoggerCollection` class is used to iterate all logging actions over
the given `logger_iterable`.
Args:
logger_iterable: An iterable collection of loggers
"""
def __init__(self, logger_iterable: Iterable[LightningLoggerBase]):
    # Keep a reference to the wrapped loggers; every logging call on the
    # collection is fanned out to each of them in turn.
    super().__init__()
    self._logger_iterable = logger_iterable
def __getitem__(self, index: int) -> LightningLoggerBase:
    """Return the logger at *index* (the iterable is materialized per call)."""
    loggers = list(self._logger_iterable)
    return loggers[index]
def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None:
    # Fan the checkpoint notification out to every wrapped logger.
    for logger in self._logger_iterable:
        logger.after_save_checkpoint(checkpoint_callback)
def update_agg_funcs(
self,
agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None,
agg_default_func: | |
``Upgrade`` class provides methods to configure the upgrade of this
appliance from an existing vCenter appliance. This class was added in
vSphere API 6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.deployment.upgrade'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
    """
    :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
    :param config: Configuration to be used for creating the stub.
    """
    # Bind this service interface to its generated stub implementation.
    VapiInterface.__init__(self, config, _UpgradeStub)
class VcsaEmbeddedSpec(VapiStruct):
    """
    The ``Upgrade.VcsaEmbeddedSpec`` class contains information used to upgrade
    an Embedded vCenter Server appliance. This class was added in vSphere API
    6.7.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 ceip_enabled=None,
                ):
        """
        :type ceip_enabled: :class:`bool`
        :param ceip_enabled: Customer experience improvement program should be enabled or
            disabled for this embedded vCenter Server upgrade. This attribute
            was added in vSphere API 6.7.
        """
        self.ceip_enabled = ceip_enabled
        VapiStruct.__init__(self)

# Register the wire-level binding type for this struct with the vAPI runtime.
# ``type`` here is the vAPI bindings type module (imported elsewhere in this
# file), not the Python builtin.
VcsaEmbeddedSpec._set_binding_type(type.StructType(
    'com.vmware.vcenter.deployment.upgrade.vcsa_embedded_spec', {
        'ceip_enabled': type.BooleanType(),
    },
    VcsaEmbeddedSpec,
    False,
    None))
class PscSpec(VapiStruct):
    """
    The ``Upgrade.PscSpec`` class contains information used to upgrade a
    Platform Service Controller appliance. This class was added in vSphere API
    6.7.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 ceip_enabled=None,
                ):
        """
        :type ceip_enabled: :class:`bool`
        :param ceip_enabled: Customer experience improvement program should be enabled or
            disabled for this Platform Services Controller upgrade. This
            attribute was added in vSphere API 6.7.
        """
        self.ceip_enabled = ceip_enabled
        VapiStruct.__init__(self)

# Register the wire-level binding type for this struct with the vAPI runtime.
# ``type`` here is the vAPI bindings type module, not the Python builtin.
PscSpec._set_binding_type(type.StructType(
    'com.vmware.vcenter.deployment.upgrade.psc_spec', {
        'ceip_enabled': type.BooleanType(),
    },
    PscSpec,
    False,
    None))
class SourceApplianceSpec(VapiStruct):
    """
    The ``Upgrade.SourceApplianceSpec`` class contains information used to
    connect to the appliance used as the source for an upgrade. This class was
    added in vSphere API 6.7.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    # BUG FIX: the two password parameters and their assignments had been
    # replaced by literal ``<PASSWORD>`` placeholders (invalid syntax).
    # Restored the conventional ``None`` defaults and plain attribute
    # assignments used by every other generated binding in this file.
    def __init__(self,
                 hostname=None,
                 https_port=None,
                 ssl_thumbprint=None,
                 ssl_verify=None,
                 sso_admin_username=None,
                 sso_admin_password=None,
                 root_password=None,
                 ssh_verify=None,
                 ssh_thumbprint=None,
                ):
        """
        :type hostname: :class:`str`
        :param hostname: The IP address or DNS resolvable name of the source appliance. This
            attribute was added in vSphere API 6.7.
        :type https_port: :class:`long` or ``None``
        :param https_port: The HTTPS port of the source appliance. This attribute was added in
            vSphere API 6.7.
            If None, port 443 will be used.
        :type ssl_thumbprint: :class:`str` or ``None``
        :param ssl_thumbprint: SHA1 thumbprint of the server SSL certificate will be used for
            verification. This attribute was added in vSphere API 6.7.
            This attribute is only relevant if ``sslVerify`` is None or has the
            value true.
        :type ssl_verify: :class:`bool` or ``None``
        :param ssl_verify: SSL verification should be enabled or disabled for the source
            appliance validations. By default it is enabled and will use SSL
            certificate for verification. If thumbprint is provided, will use
            thumbprint for the verification. This attribute was added in
            vSphere API 6.7.
            If None, ssl_verify true will be used.
        :type sso_admin_username: :class:`str`
        :param sso_admin_username: The SSO administrator account on the source appliance. This
            attribute was added in vSphere API 6.7.
        :type sso_admin_password: :class:`str`
        :param sso_admin_password: The SSO administrator account password. This attribute was added in
            vSphere API 6.7.
        :type root_password: :class:`str`
        :param root_password: The password of the root user on the source appliance. This
            attribute was added in vSphere API 6.7.
        :type ssh_verify: :class:`bool` or ``None``
        :param ssh_verify: Appliance SSH verification should be enabled or disabled. By
            default it is disabled and will not use any verification. If
            thumbprint is provided, thumbprint verification will be performed.
            This attribute was added in vSphere API 6.7.
            If None, ssh_verify true will be used.
        :type ssh_thumbprint: :class:`str` or ``None``
        :param ssh_thumbprint: MD5 thumbprint of the server SSH key will be used for verification.
            This attribute was added in vSphere API 6.7.
            This attribute is only relevant if ``sshVerify`` is None or has the
            value true.
        """
        self.hostname = hostname
        self.https_port = https_port
        self.ssl_thumbprint = ssl_thumbprint
        self.ssl_verify = ssl_verify
        self.sso_admin_username = sso_admin_username
        self.sso_admin_password = sso_admin_password
        self.root_password = root_password
        self.ssh_verify = ssh_verify
        self.ssh_thumbprint = ssh_thumbprint
        VapiStruct.__init__(self)

# Register the wire-level binding type; both passwords are declared as
# SecretType so the runtime masks them in logs and serialized output.
SourceApplianceSpec._set_binding_type(type.StructType(
    'com.vmware.vcenter.deployment.upgrade.source_appliance_spec', {
        'hostname': type.StringType(),
        'https_port': type.OptionalType(type.IntegerType()),
        'ssl_thumbprint': type.OptionalType(type.StringType()),
        'ssl_verify': type.OptionalType(type.BooleanType()),
        'sso_admin_username': type.StringType(),
        'sso_admin_password': type.SecretType(),
        'root_password': type.SecretType(),
        'ssh_verify': type.OptionalType(type.BooleanType()),
        'ssh_thumbprint': type.OptionalType(type.StringType()),
    },
    SourceApplianceSpec,
    False,
    None))
class UpgradeSpec(VapiStruct):
    """
    The ``Upgrade.UpgradeSpec`` class contains information used to configure
    the appliance upgrade. This class was added in vSphere API 6.7.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 source_appliance=None,
                 source_location=None,
                 history=None,
                 vcsa_embedded=None,
                 psc=None,
                 auto_answer=None,
                ):
        """
        :type source_appliance: :class:`Upgrade.SourceApplianceSpec`
        :param source_appliance: Source appliance spec. This attribute was added in vSphere API 6.7.
        :type source_location: :class:`LocationSpec`
        :param source_location: Source location spec. This attribute was added in vSphere API 6.7.
        :type history: :class:`HistoryMigrationSpec` or ``None``
        :param history: Determines how vCenter history will be migrated during the upgrade
            process. vCenter history consists of:

            * Statistics
            * Events
            * Tasks

            By default only core data will be migrated. Use this spec to define
            which part of vCenter history data will be migrated and when. This
            attribute was added in vSphere API 6.7.
            If None, only core database content will be migrated.
        :type vcsa_embedded: :class:`Upgrade.VcsaEmbeddedSpec` or ``None``
        :param vcsa_embedded: Information that are specific to this embedded vCenter Server. This
            attribute was added in vSphere API 6.7.
            If None, ceip_enabled for embedded vcenter server upgrade will
            default to enabled.
        :type psc: :class:`Upgrade.PscSpec` or ``None``
        :param psc: Information that are specific to this Platform Services Controller.
            This attribute was added in vSphere API 6.7.
            If None, ceip_enabled for psc upgrade will default to enabled.
        :type auto_answer: :class:`bool` or ``None``
        :param auto_answer: Use the default option for any questions that may come up during
            appliance configuration. This attribute was added in vSphere API
            6.7.
            If None, will default to false.
        """
        self.source_appliance = source_appliance
        self.source_location = source_location
        self.history = history
        self.vcsa_embedded = vcsa_embedded
        self.psc = psc
        self.auto_answer = auto_answer
        VapiStruct.__init__(self)

# Register the wire-level binding type for this struct with the vAPI runtime;
# ``type`` here is the vAPI bindings type module, not the Python builtin.
UpgradeSpec._set_binding_type(type.StructType(
    'com.vmware.vcenter.deployment.upgrade.upgrade_spec', {
        'source_appliance': type.ReferenceType(__name__, 'Upgrade.SourceApplianceSpec'),
        'source_location': type.ReferenceType(__name__, 'LocationSpec'),
        'history': type.OptionalType(type.ReferenceType(__name__, 'HistoryMigrationSpec')),
        'vcsa_embedded': type.OptionalType(type.ReferenceType(__name__, 'Upgrade.VcsaEmbeddedSpec')),
        'psc': type.OptionalType(type.ReferenceType(__name__, 'Upgrade.PscSpec')),
        'auto_answer': type.OptionalType(type.BooleanType()),
    },
    UpgradeSpec,
    False,
    None))
def get(self):
    """
    Get the UpgradeSpec parameters used to configure the ongoing appliance
    upgrade. This method was added in vSphere API 6.7.

    :rtype: :class:`Upgrade.UpgradeSpec`
    :return: UpgradeSpec parameters being used to configure appliance upgrade.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the caller is not authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if appliance is not in UPGRADE_PROGRESS state.
    """
    # Delegates to the vAPI stub; 'get' takes no input parameters.
    return self._invoke('get', None)
def check(self,
          spec,
          ):
    """
    Run sanity checks using the UpgradeSpec parameters passed. This method
    was added in vSphere API 6.7.

    :type spec: :class:`Upgrade.UpgradeSpec`
    :param spec: UpgradeSpec parameters to run sanity check on.
    :rtype: :class:`CheckInfo`
    :return: CheckInfo containing the check results.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the caller is not authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if source credentials are not valid.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if source container credentials are not valid.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if passed arguments are invalid.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the appliance is not in INITIALIZED state.
    """
    # Delegates to the vAPI stub; the spec is validated server-side.
    return self._invoke('check',
                        {
                            'spec': spec,
                        })
def start(self,
          spec,
          ):
    """
    Start the appliance upgrade. This method was added in vSphere API
    6.7.

    :type spec: :class:`Upgrade.UpgradeSpec`
    :param spec: UpgradeSpec parameters to configure the appliance upgrade.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the caller is not authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if source credentials are not valid.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if source container credentials are not valid.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if passed arguments are invalid.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the appliance is not in INITIALIZED state.
    """
    return self._invoke('start',
                        {
                            'spec': spec,
                        })
def cancel(self):
    """
    Cancel the appliance upgrade that is in progress. This method was added
    in vSphere API 6.7.

    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the caller is not authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the appliance is not in CONFIG_IN_PROGRESS state and if the
        operation is not INSTALL.
    """
    # Delegates to the vAPI stub; 'cancel' takes no input parameters.
    return self._invoke('cancel', None)
class Question(VapiInterface):
"""
The ``Question`` class provides methods to get the question raised during
deployment and to answer them. This class was added in vSphere API 6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.deployment.question'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used | |
<filename>venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2_eni.py
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible metadata: marks this module as a community-supported preview.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
of the network interface.
version_added: "2.0"
author: "<NAME> (@wimnat)"
options:
eni_id:
description:
- The ID of the ENI (to modify); if null and state is present, a new eni will be created.
instance_id:
description:
- Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or
detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
private_ip_address:
description:
- Private IP address.
subnet_id:
description:
- ID of subnet in which to create the ENI.
description:
description:
- Optional description of the ENI.
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present. Since version 2.2, you
can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
state:
description:
- Create or delete ENI
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
default: 0
attached:
description:
- Specifies if network interface should be attached or detached from instance. If omitted, attachment status
won't change
default: 'yes'
version_added: 2.2
type: bool
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id
to None or when deleting an interface with state=absent.
default: 'no'
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
You can only specify this flag when the interface is being modified, not on creation.
required: false
secondary_private_ip_addresses:
description:
- A list of IP addresses to assign as secondary IP addresses to the network interface.
This option is mutually exclusive of secondary_private_ip_address_count
required: false
version_added: 2.2
purge_secondary_private_ip_addresses:
description:
- To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
Set secondary_private_ip_addresses to an empty list to purge all secondary addresses.
default: no
version_added: 2.5
secondary_private_ip_address_count:
description:
- The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of secondary_private_ip_addresses
required: false
version_added: 2.2
extends_documentation_fragment:
- aws
- ec2
notes:
- This module identifies an ENI based on either the eni_id, a combination of private_ip_address and subnet_id,
or a combination of instance_id and device_id. Any of these options will let you specify a particular ENI.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI with two secondary addresses
- ec2_eni:
subnet_id: subnet-xxxxxxxx
state: present
secondary_private_ip_address_count: 2
# Assign a secondary IP address to an existing ENI
# This will purge any existing IPs
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_addresses:
- 172.16.1.1
# Remove any secondary IP addresses from an existing ENI
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_address_count: 0
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Update an ENI identifying it by private_ip_address and subnet_id
- ec2_eni:
subnet_id: subnet-xxxxxxx
private_ip_address: 172.16.1.1
description: "My new description"
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: "{{ eni.interface.id }}"
delete_on_termination: true
'''
RETURN = '''
interface:
description: Network interface attributes
returned: when state != absent
type: complex
contains:
description:
description: interface description
type: string
sample: Firewall network interface
groups:
description: list of security groups
type: list of dictionaries
sample: [ { "sg-f8a8a9da": "default" } ]
id:
description: network interface id
type: string
sample: "eni-1d889198"
mac_address:
description: interface's physical address
type: string
sample: "00:00:5E:00:53:23"
owner_id:
description: aws account id
type: string
sample: 812381371
private_ip_address:
description: primary ip address of this interface
type: string
sample: 10.20.30.40
private_ip_addresses:
description: list of all private ip addresses associated to this interface
type: list of dictionaries
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
source_dest_check:
description: value of source/dest check flag
type: boolean
sample: True
status:
description: network interface status
type: string
sample: "pending"
subnet_id:
description: which vpc subnet the interface is bound
type: string
sample: subnet-b0a0393c
vpc_id:
description: which vpc this network interface is bound
type: string
sample: vpc-9a9a9da
'''
import time
import re
try:
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
ec2_argument_spec, get_aws_connection_info,
get_ec2_security_group_ids_from_names)
def get_eni_info(interface):
    """Build a plain-dict description of a boto ENI object for module output.

    The ``attachment`` key is present only when the interface is attached.
    """
    # Private addresses (primary flag preserved per address)
    private_addresses = [
        {'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}
        for ip in interface.private_ip_addresses
    ]
    interface_info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
        'private_ip_addresses': private_addresses,
    }
    attachment = interface.attachment
    if attachment is not None:
        interface_info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }
    return interface_info
def wait_for_eni(eni, status):
    """Poll every 3 seconds until the ENI reaches the desired attachment state.

    :param eni: boto NetworkInterface object; refreshed in place via update()
    :param status: target state, "attached" or "detached"

    NOTE(review): there is no timeout here -- if AWS never converges to the
    requested state this loops forever; confirm callers accept that.
    """
    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break
def create_eni(connection, vpc_id, module):
    """Create a new ENI (optionally attached, with secondary IPs) and exit the module.

    Any boto failure after creation deletes the just-created ENI again, so a
    failed run does not leak a half-configured interface. Always terminates
    via module.exit_json / module.fail_json.
    """
    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # Legacy calling convention (pre-2.2): the literal string 'None' means
    # "no instance".
    if instance_id == 'None':
        instance_id = None
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False
    try:
        eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
        if attached and instance_id is not None:
            try:
                eni.attach(instance_id, device_index)
            except BotoServerError:
                # Roll back: do not leave an orphan ENI behind.
                eni.delete()
                raise
            # Wait to allow creation / attachment to finish
            wait_for_eni(eni, "attached")
            eni.update()
        if secondary_private_ip_address_count is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
            except BotoServerError:
                eni.delete()
                raise
        if secondary_private_ip_addresses is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
            except BotoServerError:
                eni.delete()
                raise
        changed = True
    except BotoServerError as e:
        module.fail_json(msg=e.message)
    module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, vpc_id, module, eni):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
do_detach = module.params.get('state') == 'detached'
device_index = module.params.get("device_index")
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
changed = False
try:
if description is not None:
if eni.description != description:
connection.modify_network_interface_attribute(eni.id, "description", description)
changed = True
if len(security_groups) > 0:
groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
changed = True
if source_dest_check is not None:
if eni.source_dest_check != source_dest_check:
connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
changed | |
k, v in config.items():
if type(v) == bytes:
# noinspection PyArgumentList
config[k] = str(v, encoding="utf8")
config_json = json.dumps(config, sort_keys=True, indent=4)
with gzip.GzipFile(join(SETTINGS_DIR, "settings.json.gz"), "w+") as gz_file:
try:
gz_file.write(config_json)
except TypeError: # Python3
gz_file.write(config_json.encode("utf8"))
except IOError as error:
print("On saving settings:", error)
@staticmethod
def pickle(array):
    """Serialize a QByteArray's raw bytes for storage in the settings file.

    :type array: QByteArray
    :param array: The data
    """
    dumped = pickle.dumps(array.data())
    if PYTHON2:
        return dumped
    # Python 3: the settings file stores text, so decode the pickle bytes.
    # noinspection PyArgumentList
    return str(dumped, encoding="unicode_escape")
@staticmethod
def unpickle(key):
    """ Un-serialize some binary settings.

    Tolerates settings files written by the other major Python version:
    such values are silently skipped (returns None) instead of crashing.

    :type key: str|unicode
    :param key: The dict key to be un-pickled
    """
    try:
        if PYTHON2:
            try:
                value = pickle.loads(str(app_config.get(key)))
            except UnicodeEncodeError:  # settings file from Python3
                return
        else:
            try:
                # Stored as text; "latin" round-trips arbitrary byte values.
                # noinspection PyArgumentList
                pickled = pickle.loads(bytes(app_config.get(key), encoding="latin"))
                value = QByteArray(pickled)
            except (UnicodeDecodeError, ImportError):  # settings file from Python2
                return
    except pickle.UnpicklingError as err:
        print("While unPickling:", err)
        return
    return value
# ___ ___________________ UTILITY STUFF _________________________
def thread_cleanup(self):
    """Drop all finished threads from ``self.threads``.

    BUG FIX: the original removed items from the list while iterating over
    it, which skips the element following each removal (e.g. two adjacent
    finished threads left one behind). Rebuild the list in place instead;
    slice assignment keeps any external references to the list valid.
    """
    self.threads[:] = [thread for thread in self.threads
                       if not thread.isFinished()]
def popup(self, title, text, icon=QMessageBox.Warning, buttons=1,
          extra_text="", check_text=""):
    """ Creates and returns a Popup dialog.

    The dialog is executed modally before returning; the checkbox state is
    stored on the returned object as ``popup.checked``.

    :type title: str|unicode
    :parameter title: The Popup's title
    :type text: str|unicode
    :parameter text: The Popup's text
    :type icon: int|unicode|QPixmap
    :parameter icon: The Popup's icon
    :type buttons: int
    :parameter buttons: The number of the Popup's buttons
    :type extra_text: str|unicode
    :parameter extra_text: The extra button's text (button is omitted if "")
    :type check_text: str|unicode
    :parameter check_text: The checkbox's text (checkbox is omitted if "")
    """
    popup = XMessageBox(self)
    popup.setWindowIcon(self.ico_app)
    # Accept an icon given as enum value, image path, or ready-made pixmap.
    # NOTE(review): ``unicode`` implies a Python 2 compatibility shim is in
    # scope at module level -- confirm before dropping Python 2 support.
    if type(icon) == QMessageBox.Icon:
        popup.setIcon(icon)
    elif type(icon) == unicode:
        popup.setIconPixmap(QPixmap(icon))
    elif type(icon) == QPixmap:
        popup.setIconPixmap(icon)
    else:
        raise TypeError("Wrong icon type!")
    popup.setWindowTitle(title)
    # Extra newline keeps the text from crowding the checkbox underneath.
    popup.setText(text + "\n" if check_text else text)
    if buttons == 1:
        popup.addButton(_("Close"), QMessageBox.RejectRole)
    elif buttons == 2:
        popup.addButton(_("OK"), QMessageBox.AcceptRole)
        popup.addButton(_("Cancel"), QMessageBox.RejectRole)
    elif buttons == 3:
        popup.addButton(_("Yes"), QMessageBox.AcceptRole)
        popup.addButton(_("No"), QMessageBox.RejectRole)
    if extra_text:  # add an extra button
        popup.addButton(extra_text, QMessageBox.ApplyRole)
    if check_text:  # hide check_box if no text for it
        popup.check_box.setText(check_text)
    else:
        popup.check_box.hide()
    popup.checked = popup.exec_()[1]
    return popup
def passed_files(self):
    """Forward file paths given on the command line to the drop handler.

    Mirrors a file-drop event for ``sys.argv[1:]``; does nothing when no
    (or only an empty) argument was passed.

    BUG FIX: the original wrapped the handler call in ``try/except
    IndexError``, which also silently swallowed any IndexError raised
    *inside* ``on_file_table_fileDropped``. Test the argument list
    explicitly instead of catching the exception.
    """
    # args = QApplication.instance().arguments()
    cli_paths = sys.argv[1:]
    if cli_paths and cli_paths[0]:
        self.on_file_table_fileDropped(cli_paths)
def open_file(self, path):
    """ Opens a file with its associated application.

    Shows an error popup when the target cannot be opened.

    :type path: str|unicode
    :param path: The path to the file to be opened
    """
    try:
        if sys.platform == "win32":
            # Windows has a dedicated API for "open with default app".
            os.startfile(path)
        else:
            # macOS uses `open`; other unixes use `xdg-open`.
            opener = "open" if sys.platform == "darwin" else "xdg-open"
            subprocess.call([opener, path])
    except OSError:
        self.popup(_("Error opening target!"),
                   _('"{}" does not exists!').format(path))
def copy_text_2clip(self, text):
    """ Copy a text to the system clipboard. Empty text is ignored.

    :type text: str|unicode
    """
    if text:
        data = QMimeData()
        data.setText(text)
        self.clip.setMimeData(data)
def recalculate_md5(self, file_path):
    """ Recalculates the MD5 for a book and saves it to the metadata file.

    Asks for confirmation first; updates the currently selected row.

    :type file_path: str|unicode
    :param file_path: The path to the book
    """
    popup = self.popup(_("Confirmation"),
                       _("This action can not be undone.\nContinue?"), buttons=2)
    if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
        row = self.sel_idx.row()
        data = self.file_table.item(row, TITLE).data(Qt.UserRole)
        path = self.file_table.item(row, PATH).text()
        md5 = self.md5_from_file(file_path)
        # The checksum may live at top level ("partial_md5_checksum") or
        # nested under "stats" -- update whichever keys exist.
        if "partial_md5_checksum" in data:
            data["partial_md5_checksum"] = md5
        if "stats" in data and "md5" in data["stats"]:
            data["stats"]["md5"] = md5
        self.file_table.item(row, TITLE).setData(Qt.UserRole, data)
        self.save_book_data(path, data)
        self.popup(_("Information"), _("The MD5 was recalculated and saved!"),
                   QMessageBox.Information)
@staticmethod
def md5_from_file(file_path):
""" Calculates the MD5 for a file
:type file_path: str|unicode
:param file_path: The path to the file
:return: str|unicode|None
"""
if isfile(file_path):
with open(file_path, "rb") as file_:
md5 = hashlib.md5()
sample = file_.read(1024)
if sample:
md5.update(sample)
for i in range(11):
file_.seek((4 ** i) * 1024)
sample = file_.read(1024)
if sample:
md5.update(sample)
else:
break
return md5.hexdigest()
@staticmethod
def get_time_str(sec):
""" Takes seconds and returns the formatted time value
:type sec: int
:param sec: The seconds
"""
return "{:02}:{:02}:{:02}".format(int(sec / 3600),
int(sec % 3600 / 60),
int(sec % 60))
def auto_check4update(self):
""" Checks online for an updated version
"""
self.db_maintenance()
self.opened_times += 1
if self.opened_times == 20:
text = _("Since you are using {} for some time now, perhaps you find it "
"useful enough to consider a donation.\nWould you like to visit "
"the PayPal donation page?\n\nThis is a one-time message. "
"It will never appear again!").format(APP_NAME)
popup = self.popup(_("A reminder..."), text,
icon=":/stuff/paypal76.png", buttons=3)
if popup.buttonRole(popup.clickedButton()) == QMessageBox.AcceptRole:
webbrowser.open("https://www.paypal.com/cgi-bin/webscr?"
"cmd=_s-xclick%20&hosted_button_id=MYV4WLTD6PEVG")
return
# noinspection PyBroadException
try:
version_new = self.about.get_online_version()
# except URLError: # can not connect
except Exception:
return
if not version_new:
return
version = LooseVersion(self.version)
skip_version = LooseVersion(self.skip_version)
if version_new > version and version_new != skip_version:
popup = self.popup(_("Newer version exists!"),
_("There is a newer version (v.{}) online.\n"
"Open the site to download it now?")
.format(version_new),
icon=QMessageBox.Information, buttons=2,
check_text=_("Don\"t alert me for this version again"))
if popup.checked:
self.skip_version = version_new
if popup.clickedButton().text() == "OK":
webbrowser.open("http://www.noembryo.com/apps.php?kohighlights")
def db_maintenance(self):
""" Compacts db every three months
"""
if self.get_db_book_count(): # db has books
now = datetime.now()
delta = now - datetime.strptime(self.date_vacuumed, DATE_FORMAT)
if delta.days > 90: # after three months
self.vacuum_db(info=False) # compact db
self.date_vacuumed = now.strftime(DATE_FORMAT) # reset vacuumed date
def write_to_log(self, text):
""" Append text to the QTextEdit.
"""
# self.about.log_txt.appendPlainText(text)
cursor = self.about.log_txt.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self.about.log_txt.setTextCursor(cursor)
self.about.log_txt.ensureCursorVisible()
if self.sender().objectName() == "err":
text = "\033[91m" + text + "\033[0m"
# noinspection PyBroadException
try:
sys.__stdout__.write(text)
except Exception: # a problematic print that WE HAVE to ignore or we LOOP
pass
@staticmethod
def delete_logs():
""" Keeps the number of log texts steady.
"""
_, _, files = next(os.walk(SETTINGS_DIR))
files = sorted(i for i in files if i.startswith("error_log"))
if len(files) > 3:
for name in files[:-3]:
try:
os.remove(join(SETTINGS_DIR, name))
except WindowsError: # the file is locked
pass
    def on_check_btn(self):
        # Intentional no-op placeholder — presumably the slot for a
        # "check" UI button; confirm against the signal connections.
        pass
class KOHighlights(QApplication):
    def __init__(self, *args, **kwargs):
        """ Application object: decodes argv, builds the argument parser,
        creates the main window (Base) and enters the Qt event loop.
        """
        super(KOHighlights, self).__init__(*args, **kwargs)
        # decode app's arguments
        try:
            sys.argv = [i.decode(sys.getfilesystemencoding()) for i in sys.argv]
        except AttributeError:  # str has no .decode in Python 3 - argv is already text
            pass
        self.parser = argparse.ArgumentParser(prog=APP_NAME,
                                              description=_("{} v{} - A KOReader's "
                                                            "highlights converter")
                                              .format(APP_NAME, __version__),
                                              epilog=_("Thanks for using %s!") % APP_NAME)
        self.parser.add_argument("-v", "--version", action="version",
                                 version="%(prog)s v{}".format(__version__))
        self.base = Base()
        # NOTE(review): in a frozen Windows build parse_args() is skipped -
        # presumably to avoid console interaction in the windowed exe; confirm.
        if getattr(sys, "frozen", False):  # the app is compiled
            if not sys.platform.lower().startswith("win"):
                self.parse_args()
        else:
            self.parse_args()
        # # hide console window, but only under Windows and only if app is frozen
        # on_windows = sys.platform.lower().startswith("win")
        # compiled = getattr(sys, 'frozen', False)
        # if on_windows and compiled:
        #     hide_console()
        #     self.parse_args()
        # else:
        #     self.parse_args()
        self.base.setWindowTitle(APP_NAME)
        self.exec_()  # blocks here until the Qt event loop exits
        # show_console() if on_windows and compiled else None
# ___ ___________________ CLI STUFF _____________________________
    def parse_args(self):
        """ Parse the command line parameters that are passed to the program.

        NOTE: -x/--use_cli and -o/--output make each other required (each
        one's ``required=`` checks for the other flag in sys.argv), so a
        CLI-only run always has an output target.
        """
        self.parser.add_argument("paths", nargs="*",
                                 help="The paths to input files or folder")
        self.parser.add_argument("-x", "--use_cli", required="-o" in sys.argv,
                                 help="Use the command line interface only (exit the "
                                      "app after finishing)", action="store_true",
                                 default=False)
        # self.parser.add_argument("-i", "--input", required="-x" in sys.argv,
        #                          help="The path to input files or folder")
        # sort_group = self.parser.add_mutually_exclusive_group()
        self.parser.add_argument("-s", "--sort_page", action="store_true", default=False,
                                 help="Sort highlights by page, otherwise sort by date")
        self.parser.add_argument("-m", "--merge", action="store_true", default=False,
                                 help="Merge the highlights of all input books in a "
                                      "single file, otherwise exports every book's "
                                      "highlights to a different file")
        self.parser.add_argument("-f", "--html", action="store_true", default=False,
                                 help="Exports highlights in .html format "
                                      "instead of .txt")
        self.parser.add_argument("-np", "--no_page", action="store_true", default=False,
                                 help="Exclude the page number of the highlight")
        self.parser.add_argument("-nd", "--no_date", action="store_true", default=False,
                                 help="Exclude the date of the highlight")
        self.parser.add_argument("-nh", "--no_highlight",
                                 action="store_true", default=False,
                                 help="Exclude the highlighted text of the highlight")
        self.parser.add_argument("-nc", "--no_comment",
                                 action="store_true", default=False,
                                 help="Exclude the comment of the highlight")
        self.parser.add_argument("-o", "--output", required="-x" in sys.argv,
                                 help="The filename of the file (in merge mode) or "
                                      "the directory for saving the highlight files")
        # args, paths = self.parser.parse_known_args()
        args = self.parser.parse_args()
        # CLI mode: export the highlights and quit without showing the GUI.
        if args.use_cli:
            self.cli_save_highlights(args)
            sys.exit(0)  # quit the app if cli execution
        # if args.paths:
        #     self.on_file_table_fileDropped(args.paths)
def cli_save_highlights(self, args):
""" Saves highlights using the command line interface
:type args: argparse.Namespace
:param args: The parsed cli args
"""
# pprint(args.__dict__)
files = self.get_lua_files(args.paths)
if not files:
return
path = abspath(args.output)
if not args.merge: # save to different files
if not isdir(path):
self.parser.error("The output path (-o/--output) must point "
"to an existing directory!")
saved = self.cli_save_multi_files(args, files)
else: # save combined highlights to one file
if isdir(path):
ext = "an .html" if args.html else "a | |
#!/usr/bin/env python3
#
# relayenforce.py
#
# Copyright (c) 2020 Infoblox, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DESCRIPTION:
# This script will remove unauthorized/invalid DHCP relay helpers that are
# configured on Cisco devices and then replace them using a delimited
# key value list defined in NetMRI.
#
# The script assumes that the relays are configured on a per-interface basis
# and that the relay is in the global VRF. It additionally will only run on
# IOS, IOS-XE, NX-OS, and ASA/ASAv devices.
#
# PREREQUISITES:
# 1. NetMRI version 7.5+
# 2. Sandbox must have CiscoConfParse module installed.
from infoblox_netmri.easy import NetMRIEasy
from ciscoconfparse import CiscoConfParse
import re
#------------------------------------------------------------------------------
# These are to just keep pylint happy.
# Comment or remove for production
api_url = "http://netmri"
http_username = "austin"
http_password = "<PASSWORD>"
job_id = 7
device_id = 31
batch_id = 8
relay_list_key = "list_row_1"
dry_run = "on"
#------------------------------------------------------------------------------
# Change the below global variables to match the list name, key name, and key
# value column name that is used in your NetMRI environment.
RELAY_LIST_NAME = "DHCP Relays"
RELAY_LIST_KEY = "Key"
RELAY_LIST_KEYVAL = "Relays"
RELAY_LIST_EXCLUSIONS = "Exclusions"
# Connection/job parameters handed to NetMRIEasy.
easyparams = {
    "api_url": api_url,
    "http_username": http_username,
    # BUG FIX: was the bare token `<PASSWORD>` (a SyntaxError); every other
    # entry references its placeholder variable defined above.
    "http_password": http_password,
    "job_id": job_id,
    "device_id": device_id,
    "batch_id": batch_id
}
# BEGIN-SCRIPT-BLOCK
#
# Script-Filter:
# $Vendor eq "Cisco"
# and $sysDescr like /IOS|NX-OS|Adaptive Security Appliance/
#
# Script-Timeout: 1200
#
# Script-Login:
# true
#
# Script-Variables:
# $relay_list_key string "Row ID Key from DHCP Relay List"
# $dry_run boolean
#
# END-SCRIPT-BLOCK
#------------------------------------------------------------------------------
class TargetDevice:
    """Represents the Cisco device this job runs against.

    On construction it detects the OS family from the sysDescr, collects
    the operationally up interfaces, and fills ``relay_intfs`` — a dict
    mapping an integer index to ``{'name': <interface>, 'relays': [...]}``
    for every interface that has DHCP relays configured.
    """
    def __init__(self, easy_class, device_class):
        """
        :param easy_class: connected NetMRIEasy session (CLI + logging).
        :param device_class: NetMRI device record for this target.
        """
        self.dis = easy_class  # NetMRI Easy instance
        self.device = device_class  # Device instance
        self.name = device_class.DeviceName  # Target name
        self.os_type = None  # Target OS type: "ASA", "NXOS" or "IOS"
        self.active_intfs = []  # Active interfaces on device
        self.relay_intfs = {}  # Configured relays (per interface)
        # Figure out what OS type the device is.
        # This will determine what CLI syntax to use.
        if re.search(r'Adaptive Security', self.device.DeviceSysDescr):
            self.os_type = "ASA"
        elif re.search(r'NX-OS', self.device.DeviceSysDescr):
            self.os_type = "NXOS"
        else:
            # CiscoConfParse defaults to IOS
            self.os_type = "IOS"
        self.dis.log_message("info", f"[~] Target: {self.name}")
        self.dis.log_message("info", f"[~] Syntax: {self.os_type}")
        self.dis.log_message("info", "-" * 72)
        # Get operational up/up interfaces
        self.dis.log_message("info", "[~] Getting active UP/UP interfaces ...")
        self.get_active_interfaces()
        # Get configured DHCP relays
        self.dis.log_message("info",
                             "[~] Searching for DHCP relays on interfaces ..."
                             )
        self.process_relay_interfaces()
    def enter_global_config(self):
        """
        Enter global configuration mode on the Cisco device.
        """
        self.dis.send_command("configure terminal")
    def exit_global_config(self, commit_config):
        """
        Exit global configuration.

        :param commit_config: when True, also saves the running config.
            The trailing carriage returns presumably acknowledge the
            destination-filename prompt — confirm on ASA/NX-OS.
        """
        self.dis.send_command("end")
        if commit_config is True:
            self.dis.send_command("copy running-config startup-config\r\r")
    def get_active_interfaces(self):
        """
        Get interfaces that are up/up with an IP address assigned.

        Fills ``self.active_intfs`` with the first whitespace-free token
        of each remaining output line (the interface-name column).
        """
        # Set up 'show' command to send to device
        # Command always starts with 'show'; the per-OS output filter
        # excludes header lines and down/unassigned interfaces.
        cmd = "show "
        if self.os_type == "ASA":
            cmd = cmd + "int ip br | ex ^Interface|Internal"
        elif self.os_type == "NXOS":
            cmd = cmd + "ip int br | ex \"(^$|Interface|down)\""
        else:  # self.os_type == "IOS"
            cmd = cmd + "ip int br | ex (Proto|unassign|down|Any|NVI)"
        # Send the show command
        raw_output = self.dis.send_command(cmd)
        # Regex the CLI output to get the interface list.
        self.active_intfs = re.findall(r'^([^\s]+)', raw_output, re.MULTILINE)
    def process_relay_interfaces(self):
        """
        Finds interfaces with DHCP relays configured and stores them
        in a dictionary.

        For each active interface the per-interface running config is
        parsed with CiscoConfParse and any relay addresses found become
        an entry in ``self.relay_intfs``.
        """
        if isinstance(self.active_intfs, list):
            for intf_id in self.active_intfs:
                # Get the raw 'show run' output
                cmd = "show run interface " + intf_id
                raw_output = self.dis.send_command(cmd)
                # Send the raw output to CiscoConfParse
                ccp_input = raw_output.splitlines()
                ccp_parse = CiscoConfParse(
                    ccp_input,
                    syntax=self.os_type.lower()
                )
                # Relay command differs between OS types
                if self.os_type == "ASA":
                    helper_re = r"dhcprelay\s+server\s+(\S+)$"
                elif self.os_type == "NXOS":
                    helper_re = r"ip\s+dhcp\s+relay\s+address\s+(\S+)$"
                else:  # self.os_type == "IOS"
                    helper_re = r"ip\s+helper-address\s+(\S+)$"
                # Iterate through child objects of the interface
                for intf_cfg_sect in ccp_parse.find_objects(r'^interface'):
                    # Empty list to hold the configured DHCP relays
                    relaylist = []
                    # Loop through the interface config and find
                    # configured DHCP relays.
                    for intf_cfg_obj in intf_cfg_sect.children:
                        # Regex out the relay IP address
                        dhcp_relay_addr = intf_cfg_obj.re_match_typed(
                            helper_re, default="__none__")
                        if dhcp_relay_addr != "__none__":
                            # Found a relay, so put it in the list.
                            relaylist.append(dhcp_relay_addr)
                    if len(relaylist) > 0:
                        # Capture the interface name the relays were
                        # detected on.
                        intf_name = re.search(r"^interface\s(.*)",
                                              intf_cfg_sect.text)
                        # Now that we have the interface name and the list of
                        # configured DHCP relays, we will store that in to the
                        # relay_intf dictionary (keys are insertion-order
                        # integers).
                        ri_key = len(self.relay_intfs)
                        self.relay_intfs[ri_key] = \
                            {
                                'name': intf_name.group(1),
                                'relays': relaylist
                            }
def main(easy):
easy.log_message("info", "-" * 72)
easy.log_message("info", "DHCP Relay Change and Enforce")
easy.log_message("info", "<NAME> - Infoblox - 2020")
easy.log_message("info", "-" * 72)
# Instantiate the current device (TargetDevice class)
target = TargetDevice(easy, easy.get_device())
# If the dictionary length is zero then no configured DHCP relays were
# found and we can exit early. Otherwise, DHCP relays were found and
# there is more work to do.
if len(target.relay_intfs) == 0:
easy.log_message("info",
f"[+] SUCCESS: No DHCP relays were found on {target.name}."
)
exit(0)
# The real work begins.
# Dictionary contains interfaces with relays. So let's check and fix them.
else:
easy.log_message("info", "-" * 72)
# If the relay_list_key wasn't supplied by the user, then it will be
# assigned the default value of the Script-Variable.
if relay_list_key == "Row ID Key from DHCP Relay List": # <- Default
easy.log_message("error",
"[!] ERROR: A list key must be supplied."
)
# Exit with error.
exit(1)
else:
# Get the authorized DHCP relay list from the user supplied key.
relay_list = target.dis.get_list_value(
RELAY_LIST_NAME,
RELAY_LIST_KEY,
str(relay_list_key),
RELAY_LIST_KEYVAL,
'NOTFOUND'
)
if relay_list == "NOTFOUND":
easy.log_message("error",
f"[!] Key \"{relay_list_key}\" does not exist in "
f"{RELAY_LIST_NAME}."
)
exit(1)
else:
# Split according to delimeter in list.
auth_relays = relay_list.split(',')
# Get any excluded relays from that same list, reuse relay_list
relay_list = target.dis.get_list_value(
RELAY_LIST_NAME,
RELAY_LIST_KEY,
str(relay_list_key),
RELAY_LIST_EXCLUSIONS,
'NOTFOUND'
)
if relay_list != "NOTFOUND":
excluded_relays = relay_list.split(',')
# Free relay_list
relay_list = None
# Go through each interface and look at the configured relays.
# If the relay does not exist in the NetMRI list, then it will
# be saved for de-configuring later.
if dry_run == "off":
target.enter_global_config()
# Cycle through all of the interfaces that had
# DHCP relays configured.
for intf_id in target.relay_intfs:
bad_relays = []
easy.log_message("info",
"[+] Working in interface: "
f"{target.relay_intfs[intf_id]['name']}"
)
# Check if the relay is in the list.
for relay in target.relay_intfs[intf_id]['relays']:
# Relay isn't in the list, so we keep it for removal.
if relay not in auth_relays and relay not in excluded_relays:
bad_relays.append(relay)
# First, enter the interface sub-config to begin work.
cmd = "interface " + str(target.relay_intfs[intf_id]['name'])
if dry_run == "on":
easy.log_message("debug",
f"[-] DEBUG: target.dis.send_command({cmd})"
)
else:
target.dis.send_command(cmd)
# Now we'll configure the DHCP relays from the list.
# First, prepare the command to deconfigure the relays.
if target.os_type == "ASA":
relay_cmd_prefix = "dhcprelay server "
elif target.os_type == "NXOS":
relay_cmd_prefix = "ip dhcp relay address "
else:
relay_cmd_prefix = "ip helper-address "
# If there was relays found then we'll remove them.
if len(bad_relays) > 0:
# Remove the "bad" relays.
for badrelay in bad_relays:
easy.log_message("info",
f"[-] ... Removing {badrelay} "
f"from {target.relay_intfs[intf_id]['name']}"
)
cmd = "no " + relay_cmd_prefix + str(badrelay)
if dry_run == "on":
easy.log_message("debug",
f"[-] DEBUG: target.dis.send_command({cmd})"
)
else:
target.dis.send_command(cmd)
# Now add the "good" relays in from the list.
for goodrelay in auth_relays:
easy.log_message("info",
f"[+] ... Adding | |
united': '436f05df',
'whyteleafe': '0cb86048',
'wick & barnham united': '637a268c',
'wick barnham united': '637a268c',
'widnes': '1d19434e',
'wigan athletic': 'e59ddc76',
'willand rovers': '3bf9b5e5',
'wimbledon': '3679c494',
'wimborne town': 'f3a2ba41',
'winchester city': '82cfb72a',
'windsor': '2b534fac',
'wingate & finchley': '32a87154',
'wingate finchley': '32a87154',
'winsford united': 'e90d6cd6',
'winslow united': 'edd0e873',
'winterton rangers': '95161f9a',
'wisbech town': 'e1b8d29b',
'witham town': '4361d7f9',
'witton albion': '8a79714f',
'wivenhoe town': '9ee75fde',
'wodson park': '441530ba',
'woking': 'c30e88bc',
'wolverhampton casuals': 'b7d7d1f9',
'wolverhampton sporting c.f.c.': 'c708fffd',
'wolverhampton sportingc': 'c708fffd',
'wolverhampton wanderers': '8cec06e1',
'wolverhampton wanderers u23': '79cadc09',
'woodbridge town': '798d6e7a',
'woodford town': 'f034ffd5',
'woodley town': '0a78c053',
'wootton bassett town': '9a02e66f',
'worcester city': '526a6c40',
'workington afc': 'ee7e1f51',
'worksop town': '7ee8d7c2',
'worthing': 'cc9afa4b',
'worthing united': '41dae1df',
'wrexham': 'dad7970b',
'wroxham': 'd46965c7',
'wycombe wanderers': '43c2583e',
'yate town': 'dd903901',
'yaxley': '65f298d4',
'yeovil town': 'a506e4a2',
'yeovil town lfc': 'a506e4a2',
'york city': 'e272e7a8',
'yorkshire amateur afc': '66379800',
'flora': '719d83f1',
'flora tallinn': 'aee32dcc',
'infonet': '82d952b5',
'levadia tallinn': '112c9642',
'santos tartu': '0d984553',
'tvmk': 'a288b442',
'jk narva trans': '8a9775ad',
'jk sillamäe kalev': '7bc6abf1',
'jk sillamae kalev': '7bc6abf1',
'nõmme kalju': '33afefe1',
'nomme kalju': '33afefe1',
'pärnu jk': '7b538d76',
'parnu': '7b538d76',
'b36 tórshavn': '056a9b27',
'b36 torshavn': '056a9b27',
'eb/streymur': '882e0345',
'ebstreymur': '882e0345',
'eb/streymur/skála': 'a6da32bd',
'ebstreymurskala': 'a6da32bd',
'havnar bóltfelag': '15c5743b',
'havnar boltfelag': '15c5743b',
'íf fuglafjørður': 'b393ffaf',
'if fuglafjordur': 'b393ffaf',
'kí klaksvík': '869753d6',
'ki klaksvik': 'bd5e1428',
'kí klaksvík kvinnur': 'bd5e1428',
'nsí runavík': '448e1bb9',
'nsi runavik': '448e1bb9',
'vb vágur': '38f14e89',
'vb vagur': '38f14e89',
'víkingur gøta': 'ea73b616',
'vikingur gota': 'ea73b616',
'ac allianssi': 'f3eb9119',
'ac oulu': '9fc6bb9a',
'åland united': '1da16b8f',
'aland united': '1da16b8f',
'haka': '87f2fc2b',
'hämeenlinna': 'd6b8d72e',
'hameenlinna': 'd6b8d72e',
'honka': '7ae0d809',
'honka naiset': '7ae0d809',
'ilves': 'acffac85',
'inter turku': 'e9fa2e8b',
'jazz': '0512211b',
'jokerit': '5ae4ee29',
'kooteepee': '68d9124e',
'lahti': '5b8cfb05',
'viikingit': 'f07e2544',
'ff jaro': '227eb96c',
'helsinki ifk': '2d783ae1',
'hjk helsinki': 'd7319d80',
'ifk mariehamn': 'c4e86b86',
'jjk jyväskylä': '5b94dbc6',
'jjk jyvaskyla': '5b94dbc6',
'kokkolan palloveikot': '769ee103',
'kpv': '769ee103',
'kotkan työväen palloilijat': '922ef390',
'ktp': '922ef390',
'kuopion ps': 'e6f63673',
'kups': 'e6f63673',
'myllykosken pallo −47': '251418b2',
'mypa': '251418b2',
'pk-35 vantaa': 'f30d7206',
'pk 35 vantaa': 'ee9cfa46',
'pk–35 vantaa': 'ee9cfa46',
'ps kemi kings': '05e9e5b3',
'rovaniemen ps': '4fea542b',
'rops': '4fea542b',
'seinäjoen sjk': 'c889f292',
'sjk': 'c889f292',
'tampere united': '5c8dbd5c',
'tp−47': 'e0c76d4e',
'tp47': 'e0c76d4e',
'turun palloseura': '489de62a',
'tps': '489de62a',
'vaasan ps': '8b63ce55',
'vps': '8b63ce55',
'ac ajaccio': '7a54bb4f',
'ajaccio': '7a54bb4f',
'ac arles-avignon': 'e95faa7f',
'arles avignon': 'e95faa7f',
'ac cambrai': '475b29b6',
'ac chapelain foot': '75edd4df',
'ac pouzauges réaumur': 'cf001bc5',
'ac pouzauges reaumur': 'cf001bc5',
'ac seyssinet': '9555a089',
'af bobigny': 'c9ff0fe4',
'af lozère': '8fc9af77',
'af lozere': '8fc9af77',
'af virois': '946f0eef',
'acompiègne': 'e20fb8d5',
'acompiegne': 'e20fb8d5',
'acreil': '6deb77ba',
'ag caennaise': '9dcf2f81',
'agl drapeau fougeres': 'd5501d62',
'aiglon du lamentin': '22c09819',
'ailly sur somme': '8bd1d0ff',
'ain sud foot': 'f83ef755',
'aj auxerre': '5ae09109',
'auxerre': '5ae09109',
'aj petite-île': '3f5bc7c0',
'aj petite ile': '3f5bc7c0',
'aj saint-georges': '6a5ea615',
'aj saint georges': '6a5ea615',
'amiens ac': '6917ef41',
'amiens sc': '25622401',
'amiens': '25622401',
'ancienne de château-gontier': '014e7411',
'ancienne de chateau gontier': '014e7411',
'angers sco': '69236f98',
'angers': '69236f98',
'angoulêmec': 'dc1dd993',
'angoulemec': 'dc1dd993',
'annecy': 'c94d9135',
'apm metz': '47d5d984',
'arras football': 'd929fedb',
'as aix': '4fa24c7e',
'as aixoise': 'e0fae827',
'as beauvais oise': '4a0ff629',
'as belfort sud': 'fad74de7',
'as béziers': '9551340f',
'as beziers': '9551340f',
'as bourny-laval': '2f20a6ab',
'as bourny laval': '2f20a6ab',
'as bron grand lyon': '07209b8f',
'as cagnes-le cros': '3a4b7d88',
'as cagnes le cros': '3a4b7d88',
'as carrières grésillons': '0da45149',
'as carrieres gresillons': '0da45149',
'as cherbourg football': '2337bb5e',
'as clouange': '765339f2',
'as dragon': 'f855efb6',
'as erstein': 'd21391f1',
'as étaples football': 'a66e428b',
'as etaples football': 'a66e428b',
'as excelsior': '5cefc9c1',
'as fabrègues': '8f401f0f',
'as fabregues': '8f401f0f',
'as frontignan ac': '2e098377',
'as furiani-agliani': 'e231f1ed',
'as furiani agliani': 'e231f1ed',
'as gamaches': 'f9aff6a2',
'as gémenos': '02c07bb5',
'as gemenos': '02c07bb5',
'as ginglin-cesson': '1b379b16',
'as ginglin cesson': '1b379b16',
'as girancourt dommartin': '5b30dbf6',
'as grâces': 'e9ae1367',
'as graces': 'e9ae1367',
'as jumeaux de mzouazia': '16f96707',
'as la châtaigneraie': 'bdf0586a',
'as la chataigneraie': 'bdf0586a',
'as lattes': '3084b150',
'as lössi': '5910d619',
'as lossi': '5910d619',
'as lyon-duchère': 'b8d4b9f8',
'as lyon duchere': 'b8d4b9f8',
'as magenta': '6896cdd5',
'as marck': '54def196',
'as misérieux-trévoux': 'd334f4bd',
'as miserieux trevoux': 'd334f4bd',
'as monaco': 'fd6114db',
'monaco': 'fd6114db',
'as montlouis': 'd6ca45c4',
'as morhange': 'ea40415a',
'as moulins': '17e11d1b',
'as muret': '119ccdbb',
'as nancy': 'e88fc6e5',
'nancy': 'e88fc6e5',
'as pagny-sur-moselle': '5931d12b',
'as pagny sur moselle': '5931d12b',
'as panazol': '549484ec',
'as pays neslois': '1bed76a3',
'as pirae': 'd48e8557',
'as plobannalec lesconil': '325d63c9',
'as plomelin': '8106537c',
'as poissy': '6883bc02',
'as prix-lès-mézières': '6f4d0e0b',
'as prix les mezieres': '6f4d0e0b',
'as reding': '3cfb33c2',
'as saint-étienne': 'd298ef2c',
'saint etienne': 'd298ef2c',
"as saint-ouen-l'aumône": '8fd33f63',
'as saint ouen laumone': '8fd33f63',
'as saint-priest': 'a15a3e67',
'as saint priest': 'a15a3e67',
'as savigneux montbrison': '5afeccf6',
'as st pantaleon': 'a1cc6921',
'as steenvoorde': '9ce51fe8',
'as sud ardèche': '0cc7e9fc',
'as sud ardeche': '0cc7e9fc',
'as sundhoffen': '477991c5',
'as tefana': '5dd32ef0',
'as tournefeuille': '13511a72',
'as trouville-deauville': 'b5764402',
'as trouville deauville': 'b5764402',
'as valence': '6dc6f493',
'as vénus': 'd4c6c2dc',
'as venus': 'd4c6c2dc',
'as vignoc hédé guipel': '3fdcac82',
'as vignoc hede guipel': '3fdcac82',
'as villers-houlgate': '044b2813',
'as villers houlgate': '044b2813',
'as vitré': '4003a380',
'as vitre': '4003a380',
'as yzeure': '0c72364a',
'asc biesheim': '0a03ba6d',
'asc hazebrouck': '7cfc75a5',
'asc la courneuve': '28f6d783',
'asc le geldar': 'da66dce9',
'asf andrézieux': '8d6b4f2b',
'asf andrezieux': '8d6b4f2b',
'asi murs-erigné': '689bb876',
'asi murs erigne': '689bb876',
'asj soyaux': 'b54d31b3',
'soyaux': 'b54d31b3',
'asm belfort': 'acad13f6',
'asptt - caeb cholet': '17e6c5a1',
'asptt caeb cholet': '17e6c5a1',
'asptt albi': 'adf64e0d',
'albi': 'adf64e0d',
'asptt brest': '52a1b35a',
'asptt caen football': 'ccc9ee19',
'aspv strasbourg': '19926992',
'auch football': '0ade1cd8',
'aurillaca': '4e9c7caf',
'avant garde de plouvorn': '8e5a0e71',
'avenir de theix': '8012940b',
'aviron bayonnais': '77021edb',
'avoine olympique cc': 'dbdb54ca',
'balma sc': '845c9f99',
'beaune': 'd9061b4f',
'bergerac foot': '62f836ba',
'berre sc': 'ee9d31e7',
'besançon football': 'b2302a29',
'besancon football': 'b2302a29',
'blagnac': '62bda86a',
'blois foot 41': 'ddbba88a',
'borgo': 'f339f392',
'bourg-en-bresse péronnas': '724d8770',
'bourg peronnas': '724d8770',
'bourges 18': '7a26a834',
'bourges foot': '2d935efe',
'bretigny foot cs': 'fc1c07db',
'ca bastia': '28fd1f13',
'ca boulay': '943aa94a',
'ca meymacois': '2f8ccd4b',
'ca pontarlier': 'a396295a',
'calais rufc': 'ceb29db2',
'cas escaudoeuvres': '95d79c28',
'ce palavas': '7de0e7ec',
'cep lorient': 'eccc5069',
'chamois niortais': '61d9850e',
'niort': '61d9850e',
'champigny 94': 'fc5cd4a8',
'chassieu décines': '97f67652',
'chassieu decines': '97f67652',
'chaumont': '0be0fa09',
'claye-souilly sf': '621b9848',
'claye souilly sf': '621b9848',
'clermont foot': 'd9676424',
'club colonial': '4e4b608d',
'club franciscain': 'aa13fd54',
'cluses-scionzier': '4eb023fd',
'cluses scionzier': '4eb023fd',
'cms oissel': '7a512320',
'co avallon': 'a1bf53a2',
'co les ulis': '759d2c27',
'co saint-saturnin-arche': '1d98da18',
'co saint saturnin arche': '1d98da18',
'cormontreuil': '5e0cc307',
'côte chaude sportif saint-ètienne': 'c63fddc1',
'cote chaude sportif saint etienne': 'c63fddc1',
'cpb bréquigny foot': 'c56d61c3',
'cpb brequigny foot': 'c56d61c3',
'croix football iris club': 'f90efe28',
'cs avion': '99dddc73',
'cs betton': '4df07e8f',
'cs feytiat': '410c7de5',
'cs homécourt': 'd3bdd5ef',
'cs homecourt': 'd3bdd5ef',
'cs louhans-cuiseaux': '9c5e2b64',
'cs louhans cuiseaux': '9c5e2b64',
'cs mainvilliers': '84983cbd',
'cs meaux academy': '24d3177f',
'cs moulien': '6640d390',
'cs plédranais': '6fd47d29',
'cs pledranais': '6fd47d29',
'cs sedan ardennes': 'e4e952b9',
'sedan': 'e4e952b9',
'cs volvic': '9a7f5e09',
'csc cayenne': '563b1491',
'csm gennevilliers': 'c4fae78d',
'cso amnéville': '9feaeb2a',
'cso amneville': '9feaeb2a',
'diables noirs de combani': 'ff7706bc',
'dijono': '8dfb7350',
'dijon': '8dfb7350',
'dinan-léhon': '0c1c166a',
'din<NAME>': '0c1c166a',
'éds montluçon': 'f14178b9',
'eds montlucon': 'f14178b9',
'ef reims sainte-anne chatillons': '149a2f71',
'ef reims sainte anne chatillons': '149a2f71',
'eglantine vierzon': 'aed0c6fc',
'en avant de guingamp': 'd41b5f53',
'guingamp': 'd41b5f53',
'en avante de st. renen': 'ec74ab7d',
'en avante de st renen': 'ec74ab7d',
'entente centre ornain': '0dc3506a',
'entente crest-aouste': '2ead7c0b',
'entente crest aouste': '2ead7c0b',
'entente itancourt-neuville': '118497f4',
'entente itancourt neuville': '118497f4',
'entente uga ardziv': '2151f19e',
'ernéenne foot': '8dda65c3',
'erneenne foot': '8dda65c3',
'es bonchamp': '58984120',
'es chilly': 'd2d3631c',
'es guérétoise': '6ddd6173',
'es gueretoise': '6ddd6173',
'es heillecourt': '9c783267',
'es la rochelle': 'ed23e9b8',
'es nanterre': 'b9288690',
'es parisienne': 'a0f3931c',
'es paulhan-pézenas': '41efb74f',
'es paulhan pezenas': '41efb74f',
'es tarentaise': '934951d8',
'es thaon': '89ff5424',
'es villerupt thil': 'ac797ca0',
'es viry-châtillon': '30b116fb',
'es viry chatillon': '30b116fb',
'es wasquehal': 'c67deffd',
'esa brive': '4fa2bb19',
'esa linas-montlhéry': '6b355862',
'esa linas montlhery': '6b355862',
'esc longueau': '65888fdc',
'esm gonfreville': 'a9a75e6a',
'étoile fréjus saint-raphaël': '17f5e100',
'etoile frejus saint raphael': '17f5e100',
'etoile naborienne saint-avold': 'f20de566',
'etoile naborienne saint avold': 'f20de566',
'<NAME>': '633fbb6e',
'evian': '633fbb6e',
'évreux': '8edf8646',
'evreux': '8edf8646',
'fa illkirch-graffenstaden': 'd4066387',
'fa illkirch graffenstaden': 'd4066387',
'fa le cendre': '15026c8a',
'4 rivières 70': '1f2cc52a',
'4 rivieres 70': '1f2cc52a',
'albères-argelès': '59d62656',
'alberes argeles': '59d62656',
'atlantique vilaine': '72c2629a',
"bassin d'arcachon": '14413f24',
'bassin darcachon': '14413f24',
'bastia-borgo': '97fb83f1',
'bastia borgo': '97fb83f1',
'biars bretenoux': '04ab9043',
'bords-de-saône': 'fb1f3deb',
'bords de saone': 'fb1f3deb',
| |
("glGet", F, 4, "GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX"), # 0x840A
("glGet", I, 1, "GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX"), # 0x840B
("glGet", I, 1, "GL_FRAGMENT_LIGHT0_SGIX"), # 0x840C
("", X, 1, "GL_FRAGMENT_LIGHT1_SGIX"), # 0x840D
("", X, 1, "GL_FRAGMENT_LIGHT2_SGIX"), # 0x840E
("", X, 1, "GL_FRAGMENT_LIGHT3_SGIX"), # 0x840F
("", X, 1, "GL_FRAGMENT_LIGHT4_SGIX"), # 0x8410
("", X, 1, "GL_FRAGMENT_LIGHT5_SGIX"), # 0x8411
("", X, 1, "GL_FRAGMENT_LIGHT6_SGIX"), # 0x8412
("", X, 1, "GL_FRAGMENT_LIGHT7_SGIX"), # 0x8413
("", X, 1, "GL_PACK_RESAMPLE_SGIX"), # 0x842C
("", X, 1, "GL_UNPACK_RESAMPLE_SGIX"), # 0x842D
("", X, 1, "GL_RESAMPLE_REPLICATE_SGIX"), # 0x842E
("", X, 1, "GL_RESAMPLE_ZERO_FILL_SGIX"), # 0x842F
("", X, 1, "GL_RESAMPLE_DECIMATE_SGIX"), # 0x8430
("", X, 1, "GL_TANGENT_ARRAY_EXT"), # 0x8439
("", X, 1, "GL_BINORMAL_ARRAY_EXT"), # 0x843A
("", X, 1, "GL_CURRENT_TANGENT_EXT"), # 0x843B
("", X, 1, "GL_CURRENT_BINORMAL_EXT"), # 0x843C
("glGet", E, 1, "GL_TANGENT_ARRAY_TYPE_EXT"), # 0x843E
("", X, 1, "GL_TANGENT_ARRAY_STRIDE_EXT"), # 0x843F
("glGet", E, 1, "GL_BINORMAL_ARRAY_TYPE_EXT"), # 0x8440
("", X, 1, "GL_BINORMAL_ARRAY_STRIDE_EXT"), # 0x8441
("glGet", P, 1, "GL_TANGENT_ARRAY_POINTER_EXT"), # 0x8442
("glGet", P, 1, "GL_BINORMAL_ARRAY_POINTER_EXT"), # 0x8443
("", X, 1, "GL_MAP1_TANGENT_EXT"), # 0x8444
("", X, 1, "GL_MAP2_TANGENT_EXT"), # 0x8445
("", X, 1, "GL_MAP1_BINORMAL_EXT"), # 0x8446
("", X, 1, "GL_MAP2_BINORMAL_EXT"), # 0x8447
("", X, 1, "GL_NEAREST_CLIPMAP_NEAREST_SGIX"), # 0x844D
("", X, 1, "GL_NEAREST_CLIPMAP_LINEAR_SGIX"), # 0x844E
("", X, 1, "GL_LINEAR_CLIPMAP_NEAREST_SGIX"), # 0x844F
("glGet", E, 1, "GL_FOG_COORD_SRC"), # 0x8450
("", X, 1, "GL_FOG_COORD"), # 0x8451
("", X, 1, "GL_FRAGMENT_DEPTH"), # 0x8452
("glGet", F, 1, "GL_CURRENT_FOG_COORD"), # 0x8453
("glGet", E, 1, "GL_FOG_COORD_ARRAY_TYPE"), # 0x8454
("glGet", I, 1, "GL_FOG_COORD_ARRAY_STRIDE"), # 0x8455
("", X, 1, "GL_FOG_COORD_ARRAY_POINTER"), # 0x8456
("glGet", B, 1, "GL_FOG_COORD_ARRAY"), # 0x8457
("glGet", B, 1, "GL_COLOR_SUM"), # 0x8458
("glGet", F, 4, "GL_CURRENT_SECONDARY_COLOR"), # 0x8459
("glGet", I, 1, "GL_SECONDARY_COLOR_ARRAY_SIZE"), # 0x845A
("glGet", E, 1, "GL_SECONDARY_COLOR_ARRAY_TYPE"), # 0x845B
("glGet", I, 1, "GL_SECONDARY_COLOR_ARRAY_STRIDE"), # 0x845C
("", X, 1, "GL_SECONDARY_COLOR_ARRAY_POINTER"), # 0x845D
("glGet", B, 1, "GL_SECONDARY_COLOR_ARRAY"), # 0x845E
("", X, 1, "GL_CURRENT_RASTER_SECONDARY_COLOR"), # 0x845F
("glGet", F, 2, "GL_ALIASED_POINT_SIZE_RANGE"), # 0x846D
("glGet", F, 2, "GL_ALIASED_LINE_WIDTH_RANGE"), # 0x846E
("", X, 1, "GL_SCREEN_COORDINATES_REND"), # 0x8490
("", X, 1, "GL_INVERTED_SCREEN_W_REND"), # 0x8491
("", X, 1, "GL_TEXTURE0"), # 0x84C0
("", X, 1, "GL_TEXTURE1"), # 0x84C1
("", X, 1, "GL_TEXTURE2"), # 0x84C2
("", X, 1, "GL_TEXTURE3"), # 0x84C3
("", X, 1, "GL_TEXTURE4"), # 0x84C4
("", X, 1, "GL_TEXTURE5"), # 0x84C5
("", X, 1, "GL_TEXTURE6"), # 0x84C6
("", X, 1, "GL_TEXTURE7"), # 0x84C7
("", X, 1, "GL_TEXTURE8"), # 0x84C8
("", X, 1, "GL_TEXTURE9"), # 0x84C9
("", X, 1, "GL_TEXTURE10"), # 0x84CA
("", X, 1, "GL_TEXTURE11"), # 0x84CB
("", X, 1, "GL_TEXTURE12"), # 0x84CC
("", X, 1, "GL_TEXTURE13"), # 0x84CD
("", X, 1, "GL_TEXTURE14"), # 0x84CE
("", X, 1, "GL_TEXTURE15"), # 0x84CF
("", X, 1, "GL_TEXTURE16"), # 0x84D0
("", X, 1, "GL_TEXTURE17"), # 0x84D1
("", X, 1, "GL_TEXTURE18"), # 0x84D2
("", X, 1, "GL_TEXTURE19"), # 0x84D3
("", X, 1, "GL_TEXTURE20"), # 0x84D4
("", X, 1, "GL_TEXTURE21"), # 0x84D5
("", X, 1, "GL_TEXTURE22"), # 0x84D6
("", X, 1, "GL_TEXTURE23"), # 0x84D7
("", X, 1, "GL_TEXTURE24"), # 0x84D8
("", X, 1, "GL_TEXTURE25"), # 0x84D9
("", X, 1, "GL_TEXTURE26"), # 0x84DA
("", X, 1, "GL_TEXTURE27"), # 0x84DB
("", X, 1, "GL_TEXTURE28"), # 0x84DC
("", X, 1, "GL_TEXTURE29"), # 0x84DD
("", X, 1, "GL_TEXTURE30"), # 0x84DE
("", X, 1, "GL_TEXTURE31"), # 0x84DF
("glGet", E, 1, "GL_ACTIVE_TEXTURE"), # 0x84E0
("glGet", E, 1, "GL_CLIENT_ACTIVE_TEXTURE"), # 0x84E1
("glGet", I, 1, "GL_MAX_TEXTURE_UNITS"), # 0x84E2
("glGet", F, 16, "GL_TRANSPOSE_MODELVIEW_MATRIX"), # 0x84E3
("glGet", F, 16, "GL_TRANSPOSE_PROJECTION_MATRIX"), # 0x84E4
("glGet", F, 16, "GL_TRANSPOSE_TEXTURE_MATRIX"), # 0x84E5
("glGet", F, 16, "GL_TRANSPOSE_COLOR_MATRIX"), # 0x84E6
("", X, 1, "GL_SUBTRACT"), # 0x84E7
("glGet", I, 1, "GL_MAX_RENDERBUFFER_SIZE"), # 0x84E8
("", X, 1, "GL_COMPRESSED_ALPHA"), # 0x84E9
("", X, 1, "GL_COMPRESSED_LUMINANCE"), # 0x84EA
("", X, 1, "GL_COMPRESSED_LUMINANCE_ALPHA"), # 0x84EB
("", X, 1, "GL_COMPRESSED_INTENSITY"), # 0x84EC
("", X, 1, "GL_COMPRESSED_RGB"), # 0x84ED
("", X, 1, "GL_COMPRESSED_RGBA"), # 0x84EE
("glGet", E, 1, "GL_TEXTURE_COMPRESSION_HINT"), # 0x84EF
("glGetActiveUniformBlock", B, 1, "GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER"), # 0x84F0
("glGetActiveUniformBlock", B, 1, "GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER"), # 0x84F1
("", X, 1, "GL_ALL_COMPLETED_NV"), # 0x84F2
("", X, 1, "GL_FENCE_STATUS_NV"), # 0x84F3
("", X, 1, "GL_FENCE_CONDITION_NV"), # 0x84F4
("_glGet", B, 1, "GL_TEXTURE_RECTANGLE"), # 0x84F5
("_glGet", I, 1, "GL_TEXTURE_BINDING_RECTANGLE"), # 0x84F6
("", X, 1, "GL_PROXY_TEXTURE_RECTANGLE"), # 0x84F7
("glGet", I, 1, "GL_MAX_RECTANGLE_TEXTURE_SIZE"), # 0x84F8
("", X, 1, "GL_DEPTH_STENCIL"), # 0x84F9
("", X, 1, "GL_UNSIGNED_INT_24_8"), # 0x84FA
("glGet", F, 1, "GL_MAX_TEXTURE_LOD_BIAS"), # 0x84FD
("glGetTexParameter", F, 1, "GL_TEXTURE_MAX_ANISOTROPY_EXT"), # 0x84FE
("glGet", F, 1, "GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT"), # 0x84FF
("", X, 1, "GL_TEXTURE_FILTER_CONTROL"), # 0x8500
("glGetTexParameter,glGetTexEnv", F, 1, "GL_TEXTURE_LOD_BIAS"), # 0x8501
("", X, 1, "GL_MODELVIEW1_STACK_DEPTH_EXT"), # 0x8502
("", X, 1, "GL_COMBINE4_NV"), # 0x8503
("glGet", F, 1, "GL_MAX_SHININESS_NV"), # 0x8504
("glGet", F, 1, "GL_MAX_SPOT_EXPONENT_NV"), # 0x8505
("", X, 1, "GL_MODELVIEW1_MATRIX_EXT"), # 0x8506
("", X, 1, "GL_INCR_WRAP"), # 0x8507
("", X, 1, "GL_DECR_WRAP"), # 0x8508
("", X, 1, "GL_VERTEX_WEIGHTING_EXT"), # 0x8509
("", X, 1, "GL_MODELVIEW1_ARB"), # 0x850A
("", X, 1, "GL_CURRENT_VERTEX_WEIGHT_EXT"), # 0x850B
("", X, 1, "GL_VERTEX_WEIGHT_ARRAY_EXT"), # 0x850C
("", X, 1, "GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT"), # 0x850D
("glGet", E, 1, "GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT"), # 0x850E
("", X, 1, "GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT"), # 0x850F
("", X, 1, "GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT"), # 0x8510
("", X, 1, "GL_NORMAL_MAP"), # 0x8511
("", X, 1, "GL_REFLECTION_MAP"), # 0x8512
("_glGet", B, 1, "GL_TEXTURE_CUBE_MAP"), # 0x8513
("_glGet", I, 1, "GL_TEXTURE_BINDING_CUBE_MAP"), # 0x8514
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_X"), # 0x8515
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_X"), # 0x8516
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_Y"), # 0x8517
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"), # 0x8518
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_Z"), # 0x8519
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"), # 0x851A
("", X, 1, "GL_PROXY_TEXTURE_CUBE_MAP"), # 0x851B
("glGet", I, 1, "GL_MAX_CUBE_MAP_TEXTURE_SIZE"), # 0x851C
("_glGet", B, 1, "GL_VERTEX_ARRAY_RANGE_NV"), # 0x851D
("_glGet", I, 1, "GL_VERTEX_ARRAY_RANGE_LENGTH_NV"), # 0x851E
("_glGet", B, 1, "GL_VERTEX_ARRAY_RANGE_VALID_NV"), # 0x851F
("_glGet", I, 1, "GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV"), # 0x8520
("_glGet", P, 1, "GL_VERTEX_ARRAY_RANGE_POINTER_NV"), # 0x8521
("", X, 1, "GL_REGISTER_COMBINERS_NV"), # 0x8522
("", X, 1, "GL_VARIABLE_A_NV"), # 0x8523
("", X, 1, "GL_VARIABLE_B_NV"), # 0x8524
("", X, 1, "GL_VARIABLE_C_NV"), # 0x8525
("", X, 1, "GL_VARIABLE_D_NV"), # 0x8526
("", X, 1, "GL_VARIABLE_E_NV"), # 0x8527
("", X, 1, "GL_VARIABLE_F_NV"), # 0x8528
("", X, 1, "GL_VARIABLE_G_NV"), # 0x8529
("glGet", F, 4, "GL_CONSTANT_COLOR0_NV"), # 0x852A
("glGet", F, 4, "GL_CONSTANT_COLOR1_NV"), # 0x852B
("", X, 1, "GL_PRIMARY_COLOR_NV"), # 0x852C
("", X, 1, "GL_SECONDARY_COLOR_NV"), # 0x852D
("", X, 1, "GL_SPARE0_NV"), # 0x852E
("", X, 1, "GL_SPARE1_NV"), # 0x852F
("", X, 1, "GL_DISCARD_NV"), # 0x8530
("", X, 1, "GL_E_TIMES_F_NV"), # 0x8531
("", X, 1, "GL_SPARE0_PLUS_SECONDARY_COLOR_NV"), # 0x8532
("", X, 1, "GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV"), # 0x8533
("glGet", E, 1, "GL_MULTISAMPLE_FILTER_HINT_NV"), # 0x8534
("", X, 1, "GL_PER_STAGE_CONSTANTS_NV"), # 0x8535
("", X, 1, "GL_UNSIGNED_IDENTITY_NV"), # 0x8536
("", X, 1, "GL_UNSIGNED_INVERT_NV"), # 0x8537
("", X, 1, "GL_EXPAND_NORMAL_NV"), # 0x8538
("", X, 1, "GL_EXPAND_NEGATE_NV"), # 0x8539
("", X, 1, "GL_HALF_BIAS_NORMAL_NV"), # 0x853A
("", X, 1, "GL_HALF_BIAS_NEGATE_NV"), # 0x853B
("", X, 1, "GL_SIGNED_IDENTITY_NV"), # 0x853C
("", X, 1, "GL_SIGNED_NEGATE_NV"), # 0x853D
("", X, 1, "GL_SCALE_BY_TWO_NV"), # 0x853E
("", X, 1, "GL_SCALE_BY_FOUR_NV"), # 0x853F
("", X, 1, "GL_SCALE_BY_ONE_HALF_NV"), # 0x8540
("", X, 1, "GL_BIAS_BY_NEGATIVE_ONE_HALF_NV"), # 0x8541
("", X, 1, "GL_COMBINER_INPUT_NV"), # 0x8542
("", X, 1, "GL_COMBINER_MAPPING_NV"), # 0x8543
("", X, 1, "GL_COMBINER_COMPONENT_USAGE_NV"), # 0x8544
("", X, 1, "GL_COMBINER_AB_DOT_PRODUCT_NV"), # 0x8545
("", X, 1, "GL_COMBINER_CD_DOT_PRODUCT_NV"), # 0x8546
("", X, 1, "GL_COMBINER_MUX_SUM_NV"), # 0x8547
("", X, 1, "GL_COMBINER_SCALE_NV"), # 0x8548
("", X, 1, "GL_COMBINER_BIAS_NV"), # 0x8549
("", X, 1, "GL_COMBINER_AB_OUTPUT_NV"), # 0x854A
("", X, 1, "GL_COMBINER_CD_OUTPUT_NV"), # 0x854B
("", X, 1, "GL_COMBINER_SUM_OUTPUT_NV"), # 0x854C
("glGet", I, 1, "GL_MAX_GENERAL_COMBINERS_NV"), # 0x854D
("glGet", I, 1, "GL_NUM_GENERAL_COMBINERS_NV"), # 0x854E
("glGet", B, 1, "GL_COLOR_SUM_CLAMP_NV"), # 0x854F
("", X, 1, "GL_COMBINER0_NV"), # 0x8550
("", X, 1, "GL_COMBINER1_NV"), # 0x8551
("", X, 1, "GL_COMBINER2_NV"), # 0x8552
("", X, 1, "GL_COMBINER3_NV"), # 0x8553
("", X, 1, "GL_COMBINER4_NV"), # 0x8554
("", X, 1, "GL_COMBINER5_NV"), # 0x8555
("", X, 1, "GL_COMBINER6_NV"), # 0x8556
("", X, 1, "GL_COMBINER7_NV"), # 0x8557
("", X, 1, "GL_PRIMITIVE_RESTART_NV"), # 0x8558
("", X, 1, "GL_PRIMITIVE_RESTART_INDEX_NV"), # 0x8559
("glGet", E, 1, "GL_FOG_DISTANCE_MODE_NV"), # 0x855A
("", X, 1, "GL_EYE_RADIAL_NV"), # 0x855B
("", X, 1, "GL_EYE_PLANE_ABSOLUTE_NV"), # 0x855C
("", X, 1, "GL_EMBOSS_LIGHT_NV"), # 0x855D
("", X, 1, "GL_EMBOSS_CONSTANT_NV"), # 0x855E
("", X, 1, "GL_EMBOSS_MAP_NV"), # 0x855F
("", X, 1, "GL_RED_MIN_CLAMP_INGR"), # 0x8560
("", X, 1, "GL_GREEN_MIN_CLAMP_INGR"), # 0x8561
("", X, 1, "GL_BLUE_MIN_CLAMP_INGR"), # 0x8562
("", X, 1, "GL_ALPHA_MIN_CLAMP_INGR"), # 0x8563
("", X, 1, "GL_RED_MAX_CLAMP_INGR"), # 0x8564
("", X, 1, "GL_GREEN_MAX_CLAMP_INGR"), # 0x8565
("", X, 1, "GL_BLUE_MAX_CLAMP_INGR"), # 0x8566
("", X, 1, "GL_ALPHA_MAX_CLAMP_INGR"), # 0x8567
("", X, 1, "GL_INTERLACE_READ_INGR"), # 0x8568
("", X, 1, "GL_COMBINE"), # 0x8570
("glGetTexEnv", E, 1, "GL_COMBINE_RGB"), # 0x8571
("glGetTexEnv", E, 1, "GL_COMBINE_ALPHA"), # 0x8572
("glGetTexEnv", F, 1, "GL_RGB_SCALE"), # 0x8573
("", X, 1, "GL_ADD_SIGNED"), # 0x8574
("", X, 1, "GL_INTERPOLATE"), # 0x8575
("", X, 1, "GL_CONSTANT"), # 0x8576
("", X, 1, "GL_PRIMARY_COLOR"), # 0x8577
("", X, 1, "GL_PREVIOUS"), # 0x8578
("glGetTexEnv", E, 1, "GL_SRC0_RGB"), # 0x8580
("glGetTexEnv", E, 1, "GL_SRC1_RGB"), # 0x8581
("glGetTexEnv", E, 1, "GL_SRC2_RGB"), # 0x8582
("glGetTexEnv", E, 1, "GL_SOURCE3_RGB_NV"), # 0x8583
("glGetTexEnv", E, 1, "GL_SRC0_ALPHA"), # 0x8588
("glGetTexEnv", E, 1, "GL_SRC1_ALPHA"), # 0x8589
("glGetTexEnv", E, 1, "GL_SRC2_ALPHA"), # 0x858A
("glGetTexEnv", E, 1, "GL_SOURCE3_ALPHA_NV"), # 0x858B
("glGetTexEnv", E, 1, "GL_OPERAND0_RGB"), # 0x8590
("glGetTexEnv", E, 1, "GL_OPERAND1_RGB"), # 0x8591
("glGetTexEnv", E, 1, "GL_OPERAND2_RGB"), # 0x8592
("glGetTexEnv", E, 1, "GL_OPERAND3_RGB_NV"), # 0x8593
("glGetTexEnv", E, 1, "GL_OPERAND0_ALPHA"), # 0x8598
("glGetTexEnv", E, 1, "GL_OPERAND1_ALPHA"), # 0x8599
("glGetTexEnv", E, 1, "GL_OPERAND2_ALPHA"), # 0x859A
("glGetTexEnv", E, 1, "GL_OPERAND3_ALPHA_NV"), # 0x859B
("", X, 1, "GL_PACK_SUBSAMPLE_RATE_SGIX"), # 0x85A0
("", X, 1, "GL_UNPACK_SUBSAMPLE_RATE_SGIX"), # 0x85A1
("", X, 1, "GL_PIXEL_SUBSAMPLE_4444_SGIX"), # 0x85A2
("", X, 1, "GL_PIXEL_SUBSAMPLE_2424_SGIX"), # 0x85A3
("", X, 1, "GL_PIXEL_SUBSAMPLE_4242_SGIX"), # 0x85A4
("", X, 1, "GL_PERTURB_EXT"), # 0x85AE
("", X, 1, "GL_TEXTURE_NORMAL_EXT"), # 0x85AF
("", X, 1, "GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE"), # 0x85B0
("", X, 1, "GL_TRANSFORM_HINT_APPLE"), # 0x85B1
("", X, 1, "GL_UNPACK_CLIENT_STORAGE_APPLE"), # 0x85B2
("", X, 1, "GL_BUFFER_OBJECT_APPLE"), # 0x85B3
("", X, 1, "GL_STORAGE_CLIENT_APPLE"), # 0x85B4
("glGet", I, 1, "GL_VERTEX_ARRAY_BINDING"), # 0x85B5
("glGetTexParameter", X, 1, "GL_TEXTURE_RANGE_LENGTH_APPLE"), # 0x85B7
("", P, 1, "GL_TEXTURE_RANGE_POINTER_APPLE"), # 0x85B8
("", X, 1, "GL_YCBCR_422_APPLE"), # 0x85B9
("", X, 1, "GL_UNSIGNED_SHORT_8_8_MESA"), # 0x85BA
("", X, 1, "GL_UNSIGNED_SHORT_8_8_REV_MESA"), # 0x85BB
("glGetTexParameter", E, 1, "GL_TEXTURE_STORAGE_HINT_APPLE"), # 0x85BC
("", X, 1, "GL_STORAGE_PRIVATE_APPLE"), # 0x85BD
("", X, 1, "GL_STORAGE_CACHED_APPLE"), # 0x85BE
("", X, 1, "GL_STORAGE_SHARED_APPLE"), # 0x85BF
("", X, 1, "GL_REPLACEMENT_CODE_ARRAY_SUN"), # 0x85C0
("glGet", E, 1, "GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN"), # 0x85C1
("", X, 1, "GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN"), # 0x85C2
("", X, 1, "GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN"), # 0x85C3
("", X, 1, "GL_R1UI_V3F_SUN"), # 0x85C4
("", X, 1, "GL_R1UI_C4UB_V3F_SUN"), # 0x85C5
("", X, 1, "GL_R1UI_C3F_V3F_SUN"), # 0x85C6
("", X, 1, "GL_R1UI_N3F_V3F_SUN"), # 0x85C7
("", X, 1, "GL_R1UI_C4F_N3F_V3F_SUN"), # 0x85C8
("", X, 1, "GL_R1UI_T2F_V3F_SUN"), # 0x85C9
("", X, 1, "GL_R1UI_T2F_N3F_V3F_SUN"), # 0x85CA
("", X, 1, "GL_R1UI_T2F_C4F_N3F_V3F_SUN"), # 0x85CB
("", X, 1, "GL_SLICE_ACCUM_SUN"), # 0x85CC
("", X, 1, "GL_QUAD_MESH_SUN"), # 0x8614
("", X, 1, "GL_TRIANGLE_MESH_SUN"), # 0x8615
("_glGet", B, 1, "GL_VERTEX_PROGRAM_ARB"), # 0x8620
("", X, 1, "GL_VERTEX_STATE_PROGRAM_NV"), # 0x8621
("glGetVertexAttrib", B, 1, "GL_VERTEX_ATTRIB_ARRAY_ENABLED"), # 0x8622
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_SIZE"), # 0x8623
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_STRIDE"), # 0x8624
("glGetVertexAttrib", E, 1, "GL_VERTEX_ATTRIB_ARRAY_TYPE"), # 0x8625
("glGetVertexAttrib", D, 4, "GL_CURRENT_VERTEX_ATTRIB"), # 0x8626
("glGetProgramARB", I, 1, "GL_PROGRAM_LENGTH_ARB"), # 0x8627
("", S, 1, "GL_PROGRAM_STRING_ARB"), # 0x8628
("", X, 1, "GL_MODELVIEW_PROJECTION_NV"), # 0x8629
("", X, 1, "GL_IDENTITY_NV"), # 0x862A
("", X, 1, "GL_INVERSE_NV"), # 0x862B
("", X, 1, "GL_TRANSPOSE_NV"), # 0x862C
("", X, 1, "GL_INVERSE_TRANSPOSE_NV"), # 0x862D
("glGet", I, 1, "GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB"), # 0x862E
("glGet", I, 1, "GL_MAX_PROGRAM_MATRICES_ARB"), # 0x862F
("", X, 1, "GL_MATRIX0_NV"), # 0x8630
("", X, 1, "GL_MATRIX1_NV"), # 0x8631
("", X, 1, "GL_MATRIX2_NV"), # 0x8632
("", X, 1, "GL_MATRIX3_NV"), # 0x8633
("", X, 1, "GL_MATRIX4_NV"), # 0x8634
("", X, 1, "GL_MATRIX5_NV"), # 0x8635
("", X, 1, "GL_MATRIX6_NV"), # 0x8636
("", X, 1, "GL_MATRIX7_NV"), # 0x8637
("glGet", I, 1, "GL_CURRENT_MATRIX_STACK_DEPTH_ARB"), # 0x8640
("glGet", F, 16, "GL_CURRENT_MATRIX_ARB"), # 0x8641
("glGet", B, 1, "GL_PROGRAM_POINT_SIZE"), # 0x8642
("glGet", B, 1, "GL_VERTEX_PROGRAM_TWO_SIDE"), # 0x8643
("", X, 1, "GL_PROGRAM_PARAMETER_NV"), # 0x8644
("glGetVertexAttrib", P, 1, "GL_VERTEX_ATTRIB_ARRAY_POINTER"), # 0x8645
("glGetProgramNV", I, 1, "GL_PROGRAM_TARGET_NV"), # 0x8646
("glGetProgramNV", B, 1, "GL_PROGRAM_RESIDENT_NV"), # 0x8647
("", X, 1, "GL_TRACK_MATRIX_NV"), # 0x8648
("", X, 1, "GL_TRACK_MATRIX_TRANSFORM_NV"), # 0x8649
("glGet", I, 1, "GL_VERTEX_PROGRAM_BINDING_NV"), # 0x864A
("glGet", I, 1, "GL_PROGRAM_ERROR_POSITION_ARB"), # 0x864B
("", X, 1, "GL_OFFSET_TEXTURE_RECTANGLE_NV"), # 0x864C
("", X, 1, "GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV"), # 0x864D
("", X, 1, "GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV"), # 0x864E
("glGet", B, 1, "GL_DEPTH_CLAMP"), # 0x864F
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY0_NV"), # 0x8650
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY1_NV"), # 0x8651
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY2_NV"), # 0x8652
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY3_NV"), # 0x8653
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY4_NV"), # 0x8654
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY5_NV"), # 0x8655
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY6_NV"), # 0x8656
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY7_NV"), # 0x8657
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY8_NV"), # 0x8658
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY9_NV"), # 0x8659
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY10_NV"), # 0x865A
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY11_NV"), # 0x865B
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY12_NV"), # 0x865C
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY13_NV"), # 0x865D
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY14_NV"), # 0x865E
("_glGet", B, 1, "GL_VERTEX_ATTRIB_ARRAY15_NV"), # 0x865F
("", X, 1, "GL_MAP1_VERTEX_ATTRIB0_4_NV"), # 0x8660
("", X, 1, "GL_MAP1_VERTEX_ATTRIB1_4_NV"), # 0x8661
("", X, 1, "GL_MAP1_VERTEX_ATTRIB2_4_NV"), # 0x8662
("", X, 1, "GL_MAP1_VERTEX_ATTRIB3_4_NV"), # 0x8663
("", X, 1, "GL_MAP1_VERTEX_ATTRIB4_4_NV"), # 0x8664
("", X, 1, "GL_MAP1_VERTEX_ATTRIB5_4_NV"), # 0x8665
("", X, 1, "GL_MAP1_VERTEX_ATTRIB6_4_NV"), # 0x8666
("", X, 1, "GL_MAP1_VERTEX_ATTRIB7_4_NV"), # 0x8667
("", X, 1, "GL_MAP1_VERTEX_ATTRIB8_4_NV"), # 0x8668
("", X, 1, "GL_MAP1_VERTEX_ATTRIB9_4_NV"), # 0x8669
("", X, 1, "GL_MAP1_VERTEX_ATTRIB10_4_NV"), # 0x866A
("", X, 1, "GL_MAP1_VERTEX_ATTRIB11_4_NV"), # 0x866B
("", X, 1, "GL_MAP1_VERTEX_ATTRIB12_4_NV"), # 0x866C
("", X, 1, "GL_MAP1_VERTEX_ATTRIB13_4_NV"), # 0x866D
("", X, 1, "GL_MAP1_VERTEX_ATTRIB14_4_NV"), # 0x866E
("", X, 1, "GL_MAP1_VERTEX_ATTRIB15_4_NV"), # 0x866F
("", X, 1, "GL_MAP2_VERTEX_ATTRIB0_4_NV"), # 0x8670
("", X, 1, "GL_MAP2_VERTEX_ATTRIB1_4_NV"), # 0x8671
("", X, 1, "GL_MAP2_VERTEX_ATTRIB2_4_NV"), # 0x8672
("", X, 1, "GL_MAP2_VERTEX_ATTRIB3_4_NV"), # 0x8673
("", X, 1, "GL_MAP2_VERTEX_ATTRIB4_4_NV"), # 0x8674
("", X, 1, "GL_MAP2_VERTEX_ATTRIB5_4_NV"), # 0x8675
("", X, 1, "GL_MAP2_VERTEX_ATTRIB6_4_NV"), # 0x8676
("glGetProgramARB", I, 1, "GL_PROGRAM_BINDING_ARB"), # 0x8677
("", X, 1, "GL_MAP2_VERTEX_ATTRIB8_4_NV"), # 0x8678
("", X, 1, "GL_MAP2_VERTEX_ATTRIB9_4_NV"), # 0x8679
("", X, 1, "GL_MAP2_VERTEX_ATTRIB10_4_NV"), # 0x867A
("", X, 1, "GL_MAP2_VERTEX_ATTRIB11_4_NV"), # 0x867B
("", X, 1, "GL_MAP2_VERTEX_ATTRIB12_4_NV"), # 0x867C
("", X, 1, "GL_MAP2_VERTEX_ATTRIB13_4_NV"), # 0x867D
("", X, 1, "GL_MAP2_VERTEX_ATTRIB14_4_NV"), # 0x867E
("", X, 1, "GL_MAP2_VERTEX_ATTRIB15_4_NV"), # 0x867F
("glGetTexLevelParameter", I, 1, "GL_TEXTURE_COMPRESSED_IMAGE_SIZE"), # 0x86A0
("glGetTexLevelParameter", B, 1, "GL_TEXTURE_COMPRESSED"), # 0x86A1
("glGet", I, 1, "GL_NUM_COMPRESSED_TEXTURE_FORMATS"), # 0x86A2
("glGet", E, '_glGetInteger(GL_NUM_COMPRESSED_TEXTURE_FORMATS)', "GL_COMPRESSED_TEXTURE_FORMATS"), # 0x86A3
("glGet", I, 1, "GL_MAX_VERTEX_UNITS_ARB"), # 0x86A4
("glGet", I, 1, "GL_ACTIVE_VERTEX_UNITS_ARB"), # 0x86A5
("glGet", B, 1, "GL_WEIGHT_SUM_UNITY_ARB"), # 0x86A6
("glGet", B, 1, "GL_VERTEX_BLEND_ARB"), # 0x86A7
("glGet", F, 1, "GL_CURRENT_WEIGHT_ARB"), # 0x86A8
("glGet", E, 1, "GL_WEIGHT_ARRAY_TYPE_ARB"), # 0x86A9
("glGet", I, 1, "GL_WEIGHT_ARRAY_STRIDE_ARB"), # 0x86AA
("glGet", I, 1, "GL_WEIGHT_ARRAY_SIZE_ARB"), # 0x86AB
("glGet", P, 1, "GL_WEIGHT_ARRAY_POINTER_ARB"), # 0x86AC
("glGet", B, 1, "GL_WEIGHT_ARRAY_ARB"), # 0x86AD
("", X, 1, "GL_DOT3_RGB"), # 0x86AE
("", X, 1, "GL_DOT3_RGBA"), # 0x86AF
("", X, 1, "GL_COMPRESSED_RGB_FXT1_3DFX"), # 0x86B0
("", X, 1, "GL_COMPRESSED_RGBA_FXT1_3DFX"), # 0x86B1
("", X, 1, "GL_MULTISAMPLE_3DFX"), # 0x86B2
("", X, 1, "GL_SAMPLE_BUFFERS_3DFX"), # 0x86B3
("", X, 1, "GL_SAMPLES_3DFX"), # 0x86B4
("", X, 1, "GL_EVAL_2D_NV"), # 0x86C0
("", X, 1, "GL_EVAL_TRIANGULAR_2D_NV"), # 0x86C1
("", X, 1, "GL_MAP_TESSELLATION_NV"), # 0x86C2
("", X, 1, "GL_MAP_ATTRIB_U_ORDER_NV"), # 0x86C3
("", X, 1, "GL_MAP_ATTRIB_V_ORDER_NV"), # 0x86C4
("", X, 1, "GL_EVAL_FRACTIONAL_TESSELLATION_NV"), # 0x86C5
("", X, 1, "GL_EVAL_VERTEX_ATTRIB0_NV"), # 0x86C6
("", X, 1, "GL_EVAL_VERTEX_ATTRIB1_NV"), # 0x86C7
("", X, 1, "GL_EVAL_VERTEX_ATTRIB2_NV"), # 0x86C8
("", X, 1, "GL_EVAL_VERTEX_ATTRIB3_NV"), # 0x86C9
("", X, 1, "GL_EVAL_VERTEX_ATTRIB4_NV"), # 0x86CA
("", X, 1, "GL_EVAL_VERTEX_ATTRIB5_NV"), # 0x86CB
("", X, 1, "GL_EVAL_VERTEX_ATTRIB6_NV"), # 0x86CC
("", X, 1, "GL_EVAL_VERTEX_ATTRIB7_NV"), # 0x86CD
("", X, 1, "GL_EVAL_VERTEX_ATTRIB8_NV"), # 0x86CE
("", X, 1, "GL_EVAL_VERTEX_ATTRIB9_NV"), # 0x86CF
("", X, 1, "GL_EVAL_VERTEX_ATTRIB10_NV"), # 0x86D0
("", X, 1, "GL_EVAL_VERTEX_ATTRIB11_NV"), # 0x86D1
("", X, 1, "GL_EVAL_VERTEX_ATTRIB12_NV"), # 0x86D2
("", X, 1, "GL_EVAL_VERTEX_ATTRIB13_NV"), # 0x86D3
("", X, 1, "GL_EVAL_VERTEX_ATTRIB14_NV"), # 0x86D4
("", X, 1, "GL_EVAL_VERTEX_ATTRIB15_NV"), # 0x86D5
("", X, 1, "GL_MAX_MAP_TESSELLATION_NV"), # 0x86D6
("", X, 1, "GL_MAX_RATIONAL_EVAL_ORDER_NV"), # 0x86D7
("", X, 1, "GL_MAX_PROGRAM_PATCH_ATTRIBS_NV"), # 0x86D8
("glGetTexEnv", E, 1, "GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV"), # 0x86D9
("", X, 1, "GL_UNSIGNED_INT_S8_S8_8_8_NV"), # 0x86DA
("", X, 1, "GL_UNSIGNED_INT_8_8_S8_S8_REV_NV"), # 0x86DB
("", X, 1, "GL_DSDT_MAG_INTENSITY_NV"), # 0x86DC
("", X, 1, "GL_SHADER_CONSISTENT_NV"), # 0x86DD
("", X, 1, "GL_TEXTURE_SHADER_NV"), # 0x86DE
("glGetTexEnv", E, 1, "GL_SHADER_OPERATION_NV"), # 0x86DF
("glGetTexEnv", E, 4, "GL_CULL_MODES_NV"), # 0x86E0
("glGetTexEnv", F, 4, "GL_OFFSET_TEXTURE_MATRIX_NV"), # 0x86E1
("glGetTexEnv", F, 1, "GL_OFFSET_TEXTURE_SCALE_NV"), # 0x86E2
("glGetTexEnv", F, 1, "GL_OFFSET_TEXTURE_BIAS_NV"), # 0x86E3
("glGetTexEnv", E, 1, "GL_PREVIOUS_TEXTURE_INPUT_NV"), # 0x86E4
("glGetTexEnv", F, 3, "GL_CONST_EYE_NV"), # 0x86E5
("", X, 1, "GL_PASS_THROUGH_NV"), # 0x86E6
("", X, 1, "GL_CULL_FRAGMENT_NV"), # 0x86E7
("", X, 1, "GL_OFFSET_TEXTURE_2D_NV"), # 0x86E8
("", X, 1, "GL_DEPENDENT_AR_TEXTURE_2D_NV"), # 0x86E9
("", X, 1, "GL_DEPENDENT_GB_TEXTURE_2D_NV"), # 0x86EA
("", X, 1, "GL_SURFACE_STATE_NV"), # 0x86EB
("", X, 1, "GL_DOT_PRODUCT_NV"), # 0x86EC
("", X, 1, "GL_DOT_PRODUCT_DEPTH_REPLACE_NV"), # 0x86ED
("", X, 1, "GL_DOT_PRODUCT_TEXTURE_2D_NV"), # 0x86EE
("", X, 1, "GL_DOT_PRODUCT_TEXTURE_3D_NV"), # 0x86EF
("", X, 1, "GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV"), # 0x86F0
("", X, 1, "GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV"), # 0x86F1
("", X, 1, "GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV"), # 0x86F2
("", X, 1, "GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV"), # 0x86F3
("", X, 1, "GL_HILO_NV"), # 0x86F4
("", X, 1, "GL_DSDT_NV"), # 0x86F5
("", X, 1, "GL_DSDT_MAG_NV"), # 0x86F6
("", X, 1, "GL_DSDT_MAG_VIB_NV"), # 0x86F7
("", X, 1, "GL_HILO16_NV"), # 0x86F8
("", X, 1, "GL_SIGNED_HILO_NV"), # 0x86F9
("", X, 1, "GL_SIGNED_HILO16_NV"), # 0x86FA
("", X, 1, "GL_SIGNED_RGBA_NV"), # 0x86FB
("", X, 1, "GL_SIGNED_RGBA8_NV"), # 0x86FC
("", X, 1, "GL_SURFACE_REGISTERED_NV"), # 0x86FD
("", X, 1, "GL_SIGNED_RGB_NV"), # 0x86FE
("", X, 1, "GL_SIGNED_RGB8_NV"), # 0x86FF
("", X, 1, "GL_SURFACE_MAPPED_NV"), # 0x8700
("", X, 1, "GL_SIGNED_LUMINANCE_NV"), # 0x8701
("", X, 1, "GL_SIGNED_LUMINANCE8_NV"), # 0x8702
("", X, 1, "GL_SIGNED_LUMINANCE_ALPHA_NV"), # 0x8703
("", X, 1, "GL_SIGNED_LUMINANCE8_ALPHA8_NV"), # 0x8704
("", X, 1, "GL_SIGNED_ALPHA_NV"), # 0x8705
("", X, 1, "GL_SIGNED_ALPHA8_NV"), # 0x8706
("", X, 1, "GL_SIGNED_INTENSITY_NV"), # 0x8707
("", X, 1, "GL_SIGNED_INTENSITY8_NV"), # 0x8708
("", X, 1, "GL_DSDT8_NV"), # 0x8709
("", X, 1, "GL_DSDT8_MAG8_NV"), # 0x870A
("", X, 1, "GL_DSDT8_MAG8_INTENSITY8_NV"), # 0x870B
("", X, 1, "GL_SIGNED_RGB_UNSIGNED_ALPHA_NV"), # 0x870C
("", X, 1, "GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV"), # 0x870D
("", X, 1, "GL_HI_SCALE_NV"), # 0x870E
("", X, 1, "GL_LO_SCALE_NV"), # 0x870F
("", X, 1, "GL_DS_SCALE_NV"), # 0x8710
("", X, 1, "GL_DT_SCALE_NV"), # 0x8711
("", X, 1, "GL_MAGNITUDE_SCALE_NV"), # 0x8712
("", X, 1, "GL_VIBRANCE_SCALE_NV"), # 0x8713
("", X, 1, "GL_HI_BIAS_NV"), # 0x8714
("", X, 1, "GL_LO_BIAS_NV"), # 0x8715
("", X, 1, "GL_DS_BIAS_NV"), # 0x8716
("", X, 1, "GL_DT_BIAS_NV"), # 0x8717
("", X, 1, "GL_MAGNITUDE_BIAS_NV"), # 0x8718
("", X, 1, "GL_VIBRANCE_BIAS_NV"), # 0x8719
("", X, 1, "GL_TEXTURE_BORDER_VALUES_NV"), # 0x871A
("", X, 1, "GL_TEXTURE_HI_SIZE_NV"), # 0x871B
("", X, 1, "GL_TEXTURE_LO_SIZE_NV"), # 0x871C
("", X, 1, "GL_TEXTURE_DS_SIZE_NV"), # 0x871D
("", X, 1, "GL_TEXTURE_DT_SIZE_NV"), # 0x871E
("", X, 1, "GL_TEXTURE_MAG_SIZE_NV"), # 0x871F
("_glGet", F, 16, "GL_MODELVIEW2_ARB"), # 0x8722
("_glGet", F, 16, "GL_MODELVIEW3_ARB"), # 0x8723
("_glGet", F, 16, "GL_MODELVIEW4_ARB"), # 0x8724
("_glGet", F, 16, "GL_MODELVIEW5_ARB"), # 0x8725
("_glGet", F, 16, "GL_MODELVIEW6_ARB"), # 0x8726
("_glGet", F, 16, "GL_MODELVIEW7_ARB"), # 0x8727
("_glGet", F, 16, "GL_MODELVIEW8_ARB"), # 0x8728
("_glGet", F, 16, "GL_MODELVIEW9_ARB"), # 0x8729
("_glGet", F, 16, "GL_MODELVIEW10_ARB"), # 0x872A
("_glGet", F, 16, "GL_MODELVIEW11_ARB"), # 0x872B
("_glGet", F, 16, "GL_MODELVIEW12_ARB"), # 0x872C
("_glGet", F, 16, "GL_MODELVIEW13_ARB"), # 0x872D
("_glGet", F, 16, "GL_MODELVIEW14_ARB"), # 0x872E
("_glGet", F, 16, "GL_MODELVIEW15_ARB"), # 0x872F
("_glGet", F, 16, "GL_MODELVIEW16_ARB"), # 0x8730
("_glGet", F, 16, "GL_MODELVIEW17_ARB"), # 0x8731
("_glGet", F, 16, "GL_MODELVIEW18_ARB"), # 0x8732
("_glGet", F, 16, "GL_MODELVIEW19_ARB"), # 0x8733
("_glGet", F, 16, "GL_MODELVIEW20_ARB"), # 0x8734
("_glGet", F, 16, "GL_MODELVIEW21_ARB"), # 0x8735
("_glGet", F, 16, "GL_MODELVIEW22_ARB"), # 0x8736
("_glGet", F, 16, "GL_MODELVIEW23_ARB"), # 0x8737
("_glGet", F, 16, "GL_MODELVIEW24_ARB"), # 0x8738
("_glGet", F, 16, "GL_MODELVIEW25_ARB"), # 0x8739
("_glGet", F, 16, "GL_MODELVIEW26_ARB"), # 0x873A
("_glGet", F, 16, "GL_MODELVIEW27_ARB"), # 0x873B
("_glGet", F, 16, "GL_MODELVIEW28_ARB"), # 0x873C
("_glGet", F, 16, "GL_MODELVIEW29_ARB"), # 0x873D
("_glGet", F, 16, "GL_MODELVIEW30_ARB"), # 0x873E
("_glGet", F, 16, "GL_MODELVIEW31_ARB"), # 0x873F
("", X, 1, "GL_DOT3_RGB_EXT"), # 0x8740
("", X, 1, "GL_DOT3_RGBA_EXT"), # 0x8741
("", X, 1, "GL_MIRROR_CLAMP_ATI"), # 0x8742
("", X, 1, "GL_MIRROR_CLAMP_TO_EDGE_ATI"), # 0x8743
("", X, 1, "GL_MODULATE_ADD_ATI"), # 0x8744
("", X, 1, "GL_MODULATE_SIGNED_ADD_ATI"), # 0x8745
("", X, 1, "GL_MODULATE_SUBTRACT_ATI"), # 0x8746
("", X, 1, "GL_YCBCR_MESA"), # 0x8757
("glGet", B, 1, "GL_PACK_INVERT_MESA"), # 0x8758
("", X, 1, "GL_TEXTURE_1D_STACK_MESAX"), # 0x8759
("", X, 1, "GL_TEXTURE_2D_STACK_MESAX"), # 0x875A
("", X, 1, "GL_PROXY_TEXTURE_1D_STACK_MESAX"), # 0x875B
("", X, 1, "GL_PROXY_TEXTURE_2D_STACK_MESAX"), # 0x875C
("", X, 1, "GL_TEXTURE_1D_STACK_BINDING_MESAX"), # 0x875D
("", X, 1, "GL_TEXTURE_2D_STACK_BINDING_MESAX"), # 0x875E
("", X, 1, "GL_STATIC_ATI"), # 0x8760
("", X, 1, "GL_DYNAMIC_ATI"), # 0x8761
("", X, 1, "GL_PRESERVE_ATI"), # 0x8762
("", X, 1, "GL_DISCARD_ATI"), # 0x8763
("glGetBufferParameter", I, 1, "GL_BUFFER_SIZE"), # 0x8764
("glGetBufferParameter", E, 1, "GL_BUFFER_USAGE"), # 0x8765
("", X, 1, "GL_ARRAY_OBJECT_BUFFER_ATI"), # 0x8766
("", X, 1, "GL_ARRAY_OBJECT_OFFSET_ATI"), # 0x8767
("", X, 1, "GL_ELEMENT_ARRAY_ATI"), # 0x8768
("glGet", E, 1, "GL_ELEMENT_ARRAY_TYPE_ATI"), # 0x8769
("", X, 1, "GL_ELEMENT_ARRAY_POINTER_ATI"), # 0x876A
("", X, 1, "GL_MAX_VERTEX_STREAMS_ATI"), # 0x876B
("", X, 1, "GL_VERTEX_STREAM0_ATI"), # 0x876C
("", X, 1, "GL_VERTEX_STREAM1_ATI"), # 0x876D
("", X, 1, "GL_VERTEX_STREAM2_ATI"), # 0x876E
("", X, 1, "GL_VERTEX_STREAM3_ATI"), # 0x876F
("", X, 1, "GL_VERTEX_STREAM4_ATI"), # 0x8770
("", X, 1, "GL_VERTEX_STREAM5_ATI"), # 0x8771
("", X, 1, "GL_VERTEX_STREAM6_ATI"), # 0x8772
("", X, 1, "GL_VERTEX_STREAM7_ATI"), # 0x8773
("", X, 1, "GL_VERTEX_SOURCE_ATI"), # 0x8774
("", X, 1, "GL_BUMP_ROT_MATRIX_ATI"), # 0x8775
("", X, 1, "GL_BUMP_ROT_MATRIX_SIZE_ATI"), # 0x8776
("", X, 1, "GL_BUMP_NUM_TEX_UNITS_ATI"), # 0x8777
("", X, 1, "GL_BUMP_TEX_UNITS_ATI"), # 0x8778
("", X, 1, "GL_DUDV_ATI"), # 0x8779
("", X, 1, "GL_DU8DV8_ATI"), # 0x877A
("", X, 1, "GL_BUMP_ENVMAP_ATI"), # 0x877B
("glGetTexEnv", E, 1, "GL_BUMP_TARGET_ATI"), # 0x877C
("", X, 1, "GL_VERTEX_SHADER_EXT"), # 0x8780
("glGet", I, 1, "GL_VERTEX_SHADER_BINDING_EXT"), # 0x8781
("", X, 1, "GL_OP_INDEX_EXT"), # 0x8782
("", X, 1, "GL_OP_NEGATE_EXT"), # 0x8783
("", X, 1, "GL_OP_DOT3_EXT"), # 0x8784
("", X, 1, "GL_OP_DOT4_EXT"), # 0x8785
("", X, 1, "GL_OP_MUL_EXT"), # 0x8786
("", X, 1, "GL_OP_ADD_EXT"), # 0x8787
("", X, 1, "GL_OP_MADD_EXT"), # 0x8788
("", X, 1, "GL_OP_FRAC_EXT"), # 0x8789
("", X, 1, "GL_OP_MAX_EXT"), # 0x878A
("", X, 1, "GL_OP_MIN_EXT"), # 0x878B
("", X, 1, "GL_OP_SET_GE_EXT"), # 0x878C
("", X, 1, "GL_OP_SET_LT_EXT"), # 0x878D
("", X, 1, "GL_OP_CLAMP_EXT"), # 0x878E
("", X, 1, "GL_OP_FLOOR_EXT"), # 0x878F
("", X, 1, "GL_OP_ROUND_EXT"), # 0x8790
("", X, 1, "GL_OP_EXP_BASE_2_EXT"), # 0x8791
("", X, 1, "GL_OP_LOG_BASE_2_EXT"), # 0x8792
("", X, 1, "GL_OP_POWER_EXT"), # 0x8793
("", X, 1, "GL_OP_RECIP_EXT"), # 0x8794
("", X, 1, "GL_OP_RECIP_SQRT_EXT"), # 0x8795
("", X, 1, "GL_OP_SUB_EXT"), # 0x8796
("", X, 1, "GL_OP_CROSS_PRODUCT_EXT"), # 0x8797
("", X, 1, "GL_OP_MULTIPLY_MATRIX_EXT"), # 0x8798
("", X, 1, "GL_OP_MOV_EXT"), # 0x8799
("", X, 1, "GL_OUTPUT_VERTEX_EXT"), # 0x879A
("", X, 1, "GL_OUTPUT_COLOR0_EXT"), # 0x879B
("", X, 1, "GL_OUTPUT_COLOR1_EXT"), # 0x879C
("", X, 1, "GL_OUTPUT_TEXTURE_COORD0_EXT"), # 0x879D
("", X, 1, "GL_OUTPUT_TEXTURE_COORD1_EXT"), # 0x879E
("", X, 1, "GL_OUTPUT_TEXTURE_COORD2_EXT"), # 0x879F
("", X, 1, "GL_OUTPUT_TEXTURE_COORD3_EXT"), # 0x87A0
("", X, 1, "GL_OUTPUT_TEXTURE_COORD4_EXT"), # 0x87A1
("", X, 1, "GL_OUTPUT_TEXTURE_COORD5_EXT"), # 0x87A2
("", X, 1, "GL_OUTPUT_TEXTURE_COORD6_EXT"), # 0x87A3
("", X, 1, "GL_OUTPUT_TEXTURE_COORD7_EXT"), # 0x87A4
("", X, 1, "GL_OUTPUT_TEXTURE_COORD8_EXT"), # 0x87A5
("", X, 1, "GL_OUTPUT_TEXTURE_COORD9_EXT"), # 0x87A6
("", X, 1, "GL_OUTPUT_TEXTURE_COORD10_EXT"), # 0x87A7
("", X, 1, "GL_OUTPUT_TEXTURE_COORD11_EXT"), # 0x87A8
("", X, 1, "GL_OUTPUT_TEXTURE_COORD12_EXT"), # 0x87A9
("", X, 1, "GL_OUTPUT_TEXTURE_COORD13_EXT"), # 0x87AA
("", X, 1, "GL_OUTPUT_TEXTURE_COORD14_EXT"), # 0x87AB
("", X, 1, "GL_OUTPUT_TEXTURE_COORD15_EXT"), # 0x87AC
("", X, 1, "GL_OUTPUT_TEXTURE_COORD16_EXT"), # 0x87AD
("", X, 1, "GL_OUTPUT_TEXTURE_COORD17_EXT"), # 0x87AE
("", X, 1, "GL_OUTPUT_TEXTURE_COORD18_EXT"), # 0x87AF
("", X, 1, "GL_OUTPUT_TEXTURE_COORD19_EXT"), # 0x87B0
("", X, 1, "GL_OUTPUT_TEXTURE_COORD20_EXT"), # 0x87B1
("", X, 1, "GL_OUTPUT_TEXTURE_COORD21_EXT"), # 0x87B2
("", X, 1, "GL_OUTPUT_TEXTURE_COORD22_EXT"), # 0x87B3
("", X, 1, "GL_OUTPUT_TEXTURE_COORD23_EXT"), # 0x87B4
("", X, 1, "GL_OUTPUT_TEXTURE_COORD24_EXT"), # 0x87B5
("", X, 1, "GL_OUTPUT_TEXTURE_COORD25_EXT"), # 0x87B6
("", X, 1, "GL_OUTPUT_TEXTURE_COORD26_EXT"), # 0x87B7
("", X, 1, "GL_OUTPUT_TEXTURE_COORD27_EXT"), # 0x87B8
("", X, 1, "GL_OUTPUT_TEXTURE_COORD28_EXT"), # 0x87B9
("", X, 1, "GL_OUTPUT_TEXTURE_COORD29_EXT"), # 0x87BA
("", X, 1, "GL_OUTPUT_TEXTURE_COORD30_EXT"), # 0x87BB
("", X, 1, "GL_OUTPUT_TEXTURE_COORD31_EXT"), # 0x87BC
("", X, 1, "GL_OUTPUT_FOG_EXT"), # 0x87BD
("", X, 1, "GL_SCALAR_EXT"), # 0x87BE
("", X, 1, "GL_VECTOR_EXT"), # 0x87BF
("", X, 1, "GL_MATRIX_EXT"), # 0x87C0
("", X, 1, "GL_VARIANT_EXT"), # 0x87C1
("", X, 1, "GL_INVARIANT_EXT"), # 0x87C2
("", X, 1, "GL_LOCAL_CONSTANT_EXT"), # 0x87C3
("", X, 1, "GL_LOCAL_EXT"), # 0x87C4
("", X, 1, "GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT"), # 0x87C5
("", X, 1, "GL_MAX_VERTEX_SHADER_VARIANTS_EXT"), # 0x87C6
("", X, 1, "GL_MAX_VERTEX_SHADER_INVARIANTS_EXT"), # 0x87C7
("", X, 1, "GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"), # 0x87C8
("", X, 1, "GL_MAX_VERTEX_SHADER_LOCALS_EXT"), # 0x87C9
("", X, 1, "GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT"), # 0x87CA
("", X, 1, "GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT"), # 0x87CB
("", X, 1, "GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT"), # 0x87CC
("", X, 1, "GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT"), # 0x87CD
("", X, 1, "GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT"), # 0x87CE
("", X, 1, "GL_VERTEX_SHADER_INSTRUCTIONS_EXT"), # 0x87CF
| |
= cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(1.0),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(0.9, 3.0),
d0_par2 = cms.vdouble(1.0, 3.0),
dz_par1 = cms.vdouble(0.9, 3.0),
dz_par2 = cms.vdouble(1.0, 3.0),
keepAllTracks = cms.bool(False),
maxNumberLostLayers = cms.uint32(999),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(0),
minNumberLayers = cms.uint32(3),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepVtxLoose'),
preFilterName = cms.string(''),
qualityBit = cms.string('loose'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
),
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts = cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(0.6),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(1.3, 4.0),
d0_par2 = cms.vdouble(1.3, 4.0),
dz_par1 = cms.vdouble(1.3, 4.0),
dz_par2 = cms.vdouble(1.3, 4.0),
keepAllTracks = cms.bool(False),
maxNumberLostLayers = cms.uint32(999),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(0),
minNumberLayers = cms.uint32(3),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepTrkLoose'),
preFilterName = cms.string(''),
qualityBit = cms.string('loose'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
),
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts = cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(0.9),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(0.9, 3.0),
d0_par2 = cms.vdouble(0.9, 3.0),
dz_par1 = cms.vdouble(0.9, 3.0),
dz_par2 = cms.vdouble(0.9, 3.0),
keepAllTracks = cms.bool(True),
maxNumberLostLayers = cms.uint32(1),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(3),
minNumberLayers = cms.uint32(3),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepVtxTight'),
preFilterName = cms.string('detachedQuadStepVtxLoose'),
qualityBit = cms.string('tight'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
),
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts = cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(0.5),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(1.1, 4.0),
d0_par2 = cms.vdouble(1.1, 4.0),
dz_par1 = cms.vdouble(1.1, 4.0),
dz_par2 = cms.vdouble(1.1, 4.0),
keepAllTracks = cms.bool(True),
maxNumberLostLayers = cms.uint32(1),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(3),
minNumberLayers = cms.uint32(4),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepTrkTight'),
preFilterName = cms.string('detachedQuadStepTrkLoose'),
qualityBit = cms.string('tight'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
),
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts = cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(0.9),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(0.8, 3.0),
d0_par2 = cms.vdouble(0.8, 3.0),
dz_par1 = cms.vdouble(0.8, 3.0),
dz_par2 = cms.vdouble(0.8, 3.0),
keepAllTracks = cms.bool(True),
maxNumberLostLayers = cms.uint32(1),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(3),
minNumberLayers = cms.uint32(3),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepVtx'),
preFilterName = cms.string('detachedQuadStepVtxTight'),
qualityBit = cms.string('highPurity'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
),
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts = cms.bool(True),
chi2n_no1Dmod_par = cms.double(9999),
chi2n_par = cms.double(0.5),
copyExtras = cms.untracked.bool(True),
copyTrajectories = cms.untracked.bool(False),
d0_par1 = cms.vdouble(0.9, 4.0),
d0_par2 = cms.vdouble(0.9, 4.0),
dz_par1 = cms.vdouble(0.9, 4.0),
dz_par2 = cms.vdouble(0.9, 4.0),
keepAllTracks = cms.bool(True),
maxNumberLostLayers = cms.uint32(1),
max_d0 = cms.double(100.0),
max_eta = cms.double(9999.0),
max_lostHitFraction = cms.double(1.0),
max_minMissHitOutOrIn = cms.int32(99),
max_relpterr = cms.double(9999.0),
max_z0 = cms.double(100.0),
minHitsToBypassChecks = cms.uint32(20),
minNumber3DLayers = cms.uint32(3),
minNumberLayers = cms.uint32(4),
min_eta = cms.double(-9999.0),
min_nhits = cms.uint32(0),
nSigmaZ = cms.double(4.0),
name = cms.string('detachedQuadStepTrk'),
preFilterName = cms.string('detachedQuadStepTrkTight'),
qualityBit = cms.string('highPurity'),
res_par = cms.vdouble(0.003, 0.001),
vertexCut = cms.string('ndof>=2&!isFake'),
vtxNumber = cms.int32(-1)
)
),
useVertices = cms.bool(True),
useVtxError = cms.bool(False),
vertices = cms.InputTag("firstStepPrimaryVertices")
)
# Chi2 measurement estimator (with a strip-cluster-charge hook) used by the
# detachedQuadStep pattern-recognition iteration.
detachedQuadStepChi2Est = cms.ESProducer("Chi2ChargeMeasurementEstimatorESProducer",
    ComponentName = cms.string('detachedQuadStepChi2Est'),
    MaxChi2 = cms.double(12.0),
    MaxDisplacement = cms.double(0.5),
    MaxSagitta = cms.double(2),
    # Huge threshold: presumably disables hit recovery in glued detectors -- TODO confirm
    MinPtForHitRecoveryInGluedDet = cms.double(1000000000000),
    MinimalTolerance = cms.double(0.5),
    appendToDataLabel = cms.string(''),
    clusterChargeCut = cms.PSet(
        # 'None' variant: no strip cluster charge requirement is applied
        refToPSet_ = cms.string('SiStripClusterChargeCutNone')
    ),
    nSigma = cms.double(3),
    pTChargeCutThreshold = cms.double(-1)
)
# Cleaner that arbitrates between trajectories sharing hits for this iteration.
detachedQuadStepTrajectoryCleanerBySharedHits = cms.ESProducer("TrajectoryCleanerESProducer",
    ComponentName = cms.string('detachedQuadStepTrajectoryCleanerBySharedHits'),
    ComponentType = cms.string('TrajectoryCleanerBySharedHits'),
    MissingHitPenalty = cms.double(20.0),
    ValidHitBonus = cms.double(5.0),
    allowSharedFirstHit = cms.bool(True),
    fractionShared = cms.double(0.13)
)
# Grouped CKF trajectory-building parameters for the detachedQuadStep iteration.
# Referenced by name (refToPSet_) from detachedQuadStepTrackCandidates below.
detachedQuadStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string('GroupedCkfTrajectoryBuilder'),
    MeasurementTrackerName = cms.string(''),
    TTRHBuilder = cms.string('WithTrackAngle'),
    alwaysUseInvalidHits = cms.bool(False),
    bestHitOnly = cms.bool(True),
    # chi2 estimator defined above in this file
    estimator = cms.string('detachedQuadStepChi2Est'),
    foundHitBonus = cms.double(10.0),
    inOutTrajectoryFilter = cms.PSet(
        refToPSet_ = cms.string('CkfBaseTrajectoryFilter_block')
    ),
    intermediateCleaning = cms.bool(True),
    keepOriginalIfRebuildFails = cms.bool(False),
    lockHits = cms.bool(True),
    lostHitPenalty = cms.double(30.0),
    # at most two candidate trajectories are propagated per seed
    maxCand = cms.int32(2),
    maxDPhiForLooperReconstruction = cms.double(2.0),
    maxPtForLooperReconstruction = cms.double(0.7),
    minNrOfHitsForRebuild = cms.int32(5),
    propagatorAlong = cms.string('PropagatorWithMaterial'),
    propagatorOpposite = cms.string('PropagatorWithMaterialOpposite'),
    requireSeedHitsInRebuild = cms.bool(True),
    seedAs5DHit = cms.bool(False), #cmssw_11_0
    trajectoryFilter = cms.PSet(
        refToPSet_ = cms.string('detachedQuadStepTrajectoryFilter')
    ),
    updator = cms.string('KFUpdator'),
    useSameTrajFilter = cms.bool(True)
)
# Composite filter: combines the base CKF filter below with the shared
# cluster-shape filter (defined elsewhere in the configuration).
detachedQuadStepTrajectoryFilter = cms.PSet(
    ComponentType = cms.string('CompositeTrajectoryFilter'),
    filters = cms.VPSet(
        cms.PSet(
            refToPSet_ = cms.string('detachedQuadStepTrajectoryFilterBase')
        ),
        cms.PSet(
            refToPSet_ = cms.string('ClusterShapeTrajectoryFilter')
        )
    )
)
# Base per-trajectory quality cuts applied during CKF building.
detachedQuadStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string('CkfBaseTrajectoryFilter'),
    chargeSignificance = cms.double(-1.0),
    constantValueForLostHitsFractionFilter = cms.double(0.301),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32(4),
    # 9999 / 999 sentinels: these lost-hit counters are effectively unbounded
    maxCCCLostHits = cms.int32(9999),
    maxConsecLostHits = cms.int32(1),
    maxLostHits = cms.int32(999),
    maxLostHitsFraction = cms.double(0.1),
    maxNumberOfHits = cms.int32(100),
    minGoodStripCharge = cms.PSet(
        refToPSet_ = cms.string('SiStripClusterChargeCutNone')
    ),
    minHitsMinPt = cms.int32(3),
    minNumberOfHitsForLoopers = cms.int32(13),
    minNumberOfHitsPerLoop = cms.int32(4),
    minPt = cms.double(0.075),
    minimumNumberOfHits = cms.int32(3),
    nSigmaMinPt = cms.double(5.0),
    pixelSeedExtension = cms.bool(False),
    seedExtension = cms.int32(0),
    seedPairPenalty = cms.int32(0),
    strictSeedExtension = cms.bool(False)
)
# CKF track-candidate maker for the detachedQuadStep iteration: consumes the
# iteration's seeds and emits trajectory candidates for the final fit below.
detachedQuadStepTrackCandidates = cms.EDProducer("CkfTrackCandidateMaker",
    MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent"),
    NavigationSchool = cms.string('SimpleNavigationSchool'),
    RedundantSeedCleaner = cms.string('CachingSeedCleanerBySharedInput'),
    SimpleMagneticField = cms.string(''),
    TrajectoryBuilder = cms.string('GroupedCkfTrajectoryBuilder'),
    TrajectoryBuilderPSet = cms.PSet(
        # builder parameters defined above in this file
        refToPSet_ = cms.string('detachedQuadStepTrajectoryBuilder')
    ),
    TrajectoryCleaner = cms.string('detachedQuadStepTrajectoryCleanerBySharedHits'),
    TransientInitialStateEstimatorParameters = cms.PSet(
        numberMeasurementsForFit = cms.int32(4),
        propagatorAlongTISE = cms.string('PropagatorWithMaterial'),
        propagatorOppositeTISE = cms.string('PropagatorWithMaterialOpposite')
    ),
    cleanTrajectoryAfterInOut = cms.bool(True),
    doSeedingRegionRebuilding = cms.bool(True),
    maxNSeeds = cms.uint32(500000),
    maxSeedsBeforeCleaning = cms.uint32(5000),
    numHitsForSeedCleaner = cms.int32(50),
    onlyPixelHitsForSeedCleaner = cms.bool(True),
    # clusters already used by earlier iterations are masked out
    phase2clustersToSkip = cms.InputTag("detachedQuadStepClusters"),
    reverseTrajectories = cms.bool(False),
    src = cms.InputTag("detachedQuadStepSeeds"),
    useHitsSplitting = cms.bool(True)
)
# Beam-spot-centred tracking region used for detachedQuadStep seeding.
detachedQuadStepTrackingRegions = cms.EDProducer("GlobalTrackingRegionFromBeamSpotEDProducer",
    RegionPSet = cms.PSet(
        beamSpot = cms.InputTag("offlineBeamSpot"),
        nSigmaZ = cms.double(5.0),
        originHalfLength = cms.double(0),
        originRadius = cms.double(0.9),
        precise = cms.bool(True),
        ptMin = cms.double(0.45),
        useMultipleScattering = cms.bool(False)
    )
)
# Final Kalman track fit of the candidates produced above.
detachedQuadStepTracks = cms.EDProducer("TrackProducer",
    AlgorithmName = cms.string('detachedQuadStep'),
    Fitter = cms.string('FlexibleKFFittingSmoother'),
    GeometricInnerState = cms.bool(False),
    MeasurementTracker = cms.string(''),
    MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent"),
    NavigationSchool = cms.string('SimpleNavigationSchool'),
    Propagator = cms.string('RungeKuttaTrackerPropagator'),
    SimpleMagneticField = cms.string(''),
    TTRHBuilder = cms.string('WithTrackAngle'),
    TrajectoryInEvent = cms.bool(False),
    alias = cms.untracked.string('ctfWithMaterialTracks'),
    beamSpot = cms.InputTag("offlineBeamSpot"),
    clusterRemovalInfo = cms.InputTag(""),
    src = cms.InputTag("detachedQuadStepTrackCandidates"),
    useHitsSplitting = cms.bool(False),
    useSimpleMF = cms.bool(False)
)
# Seed generator for displaced muons from DT/CSC segments.
displacedMuonSeeds = cms.EDProducer("CosmicMuonSeedGenerator",
    CSCRecSegmentLabel = cms.InputTag("cscSegments"),
    DTRecSegmentLabel = cms.InputTag("dt4DSegments"),
    EnableCSCMeasurement = cms.bool(True),
    EnableDTMeasurement = cms.bool(True),
    ForcePointDown = cms.bool(False),
    MaxCSCChi2 = cms.double(300.0),
    MaxDTChi2 = cms.double(300.0),
    MaxSeeds = cms.int32(1000)
)
# Chi2 estimator for the displaced-track duplicate merger.
# NOTE(review): the Python label ('duplicateDisplace...') and the ComponentName
# string ('duplicateDisplaced...') differ by one letter; consumers look it up by
# ComponentName, so this is likely intentional upstream -- confirm before renaming.
duplicateDisplaceTrackCandidatesChi2Est = cms.ESProducer("Chi2MeasurementEstimatorESProducer",
    ComponentName = cms.string('duplicateDisplacedTrackCandidatesChi2Est'),
    MaxChi2 = cms.double(100),
    MaxDisplacement = cms.double(0.5),
    MaxSagitta = cms.double(2),
    MinPtForHitRecoveryInGluedDet = cms.double(1000000000000),
    MinimalTolerance = cms.double(0.5),
    appendToDataLabel = cms.string(''),
    nSigma = cms.double(3)
)
# Chi2 estimator used by duplicateTrackCandidates below.
duplicateTrackCandidatesChi2Est = cms.ESProducer("Chi2MeasurementEstimatorESProducer",
    ComponentName = cms.string('duplicateTrackCandidatesChi2Est'),
    MaxChi2 = cms.double(100),
    MaxDisplacement = cms.double(0.5),
    MaxSagitta = cms.double(2),
    MinPtForHitRecoveryInGluedDet = cms.double(1000000000000),
    MinimalTolerance = cms.double(0.5),
    appendToDataLabel = cms.string(''),
    nSigma = cms.double(3)
)
# MVA-based identification and merging of duplicate tracks in the
# pre-duplicate-merging general-track collection.
duplicateTrackCandidates = cms.EDProducer("DuplicateTrackMerger",
    GBRForestFileName = cms.string(''),
    chi2EstimatorName = cms.string('duplicateTrackCandidatesChi2Est'),
    forestLabel = cms.string('MVADuplicate'),
    maxDCA = cms.double(30),
    maxDLambda = cms.double(0.3),
    maxDPhi = cms.double(0.3),
    maxDQoP = cms.double(0.25),
    maxDdsz = cms.double(10),
    maxDdxy = cms.double(10),
    minBDTG = cms.double(-0.1),
    minDeltaR3d = cms.double(-4),
    minP = cms.double(0.4),
    minpT = cms.double(0.2),
    overlapCheckMaxHits = cms.uint32(4),
    overlapCheckMaxMissingLayers = cms.uint32(1),
    overlapCheckMinCosT = cms.double(0.99),
    propagatorName = cms.string('PropagatorWithMaterial'),
    source = cms.InputTag("preDuplicateMergingGeneralTracks"),
    ttrhBuilderName = cms.string('WithTrackAngle'),
    useInnermostState = cms.bool(True)
)
# Cut-based classifier for the merged duplicate tracks; the three-element
# vectors give the (loose, tight, highPurity) working points.
duplicateTrackClassifier = cms.EDProducer("TrackCutClassifier",
    beamspot = cms.InputTag("offlineBeamSpot"),
    ignoreVertices = cms.bool(False),
    mva = cms.PSet(
        dr_par = cms.PSet(
            d0err = cms.vdouble(0.003, 0.003, 0.003),
            d0err_par = cms.vdouble(0.001, 0.001, 0.001),
            # 3.40282346639e+38 is float-max: the cut is effectively disabled
            drWPVerr_par = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
            dr_exp = cms.vint32(2147483647, 2147483647, 2147483647),
            dr_par1 = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
            dr_par2 = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38)
        ),
        dz_par = cms.PSet(
            dzWPVerr_par = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
            dz_exp = cms.vint32(2147483647, 2147483647, 2147483647),
            dz_par1 = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
            dz_par2 = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38)
        ),
        isHLT = cms.bool(False),
        maxChi2 = cms.vdouble(9999.0, 9999.0, 9999.0),
        maxChi2n = cms.vdouble(10.0, 1.0, 0.4),
        maxDr = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
        maxDz = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
        maxDzWrtBS = cms.vdouble(3.40282346639e+38, 24, 15),
        maxLostLayers = cms.vint32(99, 99, 99),
        maxRelPtErr = cms.vdouble(3.40282346639e+38, 3.40282346639e+38, 3.40282346639e+38),
        min3DLayers = cms.vint32(0, 0, 0),
        minHits = cms.vint32(0, 0, 1),
        minHits4pass = cms.vint32(2147483647, 2147483647, 2147483647),
        minLayers = cms.vint32(0, 0, 0),
        minNVtxTrk = cms.int32(2),
        minNdof = cms.vdouble(-1, -1, -1),
        minPixelHits = cms.vint32(0, 0, 0)
    ),
    qualityCuts = cms.vdouble(-0.7, 0.1, 0.7),
    src = cms.InputTag("mergedDuplicateTracks"),
    vertices = cms.InputTag("firstStepPrimaryVertices")
)
# Priority ordering of the iterative-tracking algorithms, used when merging
# track collections (earlier entries win on equal-quality duplicates --
# presumed semantics of the ES producer; confirm against its documentation).
trackAlgoPriorityOrder = cms.ESProducer("TrackAlgoPriorityOrderESProducer",
    ComponentName = cms.string('trackAlgoPriorityOrder'),
    algoOrder = cms.vstring(
        'initialStep',
        'highPtTripletStep',
        'lowPtQuadStep',
        'lowPtTripletStep',
        'detachedQuadStep',
        'pixelPairStep',
        'muonSeededStepInOut',
        'muonSeededStepOutIn'
    ),
    appendToDataLabel = cms.string('')
)
trackCleaner | |
# filename: train_frcnn_val.py
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import re
import pandas as pd
from keras import backend as K
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.layers import Input
from keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from keras.utils import generic_utils
from keras_frcnn.simple_parser import get_data
from keras_frcnn.dataframe import filenames_per_batch
from keras_frcnn.simple_parser_datagen import get_classes_batch
from keras_frcnn.dataframe import get_dataframe
from keras_frcnn.simple_parser_datagen import get_data_batch
sys.setrecursionlimit(40000)
# Parsing annotation file
# Command-line interface for the Faster R-CNN training run: data paths,
# annotation-parser flavour, augmentation flags and training hyper-parameters.
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path",
                  help="Path to training data.")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
                  default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                  help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network",
                  help="Base network to use. Supports vgg or resnet50.", default='resnet50')
# Data-augmentation switches (all off by default).
parser.add_option("--hf", dest="horizontal_flips",
                  help="Augment with horizontal flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips",
                  help="Augment with vertical flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90", help="Augment with 90 degree rotations in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--num_epochs", type="int", dest="num_epochs",
                  help="Number of epochs.", default=1000)
# No default: a missing --batch_size will fail later at int(options.batch_size).
parser.add_option("--batch_size", type="int", dest="batch_size",
                  help="Size of batch for training for Image Data Generator")
parser.add_option("--config_filename", dest="config_filename", help="Location to store all the metadata related to the training (to be used when testing).",
                  default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path",
                  help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path",
                  help="Input path for weights. If not specified, will try to load default weights provided by keras.")
parser.add_option("--ovt", "--overlap_threshold", type="float", dest="overlap_threshold",
                  help="Value of overlap threshold for non-max-suppression.", default=0.7)
(options, args) = parser.parse_args()
# Raise error if name of input path not given
if not options.train_path: # if filename is not given
parser.error(
'Error: path to training data must be specified. Pass --path to command line')
# Chose the right parser according to type of data
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple_datagen_val':
from keras_frcnn.simple_parser_datagen_val import get_data_batch
else:
raise ValueError(
"Command line option parser must be one of 'pascal_voc' or 'simple'")
# Pass the settings from the command line, and persist them in the config object.
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)

# Path where the model weights (.hdf5) will be saved.
C.model_path = options.output_weight_path

# BUG FIX: re.match returns None when the path does not end in '.hdf5'; the
# original code then crashed with AttributeError on .group(2) instead of
# printing the intended message (and when a match succeeded, group(2) was
# always '.hdf5', so that comparison could never fire). A raw string also
# silences the invalid '\.' escape warning.
model_path_regex = re.match(r"^(.+)(\.hdf5)$", C.model_path)
if model_path_regex is None:
    print('Output weights must have .hdf5 filetype')
    exit(1)
C.num_rois = int(options.num_rois)
# To select based model
# Select the backbone network; imports the matching keras_frcnn module as `nn`.
if options.network == 'vgg':
    C.network = 'vgg'
    from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
    from keras_frcnn import resnet as nn
    C.network = 'resnet50'
else:
    print('Not a valid model')
    raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
    C.base_net_weights = options.input_weight_path
else:
    # set the path to weights based on backend and model
    C.base_net_weights = nn.get_weight_path()
# NMS overlap threshold used later by rpn_to_roi.
overlap_threshold = options.overlap_threshold
# Training Data
# get dataframe from train_images folder
df_train = get_dataframe("train_images")
print("Number of training images:", len(df_train))
# use ImageDataGenerator to load images per batch (pixel values scaled to [0,1])
train_datagen = ImageDataGenerator(rescale=1./255.)
# set batch size (no default on the CLI option, so this raises if omitted)
batch_size = int(options.batch_size)
BATCH_SIZE = batch_size
# Create the batches of training images; seed=42 makes the shuffling reproducible.
train_generator = train_datagen.flow_from_dataframe(dataframe=df_train,
                                                    directory='train_images/',
                                                    x_col="image_path",
                                                    y_col="class",
                                                    subset="training",
                                                    batch_size=BATCH_SIZE,
                                                    seed=42,
                                                    shuffle=True,
                                                    class_mode="categorical",
                                                    target_size=(256, 256),
                                                    validate_filenames=True)
# to get the filenames of the images put in the different batches (returns a list of lists, the first list of this list contains the filenames of the first batch)
imgs_per_batch_train = filenames_per_batch(train_generator)
# just for checking, must be equal to the number of batches per epoch below
print('length of list called imgs per batch train (must be equal to number of batches):', len(imgs_per_batch_train))
# to calculate the number of batches per epoch using the number of samples and the batch size
# (integer division plus one extra batch when there is a remainder, i.e. ceil)
batches_per_epoch = train_generator.samples // train_generator.batch_size + (train_generator.samples % train_generator.batch_size > 0)
print('Number of batches per epoch = {}'.format(batches_per_epoch))
# validation data
df_val = get_dataframe("val_images")
num_val_imgs = len(df_val)
print("Number of validation images:", len(df_val))
# calculate the batch size so the validation set splits into the same number
# of batches as one training epoch (ceiling division written out explicitly)
if num_val_imgs % batches_per_epoch == 0:
    BATCH_SIZE_VAL = num_val_imgs // batches_per_epoch
else:
    BATCH_SIZE_VAL = (num_val_imgs // batches_per_epoch)+1
print("batch size for validation:", BATCH_SIZE_VAL)
val_datagen = ImageDataGenerator(rescale=1./255.)
# NOTE(review): subset="training" on the validation generator looks odd -- it
# only has an effect when validation_split is set on the generator; confirm.
val_generator = val_datagen.flow_from_dataframe(dataframe=df_val,
                                                directory='val_images/',
                                                x_col="image_path",
                                                y_col="class",
                                                subset="training",
                                                batch_size=BATCH_SIZE_VAL,
                                                seed=42,
                                                shuffle=True,
                                                class_mode="categorical",
                                                target_size=(256, 256),
                                                validate_filenames=True)
# to get the filenames of the images put in the different batches
imgs_per_batch_val = filenames_per_batch(val_generator)
#print('number of imgs per batch for validation:',len(imgs_per_batch_val))
# Append the run's sizing parameters to a plain-text log for later reference.
save_log_data = ('\n batch size training = {} , batch size validation = {} ,Number of training images ={}, Number of validation images={}'.format(BATCH_SIZE, BATCH_SIZE_VAL, len(df_train), len(df_val)))
with open("./saving_params.txt", "a") as f:
    f.write(save_log_data)
# Model parameters
# Separate Adam optimizers for the RPN and the detection head.
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
# NOTE(review): epoch_length (iterations of the inner while-loop) is set equal
# to the image batch size -- confirm this pairing is intentional.
epoch_length = BATCH_SIZE
num_epochs = int(options.num_epochs)
# Class counts/mapping are derived from the FIRST training batch only --
# assumes every class appears in batch 0; TODO confirm.
classes_count, class_mapping = get_classes_batch(
    options.train_path, imgs_per_batch_train[0], 'train')
# Ensure the background class is present in the mapping.
if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
# Inverse mapping: class index -> class name.
inv_map = {v: k for k, v in class_mapping.items()}
print('Num classes (including bg) = {}'.format(len(classes_count)))
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
#print('number of anchors:', num_anchors)
# Channels-first ('th') vs channels-last input shape, depending on the backend.
if K.common.image_dim_ordering() == 'th':
    input_shape_img = (3, None, None)
else:
    input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
# Each RoI is (x, y, w, h); the number of RoIs per image is left open.
roi_input = Input(shape=(None, 4))
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
# Detection head: classifies and regresses boxes for the proposed RoIs.
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(
    classes_count), trainable=True)
model_base = Model(img_input, shared_layers)
# rpn[:2] = (objectness scores, box regressions); any extra outputs are dropped.
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
try:
    # BUG FIX: the original print was missing the f-prefix, so the literal text
    # '{C.base_net_weights}' was printed instead of the actual path.
    print(f'loading weights from {C.base_net_weights}')
    model_rpn.load_weights(C.base_net_weights, by_name=True)
    model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception as err:
    # Best-effort: training can still start from random initialization, but
    # report WHY loading failed instead of the original bare `except:` that
    # silently swallowed every exception (including KeyboardInterrupt).
    print('Could not load pretrained model weights')
    print(err)

# Compile the three models: RPN, detection head, and the combined save/load model.
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
# model_all is compiled only so weights can be saved/loaded; it is never trained.
model_all.compile(optimizer='sgd', loss='mae')

# Persist the config object so the test script can reproduce this setup.
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
    pickle.dump(C, config_f)
    print("Config has been written to {}".format(config_output_filename))
# Accumulators for per-epoch metrics, filled during the training loop below.
loss_valid, acc_valid = [], []
loss_train, acc_train = [], []
epoch_number = []

# Wall-clock timer covering the entire training run.
start_total = time.time()
print('..............Starting training ............\n')
for epoch_num in range(num_epochs):
progbar = generic_utils.Progbar(epoch_length)
print("Epoch {} / {}".format(epoch_num + 1, num_epochs))
# timer for epoch training
start_ep = time.time()
iter_num = 0
# loop over batches
for i in range(batches_per_epoch):
print("Batch {} / {}".format(i + 1, batches_per_epoch))
# get data for images in batch
train_imgs = get_data_batch(options.train_path, imgs_per_batch_train[i])
print("Num train samples {}".format(len(train_imgs)))
# get ground truth data
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.common.image_dim_ordering(), mode='train')
# set initial values
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
best_loss = np.Inf
# timer for batch training
start_batch = time.time()
while True:
try:
#
if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print("Average number of overlapping bounding boxes from RPN = {} for {} previous iterations".format(mean_overlapping_bboxes, epoch_length))
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
X, Y, img_data = next(data_gen_train)
loss_rpn = model_rpn.train_on_batch(X, Y)
P_rpn = model_rpn.predict_on_batch(X)
R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.common.image_dim_ordering(), use_regr=True, overlap_thresh=overlap_threshold, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois//2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(
pos_samples, C.num_rois//2, replace=False).tolist()
try:
selected_neg_samples = np.random.choice(
neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
losses[iter_num, 0] = loss_rpn[1]
losses[iter_num, 1] = loss_rpn[2]
losses[iter_num, 2] = loss_class[1]
losses[iter_num, 3] = loss_class[2]
losses[iter_num, 4] = loss_class[3]
progbar.update(iter_num+1, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3])])
iter_num += 1
if iter_num == epoch_length:
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {0}'.format( mean_overlapping_bboxes))
print('Classifier accuracy for bounding boxes from RPN: {0}'.format(class_acc))
print('Loss RPN classifier: {0}'.format(loss_rpn_cls))
print('Loss RPN regression: {0}'.format(loss_rpn_regr))
print('Loss Detector classifier: {0}'.format(loss_class_cls))
print('Loss Detector | |
= prob['o']
assert_near_equal(obj, 20.0, 1e-6)
def test_driver_supports(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
prob.driver = pyOptSparseDriver(optimizer=OPTIMIZER, print_results=False)
with self.assertRaises(KeyError) as raises_msg:
prob.driver.supports['equality_constraints'] = False
exception = raises_msg.exception
msg = "pyOptSparseDriver: Tried to set read-only option 'equality_constraints'."
self.assertEqual(exception.args[0], msg)
    def test_fan_out(self):
        """Two independent branches feeding one objective: each constraint
        should depend only on its own design variable, and pyOpt should
        record exactly those sparse 'wrt' lists."""
        # This tests sparse-response specification.
        # This is a slightly modified FanOut
        prob = om.Problem()
        model = prob.model

        model.add_subsystem('p1', om.IndepVarComp('x', 1.0))
        model.add_subsystem('p2', om.IndepVarComp('x', 1.0))
        model.add_subsystem('comp1', om.ExecComp('y = 3.0*x'))
        model.add_subsystem('comp2', om.ExecComp('y = 5.0*x'))
        model.add_subsystem('obj', om.ExecComp('o = i1 + i2'))
        model.add_subsystem('con1', om.ExecComp('c = 15.0 - x'))
        model.add_subsystem('con2', om.ExecComp('c = 15.0 - x'))

        # hook up explicitly (no promotion, to keep the sparsity visible)
        model.connect('p1.x', 'comp1.x')
        model.connect('p2.x', 'comp2.x')
        model.connect('comp1.y', 'obj.i1')
        model.connect('comp2.y', 'obj.i2')
        model.connect('comp1.y', 'con1.x')
        model.connect('comp2.y', 'con2.x')

        prob.set_solver_print(level=0)
        prob.driver = pyOptSparseDriver()
        prob.driver.options['optimizer'] = OPTIMIZER
        prob.driver.options['print_results'] = False

        model.add_design_var('p1.x', lower=-50.0, upper=50.0)
        model.add_design_var('p2.x', lower=-50.0, upper=50.0)
        model.add_objective('obj.o')
        model.add_constraint('con1.c', equals=0.0)
        model.add_constraint('con2.c', equals=0.0)

        prob.setup()
        failed = prob.run_driver()

        self.assertFalse(failed, "Optimization failed, info = " +
                         str(prob.driver.pyopt_solution.optInform))

        obj = prob['obj.o']
        assert_near_equal(obj, 30.0, 1e-6)

        # Verify that pyOpt has the correct wrt names (one DV per constraint)
        con1 = prob.driver.pyopt_solution.constraints['con1.c']
        self.assertEqual(con1.wrt, ['p1.x'])
        con2 = prob.driver.pyopt_solution.constraints['con2.c']
        self.assertEqual(con2.wrt, ['p2.x'])
    def test_inf_as_desvar_bounds(self):
        """SLSQP must tolerate +/-np.inf design-variable bounds."""
        # User may use np.inf as a bound. It is unneccessary, but the user
        # may do it anyway, so make sure SLSQP doesn't blow up with it (bug
        # reported by rfalck)
        prob = om.Problem()
        model = prob.model

        model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
        model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
        model.add_subsystem('comp', Paraboloid(), promotes=['*'])
        model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])

        prob.set_solver_print(level=0)

        prob.driver = pyOptSparseDriver()
        prob.driver.options['optimizer'] = 'SLSQP'
        prob.driver.opt_settings['ACC'] = 1e-9
        prob.driver.options['print_results'] = False

        # 'x' gets the infinite bounds under test; 'y' keeps finite ones.
        model.add_design_var('x', lower=-np.inf, upper=np.inf)
        model.add_design_var('y', lower=-50.0, upper=50.0)
        model.add_objective('f_xy')
        model.add_constraint('c', upper=-15.0)

        prob.setup()
        failed = prob.run_driver()

        self.assertFalse(failed, "Optimization failed, info = " +
                         str(prob.driver.pyopt_solution.optInform))

        # Minimum should be at (7.166667, -7.833334)
        assert_near_equal(prob['x'], 7.16667, 1e-6)
        assert_near_equal(prob['y'], -7.833334, 1e-6)
def test_pyopt_fd_solution(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'pyopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup()
failed = prob.run_driver()
self.assertFalse(failed, "Optimization failed, info = " +
str(prob.driver.pyopt_solution.optInform))
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob['x'], 7.16667, 1e-4)
assert_near_equal(prob['y'], -7.833334, 1e-4)
def test_pyopt_fd_is_called(self):
class ParaboloidApplyLinear(Paraboloid):
def apply_linear(params, unknowns, resids):
raise Exception("OpenMDAO's finite difference has been called."
" pyopt_fd option has failed.")
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', ParaboloidApplyLinear(), promotes=['*'])
model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'pyopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup()
failed = prob.run_driver()
self.assertFalse(failed, "Optimization failed, info = " +
str(prob.driver.pyopt_solution.optInform))
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob['x'], 7.16667, 1e-4)
assert_near_equal(prob['y'], -7.833334, 1e-4)
    def test_snopt_fd_option_error(self):
        """Selecting 'snopt_fd' with a non-SNOPT optimizer must raise with a
        clear message at run_driver time."""
        prob = om.Problem()
        model = prob.model

        model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
        model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
        model.add_subsystem('comp', Paraboloid(), promotes=['*'])
        model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])

        prob.set_solver_print(level=0)

        prob.driver = pyOptSparseDriver()
        # Deliberately mismatched: SLSQP optimizer with SNOPT-only FD mode.
        prob.driver.options['optimizer'] = 'SLSQP'
        prob.driver.options['gradient method'] = 'snopt_fd'
        prob.driver.options['print_results'] = False

        model.add_design_var('x', lower=-50.0, upper=50.0)
        model.add_design_var('y', lower=-50.0, upper=50.0)
        model.add_objective('f_xy')
        model.add_constraint('c', upper=-15.0)

        prob.setup()

        with self.assertRaises(Exception) as raises_cm:
            prob.run_driver()

        exception = raises_cm.exception

        msg = "SNOPT's internal finite difference can only be used with SNOPT"
        self.assertEqual(exception.args[0], msg)
def test_unsupported_multiple_obj(self):
    """Adding two objectives with an optimizer that supports only one
    must raise a RuntimeError during final_setup."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('comp2', Paraboloid())
    model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['gradient method'] = 'snopt_fd'
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    # Second objective: unsupported by single-objective optimizers like SLSQP.
    model.add_objective('comp2.f_xy')
    model.add_constraint('c', upper=-15.0)
    expected = 'Multiple objectives have been added to pyOptSparseDriver' \
               ' but the selected optimizer (SLSQP) does not support' \
               ' multiple objectives.'
    prob.setup()
    with self.assertRaises(RuntimeError) as cm:
        prob.final_setup()
    self.assertEqual(str(cm.exception), expected)
def test_simple_paraboloid_scaled_desvars_fwd(self):
    """Scaled design variables (ref=50) with forward-mode derivatives."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0)
    prob.setup(check=False, mode='fwd')
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_desvars_fd(self):
    """Scaled design variables (ref=50) with finite-difference totals."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0)
    model.approx_totals(method='fd')
    prob.setup()
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_desvars_cs(self):
    """Scaled design variables (ref=50) with complex-step totals."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0)
    model.approx_totals(method='cs')
    prob.setup()
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_desvars_rev(self):
    """Scaled design variables (ref=50) with reverse-mode derivatives."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0)
    prob.setup(check=False, mode='rev')
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_fwd(self):
    """Scaled constraint (ref=10) with forward-mode derivatives."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
    prob.setup(check=False, mode='fwd')
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_constraint_fd(self):
    """Scaled constraint (ref=10) with finite-difference totals."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
    model.approx_totals(method='fd')
    prob.setup()
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_constraint_cs(self):
    """Scaled constraint (ref=10) with complex-step totals."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
    model.approx_totals(method='cs')
    prob.setup()
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_scaled_constraint_rev(self):
    """Scaled constraint (ref=10) with reverse-mode derivatives."""
    prob = om.Problem()
    model = prob.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
    prob.set_solver_print(level=0)
    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
    prob.setup(check=False, mode='rev')
    failed = prob.run_driver()
    self.assertFalse(failed, "Optimization failed, info = " +
                     str(prob.driver.pyopt_solution.optInform))
    # The constrained optimum sits on the upper constraint bound: x - y = 11.
    assert_near_equal(prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_objective_fwd(self):
prob = om.Problem()
model = prob.model
prob.set_solver_print(level=0)
model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', om.ExecComp('c = x - y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == | |
-1
return _f(values, pos, size)
class AbstractSecsCommunicator:
__DEFAULT_TIMEOUT_T1 = 1.0
__DEFAULT_TIMEOUT_T2 = 15.0
__DEFAULT_TIMEOUT_T3 = 45.0
__DEFAULT_TIMEOUT_T4 = 45.0
__DEFAULT_TIMEOUT_T5 = 10.0
__DEFAULT_TIMEOUT_T6 = 5.0
__DEFAULT_TIMEOUT_T7 = 10.0
__DEFAULT_TIMEOUT_T8 = 5.0
def __init__(self, device_id, is_equip, **kwargs):
    """Build a SECS communicator.

    Args:
        device_id (int): SECS Device-ID used in message headers.
        is_equip (bool): True if this side acts as the equipment.
        **kwargs: optional configuration:
            name (str), timeout_t1 .. timeout_t8 (float seconds),
            gem_mdln (str), gem_softrev (str), gem_clock_type,
            and initial listeners: recv_primary_msg, error, communicate.
    """
    self.__gem = Gem(self)
    # these assignments go through the validating property setters
    self.device_id = device_id
    self.is_equip = is_equip
    self.name = kwargs.get('name', None)
    # timeouts fall back to the class-level defaults
    self.timeout_t1 = kwargs.get('timeout_t1', self.__DEFAULT_TIMEOUT_T1)
    self.timeout_t2 = kwargs.get('timeout_t2', self.__DEFAULT_TIMEOUT_T2)
    self.timeout_t3 = kwargs.get('timeout_t3', self.__DEFAULT_TIMEOUT_T3)
    self.timeout_t4 = kwargs.get('timeout_t4', self.__DEFAULT_TIMEOUT_T4)
    self.timeout_t5 = kwargs.get('timeout_t5', self.__DEFAULT_TIMEOUT_T5)
    self.timeout_t6 = kwargs.get('timeout_t6', self.__DEFAULT_TIMEOUT_T6)
    self.timeout_t7 = kwargs.get('timeout_t7', self.__DEFAULT_TIMEOUT_T7)
    self.timeout_t8 = kwargs.get('timeout_t8', self.__DEFAULT_TIMEOUT_T8)
    # optional GEM configuration overrides
    gem_mdln = kwargs.get('gem_mdln', None)
    if gem_mdln is not None:
        self.gem.mdln = gem_mdln
    gem_softrev = kwargs.get('gem_softrev', None)
    if gem_softrev is not None:
        self.gem.softrev = gem_softrev
    gem_clock_type = kwargs.get('gem_clock_type', None)
    if gem_clock_type is not None:
        self.gem.clock_type = gem_clock_type
    # rolling 16-bit system-bytes counter for primary messages
    self._sys_num = 0
    self.__communicating = False
    self.__comm_cdt = threading.Condition()
    # listener registries
    self.__recv_primary_msg_lstnrs = list()
    self.__communicate_lstnrs = list()
    self.__error_lstnrs = list()
    self.__recv_all_msg_lstnrs = list()
    self.__sended_msg_lstnrs = list()
    # listeners can be registered directly through kwargs
    recv_pri_msg_lstnr = kwargs.get('recv_primary_msg', None)
    if recv_pri_msg_lstnr is not None:
        self.add_recv_primary_msg_listener(recv_pri_msg_lstnr)
    err_lstnr = kwargs.get('error', None)
    if err_lstnr is not None:
        self.add_error_listener(err_lstnr)
    comm_lstnr = kwargs.get('communicate', None)
    if comm_lstnr is not None:
        self.add_communicate_listener(comm_lstnr)
    # open/close lifecycle state, guarded by a re-entrant lock
    self.__opened = False
    self.__closed = False
    self._open_close_rlock = threading.RLock()
# The original used the redundant `@property def x(self): pass` placeholder
# followed by `@x.getter`; collapsed into the idiomatic single-definition
# property form. The resulting descriptors are functionally identical.

@property
def gem(self):
    """Gem: GEM-services instance bound to this communicator (read-only)."""
    return self.__gem

@property
def device_id(self):
    """int: SECS Device-ID."""
    return self.__device_id

@device_id.setter
def device_id(self, val):
    self.__device_id = val

@property
def is_equip(self):
    """bool: True if this communicator acts as the equipment side."""
    return self.__is_equip

@is_equip.setter
def is_equip(self, val):
    # normalize any truthy/falsy input to a real bool
    self.__is_equip = bool(val)

@property
def name(self):
    """str or None: human-readable communicator name."""
    return self.__name

@name.setter
def name(self, val):
    # keep None as-is, stringify everything else
    self.__name = val if val is None else str(val)
@staticmethod
def _try_gt_zero(v):
    """Validate a timeout value.

    Args:
        v (int or float): timeout in seconds.

    Returns:
        float: the validated value.

    Raises:
        TypeError: if v is None.
        ValueError: if v is not greater than 0.0.
    """
    if v is None:
        raise TypeError("Timeout-value require not None")
    # `not v > 0.0` (rather than `v <= 0.0`) also rejects NaN
    if not v > 0.0:
        raise ValueError("Timeout-value require > 0.0")
    return float(v)
# The original used the redundant `@property def x(self): pass` placeholder
# followed by `@x.getter`/`@x.setter` for every timeout; collapsed into the
# idiomatic property form. Each setter validates through _try_gt_zero, which
# raises TypeError on None and ValueError on values not greater than 0.0.

@property
def timeout_t1(self):
    """float: Timeout-T1 in seconds."""
    return self.__timeout_t1

@timeout_t1.setter
def timeout_t1(self, val):
    self.__timeout_t1 = self._try_gt_zero(val)

@property
def timeout_t2(self):
    """float: Timeout-T2 in seconds."""
    return self.__timeout_t2

@timeout_t2.setter
def timeout_t2(self, val):
    self.__timeout_t2 = self._try_gt_zero(val)

@property
def timeout_t3(self):
    """float: Timeout-T3 in seconds."""
    return self.__timeout_t3

@timeout_t3.setter
def timeout_t3(self, val):
    self.__timeout_t3 = self._try_gt_zero(val)

@property
def timeout_t4(self):
    """float: Timeout-T4 in seconds."""
    return self.__timeout_t4

@timeout_t4.setter
def timeout_t4(self, val):
    self.__timeout_t4 = self._try_gt_zero(val)

@property
def timeout_t5(self):
    """float: Timeout-T5 in seconds."""
    return self.__timeout_t5

@timeout_t5.setter
def timeout_t5(self, val):
    self.__timeout_t5 = self._try_gt_zero(val)

@property
def timeout_t6(self):
    """float: Timeout-T6 in seconds."""
    return self.__timeout_t6

@timeout_t6.setter
def timeout_t6(self, val):
    self.__timeout_t6 = self._try_gt_zero(val)

@property
def timeout_t7(self):
    """float: Timeout-T7 in seconds."""
    return self.__timeout_t7

@timeout_t7.setter
def timeout_t7(self, val):
    self.__timeout_t7 = self._try_gt_zero(val)

@property
def timeout_t8(self):
    """float: Timeout-T8 in seconds."""
    return self.__timeout_t8

@timeout_t8.setter
def timeout_t8(self, val):
    self.__timeout_t8 = self._try_gt_zero(val)
def open(self):
    """Open communicator.

    Delegates to the transport-specific _open() of the subclass.
    """
    self._open()

def _open(self):
    # prototype-pattern: concrete transports implement the actual open
    raise NotImplementedError()

def close(self):
    """Close communicator.

    Delegates to the transport-specific _close() of the subclass.
    """
    self._close()

def _close(self):
    # prototype-pattern: concrete transports implement the actual close
    raise NotImplementedError()
def open_and_wait_until_communicating(self, timeout=None):
    """Open (if needed) and block until COMMUNICATING or closed.

    Args:
        timeout (float or None): maximum seconds to wait; None waits forever.

    Raises:
        SecsCommunicatorError: if the communicator is closed while waiting.

    Returns:
        bool: True once communicating, False if the wait timed out.
    """
    if not self.is_open:
        self._open()
    with self.__comm_cdt:
        def _p():
            # predicate: wake on either terminal condition
            return self.is_closed or self.is_communicating
        r = self.__comm_cdt.wait_for(_p, timeout)
        if r:
            # the predicate fired; closed wins over communicating
            if self.is_closed:
                raise SecsCommunicatorError("Communicator closed")
        return r
# The original used the redundant `@property def x(self): pass` placeholder
# followed by `@x.getter`; collapsed into the idiomatic property form.

@property
def is_open(self):
    """bool: True if opened and not yet closed (thread-safe)."""
    with self._open_close_rlock:
        return self.__opened and not self.__closed

@property
def is_closed(self):
    """bool: True once the communicator has been closed (thread-safe)."""
    with self._open_close_rlock:
        return self.__closed
def _set_opened(self):
    # mark the communicator as opened (thread-safe)
    with self._open_close_rlock:
        self.__opened = True

def _set_closed(self):
    # mark the communicator as closed (thread-safe) ...
    with self._open_close_rlock:
        self.__closed = True
    # ... and wake every thread blocked on the communicate condition
    with self.__comm_cdt:
        self.__comm_cdt.notify_all()

def __enter__(self):
    # context-manager support: `with communicator as comm: ...`
    return self

def __exit__(self, exc_type, exc_value, traceback):
    # always close on context exit
    self._close()

def __del__(self):
    # best-effort close on garbage collection
    self._close()
def send(self, strm, func, wbit, secs2body=None):
    """Send primary message.

    Args:
        strm (int): Stream-Number.
        func (int): Function-Number.
        wbit (bool): W-Bit.
        secs2body (tuple or list or AbstractSecs2Body): SECS-II-body. Defaults to None.

    Raises:
        SecsCommunicatorError: if communicator not opened.
        SecsSendMessageError: if send failed.
        SecsWaitReplyError: if reply not received.

    Returns:
        SecsMessage: Reply-Message if exist, otherwise None.

    Examples:
        if send 'S1F1 W.',
        send(1, 1, True)

        if send
        'S5F1 W
        <L
          <B 0x01>
          <U2 1001>
          <A "ON FIRE">
        >.',
        send(
            5, 1, True,
            ('L', [
                ('B', [0x01]),
                ('U2', [1001]),
                ('A', "ON FIRE")
            ])
        )
    """
    # a freshly generated system-bytes value marks this as a primary message
    return self._send(
        strm, func, wbit,
        self._create_secs2body(secs2body),
        self._create_system_bytes(),
        self.device_id)
def send_sml(self, sml_str):
    """Send primary message by SML.

    Args:
        sml_str (str): SML-string.

    Raises:
        SecsCommunicatorError: if communicator not opened.
        SecsSendMessageError: if send failed.
        SecsWaitReplyError: if reply not received.
        Secs2BodySmlParseError: if Secs2body parse failed.
        SmlParseError: if SML parse failed.

    Returns:
        SecsMessage: Reply-Message if exist, otherwise None.
    """
    # parse stream, function, w-bit and body out of the SML text, then send
    strm, func, wbit, s2b = SmlParser.parse(sml_str)
    return self.send(strm, func, wbit, s2b)
def reply(self, primary, strm, func, wbit, secs2body=None):
    """Send reply message.

    Args:
        primary (SecsMessage): Primary-Message.
        strm (int): Stream-Number.
        func (int): Function-Number.
        wbit (bool): W-Bit.
        secs2body (tuple or list or AbstractSecs2Body): SECS-II-body. Defaults to None.

    Raises:
        SecsCommunicatorError: if communicator not opened.
        SecsSendMessageError: if send failed.

    Returns:
        None: None

    Examples:
        if reply 'S1F18 <B 0x0>.',
        reply(primary, 1, 18, False, ('B', [0x0]))
    """
    # reuse the primary message's system-bytes so the reply is correlated
    return self._send(
        strm, func, wbit,
        self._create_secs2body(secs2body),
        primary.system_bytes,
        self.device_id)
def reply_sml(self, primary, sml_str):
    """Send reply message by SML.

    Args:
        primary (SecsMessage): Primary-Message.
        sml_str (str): SML-String.

    Raises:
        SecsCommunicatorError: if communicator not opened.
        SecsSendMessageError: if send failed.
        Secs2BodySmlParseError: if Secs2body parse failed.
        SmlParseError: if SML parse failed.

    Returns:
        None: None
    """
    strm, func, wbit, s2b = SmlParser.parse(sml_str)
    return self.reply(
        primary,
        strm, func, wbit,
        self._create_secs2body(s2b))
def _create_system_bytes(self):
self._sys_num = (self._sys_num + 1) & 0xFFFF
n = self._sys_num
d = self.device_id if self.is_equip else 0
return bytes([
(d >> 8) & 0x7F,
d & 0xFF,
(n >> 8) & 0xFF,
n & 0xFF
])
@staticmethod
def _create_secs2body(v):
    """Coerce v to an AbstractSecs2Body (None passes through unchanged)."""
    if v is None:
        return None
    if isinstance(v, AbstractSecs2Body):
        return v
    # exact-type check (no subclasses), mirroring the builder contract
    if type(v) in (list, tuple) and len(v) == 2:
        return Secs2BodyBuilder.build(v[0], v[1])
    raise TypeError('Secs2Body is tuple or list, and length == 2')
def _send(self, strm, func, wbit, secs2body, system_bytes, device_id):
    """prototype-pattern send

    Args:
        strm (int): Stream-Number.
        func (int): Function-Number.
        wbit (bool): W-Bit.
        secs2body (tuple or list or AbstractSecs2Body): SECS-II-body.
        system_bytes (bytes): System-4-bytes.
        device_id (int): Device-ID.

    Raises:
        SecsCommunicatorError: if communicator not opened.
        SecsSendMessageError: if send failed.
        SecsWaitReplyError: if reply not received.

    Returns:
        SecsMessage: Reply-Message if exist, otherwise None
    """
    # concrete transports (e.g. HSMS, SECS-I) implement the actual send
    raise NotImplementedError()
@staticmethod
def _is_single_args_listener(listener):
    """Return True when the callback accepts exactly one parameter."""
    return len(inspect.signature(listener).parameters) == 1
def add_recv_primary_msg_listener(self, listener):
    """Add receive-primary-message listener.

    If listener-arguments is 1, put receive-primary-message.
    If listener-arguments is 2, put receive-primary-message and self-communicator-instance.
    receive-primary-message is instance of `SecsMessage`.
    self-communicator-instance is instance of `AbstractSecsCommunicator`.

    Args:
        listener (function): listener

    Returns:
        None
    """
    self.__recv_primary_msg_lstnrs.append(listener)
def remove_recv_primary_msg_listener(self, listener):
| |
import numpy as np
import scipy as sp
def sim_state_eq( A, B, xi, U):
    """Simulate the linear discrete-time trajectory x(t+1) = A x(t) + B U(t).

    Args:
        A : NxN state (adjacency) matrix (numpy array); should be stable to
            prevent uncontrolled trajectories.
        B : NxN input matrix (numpy array) selecting where input energy is
            applied (identity applies energy to every node).
        xi : Nx1 initial state (numpy array); MUST have N rows.
        U : NxT matrix of input energy (numpy array) over T time points;
            may vary with time.

    Returns:
        x : NxT trajectory (numpy array) produced by iterating the state
            equation from xi under input U.

    @author JStiso
    June 2017
    """
    num_nodes = np.size(A, 0)
    num_steps = np.size(U, 1)
    traj = np.zeros((num_nodes, num_steps))
    state = xi
    for step in range(num_steps):
        # store the current (N, 1) column as a flat row of the output
        traj[:, step] = np.reshape(state, num_nodes)
        drive = np.matmul(B, np.reshape(U[:, step], (num_nodes, 1)))
        state = np.matmul(A, state) + drive
    return traj
def optimal_energy(A, T, B, x0, xf, rho, S):
    """Compute optimal inputs/trajectories for a system to transition
    between two states, trading control energy against the distance of
    constrained nodes from the target.

    Python adaptation of matlab code originally written by <NAME> and <NAME>.

    Args:
        A: (NxN numpy array) Structural connectivity matrix.
        T: (float) Time horizon: how long to control for. Too large gives
            large error, too short gives too little time for control.
        B: (NxN numpy array) Input matrix: 1 on the diagonal of nodes that
            receive input, 0 otherwise.
        x0: (Nx1 numpy array) Initial state (bools are promoted to float).
        xf: (Nx1 numpy array) Target state (bools are promoted to float).
        rho: (float) Weights energy vs. distance constraints. Small rho
            leads to larger energy.
        S: (NxN numpy array) Selects nodes whose distance is constrained:
            1 on the diagonal of constrained nodes, 0 otherwise.

    Returns:
        X_opt: (TxN numpy array) The optimal trajectory through state space.
        U_opt: (TxN numpy array) The optimal energy.
        n_err: (float) Error of the boundary-condition solve; grows when B
            is not identity and when A, T or rho are large.

    Change log: JStiso April 2018 made S an input; <NAME> January 2021
    switched propagation to the matrix exponential; JStiso 2021 translated
    to Python. This revision removes a leftover no-op debug statement
    (`np.shape(np.dot(-B, B.T) / (2 * rho))`) whose result was discarded.
    """
    n = np.shape(A)[1]
    # state vectors to float if they're bools
    if type(x0[0]) == np.bool_:
        x0 = x0.astype(float)
    if type(xf[0]) == np.bool_:
        xf = xf.astype(float)
    # complement of the constrained-node selector
    Sbar = np.eye(n) - S
    # augmented state/costate system matrix
    Atilde = np.concatenate((np.concatenate((A, np.dot(-B, B.T) / (2 * rho)), axis=1),
                             np.concatenate((-2 * S, -A.T), axis=1)), axis=0)
    M = sp.linalg.expm(Atilde * T)
    M11 = M[0:n, 0:n]
    M12 = M[0:n, n:]
    M21 = M[n:, 0:n]
    M22 = M[n:, n:]
    N = np.linalg.solve(Atilde, (M - np.eye(np.shape(Atilde)[0])))
    c = np.dot(np.dot(N, np.concatenate((np.zeros((n, n)), S), axis=0)), 2 * xf)
    c1 = c[0:n]
    c2 = c[n:]
    # solve the two-point boundary-value problem for the initial costate p0
    p0 = np.dot(np.linalg.pinv(np.concatenate((np.dot(S, M12), np.dot(Sbar, M22)), axis=0)),
                (-np.dot(np.concatenate((np.dot(S, M11), np.dot(Sbar, M21)), axis=0), x0) -
                 np.concatenate((np.dot(S, c1), np.dot(Sbar, c2)), axis=0) +
                 np.concatenate((np.dot(S, xf), np.zeros((n, 1))), axis=0)))
    n_err = np.linalg.norm(np.dot(np.concatenate((np.dot(S, M12), np.dot(Sbar, M22)), axis=0), p0) -
                           (-np.dot(np.concatenate((np.dot(S, M11), np.dot(Sbar, M21)), axis=0), x0) -
                            np.concatenate((np.dot(S, c1), np.dot(Sbar, c2)), axis=0) +
                            np.concatenate((np.dot(S, xf), np.zeros((n, 1))), axis=0)))  # norm(error)
    STEP = 0.001
    t = np.arange(0, (T + STEP), STEP)
    U = np.dot(np.ones((np.size(t), 1)), 2 * xf.T)
    # Discretize continuous-time input for convolution
    Atilde_d = sp.linalg.expm(Atilde * STEP)
    Btilde_d = np.linalg.solve(Atilde,
                               np.dot((Atilde_d - np.eye(2 * n)), np.concatenate((np.zeros((n, n)), S), axis=0)))
    # Propagate forward discretized model
    xp = np.zeros((2 * n, np.size(t)))
    xp[:, 0:1] = np.concatenate((x0, p0), axis=0)
    for i in np.arange(1, np.size(t)):
        xp[:, i] = np.dot(Atilde_d, xp[:, i - 1]) + np.dot(Btilde_d, U[i - 1, :].T)
    xp = xp.T
    # recover the optimal input from the costate half of the trajectory
    U_opt = np.zeros((np.size(t), np.shape(B)[1]))
    for i in range(np.size(t)):
        U_opt[i, :] = -(1 / (2 * rho)) * np.dot(B.T, xp[i, n:].T)
    X_opt = xp[:, 0:n]
    return X_opt, U_opt, n_err
def minimum_energy(A, T, B, x0, xf):
    """Compute the minimum-energy control for a state transition.

    Python adaptation of code originally written by <NAME>.

    Args:
        A: numpy array (N x N)
            System adjacency matrix.
        T: float (1 x 1)
            Control horizon.
        B: numpy array (N x N)
            Control input matrix.
        x0: numpy array (N x t)
            Initial state (bools are promoted to float).
        xf: numpy array (N x t)
            Final state (bools are promoted to float).

    Returns:
        x: numpy array (t x N)
            State trajectory.
        u: numpy array (t x N)
            Control input.
        n_err: float
            Norm of the boundary-condition residual induced by the
            pseudo-inverse solve.
    """
    num_nodes = np.shape(A)[0]
    # promote boolean state vectors to float
    if isinstance(x0[0], np.bool_):
        x0 = x0.astype(float)
    if isinstance(xf[0], np.bool_):
        xf = xf.astype(float)
    # Hamiltonian (state/costate) system matrix and its exponential
    ham = np.concatenate(
        (np.concatenate((A, -.5 * (A @ np.zeros(np.shape(A)) + B @ B.T)), axis=1),
         np.concatenate((np.zeros(np.shape(A)), -A.T), axis=1)), axis=0)
    expT = sp.linalg.expm(ham * T)
    blk12 = expT[0:num_nodes, num_nodes:]
    blk11 = expT[0:num_nodes, 0:num_nodes]
    # costate initial condition via pseudo-inverse
    p0 = np.linalg.pinv(blk12) @ (xf - blk11 @ x0)
    # residual of the costate solve
    n_err = np.linalg.norm(blk12 @ p0 - (xf - blk11 @ x0))
    # simulate state and costate trajectories
    num_steps = 1000
    times = np.linspace(0, T, num_steps + 1)
    v = np.zeros((2 * num_nodes, len(times)))
    v[:, 0] = np.concatenate((x0, p0), axis=0).T
    stepper = sp.linalg.expm(ham * T / (len(times) - 1))
    for k in np.arange(1, len(times)):
        v[:, k] = stepper @ v[:, k - 1]
    x = v[0:num_nodes, :]
    u = -0.5 * (B.T @ v[num_nodes:, :])
    # transpose to be similar to opt_eng_cont
    return x.T, u.T, n_err
def minimum_energy_fast(A, T, B, x0_mat, xf_mat):
""" This function computes the minimum energy required to transition between all pairs of brain states
encoded in (x0_mat,xf_mat)
Args:
A: numpy array (N x N)
System adjacency matrix
B: numpy array (N x N)
Control input matrix
x0_mat: numpy array (N x n_transitions)
Initial states (see expand_states)
xf_mat: numpy array (N x n_transitions)
Final states (see expand_states)
T: float (1 x 1)
Control horizon
Returns:
E: numpy array (N x n_transitions)
Regional energy for all state transition pairs.
Notes,
np.sum(E, axis=0)
collapse over regions to yield energy associated with all transitions.
np.sum(E, axis=0).reshape(n_states, n_states)
collapse over regions and reshape into a state by state transition matrix.
"""
# System Size
n_parcels = A.shape[0]
if type(x0_mat[0][0]) == np.bool_:
x0_mat = x0_mat.astype(float)
if type(xf_mat[0][0]) == np.bool_:
xf_mat = xf_mat.astype(float)
# Number of integration steps
nt = 1000
dt = T/nt
# Numerical integration with Simpson's 1/3 rule
# Integration step
dE = sp.linalg.expm(A * dt)
# Accumulation of expm(A * dt)
dEA = np.eye(n_parcels)
# Gramian
G = np.zeros((n_parcels, n_parcels))
for i in np.arange(1, nt/2):
# Add odd terms
dEA = np.matmul(dEA, dE)
p1 = np.matmul(dEA, B)
# Add even terms
dEA = np.matmul(dEA, dE)
p2 = np.matmul(dEA, B)
G = G + 4 * (np.matmul(p1, p1.transpose())) + 2 * (np.matmul(p2, p2.transpose()))
# Add final odd term
dEA = np.matmul(dEA, dE)
p1 = np.matmul(dEA, B)
G = G + 4 * (np.matmul(p1, p1.transpose()))
# Divide by integration step
E = sp.linalg.expm(A * T)
G = (G + np.matmul(B, B.transpose()) | |
# -----------------------------------------------------------
# A discord bot that has every feature you want in a discord server!
#
# (C) 2022 TheophileDiot
# Released under MIT License (MIT)
# email <EMAIL>
# linting: black
# -----------------------------------------------------------
from asyncio import new_event_loop
from datetime import date
from itertools import chain
from logging import basicConfig, DEBUG, error, info, INFO
from multiprocessing import Process
from os import getenv, listdir, makedirs, name, path, system, remove
from subprocess import PIPE, call
from sys import exc_info
from traceback import format_exc
from typing import Union
from aiohttp import ClientSession
from disnake import (
ApplicationCommandInteraction,
Colour,
Forbidden,
Intents,
Member,
Message,
OptionType,
)
from disnake.ext.commands import Bot, Context
from disnake.ext.commands.errors import (
BotMissingPermissions,
BadArgument,
BadUnionArgument,
CheckFailure,
CommandOnCooldown,
MaxConcurrencyReached,
MissingAnyRole,
MissingRequiredArgument,
MissingPermissions,
NoPrivateMessage,
NotOwner,
)
from dotenv import load_dotenv
def get_qualified_name_from_interaction(inter: ApplicationCommandInteraction) -> str:
    """Rebuild the fully qualified command name (group / subcommand path)."""
    data = inter.data
    qualified = data.name
    has_options = bool(data.options)
    while has_options:
        # descend into the first option; subcommand chains are linear
        data = data.options[0]
        if data.type in (OptionType.sub_command_group, OptionType.sub_command):
            qualified += f" {data.name}"
            has_options = bool(data.options)
        else:
            has_options = False
    return qualified
class Omnitron(Bot):
    """Main bot client.

    Loads the cog extensions, the database repositories, the aiohttp
    session and (optionally) an internal Lavalink node, and overrides the
    disnake command events to add per-user command counting and
    user-friendly error reporting.
    """

    def __init__(self, **kwargs):
        """Initialize the bot"""
        super().__init__(
            command_prefix=Utils.get_guild_pre or BOT_PREFIX,
            intents=self.get_intents(),
            help_command=None,
            case_insensitive=True,
            strip_after_prefix=True,
            self_bot=False,
            sync_commands_debug=True,
            asyncio_debug=True,
            test_guilds=[872500404540280893, 874311358018105385]
            if getenv("ENV") == "DEVELOPMENT"
            else None,
            **kwargs,
        )
        # Remove leftover JVM crash logs and cached music files.  Plain
        # loops instead of list comprehensions: these are side effects and
        # the built lists were discarded.
        for file_name in listdir("."):
            if file_name.startswith("hs_err_pid"):
                remove(file_name)
        for file_name in listdir("temp/musics"):
            remove(path.join("temp", "musics", file_name))
        # Truncate the Lavalink log and the music queue file.
        with open(path.join("logs", "spring.log"), "w"):
            pass
        with open(path.join("temp", "musics.txt"), "w"):
            pass
        # Discover every cog module under cogs/<category>/ as a
        # "<category>.<module>" extension name.
        dirs = chain.from_iterable(
            [
                [
                    f"{f}.{_f.replace('.py', '')}"
                    if path.isfile(path.join("cogs", f, _f))
                    else f"{f}.{_f}"
                    for _f in listdir(path.join("cogs", f))
                    # Bug fix: the original `_f not in "__pycache__"` was a
                    # substring test, which also skipped any file whose
                    # name happens to be a substring of "__pycache__".
                    if _f != "__pycache__"
                ]
                for f in listdir("cogs")
                if path.isdir(path.join("cogs", f))
                and f not in ("__init__", "__pycache__")
            ]
        )
        self._extensions = list(dirs)
        self.load_extensions()
        self.session = ClientSession(loop=self.loop)
        self.starting = True
        # Database model and repositories.
        self.model = Model.setup()
        self.last_check = None
        self.utils_class = Utils(self)
        self.main_repo = Main(self.model)
        self.config_repo = Config(self.model)
        self.poll_repo = Poll(self.model)
        self.ticket_repo = Ticket(self.model)
        self.user_repo = User(self.model, self)
        print("Database successfully initialized.")
        info("Database loaded")
        # Per-guild runtime caches (filled once the guilds are loaded).
        self.configs = {}
        self.moderators = {}
        self.djs = {}
        self.playlists = {}
        self.tasks = {}
        lavalink = False
        if getenv("internal_lavalink") == "true":
            # Run the Lavalink jar in a separate OS process.
            process = Process(target=self.start_lavalink)
            process.start()  # start the process
            lavalink = True
        if lavalink:
            print("Lavalink successfully initialized.")
            info("Lavalink started")
        # NOTE(review): Colour(BOT_COLOR) is always truthy, so the
        # `self.user.color` fallback is dead code (and `self.user` is still
        # None before login anyway) — confirm intent.
        self.color = Colour(BOT_COLOR) or self.user.color

    """ EVENTS """

    async def on_message_command(self, inter: ApplicationCommandInteraction):
        """Record the usage of a message (context-menu) command."""
        self.user_repo.add_command_count(
            inter.guild.id,
            inter.author.id,
            get_qualified_name_from_interaction(inter),
        )

    async def on_user_command(self, inter: ApplicationCommandInteraction):
        """Record the usage of a user (context-menu) command."""
        self.user_repo.add_command_count(
            inter.guild.id,
            inter.author.id,
            get_qualified_name_from_interaction(inter),
        )

    async def on_slash_command_completion(self, inter: ApplicationCommandInteraction):
        """Record the usage of a completed slash command."""
        self.user_repo.add_command_count(
            inter.guild.id,
            inter.author.id,
            get_qualified_name_from_interaction(inter),
        )

    async def on_command_completion(self, ctx: Context):
        """Record the usage of a completed prefix command."""
        self.user_repo.add_command_count(
            ctx.guild.id, ctx.author.id, ctx.command.qualified_name
        )

    async def on_message_command_error(
        self, inter: ApplicationCommandInteraction, _error
    ) -> None:
        """Override default message command error handler to log errors and prevent the bot from crashing."""
        await self.handle_error(inter, _error)

    async def on_user_command_error(
        self, inter: ApplicationCommandInteraction, _error
    ) -> None:
        """Override default user command error handler to log errors and prevent the bot from crashing."""
        await self.handle_error(inter, _error)

    async def on_slash_command_error(
        self, inter: ApplicationCommandInteraction, _error
    ) -> None:
        """Override default slash command error handler to log errors and prevent the bot from crashing."""
        await self.handle_error(inter, _error)

    async def on_command_error(self, ctx: Context, _error) -> None:
        """Override default command error handler to log errors and prevent the bot from crashing."""
        await self.handle_error(ctx, _error)

    async def on_error(self, event, *args, **kwargs):
        """Log any unhandled event error, warn the guild/user and forward
        the traceback to the bot owner."""
        error(
            f"{exc_info()[0]}\n{exc_info()[1]}\n{exc_info()[2]}\n\n{format_exc()}\n\nIn guild `{args[0].guild if args else 'not found'}` (ID: `{args[0].guild.id if args else 'not found'}`)"
        )
        print(
            f"{exc_info()[0]}\n{exc_info()[1]}\n{exc_info()[2]}\n\n{format_exc()}\n\nIn guild `{args[0].guild if args else 'not found'}` (ID: `{args[0].guild.id if args else 'not found'}`)"
        )
        _error = exc_info()[1]
        if isinstance(_error, Forbidden):
            # Missing permissions somewhere: tell the guild's moderators.
            try:
                await self.utils_class.send_message_to_mods(
                    _error.text, args[0].guild.id
                )
            except AttributeError:
                # Interactions expose guild_id instead of guild.id.
                await self.utils_class.send_message_to_mods(
                    _error.text, args[0].guild_id
                )
        else:
            # Log that the bot had an error
            source = args[0]
            if isinstance(source, Context) or isinstance(source, Member):
                await source.send(
                    f"⚠️ - An error happened, the developer has been informed about this! If you want help contact `Batgregate900#2562`"
                )
            elif isinstance(source, ApplicationCommandInteraction):
                await source.response.send_message(
                    f"⚠️ - An error happened, the developer has been informed about this! If you want help contact `Batgregate900#2562`",
                    ephemeral=True,
                )
            bot_owner = self.owner
            if not bot_owner:
                bot_owner = await self.fetch_user(
                    int(
                        self.owner_id or list(self.owner_ids)[0]
                        if self.owner_ids
                        else self.get_ownerid()
                    )
                )
            return await bot_owner.send(
                f"{exc_info()[0]}\n{exc_info()[1]}\n{exc_info()[2]}\n\n{format_exc()}\n\nIn guild `{args[0].guild if args else 'not found'}` (ID: `{args[0].guild.id if args else 'not found'}`)"
            )

    async def on_message(self, message: Message):
        """Dispatch prefix commands for regular guild messages once the bot
        has finished starting."""
        if (
            message.is_system()
            or message.author.bot
            or not message.guild
            or self.starting
        ):
            return
        prefix = self.utils_class.get_guild_pre(message)
        if message.content.lower().startswith(prefix[0].lower()):
            await self.process_commands(message)

    """ METHOD(S) """

    async def handle_error(
        self, source: Union[Context, ApplicationCommandInteraction], _error
    ):
        """Translate a command error into a user-facing reply, re-raising
        errors that are not one of the handled disnake error types."""
        if isinstance(source, Context):
            cmd_name = source.command.qualified_name
        else:
            cmd_name = get_qualified_name_from_interaction(source)
        if isinstance(_error, NoPrivateMessage):
            resp = f"⚠️ - this command is deactivated outside of guilds!"
        elif isinstance(_error, MissingRequiredArgument):
            resp = f"ℹ️ - The `{cmd_name}` command is missing an argument! Missing parameter: `{_error.param.name}`. `{self.utils_class.get_guild_pre(source.author)[0]}{f'{source.command.parents[0]}' if source.command and source.command.parents else f'help {cmd_name}'}` to get more help."
        elif isinstance(_error, MissingPermissions):
            resp = f"⛔ - You do not have the necessary perms to run this command! Required perms: `{', '.join(_error.missing_permissions)}`"
        elif isinstance(_error, MissingAnyRole):
            resp = f"⛔ - You do not have one of the required roles to run this command! One of these roles is required: `{', '.join(_error.missing_roles)}`"
        elif isinstance(_error, NotOwner):
            resp = f"⛔ - The `{cmd_name}` command is reserved for the bot owner!"
        elif isinstance(_error, BotMissingPermissions):
            resp = f"⛔ - I don't have the necessary perms to run this command! Required perms: `{', '.join(_error.missing_permissions)}`"
        elif isinstance(_error, CommandOnCooldown):
            resp = f"ℹ️ - The `{cmd_name}` command is currently in cooldown, please try again in `{'%.2f' % _error.retry_after}` seconds, this command can be used `{_error.cooldown.rate}` times every `{_error.cooldown.per}` seconds."
        elif isinstance(_error, MaxConcurrencyReached):
            resp = f"ℹ️ - The `{cmd_name}` command has too many executions in progress (`{_error.number}` executions), please try again in a few seconds, this command can only be used a maximum of `{_error.number}` times simultaneously."
        elif isinstance(_error, BadArgument):
            resp = f"⚠️ - Please provide valid arguments! `{self.utils_class.get_guild_pre(source.author)[0]}{f'{source.command.parents[0]}' if source.command and source.command.parents else f'help {cmd_name}'}` to get more help."
        elif isinstance(_error, BadUnionArgument):
            resp = f"⚠️ - Please provide valid arguments! The argument `{_error.param.name}` should be within these types: ({', '.join([f'`{c.__name__}`' for c in _error.converters])})! `{self.utils_class.get_guild_pre(source.author)[0]}{f'{source.command.parents[0]}' if source.command and source.command.parents else f'help {cmd_name}'}` to get more help."
        elif isinstance(_error, CheckFailure):
            # last_check is set by the permission checks before they fail.
            if self.last_check == "moderator":
                resp = f"⛔ - {source.author.mention} - You must be moderator of this server to use this command!"
            elif self.last_check == "dj":
                resp = f"⛔ - {source.author.mention} - You must be a dj in this server to use this command!"
            else:
                raise _error
            self.last_check = None
        else:
            raise _error.original
        try:
            if isinstance(source, Context):
                await source.reply(resp, delete_after=20)
            else:
                await source.response.send_message(resp, ephemeral=True)
        except Forbidden as f:
            f.text = f"⚠️ - I don't have the right permissions to send messages in the channel {source.channel.mention} (message (replying to {source.author}): `{resp}`)!"
            raise

    async def process_commands(self, message: Message):
        """This function processes the commands that the user has sent"""
        await self.wait_until_ready()
        ctx = await self.get_context(message=message)
        if ctx.command is None:
            return await ctx.reply(
                f"ℹ️ - This command doesn't exist or is deactivated! Command: `{message.content[len(self.utils_class.get_guild_pre(message)[0])::]}`",
                delete_after=15,
            )
        elif (
            "commands_channels" in self.configs[ctx.guild.id]
            and ctx.channel.id not in self.configs[ctx.guild.id]["commands_channels"]
            and not ctx.author.guild_permissions.administrator
        ):
            return await ctx.reply(
                f"⛔ - Commands are not allowed in this channel!",
                delete_after=15,
            )
        await self.invoke(ctx)

    def load_extensions(self, cogs: list = None, path: str = "cogs."):
        """Loads the default set of extensions or a separate one if given.

        NOTE(review): the original annotated `cogs` as Context, but it is
        used as an iterable of extension names.
        """
        for extension in cogs or self._extensions:
            try:
                self.load_extension(f"{path}{extension}")
                print(f"Loaded cog: {extension}")
            except Exception as e:
                print(f"LoadError: {extension}\n" f"{type(e).__name__}: {e}")
                error(f"LoadError: {extension}\n" f"{type(e).__name__}: {e}")
        info("All cogs loaded")

    @staticmethod
    def start_lavalink():
        """starts lavalink."""
        try:
            call(["java", "-jar", "data/Lavalink.jar"], stdout=PIPE, stderr=PIPE)
        except Exception:
            pass

    @staticmethod
    def get_ownerid():
        """Returns the owner id."""
        return getenv("OWNER_ID") or OWNER_ID or 559057271737548810

    @staticmethod
    def get_intents():
        """Configure the intents for the bot"""
        intents = Intents(
            guilds=True,
            guild_messages=True,
            guild_reactions=True,
            members=True,
            voice_states=True,
            message_content=True,
        )
        return intents

    @classmethod
    async def setup(cls, **kwargs):
        """Setup the bot with a token from data.constants or the .env file"""
        # `cls` (not `self`): this is a classmethod and receives the class.
        bot = cls()
        try:
            await bot.start(
                getenv("BOT_TOKEN_DEV")
                if getenv("ENV") == "DEVELOPMENT"
                else BOT_TOKEN or getenv("BOT_TOKEN"),
                **kwargs,
            )
        except KeyboardInterrupt:
            await bot.close()
if __name__ == "__main__":
from data import (
Model,
Utils,
BOT_TOKEN,
BOT_PREFIX,
BOT_COLOR,
OWNER_ID,
)
from data.Database import Main, Config, Poll, Ticket, User
load_dotenv(path.join(".", ".env")) # Load data from the .env file
if not path.exists("logs"): # Create logs folder if | |
tables defining the metric
groups in section 'Metric groups' in the :term:`HMC API` book.
type (:term:`callable`):
Python type for the metric value. The type must be a constructor
(callable) that takes the metrics value from the `MetricsResponse`
string as its only argument, using the following Python types
for the metric group description types shown in the :term:`HMC API`
book:
============================= ======================
Description type Python type
============================= ======================
Boolean :class:`py:bool`
Byte :term:`integer`
Short :term:`integer`
Integer :term:`integer`
Long :term:`integer`
Double :class:`py:float`
String, String Enum :term:`unicode string`
============================= ======================
unit (:term:`string`):
Unit of the metric value.
All these parameters are also available as same-named attributes.
"""
self = super(MetricDefinition, cls).__new__(
cls, index, name, type, unit)
return self
__slots__ = ()
def __repr__(self):
repr_str = "MetricDefinition(" \
"index={s.index!r}, " \
"name={s.name!r}, " \
"type={s.type!r}, " \
"unit={s.unit!r})". \
format(s=self)
return repr_str
def _metric_type(metric_type_name):
    """
    Return a constructor callable for the given metric type name.

    The returned callable takes the metric value as a string as its only
    argument and returns a Python object representing that metric value using
    the correct Python type.

    Raises:
      KeyError: The metric type name is not known.
    """
    return _METRIC_TYPES_BY_NAME[metric_type_name]
# Maps the metric type names used in HMC metric group definitions to the
# Python constructor used for parsing the corresponding metric value
# strings (six.text_type is str on Python 3, unicode on Python 2).
_METRIC_TYPES_BY_NAME = {
    'boolean-metric': bool,
    'byte-metric': int,
    'short-metric': int,
    'integer-metric': int,
    'long-metric': int,
    'double-metric': float,
    'string-metric': six.text_type,
}
def _metric_value(value_str, metric_type):
    """
    Return a Python-typed metric value from a metric value string.

    Parameters:
      value_str (string): The metric value as it appears in the
        MetricsResponse string.
      metric_type (callable): Python type constructor for the metric
        (a value of _METRIC_TYPES_BY_NAME).

    Raises:
      ValueError: The value string is invalid for the metric type.
    """
    if metric_type in (int, float):
        try:
            return metric_type(value_str)
        except ValueError:
            # Bug fix: metric_type is itself a class, so its name is
            # metric_type.__name__; the original used
            # metric_type.__class__.__name__, which always yields 'type'.
            raise ValueError("Invalid {} metric value: {!r}".
                             format(metric_type.__name__, value_str))
    elif metric_type is six.text_type:
        # In Python 3, decode('unicode_escape') requires bytes, so we need
        # to encode to bytes. This also works in Python 2.
        return value_str.strip('"').encode('utf-8').decode('unicode_escape')
    else:
        assert metric_type is bool
        lower_str = value_str.lower()
        if lower_str == 'true':
            return True
        if lower_str == 'false':
            return False
        raise ValueError("Invalid boolean metric value: {!r}".format(value_str))
def _metric_unit_from_name(metric_name):
    """
    Infer a human-readable unit string from the metric name, by matching the
    name against the known naming patterns.

    Returns `None` when no pattern applies.
    """
    for pattern, unit in _PATTERN_UNIT_LIST:
        if pattern.match(metric_name):
            return unit
    return None
# Controls whether unit strings use the Unicode micro/degree signs or
# ASCII fallbacks; flip to False for pure-ASCII consumers.
_USE_UNICODE = True
if _USE_UNICODE:
    MICROSECONDS = u"\u00b5s"  # U+00B5 = Micro Sign
    CELSIUS = u"\u00B0C"  # U+00B0 = Degree Sign
    # Note: Use of U+2103 (Degree Celsius) is discouraged by Unicode standard
else:
    MICROSECONDS = u"us"
    CELSIUS = u"degree Celsius"  # Official SI unit when not using degree sign
# Ordered (pattern, unit) pairs consumed by _metric_unit_from_name(), which
# returns the unit of the FIRST matching pattern.  This must therefore be a
# sequence, not a set: the set literal used previously iterates in arbitrary
# order, so e.g. "policy-activation-time" could match either the generic
# ".+-time$" pattern or its dedicated special case, nondeterministically.
# Most specific patterns come first so that they win over the generic ones.
_PATTERN_UNIT_LIST = (
    # Special cases (most specific, tried first):
    (re.compile(r"^storage-rate$"), u"kB/s"),
    (re.compile(r"^humidity$"), u"%"),
    (re.compile(r"^memory-used$"), u"MiB"),
    (re.compile(r"^policy-activation-time$"), u""),  # timestamp
    (re.compile(r"^velocity-numerator$"), MICROSECONDS),
    (re.compile(r"^velocity-denominator$"), MICROSECONDS),
    (re.compile(r"^utilization$"), u"%"),
    # End patterns:
    (re.compile(r".+-usage$"), u"%"),
    (re.compile(r".+-time$"), MICROSECONDS),
    (re.compile(r".+-time-used$"), MICROSECONDS),
    (re.compile(r".+-celsius$"), CELSIUS),
    (re.compile(r".+-watts$"), u"W"),
    (re.compile(r".+-paging-rate$"), u"pages/s"),
    (re.compile(r".+-sampling-rate$"), u"samples/s"),
    # Begin patterns ("bytes-per-second" before the more general "bytes-"):
    (re.compile(r"^bytes-per-second-.+"), u"B/s"),
    (re.compile(r"^bytes-.+"), u"B"),
    (re.compile(r"^interval-bytes-.+"), u"B"),
    (re.compile(r"^heat-load.+"), u"BTU/h"),  # Note: No trailing hyphen
)
def _resource_class_from_group(metric_group_name):
    """
    Return the resource class string from the metric group name.

    Metric groups for resources that are specific to ensemble mode are not
    supported.

    Returns an empty string if a metric group name is unknown.
    """
    return _CLASS_FROM_GROUP.get(metric_group_name, '')
# Maps metric group names to the class string of the resource the group
# applies to.  Consumed by _resource_class_from_group().
_CLASS_FROM_GROUP = {
    # DPM mode only:
    'dpm-system-usage-overview': 'cpc',
    'partition-usage': 'partition',
    'adapter-usage': 'adapter',
    'network-physical-adapter-port': 'adapter',
    'partition-attached-network-interface': 'nic',
    # Classic mode only:
    'cpc-usage-overview': 'cpc',
    'logical-partition-usage': 'logical-partition',
    'channel-usage': 'cpc',
    'crypto-usage': 'cpc',
    'flash-memory-usage': 'cpc',  # TODO: verify CPC mode dependency
    'roce-usage': 'cpc',  # TODO: verify CPC mode dependency
    # DPM mode or classic mode:
    'zcpc-environmentals-and-power': 'cpc',
    'zcpc-processor-usage': 'cpc',
}
class MetricsResponse(object):
    """
    Represents the metric values returned by one call to the
    :meth:`~zhmcclient.MetricsContext.get_metrics` method, and provides
    structured access to the data.
    """
    def __init__(self, metrics_context, metrics_response_str):
        """
        Parameters:

          metrics_context (:class:`~zhmcclient.MetricsContext`):
            The :class:`~zhmcclient.MetricsContext` object that was used to
            retrieve the metrics response string. It defines the structure of
            the metric values in the metrics response string.

          metrics_response_str (:term:`string`):
            The metrics response string, as returned by the
            :meth:`~zhmcclient.MetricsContext.get_metrics` method.
        """
        self._metrics_context = metrics_context
        self._metrics_response_str = metrics_response_str
        self._client = self._metrics_context.manager.client
        # The response string is parsed eagerly, once, at construction time.
        self._metric_group_values = self._setup_metric_group_values()
    def _setup_metric_group_values(self):
        """
        Return the list of MetricGroupValues objects for this metrics response,
        by processing its metrics response string.

        The lines in the metrics response string are::

            MetricsResponse: MetricsGroup{0,*}
                             <emptyline>        a third empty line at the end
            MetricsGroup:    MetricsGroupName
                             ObjectValues{0,*}
                             <emptyline>        a second empty line after each MG
            ObjectValues:    ObjectURI
                             Timestamp
                             ValueRow{1,*}
                             <emptyline>        a first empty line after this blk
        """
        mg_defs = self._metrics_context.metric_group_definitions
        metric_group_name = None
        resource_uri = None
        dt_timestamp = None
        object_values = None
        metric_group_values = list()
        # Line-by-line state machine over the response string:
        #   state 0: expecting a metric group name (or separator lines)
        #   state 1: expecting an object URI (or the group's closing line)
        #   state 2: expecting the timestamp line
        #   state 3: expecting value rows (or the block's closing line)
        state = 0
        for mr_line in self._metrics_response_str.splitlines():
            if state == 0:
                if object_values is not None:
                    # Store the result from the previous metric group
                    mgv = MetricGroupValues(metric_group_name, object_values)
                    metric_group_values.append(mgv)
                    object_values = None
                if mr_line == '':
                    # Skip initial (or trailing) empty lines
                    pass
                else:
                    # Process the next metrics group
                    metric_group_name = mr_line.strip('"')  # No " or \ inside
                    # NOTE(review): assert (stripped under -O) that the HMC
                    # only reports groups known to the metrics context.
                    assert metric_group_name in mg_defs
                    m_defs = mg_defs[metric_group_name].metric_definitions
                    object_values = list()
                    state = 1
            elif state == 1:
                if mr_line == '':
                    # There are no (or no more) ObjectValues items in this
                    # metrics group
                    state = 0
                else:
                    # There are ObjectValues items
                    resource_uri = mr_line.strip('"')  # No " or \ inside
                    state = 2
            elif state == 2:
                # Process the timestamp
                assert mr_line != ''
                try:
                    dt_timestamp = datetime_from_timestamp(int(mr_line))
                except ValueError:
                    # Sometimes, the returned epoch timestamp values are way
                    # too large, e.g. 3651584404810066 (which would translate
                    # to the year 115791 A.D.). Python datetime supports
                    # up to the year 9999. We circumvent this issue by
                    # simply using the current date&time.
                    # TODO: Remove the circumvention for too large timestamps.
                    dt_timestamp = datetime.now(pytz.utc)
                state = 3
            elif state == 3:
                if mr_line != '':
                    # Process the metric values in the ValueRow line
                    str_values = mr_line.split(',')
                    metrics = dict()
                    for m_name in m_defs:
                        m_def = m_defs[m_name]
                        m_type = m_def.type
                        m_value_str = str_values[m_def.index]
                        m_value = _metric_value(m_value_str, m_type)
                        metrics[m_name] = m_value
                    ov = MetricObjectValues(
                        self._client, mg_defs[metric_group_name], resource_uri,
                        dt_timestamp, metrics)
                    object_values.append(ov)
                    # stay in this state, for more ValueRow lines
                else:
                    # On the empty line after the last ValueRow line
                    state = 1
        # NOTE(review): the final group is only appended when trailing empty
        # lines drive the parser back through state 0; presumably the HMC
        # always terminates responses accordingly -- confirm.
        return metric_group_values
    @property
    def metrics_context(self):
        """
        :class:`~zhmcclient.MetricsContext` object for this metric response.
        This can be used to access the metric definitions for this response.
        """
        return self._metrics_context
    @property
    def metric_group_values(self):
        """
        :class:`py:list`: The list of :class:`~zhmcclient.MetricGroupValues`
        objects representing the metric groups in this metric response.
        Each :class:`~zhmcclient.MetricGroupValues` object contains a list of
        :class:`~zhmcclient.MetricObjectValues` objects representing the
        metric values in this group (each for a single resource and point in
        time).
        """
        return self._metric_group_values
class MetricGroupValues(object):
    """
    The metric values of one metric group within a MetricsResponse string:
    the group's name plus one MetricObjectValues entry per (resource,
    point in time).
    """

    def __init__(self, name, object_values):
        """
        Parameters:

          name (:term:`string`):
            Metric group name.

          object_values (:class:`py:list`):
            The :class:`~zhmcclient.MetricObjectValues` objects in this
            metric group; each of them holds the metric values for a single
            resource at a single point in time.
        """
        self._name = name
        self._object_values = object_values

    @property
    def name(self):
        """string: The metric group name."""
        return self._name

    @property
    def object_values(self):
        """
        :class:`py:list`: The :class:`~zhmcclient.MetricObjectValues` objects
        in this metric group, each representing the metric values for a
        single resource at a single point in time.
        """
        return self._object_values
class MetricObjectValues(object):
"""
Represents the metric values for a single resource at a single point in
time.
"""
    def __init__(self, client, metric_group_definition, resource_uri,
                 timestamp, metrics):
        """
        Parameters:

          client (:class:`~zhmcclient.Client`):
            Client object, for retrieving the actual resource.

          metric_group_definition (:class:`~zhmcclient.MetricGroupDefinition`):
            Metric group definition for this set of metric values.

          resource_uri (:term:`string`):
            Resource URI of the resource these metric values apply to.

          timestamp (:class:`py:datetime.datetime`):
            Point in time when the HMC captured these metric values (as a
            timezone-aware datetime object).

          metrics (dict):
            The metric values, as a dictionary of the (Python typed) metric
            values, by metric name.
        """
        self._client = client
        self._metric_group_definition = metric_group_definition
        self._resource_uri = resource_uri
        self._timestamp = timestamp
        self._metrics = metrics
        # Lazy initialization: the resource object is looked up on first
        # use (presumably by a `resource` property outside this chunk).
        self._resource = None  # Lazy initialization
    @property
    def client(self):
        """
        :class:`~zhmcclient.Client`: Client object, for retrieving the actual
        resource.
        """
        return self._client
    @property
    def metric_group_definition(self):
        """
        :class:`~zhmcclient.MetricGroupDefinition`: Metric group definition for
        this set of metric values.
        """
        return self._metric_group_definition
@property
def resource_uri(self):
"""
string: The canonical URI path of the resource these metric | |
<gh_stars>1-10
import unittest
import unittest.mock
import datetime
import io
import uuid
from g1.bases import datetimes
from g1.containers import bases
from g1.containers import builders
from g1.containers import images
from g1.containers import models
from g1.containers import pods
from g1.files import locks
from g1.texts import jsons
try:
from g1.devtools.tests import filelocks
except ImportError:
filelocks = None
from tests import fixtures
class PodsTest(
fixtures.TestCaseBase,
filelocks.Fixture if filelocks else object,
):
sample_pod_id = '01234567-89ab-cdef-0123-456789abcdef'
sample_config = models.PodConfig(
name='test-pod',
version='0.0.1',
apps=[
models.PodConfig.App(
name='hello',
exec=['/bin/echo', 'hello', 'world'],
),
],
images=[
models.PodConfig.Image(
name='base',
version='0.0.1',
),
models.PodConfig.Image(
name='sample-app',
version='1.0',
),
],
mounts=[
models.PodConfig.Mount(
source='/dev/null',
target='/this/is/pod/path',
read_only=True,
),
],
overlays=[
models.PodConfig.Overlay(
sources=[''],
target='/this/is/some/other/pod/path',
read_only=False,
),
],
)
sample_image_id = '0123456789abcdef' * 4
sample_metadata = images.ImageMetadata(
name='sample-app',
version='1.0',
)
    def setUp(self):
        """Initialize the bases/images/pods repos and stub out journals."""
        super().setUp()
        bases.cmd_init()
        images.cmd_init()
        pods.cmd_init()
        self.sample_pod_dir_path = pods._get_pod_dir_path(self.sample_pod_id)
        # Stub the journals module so tests can assert on journal cleanup
        # without touching real journal directories.
        patcher = unittest.mock.patch.object(pods, 'journals')
        self.mock_journals = patcher.start()
        self.addCleanup(patcher.stop)
@staticmethod
def make_pod_id(id_int):
return str(uuid.UUID(int=id_int))
    @staticmethod
    def create_pod_dir(pod_id, config):
        """Create a pod directory with its config, bypassing cmd_prepare."""
        pod_dir_path = pods._get_pod_dir_path(pod_id)
        pod_dir_path.mkdir()
        pods._setup_pod_dir_barely(pod_dir_path, config)
        pods._pod_dir_create_config(pod_dir_path, config)
    @staticmethod
    def list_pod_dir_paths():
        """Return sorted names of the pod dirs known to the pods module."""
        return sorted(p.name for p in pods._iter_pod_dir_paths())
    @staticmethod
    def list_active():
        """Return sorted entry names under the active directory."""
        return sorted(p.name for p in pods._get_active_path().iterdir())
    @staticmethod
    def list_graveyard():
        """Return sorted entry names under the graveyard directory."""
        return sorted(p.name for p in pods._get_graveyard_path().iterdir())
    @staticmethod
    def list_tmp():
        """Return sorted entry names under the tmp directory."""
        return sorted(p.name for p in pods._get_tmp_path().iterdir())
@staticmethod
def make_image_id(id_int):
return '%064d' % id_int
    @staticmethod
    def create_image_dir(image_id, metadata):
        """Create an image directory with metadata and an empty rootfs."""
        image_dir_path = images.get_image_dir_path(image_id)
        image_dir_path.mkdir()
        jsons.dump_dataobject(
            metadata,
            images._get_metadata_path(image_dir_path),
        )
        images.get_rootfs_path(image_dir_path).mkdir()
    #
    # Top-level commands.
    #
    def test_cmd_init(self):
        """cmd_init creates the active/graveyard/tmp repo layout."""
        self.assertEqual(
            sorted(p.name for p in pods._get_pod_repo_path().iterdir()),
            ['active', 'graveyard', 'tmp'],
        )
    def test_cmd_list(self):
        """cmd_list reports a pod once its pod dir and images exist."""
        def cmd_list():
            return sorted(result['id'] for result in pods.cmd_list())
        self.assertEqual(cmd_list(), [])
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        for i, image in enumerate(self.sample_config.images):
            self.create_image_dir(
                self.make_image_id(i + 1),
                images.ImageMetadata(name=image.name, version=image.version),
            )
        self.assertEqual(cmd_list(), [self.sample_pod_id])
    def test_cmd_show(self):
        """cmd_show asserts on a missing pod and reports app status."""
        with self.assertRaisesRegex(AssertionError, r'expect.*is_dir'):
            pods.cmd_show(self.sample_pod_id)
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        self.assertEqual(
            pods.cmd_show(self.sample_pod_id),
            [{
                'name': 'hello',
                'status': None,
                'last-updated': None,
                'ref-count': 1,
            }],
        )
    def test_cmd_cat_config(self):
        """cmd_cat_config asserts on a missing pod and streams the config."""
        with self.assertRaisesRegex(AssertionError, r'expect.*is_file'):
            pods.cmd_cat_config(self.sample_pod_id, io.BytesIO())
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        buffer = io.BytesIO()
        pods.cmd_cat_config(self.sample_pod_id, buffer)
        self.assertEqual(
            buffer.getvalue(),
            pods._get_config_path(self.sample_pod_dir_path).read_bytes(),
        )
    @unittest.skipUnless(filelocks, 'g1.tests.filelocks unavailable')
    def test_cmd_prepare(self):
        """cmd_prepare builds the pod dir, leaves tmp empty and unlocked."""
        config_path = self.test_repo_path / 'sample-config'
        jsons.dump_dataobject(self.sample_config, config_path)
        for i, image in enumerate(self.sample_config.images):
            self.create_image_dir(
                self.make_image_id(i + 1),
                images.ImageMetadata(name=image.name, version=image.version),
            )
        self.assertEqual(self.list_pod_dir_paths(), [])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        with unittest.mock.patch.multiple(
            pods.__name__,
            scripts=unittest.mock.DEFAULT,
            # We don't have a valid base image, and so we can't really
            # call ``builders.generate_unit_file``, etc.
            builders=unittest.mock.DEFAULT,
            _generate_hostname=unittest.mock.DEFAULT,
        ):
            pods.cmd_prepare(self.sample_pod_id, config_path)
        self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        self.assertFalse(self.check_exclusive(self.sample_pod_dir_path))
    def test_cmd_remove(self):
        """cmd_remove refuses while extra refs exist, then deletes the pod."""
        config_path = self.test_repo_path / 'sample-config'
        jsons.dump_dataobject(self.sample_config, config_path)
        self.assertEqual(self.list_pod_dir_paths(), [])
        self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        self.assertEqual(pods._get_ref_count(self.sample_pod_dir_path), 1)
        self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
        self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        ref_path = self.test_repo_path / 'ref'
        pods.cmd_add_ref(self.sample_pod_id, ref_path)
        with unittest.mock.patch(pods.__name__ + '.scripts'):
            with self.assertRaisesRegex(
                AssertionError, r'expect x <= 1, not 2'
            ):
                pods.cmd_remove(self.sample_pod_id)
        self.assertEqual(pods._get_ref_count(self.sample_pod_dir_path), 2)
        self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
        self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        self.mock_journals.remove_journal_dir.assert_not_called()
        ref_path.unlink()
        with unittest.mock.patch(pods.__name__ + '.scripts'):
            pods.cmd_remove(self.sample_pod_id)
        self.assertEqual(self.list_pod_dir_paths(), [])
        self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
        self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
        self.mock_journals.remove_journal_dir.assert_called_once_with(
            self.sample_pod_id
        )
    @unittest.skipUnless(filelocks, 'g1.tests.filelocks unavailable')
    def test_cmd_cleanup(self):
        """cmd_cleanup skips referenced or locked pods and removes the rest."""
        future = datetimes.utcnow() + datetime.timedelta(days=1)
        pod_id_1 = self.make_pod_id(1)
        pod_id_2 = self.make_pod_id(2)
        self.create_pod_dir(pod_id_1, self.sample_config)
        self.create_pod_dir(pod_id_2, self.sample_config)
        self.assertEqual(self.list_active(), [pod_id_1, pod_id_2])
        self.assertEqual(self.list_graveyard(), [])
        self.assertEqual(self.list_tmp(), [])
        ref_path = self.test_repo_path / 'ref'
        pods.cmd_add_ref(pod_id_1, ref_path)
        pods.cmd_cleanup(future)
        self.assertEqual(self.list_active(), [pod_id_1])
        self.assertEqual(self.list_graveyard(), [])
        self.assertEqual(self.list_tmp(), [])
        ref_path.unlink()
        self.mock_journals.remove_journal_dir.assert_called_once_with(pod_id_2)
        self.mock_journals.remove_journal_dir.reset_mock()
        # A held exclusive lock must also protect the pod from cleanup.
        with self.using_exclusive(pods._get_pod_dir_path(pod_id_1)):
            pods.cmd_cleanup(future)
        self.assertEqual(self.list_active(), [pod_id_1])
        self.assertEqual(self.list_graveyard(), [])
        self.assertEqual(self.list_tmp(), [])
        self.mock_journals.remove_journal_dir.assert_not_called()
        self.mock_journals.remove_journal_dir.reset_mock()
        pods.cmd_cleanup(future)
        self.assertEqual(self.list_active(), [])
        self.assertEqual(self.list_graveyard(), [])
        self.assertEqual(self.list_tmp(), [])
        self.mock_journals.remove_journal_dir.assert_called_once_with(pod_id_1)
    @unittest.skipUnless(filelocks, 'g1.tests.filelocks unavailable')
    def test_cleanup_active(self):
        """_cleanup_active moves expired pods to the graveyard, skipping locked ones."""
        future = datetimes.utcnow() + datetime.timedelta(days=1)
        pod_id_1 = self.make_pod_id(1)
        pod_id_2 = self.make_pod_id(2)
        self.create_pod_dir(pod_id_1, self.sample_config)
        self.create_pod_dir(pod_id_2, self.sample_config)
        self.assertEqual(self.list_active(), [pod_id_1, pod_id_2])
        self.assertEqual(self.list_graveyard(), [])
        self.assertEqual(self.list_tmp(), [])
        with self.using_exclusive(pods._get_pod_dir_path(pod_id_1)):
            pods._cleanup_active(future)
        self.assertEqual(self.list_active(), [pod_id_1])
        self.assertEqual(self.list_graveyard(), [pod_id_2])
        self.assertEqual(self.list_tmp(), [])
        self.mock_journals.remove_journal_dir.assert_called_once_with(pod_id_2)
        self.mock_journals.remove_journal_dir.reset_mock()
        pods._cleanup_active(future)
        self.assertEqual(self.list_active(), [])
        self.assertEqual(self.list_graveyard(), [pod_id_1, pod_id_2])
        self.assertEqual(self.list_tmp(), [])
        self.mock_journals.remove_journal_dir.assert_called_once_with(pod_id_1)
    #
    # Locking strategy.
    #
    @unittest.skipUnless(filelocks, 'g1.tests.filelocks unavailable')
    def test_create_tmp_pod_dir(self):
        """_create_tmp_pod_dir returns an unlocked tmp directory."""
        tmp_path = pods._create_tmp_pod_dir()
        self.assertFalse(self.check_exclusive(tmp_path))
    #
    # Data type.
    #
    def test_config(self):
        """PodConfig and its nested types validate their fields on creation."""
        with self.assertRaisesRegex(AssertionError, r'expect non-empty'):
            models.PodConfig(
                name='test-pod',
                version='0.0.1',
                apps=self.sample_config.apps,
                images=[],
            )
        with self.assertRaisesRegex(
            AssertionError, r'expect unique elements in '
        ):
            models.PodConfig(
                name='test-pod',
                version='0.0.1',
                apps=[
                    models.PodConfig.App(name='some-app', exec=['/bin/true']),
                    models.PodConfig.App(name='some-app', exec=['/bin/false']),
                ],
                images=self.sample_config.images,
            )
        with self.assertRaisesRegex(
            AssertionError, r'expect unique elements in '
        ):
            # Mounts and overlays must not share a target path.
            models.PodConfig(
                name='test-pod',
                version='0.0.1',
                apps=self.sample_config.apps,
                images=self.sample_config.images,
                mounts=[
                    models.PodConfig.Mount(source='/p', target='/a'),
                ],
                overlays=[
                    models.PodConfig.Overlay(sources=['/q'], target='/a'),
                ],
            )
        with self.assertRaisesRegex(AssertionError, r'expect only one'):
            models.PodConfig.Image()
        with self.assertRaisesRegex(AssertionError, r'expect.*xor.*be false'):
            models.PodConfig.Image(name='name')
        with self.assertRaisesRegex(AssertionError, r'expect.*is_absolute'):
            models.PodConfig.Mount(source='foo', target='/bar')
        with self.assertRaisesRegex(AssertionError, r'expect.*is_absolute'):
            models.PodConfig.Mount(source='/foo', target='bar')
        with self.assertRaisesRegex(AssertionError, r'expect non-empty'):
            models.PodConfig.Overlay(sources=[], target='/bar')
        with self.assertRaisesRegex(AssertionError, r'expect.*is_absolute'):
            models.PodConfig.Overlay(sources=['foo'], target='/bar')
        with self.assertRaisesRegex(AssertionError, r'expect.*is_absolute'):
            models.PodConfig.Overlay(sources=['/foo'], target='bar')
        with self.assertRaisesRegex(AssertionError, r'expect x == 1, not 0'):
            models.PodConfig.Overlay(sources=['', '/foo'], target='/bar')
    def test_validate_id(self):
        """validate_pod_id returns a valid id unchanged and rejects bad ones."""
        self.assertEqual(
            models.validate_pod_id(self.sample_pod_id), self.sample_pod_id
        )
        # Rejected: empty string, upper-case hex digits, truncated id.
        for test_data in (
            '',
            '01234567-89AB-CDEF-0123-456789ABCDEF',
            '01234567-89ab-cdef-0123-456789abcde',
        ):
            with self.subTest(test_data):
                with self.assertRaisesRegex(
                    AssertionError, r'expect .*fullmatch.*'
                ):
                    models.validate_pod_id(test_data)
def test_id_converter(self):
self.assertEqual(
models.
pod_id_to_machine_id('01234567-89ab-cdef-0123-456789abcdef'),
'0123456789abcdef0123456789abcdef',
)
self.assertEqual(
models.machine_id_to_pod_id('0123456789abcdef0123456789abcdef'),
'01234567-89ab-cdef-0123-456789abcdef',
)
def test_generate_id(self):
id1 = models.generate_pod_id()
id2 = models.generate_pod_id()
self.assertNotEqual(id1, id2)
self.assertEqual(models.validate_pod_id(id1), id1)
self.assertEqual(models.validate_pod_id(id2), id2)
#
# Repo layout.
#
    def test_repo_layout(self):
        """The pod repo path helpers agree with the documented layout.

        Layout: <repo>/pods/{active,graveyard,tmp} with each active pod at
        <repo>/pods/active/<pod-id>/{config,config.orig,deps,work,upper,rootfs}.
        """
        for path1, path2 in (
            (
                pods._get_pod_repo_path(),
                bases.get_repo_path() / 'pods',
            ),
            (
                pods._get_active_path(),
                pods._get_pod_repo_path() / 'active',
            ),
            (
                pods._get_graveyard_path(),
                pods._get_pod_repo_path() / 'graveyard',
            ),
            (
                pods._get_tmp_path(),
                pods._get_pod_repo_path() / 'tmp',
            ),
            (
                pods._get_pod_dir_path(self.sample_pod_id),
                pods._get_active_path() / self.sample_pod_id,
            ),
            (
                pods._get_id(self.sample_pod_dir_path),
                self.sample_pod_id,
            ),
            (
                pods._get_config_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'config',
            ),
            (
                pods._get_orig_config_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'config.orig',
            ),
            (
                pods._get_deps_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'deps',
            ),
            (
                pods._get_work_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'work',
            ),
            (
                pods._get_upper_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'upper',
            ),
            (
                pods._get_rootfs_path(self.sample_pod_dir_path),
                pods._get_active_path() / self.sample_pod_id / 'rootfs',
            ),
        ):
            with self.subTest((path1, path2)):
                self.assertEqual(path1, path2)
#
# Top-level directories.
#
    def test_cleanup_top_dir(self):
        """_cleanup_top_dir skips pod dirs that are exclusively locked."""
        pod_id_1 = self.make_pod_id(1)
        pod_id_2 = self.make_pod_id(2)
        self.create_pod_dir(pod_id_1, self.sample_config)
        self.create_pod_dir(pod_id_2, self.sample_config)
        self.assertEqual(self.list_pod_dir_paths(), [pod_id_1, pod_id_2])
        # scripts is mocked so no real rm/umount commands run.
        with unittest.mock.patch(pods.__name__ + '.scripts'):
            # While pod 2 is locked, only pod 1 is collected.
            with locks.acquiring_exclusive(pods._get_pod_dir_path(pod_id_2)):
                pods._cleanup_top_dir(pods._get_active_path())
            self.assertEqual(self.list_pod_dir_paths(), [pod_id_2])
            # After the lock is released, pod 2 is collected as well.
            pods._cleanup_top_dir(pods._get_active_path())
            self.assertEqual(self.list_pod_dir_paths(), [])
#
# Pod directories.
#
    def test_iter_pod_dir_paths(self):
        """Only entries named like pod ids are yielded from the active dir."""
        pod_id_1 = self.make_pod_id(1)
        pod_id_2 = self.make_pod_id(2)
        # A file with a non-id name is ignored by the iterator.
        (pods._get_active_path() / 'irrelevant').touch()
        self.assertEqual(self.list_pod_dir_paths(), [])
        self.assertEqual(self.list_active(), ['irrelevant'])
        self.create_pod_dir(pod_id_2, self.sample_config)
        self.assertEqual(self.list_pod_dir_paths(), [pod_id_2])
        # A bare directory with an id name counts, even without contents.
        (pods._get_active_path() / pod_id_1).mkdir()
        self.assertEqual(self.list_pod_dir_paths(), [pod_id_1, pod_id_2])
    def test_maybe_move_pod_dir_to_active(self):
        """Moving into active succeeds once, then fails when the id exists."""
        self.assertEqual(self.list_pod_dir_paths(), [])
        path = self.test_repo_path / 'some-dir'
        path.mkdir()
        self.assertTrue(path.exists())
        self.assertTrue(
            pods._maybe_move_pod_dir_to_active(path, self.sample_pod_id)
        )
        self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
        self.assertFalse(path.exists())
        # Second attempt with the same id: the target is taken, so it fails.
        path.mkdir()
        self.assertFalse(
            pods._maybe_move_pod_dir_to_active(path, self.sample_pod_id)
        )
    def test_move_pod_dir_to_graveyard(self):
        """A pod dir is relocated from active into the graveyard, keeping its name."""
        def list_grave_paths():
            return sorted(p.name for p in pods._get_graveyard_path().iterdir())
        self.assertEqual(list_grave_paths(), [])
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        self.assertTrue(self.sample_pod_dir_path.exists())
        pods._move_pod_dir_to_graveyard(self.sample_pod_dir_path)
        self.assertEqual(list_grave_paths(), [self.sample_pod_id])
        self.assertFalse(self.sample_pod_dir_path.exists())
#
# Pod directory.
#
    def test_prepare_pod_dir(self):
        """_prepare_pod_dir runs end-to-end with external helpers mocked out."""
        self.sample_pod_dir_path.mkdir()
        # Create an image dir for every image referenced by the config.
        for i, image in enumerate(self.sample_config.images):
            self.create_image_dir(
                self.make_image_id(i + 1),
                images.ImageMetadata(name=image.name, version=image.version),
            )
        with unittest.mock.patch.multiple(
            pods.__name__,
            scripts=unittest.mock.DEFAULT,
            # We don't have a valid base image, and so we can't really
            # call ``builders.generate_unit_file``, etc.
            builders=unittest.mock.DEFAULT,
            _generate_hostname=unittest.mock.DEFAULT,
        ):
            pods._prepare_pod_dir(
                self.sample_pod_dir_path,
                self.sample_pod_id,
                self.sample_config,
            )
    def test_setup_pod_dir_barely(self):
        """_setup_pod_dir_barely creates config.orig plus the overlay dirs.

        Note: only ``config.orig`` is written at this stage; the final
        ``config`` file appears later in the pod life cycle.
        """
        pod_dir_path = pods._get_pod_dir_path(self.sample_pod_id)
        pod_dir_path.mkdir()
        pods._setup_pod_dir_barely(pod_dir_path, self.sample_config)
        self.assertFalse(
            pods._get_config_path(self.sample_pod_dir_path).is_file()
        )
        self.assertTrue(
            pods._get_orig_config_path(self.sample_pod_dir_path).is_file()
        )
        self.assertTrue(pods._get_deps_path(self.sample_pod_dir_path).is_dir())
        self.assertTrue(pods._get_work_path(self.sample_pod_dir_path).is_dir())
        self.assertTrue(
            pods._get_upper_path(self.sample_pod_dir_path).is_dir()
        )
        self.assertTrue(
            pods._get_rootfs_path(self.sample_pod_dir_path).is_dir()
        )
        # Nothing else is created.
        self.assertEqual(
            sorted(p.name for p in self.sample_pod_dir_path.iterdir()),
            ['config.orig', 'deps', 'rootfs', 'upper', 'work'],
        )
    def test_remove_pod_dir(self):
        """_remove_pod_dir deletes the whole pod dir (shell-outs mocked)."""
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        self.assertTrue(self.sample_pod_dir_path.is_dir())
        with unittest.mock.patch(pods.__name__ + '.scripts'):
            pods._remove_pod_dir(self.sample_pod_dir_path)
        self.assertFalse(self.sample_pod_dir_path.exists())
#
# Pod.
#
    @unittest.mock.patch(pods.__name__ + '.scripts')
    def test_mount_overlay(self, scripts_mock):
        """_mount_overlay issues one mount(8) call with the expected options.

        Image rootfs paths are listed in ``lowerdir`` in reverse config
        order (last image ends up first, i.e. topmost overlay layer).
        """
        image_id_1 = self.make_image_id(1)
        image_id_2 = self.make_image_id(2)
        self.create_image_dir(
            image_id_1,
            images.ImageMetadata(name='base', version='0.0.1'),
        )
        self.create_image_dir(image_id_2, self.sample_metadata)
        pods._mount_overlay(self.sample_pod_dir_path, self.sample_config)
        scripts_mock.run.assert_called_once_with([
            'mount',
            *('-t', 'overlay'),
            *(
                '-o',
                'lowerdir=%s,upperdir=%s,workdir=%s' % (
                    ':'.join([
                        str(pods._get_image_rootfs_path(image_id_2)),
                        str(pods._get_image_rootfs_path(image_id_1)),
                    ]),
                    pods._get_upper_path(self.sample_pod_dir_path),
                    pods._get_work_path(self.sample_pod_dir_path),
                ),
            ),
            'overlay',
            pods._get_rootfs_path(self.sample_pod_dir_path),
        ])
def test_make_bind_argument(self):
self.assertEqual(
pods._make_bind_argument(
models.PodConfig.Mount(
source='/a',
target='/b',
read_only=True,
)
),
'--bind-ro=/a:/b',
)
self.assertEqual(
pods._make_bind_argument(
models.PodConfig.Mount(
source='/a',
target='/b',
read_only=False,
)
),
'--bind=/a:/b',
)
def test_make_overlay_argument(self):
self.assertEqual(
pods._make_overlay_argument(
models.PodConfig.Overlay(
sources=['/a', '/b'],
target='/c',
read_only=True,
)
),
'--overlay-ro=/a:/b:/c',
)
self.assertEqual(
pods._make_overlay_argument(
models.PodConfig.Overlay(
sources=['/a', ''],
target='/b',
read_only=False,
)
),
'--overlay=/a::/b',
)
#
# Configs.
#
    def test_iter_configs(self):
        """_iter_configs yields (pod_dir_path, config) for every pod dir."""
        def list_configs():
            return sorted((p.name, c) for p, c in pods._iter_configs())
        pod_id_1 = self.make_pod_id(1)
        pod_id_2 = self.make_pod_id(2)
        self.assertEqual(list_configs(), [])
        self.create_pod_dir(pod_id_2, self.sample_config)
        self.assertEqual(list_configs(), [(pod_id_2, self.sample_config)])
        self.create_pod_dir(pod_id_1, self.sample_config)
        self.assertEqual(
            list_configs(),
            [(pod_id_1, self.sample_config), (pod_id_2, self.sample_config)],
        )
    def test_read_config(self):
        """_read_config round-trips the config written by create_pod_dir."""
        self.create_pod_dir(self.sample_pod_id, self.sample_config)
        self.assertEqual(
            pods._read_config(self.sample_pod_dir_path),
            self.sample_config,
        )
    def test_write_config(self):
        """_write_config creates 'config' (and only 'config') readably."""
        self.assertFalse((self.test_repo_path / 'config').exists())
        pods._write_config(self.sample_config, self.test_repo_path)
        self.assertTrue((self.test_repo_path / 'config').exists())
        self.assertEqual(
            pods._read_config(self.test_repo_path),
            self.sample_config,
        )
        # The orig-config file is not touched.
        self.assertFalse((self.test_repo_path / 'config.orig').exists())
    def test_write_orig_config(self):
        """_write_orig_config creates 'config.orig' without touching 'config'."""
        self.assertFalse((self.test_repo_path / 'config.orig').exists())
        pods._write_orig_config(self.sample_config, self.test_repo_path)
        self.assertTrue((self.test_repo_path / 'config.orig').exists())
        self.assertEqual(
            pods._read_orig_config(self.test_repo_path),
            self.sample_config,
        )
        self.assertFalse((self.test_repo_path / 'config').exists())
    def test_iter_image_ids(self):
        """_iter_image_ids resolves images by id, name+version, or tag.

        Unresolvable name+version or tag references trigger an assertion.
        """
        def list_image_ids(config):
            return sorted(pods._iter_image_ids(config))
        self.create_image_dir(self.sample_image_id, self.sample_metadata)
        images.cmd_tag(image_id=self.sample_image_id, new_tag='some-tag')
        # Resolution by explicit image id.
        config = models.PodConfig(
            name='test-pod',
            version='0.0.1',
            apps=self.sample_config.apps,
            images=[models.PodConfig.Image(id=self.sample_image_id)],
        )
        self.assertEqual(list_image_ids(config), [self.sample_image_id])
        # Resolution by name and version.
        config = models.PodConfig(
            name='test-pod',
            version='0.0.1',
            apps=self.sample_config.apps,
            images=[models.PodConfig.Image(name='sample-app', version='1.0')],
        )
        self.assertEqual(list_image_ids(config), [self.sample_image_id])
        # Resolution by tag.
        config = models.PodConfig(
            name='test-pod',
            version='0.0.1',
            apps=self.sample_config.apps,
            images=[models.PodConfig.Image(tag='some-tag')],
        )
        self.assertEqual(list_image_ids(config), [self.sample_image_id])
        # Unknown name/version and unknown tag both fail the lookup.
        config = models.PodConfig(
            name='test-pod',
            version='0.0.1',
            apps=self.sample_config.apps,
            images=[models.PodConfig.Image(name='no-such-app', version='1.0')],
        )
        with self.assertRaisesRegex(AssertionError, r'expect non-None value'):
            list_image_ids(config)
        config = models.PodConfig(
            name='test-pod',
            version='0.0.1',
            apps=self.sample_config.apps,
            images=[models.PodConfig.Image(tag='no-such-tag')],
        )
        with self.assertRaisesRegex(AssertionError, r'expect non-None value'):
            list_image_ids(config)
#
# Dependent images.
#
def test_add_ref_image_ids(self):
def list_image_ids():
return sorted(
p.name for p in \
pods._get_deps_path(self.sample_pod_dir_path).iterdir()
)
image_id_1 = self.make_image_id(1)
image_id_2 = self.make_image_id(2)
self.create_pod_dir(self.sample_pod_id, self.sample_config)
self.create_image_dir(image_id_1, self.sample_metadata)
self.create_image_dir(image_id_2, self.sample_metadata)
self.assertEqual(list_image_ids(), [])
self.assertEqual(
images._get_ref_count(images.get_image_dir_path(image_id_1)), 1
)
self.assertEqual(
images._get_ref_count(images.get_image_dir_path(image_id_2)), 1
)
config = models.PodConfig(
name='test-pod',
version='0.0.1',
apps=self.sample_config.apps,
images=[
models.PodConfig.Image(id=image_id_1),
models.PodConfig.Image(id=image_id_2),
],
)
new_config = pods._add_ref_image_ids(self.sample_pod_dir_path, config)
self.assertEqual(config, new_config)
| |
organization = database.team.organization
if organization and organization.external:
endpoint = organization.grafana_endpoint
datasource = organization.grafana_datasource
else:
endpoint = credential.endpoint
datasource = credential.get_parameter_by_name('environment')
engine_type = (
database.engine_type if not database.engine_type == "mysql_percona" else "mysql"
)
grafana_url_zabbix = '{}/dashboard/{}?{}={}&{}={}&{}={}&{}={}'.format(
endpoint,
credential.project.format(engine_type),
credential.get_parameter_by_name('db_param'), instance.dns,
credential.get_parameter_by_name('os_param'),
instance.hostname.hostname,
credential.get_parameter_by_name('disk_param'),
credential.get_parameter_by_name('disk_dir'),
credential.get_parameter_by_name('env_param'),
datasource
)
if organization and organization.external:
grafana_url_zabbix += "&orgId={}".format(organization.grafana_orgid)
context['grafana_url_zabbix'] = grafana_url_zabbix
print "grafana_url_zabbix:", grafana_url_zabbix
dashboard = credential.get_parameter_by_name('sofia_dbaas_database_dashboard')
dashboard = dashboard.format(engine_type)
url = "{}/{}?var-host_name={}&var-datasource={}".format(
credential.endpoint,
dashboard,
instance.hostname.hostname.split('.')[0],
credential.get_parameter_by_name('datasource'),
)
context['grafana_url_sofia'] = url
return render_to_response(
"logical/database/details/metrics_tab.html",
context, RequestContext(request)
)
def _disk_resize(request, database):
    """Validate and schedule a disk resize for ``database``.

    Queues a Django error message and returns early when the database is
    disabled or when the chosen offering is not larger than current usage;
    otherwise delegates to ``Database.disk_resize``.
    """
    try:
        check_is_database_enabled(database.id, 'disk resize')
    except DisabledDatabase as err:
        messages.add_message(request, messages.ERROR, err.message)
        return
    disk_offering = DiskOffering.objects.get(
        id=request.POST.get('disk_offering')
    )
    # Compare at 2-decimal GB precision, the same rounding shown to users.
    current_used = round(database.used_size_in_gb, 2)
    offering_size = round(disk_offering.size_gb(), 2)
    if current_used >= offering_size:
        messages.add_message(
            request, messages.ERROR,
            'Your database has {} GB, please choose a bigger disk'.format(
                current_used
            )
        )
        return
    Database.disk_resize(
        database=database,
        new_disk_offering=disk_offering.id,
        user=request.user
    )
def _vm_resize(request, database):
    """Schedule a VM resize for ``database`` unless it is dead or disabled."""
    try:
        check_is_database_dead(database.id, 'VM resize')
        check_is_database_enabled(database.id, 'VM resize')
    except DisabledDatabase as err:
        # Report the failure and stop; no resize is scheduled.
        messages.add_message(request, messages.ERROR, err.message)
        return
    target_offering = Offering.objects.get(id=request.POST.get('vm_offering'))
    Database.resize(
        database=database,
        offering=target_offering,
        user=request.user,
    )
def get_last_valid_resize(request, database):
    """Return the last resize if it is retryable, else queue an error.

    Returns ``None`` (after adding an error message) when the database
    cannot retry a resize or when the last resize did not end in error.
    """
    can_do_resize, error = database.can_do_resize_retry()
    if not can_do_resize:
        messages.add_message(request, messages.ERROR, error)
        return None
    last_resize = database.resizes.last()
    if not last_resize.is_status_error:
        error = "Cannot do retry, last resize status is '{}'!".format(
            last_resize.get_status_display()
        )
        messages.add_message(request, messages.ERROR, error)
        return None
    return last_resize
@database_view("")
def database_resize_retry(request, context, database):
    """Retry the last failed VM resize, then redirect to the resizes tab."""
    last_resize = get_last_valid_resize(request, database)
    if last_resize:
        # Resume the task from the step where the failed resize stopped.
        TaskRegister.database_resize_retry(
            database=database,
            user=request.user,
            offering=last_resize.target_offer,
            original_offering=last_resize.source_offer,
            since_step=last_resize.current_step
        )
    return HttpResponseRedirect(
        reverse('admin:logical_database_resizes', kwargs={'id': database.id})
    )
@database_view("")
def database_resize_rollback(request, context, database):
    """Roll back the last failed VM resize, then redirect to the resizes tab."""
    last_resize = get_last_valid_resize(request, database)
    if last_resize:
        TaskRegister.database_resize_rollback(last_resize, request.user)
    return HttpResponseRedirect(
        reverse('admin:logical_database_resizes', kwargs={'id': database.id})
    )
@database_view("")
def database_upgrade(request, context, database):
    """Schedule an engine upgrade, then redirect to the maintenance tab."""
    can_do_upgrade, error = database.can_do_upgrade()
    if not can_do_upgrade:
        messages.add_message(request, messages.ERROR, error)
    else:
        TaskRegister.database_upgrade(
            database=database,
            user=request.user
        )
    return HttpResponseRedirect(
        reverse('admin:logical_database_maintenance', kwargs={'id': database.id})
    )
@database_view("")
def database_upgrade_retry(request, context, database):
    """Retry the last failed engine upgrade from its failing step.

    Looks up upgrades originating from the current plan; requires the
    latest one to exist and to have ended in error.
    """
    can_do_upgrade, error = database.can_do_upgrade_retry()
    if can_do_upgrade:
        source_plan = database.databaseinfra.plan
        upgrades = database.upgrades.filter(source_plan=source_plan)
        last_upgrade = upgrades.last()
        if not last_upgrade:
            error = "Database does not have upgrades from {} {}!".format(
                source_plan.engine.engine_type, source_plan.engine.version
            )
        elif not last_upgrade.is_status_error:
            error = "Cannot do retry, last upgrade status is '{}'!".format(
                last_upgrade.get_status_display()
            )
        else:
            since_step = last_upgrade.current_step
    # NOTE(review): when can_do_upgrade is falsy, ``error`` is assumed to be
    # set by can_do_upgrade_retry(); otherwise ``since_step`` below would be
    # unbound -- confirm against the model implementation.
    if error:
        messages.add_message(request, messages.ERROR, error)
    else:
        TaskRegister.database_upgrade(
            database=database,
            user=request.user,
            since_step=since_step
        )
    return HttpResponseRedirect(
        reverse('admin:logical_database_maintenance', kwargs={'id': database.id})
    )
def _upgrade_patch(request, database, target_patch):
    """Schedule an engine patch upgrade to ``target_patch`` (a patch id)."""
    can_do_upgrade, error = database.can_do_upgrade_patch()
    if not can_do_upgrade:
        messages.add_message(request, messages.ERROR, error)
    else:
        # Resolve the posted id against the patches actually available to
        # this database (raises if the id is not among them).
        target_patch = database.engine.available_patches(
            database
        ).get(
            id=target_patch
        )
        TaskRegister.database_upgrade_patch(
            database=database,
            patch=target_patch,
            user=request.user
        )
@database_view("")
def database_upgrade_patch_retry(request, context, database):
    """Retry the last failed patch upgrade, then redirect.

    NOTE(review): redirects to the resizes tab, not the maintenance tab --
    looks intentional elsewhere in this module, but worth confirming.
    """
    _upgrade_patch_retry(request, database)
    return HttpResponseRedirect(
        reverse('admin:logical_database_resizes', kwargs={'id': database.id})
    )
def _upgrade_patch_retry(request, database):
    """Retry the last failed engine patch upgrade from its failing step."""
    can_do_upgrade, error = database.can_do_upgrade_patch_retry()
    if can_do_upgrade:
        upgrades = database.upgrades_patch.all()
        last_upgrade = upgrades.last()
        if not last_upgrade:
            error = "Database does not have upgrades"
        elif not last_upgrade.is_status_error:
            error = "Cannot do retry, last upgrade status is '{}'!".format(
                last_upgrade.get_status_display()
            )
        else:
            since_step = last_upgrade.current_step
    # NOTE(review): assumes ``error`` is truthy whenever can_do_upgrade is
    # falsy; otherwise ``last_upgrade``/``since_step`` would be unbound here.
    if error:
        messages.add_message(request, messages.ERROR, error)
    else:
        TaskRegister.database_upgrade_patch(
            database=database,
            patch=last_upgrade.target_patch,
            user=request.user,
            since_step=since_step
        )
@database_view('resizes')
def database_resizes(request, context, database):
    """Render the resizes tab; handle disk/VM resize and auto-resize POSTs."""
    if request.method == 'POST':
        if 'disk_resize' in request.POST and request.POST.get('disk_offering'):
            _disk_resize(request, database)
        elif 'vm_resize' in request.POST and request.POST.get('vm_offering'):
            _vm_resize(request, database)
        else:
            # Checkbox: absent from POST data means disabled.
            disk_auto_resize = request.POST.get('disk_auto_resize', False)
            database.disk_auto_resize = disk_auto_resize
            database.save()
    context['last_vm_resize'] = database.resizes.last()
    context['vm_offerings'] = list(database.environment.offerings.all(
    ).order_by('cpus', 'memory_size_mb'))
    context['current_vm_offering'] = database.infra.hosts[0].offering
    # for/else: append the current offering only when it is not listed.
    for offering in context['vm_offerings']:
        if offering == context['current_vm_offering']:
            break
    else:
        context['vm_offerings'].append(context['current_vm_offering'])
    disk_used_size_kb = database.infra.disk_used_size_in_kb
    if not disk_used_size_kb:
        disk_used_size_kb = database.used_size_in_kb
    # Offer only disks strictly larger than the current usage.
    context['disk_offerings'] = list(
        DiskOffering.objects.filter(size_kb__gt=disk_used_size_kb)
    )
    if database.infra.disk_offering not in context['disk_offerings']:
        context['disk_offerings'].insert(0, database.infra.disk_offering)
    return render_to_response(
        "logical/database/details/resizes_tab.html",
        context, RequestContext(request)
    )
@database_view('maintenance')
def database_maintenance(request, context, database):
    """Render the maintenance tab; handle patch upgrades and window updates.

    POST actions:
      - ``upgrade_patch`` (+ ``target_patch``): schedule a patch upgrade.
      - ``upgrade_patch_retry``: retry the last failed patch upgrade.
      - ``backup_hour`` / ``maintenance_window`` / ``maintenance_day``:
        update the infra scheduling fields (the backup hour must differ
        from the maintenance window).
      - anything else: plain save of the database.
    """
    if request.method == 'POST':
        if (
            'upgrade_patch' in request.POST and
            request.POST.get('target_patch')
        ):
            _upgrade_patch(request, database, request.POST.get('target_patch'))
        elif ('upgrade_patch_retry' in request.POST):
            _upgrade_patch_retry(request, database)
        # BUG FIX: the original condition was
        #   ('backup_hour' or 'maintenance_window' or 'maintenance_day' in request.POST)
        # which is always truthy (it evaluates to the non-empty string
        # 'backup_hour'), so the final ``else`` branch was unreachable.
        # Test membership of each key explicitly instead.
        elif ('backup_hour' in request.POST or
                'maintenance_window' in request.POST or
                'maintenance_day' in request.POST):
            if request.POST.get('backup_hour') == request.POST.get('maintenance_window'):
                messages.add_message(request, messages.ERROR, 'Backup hour must not be equal to maintenance window.')
            else:
                database.infra.backup_hour = request.POST['backup_hour']
                database.infra.maintenance_window = request.POST['maintenance_window']
                database.infra.maintenance_day = request.POST['maintenance_day']
                database.infra.save()
        else:
            database.save()
    WEEKDAYS = [
        (0, 'Sunday'),
        (1, 'Monday'),
        (2, 'Tuesday'),
        (3, 'Wednesday'),
        (4, 'Thursday'),
        (5, 'Friday'),
        (6, 'Saturday')
    ]
    context['upgrade_mongo_24_to_30'] = \
        database.is_mongodb_24() and \
        request.user.has_perm(constants.PERM_UPGRADE_MONGO24_TO_30)
    context['can_do_upgrade'] = \
        bool(database.infra.plan.engine_equivalent_plan) and \
        request.user.has_perm(constants.PERM_UPGRADE_DATABASE)
    context['last_upgrade'] = database.upgrades.filter(
        source_plan=database.infra.plan
    ).last()
    context['retry_patch'] = DatabaseUpgradePatch.objects.need_retry(
        database=database
    )
    context['available_patches'] = list(
        database.engine.available_patches(database)
    )
    # One entry per hour: label "HH:MM - HH:MM" covering a one-hour window.
    context['maintenance_windows'] = \
        [(hour, datetime.time(hour, 0).strftime('%H:%M - ' +
         str((datetime.datetime.combine(datetime.date.today(), datetime.time(hour)) +
         datetime.timedelta(hours=1)).strftime('%H:%M')))) for hour in range(24)]
    context['current_maintenance_window'] = \
        int(database.infra.maintenance_window)
    context['backup_hours'] = \
        [(hour, datetime.time(hour, 0).strftime(format="%H:%M")) for hour in range(24)]
    context['current_backup_hour'] = int(database.infra.backup_hour)
    context['maintenance_days'] = WEEKDAYS
    context['current_maintenance_day'] = int(database.infra.maintenance_day)
    return render_to_response(
        "logical/database/details/maintenance_tab.html",
        context, RequestContext(request)
    )
def _add_read_only_instances(request, database):
    """Validate and schedule the addition of read-only instances.

    Fails (with an error message) when the database is dead/disabled, the
    topology lacks horizontal scalability, the quantity is missing, or the
    total would exceed the ``max_read_hosts`` configuration (default 5).
    """
    try:
        check_is_database_dead(database.id, 'Add read-only instances')
        check_is_database_enabled(database.id, 'Add read-only instances')
    except DisabledDatabase as err:
        messages.add_message(request, messages.ERROR, err.message)
        return
    if not database.plan.replication_topology.has_horizontal_scalability:
        messages.add_message(
            request, messages.ERROR,
            'Database topology do not have horizontal scalability'
        )
        return
    if 'add_read_qtd' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Quantity is required')
        return
    max_read_hosts = Configuration.get_by_name_as_int('max_read_hosts', 5)
    qtd_new_hosts = int(request.POST['add_read_qtd'])
    current_read_nodes = len(database.infra.instances.filter(read_only=True))
    total_read_hosts = qtd_new_hosts + current_read_nodes
    if total_read_hosts > max_read_hosts:
        messages.add_message(
            request, messages.ERROR,
            'Current limit of read only hosts is {} and you are trying to setup {}'.format(
                max_read_hosts, total_read_hosts
            )
        )
        return
    TaskRegister.database_add_instances(
        database=database,
        user=request.user,
        number_of_instances=qtd_new_hosts
    )
@database_view('hosts')
def database_hosts(request, context, database):
    """Render the hosts tab; handle add-read-only and recreate-slave POSTs.

    Builds per-host summaries (status, description, write/read role) and
    splits them into ``instances_core`` and ``instances_read_only``.
    """
    from maintenance.models import RecreateSlave
    if request.method == 'POST':
        if 'add_read_only' in request.POST:
            _add_read_only_instances(request, database)
            # NOTE(review): this reverse() result is discarded -- looks like
            # a missing HttpResponseRedirect(...) around it; as written the
            # request falls through and re-renders the tab. Confirm intent
            # before changing.
            reverse(
                'admin:logical_database_hosts',
                kwargs={'id': database.id}
            )
        if 'recreate_slave' in request.POST:
            host_id = request.POST.get('host_id')
            host = database.infra.instances.filter(
                hostname__id=host_id
            ).first().hostname
            TaskRegister.recreate_slave(host, request.user)
            return HttpResponseRedirect(
                reverse(
                    'admin:logical_database_hosts',
                    kwargs={'id': database.id}
                )
            )
    hosts = OrderedDict()
    instances = database.infra.instances.all().order_by('shard', 'id')
    # For sharded topologies, reorder so each shard's current write
    # instance comes first, followed by that shard's slaves.
    if instances[0].shard:
        instances_tmp = []
        instances_slaves = []
        last_shard = None
        for instance in instances:
            if instance.is_current_write:
                instances_tmp.append(instance)
                last_shard = instance.shard
                if instances_slaves:
                    instances_tmp += instances_slaves
                    instances_slaves = []
            else:
                if last_shard == instance.shard:
                    instances_tmp.append(instance)
                else:
                    instances_slaves.append(instance)
        # Flush slaves whose shard's writer never appeared.
        if instances_slaves:
            instances_tmp += instances_slaves
            instances_slaves = []
        instances = instances_tmp
    # Group instances by host, preserving the order established above.
    for instance in instances:
        if instance.hostname not in hosts:
            hosts[instance.hostname] = []
        hosts[instance.hostname].append(instance)
    context['core_attribute'] = database.engine.write_node_description
    context['read_only_attribute'] = database.engine.read_node_description
    context['last_reinstall_vm'] = database.reinstall_vm.last()
    context['last_recreat_slave'] = RecreateSlave.objects.filter(
        host__in=database.infra.hosts,
        can_do_retry=True,
        status=RecreateSlave.ERROR
    ).last()
    context['instances_core'] = []
    context['instances_read_only'] = []
    for host, instances in hosts.items():
        attributes = []
        is_read_only = False
        status = ''
        switch_database = False
        for instance in instances:
            # The flags below end up reflecting the LAST instance of the
            # host (long-standing behavior of this view).
            is_read_only = instance.read_only
            status = instance.status_html()
            if not instance.is_database:
                context['non_database_attribute'] = instance.get_instance_type_display()
                attributes.append(context['non_database_attribute'])
            elif instance.is_current_write:
                attributes.append(context['core_attribute'])
                if database.databaseinfra.plan.is_ha:
                    switch_database = True
            else:
                attributes.append(context['read_only_attribute'])
        full_description = host.hostname
        padding = False
        # ``instance`` is the last instance from the loop above.
        if not instance.is_current_write:
            if instance.shard:
                padding = True
        if len(hosts) > 1:
            full_description += ' - ' + '/'.join(attributes)
        host_data = {
            'id': host.id, 'status': status, 'description': full_description,
            'switch_database': switch_database, 'padding': padding,
            'is_database': host.is_database
        }
        if is_read_only:
            context['instances_read_only'].append(host_data)
        else:
            context['instances_core'].append(host_data)
    context['max_read_hosts'] = Configuration.get_by_name_as_int('max_read_hosts', 5)
    enable_host = context['max_read_hosts'] - len(context['instances_read_only'])
    context['enable_host'] = range(1, enable_host+1)
    return render_to_response(
        "logical/database/details/hosts_tab.html",
        context, RequestContext(request)
    )
def database_delete_host(request, database_id, host_id):
    """Remove a read-only instance's host, then redirect to the hosts tab.

    Both checks run (messages accumulate) before deciding whether to
    schedule the removal task.
    """
    database = Database.objects.get(id=database_id)
    instance = database.infra.instances.get(hostname_id=host_id)
    can_delete = True
    if not instance.read_only:
        messages.add_message(
            request, messages.ERROR,
            'Host is not read only, cannot be removed.'
        )
        can_delete = False
    if database.is_being_used_elsewhere():
        messages.add_message(
            request, messages.ERROR,
            'Host cannot be deleted because database is in use by another task.'
        )
        can_delete = False
    if can_delete:
        TaskRegister.database_remove_instance(database=database, instance=instance, user=request.user)
    return HttpResponseRedirect(
        reverse('admin:logical_database_hosts', kwargs={'id': database.id})
    )
def _clone_database(request, database):
    """Validate POST parameters and schedule a clone of ``database``.

    Requires ``clone_name``, ``clone_env`` and ``clone_plan`` in the POST
    data, and enforces the team allocation limit, reserved names, the
    40-character name limit, and name uniqueness per environment.  On any
    validation failure an error message is queued and nothing is cloned.
    """
    can_be_cloned, error = database.can_be_cloned()
    if error:
        messages.add_message(request, messages.ERROR, error)
        return
    if 'clone_name' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Destination is required')
        return
    if 'clone_env' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Environment is required')
        return
    if 'clone_plan' not in request.POST:
        messages.add_message(request, messages.ERROR, 'Plan is required')
        return
    name = request.POST['clone_name']
    environment = Environment.objects.get(id=request.POST['clone_env'])
    plan = Plan.objects.get(id=request.POST['clone_plan'])
    current = len(database.team.databases_in_use_for(environment))
    if current >= database.team.database_alocation_limit:
        messages.add_message(
            request, messages.ERROR,
            # BUG FIX: the original string mixed a %s placeholder with
            # str.format, so users saw a literal '%s' and the limit value
            # was shown where the current count belonged.
            'The database allocation limit of {} has been exceeded for the '
            'team: {} => {}'.format(
                database.team.database_alocation_limit,
                current, database.team.database_alocation_limit
            )
        )
        return
    if name in database.infra.get_driver().RESERVED_DATABASES_NAME:
        messages.add_message(
            request, messages.ERROR,
            '{} is a reserved database name'.format(name)
        )
        return
    if len(name) > 40:
        messages.add_message(request, messages.ERROR, 'Database name too long')
        return
    if Database.objects.filter(name=name, environment=environment):
        messages.add_message(
            request, messages.ERROR,
            'There is already a database called {} on {}'.format(
                name, environment
            )
        )
        return
    Database.clone(
        database=database, clone_name=name, plan=plan,
        environment=environment, user=request.user
    )
def _restore_database(request, database):
    """Schedule a restore of ``database`` from the posted snapshot id."""
    can_be_restored, error = database.can_be_restored()
    if error:
        messages.add_message(request, messages.ERROR, error)
        return
    snapshot_id = request.POST.get('restore_snapshot')
    if snapshot_id is None:
        messages.add_message(request, messages.ERROR, 'Snapshot is required')
        return
    Database.restore(
        database=database, snapshot=snapshot_id, user=request.user
    )
def _delete_snapshot(request, database):
if 'restore_snapshot' not in request.POST:
messages.add_message(request, messages.ERROR, 'Snapshot is required')
return
snapshot_id = request.POST.get('restore_snapshot')
for instance in database.infra.instances.all():
snapshot = instance.backup_instance.filter(id=snapshot_id).first()
if snapshot:
break
else:
messages.add_message(
request, messages.ERROR, 'The snapshot {} is not from {}'.format(
snapshot_id, database
)
)
return
if snapshot.purge_at:
messages.add_message(
request, messages.ERROR,
'This snapshot, was deleted at {}'.format(snapshot.purge_at)
)
return
elif | |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
testRecipe1 = """\
class TestRecipe1(PackageRecipe):
name = 'testcase'
version = '1.0'
clearBuildReqs()
owner = 'root'
group = 'root'
withBinary = True
withUse = False
changedconfig = '%(sysconfdir)s/changedconfig'
unchangedconfig = '%(sysconfdir)s/unchangedconfig'
changed = '%(datadir)s/changed'
unchanged = '%(datadir)s/unchanged'
initialFileText = '\\n'.join([str(x) for x in range(0,10)]) + '\\n'
fileText = initialFileText
def modifyFiles(self):
pass
def setup(self):
if self.withUse:
if Use.readline:
pass
if self.withBinary:
self.Run('''
cat > hello.c <<'EOF'
#include <stdio.h>
int main(void) {
return printf("Hello, world.\\\\n");
}
EOF
''')
self.Make('hello', preMake='LDFLAGS="-static"')
self.Install('hello', '%(bindir)s/')
self.Create(self.changedconfig, self.unchangedconfig,
self.changed, self.unchanged, contents=self.initialFileText)
self.modifyFiles()
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(datadir)s/', '%(sysconfdir)s/')
self.Strip(debuginfo=False)
"""
testRecipe2="""\
class TestRecipe2(TestRecipe1):
version = '1.1'
fileText = TestRecipe1.fileText.replace("5", "1")
def modifyFile(self, path):
return 'sed -i s/^5/1/g %(destdir)s'+path
def modifyFiles(self):
for path in (self.changedconfig, self.changed):
self.Run(self.modifyFile(path))
def setup(self):
TestRecipe1.setup(self)
"""
testRecipe3="""\
class TestRecipe3(TestRecipe1):
version = '1.2'
fileText = TestRecipe1.fileText.replace("6", "2")
def modifyFile(self, path):
return 'sed -i s/^6/2/g %(destdir)s'+path
def modifyFiles(self):
for path in (self.changedconfig,):
self.Run(self.modifyFile(path))
def setup(self):
TestRecipe1.setup(self)
"""
testRecipe4="""\
class TestRecipe4(TestRecipe1):
version = '1.3'
def setup(self):
TestRecipe1.setup(self)
self.Config(exceptions = "/etc/.*")
"""
# like TestRecipe1, but only includes /usr/bin/hello
testRecipe5="""\
class TestRecipe5(TestRecipe1):
version = '1.4'
def setup(r):
TestRecipe1.setup(r)
r.Remove(r.changed)
r.Remove(r.unchanged)
r.Remove(r.changedconfig)
r.Remove(r.unchangedconfig)
"""
testTransientRecipe1=r"""\
class TransientRecipe1(PackageRecipe):
name = 'testcase'
version = '1.0'
clearBuildReqs()
fileText = 'bar\n'
def setup(r):
r.Create('/foo', contents=r.fileText)
r.Transient('/foo')
"""
testTransientRecipe2=r"""\
class TransientRecipe2(PackageRecipe):
name = 'testcase'
version = '1.1'
clearBuildReqs()
fileText = 'blah\n'
def setup(r):
r.Create('/foo', contents=r.fileText)
r.Transient('/foo')
"""
testTransientRecipe3=r"""\
class TransientRecipe3(PackageRecipe):
name = 'testcase'
version = '1.2'
clearBuildReqs()
fileText = 'blah\n'
def setup(r):
#don't create foo
r.Create('/foo2', contents=r.fileText)
r.Transient('/foo2')
"""
testTransientRecipe4=r"""\
class TransientRecipe4(PackageRecipe):
name = 'testcase'
version = '1.3'
clearBuildReqs()
fileText = 'blahblech\n'
def setup(r):
#don't create foo
r.Create('/foo3', contents=r.fileText)
r.Transient('/foo3')
"""
libhelloRecipePreface="""\
class Libhello(PackageRecipe):
name = 'libhello'
version = '0'
clearBuildReqs()
def setup(self):
# NormalizeInterpreterPaths not the purpose of these tests,
# and dealing with it running would make tests needlessly
# and uselessly more verbose.
del self.NormalizeInterpreterPaths
self.Create('libhello.c', contents='''
/* libhello.c - Simple example of a shared library */
void return_one(void) {
return 1;
}
''')
self.Create('true.c', contents='''
int main() {
return 0;
}
''')
self.Create('user.c', contents='''
int main() {
return return_one();
}
''')
"""
libhelloRecipe = libhelloRecipePreface + r"""
self.Run('%(cc)s %(ldflags)s -fPIC -shared -Wl,-soname,libhello.so.0 -o libhello.so.0.0 libhello.c -nostdlib')
self.Run('%(cc)s %(ldflags)s -static -o true true.c')
self.Run('%(cc)s %(ldflags)s -nostdlib -o user user.c libhello.so.0.0')
self.Install('libhello.so.0.0', '%(libdir)s/libhello.so.0.0')
self.Install('true', '%(essentialsbindir)s/ldconfig', mode=0755)
self.Install('user', '%(essentialsbindir)s/user', mode=0755)
self.Create('/etc/ld.so.conf', contents='/%(lib)s')
self.Create('%(essentialbindir)s/script',
contents='#!%(essentialsbindir)s/user', mode = 0755)
self.Provides('file', '%(essentialsbindir)s/user')
self.ComponentSpec('runtime', '%(essentialsbindir)s/ldconfig',
'%(libdir)s/libhello.so.0.*',
'%(sysconfdir)s/')
self.ComponentSpec('user', '%(essentialsbindir)s/user')
self.ComponentSpec('script', '%(essentialbindir)s/script')
self.Strip(debuginfo=False)
"""
libhelloRecipeLdConfD = libhelloRecipePreface + r"""
self.Run('%(cc)s %(ldflags)s -fPIC -shared -Wl,-soname,libhello.so.0 -o libhello.so.0.0 libhello.c -nostdlib')
self.Run('%(cc)s %(ldflags)s -static -o true true.c')
self.Run('%(cc)s %(ldflags)s -nostdlib -o user user.c libhello.so.0.0')
self.Install('libhello.so.0.0', '%(libdir)s/libhello.so.0.0')
self.Install('libhello.so.0.0', '%(essentiallibdir)s/libhello.so.0.0')
self.Install('true', '%(essentialsbindir)s/ldconfig', mode=0755)
self.Install('user', '%(essentialsbindir)s/user', mode=0755)
self.Create('/etc/ld.so.conf', contents='/opt/foo')
self.Create('/etc/ld.so.conf.d/first.conf', contents='%(essentiallibdir)s')
self.Create('%(essentialbindir)s/script',
contents='#!%(essentialsbindir)s/user', mode = 0755)
self.Provides('file', '%(essentialsbindir)s/user')
self.ComponentSpec('runtime', '%(essentialsbindir)s/ldconfig',
'%(libdir)s/libhello.so.0.*',
'%(essentiallibdir)s/libhello.so.0.*',
'/etc/ld.so.conf.d/first.conf',
'%(sysconfdir)s/')
self.ComponentSpec('user', '%(essentialsbindir)s/user')
self.ComponentSpec('script', '%(essentialbindir)s/script')
self.Strip(debuginfo=False)
"""
libhelloRecipeNoVersion = libhelloRecipePreface + """\
self.Run('%(cc)s %(ldflags)s -fPIC -shared -Wl,-soname,libhello.so -o libhello.so libhello.c -nostdlib')
self.Run('%(cc)s %(ldflags)s -static -o true true.c')
self.Run('%(cc)s %(ldflags)s -nostdlib -o user user.c libhello.so')
self.Install('libhello.so', '%(libdir)s/libhello.so', mode=0644)
self.Install('true', '%(essentialsbindir)s/ldconfig', mode=0755)
self.Install('user', '%(essentialsbindir)s/user', mode=0755)
self.Create('/etc/ld.so.conf', contents='/lib')
self.Create('%(essentialbindir)s/script',
contents='#!%(essentialsbindir)s/user', mode = 0755)
self.Provides('file', '%(essentialsbindir)s/user')
self.ComponentSpec('runtime', '%(essentialsbindir)s/ldconfig',
'%(libdir)s/libhello.so',
'%(sysconfdir)s/')
self.ComponentSpec('user', '%(essentialsbindir)s/user')
self.ComponentSpec('script', '%(essentialbindir)s/script')
self.Strip(debuginfo=False)
"""
bashRecipe="""\
class Bash(PackageRecipe):
name = 'bash'
version = '0'
clearBuildReqs()
def setup(r):
del r.NormalizeInterpreterPaths
r.Create('%(essentialbindir)s/bash', mode=0755)
r.Create('%(essentialbindir)s/conflict', mode=0755)
r.Provides('file', '%(essentialbindir)s/(ba)?sh')
if Use.ssl:
# turn on this use flag; we use this in the tests for flavor
# dependent resolution
pass
"""
bashMissingRecipe="""\
class Bash(PackageRecipe):
name = 'bash'
version = '1'
clearBuildReqs()
def setup(r):
del r.NormalizeInterpreterPaths
r.Create('%(essentialbindir)s/conflict', mode=0755)
if Use.ssl:
# turn on this use flag; we use this in the tests for flavor
# dependent resolution
pass
"""
bashUserRecipe="""\
class BashUser(PackageRecipe):
name = 'bashuser'
version = '0'
clearBuildReqs()
def setup(r):
del r.NormalizeInterpreterPaths
r.Create('%(essentialbindir)s/script', mode=0755,
contents = '#!/bin/bash')
"""
bashTroveUserRecipe="""\
class BashTroveUser(PackageRecipe):
name = 'bashtroveuser'
version = '0'
clearBuildReqs()
def setup(r):
del r.NormalizeInterpreterPaths
r.Create('%(essentiallibdir)s/empty', mode=0644)
r.Requires('bash:runtime', '%(essentiallibdir)s/empty')
"""
# Minimal GConf-style package: one schema file under /etc plus a fake
# gconftool-2 binary.
#
# NOTE: the recipe binds the recipe object to ``r`` (``def setup(r)``), so the
# ComponentSpec call must go through ``r`` — the original used ``self``, which
# is unbound inside this setup() and would raise NameError when the recipe
# string is cooked.
gconfRecipe="""\
class Gconf(PackageRecipe):
    name = 'gconf'
    version = '0'
    clearBuildReqs()
    def setup(r):
        r.Create('%(sysconfdir)s/gconf/schemas/foo')
        r.Install('/bin/true', '%(bindir)s/gconftool-2', mode=0755)
        r.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
chkconfigRecipe="""\
class ChkconfigTest(PackageRecipe):
name = 'testchk'
version = '0'
clearBuildReqs()
def setup(self):
self.Run('''
cat > chkconfig.c <<'EOF'
int main(int argc, char ** argv) {
int fd;
char ** chptr;
fd = open(\"OUT\", 0102, 0666);
for (chptr = argv; *chptr; chptr++) {
write(fd, *chptr, strlen(*chptr));
if (*(chptr + 1)) write(fd, \" \", 1);
}
write(fd, \"\\\\n\", 1);
close(fd);
}
EOF
''')
self.Run('''
cat > testchk <<'EOF'
# chkconfig: 345 95 5
# description: Runs commands scheduled by the at command at the time \
# specified when at was run, and runs batch commands when the load \
# average is low enough.
# processname: atd
EOF
''')
self.Run('%(cc)s %(ldflags)s -static -o chkconfig chkconfig.c')
self.Install("chkconfig", "%(essentialsbindir)s/", mode = 0755)
self.Install("testchk", "%(initdir)s/", mode = 0755)
self.Strip(debuginfo=False)
"""
doubleRecipe1 = """
class Double(PackageRecipe):
name = 'double'
version = '1.0'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo1", contents = "text1")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
doubleRecipe1_1 = """
class Double(PackageRecipe):
name = 'double'
version = '1.1'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo1.1", contents = "text1.1")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
doubleRecipe1_2 = """
class Double(PackageRecipe):
name = 'double'
version = '1.2'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo1.2", contents = "text1.2")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
doubleRecipe1_3 = """
class Double(PackageRecipe):
name = 'double'
version = '1.3'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo1.3", contents = "text1.3")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
doubleRecipe2 = """
class Double(PackageRecipe):
name = 'double'
version = '2.0'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo2", contents = "text2")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
doubleRecipe2_1 = """
class Double(PackageRecipe):
name = 'double'
version = '2.1'
clearBuildReqs()
owner = 'root'
group = 'root'
def setup(self):
self.Create("/etc/foo2.1", contents = "text2.1")
self.Ownership(self.owner, self.group, '.*')
self.ComponentSpec('runtime', '%(sysconfdir)s/')
"""
simpleTagHandler = """r.Run('''
cat > testtag.taghandler.c <<'EOF'
int main(int argc, char ** argv) {
int fd;
char ** chptr;
fd = open(\"OUT%s\", 0102, 0666);
for (chptr = argv; *chptr; chptr++) {
write(fd, *chptr, strlen(*chptr));
if (*(chptr + 1)) write(fd, \" \", 1);
}
write(fd, \"\\\\n\", 1);
close(fd);
}
EOF
''')
r.Run('%%(cc)s %%(ldflags)s -static -o testtag.taghandler testtag.taghandler.c')
r.Strip(debuginfo=False)"""
tagProviderRecipe1 = """
class TagProvider(PackageRecipe):
name = 'tagprovider'
version = '0'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testtag.tagdescription <<EOF
file /usr/libexec/conary/tags/testtag
implements files update
implements files remove
include /etc/test.*
EOF
''')
%(simpleTagHandler)s
r.Install('testtag.tagdescription',
'%%(tagdescriptiondir)s/testtag')
r.Install('testtag.taghandler',
'%%(taghandlerdir)s/testtag')
# Also test tagging our own files
r.Create('/etc/testself.1')
r.ComponentSpec('runtime', '%%(sysconfdir)s/')
""" % { 'simpleTagHandler' : (simpleTagHandler % "") }
tagProviderRecipe2 = """
class TagProvider(PackageRecipe):
name = 'tagprovider'
version = '1'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testtag.tagdescription <<EOF
file /usr/libexec/conary/tags/testtag
implements files update
implements files preremove
implements files remove
implements files preupdate
implements handler update
implements handler preremove
datasource args
include /etc/test.*
EOF
''')
%(simpleTagHandler)s
r.Install('testtag.tagdescription',
'%%(tagdescriptiondir)s/testtag')
r.Install('testtag.taghandler',
'%%(taghandlerdir)s/testtag')
# Also test tagging our own files
r.Create('/etc/testself.1')
r.ComponentSpec('runtime', '%%(sysconfdir)s/')
""" % { 'simpleTagHandler' : (simpleTagHandler % "") }
tagProviderRecipe3 = """
class TagProvider(PackageRecipe):
name = 'tagprovider'
version = '1'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testtag.tagdescription <<EOF
file /usr/libexec/conary/tags/testtag
implements files update
datasource stdin
include /etc/test.*
EOF
''')
%(simpleTagHandler)s
r.Install('testtag.tagdescription',
'%%(tagdescriptiondir)s/testtag')
r.Install('testtag.taghandler',
'%%(taghandlerdir)s/testtag')
""" % { 'simpleTagHandler' : (simpleTagHandler % "") }
# this is just like tagProviderRecipe2, but the tagdescription will create
# /tmp/OUT2 instead of /tmp/OUT
tagProviderRecipe4 = """
class TagProvider(PackageRecipe):
name = 'tagprovider'
version = '1'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testtag.tagdescription <<EOF
file /usr/libexec/conary/tags/testtag
implements files update
implements files preremove
implements files remove
implements handler update
implements handler preremove
datasource args
include /etc/test.*
EOF
''')
%(simpleTagHandler)s
r.Install('testtag.tagdescription',
'%%(tagdescriptiondir)s/testtag')
r.Install('testtag.taghandler',
'%%(taghandlerdir)s/testtag')
# Also test tagging our own files
r.Create('/etc/testself.1')
r.ComponentSpec('runtime', '%%(sysconfdir)s/')
""" % { 'simpleTagHandler' : (simpleTagHandler % "2") }
# this is just like tagProviderRecipe2, but it has a more limited implements
# set
tagProviderRecipe5 = """
class TagProvider(PackageRecipe):
name = 'tagprovider'
version = '1'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testtag.tagdescription <<EOF
file /usr/libexec/conary/tags/testtag
implements files remove
datasource args
include /etc/test.*
EOF
''')
%(simpleTagHandler)s
r.Install('testtag.tagdescription',
'%%(tagdescriptiondir)s/testtag')
r.Install('testtag.taghandler',
'%%(taghandlerdir)s/testtag')
# Also test tagging our own files
r.Create('/etc/testself.1')
r.ComponentSpec('runtime', '%%(sysconfdir)s/')
""" % { 'simpleTagHandler' : (simpleTagHandler % "") }
firstTagUserRecipe1 = """
class FirstTagUser(PackageRecipe):
name = 'firsttaguser'
version = '0'
clearBuildReqs()
def setup(r):
r.Run('''
cat > testfirst.1 <<EOF
first.1
EOF
''')
r.Run('''
cat > testfirst.2 | |
<reponame>piovere/beschi
from ..protocol import Protocol, BASE_TYPE_SIZES
from ..writer import Writer
from .. import LIB_NAME, LIB_VERSION
LANGUAGE_NAME = "C"
class CWriter(Writer):
language_name = LANGUAGE_NAME
default_extension = ".h"
def __init__(self, p: Protocol):
    """Configure the C writer for protocol ``p``.

    Maps protocol base types onto C type names, records the accessor-function
    suffixes and default-value literals used throughout generation, and
    derives the identifier prefix from the protocol namespace (if any).
    """
    super().__init__(protocol=p, tab=" ")
    # C spellings for each protocol-level scalar type
    self.type_mapping["byte"] = "uint8_t"
    self.type_mapping["bool"] = "bool"
    self.type_mapping["uint16"] = "uint16_t"
    self.type_mapping["int16"] = "int16_t"
    self.type_mapping["uint32"] = "uint32_t"
    self.type_mapping["int32"] = "int32_t"
    self.type_mapping["uint64"] = "uint64_t"
    self.type_mapping["int64"] = "int64_t"
    self.type_mapping["float"] = "float"
    self.type_mapping["double"] = "double"
    self.type_mapping["string"] = "char*"
    # suffixes of the generated _Read*/_Write* accessor functions
    self.base_serializers: dict[str, str] = {
        "byte": "UInt8",
        "bool": "Bool",
        "uint16": "UInt16",
        "int16": "Int16",
        "uint32": "UInt32",
        "int32": "Int32",
        "uint64": "UInt64",
        "int64": "Int64",
        "float": "Float",
        "double": "Double",
    }
    # literal default values used when emitting the *_default initializers
    self.base_defaults: dict[str, str] = {
        "byte": "0",
        "bool": "false",
        "uint16": "0",
        "int16": "0",
        "uint32": "0",
        "int32": "0",
        "uint64": "0",
        "int64": "0",
        "float": "0.0f",
        "double": "0.0",
    }
    # textual substitutions applied to the boilerplate, and the identifier
    # prefix prepended to every generated symbol
    self.subs: list[tuple[str, str]] = []
    self.prefix = "beschi_"
    # fix: identity comparison with None (was `!= None`)
    if self.protocol.namespace is not None:
        self.subs = [("beschi", self.protocol.namespace), ("BESCHI", self.protocol.namespace.upper())]
        self.prefix = f"{self.protocol.namespace}_"
def err_check_return(self):
    """Emit an early-return guard: if ``err`` is set, propagate it to the caller."""
    error_ok = f"{self.prefix.upper()}ERR_OK"
    self.write_line(f"if (err != {error_ok}) {{")
    self.indent_level += 1
    self.write_line("return err;")
    self.indent_level -= 1
    self.write_line("}")
def gen_default(self, members: list[tuple[str, str]]):
    """Emit designated-initializer lines assigning each member its default value."""
    for field_name, field_type in members:
        literal = self.base_defaults.get(field_type)
        if literal is not None:
            # scalar base type: use its recorded default literal
            self.write_line(f".{field_name} = {literal},")
        elif field_type == "string":
            self.write_line(f".{field_name} = (char*)\"\",")
        elif field_type.startswith("[") and field_type.endswith("]"):
            # lists start out unallocated
            self.write_line(f".{field_name} = NULL,")
        else:
            # nested struct: recurse inside a brace-wrapped initializer
            self.write_line(f".{field_name} = {{")
            self.indent_level += 1
            self.gen_default(self.protocol.structs[field_type])
            self.indent_level -= 1
            self.write_line("},")
def gen_struct(self, sname: str, members: list[tuple[str,str]], is_message: bool = False):
    """Emit the C typedef for one struct/message plus its function declarations.

    Messages additionally get a hidden ``_mt`` type tag, a ``*_default``
    constant, and GetSizeInBytes/Destroy declarations.
    """
    self.write_line("typedef struct {")
    self.indent_level += 1
    if is_message:
        # hidden tag member so the message type can be recovered from a pointer
        self.write_line(f"{self.prefix}MessageType _mt;")
    for member_name, member_type in members:
        if member_type in BASE_TYPE_SIZES.keys():
            self.write_line(f"{self.type_mapping[member_type]} {member_name};")
        elif member_type == "string":
            # strings carry an explicit length alongside the pointer
            self.write_line(f"{self.type_mapping['uint32']} {member_name}_len;")
            self.write_line(f"{self.type_mapping[member_type]} {member_name};")
        elif member_type[0] == "[" and member_type[-1] == "]":
            listed_type = member_type[1:-1]
            self.write_line(f"{self.type_mapping['uint32']} {member_name}_len;")
            if listed_type == "string":
                # string lists also track each element's length
                self.write_line(f"{self.type_mapping['uint32']}* {member_name}_els_len;")
            if listed_type in BASE_TYPE_SIZES.keys() or listed_type == "string":
                self.write_line(f"{self.type_mapping[listed_type]}* {member_name};")
            elif listed_type in self.protocol.structs:
                self.write_line(f"{self.prefix}{listed_type}* {member_name};")
        elif member_type in self.protocol.structs:
            self.write_line(f"{self.prefix}{member_type} {member_name};")
    self.indent_level -= 1
    self.write_line(f"}} {self.prefix}{sname};")
    if is_message:
        # a default-initialized constant instance of the message
        self.write_line(f"extern const {self.prefix}{sname} {self.prefix}{sname}_default;")
        self.write_line(f"const {self.prefix}{sname} {self.prefix}{sname}_default = {{")
        self.indent_level += 1
        self.write_line(f"._mt = {self.prefix}MessageType_{sname},")
        self.gen_default(members)
        self.indent_level -= 1
        self.write_line("};")
        self.write_line()
    # messages take an extra ``tag`` flag controlling the type-tag prefix
    if is_message:
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_WriteBytes({self.prefix}DataAccess* w, const {self.prefix}{sname}* src, bool tag);")
    else:
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_WriteBytes({self.prefix}DataAccess* w, const {self.prefix}{sname}* src);")
    self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_FromBytes({self.prefix}DataAccess* r, {self.prefix}{sname}* dst);")
    if is_message:
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_GetSizeInBytes(const {self.prefix}{sname}* m, size_t* size);")
        self.write_line(f"void {self.prefix}{sname}_Destroy({self.prefix}{sname} *m);")
        self.write_line()
    self.write_line()
def deserializer(self, varname: str, vartype: str, accessor: str):
    """Emit C statements that read ``accessor+varname`` of ``vartype`` from ``r``.

    Scalars and strings delegate to the generated _Read* helpers; lists read a
    uint32 count, allocate the element buffer(s), and read each element.
    """
    if vartype in BASE_TYPE_SIZES:
        self.write_line(f"err = {self.prefix}_Read{self.base_serializers[vartype]}(r, &({accessor}{varname}));")
        self.err_check_return()
    elif vartype == "string":
        self.write_line(f"err = {self.prefix}_ReadString(r, &({accessor}{varname}), &({accessor}{varname}_len));")
        self.err_check_return()
    elif vartype[0] == "[" and vartype[-1] == "]":
        listed_type = vartype[1:-1]
        self.write_line(f"err = {self.prefix}_ReadUInt32(r, &({accessor}{varname}_len));")
        self.err_check_return()
        if listed_type in BASE_TYPE_SIZES or listed_type == "string":
            self.write_line(f"{accessor}{varname} = ({self.type_mapping[listed_type]}*)malloc(sizeof({self.type_mapping[listed_type]}) * {accessor}{varname}_len);")
            self.write_line(f"if ({accessor}{varname} == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
            if listed_type == "string":
                self.write_line(f"{accessor}{varname}_els_len = ({self.type_mapping['uint32']}*)malloc(sizeof({self.type_mapping['uint32']}) * {accessor}{varname}_len);")
                # BUG FIX: check the buffer just allocated (_els_len); the
                # original re-tested the already-verified element array.
                self.write_line(f"if ({accessor}{varname}_els_len == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
        else:
            self.write_line(f"{accessor}{varname} = ({self.prefix}{listed_type}*)malloc(sizeof({self.prefix}{listed_type}) * {accessor}{varname}_len);")
            self.write_line(f"if ({accessor}{varname} == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
        self.write_line(f"for (uint32_t i = 0; i < {accessor}{varname}_len; i++) {{")
        self.indent_level += 1
        if listed_type in BASE_TYPE_SIZES:
            self.write_line(f"err = {self.prefix}_Read{self.base_serializers[listed_type]}(r, &({accessor}{varname}[i]));")
        elif listed_type == "string":
            self.write_line(f"err = {self.prefix}_ReadString(r, &({accessor}{varname}[i]), &({accessor}{varname}_els_len[i]));")
        else:
            self.write_line(f"err = {self.prefix}{listed_type}_FromBytes(r, &({accessor}{varname}[i]));")
        self.err_check_return()
        self.indent_level -= 1
        self.write_line("}")
    else:
        # nested struct: delegate to its own reader
        self.write_line(f"err = {self.prefix}{vartype}_FromBytes(r, &({accessor}{varname}));")
        self.err_check_return()
def serializer(self, varname: str, vartype: str, accessor: str):
    """Emit C statements that write ``accessor+varname`` of ``vartype`` to ``w``."""
    target = f"{accessor}{varname}"
    if vartype in BASE_TYPE_SIZES.keys():
        self.write_line(f"err = {self.prefix}_Write{self.base_serializers[vartype]}(w, &({target}));")
        self.err_check_return()
        return
    if vartype == "string":
        self.write_line(f"err = {self.prefix}_WriteString(w, &({target}), &({target}_len));")
        self.err_check_return()
        return
    if not (vartype[0] == "[" and vartype[-1] == "]"):
        # nested struct: delegate to its own writer
        self.write_line(f"err = {self.prefix}{vartype}_WriteBytes(w, &({target}));")
        self.err_check_return()
        return
    # list: write the element count, then each element in order
    listed_type = vartype[1:-1]
    self.write_line(f"err = {self.prefix}_WriteUInt32(w, &({target}_len));")
    self.err_check_return()
    self.write_line(f"for (uint32_t i = 0; i < {target}_len; i++) {{")
    self.indent_level += 1
    if listed_type in BASE_TYPE_SIZES.keys():
        self.write_line(f"err = {self.prefix}_Write{self.base_serializers[listed_type]}(w, &({target}[i]));")
    elif listed_type == "string":
        self.write_line(f"err = {self.prefix}_WriteString(w, &({target}[i]), &({target}_els_len[i]));")
    else:
        self.write_line(f"err = {self.prefix}{listed_type}_WriteBytes(w, &({target}[i]));")
    self.err_check_return()
    self.indent_level -= 1
    self.write_line("}")
def gen_measurement(self, s: tuple[str, list[tuple[str,str]]], accessor_prefix: str = "") -> tuple[list[str], int]:
    """Produce C statements that compute a struct's serialized size.

    Args:
        s: ``(struct_name, members)`` pair to measure.
        accessor_prefix: C expression prefix used to reach the members
            (e.g. ``"m->"`` or ``"m->child."``).

    Returns:
        ``(lines, accum)``: ``lines`` are C statements accounting for the
        dynamically-sized portions via ``*size``; ``accum`` is the
        statically-known byte total the caller should add separately.
    """
    lines: list[str] = []
    accum = 0
    if self.protocol.is_simple(s[0]):
        # fixed-size struct: the whole size is known at generation time
        lines.append(f"*size = {self.protocol.calculate_size(s[0])};")
    else:
        size_init = "*size = 0;"
        lines.append(size_init)
        for var_name, var_type in s[1]:
            if self.protocol.is_simple(var_type):
                accum += self.protocol.calculate_size(var_type)
            else:
                if var_type == "string":
                    # uint32 length prefix + the character bytes themselves
                    accum += BASE_TYPE_SIZES["uint32"]
                    lines.append(f"*size += {accessor_prefix}{var_name}_len;")
                elif var_type == "[string]":
                    # uint32 element count, then per-element length prefix + bytes
                    accum += BASE_TYPE_SIZES["uint32"]
                    lines.append(f"for (uint32_t i = 0; i < {accessor_prefix}{var_name}_len; i++) {{")
                    lines.append(f"{self.tab}*size += {BASE_TYPE_SIZES['uint32']} + {accessor_prefix}{var_name}_els_len[i];")
                    lines.append("}")
                elif var_type[0] == "[" and var_type[-1] == "]":
                    listed_var_type = var_type[1:-1]
                    if self.protocol.is_simple(listed_var_type):
                        # count prefix + count * fixed per-element size
                        accum += BASE_TYPE_SIZES["uint32"]
                        lines.append(f"*size += {accessor_prefix}{var_name}_len * {self.protocol.calculate_size(listed_var_type)};")
                    else:
                        # variable-size elements: recurse and wrap in a loop
                        accum += BASE_TYPE_SIZES["uint32"]
                        lines.append(f"for (uint32_t i = 0; i < {accessor_prefix}{var_name}_len; i++) {{")
                        clines, caccum = self.gen_measurement((var_type, self.protocol.structs[listed_var_type]), f"{accessor_prefix}{var_name}[i].")
                        if clines[0] == size_init:
                            # drop the nested code's redundant re-initialization
                            clines = clines[1:]
                        clines.append(f"*size += {caccum};")
                        lines += [f"{self.tab}{l}" for l in clines]
                        lines.append("}")
                else:
                    # inline nested struct: fold its measurement into ours
                    clines, caccum = self.gen_measurement((var_type, self.protocol.structs[var_type]), f"{accessor_prefix}{var_name}.")
                    if clines[0] == size_init:
                        clines = clines[1:]
                    lines += clines
                    accum += caccum
    return lines, accum
def gen_implementation(self, sname: str, members: list[tuple[str,str]], is_message: bool = False):
    """Emit the C function definitions for one struct or message type.

    Always emits ``*_FromBytes`` and ``*_WriteBytes``; messages additionally
    get ``*_GetSizeInBytes`` and ``*_Destroy``.
    """
    # -- FromBytes --------------------------------------------------------
    self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_FromBytes({self.prefix}DataAccess* r, {self.prefix}{sname}* dst) {{")
    self.indent_level += 1
    if is_message:
        self.write_line(f"dst->_mt = {self.prefix}MessageType_{sname};")
    if len(members) > 0:
        # `err` is only needed when at least one member gets read
        self.write_line(f"{self.prefix}err_t err;")
    [self.deserializer(v, t, "dst->") for v,t in members]
    self.write_line(f"return {self.prefix.upper()}ERR_OK;")
    self.indent_level -= 1
    self.write_line("}")
    self.write_line()
    # -- WriteBytes -------------------------------------------------------
    if is_message:
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_WriteBytes({self.prefix}DataAccess* w, const {self.prefix}{sname}* src, bool tag) {{")
    else:
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_WriteBytes({self.prefix}DataAccess* w, const {self.prefix}{sname}* src) {{")
    self.indent_level += 1
    self.write_line(f"{self.prefix}err_t err;")
    if is_message:
        # optionally prefix the payload with the message's type tag
        self.write_line("if (tag) {")
        self.indent_level += 1
        self.write_line(f"err = {self.prefix}_WriteUInt8(w, (const uint8_t *)&(src->_mt));")
        self.err_check_return()
        self.indent_level -= 1
        self.write_line("}")
    [self.serializer(v, t, "src->") for v,t in members]
    self.write_line(f"return {self.prefix.upper()}ERR_OK;")
    self.indent_level -= 1
    self.write_line("}")
    self.write_line()
    if is_message:
        # -- GetSizeInBytes ------------------------------------------------
        self.write_line(f"{self.prefix}err_t {self.prefix}{sname}_GetSizeInBytes(const {self.prefix}{sname}* m, size_t* size) {{")
        self.indent_level += 1
        measure_lines, accumulator = self.gen_measurement((sname, members), "m->")
        [self.write_line(s) for s in measure_lines]
        if accumulator > 0:
            # add the statically-known portion in a single statement
            self.write_line(f"*size += {accumulator};")
        self.write_line(f"return {self.prefix.upper()}ERR_OK;")
        self.indent_level -= 1
        self.write_line("}")
        self.write_line()
        # -- Destroy -------------------------------------------------------
        self.write_line(f"void {self.prefix}{sname}_Destroy({self.prefix}{sname} *m) {{")
        self.indent_level += 1
        def destroyer(vartype: str, varname: str):
            # recursively emit free() calls for heap data owned by a member
            if self.protocol.is_simple(vartype):
                return
            if vartype == "string":
                self.write_line(f"free({varname});")
            elif vartype == "[string]":
                # free each element string, then the lengths array and the list
                self.write_line(f"for (uint32_t i = 0; i < {varname}_len; i++) {{")
                self.indent_level += 1
                self.write_line(f"free({varname}[i]);")
                self.indent_level -= 1
                self.write_line("}")
                self.write_line(f"free({varname}_els_len);")
                self.write_line(f"free({varname});")
            elif vartype[0] == "[" and vartype[-1] == "]":
                listed_type = vartype[1:-1]
                if self.protocol.is_simple(listed_type):
                    self.write_line(f"free({varname});")
                else:
                    # destroy each element's own allocations before the list
                    self.write_line(f"for (uint32_t i = 0; i < {varname}_len; i++) {{")
                    self.indent_level += 1
                    destroyer(listed_type, f"{varname}[i]")
                    self.indent_level -= 1
                    self.write_line("}")
                    self.write_line(f"free({varname});")
            else:
                # inline struct: destroy each of its members in turn
                [destroyer(t, f"{varname}.{n}") for (n,t) in self.protocol.structs[vartype]]
        [destroyer(t, f"m->{n}") for (n,t) in members]
        self.write_line("free(m);")
        self.indent_level -= 1
        self.write_line("}")
        self.write_line()
def generate(self) -> str:
self.output = []
self.write_line(f"// This file was automatically generated by {LIB_NAME} v{LIB_VERSION}.")
self.write_line( "// <https://github.com/sjml/beschi>")
self.write_line(f"// Do not edit directly.")
self.write_line()
self.add_boilerplate(self.subs, 0)
# structure definitions and message declarations
self.write_line("typedef enum {")
self.indent_level += 1
self.write_line(f"{self.prefix}MessageType___NullMessage = 0,")
[self.write_line(f"{self.prefix}MessageType_{k} = {i+1}{',' if i < len(self.protocol.messages)-1 else ''}") for i, k in enumerate(self.protocol.messages.keys())]
self.indent_level -= 1
self.write_line(f"}} {self.prefix}MessageType;")
self.write_line()
self.write_line(f"{self.prefix}MessageType {self.prefix}GetMessageType(const void* m);")
self.write_line(f"{self.prefix}err_t {self.prefix}GetSizeInBytes(const void* m, size_t* len);")
self.write_line(f"{self.prefix}err_t {self.prefix}ProcessRawBytes({self.prefix}DataAccess* r, void*** msgListOut, size_t* len);")
self.write_line(f"{self.prefix}err_t {self.prefix}DestroyMessageList(void** msgList, size_t len);")
self.write_line()
for sname, smembers in self.protocol.structs.items():
self.gen_struct(sname, smembers)
for mname, mmembers in self.protocol.messages.items():
self.gen_struct(mname, mmembers, True)
self.add_boilerplate(self.subs, 1)
self.write_line(f"{self.prefix}MessageType {self.prefix}GetMessageType(const void* m) {{")
self.indent_level += 1
self.write_line("const uint8_t* buffer = (const uint8_t*)m;")
self.write_line("uint8_t msgType = buffer[0];")
self.write_line(f"if (msgType > {len(self.protocol.messages)}) {{")
self.indent_level += 1
self.write_line(f"return {self.prefix}MessageType___NullMessage;")
self.indent_level -= 1
self.write_line("}")
self.write_line(f"return ({self.prefix}MessageType)msgType;")
self.indent_level -= 1
self.write_line("}")
self.write_line()
self.write_line(f"{self.prefix}err_t {self.prefix}GetSizeInBytes(const void* m, size_t* len) {{")
self.indent_level += 1
self.write_line(f"{self.prefix}MessageType msgType = {self.prefix}GetMessageType(m);")
self.write_line("switch (msgType) {")
self.write_line(f"case {self.prefix}MessageType___NullMessage:")
self.indent_level += 1
self.write_line(f"return {self.prefix.upper()}ERR_INVALID_DATA;")
self.write_line("break;")
self.indent_level -= 1
for msg_type in self.protocol.messages.keys():
self.write_line(f"case {self.prefix}MessageType_{msg_type}:")
self.indent_level += 1
self.write_line(f"return {self.prefix}{msg_type}_GetSizeInBytes((const {self.prefix}{msg_type}*)m, len);")
self.write_line("break;")
self.indent_level -= 1
self.write_line("}")
self.write_line(f"return {self.prefix.upper()}ERR_INVALID_DATA;")
self.indent_level -= 1
self.write_line("}")
self.write_line()
self.write_line(f"{self.prefix}err_t {self.prefix}ProcessRawBytes({self.prefix}DataAccess* r, void*** msgListDst, size_t* len) {{")
self.indent_level += 1
self.write_line(f"{self.prefix}err_t err = {self.prefix.upper()}ERR_OK;")
self.write_line("size_t currCapacity = 8;")
self.write_line("*msgListDst = (void**)malloc(sizeof(void*) * currCapacity);")
self.write_line(f"if (*msgListDst == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
self.write_line("*len = 0;")
self.write_line(f"while (!{self.prefix}IsFinished(r)) {{")
self.indent_level += 1
self.write_line("while (*len >= currCapacity) {")
self.indent_level += 1
self.write_line("currCapacity *= 2;")
self.write_line("*msgListDst = (void**)realloc(*msgListDst, (sizeof(void*) * currCapacity));")
self.write_line(f"if (*msgListDst == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
self.indent_level -= 1
self.write_line("}")
self.write_line("uint8_t msgType;")
self.write_line(f"{self.prefix}_ReadUInt8(r, &msgType);")
self.err_check_return()
self.write_line()
self.write_line("void* out;")
self.write_line("switch (msgType) {")
for msg_type in self.protocol.messages.keys():
self.write_line(f"case {self.prefix}MessageType_{msg_type}:")
self.indent_level += 1
self.write_line(f"out = malloc(sizeof({self.prefix}{msg_type}));")
self.write_line(f"if (out == NULL) {{ return {self.prefix.upper()}ERR_ALLOCATION_FAILURE; }}")
self.write_line(f"err = {self.prefix}{msg_type}_FromBytes(r, ({self.prefix}{msg_type}*)out);")
self.write_line("(*msgListDst)[*len] = out;")
self.write_line("*len += 1;")
self.err_check_return()
self.write_line("break;")
self.indent_level -= 1
self.write_line("default:")
self.indent_level += 1
self.write_line(f"return {self.prefix.upper()}ERR_INVALID_DATA;")
self.write_line("break;")
self.indent_level -= 1
self.write_line("}")
| |
#!/usr/bin/env python3
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create an Android application bundle from one or more bundle modules."""
import argparse
import json
import os
import shutil
import sys
import zipfile
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
from pylib.utils import dexdump
from util import build_utils
from util import manifest_utils
from util import resource_utils
from xml.etree import ElementTree
import bundletool
# Location of language-based assets in bundle modules.
_LOCALES_SUBDIR = 'assets/locales/'
# The fallback locale should always have its .pak file included in
# the base apk, i.e. not use language-based asset targetting. This ensures
# that Chrome won't crash on startup if its bundle is installed on a device
# with an unsupported system locale (e.g. fur-rIT).
_FALLBACK_LOCALE = 'en-US'
# List of split dimensions recognized by this tool.
_ALL_SPLIT_DIMENSIONS = [ 'ABI', 'SCREEN_DENSITY', 'LANGUAGE' ]
# Due to historical reasons, certain languages identified by Chromium with a
# 3-letters ISO 639-2 code, are mapped to a nearly equivalent 2-letters
# ISO 639-1 code instead (due to the fact that older Android releases only
# supported the latter when matching resources).
#
# the same conversion as for Java resources.
_SHORTEN_LANGUAGE_CODE_MAP = {
'fil': 'tl', # Filipino to Tagalog.
}
# A list of extensions corresponding to files that should never be compressed
# in the bundle. This used to be handled by bundletool automatically until
# release 0.8.0, which required that this be passed to the BundleConfig
# file instead.
#
# This is the original list, which was taken from aapt2, with 'webp' added to
# it (which curiously was missing from the list).
_UNCOMPRESSED_FILE_EXTS = [
'3g2', '3gp', '3gpp', '3gpp2', 'aac', 'amr', 'awb', 'git', 'imy', 'jet',
'jpeg', 'jpg', 'm4a', 'm4v', 'mid', 'midi', 'mkv', 'mp2', 'mp3', 'mp4',
'mpeg', 'mpg', 'ogg', 'png', 'rtttl', 'smf', 'wav', 'webm', 'webp', 'wmv',
'xmf'
]
def _ParseArgs(args):
  """Parse and validate command-line arguments.

  Args:
    args: List of command-line argument strings (without argv[0]).
  Returns:
    An argparse.Namespace with post-processed option values (GN lists
    expanded, uncompressed assets merged into a set).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--out-bundle', required=True,
                      help='Output bundle zip archive.')
  parser.add_argument('--module-zips', required=True,
                      help='GN-list of module zip archives.')
  parser.add_argument(
      '--pathmap-in-paths',
      action='append',
      help='List of module pathmap files.')
  parser.add_argument(
      '--module-name',
      action='append',
      dest='module_names',
      help='List of module names.')
  parser.add_argument(
      '--pathmap-out-path', help='Path to combined pathmap file for bundle.')
  parser.add_argument(
      '--rtxt-in-paths', action='append', help='GN-list of module R.txt files.')
  parser.add_argument(
      '--rtxt-out-path', help='Path to combined R.txt file for bundle.')
  parser.add_argument('--uncompressed-assets', action='append',
                      help='GN-list of uncompressed assets.')
  parser.add_argument(
      '--compress-shared-libraries',
      action='store_true',
      help='Whether to store native libraries compressed.')
  parser.add_argument('--compress-dex',
                      action='store_true',
                      help='Compress .dex files')
  parser.add_argument('--split-dimensions',
                      help="GN-list of split dimensions to support.")
  parser.add_argument(
      '--base-module-rtxt-path',
      help='Optional path to the base module\'s R.txt file, only used with '
      'language split dimension.')
  parser.add_argument(
      '--base-allowlist-rtxt-path',
      help='Optional path to an R.txt file, string resources '
      'listed there _and_ in --base-module-rtxt-path will '
      'be kept in the base bundle module, even if language'
      ' splitting is enabled.')
  parser.add_argument('--warnings-as-errors',
                      action='store_true',
                      help='Treat all warnings as errors.')
  parser.add_argument(
      '--validate-services',
      action='store_true',
      help='Check if services are in base module if isolatedSplits is enabled.')

  options = parser.parse_args(args)
  options.module_zips = build_utils.ParseGnList(options.module_zips)
  options.rtxt_in_paths = build_utils.ParseGnList(options.rtxt_in_paths)
  options.pathmap_in_paths = build_utils.ParseGnList(options.pathmap_in_paths)

  if len(options.module_zips) == 0:
    raise Exception('The module zip list cannot be empty.')

  # Merge all uncompressed assets into a set.
  uncompressed_list = []
  if options.uncompressed_assets:
    for l in options.uncompressed_assets:
      for entry in build_utils.ParseGnList(l):
        # Each entry has the following format: 'zipPath' or 'srcPath:zipPath'
        pos = entry.find(':')
        if pos >= 0:
          uncompressed_list.append(entry[pos + 1:])
        else:
          uncompressed_list.append(entry)

  options.uncompressed_assets = set(uncompressed_list)

  # Check that all split dimensions are valid
  if options.split_dimensions:
    options.split_dimensions = build_utils.ParseGnList(options.split_dimensions)
    for dim in options.split_dimensions:
      if dim.upper() not in _ALL_SPLIT_DIMENSIONS:
        parser.error('Invalid split dimension "%s" (expected one of: %s)' % (
            dim, ', '.join(x.lower() for x in _ALL_SPLIT_DIMENSIONS)))

  # As a special case, --base-allowlist-rtxt-path can be empty to indicate
  # that the module doesn't need such a allowlist. That's because it is easier
  # to check this condition here than through GN rules :-(
  if options.base_allowlist_rtxt_path == '':
    options.base_module_rtxt_path = None

  # Check --base-module-rtxt-path and --base-allowlist-rtxt-path usage.
  if options.base_module_rtxt_path:
    if not options.base_allowlist_rtxt_path:
      parser.error(
          '--base-module-rtxt-path requires --base-allowlist-rtxt-path')
    # Robustness fix: options.split_dimensions is None when --split-dimensions
    # was never passed; report a proper usage error instead of raising
    # ``TypeError: argument of type 'NoneType' is not iterable``.
    if not options.split_dimensions or (
        'language' not in options.split_dimensions):
      parser.error('--base-module-rtxt-path is only valid with '
                   'language-based splits.')

  return options
def _MakeSplitDimension(value, enabled):
"""Return dict modelling a BundleConfig splitDimension entry."""
return {'value': value, 'negate': not enabled}
def _GenerateBundleConfigJson(uncompressed_assets, compress_dex,
                              compress_shared_libraries, split_dimensions,
                              base_master_resource_ids):
  """Generate the JSON text of a BundleConfig file.

  Args:
    uncompressed_assets: A list or set of file paths under assets/ that should
      always be stored uncompressed.
    compress_dex: Boolean, whether to compress .dex files.
    compress_shared_libraries: Boolean, whether to compress native libs.
    split_dimensions: list of split dimensions.
    base_master_resource_ids: Optional list of 32-bit resource IDs to keep
      inside the base module, even when split dimensions are enabled.
  Returns:
    A JSON string (the result of json.dumps(), not a dict) that can be
    written to a BundleConfig file.
  """
  # Compute splitsConfig list. Each item is a dictionary that can have
  # the following keys:
  #    'value': One of ['LANGUAGE', 'DENSITY', 'ABI']
  #    'negate': Boolean, True to indicate that the bundle should *not* be
  #              split (unused at the moment by this script).
  split_dimensions = [ _MakeSplitDimension(dim, dim in split_dimensions)
                       for dim in _ALL_SPLIT_DIMENSIONS ]
  # Native libraries loaded by the crazy linker.
  # Whether other .so files are compressed is controlled by
  # "uncompressNativeLibraries".
  uncompressed_globs = ['lib/*/crazy.*']
  # Locale-specific pak files stored in bundle splits need not be compressed.
  uncompressed_globs.extend(
      ['assets/locales#lang_*/*.pak', 'assets/fallback-locales/*.pak'])
  uncompressed_globs.extend('assets/' + x for x in uncompressed_assets)
  # NOTE: Use '**' instead of '*' to work through directories!
  uncompressed_globs.extend('**.' + ext for ext in _UNCOMPRESSED_FILE_EXTS)
  if not compress_dex:
    # Explicit glob required only when using bundletool to create .apks files.
    # Play Store looks for and respects "uncompressDexFiles" set below.
    # b/176198991
    # This is added as a placeholder entry in order to have no effect unless
    # processed with app_bundle_utils.GenerateBundleApks().
    uncompressed_globs.append('classesX.dex')
  data = {
      'optimizations': {
          'splitsConfig': {
              'splitDimension': split_dimensions,
          },
          'uncompressNativeLibraries': {
              'enabled': not compress_shared_libraries,
          },
          'uncompressDexFiles': {
              'enabled': True,  # Applies only for P+.
          }
      },
      'compression': {
          'uncompressedGlob': sorted(uncompressed_globs),
      },
  }
  if base_master_resource_ids:
    data['master_resources'] = {
        'resource_ids': list(base_master_resource_ids),
    }
  return json.dumps(data, indent=2)
def _RewriteLanguageAssetPath(src_path):
  """Rewrite the destination path of a locale asset for language-based splits.

  Should only be used when generating bundles with language-based splits.
  This will rewrite paths that look like locales/<locale>.pak into
  locales#<language>/<locale>.pak, where <language> is the language code
  from the locale.

  Args:
    src_path: Source path of the asset inside the module zip.
  Returns:
    The rewritten destination path (str), or src_path unchanged when the
    entry is not a locale .pak file.
  """
  if not src_path.startswith(_LOCALES_SUBDIR) or not src_path.endswith('.pak'):
    # Fix: previously returned [src_path] (a list), inconsistent with the
    # str returned by all other branches; the caller uses the result as a
    # zip entry path, which must be a string.
    return src_path
  locale = src_path[len(_LOCALES_SUBDIR):-4]
  android_locale = resource_utils.ToAndroidLocaleName(locale)
  # The locale format is <lang>-<region> or <lang> or BCP-47 (e.g. b+sr+Latn).
  # Extract the language.
  pos = android_locale.find('-')
  if android_locale.startswith('b+'):
    # If locale is in BCP-47 the language is the second tag (e.g. b+sr+Latn)
    android_language = android_locale.split('+')[1]
  elif pos >= 0:
    android_language = android_locale[:pos]
  else:
    android_language = android_locale
  if locale == _FALLBACK_LOCALE:
    # Fallback locale .pak files must be placed in a different directory
    # to ensure they are always stored in the base module.
    result_path = 'assets/fallback-locales/%s.pak' % locale
  else:
    # Other language .pak files go into a language-specific asset directory
    # that bundletool will store in separate split APKs.
    result_path = 'assets/locales#lang_%s/%s.pak' % (android_language, locale)
  return result_path
def _SplitModuleForAssetTargeting(src_module_zip, tmp_dir, split_dimensions):
  """Splits assets in a module if needed.

  Args:
    src_module_zip: input zip module path.
    tmp_dir: Path to temporary directory, where the new output module might
      be written to.
    split_dimensions: list of split dimensions.
  Returns:
    If the module doesn't need asset targeting, doesn't do anything and
    returns src_module_zip. Otherwise, create a new module zip archive under
    tmp_dir with the same file name, but which contains assets paths targeting
    the proper dimensions.
  """
  if 'LANGUAGE' not in split_dimensions:
    # No language-based splitting requested; keep the module untouched.
    return src_module_zip

  with zipfile.ZipFile(src_module_zip, 'r') as src_zip:
    locale_entries = set(
        name for name in src_zip.namelist() if name.startswith(_LOCALES_SUBDIR))
    if not locale_entries:
      # No language-based assets to retarget in this module.
      return src_module_zip

    dst_zip_path = os.path.join(tmp_dir, os.path.basename(src_module_zip))
    with zipfile.ZipFile(dst_zip_path, 'w') as dst_zip:
      for info in src_zip.infolist():
        entry_name = info.filename
        # Only locale entries get their destination path rewritten.
        dst_name = (_RewriteLanguageAssetPath(entry_name)
                    if entry_name in locale_entries else entry_name)
        build_utils.AddToZipHermetic(
            dst_zip,
            dst_name,
            data=src_zip.read(entry_name),
            compress=info.compress_type != zipfile.ZIP_STORED)
  return dst_zip_path
def _GenerateBaseResourcesAllowList(base_module_rtxt_path,
base_allowlist_rtxt_path):
"""Generate a allowlist of base master resource ids.
Args:
base_module_rtxt_path: Path to base module R.txt file.
base_allowlist_rtxt_path: Path to base allowlist R.txt file.
Returns:
list of resource | |
This URL can be used in conjunction with the
video content authorization token to download the video MP4 file. The resulting MP4 file can be
played on any standard media player. It is available when the video type is 'file' and video
file is available for consumption.
:type download_url: str
:param archive_base_url: Video archive streaming base URL. The archived content can be
automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
used in conjunction with the video content authorization token on any compatible DASH or HLS
players by appending the following to the base URL:
.. code-block::
- HLSv4: /manifest(format=m3u8-aapl).m3u8
- HLS CMAF: /manifest(format=m3u8-cmaf)
- DASH CMAF: /manifest(format=mpd-time-cmaf)
Moreover, an ongoing video recording can be played in "live mode" with latencies which are
approximately double of the chosen video segment length. It is available when the video type is
'archive' and video archiving is enabled.
:type archive_base_url: str
:param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
stream. It is available when the video type is 'archive' and a live, low-latency feed is
available from the source.
:type rtsp_tunnel_url: str
:param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with
the video content authorization token to download the most recent still image from the video
archive in different resolutions. They are available when the video type is 'archive' and
preview images are enabled.
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
"""
_attribute_map = {
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
}
def __init__(
self,
**kwargs
):
super(VideoContentUrls, self).__init__(**kwargs)
self.download_url = kwargs.get('download_url', None)
self.archive_base_url = kwargs.get('archive_base_url', None)
self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
self.preview_image_urls = kwargs.get('preview_image_urls', None)
class VideoCreationProperties(msrest.serialization.Model):
    """Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.

    :param title: Optional title provided by the user. Value can be up to 256 characters long.
    :type title: str
    :param description: Optional description provided by the user. Value can be up to 2048
     characters long.
    :type description: str
    :param segment_length: Segment length indicates the length of individual content files
     (segments) which are persisted to storage. Smaller segments provide lower archive playback
     latency but generate larger volume of storage transactions. Larger segments reduce the amount
     of storage transactions while increasing the archive playback latency. Value must be specified
     in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to
     5 minutes, in 30 seconds increments. Changing this value after the initial call to create the
     video resource can lead to errors when uploading content to the archive. Default value is 30
     seconds. This property is only allowed for topologies where "kind" is set to "live".
    :type segment_length: str
    :param retention_period: Video retention period indicates how long the video is kept in
     storage. Value must be specified in ISO8601 duration format (i.e. "P1D" equals 1 day) and can
     vary between 1 day to 10 years, in 1 day increments. When absent (null), all video content is
     retained indefinitely. This property is only allowed for topologies where "kind" is set to
     "live".
    :type retention_period: str
    """

    # Attribute-name -> wire-key/type mapping consumed by the msrest base class.
    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the creation properties; all fields are optional and
        default to None when absent from kwargs."""
        super(VideoCreationProperties, self).__init__(**kwargs)
        for name in ('title', 'description', 'segment_length',
                     'retention_period'):
            setattr(self, name, kwargs.get(name, None))
class VideoEncoderBase(msrest.serialization.Model):
    """Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: VideoEncoderH264.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
     be encoded. If omitted, encoder sets it automatically to try and match the quality of the input
     video.
    :type bitrate_kbps: str
    :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
     be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
     frame rate of the input video.
    :type frame_rate: str
    :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
     resolution of the input video.
    :type scale: ~video_analyzer.models.VideoScale
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
        'frame_rate': {'key': 'frameRate', 'type': 'str'},
        'scale': {'key': 'scale', 'type': 'VideoScale'},
    }

    # Maps discriminator values on the wire to concrete model class names.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
    }

    def __init__(self, **kwargs):
        """Initialize the base encoder preset; optional fields default to None."""
        super(VideoEncoderBase, self).__init__(**kwargs)
        # Discriminator is filled in by subclasses (or the server).
        self.type = None  # type: Optional[str]
        for name in ('bitrate_kbps', 'frame_rate', 'scale'):
            setattr(self, name, kwargs.get(name, None))
class VideoEncoderH264(VideoEncoderBase):
    """A custom preset for encoding video with the H.264 (AVC) codec.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
     be encoded. If omitted, encoder sets it automatically to try and match the quality of the input
     video.
    :type bitrate_kbps: str
    :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
     be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
     frame rate of the input video.
    :type frame_rate: str
    :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
     resolution of the input video.
    :type scale: ~video_analyzer.models.VideoScale
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
        'frame_rate': {'key': 'frameRate', 'type': 'str'},
        'scale': {'key': 'scale', 'type': 'VideoScale'},
    }

    def __init__(self, **kwargs):
        """Initialize an H.264 preset; the base class consumes the optional
        bitrate/frame-rate/scale keyword arguments."""
        super(VideoEncoderH264, self).__init__(**kwargs)
        # Fixed discriminator value identifying this concrete preset type.
        self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264'  # type: str
class VideoEntity(ProxyResource):
"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param title: Optional video title provided by the user. Value can be up to 256 characters
long.
:type title: str
:param description: Optional video description provided by the user. Value can be up to 2048
characters long.
:type description: str
:ivar type_properties_type: Video content type. Different content types are suitable for
different applications and scenarios. Possible values include: "Archive", "File".
:vartype type_properties_type: str or ~video_analyzer.models.VideoType
:ivar flags: Video flags contain information about the available video actions and its dynamic
properties based on the current video state.
:vartype flags: ~video_analyzer.models.VideoFlags
:ivar content_urls: Set of URLs to the video content.
:vartype content_urls: ~video_analyzer.models.VideoContentUrls
:param media_info: Contains information about the video and audio content.
:type media_info: ~video_analyzer.models.VideoMediaInfo
:param archival: Video archival properties.
:type archival: ~video_analyzer.models.VideoArchival
"""
_validation = {
'id': | |
import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import compat
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext
def _is_sslproto_available():
return hasattr(ssl, "MemoryBIO")
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"        # No security layer: data passes through as-is.
_DO_HANDSHAKE = "DO_HANDSHAKE"  # TLS handshake in progress.
_WRAPPED = "WRAPPED"            # Handshake done: app data encrypted/decrypted.
_SHUTDOWN = "SHUTDOWN"          # close_notify sent; unwrap in progress.
class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024   # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.
        The *server_side* argument indicates whether this is a server side or
        client side transport.
        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        self._state = _UNWRAPPED
        # Memory BIOs carrying record-level bytes to/from the SSL object.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        # True when the SSL layer needs more record-level input before it can
        # make progress (SSL_ERROR_WANT_READ).
        self._need_ssldata = False
        # One-shot completion callbacks for do_handshake() and shutdown().
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        # Kick off the handshake; any records to send come back as ssldata.
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        # Produce the close_notify record(s) to send to the peer.
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block
            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break
            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()
            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            # WANT_READ/WANT_WRITE/SYSCALL mean "retry later with more data";
            # any other error is fatal and is re-raised.
            if getattr(exc, 'errno', None) not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)
class _SSLProtocolTransport(transports._FlowControlMixin,
transports.Transport):
    def __init__(self, loop, ssl_protocol, app_protocol):
        # Event loop driving this transport.
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        # Application-level protocol that receives the decrypted data.
        self._app_protocol = app_protocol
        # Set to True once close() has been called.
        self._closed = False
    def get_extra_info(self, name, default=None):
        """Get optional transport information.

        Delegates the lookup to the owning SSLProtocol instance.
        """
        return self._ssl_protocol._get_extra_info(name, default)
    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        self._closed = True
        self._ssl_protocol._start_shutdown()
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4, thanks to PEP 442.
| |
# If it is not the *very last* index, find the the critical level
# of mNrm where the artificial borrowing contraint begins to bind.
d0 = cNrmNow[idx] - cNrmCnst[idx]
d1 = cNrmCnst[idx + 1] - cNrmNow[idx + 1]
m0 = mNrmNow[idx]
m1 = mNrmNow[idx + 1]
alpha = d0 / (d0 + d1)
mCrit = m0 + alpha * (m1 - m0)
# Adjust the grids of mNrm and cNrm to account for the borrowing constraint.
cCrit = mCrit - BoroCnstArt
mNrmNow = np.concatenate(([BoroCnstArt, mCrit], mNrmNow[(idx + 1):]))
cNrmNow = np.concatenate(([0.0, cCrit], cNrmNow[(idx + 1):]))
else:
# If it *is* the very last index, then there are only three points
# that characterize the consumption function: the artificial borrowing
# constraint, the constraint kink, and the extrapolation point.
mXtra = (cNrmNow[-1] - cNrmCnst[-1]) / (1.0 - self.MPCmin)
mCrit = mNrmNow[-1] + mXtra
cCrit = mCrit - BoroCnstArt
mNrmNow = np.array([BoroCnstArt, mCrit, mCrit + 1.0])
cNrmNow = np.array([0.0, cCrit, cCrit + self.MPCmin])
# If the mNrm and cNrm grids have become too large, throw out the last
# kink point, being sure to adjust the extrapolation.
if mNrmNow.size > self.MaxKinks:
mNrmNow = np.concatenate((mNrmNow[:-2], [mNrmNow[-3] + 1.0]))
cNrmNow = np.concatenate((cNrmNow[:-2], [cNrmNow[-3] + self.MPCmin]))
# Construct the consumption function as a linear interpolation.
self.cFunc = LinearInterp(mNrmNow, cNrmNow)
# Calculate the upper bound of the MPC as the slope of the bottom segment.
self.MPCmax = (cNrmNow[1] - cNrmNow[0]) / (mNrmNow[1] - mNrmNow[0])
# Add two attributes to enable calculation of steady state market resources.
self.Ex_IncNext = 1.0 # Perfect foresight income of 1
self.mNrmMinNow = mNrmNow[0] # Relabeling for compatibility with add_mNrmStE
def add_mNrmTrg(self, solution):
"""
Finds value of (normalized) market resources m at which individual consumer
expects m not to change.
This will exist if the GICNrm holds.
https://econ-ark.github.io/BufferStockTheory#UniqueStablePoints
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was passed, but now with the attribute mNrmStE.
"""
# If no uncertainty, return the degenerate targets for the PF model
if hasattr(self, "TranShkMinNext"): # Then it has transitory shocks
# Handle the degenerate case where shocks are of size zero
if ((self.TranShkMinNext == 1.0) and (self.PermShkMinNext == 1.0)):
# but they are of zero size (and also permanent are zero)
if self.GICRaw: # max of nat and art boro cnst
if type(self.BoroCnstArt) == type(None):
solution.mNrmStE = -self.hNrmNow
solution.mNrmTrg = -self.hNrmNow
else:
bNrmNxt = -self.BoroCnstArt * self.Rfree/self.PermGroFac
solution.mNrmStE = bNrmNxt + 1.0
solution.mNrmTrg = bNrmNxt + 1.0
else: # infinity
solution.mNrmStE = float('inf')
solution.mNrmTrg = float('inf')
return solution
# First find
# \bar{\mathcal{R}} = E_t[R/Gamma_{t+1}] = R/Gamma E_t[1/psi_{t+1}]
if type(self) == ConsPerfForesightSolver:
Ex_PermShkInv = 1.0
else:
Ex_PermShkInv = np.dot(1/self.PermShkValsNext, self.ShkPrbsNext)
Ex_RNrmFac = (self.Rfree/self.PermGroFac)*Ex_PermShkInv
# mNrmTrg solves Rcalbar*(m - c(m)) + E[inc_next] = m. Define a
# rearranged version.
Ex_m_tp1_minus_m_t = (
lambda m: Ex_RNrmFac * (m - solution.cFunc(m)) + self.Ex_IncNext - m
)
# Minimum market resources plus next income is okay starting guess
m_init_guess = self.mNrmMinNow + self.Ex_IncNext
try:
mNrmTrg = newton(Ex_m_tp1_minus_m_t, m_init_guess)
except:
mNrmTrg = None
# Add mNrmTrg to the solution and return it
solution.mNrmTrg = mNrmTrg
return solution
def add_mNrmStE(self, solution):
"""
Finds market resources ratio at which 'balanced growth' is expected.
This is the m ratio such that the expected growth rate of the M level
matches the expected growth rate of permanent income. This value does
not exist if the Growth Impatience Condition does not hold.
https://econ-ark.github.io/BufferStockTheory#Unique-Stable-Points
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was passed, but now with the attribute mNrmStE
"""
# Probably should test whether GICRaw holds and log error if it does not
# using check_conditions
# All combinations of c and m that yield E[PermGroFac PermShkVal mNext] = mNow
# https://econ-ark.github.io/BufferStockTheory/#The-Individual-Steady-State
PF_RNrm = self.Rfree/self.PermGroFac
# If we are working with a model that permits uncertainty but that
# uncertainty has been set to zero, return the correct answer
# by hand because in this degenerate case numerical search may
# have trouble
if hasattr(self, "TranShkMinNext"): # Then it has transitory shocks
if ((self.TranShkMinNext == 1.0) and (self.PermShkMinNext == 1.0)):
# but they are of zero size (and permanent shocks also not there)
if self.GICRaw: # max of nat and art boro cnst
# breakpoint()
if type(self.BoroCnstArt) == type(None):
solution.mNrmStE = -self.hNrmNow
solution.mNrmTrg = -self.hNrmNow
else:
bNrmNxt = -self.BoroCnstArt * self.Rfree/self.PermGroFac
solution.mNrmStE = bNrmNxt + 1.0
solution.mNrmTrg = bNrmNxt + 1.0
else: # infinity
solution.mNrmStE = float('inf')
solution.mNrmTrg = float('inf')
return solution
Ex_PermShk_tp1_times_m_tp1_minus_m_t = (
lambda mStE: PF_RNrm * (mStE - solution.cFunc(mStE)) + 1.0 - mStE
)
# Minimum market resources plus next income is okay starting guess
m_init_guess = self.mNrmMinNow + self.Ex_IncNext
try:
mNrmStE = newton(Ex_PermShk_tp1_times_m_tp1_minus_m_t, m_init_guess)
except:
mNrmStE = None
solution.mNrmStE = mNrmStE
return solution
def add_stable_points(self, solution):
"""
Checks necessary conditions for the existence of the individual steady
state and target levels of market resources (see above).
If the conditions are satisfied, computes and adds the stable points
to the solution.
Parameters
----------
solution : ConsumerSolution
Solution to this period's problem, which must have attribute cFunc.
Returns
-------
solution : ConsumerSolution
Same solution that was provided, augmented with attributes mNrmStE and
mNrmTrg, if they exist.
"""
# 0. There is no non-degenerate steady state for any unconstrained PF model.
# 1. There is a non-degenerate SS for constrained PF model if GICRaw holds.
# Therefore
# Check if (GICRaw and BoroCnstArt) and if so compute them both
thorn = (self.Rfree*self.DiscFacEff)**(1/self.CRRA)
GICRaw = 1 > thorn/self.PermGroFac
if self.BoroCnstArt is not None and GICRaw:
solution = self.add_mNrmStE(solution)
solution = self.add_mNrmTrg(solution)
return solution
def solve(self):
"""
Solves the one period perfect foresight consumption-saving problem.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to this period's problem.
"""
self.def_utility_funcs()
self.DiscFacEff = self.DiscFac * self.LivPrb # Effective=pure x LivPrb
self.make_cFunc_PF()
self.def_value_funcs()
solution = ConsumerSolution(
cFunc=self.cFunc,
vFunc=self.vFunc,
vPfunc=self.vPfunc,
mNrmMin=self.mNrmMinNow,
hNrm=self.hNrmNow,
MPCmin=self.MPCmin,
MPCmax=self.MPCmax,
)
solution = self.add_stable_points(solution)
return solution
###############################################################################
###############################################################################
class ConsIndShockSetup(ConsPerfForesightSolver):
"""
A superclass for solvers of one period consumption-saving problems with
constant relative risk aversion utility and permanent and transitory shocks
to income. Has methods to set up but not solve the one period problem.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncShkDstn : distribution.Distribution
A discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next).
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
"""
def __init__(
    self,
    solution_next,
    IncShkDstn,
    LivPrb,
    DiscFac,
    CRRA,
    Rfree,
    PermGroFac,
    BoroCnstArt,
    aXtraGrid,
    vFuncBool,
    CubicBool,
):
    """
    Construct a solver-setup for a one period consumption-saving problem
    with income subject to permanent and transitory shocks.

    Every constructor argument is stored on the instance under the same
    name, after which the CRRA utility functions are defined.
    """
    # Stash all arguments as identically named attributes.
    args = dict(
        solution_next=solution_next,
        IncShkDstn=IncShkDstn,
        LivPrb=LivPrb,
        DiscFac=DiscFac,
        CRRA=CRRA,
        Rfree=Rfree,
        PermGroFac=PermGroFac,
        BoroCnstArt=BoroCnstArt,
        aXtraGrid=aXtraGrid,
        vFuncBool=vFuncBool,
        CubicBool=CubicBool,
    )
    for attr_name, attr_value in args.items():
        setattr(self, attr_name, attr_value)
    # Define the CRRA utility function and related machinery.
    self.def_utility_funcs()
def def_utility_funcs(self):
"""
Defines CRRA utility function for | |
true, run the while loop. We set win to false at the start therefore this will always run
guess = int(input("Have a guess: "))
tries = tries + 1
if guess == number:
win = True # set win to true when the user guesses correctly.
elif guess < number:
print("Guess Higher")
elif guess > number:
print("Guess Lower")
else:
question
# if win is true then output message
print("That's Correct!! The number was {}".format(number))
print("it took you {} tries to get it correct".format(tries))
next_level = input("Would you like to move to the next level?[Y/N]: ")
if next_level.lower() == "y":
lvl3()
elif next_level.lower() == "n":
print("Well, I hope to see you soon!!")
AISpeak()
def lvl3():
    """Round three of the guessing game: guess a number between 1 and 30.

    On a correct guess, reports the try count and offers to continue to
    lvl4() or return to AISpeak().
    """
    number = random.randint(1, 30)
    tries = 0
    win = False  # flips to True once the user guesses correctly
    print("")
    question = input("Would you like to play round three? [Y/N] ")
    if question.lower() == "n":  # .lower() in case capital letters are entered
        print("oh..okay")
        exit()
    if question.lower() == "y":
        print("I'm thinking of a number between 1 & 30")
    while not win:
        # Bug fix: guard against non-numeric input instead of crashing
        # with an unhandled ValueError from int().
        try:
            guess = int(input("Have a guess: "))
        except ValueError:
            print("Please enter a whole number")
            continue
        tries = tries + 1
        if guess == number:
            win = True
        elif guess < number:
            print("Guess Higher")
        else:
            # guess > number is the only remaining case; the original dead
            # "else: question" branch was unreachable and has been removed.
            print("Guess Lower")
    # win is True here, so the guess was correct.
    print("That's Correct!! The number was {}".format(number))
    print("it took you {} tries to get it correct".format(tries))
    next_level = input("Would you like to move to the next level?[Y/N]: ")
    if next_level.lower() == "y":
        lvl4()
    elif next_level.lower() == "n":
        print("Well, I hope to see you soon!!")
        AISpeak()
def lvl4():
    """Round four of the guessing game: guess a number between 1 and 40.

    On a correct guess, reports the try count and offers to continue to
    lvl5() or return to AISpeak().
    """
    number = random.randint(1, 40)
    tries = 0
    win = False  # flips to True once the user guesses correctly
    print("")
    question = input("Would you like to play round four? [Y/N] ")
    if question.lower() == "n":  # .lower() in case capital letters are entered
        print("oh..okay")
        exit()
    if question.lower() == "y":
        print("I'm thinking of a number between 1 & 40")
    while not win:
        # Bug fix: guard against non-numeric input instead of crashing
        # with an unhandled ValueError from int().
        try:
            guess = int(input("Have a guess: "))
        except ValueError:
            print("Please enter a whole number")
            continue
        tries = tries + 1
        if guess == number:
            win = True
        elif guess < number:
            print("Guess Higher")
        else:
            # guess > number is the only remaining case; the original dead
            # "else: question" branch was unreachable and has been removed.
            print("Guess Lower")
    # win is True here, so the guess was correct.
    print("That's Correct!! The number was {}".format(number))
    print("it took you {} tries to get it correct".format(tries))
    next_level = input("Would you like to move to the next level?[Y/N]: ")
    if next_level.lower() == "y":
        lvl5()
    elif next_level.lower() == "n":
        print("Well, I hope to see you soon!!")
        AISpeak()
def lvl5():
    """Final round of the guessing game: guess a number between 1 and 50.

    On a correct guess, reports the try count, thanks the player, and
    returns control to AISpeak().
    """
    number = random.randint(1, 50)
    tries = 0
    win = False  # flips to True once the user guesses correctly
    print("")
    question = input("Would you like to play round five? [Y/N] ")
    if question.lower() == "n":  # .lower() in case capital letters are entered
        print("oh..okay")
        exit()
    if question.lower() == "y":
        print("I'm thinking of a number between 1 & 50")
    while not win:
        # Bug fix: guard against non-numeric input instead of crashing
        # with an unhandled ValueError from int().
        try:
            guess = int(input("Have a guess: "))
        except ValueError:
            print("Please enter a whole number")
            continue
        tries = tries + 1
        if guess == number:
            win = True
        elif guess < number:
            print("Guess Higher")
        else:
            # guess > number is the only remaining case; the original dead
            # "else: question" branch was unreachable and has been removed.
            print("Guess Lower")
    # win is True here, so the guess was correct.
    print("That's Correct!! The number was {}".format(number))
    print("it took you {} tries to get it correct".format(tries))
    print("Thank you for playing my game, Hope we see you soon!!")
    AISpeak()
#Games the program can run
def games():
    """Ask the user which game to play and launch it.

    Re-prompts on unrecognized input instead of silently returning, and
    accepts the game names case-insensitively.
    """
    # Renamed the local from "games" so it no longer shadows this function.
    choice = input("Which game would you like to play? (rock, paper scissors, and guessing game): ")
    print("")
    if choice.lower() == "rock paper scissors":
        rock_paper_scissors()
    elif choice.lower() == "guessing game":
        guessing_game()
    else:
        # Previously an unrecognized answer fell through and did nothing.
        print("I dont understand, please try again")
        games()
def calculations():
    """Interactive calculator: multiply, divide, add, subtract, square, exponent.

    Each operation reads its operands from the console, prints the result,
    then offers to run another calculation or return to AISpeak().
    """

    def ask_next():
        # Shared "run another calculation?" prompt used after every operation.
        again = input("Would you like to do another calculation?: ")
        if again == "yes":
            calculations()
        elif again == "no":
            AISpeak()

    Typeofcalc = input("What type of calculation would you like to do?: ")
    if Typeofcalc in ("Multiplication", "multiply", "mult"):
        Num1 = int(input("Enter the first number: "))
        Num2 = int(input("Enter the second number: "))
        print(Num1 * Num2)
        ask_next()
    elif Typeofcalc in ("div", "division"):
        Num1 = int(input("Enter first number: "))
        Num2 = int(input("Enter second number: "))
        print(Num1 / Num2)
        ask_next()
    elif Typeofcalc in ("add", "addition"):
        Num1 = int(input("Enter first number: "))
        Num2 = int(input("Enter second number: "))
        print(Num1 + Num2)
        ask_next()
    elif Typeofcalc in ("subtract", "sub"):
        Num1 = int(input("Enter first number: "))
        Num2 = int(input("Enter second number: "))
        print(Num1 - Num2)
        ask_next()
    elif Typeofcalc in ("exit", "end"):
        end = input("Would you like to exit calculations?: ")
        if end == "yes":
            AISpeak()
        if end == "no":
            calculations()
    elif Typeofcalc in ("square", "Square"):
        # Bug fix: input() returns a string, so the original str * str
        # raised TypeError; convert to int before multiplying.
        Num1 = int(input("Enter the number you would like to square: "))
        print(Num1 * Num1)
        ask_next()
    elif Typeofcalc in ("exponent", "exp"):
        # Bug fix: the original raised TypeError from str ** str; convert
        # both operands to int first.
        num1 = int(input("Enter number: "))
        num2 = int(input("Enter exponent: "))
        print(num1 ** num2)
        ask_next()
    else:
        print("I dont understand, please try again")
        calculations()
def jokes():
joke = random.randint(1,10)
if joke == 1:
print("")
print("So a lightbulb walks into a hotel, he checks in and the bell hop asks for the lightbulbs luggage"
" the lightbulb replies.....I don't have any, im traveling light")
end = input("Would you like to hear another?: ")
if end == "yes":
jokes()
elif end == "no":
AISpeak()
elif joke == 2:
print("")
print("What do you call an aligator in a vest?.........an investigator")
end = input("Would you like to hear another?: ")
if end == "yes":
jokes()
elif end == "no":
AISpeak()
elif joke == 3:
print("")
print("Two popsicle's are in a freezer, one popsicle says 'man it's cold in here' and the other popsicle says 'Holly crap a talking popsicle!!'")
end = input("Would you like to hear another?: ")
if end == "yes":
jokes()
elif end == "no":
AISpeak()
elif joke == 4:
print("")
print("Why did the chicken cross the road?....to get to the other side")
end = input("Would you like to hear another?: ")
if end == "yes":
jokes()
elif end == "no":
AISpeak()
elif joke == 5:
print("")
print("So a horse walks into a bar, sits at the table, the bar tender comes up and says 'Why the long face?'")
end = input("Would you like to hear another?: ")
if end == "yes":
jokes()
elif end == "no":
AISpeak()
elif joke == 6:
print("")
print("What do you call a fly with no wings?............A walk!")
end = | |
' + topic.member.username
template_values['page_description'] = template_values['page_description'].replace("\r\n", " ")
if member:
if member.level == 0:
can_edit = True
can_move = True
if topic.member_num == member.num:
now = datetime.datetime.now()
if (now - topic.created).seconds < 300:
can_edit = True
can_move = True
try:
taskqueue.add(url='/hit/topic/' + str(topic.key()))
except:
pass
template_values['page_title'] = site.title + u' › ' + topic.title
template_values['canonical'] = 'http://' + site.domain + '/t/' + str(topic.num)
if topic.content_rendered is None:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'topic_content.html')
output = template.render(path, {'topic' : topic})
topic = db.get(topic.key())
topic.content_rendered = output.decode('utf-8')
memcache.delete('Topic_' + str(topic.num))
topic.put()
else:
template_values['page_title'] = site.title + u' › 主题未找到'
template_values['topic'] = topic
template_values['can_edit'] = can_edit
template_values['can_move'] = can_move
if (topic):
node = False
section = False
node = GetKindByNum('Node', topic.node_num)
if (node):
section = GetKindByNum('Section', node.section_num)
template_values['node'] = node
template_values['section'] = section
page_size = TOPIC_PAGE_SIZE
pages = 1
if topic.replies > page_size:
if (topic.replies % page_size) > 0:
pages = int(math.floor(topic.replies / page_size)) + 1
else:
pages = int(math.floor(topic.replies / page_size))
try:
page_current = int(self.request.get('p'))
if page_current < 1:
page_current = 1
if page_current > pages:
page_current = pages
except:
page_current = pages
page_start = (page_current - 1) * page_size
template_values['pages'] = pages
template_values['page_current'] = page_current
template_values['ps'] = False
i = 1
ps = []
while i <= pages:
ps.append(i)
i = i + 1
if len(ps) > 1:
template_values['ps'] = ps
replies = False
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'topic_replies_mobile.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'topic_replies.html')
if filter_mode:
if browser['ios']:
r_tag = 'topic_' + str(topic.num) + '_replies_filtered_rendered_ios_' + str(page_current)
else:
r_tag = 'topic_' + str(topic.num) + '_replies_filtered_rendered_desktop_' + str(page_current)
r = memcache.get(r_tag)
if r is None:
replies = memcache.get('topic_' + str(topic.num) + '_replies_filtered_compressed_' + str(page_current))
if replies is None:
q5 = db.GqlQuery("SELECT * FROM Reply WHERE topic_num = :1 AND member_num = :2 ORDER BY created ASC LIMIT " + str(page_start) + "," + str(page_size), topic.num, topic.member.num)
replies = q5
memcache.set('topic_' + str(topic.num) + '_replies_filtered_compressed_' + str(page_current), GetPacked(replies), 7200)
else:
replies = GetUnpacked(replies)
template_values['replies'] = replies
template_values['replies_count'] = replies.count()
r = template.render(path, template_values)
memcache.set(r_tag, r, 86400)
else:
if reply_reversed:
if browser['ios']:
r_tag = 'topic_' + str(topic.num) + '_replies_desc_rendered_ios_' + str(page_current)
else:
r_tag = 'topic_' + str(topic.num) + '_replies_desc_rendered_desktop_' + str(page_current)
r = memcache.get(r_tag)
if r is None:
replies = memcache.get('topic_' + str(topic.num) + '_replies_desc_compressed_' + str(page_current))
if replies is None:
q4 = db.GqlQuery("SELECT * FROM Reply WHERE topic_num = :1 ORDER BY created DESC LIMIT " + str(page_start) + "," + str(page_size), topic.num)
replies = q4
memcache.set('topic_' + str(topic.num) + '_replies_desc_compressed_' + str(page_current), GetPacked(q4), 86400)
else:
replies = GetUnpacked(replies)
template_values['replies'] = replies
template_values['replies_count'] = replies.count()
r = template.render(path, template_values)
memcache.set(r_tag, r, 86400)
else:
if browser['ios']:
r_tag = 'topic_' + str(topic.num) + '_replies_asc_rendered_ios_' + str(page_current)
else:
r_tag = 'topic_' + str(topic.num) + '_replies_asc_rendered_desktop_' + str(page_current)
r = memcache.get(r_tag)
if r is None:
replies = memcache.get('topic_' + str(topic.num) + '_replies_asc_compressed_' + str(page_current))
if replies is None:
q4 = db.GqlQuery("SELECT * FROM Reply WHERE topic_num = :1 ORDER BY created ASC LIMIT " + str(page_start) + "," + str(page_size), topic.num)
replies = q4
memcache.set('topic_' + str(topic.num) + '_replies_asc_compressed_' + str(page_current), GetPacked(q4), 86400)
else:
replies = GetUnpacked(replies)
template_values['replies'] = replies
template_values['replies_count'] = replies.count()
r = template.render(path, template_values)
memcache.set(r_tag, r, 86400)
template_values['r'] = r
if topic and member:
if member.hasFavorited(topic):
template_values['favorited'] = True
else:
template_values['favorited'] = False
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'topic.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'topic.html')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'topic_not_found.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'topic_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
def post(self, topic_num):
site = GetSite()
### BEGIN: CAN CONTINUE
can_continue = True
if ('Host' in self.request.headers):
if (self.request.headers['Host'] not in ['www.v2ex.com', 'v2ex.appspot.com', 'fast.v2ex.com', 'beta.v2ex.com', 'us.v2ex.com', 'eu.v2ex.com', 'jp.v2ex.com', 'localhost:10000']):
can_continue = True
else:
can_continue = False
if ('User-Agent' not in self.request.headers):
can_continue = False
if ('Cookie' not in self.request.headers):
can_continue = False
if ('Referer' in self.request.headers):
has_v2ex = False
if ('http://localhost:10000' in self.request.headers['Referer']):
has_v2ex = True
if ('http://www.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://v2ex.appspot.com' in self.request.headers['Referer']):
has_v2ex = True
if ('https://www.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://eu.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://us.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://jp.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('https://v2ex.appspot.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://fast.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://beta.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://' + site.domain in self.request.headers['Referer']):
has_v2ex = True
if has_v2ex is False:
can_continue = False
else:
can_continue = True
if ('Content-Type' in self.request.headers):
if self.request.headers['Content-Type'].startswith( 'application/x-www-form-urlencoded') is False:
can_continue = False
else:
can_continue = False
if can_continue is False:
return self.redirect('http://' + site.domain + '/')
### END: CAN CONTINUE
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
topic_num_str = str(topic_num)
if len(topic_num_str) > 8:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'topic_not_found.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'topic_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
return
if (member):
topic = False
topic = GetKindByNum('Topic', int(topic_num))
template_values['topic'] = topic
errors = 0
# Verification: content
reply_content_error = 0
reply_content_error_messages = ['',
u'请输入回复内容',
u'回复内容长度不能超过 200000 个字符'
]
reply_content = self.request.get('content').strip()
if (len(reply_content) == 0):
errors = errors + 1
reply_content_error = 1
else:
if (len(reply_content) > 200000):
errors = errors + 1
reply_content_error = 2
template_values['reply_content'] = reply_content
template_values['reply_content_error'] = reply_content_error
template_values['reply_content_error_message'] = reply_content_error_messages[reply_content_error]
template_values['errors'] = errors
if (topic and (errors == 0)):
reply = Reply(parent=topic)
q = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'reply.max')
if (q.count() == 1):
counter = q[0]
counter.value = counter.value + 1
else:
counter = Counter()
counter.name = 'reply.max'
counter.value = 1
q2 = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'reply.total')
if (q2.count() == 1):
counter2 = q2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'reply.total'
counter2.value = 1
node = False
section = False
if topic:
node = False
section = False
node = GetKindByNum('Node', topic.node_num)
if (node):
section = GetKindByNum('Section', node.section_num)
template_values['node'] = node
template_values['section'] = section
reply.num = counter.value
reply.content = reply_content
reply.topic = topic
reply.topic_num = topic.num
reply.member = member
reply.member_num = member.num
reply.created_by = member.username
topic.replies = topic.replies + 1
topic.node_name = node.name
topic.node_title = node.title
topic.last_reply_by = member.username
topic.last_touched = datetime.datetime.now()
ua = self.request.headers['User-Agent']
if (re.findall('Mozilla\/5.0 \(iPhone', ua)):
reply.source = 'iPhone'
if (re.findall('Mozilla\/5.0 \(iPod', ua)):
reply.source = 'iPod'
if (re.findall('Mozilla\/5.0 \(iPad', ua)):
reply.source = 'iPad'
if (re.findall('Android', ua)):
reply.source = 'Android'
if (re.findall('Mozilla\/5.0 \(PLAYSTATION 3;', ua)):
reply.source = 'PS3'
reply.put()
topic.put()
counter.put()
counter2.put()
# Notifications
notified_members = []
keys = []
# type: reply
if reply.member_num != topic.member_num:
q = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'notification.max')
if (q.count() == 1):
counter = q[0]
counter.value = counter.value + 1
else:
counter = Counter()
counter.name = 'notification.max'
counter.value = 1
q2 = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'notification.total')
if (q2.count() == 1):
counter2 = q2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'notification.total'
counter2.value = 1
notification = Notification(parent=topic.member)
notification.num = counter.value
notification.type = 'reply'
notification.payload = reply.content
notification.label1 = topic.title
notification.link1 = '/t/' + str(topic.num) + '#reply' + str(topic.replies)
notification.member = member
notification.for_member_num = topic.member_num
keys.append(str(topic.member.key()))
counter.put()
counter2.put()
notification.put()
for key in keys:
taskqueue.add(url='/notifications/check/' + key)
taskqueue.add(url='/notifications/reply/' + str(reply.key()))
page_size = TOPIC_PAGE_SIZE
pages = 1
if topic.replies > page_size:
if (topic.replies % page_size) > 0:
pages = int(math.floor(topic.replies / page_size)) + 1
else:
pages = int(math.floor(topic.replies / page_size))
memcache.set('Topic_' + str(topic.num), topic, 86400)
memcache.delete('topic_' + str(topic.num) + '_replies_desc_compressed_' + str(pages))
memcache.delete('topic_' + str(topic.num) + '_replies_asc_compressed_' + str(pages))
memcache.delete('topic_' + str(topic.num) + '_replies_filtered_compressed_' + str(pages))
memcache.delete('topic_' + str(topic.num) + '_replies_desc_rendered_desktop_' + str(pages))
memcache.delete('topic_' + str(topic.num) + '_replies_asc_rendered_desktop_' + str(pages))
memcache.delete('topic_' + str(topic.num) | |
import asyncio
import base64
import json
import os
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Union
from urllib import parse as urlparse
import aiohttp
from spotify.classes import *
BASE_URL = "https://api.spotify.com/v1/"
class SpotifyClient:
    """A simple client for performing requests to the Spotify API.

    Spotify Web API Reference: https://developer.spotify.com/documentation/web-api/reference/

    You must have a spotify application in order to use this client.
    You can create one here: https://developer.spotify.com/dashboard/applications/

    Args:
        client_id (str): Your Spotify Application's client ID.
        client_secret (str): Your Spotify Application's client secret.

    Keyword Args:
        enable_cache (bool): Whether or not to enable caching. Defaults to ``True``.
            The cache will only store the access token and its type and expiration time.
    """

    def __init__(
        self, client_id: str, client_secret: str, *, enable_cache: bool = True
    ) -> None:
        self.client_id = client_id
        self.client_secret = client_secret
        self.enable_cache = enable_cache
        self.client_bearer: Optional[str] = None
        self.bearer_expires_at: Optional[float] = None
        # Set once the startup task has loaded or fetched a bearer token;
        # every request awaits it.
        self.started = asyncio.Event()
        # NOTE(review): create_task requires a running event loop, so
        # instances must be constructed from within async code.
        _ = asyncio.create_task(self._startup_task())

    async def _startup_task(self) -> None:
        """Load a cached bearer token if available, otherwise fetch a new one."""
        if os.path.isfile("./.cache"):
            # Robustness: a corrupt or unreadable cache file should fall back
            # to fetching a fresh token instead of crashing the startup task.
            try:
                with open("./.cache", "r") as f:
                    cache = json.load(f)
            except (OSError, json.JSONDecodeError):
                cache = {}
            self.client_bearer = cache.get("access_token")
            self.bearer_expires_at = cache.get("expires_at")
            if not self.client_bearer:
                await self._get_bearer()
        else:
            await self._get_bearer()
        self.started.set()

    async def _get_bearer(self) -> None:
        """Fetch a new client-credentials bearer token from Spotify.

        Stores the token and its absolute expiry timestamp on the instance,
        and persists both to ``./.cache`` when caching is enabled.
        """
        credentials = f"{self.client_id}:{self.client_secret}".encode("ascii")
        final_token = base64.encodebytes(credentials).decode("utf-8").replace("\n", "")
        headers = {"Authorization": f"Basic {final_token}"}
        post_data = {"grant_type": "client_credentials"}
        async with aiohttp.request(
            "POST",
            "https://accounts.spotify.com/api/token",
            headers=headers,
            data=post_data,
        ) as res:
            res.raise_for_status()
            data = await res.json()
        self.client_bearer = data["access_token"]
        # Bug fix: always record the expiry on the instance (previously it
        # was only written to the cache file, and never when enable_cache
        # was False), otherwise _check_bearer_validity could never detect
        # an expired token and every later request would use a stale bearer.
        data["expires_at"] = (
            datetime.now() + timedelta(seconds=data["expires_in"])
        ).timestamp()
        self.bearer_expires_at = data["expires_at"]
        if self.enable_cache:
            with open("./.cache", "w") as f:
                json.dump(data, f)

    async def _check_bearer_validity(self) -> None:
        """Refresh the bearer token if its recorded expiry time has passed."""
        if (
            self.bearer_expires_at
            and self.bearer_expires_at < datetime.now().timestamp()
        ):
            await self._get_bearer()

    async def get(self, url: str) -> Dict:
        """Perform an authorized GET request against ``BASE_URL + url``.

        Waits for startup to finish and refreshes the bearer token first.
        Raises :class:`aiohttp.ClientResponseError` on HTTP error statuses.
        """
        await self.started.wait()
        await self._check_bearer_validity()
        async with aiohttp.request(
            "GET",
            BASE_URL + url,
            headers={
                "Authorization": f"Bearer {self.client_bearer}",
                "Content-Type": "application/json",
            },
        ) as res:
            res.raise_for_status()
            return await res.json()

    async def get_album(self, album_id: str, *, market: Optional[str] = None) -> Album:
        """Get an album from Spotify.

        Args:
            album_id (str): The `Spotify ID`_ of the album.

        Keyword Args:
            market (Optional[str]): The `country code`_ of a market to get the album from. Defaults to ``None``.

        Returns:
            :obj:`~.classes.Album`
        """
        res = await self.get(_format_url(f"albums/{album_id}", market=market))
        return Album.from_dict(res)

    async def get_several_albums(
        self, album_ids: List[str], *, market: Optional[str] = None
    ) -> List[Album]:
        """Get multiple albums from Spotify.

        Args:
            album_ids (List[str]): The `Spotify ID`_ s of the albums. Maximum is 20 IDs.

        Keyword Args:
            market (Optional[str]): The `country code`_ of a market to get the albums from. Defaults to ``None``.

        Returns:
            List[:obj:`~.classes.Album`]

        Raises:
            ValueError: If more than 20 IDs are passed.
        """
        if len(album_ids) > 20:
            raise ValueError(
                f"A maximum of 20 IDs may be passed, but you have passed {len(album_ids)}"
            )
        res = await self.get(
            _format_url("albums", ids=",".join(album_ids), market=market)
        )
        return _format_several_items(res, album_ids, "albums", Album)  # type: ignore

    async def get_album_tracks(
        self,
        album_id: str,
        *,
        limit: Optional[int] = None,
        market: Optional[str] = None,
        offset: Optional[int] = None,
    ) -> Tracks:
        """Get an album's tracks from Spotify.

        Args:
            album_id (str): The `Spotify ID`_ of the album.

        Keyword Args:
            limit (Optional[int]): The maximum number of tracks to return.
                Defaults to ``20``. Minimum is ``1``. Maximum is ``50``.
            market (Optional[str]): The `country code`_ of a market to get the album from. Defaults to ``None``.
            offset (Optional[int]): The index of the first set of tracks to return. Defaults to ``0``.

        Returns:
            :obj:`~.classes.Tracks`

        Raises:
            ValueError: If ``limit`` is outside the 1-50 range.
        """
        if limit and not 1 <= limit <= 50:
            raise ValueError("Kwarg 'limit' must be between 1 and 50 inclusive")
        res = await self.get(
            _format_url(
                f"albums/{album_id}/tracks", limit=limit, market=market, offset=offset
            )
        )
        return Tracks.from_dict(res)

    async def get_new_releases(
        self,
        *,
        country: Optional[str] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> Albums:
        """Get a list of new album releases featured in Spotify.

        Keyword Args:
            country (Optional[str]): The `country code`_ of a country to get the new releases from. Defaults to ``None``.
            limit (Optional[int]): The maximum number of new releases to return.
                Defaults to ``20``. Minimum is ``1``. Maximum is ``50``.
            offset (Optional[int]): The index of the first set of new releases to return. Defaults to ``0``.

        Returns:
            :obj:`~.classes.Albums`

        Raises:
            ValueError: If ``limit`` is outside the 1-50 range.
        """
        if limit and not 1 <= limit <= 50:
            raise ValueError("Kwarg 'limit' must be between 1 and 50 inclusive")
        res = await self.get(
            _format_url(
                "browse/new-releases",
                country=country,
                limit=limit,
                offset=offset,
            )
        )
        return Albums.from_dict(res)

    async def get_track(
        self,
        track_id: str,
        *,
        market: Optional[str] = None,
    ) -> Track:
        """Get a track from Spotify.

        Args:
            track_id (str): The `Spotify ID`_ of the track.

        Keyword Args:
            market (Optional[str]): The `country code`_ of a market to get the track from. Defaults to ``None``.

        Returns:
            :obj:`~.classes.Track`
        """
        res = await self.get(_format_url(f"tracks/{track_id}", market=market))
        return Track.from_dict(res)

    async def get_several_tracks(
        self,
        track_ids: List[str],
        *,
        market: Optional[str] = None,
    ) -> List[Track]:
        """Get multiple tracks from Spotify.

        Args:
            track_ids (List[str]): The `Spotify ID`_ s of the tracks. Maximum is 50 IDs.

        Keyword Args:
            market (Optional[str]): The `country code`_ of a market to get the tracks from. Defaults to ``None``.

        Returns:
            List[:obj:`~.classes.Track`]

        Raises:
            ValueError: If more than 50 IDs are passed.
        """
        if len(track_ids) > 50:
            raise ValueError(
                f"A maximum of 50 IDs may be passed, but you have passed {len(track_ids)}"
            )
        res = await self.get(
            _format_url("tracks", ids=",".join(track_ids), market=market)
        )
        return _format_several_items(res, track_ids, "tracks", Track)  # type: ignore

    async def get_several_tracks_audio_features(
        self,
        track_ids: List[str],
    ) -> List[AudioFeatures]:
        """Get audio feature information for multiple tracks from Spotify.

        Args:
            track_ids (List[str]): The `Spotify ID`_ s of the tracks. Maximum is 100 IDs.

        Returns:
            List[:obj:`~.classes.AudioFeatures`]

        Raises:
            ValueError: If more than 100 IDs are passed.
        """
        if len(track_ids) > 100:
            raise ValueError(
                f"A maximum of 100 IDs may be passed, but you have passed {len(track_ids)}"
            )
        res = await self.get(_format_url("audio-features", ids=",".join(track_ids)))
        return _format_several_items(res, track_ids, "audio_features", AudioFeatures)  # type: ignore

    async def get_tracks_audio_features(
        self,
        track_id: str,
    ) -> AudioFeatures:
        """Get audio feature information for a track from Spotify.

        Args:
            track_id (str): The `Spotify ID`_ of the track.

        Returns:
            :obj:`~.classes.AudioFeatures`
        """
        res = await self.get(f"audio-features/{track_id}")
        return AudioFeatures.from_dict(res)

    async def get_playlist(
        self,
        playlist_id: str,
        *,
        additional_types: Optional[List[str]] = None,
        # fields: Optional[Dict[str, str]] = None,
        market: Optional[str] = None,
    ) -> Playlist:
        """Get a playlist from Spotify.

        Args:
            playlist_id (str): The `Spotify ID`_ of the playlist.

        Keyword Args:
            additional_types (Optional[List[str]]): Extra item types to return
                besides the default track type; passed straight through to the
                API query string.
            market (Optional[str]): The `country code`_ of a market to get the playlist from. Defaults to ``None``.

        Returns:
            :obj:`~.classes.Playlist`
        """
        res = await self.get(
            _format_url(
                f"playlists/{playlist_id}",
                additional_types=additional_types,
                # fields=_format_playlist_fields(fields),
                market=market,
            )
        )
        return Playlist.from_dict(res)
# async def search(
# self,
# query: str,
# *,
# album: Optional[str] = None,
# artist: Optional[str] = None,
# track: Optional[str] = None,
# year: Optional[str] = None,
# upc: Optional[str] = None,
# tag: Optional[str] = None,
# isrc: Optional[str] = None,
# genre: Optional[str] = None,
# _type: Optional[List[str]] = ["track"],
# include_external_audio: Optional[bool] = False,
# limit: Optional[int] = None,
# market: Optional[str] = None,
# offset: Optional[int] = None,
# ) -> None:
# # query = "search?q=okinimesumama+artist:eve&type=track&include_external=audio&limit=3"
# # TODO: Check the defaults
# if tag and tag not in ("new", "hipster"):
# raise ValueError(f"Kwarg 'tag' must be 'new' or 'hipster', not '{tag}'")
# if type:
# for t in type:
# if t not in ("album", "artist", "playlist", "track", "show", "episode"):
# raise ValueError(
# f"Value '{t}' in type is not valid, it must be one of "
# ", ".join([f"'{_t}'" for _t in ("album", "artist", "playlist", "track", "show", "episode")])
# )
# if limit and (limit < 0 or limit > 50):
# raise ValueError("Kwarg 'limit' must be between 0 and 50 inclusive")
# url = (
# f"search?q={query}%20" +
# (f"album:{album}" if album else "") +
# (f"artist:{artist}" if artist else "") +
# (f"track:{track}" if track else "") +
# (f"year:{year}" if year else "") +
# (f"upc:{upc}" if upc else "") +
# (f"tag:{tag}" if tag else "") +
# (f"isrc:{isrc}" if isrc else "") +
# (f"genre:{genre}" if genre else "") +
# (f"&type={'%2C'.join(type)}" if type else "") +
# ("&exclude_external=audio" if include_external_audio else "") +
# (f"&limit={limit}" if limit else "") +
# (f"&market={market}" if market else "") +
# (f"&offset={offset}" if offset else "")
# )
# res = await self.get(url)
# return SearchResult.from_dict(res)
# with open("./samples/search4.json", "w") as f:
# json.dump(res, f, indent=4)
# def _format_search(res: Dict) -> Union[Album, Tracks, None]:
# pass
def _format_several_items(
res: Dict, ids: List[str], thing: str, cls: Union[Album, Track, AudioFeatures]
) -> List[Union[Album, Track, AudioFeatures]]:
items = []
for i, item in | |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.optimize import fsolve
import random
import time
import os
import matplotlib.pyplot as plt
import logging
from Life import Life
from Fitness import Fitness
# Fixed seed so every run of the GA is reproducible.
random.seed(10)
logging.basicConfig(level=logging.INFO,  # log level printed to the console
                    filename='./logs/out_%s.log' % time.strftime("%Y%m%d%H%M", time.localtime(time.time())),
                    filemode='w',  # 'w' truncates the log each run; 'a' (append) is the default when omitted
                    format='[line:%(lineno)d] - %(levelname)s: %(message)s'
                    # log record format: line number - level: message
                    )


# Genetic algorithm class
class GA(object):
    def __init__(self, populationSize=100, binaryEncode=True, geneLength=None, boundaryList=None, delta=0.01,
                 fitnessClass=None, crossoverRate=0.7, crossoverOneRate=0.5, crossoverTwoRate=0.5,
                 mutationRate=0.05, mutationLocationRate=0.5, mutationRotateRate=0.5, fitnessThreshold=0.9,
                 similarityThreshold=10, CF=10, punish=0.01, showBestFitness=True, showTotalFitness=False,
                 earlyStopRoundThreshold=100):
        """Configure the GA and build the initial population (via initPopulation)."""
        self._populationSize = populationSize  # number of individuals in the population
        self._binaryEncode = binaryEncode  # gene encoding: binary (True) or permutation (False)
        self._geneLength = geneLength  # total gene length (derived from boundaryList when binary encoding)
        self._boundaryList = boundaryList  # per-variable (lower, upper) intervals
        self._delta = delta  # decoding precision per variable
        self._encodeLengths = None  # per-variable encoded bit lengths (set in initPopulation)
        self._fitnessClass = fitnessClass  # object exposing fitnessFunc(decoded_gene)
        self._crossoverRate = crossoverRate  # probability of performing crossover
        self._crossoverOneRate = crossoverOneRate  # probability of single-point crossover
        self._crossoverTwoRate = crossoverTwoRate  # probability of two-point crossover
        self._mutationRate = mutationRate  # probability of performing mutation
        self._mutationLocationRate = mutationLocationRate  # probability of swap (position) mutation
        self._mutationRotateRate = mutationRotateRate  # probability of rotation (bit-flip) mutation
        self._bestLife = None  # best individual of the current generation
        self._fitnessThreshold = fitnessThreshold  # stop early once best fitness exceeds this
        self._generation = 1  # generation counter, starts at 1
        self._crossoverCount = 0  # crossovers performed so far
        self._mutationCount = 0  # mutations performed so far
        self._totalFitness = 0.0  # population fitness sum, used by roulette-wheel selection
        self._earlyStop = False  # early-termination flag
        self._CF = CF  # crowding factor (fraction of parents carried over is 1/CF)
        self._punish = punish  # punishment fitness for near-duplicate genomes
        self._similarityThresholdThreshold = similarityThreshold  # Hamming-distance similarity threshold
        self._bestFitnessHistory = []  # best fitness per generation
        self._totalFitnessHistory = []  # total fitness per generation
        self._showBestFitness = showBestFitness  # plot best-fitness history?
        self._showTotalFitness = showTotalFitness  # plot total-fitness history?
        self._earlyStopRoundThreshold = earlyStopRoundThreshold  # stop after this many generations without improvement
        self._round = 0  # consecutive generations without fitness improvement
        self.initPopulation()  # build the initial population
    # Build the initial population.
    def initPopulation(self):
        """Create the initial random population, sort it by fitness, and record
        the best individual plus the best/total fitness history."""
        logging.debug('init population')
        self._population = []
        if self._binaryEncode:  # binary-encode each decision variable
            if self._boundaryList is None:
                raise ValueError("boundaryList must be configured!")
            # Per-variable encoded bit lengths.
            self._encodeLengths = self.getEncodedLengths()
            # Total gene length is the sum over all variables.
            self._geneLength = np.sum(self._encodeLengths)
            # Randomize the initial population.
            for i in range(self._populationSize):
                # Random bit string for the gene.
                gene = np.random.randint(0, 2, self._geneLength)
                # Create the individual and evaluate fitness on the decoded gene.
                life = Life(gene)
                life._fitness = self._fitnessClass.fitnessFunc(self.decodedOneGene(gene))
                # Add the new individual to the population.
                self._population.append(life)
        else:  # permutation encoding: genes are shuffles of [0, 1, ..., geneLength-1]
            if self._geneLength is None:
                raise ValueError("geneLength must be configured!")
            for i in range(self._populationSize):
                gene = np.array(range(self._geneLength))
                # Shuffle into a random permutation.
                random.shuffle(gene)
                # Create the individual and evaluate fitness on the raw gene.
                life = Life(gene)
                life._fitness = self._fitnessClass.fitnessFunc(gene)
                # Add the new individual to the population.
                self._population.append(life)
        # Sort descending by fitness; remember the best individual and its fitness.
        self._population = self.sortPopulation(self._population)
        self._bestLife = self._population[0]
        self._bestFitnessHistory.append(self._bestLife._fitness)
        # Track total fitness (used by roulette-wheel selection).
        self._totalFitness = self.calTotalFitness(self._population)
        self._totalFitnessHistory.append(self._totalFitness)
    # Determine each variable's encoded bit length from the required precision
    # (delta) and the variable's (lower, upper) interval.
    def getEncodedLengths(self):
        """Return a list with the number of encoding bits per decision variable.

        NOTE(review): this numerically solves (upper-lower)/delta - 2**x - 1 = 0
        and takes floor(x). The conventional requirement is
        2**x - 1 >= (upper-lower)/delta, i.e. x = ceil(log2((upper-lower)/delta + 1)),
        so flooring here may allocate one bit too few for the requested
        precision -- confirm the intended guarantee.
        """
        if self._boundaryList is None:
            raise ValueError("boundaryList must be configured!")
        # Encoded length of each variable.
        encodeLengths = []
        for i in self._boundaryList:
            lower = i[0]
            upper = i[1]
            # fsolve finds a root of f(x) = 0; 50 is the initial guess.
            res = fsolve(lambda x: ((upper - lower) * 1 / self._delta) - 2 ** x - 1, 50)
            length = int(np.floor(res[0]))
            encodeLengths.append(length)
        return encodeLengths
# 基因解码得到表现型的解
def decodedGenes(self, population):
if self._encodeLengths is None:
raise ValueError("encodeLengths must be configured!")
populationSize = len(population)
variables = len(self._encodeLengths)
decodeGenes = np.zeros((populationSize, variables))
for k, life in enumerate(population):
gene = life._gene.tolist()
decodeGenes[k] = self.decodedOneGene(gene)
return decodeGenes
# 解码某一基因序列
def decodedOneGene(self, encodeGene):
decodeGene = np.zeros(len(self._encodeLengths))
start = 0
for index, length in enumerate(self._encodeLengths):
# 将一个染色体进行拆分,得到染色体片段
power = length - 1
# 解码得到的10进制数字
demical = 0
for i in range(start, length + start):
demical += encodeGene[i] * (2 ** power)
power -= 1
lower = self._boundaryList[index][0]
upper = self._boundaryList[index][1]
decodedValue = lower + demical * (upper - lower) / (2 ** length - 1)
decodeGene[index] = decodedValue
# 下一段染色体的编码
start += length
return decodeGene
# 将种群按照适应度排序,默认倒序排列
def sortPopulation(self, population, reverse=True):
return sorted(population, key=lambda life: life._fitness, reverse=reverse)
# 计算种群的总适应度
def calTotalFitness(self, population):
fitnessList = []
for life in population:
fitnessList.append(life._fitness)
return np.sum(np.array(fitnessList))
# 计算海明距离
def hammingDist(self, s1, s2):
assert len(s1) == len(s2)
return sum([ch1 != ch2 for ch1, ch2 in zip(s1, s2)])
# 小生境淘汰
def smallHabitatElimination(self, population):
count = 0
popSize = len(population)
for i in range(popSize - 1):
for j in range(i + 1, popSize):
gene1 = population[i]._gene
gene2 = population[j]._gene
# 计算海明距离
distance = self.hammingDist(gene1, gene2)
# 若距离小于阈值,则惩罚其中适应度值较差的个体。
if distance < self._similarityThresholdThreshold:
count += 1
if population[i]._fitness < population[j]._fitness:
logging.debug("个体 #%s 基因组被惩罚:" % i)
logging.debug("被惩罚前分数: %f" % (population[i]._fitness))
population[i]._fitness = self._punish
else:
logging.debug("个体 #%s 基因组被惩罚:" % j)
logging.debug("被惩罚前分数: %f" % (population[j]._fitness))
population[j]._fitness = self._punish
return population
# 评估,计算每一个个体的适配值
def evaluation(self, population):
# 适配值之和,用于选择时计算概率
totalFitness = 0.0
bestLife = population[0]
if self._binaryEncode:
decodedGenes = self.decodedGenes(population)
bestLife._fitness = self._fitnessClass.fitnessFunc(decodedGenes[0])
for i in range(1, len(decodedGenes)):
decodedGene = decodedGenes[i]
life = self._population[i]
life._fitness = self._fitnessClass.fitnessFunc(decodedGene)
totalFitness += life._fitness
# 如果新基因的适配值大于原先的best基因,就更新best基因
if bestLife._fitness < life._fitness:
bestLife = life
else:
for life in population:
life._fitness = self._fitnessClass.fitnessFunc(life._gene)
totalFitness += life._fitness
# 如果新基因的适配值大于原先的best基因,就更新best基因
if bestLife < life._fitness:
bestLife = life
return totalFitness, bestLife
# 选择一个个体
def selection(self):
# 产生0到(适配值之和)之间的任何一个实数
r = random.uniform(0, self._totalFitness)
for life in self._population:
r -= life._fitness
if r <= 0:
return life
raise Exception("选择错误", self._totalFitness)
# 交叉
def crossover(self, parent1, parent2):
# 交叉类型概率
rate = random.random()
newGene1 = []
newGene2 = []
variablesCount = len(self._boundaryList) # 变量个数
# 若该概率小于单点交叉概率,则进行单点交叉,否则,进行双点交叉
if rate <= self._crossoverOneRate: # 单点交叉
startIndex = 0
for i in range(variablesCount):
index = random.randint(0, self._encodeLengths[i])
endIndex = startIndex + self._encodeLengths[i]
parentGene1 = parent1._gene[startIndex:endIndex]
parentGene2 = parent2._gene[startIndex:endIndex]
newGene1.extend(parentGene1[:index].tolist() + parentGene2[index:].tolist())
newGene2.extend(parentGene2[:index].tolist() + parentGene1[index:].tolist())
startIndex = self._encodeLengths[i]
else: # 双点交叉
startIndex = 0
for i in range(variablesCount):
index1 = random.randint(0, self._encodeLengths[i] - 1)
index2 = random.randint(index1, self._encodeLengths[i] - 1)
endIndex = startIndex + self._encodeLengths[i]
parentGene1 = parent1._gene[startIndex:endIndex]
parentGene2 = parent2._gene[startIndex:endIndex]
newGene1.extend(
parentGene1[:index1].tolist() + parentGene2[index1:index2].tolist() + parentGene1[index2:].tolist())
newGene2.extend(
parentGene2[:index1].tolist() + parentGene1[index1:index2].tolist() + parentGene2[index2:].tolist())
startIndex = self._encodeLengths[i]
self._crossoverCount += 1
return np.array(newGene1), np.array(newGene2)
# 突变
def mutation(self, gene):
newGene = []
variablesCount = len(self._boundaryList) # 变量个数
# 位置突变概率
locationRate = random.random()
if locationRate < self._mutationLocationRate:
startIndex = 0
for i in range(variablesCount):
# 相当于取得0到self._geneLength - 1之间的一个数,包括0和self._geneLength - 1
index1 = random.randint(0, self._encodeLengths[i] - 1)
index2 = random.randint(0, self._encodeLengths[i] - 1)
while index1 == index2:
index2 = random.randint(0, self._encodeLengths[i] - 1)
pass
endIndex = startIndex + self._encodeLengths[i]
genePart = gene[startIndex:endIndex]
genePart[index1], genePart[index2] = genePart[index2], genePart[index1]
newGene.extend(genePart)
startIndex = self._encodeLengths[i]
newGene = np.array(newGene)
"""
# 整体突变
index1 = random.randint(0, self._geneLength - 1)
index2 = random.randint(0, self._geneLength - 1)
newGene[index1], newGene[index2] = newGene[index2], newGene[index1]
"""
else: # 旋转突变
startIndex = 0
for i in range(variablesCount):
# 相当于取得0到self._geneLength - 1之间的一个数,包括0和self._geneLength - 1
index1 = random.randint(0, self._encodeLengths[i] - 1)
index2 = random.randint(0, self._encodeLengths[i] - 1)
while index1 == index2:
index2 = random.randint(0, self._encodeLengths[i] - 1)
pass
# 保证index1 < index2
if index1 > index2:
tmp = index1
index1 = index2
index2 = tmp
endIndex = startIndex + self._encodeLengths[i]
genePart = gene[startIndex:endIndex]
genePart[index1:index2] = 1 - genePart[index1:index2]
newGene.extend(genePart)
startIndex = self._encodeLengths[i]
newGene = np.array(newGene)
"""
# 整体突变
index1 = random.randint(0, self._geneLength - 1)
index2 = random.randint(0, self._geneLength - 1)
while index1 == index2:
index2 = random.randint(0, self._geneLength - 1)
pass
# 保证index1 < index2
if index1 > index2:
tmp = index1
index1 = index2
index2 = tmp
newGene[index1:index2] = 1 - newGene[index1:index2]
"""
# 突变次数加1
self._mutationCount += 1
return newGene
    # Produce two new offspring from the current population.
    def getNewChild(self):
        """Select two genetically distinct parents, recombine and mutate them
        with the configured probabilities, and return two fitness-evaluated
        children (Life instances)."""
        logging.debug('select 2 parent lives')
        parent1 = self.selection()
        parent2 = self.selection()
        # Re-draw until the parents differ in at least one gene position.
        while self.hammingDist(parent1._gene, parent2._gene) == 0:
            parent2 = self.selection()
            pass
        # Only used by the commented-out debug lines below.
        variableCount = len(self._boundaryList)
        encodeLength = int(self._geneLength / variableCount)
        # logging.debug('parent1: \n%s' % self.decodedOneGene(parent1._gene))
        # logging.debug('parent2: \n%s' % parent1._gene.reshape((variableCount, encodeLength)))
        # logging.debug('parent2: \n%s' % self.decodedOneGene(parent2._gene))
        # logging.debug('parent2: \n%s' % parent2._gene.reshape((variableCount, encodeLength)))
        logging.debug(
            'hamming distance between parent1 and parent2: %d' % self.hammingDist(parent1._gene, parent2._gene))
        # Crossover with probability self._crossoverRate.
        rate = random.random()
        if rate < self._crossoverRate:
            logging.debug('crossover')
            gene1, gene2 = self.crossover(parent1, parent2)
            logging.debug(
                'hamming distance between gene1 and gene2 after crossover: %d' % self.hammingDist(gene1, gene2))
        else:
            gene1, gene2 = parent1._gene, parent2._gene
        # Mutation with probability self._mutationRate.
        rate = random.random()
        if rate < self._mutationRate:
            logging.debug('mutation')
            gene1, gene2 = self.mutation(gene1), self.mutation(gene2)
            logging.debug(
                'hamming distance between gene1 and gene2 after mutation: %d' % self.hammingDist(gene1, gene2))
        # Evaluate the offspring's fitness on their decoded genes.
        life1 = Life(gene1)
        life2 = Life(gene2)
        life1._fitness = self._fitnessClass.fitnessFunc(self.decodedOneGene(gene1))
        life2._fitness = self._fitnessClass.fitnessFunc(self.decodedOneGene(gene2))
        return life1, life2
# 产生下一代
def getNewGeneration(self):
# 记录上一代的最佳适应度得分
bestFitnessCurrent = self._bestLife._fitness
# 更新种群
# 合并父代记忆的个体集合与子代个体组成新的种群,
# 大小为self._populationSize + self._populationSize / CF
newPopulation = self._population[:int(self._populationSize / self._CF)]
# debug
fitnessList = []
for life in newPopulation:
fitnessList.append(life._fitness)
logging.debug('generation %d fitness list of origin populations before elimination' % self._generation)
logging.debug(fitnessList)
# 生成新子代
i = 0
while i < self._populationSize:
child1, child2 = self.getNewChild()
newPopulation.append(child1)
newPopulation.append(child2)
i += 2
# debug
fitnessList = []
for life in newPopulation:
fitnessList.append(life._fitness)
logging.debug('generation | |
over time on the basis of a set of differential equations defining the rates of change of
state variables.
Args:
pop_hist_len (int): Maximum lenght of the population history. Keep memory utilization in mind when using
positive numbers for this argument.
traj_id (Any): ID of the trajectory which wraps the simulation object. That ID should come from the trajectory
ensemble database.
random_seed (int, optional): Pseudo-random number generator seed.
do_keep_mass_flow_specs (bool): Store the last iteration mass flow specs? See
:class:`pop.GroupPopulation <pram.pop.GroupPopulation>` and
:class:`pop.MassFlowSpec <pram.pop.MassFlowSpec>` classes.
"""
    def __init__(self, pop_hist_len=0, traj_id=None, rand_seed=None, do_keep_mass_flow_specs=False):
        # Seed the PRNG first so everything constructed below sees it.
        self.set_rand_seed(rand_seed)

        self.pid = os.getpid()      # process ID
        self.traj_id = traj_id      # trajectory ID (from the trajectory ensemble database)
        self.run_cnt = 0            # number of runs performed so far

        self.pop = GroupPopulation(self, pop_hist_len, do_keep_mass_flow_specs)
        self.rules = []             # group rules
        self.sim_rules = []         # simulation rules
        self.probes = []            # measurement probes

        self.timer = None           # value deduced in add_group() based on rule timers

        self.is_setup_done = False  # flag
            # ensures simulation setup is performed only once while enabling multiple incremental simulation runs of
            # arbitrary length thus promoting interactivity (a sine qua non for a user interface)

        # Incremental-run bookkeeping.
        self.running = DotMap(
            is_running = False,
            progress = 0.0,
            step = 1.0
        )

        self.fn = DotMap(
            group_setup = None  # called before the simulation is run for the very first time
        )

        # Static analysis runs as rules are added; dynamic analysis after runs.
        self.analysis = DotMap(
            rule_static  = StaticRuleAnalyzer(),
            rule_dynamic = DynamicRuleAnalyzer(self)
        )

        self.vars = {}  # simulation variables

        # Reset all mutable bookkeeping to a known state.
        self.reset_cb()
        self.reset_pragmas()
        self.reset_comp_hist()
def __repr__(self):
return f'{self.__class__.__name__}({self.rand_seed or ""})'
def _inf(self, msg):
if not self.pragma.live_info:
return
if self.pragma.live_info_ts:
print(f'[{datetime.datetime.now()}: info] {msg}')
else:
print(f'[info] {msg}')
def add(self, lst=None):
"""Simulation element adder.
If the ``lst`` argument is None, this method returns instance of the :class:`~pram.sim.SimulationAdder` class
that will handle adding simulation elements. Otherwise, it will add all elements of ``lst`` to the simulation.
Args:
lst (Iterable): Combination of objects of the following types: :class:`Group <pram.entity.Group>`,
:class:`Probe <pram.data.Probe>`, :class:`SimRule <pram.rule.SimRule>`,
:class:`Rule <pram.rule.Rule>`, :class:`Model <pram.model.model.Model>`, and
:class:`Site <pram.entity.Site>`.
Returns:
SimulationAdder
"""
if lst:
for i in lst:
if isinstance(i, Group):
self.add_group(i)
elif isinstance(i, Probe):
self.add_probe(i)
elif isinstance(i, SimRule): # must be before Rule
self.add_sim_rule(i)
elif isinstance(i, Rule):
self.add_rule(i)
elif isinstance(i, Model):
self.add_rule(i.rule)
elif isinstance(i, Site):
self.add_site(i)
return self
else:
return SimulationAdder(self)
    def add_group(self, group):
        """Adds a group.

        Rules must be added before any group; adding the first group also
        finalizes rule setup (static analysis and timer synchronization).

        Args:
            group (Group): The group.

        Returns:
            ``self``

        Raises:
            SimulationConstructionError: If no rules have been added yet.
        """

        # No rules present:
        if len(self.rules) == 0:
            raise SimulationConstructionError('A group is being added but no rules are present; rules need to be added before groups.')

        # No groups present:
        if len(self.pop.groups) == 0:  # run when the first group is being added (because that marks the end of adding rules)
            if not self.analysis.rule_static.are_rules_done:
                self.analyze_rules_static()

            # Sync simulation and rules timers (use the finest-grained rule time unit):
            rule_t_unit_ms = min([r.T_UNIT_MS for r in self.rules])
            self.timer = Timer.by_ms(rule_t_unit_ms, iter=0)

        self.pop.add_group(group)

        return self
def add_groups(self, groups):
"""Adds groups.
Args:
groups (Iterable[Group]): The groups.
Returns:
``self``
"""
for g in groups:
self.add_group(g)
return self
def add_probe(self, probe):
"""Adds a probe.
Args:
probe (Probe): The probe.
Returns:
``self``
"""
if probe.name in [p.name for p in self.probes]:
raise SimulationConstructionError(f'Probe with that name ({probe.name}) already exists.')
self.pop.ar_enc.encode_probe(probe)
self.probes.append(probe)
probe.set_pop(self.pop)
return self
def add_probes(self, probes):
"""Adds probes.
Args:
probes (Iterable[Probe]): The probes.
Returns:
``self``
"""
for p in probes:
self.add_probe(p)
return self
    def add_rule(self, rule):
        """Adds a rule.

        If the rule has any inner rules, all of those are added as well. An example of an inner rule is a desease
        transmission model that is being acted upon by another rule, e.g., an intervention rule which therefore
        contains it. Such containing relationship is not being enforced by the framework however.

        An instance of a rule can only be added once.

        Args:
            rule (Rule): The rule (a Model is unwrapped to its underlying rule).

        Returns:
            ``self``

        Raises:
            SimulationConstructionError: If groups have already been added.
        """

        if len(self.rules) == 0:
            self._inf('Constructing a PRAM')

        # Rules must all be in place before the first group is added.
        if len(self.pop.groups) > 0:
            raise SimulationConstructionError('A rule is being added but groups already exist; rules need be added before groups.')

        if isinstance(rule, Rule):
            # Recursively add contained (inner) rules first; duplicates are skipped.
            self.add_rules(rule.get_inner_rules())
            if rule not in self.rules:
                self.rules.append(rule)
        elif isinstance(rule, Model):
            # Unwrap a Model to its rule.
            self.add_rule(rule.rule)
            # self.rules.append(rule)

        self.analysis.rule_static.analyze_rules(self.rules)  # keep the results of the static rule analysis current

        return self
def add_rules(self, rules):
"""Adds rules.
Args:
rules (Iterable[Rule]): The rules.
Returns:
``self``
"""
for r in rules:
self.add_rule(r)
return self
def add_sim_rule(self, rule):
"""Adds a simulation rule.
Args:
rule (SimRule): The simulation rule.
Returns:
``self``
"""
self.sim_rules.append(rule)
self.set_vars(rule.vars)
return self
def add_sim_rules(self, rules):
"""Adds simulation rules.
Args:
rules (Iterable[SimRule]): The simulation rules.
Returns:
``self``
"""
for r in rules:
self.add_sim_rule(r)
return self
def add_site(self, site):
"""Adds a site.
Args:
site (Site): The site.
Returns:
``self``
"""
self.pop.add_site(site)
return self
def add_sites(self, sites):
"""Adds sites.
Args:
sites (Iterable[Site]): The sites.
Returns:
``self``
"""
self.pop.add_sites(sites)
return self
def analyze_rules_static(self):
"""Runs static rule analysis.
See :class:`~pram.sim.StaticRuleAnalyzer`.
Returns:
``self``
"""
self._inf('Running static rule analysis')
self.analysis.rule_static.analyze_rules(self.rules)
self._inf(f' Relevant attributes found : {list(self.analysis.rule_static.attr_used)}')
self._inf(f' Relevant relations found : {list(self.analysis.rule_static.rel_used)}')
return self
    def analyze_rules_dynamic(self):
        """Runs dynamic rule analysis.

        Reports which group attributes and relations the rules actually
        accessed during the most recent run, and which are superfluous.

        See :class:`~pram.sim.DynamicRuleAnalyzer`.

        Returns:
            ``self``
        """
        self._inf('Running dynamic rule analysis')

        rd = self.analysis.rule_dynamic
        rd.analyze()

        if self.pragma.live_info:
            self._inf(f' Accessed attributes : {list(rd.attr_used)}')
            self._inf(f' Accessed relations : {list(rd.rel_used)}')
            self._inf(f' Superfluous attributes : {list(rd.attr_unused)}')
            self._inf(f' Superfluous relations : {list(rd.rel_unused )}')
        else:
            # Outside live-info mode, only warn when the analyze pragma is on
            # and something superfluous was actually found.
            if self.pragma.analyze and (len(rd.attr_unused) > 0 or len(rd.rel_unused) > 0):
                print('Based on the most recent simulation run, the following group attributes A and relations R are superfluous:')
                print(f' A: {list(rd.attr_unused)}')
                print(f' R: {list(rd.rel_unused )}')

        return self
    def analyze_rules_dynamic_old(self):
        """Legacy dynamic rule analysis kept for reference; superseded by
        :meth:`analyze_rules_dynamic`. Computes used/unused group attributes
        and relations directly from Group class state. Returns ``self``."""
        self._inf('Running dynamic rule analysis')

        lr = self.analysis.rule_dynamic
        lr.clear()
        lr.attr_used = getattr(Group, 'attr_used').copy()  # attributes conditioned on by at least one rule
        lr.rel_used  = getattr(Group, 'rel_used').copy()   # relations

        lr.attr_groups = set()  # attributes defining groups
        lr.rel_groups  = set()  # relations
        for g in self.pop.groups.values():
            for ga in g.attr.keys(): lr.attr_groups.add(ga)
            for gr in g.rel.keys():  lr.rel_groups. add(gr)

        # Superfluous = defined on groups but never conditioned on by any rule.
        lr.attr_unused = lr.attr_groups - lr.attr_used  # attributes not conditioned on by even one rule
        lr.rel_unused  = lr.rel_groups - lr.rel_used    # relations

        if self.pragma.live_info:
            self._inf(f' Accessed attributes : {list(self.analysis.rule_dynamic.attr_used)}')
            self._inf(f' Accessed relations : {list(self.analysis.rule_dynamic.rel_used)}')
            self._inf(f' Superfluous attributes : {list(lr.attr_unused)}')
            self._inf(f' Superfluous relations : {list(lr.rel_unused )}')
        else:
            # Only warn when the analyze pragma is on and something was found.
            if self.pragma.analyze and (len(lr.attr_unused) > 0 or len(lr.rel_unused) > 0):
                print('Based on the most recent simulation run, the following group attributes A and relations R are superfluous:')
                print(f' A: {list(lr.attr_unused)}')
                print(f' R: {list(lr.rel_unused )}')

        return self
def commit_group(self, group):
"""Finishes adding a new group.
See :meth:`~pram.sim.Simulation.new_group` for explanation of the mechanism.
Args:
group (Group): The group in question.
Returns:
``self``
"""
self.add_group(group)
return self
def compact(self):
"""Compacts the simulation.
Returns:
``self``
"""
self.pop.compact()
return self
def db(self, db):
"""Simulation database interface.
Args:
db (DB): Database management system specific object.
Returns:
SimulationDBI
"""
return SimulationDBI(self, db)
def gen_diagram(self, fpath_diag, fpath_pdf):
"""Generates a simulation diagram.
Todo:
Reimplement and extend this method.
Args:
fpath_diag(str): Path to the diagram source file.
fpath_pdf(str): Path to the diagram PDF file.
Returns:
``self``
"""
# blockdiag sim {
# diagram = '''
# diagram sim {
# box [shape = box, label = "box"];
# square [shape = square, label = "sq"];
# roundedbox [shape = roundedbox, label = "rbox"];
# circle [shape = circle, label = "circ"];
#
# box -> square -> roundedbox -> circle;
#
# #pop [shape = actor, label = "pop", stacked, numbered = 1000];
# #db [shape = flowchart.database, label = "DB"];
#
# #db -> pop
# }'''
#
# with open(fpath_diag, 'w') as f:
# f.write(diagram)
node_w = 128
node_h = 40
span_w = 64
span_h = 40
fontsize = 8
rules = self.rules
with open(fpath_diag, 'w') as f:
f.write( 'diagram sim {')
f.write( 'orientation = portrait;')
f.write(f' node_width = {node_w};')
f.write(f' node_height = {node_h};')
f.write(f' default_fontsize = {fontsize};')
# f.write(f' timeline [shape=box, label="", width={node_w * len(rules) + span_w * (len(rules) - 1)}, height=8, color="#000000"];')
for (i,r) in enumerate(rules):
# Rule block:
if hasattr(r, 'derivatives'):
num = f', numbered={len(r.derivatives.params)}'
else:
num = ''
f.write(f' rule-{i} [shape=box, label="{r.__class__.__name__}" {num}];')
f.write(f' t-{i} [shape=box, label="", height=8, color="#000000"];')
# Rule-timeline arc:
if isinstance(r.i, IterAlways):
i0, i1 = 0,0
elif isinstance(r.i, IterPoint):
i0, i1 = r.i.i, r.i.i
elif isinstance(r.i, IterInt):
i0, i1 = r.i.i0, r.i.i1
f.write(f' rule-{i} -> t-{i} [label="{i0}"];')
f.write('}')
import | |
# repo: openedx/openedx-census
#!/usr/bin/env python
"""Automate the process of counting courses on Open edX sites."""
import asyncio
import collections
import csv
import itertools
import json
import logging
import os
import pickle
import pprint
import re
import time
import traceback
import urllib.parse
import attr
import click
import requests
import tqdm
from census.helpers import NotTrying, ScrapeFail
from census.html_report import html_report
from census.keys import username, password
from census.report_helpers import get_known_domains, hash_sites_together, sort_sites
from census.session import SessionFactory
from census.settings import (
STATS_SITE,
UPDATE_JSON,
SITES_CSV,
SITES_PICKLE,
MAX_REQUESTS,
TIMEOUT,
USER_AGENT,
)
from census.sites import Attempt, Site, HashedSite, read_sites_csv, courses_and_orgs, totals, read_sites_flat, overcount
from census.site_patterns import find_site_functions
# We don't use anything from this module, it just registers all the parsers.
from census import parsers
# Default headers sent with every scraping request.
HEADERS = {
    'User-Agent': USER_AGENT,
}

# Error-message snippets that indicate a site is gone (dead host, HTTP errors).
GONE_MSGS = [
    "Cannot connect to host",
    "Bad Gateway",
    "TimeoutError",
    "500",
    "503",
    "404",
    "530 get http",         # Cloudflare DNS failures
]

# Snippets that indicate an SSL-certificate problem.
CERTIFICATE_MSGS = [
    "certificate verify failed",
    "CertificateError:",
]

# Certificate messages that should NOT be recorded as real SSL errors.
FALSE_ALARM_CERTIFICATE_MSGS = [
    "unable to get local issuer certificate",
]

log = logging.getLogger(__name__)
def all_have_snippets(errors, snippets):
    """Do all of the errors match one of the snippets?

    Vacuously true for an empty *errors* collection.
    """
    for err in errors:
        if not any(snip in err for snip in snippets):
            return False
    return True
async def parse_site(site, session_factory):
    """Scrape one site, trying every registered parser function in turn.

    The site is first scraped with SSL verification enabled; if every failure
    looks like a certificate error, it is retried once with verification off.
    Results are recorded on *site* (tried, current_courses, ssl_err,
    is_gone_now, time).

    Returns a one-character status code for the progress display:
    '=' count unchanged, '-' count decreased, '+' count increased,
    'B' recorded-gone site that now parses, 'X' still gone, 'G' newly gone,
    'E' errored but not clearly gone.
    """
    for verify_ssl in [True, False]:
        async with session_factory.new(verify_ssl=verify_ssl, listeners=[site]) as session:
            start = time.time()
            errs = []
            success = False
            for parser, args, kwargs, custom_parser in find_site_functions(site.url):
                attempt = Attempt(parser.__name__)
                err = None
                try:
                    attempt.courses = await parser(site, session, *args, **kwargs)
                except NotTrying as exc:
                    # Parser declined this site: recorded, but not counted as an error.
                    attempt.error = str(exc)
                except ScrapeFail as exc:
                    attempt.error = f"{exc.__class__.__name__}: {exc}"
                    err = str(exc) or exc.__class__.__name__
                except Exception as exc:
                    #print(f"Exception: {exc!r}, {exc}, {exc.__class__.__name__}")
                    #print(traceback.format_exc())
                    attempt.error = traceback.format_exc()
                    err = str(exc) or exc.__class__.__name__
                else:
                    success = True
                site.tried.append(attempt)
                if err:
                    errs.append(err)
                    if custom_parser:
                        site.custom_parser_err = True
                else:
                    if custom_parser:
                        # A custom parser that succeeded is authoritative; stop trying others.
                        break
            if success:
                site.current_courses = site.attempt_course_count()
                if site.is_gone:
                    char = 'B'  # previously recorded gone, but it's back
                else:
                    if site.current_courses == site.latest_courses:
                        char = '='
                    elif site.current_courses < site.latest_courses:
                        char = '-'
                    else:
                        char = '+'
            else:
                if verify_ssl and all_have_snippets(errs, CERTIFICATE_MSGS):
                    # We had an SSL error. Try again. But only mark it as an error if it wasn't
                    # a false alarm error.
                    if not all_have_snippets(errs, FALSE_ALARM_CERTIFICATE_MSGS):
                        site.ssl_err = True
                    site.tried = []
                    site.custom_parser_err = False
                    log.debug("SSL error: %s", (errs,))
                    continue
                gone_content = site.current_courses is None and not site.is_openedx
                gone_http = all_have_snippets(errs, GONE_MSGS)
                if gone_content or gone_http:
                    site.is_gone_now = True
                    if site.is_gone:
                        char = 'X'  # was gone, still gone
                    else:
                        char = 'G'  # newly gone
                else:
                    char = 'E'  # errored, but not clearly gone
            site.time = time.time() - start
            return char
async def run(sites, session_kwargs):
    """Scrape all *sites* concurrently with a shared session factory.

    Shows a tqdm progress bar whose description accumulates per-status-char
    counts (see parse_site's return value).
    """
    kwargs = dict(max_requests=MAX_REQUESTS, headers=HEADERS)
    kwargs.update(session_kwargs)
    factory = SessionFactory(**kwargs)
    tasks = [asyncio.ensure_future(parse_site(site, factory)) for site in sites]
    chars = collections.Counter()
    progress = tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks), smoothing=0.0)
    for completed in progress:
        char = await completed
        chars[char] += 1
        # e.g. "+3 =10 E2" -- counts per status character.
        desc = " ".join(f"{c}{v}" for c, v in sorted(chars.items()))
        progress.set_description(desc)
    progress.close()
    print()
def scrape_sites(sites, session_kwargs):
    """Drive the async scrape loop to completion; Ctrl-C aborts quietly.

    NOTE(review): asyncio.get_event_loop() is deprecated for this use in
    Python 3.10+; migrating to asyncio.run() would need another way to keep
    the custom (silencing) exception handler below.
    """
    try:
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(run(sites, session_kwargs))
        # Some exceptions go to stderr and then to my except clause? Shut up.
        loop.set_exception_handler(lambda loop, context: None)
        loop.run_until_complete(future)
    except KeyboardInterrupt:
        pass
@click.group(help=__doc__)
def cli():
    # Root of the command-line interface. The help text comes from the module
    # docstring via the decorator above, so no function docstring is needed.
    pass
@cli.command()
@click.option('--in', 'in_file', type=click.Path(exists=True), help="File of sites to scrape")
@click.option('--log', 'log_level', type=str, default='info', help="Logging level to use")
@click.option('--gone', is_flag=True, help="Scrape the sites we've recorded as gone")
@click.option('--site', is_flag=True, help="Command-line arguments are URLs to scrape")
@click.option('--summarize', is_flag=True, help="Summarize results instead of saving pickle")
@click.option('--save', is_flag=True, help="Save the scraped pages in the save/ directory")
@click.option('--out', 'out_file', type=click.File('wb'), default=SITES_PICKLE, help="Pickle file to write")
@click.option('--timeout', type=int, help=f"Timeout in seconds for each request [{TIMEOUT}]", default=TIMEOUT)
@click.argument('site_patterns', nargs=-1)
def scrape(in_file, log_level, gone, site, summarize, save, out_file, timeout, site_patterns):
    """Visit sites and count their courses."""
    logging.basicConfig(level=log_level.upper())
    # aiohttp issues warnings about cookies, silence them (and all other warnings!)
    # WARNING:aiohttp.client:Can not load response cookies: Illegal key
    # The bad cookies were from http://rechum.sev.gob.mx
    logging.getLogger('aiohttp.client').setLevel(logging.ERROR)

    if site:
        # Exact sites provided on the command line
        sites = (Site.from_url(u) for u in site_patterns)
    else:
        # Make the list of sites we're going to scrape.
        in_file = in_file or SITES_CSV
        if in_file.endswith('.csv'):
            sites = read_sites_csv(in_file)
        else:
            sites = read_sites_flat(in_file)
        # Without --site, positional arguments act as regex filters on URLs.
        if site_patterns:
            sites = (s for s in sites if any(re.search(p, s.url) for p in site_patterns))
        if not gone:
            sites = (s for s in sites if not s.is_gone)

    sites = list(sites)
    if len(sites) == 1:
        print("1 site")
    else:
        print(f"{len(sites)} sites")

    # Pages saved with --save land in this directory.
    os.makedirs("save", exist_ok=True)

    # SCRAPE!
    session_kwargs = {
        'save': save,
        'timeout': timeout,
    }
    scrape_sites(sites, session_kwargs)

    if summarize:
        show_text_report(sites)
    else:
        with out_file:
            pickle.dump(sites, out_file)
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
              help='The sites.pickle file to read')
def summary(in_file):
    # Load a previously scraped site list and print summary statistics.
    # (No docstring: it would become click help text and change CLI output.)
    with in_file:
        loaded_sites = pickle.load(in_file)
    summarize(loaded_sites)
def summarize(sites):
    """Print aggregate statistics for scraped *sites* to stdout."""
    old, new = totals(sites)
    changed = sum(1 for s in sites if s.should_update())
    gone = sum(1 for s in sites if s.is_gone_now and not s.is_gone)
    back = sum(1 for s in sites if not s.is_gone_now and s.is_gone and s.current_courses)
    print(f"{len(sites)} sites")
    print(f"Courses: {old} --> {new} ({new-old:+d}); Sites: {changed} changed, {gone} gone, {back} back")

    # Group live sites by content fingerprint so that aliases of one
    # deployment are counted together.
    hashed_sites = collections.defaultdict(HashedSite)
    nohash_sites = []
    for site in sites:
        if site.is_gone_now:
            continue
        if not site.current_courses:
            continue
        if site.fingerprint is None:
            # No fingerprint: the site stands alone in its own wrapper.
            hashed_site = HashedSite()
            hashed_site.sites.append(site)
            nohash_sites.append(hashed_site)
        else:
            hashed_site = hashed_sites[site.fingerprint]
            hashed_site.fingerprint = site.fingerprint
            hashed_site.sites.append(site)

    print(f"{len(nohash_sites)} with no hash, {len(hashed_sites)} with hash")
    if nohash_sites:
        print("No hash:")
        for site in nohash_sites:
            print(f" {site.best_url()}: {site.current_courses()}")

    # Separate chaff deployments (as classified by HashedSite.all_chaff)
    # from the real ones.
    chaff_sites = []
    not_chaff_sites = []
    for hashed_site in itertools.chain(hashed_sites.values(), nohash_sites):
        if hashed_site.all_chaff():
            chaff_sites.append(hashed_site)
        else:
            not_chaff_sites.append(hashed_site)
    print(f"Total sites: {len(not_chaff_sites)} not chaff, {len(chaff_sites)} chaff")
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
              help='The sites.pickle file to read')
@click.option('--out', 'out_file', type=click.File('w'), default="html/sites.html",
              help='The HTML file to write')
@click.option('--skip-none', is_flag=True, help="Don't include sites with no count")
@click.option('--only-new', is_flag=True, help="Only include sites we think are new")
@click.option('--full', is_flag=True, help="Include courses, orgs, etc")
def html(in_file, out_file, skip_none, only_new, full):
    """Write an HTML report."""
    with in_file:
        sites = pickle.load(in_file)

    if skip_none:
        sites = [site for site in sites if site.current_courses is not None]

    # Prep data for reporting.
    old, new = totals(sites)
    if full:
        all_courses, all_orgs, all_course_ids = courses_and_orgs(sites)
        # Side product of --full: a flat file of every course id seen.
        with open("course-ids.txt", "w") as f:
            f.write("".join(i + "\n" for i in sorted(all_course_ids)))
    else:
        all_courses = all_orgs = None
    html_report(out_file, sites, old, new, all_courses, all_orgs, only_new=only_new)
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
              help='The sites.pickle file to read')
@click.option('--out', 'out_file', type=click.File('w'), default="html/sites.csv",
              help='The CSV file to write')
def sheet(in_file, out_file):
    """Write a CSV file for importing into a spreadsheet.

    Always skips no-course sites.  Only includes new sites.
    """
    with in_file:
        sites = pickle.load(in_file)
    sites = [site for site in sites if site.current_courses is not None]
    # Collapse alias deployments of the same instance into one row.
    known_domains = get_known_domains()
    hashed_sites = hash_sites_together(sites, known_domains, only_new=True)
    writer = csv.DictWriter(out_file, ["disposition", "language", "geography", "url", "courses", "sites", "tags", "aliases"])
    writer.writeheader()
    for hashed_site in hashed_sites:
        url = hashed_site.best_url()
        other_urls = [site.url for site in hashed_site.sites if site.url != url]
        # Union of every alias's tags.
        tags = {t for site in hashed_site.sites for t, _ in site.styled_tags()}
        writer.writerow({
            "url": url,
            "courses": hashed_site.current_courses(),
            "sites": len(hashed_site.sites),
            "tags": ", ".join(sorted(tags)),
            "aliases": ", ".join(other_urls),
        })
    print(f"Wrote {len(hashed_sites)} sites to {out_file.name}")
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE)
def emails(in_file):
    """Write the emails found."""
    with in_file:
        sites = pickle.load(in_file)
    # Deduplicate across sites, then print one address per line, sorted.
    found = {email for site in sites for email in site.emails}
    print("\n".join(sorted(found)))
@cli.command('json')
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE)
def write_json(in_file):
    """Write the update.json file."""
    with in_file:
        sites = pickle.load(in_file)
    # Report sites in descending order of their last-known course count.
    by_courses_desc = sorted(sites, key=lambda site: site.latest_courses, reverse=True)
    all_courses, _, _ = courses_and_orgs(sites)
    json_update(by_courses_desc, all_courses, include_overcount=True)
@cli.command('text')
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
              help='The sites.pickle file to read')
def text_report(in_file):
    """Write a text report about site scraping."""
    with in_file:
        loaded_sites = pickle.load(in_file)
    show_text_report(loaded_sites)
def show_text_report(sites):
    """Print a human-readable scraping summary for every site."""
    old, new = totals(sites)
    print(f"Found courses went from {old} to {new}")
    for site in sorted(sites, key=lambda s: s.latest_courses, reverse=True):
        print(f"{site.url}: {site.latest_courses} --> {site.current_courses} ({site.fingerprint})")
        # One line per scraping strategy that was attempted.
        for attempt in site.tried:
            if attempt.error is None:
                line = f"Counted {attempt.courses} courses"
            else:
                line = attempt.error.splitlines()[-1]
            print(f"   {attempt.strategy}: {line}")
        tags = ", ".join(t for t, s in site.styled_tags())
        if tags:
            print(f"   [{tags}]")
        other = site.other_info + site.emails
        if other:
            print(f"   Info: {'; '.join(set(other))}")
def json_update(sites, all_courses, include_overcount=False):
    """Write a JSON file for uploading to the stats site.
    `all_courses` is a dict mapping course_ids to a set of sites running that
    course.
    """
    site_updates = {}
    for site in sites:
        if not site.should_update():
            continue
        # Fall back to the previous count when no new count was scraped.
        count = site.current_courses
        if count is None:
            count = site.latest_courses
        site_updates[site.url] = {
            'old_course_count': site.latest_courses,
            'course_count': count,
            'is_gone': site.is_gone_now,
        }
    data = {}
    data['sites'] = site_updates
    if include_overcount:
        data['overcount'] = overcount(all_courses)
    with open(UPDATE_JSON, "w") as update_json:
        json.dump(data, update_json, indent=4)
def login(site, session):
login_url = urllib.parse.urljoin(site, "/login/")
resp = session.get(login_url)
resp.raise_for_status()
m = re.search(r'name="csrfmiddlewaretoken" value="([^"]+)"', resp.text)
if m:
csrftoken = m.group(1)
else:
raise Exception(f"No CSRF token found from {login_url}")
| |
"vGPU profile {0} is already configured for VM {1}. "
"Skip.".format(vm_cfg["profile"], vm_cfg["vm"])
)
else:
tasks.append(vm_update.add_vgpu(vm_cfg["profile"]))
else:
self.logger.error(
"vGPU profile {0} is not available for VM {1}. Skip.".format(
vm_cfg["profile"], vm_cfg["vm"]
)
)
if tasks:
if not vm_status.is_memory_reser_full():
self.logger.warning(
"Adding a PCI device or shared PCI device "
"in passthrough mode needs to reserve memory. "
"Reserving memory."
)
tasks.append(vm_update.memory_reservation(reser=1))
else:
self.logger.debug("Good. Memory is already reserved.")
return tasks
def _get_remove_vgpu_tasks(self, vm_cfg):
"""Remove a vGPU profile for a VM and get Tasks
Args:
vm_cfg (dict): a dict contains VM config info
Returns:
list: a list of Tasks
"""
tasks = []
vm_obj = self.objs.get_vm(vm_cfg["vm"])
vm_status = GetVM(vm_obj)
vm_update = ConfigVM(vm_obj)
Check().check_kv(vm_cfg, "profile", required=True)
if vm_cfg["profile"] == vm_status.existing_vgpu_profile():
self.logger.info(
"vGPU {0} is to be removed "
"from VM {1}".format(vm_cfg["profile"], vm_cfg["vm"])
)
tasks.append(vm_update.remove_vgpu(vm_cfg["profile"]))
else:
self.logger.error(
"Couldn't find vgpu {0} on VM {1}. Skip.".format(
vm_cfg["profile"], vm_cfg["vm"]
)
)
return tasks
def pvrdma_cli(self):
"""Add/Remove PVRDMA device for VM(s)
Returns:
None
"""
vm_cfgs = self._extract_file(self.cfg)
if self.cfg["add"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.extend(self._get_add_pvrdma_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add PVRDMA device(s)")
if self.cfg["remove"]:
tasks = []
for vm_cfg in vm_cfgs:
tasks.append(self._get_remove_pvrdma_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Remove PVRDMA device(s)")
def _pvrdma_cluster(self, vm_cfgs, *keys):
"""Add/Remove PVRDMA device for VM(s) (defined in cluster conf file)
Args:
vm_cfgs (list): a list of dicts contains VM config info
*keys: a keyword array that can trigger this configuration
Returns:
None
"""
tasks = []
for vm_cfg in vm_cfgs:
if all(k in vm_cfg for k in keys):
tasks.extend(self._get_add_pvrdma_tasks(vm_cfg))
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Add PVRDMA device(s)")
    def _get_add_pvrdma_tasks(self, vm_cfg):
        """Add PVRDMA device(s) for a VM and get Task

        Validates that the config names a DVS and a PVRDMA port group, then
        queues a task to attach the PVRDMA adapter unless the VM already has
        that port group.  Also queues a full memory reservation if one is
        needed (required for passthrough-style devices).

        Args:
            vm_cfg (dict): a dict contains VM config info

        Returns:
            list: a list of Tasks
        """
        tasks = []
        # Resolve the DVS first; missing/invalid names fail before touching the VM.
        Check().check_kv(vm_cfg, "dvs_name", required=True)
        dvs_name = vm_cfg["dvs_name"]
        dvs_obj = self.objs.get_dvs(dvs_name)
        vm_obj = self.objs.get_vm(vm_cfg["vm"])
        vm_update = ConfigVM(vm_obj)
        vm_status = GetVM(vm_obj)
        Check().check_kv(vm_cfg, "pvrdma_port_group", required=True)
        pg = vm_cfg["pvrdma_port_group"]
        pg_obj = self.objs.get_network(pg)
        self.logger.info("Found port group {0}".format(pg))
        # Skip VMs that already have the port group attached.
        if pg in vm_status.network_names():
            self.logger.error(
                "Port group {0} already exists on VM {1}. "
                "Skipping".format(pg, vm_obj.name)
            )
        else:
            tasks.append(vm_update.add_pvrdma(dvs_obj, pg_obj))
        # Only reserve memory when we actually queued a device-add task.
        if tasks:
            if not vm_status.is_memory_reser_full():
                self.logger.warning(
                    "Add a PVRDMA device needs to reserve memory. " "Reserving memory."
                )
                tasks.append(vm_update.memory_reservation(reser=1))
            else:
                self.logger.debug("Good. Memory is already reserved.")
        return tasks
def _get_remove_pvrdma_tasks(self, vm_cfg):
"""Remove PVRDMA device from a VM and get Task
Args:
vm_cfg (dict): a dict contains vm config info
Returns:
Task
"""
vm_obj = self.objs.get_vm(vm_cfg["vm"])
Check().check_kv(vm_cfg, "pvrdma_port_group", required=True)
pg = vm_cfg["pvrdma_port_group"]
pg_obj = GetVM(vm_obj).network_obj(
network_name=pg, device_type=vim.VirtualVmxnet3Vrdma
)
if pg_obj:
self.logger.debug("Found port group {0} for VM {1}".format(pg, vm_obj.name))
task = ConfigVM(vm_obj).remove_network_adapter(pg_obj)
return task
else:
self.logger.error(
"Couldn't find port group {0} "
"on VM {1} to remove".format(pg, vm_obj.name)
)
return None
def svs_cli(self):
"""Create a standard virtual switch
Returns:
None
"""
if self.cfg["create"]:
self._create_svs(self.cfg)
if self.cfg["destroy"]:
self._destroy_svs(self.cfg)
    def _create_svs(self, svs_cfg):
        """Create a standard virtual switch (and optional port group).

        Note that the API for adding/destroying svs doesn't return Task to
        track, so failures are reported per-host via logging rather than via
        task waiting.

        Args:
            svs_cfg (dict): a dict contains svs config info
                ("name", "pnic", "host", optional "port_group")

        Returns:
            None
        """
        Check().check_kv(svs_cfg, "name", required=True)
        Check().check_kv(svs_cfg, "pnic", required=True)
        Check().check_kv(svs_cfg, "host", required=True)
        svs = svs_cfg["name"]
        pnic = svs_cfg["pnic"]
        # Accept either a single host name (str) or a list of host names.
        svs_hosts = []
        if isinstance(svs_cfg["host"], str):
            svs_hosts.append(svs_cfg["host"])
        elif isinstance(svs_cfg["host"], list):
            svs_hosts = svs_cfg["host"]
        else:
            # Any other type silently results in no hosts to configure.
            pass
        for svs_host in svs_hosts:
            host_obj = self.objs.get_host(svs_host)
            host_update = ConfigHost(host_obj)
            try:
                host_update.create_svs(svs_name=svs, vmnic=pnic)
                self.logger.info(
                    "Creating standard virtual switch {0} " "is successful.".format(svs)
                )
            except vmodl.MethodFault as error:
                self.logger.error("Caught vmodl fault: " + error.msg)
            # Port group is optional; created inside the new switch when given.
            if Check().check_kv(svs_cfg, "port_group"):
                try:
                    host_update.create_pg_in_svs(
                        svs_name=svs, pg_name=svs_cfg["port_group"]
                    )
                    self.logger.info(
                        "Creating port group {0} "
                        "within virtual switch {1} is "
                        "successful.".format(svs_cfg["port_group"], svs)
                    )
                except vmodl.MethodFault as error:
                    self.logger.error("Caught vmodl fault: " + error.msg)
    def _destroy_svs(self, svs_cfg):
        """Destroy a standard virtual switch (and optional port group).

        Note that the API for adding/destroying svs doesn't return Task to
        track, so failures are reported per-host via logging.

        Args:
            svs_cfg (dict): a dict contains svs config info
                ("name", "host", optional "port_group")

        Returns:
            None
        """
        Check().check_kv(svs_cfg, "host", required=True)
        Check().check_kv(svs_cfg, "name", required=True)
        # Accept either a single host name (str) or a list of host names.
        svs_hosts = []
        if isinstance(svs_cfg["host"], str):
            svs_hosts.append(svs_cfg["host"])
        elif isinstance(svs_cfg["host"], list):
            svs_hosts = svs_cfg["host"]
        else:
            # Any other type silently results in no hosts to configure.
            pass
        svs_name = svs_cfg["name"]
        for svs_host in svs_hosts:
            host_obj = self.objs.get_host(svs_host)
            host_update = ConfigHost(host_obj)
            # destroy port group within this svs first
            if Check().check_kv(svs_cfg, "port_group"):
                pg_name = svs_cfg["port_group"]
                try:
                    host_update.destroy_pg(pg_name)
                    self.logger.info(
                        "Destroying port group {0} "
                        "is successful.".format(svs_cfg["port_group"])
                    )
                except vmodl.MethodFault as error:
                    self.logger.error("Caught vmodl fault : " + error.msg)
            try:
                host_update.destroy_svs(svs_name)
                self.logger.info(
                    "Destroying virtual switch {0} is " "successful.".format(svs_name)
                )
            except vmodl.MethodFault as error:
                self.logger.error("Caught vmodl fault : " + error.msg)
def dvs_cli(self):
"""Create a distributed virtual switch
Returns:
None
"""
if self.cfg["create"]:
self._create_dvs(self.cfg)
if self.cfg["destroy"]:
self._destroy_dvs(self.cfg)
    def _create_dvs(self, dvs_cfg):
        """Create a distributed virtual switch (and optional port group).

        Args:
            dvs_cfg (dict): a dict contains dvs config info
                ("name", "datacenter", "host", "pnic", optional "port_group")

        Returns:
            None
        """
        self.logger.info("Checking DVS arguments...")
        Check().check_kv(dvs_cfg, "name", required=True)
        Check().check_kv(dvs_cfg, "datacenter", required=True)
        Check().check_kv(dvs_cfg, "host", required=True)
        Check().check_kv(dvs_cfg, "pnic", required=True)
        self.logger.info("DVS arguments checking is completed")
        dvs_name = dvs_cfg["name"]
        pnics = []
        dvs_hosts = []
        # Accept either a single host name (str) or a list of host names.
        if isinstance(dvs_cfg["host"], str):
            dvs_hosts.append(dvs_cfg["host"])
        elif isinstance(dvs_cfg["host"], list):
            dvs_hosts = dvs_cfg["host"]
        else:
            pass
        # Same normalization for the physical NIC(s).
        if isinstance(dvs_cfg["pnic"], str):
            pnics.append(dvs_cfg["pnic"])
        elif isinstance(dvs_cfg["pnic"], list):
            pnics.extend(dvs_cfg["pnic"])
        else:
            pass
        # Every host gets the same uplink NIC list.
        host_vmnics = {}
        datacenter_obj = self.objs.get_datacenter(dvs_cfg["datacenter"])
        for dvs_host in dvs_hosts:
            host_obj = self.objs.get_host(dvs_host)
            host_vmnics[host_obj] = pnics
        task = ConfigDatacenter(datacenter_obj).create_dvs(host_vmnics, dvs_name)
        GetWait().wait_for_tasks([task], task_name="Create distributed virtual switch")
        # create port group within this DVS
        if Check().check_kv(dvs_cfg, "port_group"):
            dvs_obj = self.objs.get_dvs(dvs_name)
            task = ConfigDVS(dvs_obj).create_pg_in_dvs(dvs_cfg["port_group"])
            GetWait().wait_for_tasks(
                [task], task_name="Create port group within this DVS"
            )
    def _destroy_dvs(self, dvs_cfg):
        """Destroy a distributed virtual switch, removing its port groups first.

        Args:
            dvs_cfg (dict): a dict contains dvs config info ("name")

        Returns:
            None
        """
        Check().check_kv(dvs_cfg, "name", required=True)
        dvs_obj = self.objs.get_dvs(dvs_cfg["name"])
        # remove port group within this DVS first
        if dvs_obj.portgroup:
            for pg_obj in dvs_obj.portgroup:
                # The auto-created uplink port group ("DVUplinks") must stay;
                # it is removed along with the switch itself.
                if "DVUplinks" not in pg_obj.name:
                    self.logger.info(
                        "Remove port group: {0} within the "
                        "DVS: {1} first".format(pg_obj.name, dvs_obj.name)
                    )
                    task = pg_obj.Destroy_Task()
                    GetWait().wait_for_tasks([task], task_name="Destroy port group")
        task = ConfigDVS(dvs_obj).destroy_dvs()
        GetWait().wait_for_tasks([task], task_name="Destroy distributed virtual switch")
    def cluster(self):
        """Cluster creation/destroy based on the definition from cluster conf
        file.

        Reads the _SVS_, _DVS_ and _VMS_ sections of the cluster file, then
        creates resources in dependency order (switches before VMs) or
        destroys them in reverse (VMs before switches).

        Returns:
            None
        """
        file_read = Cluster(self.cfg["file"])
        svs_cfgs = file_read.read_svs_dvs_section(sec_def_key="_SVS_")
        dvs_cfgs = file_read.read_svs_dvs_section(sec_def_key="_DVS_")
        vm_cfgs = file_read.read_vm_section(sec_def_key="_VMS_")
        # In debug mode, dump every parsed section for troubleshooting.
        if self.cfg["debug"]:
            if svs_cfgs:
                for svs_cfg in svs_cfgs:
                    self.logger.debug("_SVS_ read in as \n {0}".format(svs_cfg))
            else:
                self.logger.debug("No _SVS_ config info read in.")
            if dvs_cfgs:
                for dvs_cfg in dvs_cfgs:
                    self.logger.debug("_DVS_ read in as \n {0}".format(dvs_cfg))
            else:
                self.logger.debug("No _DVS_ config info read in.")
            if vm_cfgs:
                for vm_cfg in vm_cfgs:
                    self.logger.debug("_VMS_ read in as \n {0}".format(vm_cfg))
            else:
                self.logger.debug("No _VMS_ config info read in.")
        # An entirely empty parse means the file format is wrong; bail out.
        if len(svs_cfgs) == 0 and len(dvs_cfgs) == 0 and len(vm_cfgs) == 0:
            self.logger.error(
                "Couldn't correctly read cluster configuration "
                "file. Please check the format."
            )
            raise SystemExit
        # Create order: switches first so VMs can attach to them.
        if self.cfg["create"]:
            if svs_cfgs:
                self._create_cluster_svs(svs_cfgs)
            if dvs_cfgs:
                self._create_cluster_dvs(dvs_cfgs)
            if vm_cfgs:
                self._create_cluster_vms(vm_cfgs)
        # Destroy order: VMs first, then the switches they were attached to.
        if self.cfg["destroy"]:
            if vm_cfgs:
                self._destroy_cluster_vms(vm_cfgs)
            if svs_cfgs:
                self._destroy_cluster_svs(svs_cfgs)
            if dvs_cfgs:
                self._destroy_cluster_dvs(dvs_cfgs)
def _create_cluster_svs(self, switch_cfgs):
"""
Args:
switch_cfgs (list): a list of dicts contain switch config info
which is extracted from cluster file
Returns:
None
"""
for switch in switch_cfgs:
self._create_svs(switch)
def _create_cluster_dvs(self, switch_cfgs):
"""
Args:
switch_cfgs (list): a list of dicts contain switch config info
which is extracted from cluster file
Returns:
None
"""
for switch in switch_cfgs:
self._create_dvs(switch)
    def _create_cluster_vms(self, vm_cfgs):
        """Clone and configure every VM defined in the cluster file.

        Each ``_*_cluster`` helper applies one aspect of configuration to the
        VMs whose config dict contains the given trigger key(s).  The order
        below is deliberate: clone first, then resources/devices, then power.

        Args:
            vm_cfgs (list): a list of dicts contain VM config info
                which is extracted from cluster file

        Returns:
            None
        """
        self._clone_cluster(vm_cfgs, "template")
        self._cpu_shares_cluster(vm_cfgs, "cpu_shares")
        self._memory_shares_cluster(vm_cfgs, "memory_shares")
        self._cpumem_reser_cluster(vm_cfgs, "cpu_reservation", "memory_reservation")
        self._network_cluster(vm_cfgs, "port_group")
        self._network_cfg_cluster(vm_cfgs, "ip", "is_dhcp")
        self._latency_cluster(vm_cfgs, "latency")
        self._passthru_cluster(vm_cfgs, "device")
        self._vgpu_cluster(vm_cfgs, "vgpu")
        self._sriov_cluster(vm_cfgs, "sriov_port_group")
        self._pvrdma_cluster(vm_cfgs, "pvrdma_port_group")
        self._power_cluster(vm_cfgs, "power")
        # execute post scripts with enforced order
        cluster_read = Cluster(self.cfg["file"])
        sorted_posts = cluster_read.collect_scripts(vm_cfgs)
        for post in sorted_posts:
            tasks = []
            for spec in post:
                usr, pwd, vm, scripts, _ = spec
                tasks.extend(self._get_post_procs(usr, pwd, vm, scripts))
            if tasks:
                proc_mng = self.content.guestOperationsManager.processManager
                GetWait().wait_for_procs(proc_mng, tasks)
        # get IP
        for vm_cfg in vm_cfgs:
            vm_obj = self.objs.get_vm(vm_cfg["vm"])
            GetVM(vm_obj).get_ip_addr()
def _destroy_cluster_vms(self, vm_cfgs):
"""
Args:
vm_cfgs (list): a list of dicts contain VM config info
which is extracted from cluster file
Returns:
None
"""
vms = [vm_cfg["vm"] for vm_cfg in vm_cfgs]
if vms:
confirm = input("[ACTION] Do you really want to destroy {0} ? ".format(vms))
try:
if bool(strtobool(confirm)):
tasks = self._get_destroy_tasks(vms)
if tasks:
GetWait().wait_for_tasks(tasks, task_name="Destroy VM")
else:
self.logger.info("Not destroying any VMs")
except ValueError:
raise SystemExit("Not a valid answer. Exit.")
else:
self.logger.info("No VMs specified to destroy")
def _destroy_cluster_svs(self, switch_cfgs):
"""
Args:
switch_cfgs (list): a | |
0.285
# - Epoch 60 Batch 0/21 train_loss = 0.274
# - Epoch 60 Batch 10/21 train_loss = 0.240
# - Epoch 60 Batch 20/21 train_loss = 0.264
# - Epoch 61 Batch 9/21 train_loss = 0.276
# - Epoch 61 Batch 19/21 train_loss = 0.272
# - Epoch 62 Batch 8/21 train_loss = 0.277
# - Epoch 62 Batch 18/21 train_loss = 0.266
# - Epoch 63 Batch 7/21 train_loss = 0.259
# - Epoch 63 Batch 17/21 train_loss = 0.253
# - Epoch 64 Batch 6/21 train_loss = 0.243
# - Epoch 64 Batch 16/21 train_loss = 0.250
# - Epoch 65 Batch 5/21 train_loss = 0.246
# - Epoch 65 Batch 15/21 train_loss = 0.247
# - Epoch 66 Batch 4/21 train_loss = 0.231
# - Epoch 66 Batch 14/21 train_loss = 0.231
# - Epoch 67 Batch 3/21 train_loss = 0.247
# - Epoch 67 Batch 13/21 train_loss = 0.229
# - Epoch 68 Batch 2/21 train_loss = 0.252
# - Epoch 68 Batch 12/21 train_loss = 0.217
# - Epoch 69 Batch 1/21 train_loss = 0.222
# - Epoch 69 Batch 11/21 train_loss = 0.227
# - Epoch 70 Batch 0/21 train_loss = 0.226
# - Epoch 70 Batch 10/21 train_loss = 0.205
# - Epoch 70 Batch 20/21 train_loss = 0.225
# - Epoch 71 Batch 9/21 train_loss = 0.236
# - Epoch 71 Batch 19/21 train_loss = 0.236
# - Epoch 72 Batch 8/21 train_loss = 0.234
# - Epoch 72 Batch 18/21 train_loss = 0.240
# - Epoch 73 Batch 7/21 train_loss = 0.216
# - Epoch 73 Batch 17/21 train_loss = 0.214
# - Epoch 74 Batch 6/21 train_loss = 0.219
# - Epoch 74 Batch 16/21 train_loss = 0.217
# - Epoch 75 Batch 5/21 train_loss = 0.228
# - Epoch 75 Batch 15/21 train_loss = 0.212
# - Epoch 76 Batch 4/21 train_loss = 0.239
# - Epoch 76 Batch 14/21 train_loss = 0.207
# - Epoch 77 Batch 3/21 train_loss = 0.226
# - Epoch 77 Batch 13/21 train_loss = 0.200
# - Epoch 78 Batch 2/21 train_loss = 0.234
# - Epoch 78 Batch 12/21 train_loss = 0.194
# - Epoch 79 Batch 1/21 train_loss = 0.206
# - Epoch 79 Batch 11/21 train_loss = 0.210
# - Epoch 80 Batch 0/21 train_loss = 0.230
# - Epoch 80 Batch 10/21 train_loss = 0.196
# - Epoch 80 Batch 20/21 train_loss = 0.192
# - Epoch 81 Batch 9/21 train_loss = 0.210
# - Epoch 81 Batch 19/21 train_loss = 0.199
# - Epoch 82 Batch 8/21 train_loss = 0.207
# - Epoch 82 Batch 18/21 train_loss = 0.201
# - Epoch 83 Batch 7/21 train_loss = 0.203
# - Epoch 83 Batch 17/21 train_loss = 0.195
# - Epoch 84 Batch 6/21 train_loss = 0.219
# - Epoch 84 Batch 16/21 train_loss = 0.203
# - Epoch 85 Batch 5/21 train_loss = 0.195
# - Epoch 85 Batch 15/21 train_loss = 0.208
# - Epoch 86 Batch 4/21 train_loss = 0.196
# - Epoch 86 Batch 14/21 train_loss = 0.183
# - Epoch 87 Batch 3/21 train_loss = 0.204
# - Epoch 87 Batch 13/21 train_loss = 0.192
# - Epoch 88 Batch 2/21 train_loss = 0.214
# - Epoch 88 Batch 12/21 train_loss = 0.195
# - Epoch 89 Batch 1/21 train_loss = 0.205
# - Epoch 89 Batch 11/21 train_loss = 0.198
# - Epoch 90 Batch 0/21 train_loss = 0.206
# - Epoch 90 Batch 10/21 train_loss = 0.182
# - Epoch 90 Batch 20/21 train_loss = 0.192
# - Epoch 91 Batch 9/21 train_loss = 0.204
# - Epoch 91 Batch 19/21 train_loss = 0.176
# - Epoch 92 Batch 8/21 train_loss = 0.199
# - Epoch 92 Batch 18/21 train_loss = 0.197
# - Epoch 93 Batch 7/21 train_loss = 0.195
# - Epoch 93 Batch 17/21 train_loss = 0.186
# - Epoch 94 Batch 6/21 train_loss = 0.181
# - Epoch 94 Batch 16/21 train_loss = 0.190
# - Epoch 95 Batch 5/21 train_loss = 0.188
# - Epoch 95 Batch 15/21 train_loss = 0.181
# - Epoch 96 Batch 4/21 train_loss = 0.186
# - Epoch 96 Batch 14/21 train_loss = 0.179
# - Epoch 97 Batch 3/21 train_loss = 0.207
# - Epoch 97 Batch 13/21 train_loss = 0.189
# - Epoch 98 Batch 2/21 train_loss = 0.220
# - Epoch 98 Batch 12/21 train_loss = 0.187
# - Epoch 99 Batch 1/21 train_loss = 0.200
# - Epoch 99 Batch 11/21 train_loss = 0.194
# - Model Trained and Saved
#
#
# moe_szyslak:(looking at homer) you're right. he needs some professional help...
# duffman: ooh, someone is down in the sack
# homer_simpson: i can't believe you don't be your foot, homer.
# homer_simpson: i had the greatest gift of all, a little girl who could pick me to go halvsies on a ring.
# edna_krabappel-flanders: seymour...(ad lib singing) my bar could be in.
# homer_simpson:(derisive snort) kent brockman!
# homer_simpson:(touched) aw, that's my fourth grade teacher!
# carl_carlson: are you gonna be okay?
# barney_gumble:(reciting)" your infatuation is based on a physical attraction. talk to the woman of a way to paris...
# homer_simpson:(chuckles at injury) yeah, but at least we're hearing some interesting conversation from those two book clubs.
# book_club_member: well, well, look who's it.
# moe_szyslak:(amid men's reactions) you got that right!
# seymour_skinner: edna won't even let me in here?
# moe_szyslak:(nods) keep my tail
# lstm_layers = 1
#
#
# num_epochs = 100
#
# batch_size = 128
#
# rnn_size = 512
#
# embed_dim = 256
#
# seq_length = 25
#
# learning_rate = 0.005
#
# show_every_n_batches = 10
#
# - Epoch 0 Batch 0/21 train_loss = 8.821
# - Epoch 0 Batch 10/21 train_loss = 6.080
# - Epoch 0 Batch 20/21 train_loss = 5.732
# - Epoch 1 Batch 9/21 train_loss = 5.324
# - Epoch 1 Batch 19/21 train_loss = 5.043
# - Epoch 2 Batch 8/21 train_loss = 4.830
# - Epoch 2 Batch 18/21 train_loss = 4.676
# - Epoch 3 Batch 7/21 train_loss = 4.487
# - Epoch 3 Batch 17/21 train_loss = 4.379
# - Epoch 4 Batch 6/21 train_loss = 4.250
# - Epoch 4 Batch 16/21 train_loss = 4.130
# - Epoch 5 Batch 5/21 train_loss = 3.969
# - Epoch 5 Batch 15/21 train_loss = 3.855
# - Epoch 6 Batch 4/21 train_loss = 3.836
# - Epoch 6 Batch 14/21 train_loss = 3.543
# - Epoch 7 Batch 3/21 train_loss = 3.556
# - Epoch 7 Batch 13/21 train_loss = 3.392
# - Epoch 8 Batch 2/21 train_loss = 3.375
# - Epoch 8 Batch 12/21 train_loss = 3.161
# - Epoch 9 Batch 1/21 train_loss = 3.066
# - Epoch 9 Batch 11/21 train_loss = 2.921
# - Epoch 10 Batch 0/21 train_loss = 2.914
# - Epoch 10 Batch 10/21 train_loss = 2.747
# - Epoch 10 Batch 20/21 train_loss = 2.785
# - Epoch 11 Batch 9/21 train_loss = 2.561
# - Epoch 11 Batch 19/21 train_loss = 2.517
# - Epoch 12 Batch 8/21 train_loss = 2.472
# - Epoch 12 Batch 18/21 train_loss = 2.465
# - Epoch 13 Batch 7/21 train_loss = 2.287
# - Epoch 13 Batch 17/21 train_loss = 2.204
# - Epoch 14 Batch 6/21 train_loss = 2.219
# - Epoch 14 Batch 16/21 train_loss = 2.129
# - Epoch 15 Batch 5/21 train_loss = 2.144
# - Epoch 15 Batch 15/21 train_loss = 2.043
# - Epoch 16 Batch 4/21 train_loss = 1.970
# - Epoch 16 Batch 14/21 train_loss = 1.869
# - Epoch 17 Batch 3/21 train_loss = 1.856
# - Epoch 17 Batch 13/21 train_loss = 1.842
# - Epoch 18 Batch 2/21 train_loss = 1.911
# - Epoch 18 Batch 12/21 train_loss = 1.711
# - Epoch 19 Batch 1/21 train_loss = 1.657
# - Epoch 19 Batch 11/21 train_loss = 1.650
# - Epoch 20 Batch 0/21 train_loss = 1.666
# - Epoch 20 Batch 10/21 train_loss = 1.575
# - Epoch 20 Batch 20/21 train_loss = 1.571
# - Epoch 21 Batch 9/21 train_loss = 1.490
# - Epoch 21 Batch 19/21 train_loss = 1.444
# - Epoch 22 Batch 8/21 train_loss = 1.440
# - Epoch 22 Batch 18/21 train_loss = 1.395
# - Epoch 23 Batch 7/21 train_loss = 1.361
# - Epoch 23 Batch 17/21 train_loss = 1.257
# - Epoch 24 Batch 6/21 train_loss = 1.269
# - Epoch 24 Batch 16/21 train_loss = 1.220
# - Epoch 25 Batch 5/21 train_loss = 1.271
# | |
"""
Copyright 2019 Cartesi Pte. Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from concurrent import futures
from threading import Lock
import signal
import time
import math
import grpc
import sys
import traceback
import argparse
import pickle
from grpc_reflection.v1alpha import reflection
import machine_discovery_pb2_grpc
import machine_discovery_pb2
import machine_manager_pb2_grpc
import machine_manager_pb2
import cartesi_machine_pb2
import utils
from session_registry import SessionIdException, AddressException, RollbackException
# docker graceful shutdown, raise a KeyboardInterrupt in case of SIGTERM
def handle_sigterm(*args):
    """Signal handler that converts SIGTERM into KeyboardInterrupt."""
    raise KeyboardInterrupt
# Install the SIGTERM handler at import time so container stops are graceful.
signal.signal(signal.SIGTERM, handle_sigterm)
# Module-level logger, created and configured via the project's utils helpers.
LOGGER = utils.get_new_logger(__name__)
LOGGER = utils.configure_log(LOGGER)
# Default gRPC listening endpoint.
LISTENING_ADDRESS = 'localhost'
LISTENING_PORT = 50051
# Sleep interval in seconds — used by code outside this chunk; TODO confirm usage.
SLEEP_TIME = 5
class NotReadyException(Exception):
    """Raised when a session job has been submitted but its result is not ready yet."""
    pass
class SessionJob:
    """Bookkeeping for the single in-flight background job of one session."""
    def __init__(self, session_id):
        self.id = session_id  # session identifier this job belongs to
        self.job_hash = None  # pickled request bytes identifying the job's request
        self.job_future = None  # concurrent.futures.Future of the submitted work, or None
class _MachineManager(machine_manager_pb2_grpc.MachineManagerServicer):
    """gRPC servicer implementing the machine manager API.

    Long-running work (SessionRun) is executed on a thread pool and polled by
    the client: the first call submits the job and the servicer returns a
    progress response; later calls with the same request return the finished
    result.  Completed results are memoized in ``job_cache`` keyed by the
    pickled request bytes.
    """

    def __init__(self, session_registry_manager):
        # Worker pool for background session jobs.
        self.executor = futures.ThreadPoolExecutor(max_workers=10)
        self.session_registry_manager = session_registry_manager
        # Serializes all access to the job bookkeeping below.
        self.global_lock = Lock()
        # NOTE(review): entries are never evicted, so this cache grows without
        # bound over the server's lifetime — consider bounding it.
        self.job_cache = {}
        # session_id -> SessionJob for the currently submitted job.
        self.job = {}

    def __set_job_cache__(self, request, future):
        """Memoize a finished job's future under its request hash and return its result."""
        LOGGER.debug("Setting job cache")
        # future.result() re-raises any exception from the worker, so the
        # cache line below only runs for successful jobs.
        result = future.result()
        request_hash = pickle.dumps(request)
        #Cache the job only if no exception raised
        self.job_cache[request_hash] = future
        return result

    def __set_job_future__(self, session_id, future):
        """Record the Future of the job submitted for this session."""
        self.job[session_id].job_future = future

    def __set_job_hash__(self, session_id, request):
        """Record which request (pickled bytes) the session's job serves."""
        self.job[session_id].job_hash = request

    def __reset_job__(self, session_id):
        """Clear the session's job slot so a new request can be submitted."""
        self.job[session_id].job_future = None
        self.job[session_id].job_hash = None

    def __get_job__(self, session_id, request, err_msg, fn, *args):
        """Return the finished Future for ``request``, or submit it and raise.

        Raises:
            NotReadyException: when the job was just submitted or is still
                running; callers turn this into a progress response.
        """
        LOGGER.debug("Acquiring manager global lock")
        with self.global_lock:
            LOGGER.debug("Lock acquired")
            request_hash = pickle.dumps(request)
            # Fast path: an identical request already completed successfully.
            if request_hash in self.job_cache.keys():
                LOGGER.debug("Job found in cache")
                return self.job_cache[request_hash]
            if session_id in self.job.keys():
                if self.job[session_id].job_future is not None:
                    if self.job[session_id].job_future.done():
                        LOGGER.debug("Job is done")
                        if request_hash == self.job[session_id].job_hash:
                            LOGGER.debug("Request hash matches, return job")
                            job = self.job[session_id].job_future
                            self.__reset_job__(session_id)
                            return job
                        else:
                            # A different request arrived: discard the old
                            # result and fall through to submit a fresh job.
                            LOGGER.debug("Request hash not match, dump result and start fresh")
                    else:
                        LOGGER.debug("Job is not done")
                        raise NotReadyException(err_msg)
            else:
                LOGGER.debug("First SessionJob creation")
                self.job[session_id] = SessionJob(session_id)
            # Submit the work and tell the caller to poll again.
            self.__set_job_hash__(session_id, request_hash)
            self.__set_job_future__(session_id, self.executor.submit(fn, *args))
            raise NotReadyException(err_msg)

    def ServerShuttingDown(self, context):
        """Return True (and set UNAVAILABLE on the context) when shutting down."""
        if self.session_registry_manager.shutting_down:
            context.set_details("Server is shutting down, not accepting new requests")
            context.set_code(grpc.StatusCode.UNAVAILABLE)
            return True
        else:
            return False

    def NewSession(self, request, context):
        """Create a new machine session from the request's machine spec."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            machine_req = request.machine
            force = request.force
            LOGGER.info("New session requested with session_id: {}".format(session_id))
            return self.session_registry_manager.new_session(session_id, machine_req, force)
        #No session with provided id or address issue
        except (SessionIdException, AddressException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionRun(self, request, context):
        """Run a session to the requested final cycles (asynchronous/polled).

        Returns the run result once finished; while the background job is
        still running, returns a SessionRunResponse carrying progress.
        """
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            final_cycles = request.final_cycles
            LOGGER.info("New session run requested for session_id {} with final cycles {}".format(session_id, final_cycles))
            #Validate cycle values
            utils.validate_cycles(final_cycles)
            err_msg = "Result is not yet ready for SessionRun: " + session_id
            job = self.__get_job__(session_id, request, err_msg, self.session_registry_manager.run_session, session_id, final_cycles)
            return self.__set_job_cache__(request, job)
        #If the session result is not ready yet, return progress
        except NotReadyException:
            LOGGER.debug("Not ready yet, getting progress")
            session_context = self.session_registry_manager.registry[session_id]
            #Calculating cycles related progress
            last_cycle = request.final_cycles[-1]
            # Fixed: compare to None with `is not None`, not `!=` (PEP 8 E711).
            if session_context.halt_cycle is not None:
                # The machine can't run past its halt cycle; cap the target.
                if last_cycle > session_context.halt_cycle:
                    last_cycle = session_context.halt_cycle
            cycle_progress = 0
            #Calculating percentage progress with 2 decimal places, if machine already in a cycle
            #that allows it to run to the desired cycle
            if (session_context.cycle <= last_cycle):
                cycle_progress = int(int(session_context.cycle/last_cycle * 10000) / 100)
            #Build a status object to return
            session_run_progress = machine_manager_pb2.SessionRunProgress(
                    progress=cycle_progress,
                    application_progress=session_context.app_progress,
                    updated_at=int(session_context.updated_at),
                    cycle=session_context.cycle
                    )
            return machine_manager_pb2.SessionRunResponse(progress=session_run_progress)
        #No session with provided id, address issue, bad final cycles provided or problem during rollback
        except (SessionIdException, AddressException, utils.CycleException, RollbackException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionStep(self, request, context):
        """Step a session one cycle from ``initial_cycle``, returning the access log."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            initial_cycle = request.initial_cycle
            step_params = None
            #Setting step_params if provided
            if (request.WhichOneof("step_params_oneof") is not None):
                if (request.WhichOneof("step_params_oneof") == "step_params"):
                    step_params = request.step_params
                    LOGGER.info("Step parameters received on request")
            #Setting default step parameters if none were provided
            # Fixed: compare to None with `is None`, not `==` (PEP 8 E711).
            if (step_params is None):
                log_type = cartesi_machine_pb2.AccessLogType(proofs=True, annotations=False)
                step_params = cartesi_machine_pb2.StepRequest(log_type=log_type)
                LOGGER.info("Step parameters set to default")
            LOGGER.info("New session step requested for session_id {} with initial cycle {}\nLog proofs: {}\nLog annotations: {}".format(session_id, initial_cycle, step_params.log_type.proofs, step_params.log_type.annotations))
            #Validate cycle value
            utils.validate_cycles([initial_cycle])
            return self.session_registry_manager.step_session(session_id, initial_cycle, step_params)
        #No session with provided id, address issue, bad initial cycle provided or problem during rollback
        except (SessionIdException, AddressException, utils.CycleException, RollbackException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionStore(self, request, context):
        """Store a session's machine to the requested directory."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            store_req = request.store
            LOGGER.info("New session store requested for session_id {} on directory {}".format(session_id, store_req.directory))
            return self.session_registry_manager.session_store(session_id, store_req)
        #No session with provided id or address issue
        except (SessionIdException, AddressException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionReadMemory(self, request, context):
        """Read a memory range from a session's machine at the given cycle."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            read_mem_req = request.position
            cycle = request.cycle
            LOGGER.info("New session memory read requested for session_id {} on cycle {} for address {} with length {}".format(session_id, cycle, read_mem_req.address, read_mem_req.length))
            return self.session_registry_manager.session_read_mem(session_id, cycle, read_mem_req)
        #No session with provided id or address issue
        except (SessionIdException, AddressException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionWriteMemory(self, request, context):
        """Write data into a session's machine memory at the given cycle."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            write_mem_req = request.position
            cycle = request.cycle
            LOGGER.info("New session memory write requested for session_id {} on cycle {} for address {} with data {}".format(session_id, cycle, write_mem_req.address, write_mem_req.data))
            return self.session_registry_manager.session_write_mem(session_id, cycle, write_mem_req)
        #No session with provided id or address issue
        except (SessionIdException, AddressException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)

    def SessionGetProof(self, request, context):
        """Return a Merkle proof for a target address/size at the given cycle."""
        try:
            if self.ServerShuttingDown(context):
                return
            session_id = request.session_id
            proof_req = request.target
            cycle = request.cycle
            LOGGER.info("New session proof requested for session_id {} on cycle {} for address {} with log2_size {}".format(session_id, cycle, proof_req.address, proof_req.log2_size))
            return self.session_registry_manager.session_get_proof(session_id, cycle, proof_req)
        #No session with provided id or address issue
        except (SessionIdException, AddressException) as e:
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        #Generic error catch
        except Exception as e:
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)
class _MachineDiscovery(machine_discovery_pb2_grpc.MachineDiscoveryServicer):
    """Servicer through which cartesi machine servers announce the address they listen on."""

    def __init__(self, session_registry_manager):
        self.session_registry_manager = session_registry_manager

    def CommunicateAddress(self, request, context):
        """Register the address a machine server reports for its session."""
        try:
            session_id = request.session_id
            address = request.address
            LOGGER.info("Received a CommunicateAddress request for session_id {} and address {}".format(session_id, address))
            self.session_registry_manager.register_address_for_session(session_id, address)
            return cartesi_machine_pb2.Void()
        except SessionIdException as e:
            # Unknown session id -> INVALID_ARGUMENT
            LOGGER.error(e)
            context.set_details("{}".format(e))
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        except Exception as e:
            # Anything unexpected -> UNKNOWN, with the traceback in the log
            LOGGER.error("An exception occurred: {}\nTraceback: {}".format(e, traceback.format_exc()))
            context.set_details('An exception with message "{}" was raised!'.format(e))
            context.set_code(grpc.StatusCode.UNKNOWN)
def serve(args):
    """Build and run the machine manager gRPC server; blocks until Ctrl-C.

    On KeyboardInterrupt, the registry is flagged as shutting down (under
    its global lock), every registered cartesi machine server is asked to
    shut down, and finally this gRPC server is stopped.
    """
    listening_add = args.address
    listening_port = args.port
    # Import the defective session registry only when the --defective test
    # flag is set; both modules expose the same SessionRegistryManager name.
    if args.defective:
        from defective_session_registry import SessionRegistryManager
    else:
        from session_registry import SessionRegistryManager
    manager_address = '{}:{}'.format(listening_add, listening_port)
    session_registry_manager = SessionRegistryManager(manager_address)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    machine_manager_pb2_grpc.add_MachineManagerServicer_to_server(_MachineManager(session_registry_manager),
                                                                  server)
    machine_discovery_pb2_grpc.add_MachineDiscoveryServicer_to_server(_MachineDiscovery(session_registry_manager),
                                                                      server)
    # Expose the service names through gRPC server reflection.
    SERVICE_NAMES = (
        machine_manager_pb2.DESCRIPTOR.services_by_name['MachineManager'].full_name,
        machine_discovery_pb2.DESCRIPTOR.services_by_name['MachineDiscovery'].full_name,
        reflection.SERVICE_NAME,
    )
    reflection.enable_server_reflection(SERVICE_NAMES, server)
    server.add_insecure_port(manager_address)
    server.start()
    LOGGER.info("Server started, listening on address {} and port {}".format(listening_add, listening_port))
    try:
        # server.start() does not block; park the main thread until Ctrl-C.
        while True:
            time.sleep(SLEEP_TIME)
    except KeyboardInterrupt:
        LOGGER.info("\nIssued to shut down")
        LOGGER.debug("Acquiring session registry global lock")
        # Take the global lock first so no new sessions are created, then
        # each session's own lock before shutting its server down.
        with session_registry_manager.global_lock:
            LOGGER.debug("Session registry global lock acquired")
            session_registry_manager.shutting_down = True
            # Shut down every active session's machine server.
            for session_id in session_registry_manager.registry.keys():
                LOGGER.debug("Acquiring lock for session {}".format(session_id))
                with session_registry_manager.registry[session_id].lock:
                    LOGGER.debug("Lock for session {} acquired".format(session_id))
                    if (session_registry_manager.registry[session_id].address):
                        utils.shutdown_cartesi_machine_server(session_id, session_registry_manager.registry[session_id].address)
        shutdown_event = server.stop(0)
        LOGGER.info("Waiting for server to stop")
        shutdown_event.wait()
        LOGGER.info("Server stopped")
if __name__ == '__main__':
#Adding argument parser
description = "Instantiates a machine manager server, responsible for managing and interacting with multiple cartesi machine instances"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'--address', '-a',
dest='address',
default=LISTENING_ADDRESS,
help='Address to listen (default: {})'.format(LISTENING_ADDRESS)
)
parser.add_argument(
'--port', '-p',
dest='port',
default=LISTENING_PORT,
help='Port to listen (default: {})'.format(LISTENING_PORT)
)
parser.add_argument(
'--defective', '-d',
dest='defective',
action='store_true',
help='Makes server behave improperly, injecting errors silently in the issued commands\n\n' + '-'*23 + 'WARNING!' + '-'*23 + 'FOR TESTING PURPOSES ONLY!!!\n' + | |
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import ast
import re
import os.path
import tempfile
import shutil
import hashlib
import stat
from prestring.python import PythonModule
from functools import partial
from collections import namedtuple
from io import StringIO
from kamo.expr import (
WithContextExprVistor,
collect_variable_name
)
marker = object()
"""
{module} :: {statement}+
{statement} :: {doctag} | {comment} | {pythoncode} | {if} | {for} | {deftag} | {text}
{doctag} :: '<%doc>' {text} '<%/doc>'
{comment} :: '##' {text}
{pythoncode} :: '<%' {text} '%>'
{if} :: '%if' {expr} ':' {text} ['%elif' {text} ':' {text}]* ['%else' {text} ':' {text}]? '%endif'
{for} :: '%for' {expr} 'in' {expr} ':' {text} %endfor
{deftag} :: '<%def' {defname} '>' {text} '</%def>'
{expr} :: {text - {newline}} | '(' {text} ')'
{newline} :: '\n'
{text} :: [{expr} '\n']+
"""
class Intern(object):
    """Unique marker token, compared by object identity (`is`) in the parser."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<Intern {!r}>'.format(self.name)
class Line(object):
    # Simple wrapper around a raw template line.
    # NOTE(review): not referenced anywhere in the visible portion of this
    # module — confirm it is used elsewhere before removing.
    def __init__(self, body):
        self.body = body
# Sentinel tokens emitted by the Lexer.  The Parser compares these by
# identity (`is`), so each template tag has exactly one Intern instance.
begin_doc = Intern("<%doc>")
end_doc = Intern("</%doc>")
begin_def = Intern("<%def>")
end_def = Intern("</%def>")
comment = Intern("##")
begin_code = Intern("<%")
end_code = Intern("%>")
begin_if = Intern("%if")
begin_else = Intern("%else")
begin_elif = Intern("%elif")
end_if = Intern("%endif")
begin_for = Intern("%for")
end_for = Intern("%endfor")
class Scanner(re.Scanner):
    """re.Scanner that accumulates emitted tokens on ``self.body``.

    The lexicon actions call back into ``append``/``extend``; calling the
    instance with a template string scans it line by line and returns the
    accumulated token list.
    """

    def __init__(self, *args, **kwargs):
        super(Scanner, self).__init__(*args, **kwargs)
        self.body = []

    def append(self, x):
        self.body.append(x)

    def extend(self, x):
        self.body.extend(x)

    def __call__(self, body):
        # Tokens are produced per line; newlines themselves are consumed here.
        lines = body.split("\n")
        for line in lines:
            self.scan(line)
        return self.body
# Lexicon for the template scanner: (regex, action) pairs tried in order.
# Actions push sentinel tokens and/or captured text onto the Scanner body.
# NOTE: '%>' deliberately emits `end_doc` (not `end_code`) — Parser.parse_code
# also terminates on `end_doc`, so the pairing is internally consistent;
# confirm before "fixing" either side.
Lexer = partial(Scanner, [
    ('\s*<%doc>(.+)(?=</%doc>)', lambda s, x: s.extend([begin_doc, s.match.group(1)])),  # one-line <%doc>...</%doc>
    ("\s*<%(!?)\s*(.+)\s*(?=%>)", lambda s, x: s.extend([begin_code, s.match.group(1), s.match.group(2)])),  # one-line <% ... %>; group(1) is the '!' module-level flag
    ('\s*<%doc>', lambda s, x: s.append(begin_doc)),
    ('\s*</%doc>', lambda s, x: s.append(end_doc)),
    ('\s*<%def\s*name="([^>]+)"\s*>', lambda s, x: s.extend([begin_def, s.match.group(1)])),  # captures the name="..." signature
    ('\s*</%def>', lambda s, x: s.append(end_def)),
    ('\s*## (.*)', lambda s, x: s.extend((comment, s.match.group(1)))),
    ("\s*<%(!?)", lambda s, x: s.extend([begin_code, s.match.group(1)])),  # multi-line code block opener
    ("\s*%>", lambda s, x: s.append(end_doc)),  # see NOTE above
    ("\s*%\s*if", lambda s, x: s.append(begin_if)),
    ("\s*%\s*elif", lambda s, x: s.append(begin_elif)),
    ("\s*%\s*else", lambda s, x: s.append(begin_else)),
    ("\s*%\s*endif", lambda s, x: s.append(end_if)),
    ("\s*%\s*for", lambda s, x: s.append(begin_for)),
    ("\s*%\s*endfor", lambda s, x: s.append(end_for)),
    (".+", lambda s, x: s.append(x))  # fallback: plain template text
])
Doc = namedtuple("Doc", "body multiline")
Code = namedtuple("Code", "body ast declared is_module_level")
Def = namedtuple("Def", "body name args declared")
Text = namedtuple("Text", "body")
Expr = namedtuple("Expr", "body ast decorators declared")
If = namedtuple("If", "keyword expr body") # xxx: include if, elif, else
For = namedtuple("For", "keyword expr src body")
Optimized = namedtuple("Optimized", "tokens")
class Parser(object):
    """Turn the flat token stream produced by the Lexer into a tree of
    Doc/Code/Def/Text/If/For nodes.

    Nesting (if/for bodies) is modelled with "frames": ``self.frame`` is the
    list currently being appended to, and ``self.depth`` counts how deeply it
    is nested inside ``self.stack[0]`` (the top-level body).  A nested frame
    is stored *inside* its parent frame, always as the parent's last element
    while it is open.
    """
    def __init__(self):
        # stack[0] is the top-level statement list; nested frames live
        # inside their parent frame, not on this stack.
        self.stack = [[]]
        self.frame = self.stack[-1]
        self.depth = 0
        self.i = 0  # cursor into the token list being parsed
    @property
    def body(self):
        # The top-level statement list (the parse result).
        return self.stack[0]
    def push_frame(self):
        # [[x, y, <pos>]] -> [[x, y, [<pos>]]]
        frame = []
        self.frame.append(frame)
        self.depth += 1
        self.frame = frame
    def pop_frame(self):
        # Re-locate the parent frame by walking `depth` trailing elements
        # down from the stack root (each open frame is the last element of
        # its parent at this point).
        frame = self.stack
        for i in range(self.depth):
            frame = frame[-1]
        self.depth -= 1
        self.frame = frame
    def parse_expr(self, expr, decorators=None, is_declared=False):
        # Parse a template expression string into an Expr node.  When
        # `is_declared` is true (e.g. a %for loop target), the names bound
        # by the expression are recorded in `declared`.
        ast_node = ast.parse(expr).body[0]
        if is_declared:
            declared = collect_variable_name(ast_node)
        else:
            declared = set()
        return Expr(expr,
                    ast_node,
                    decorators=decorators or [],
                    declared=declared)
    def __call__(self, tokens):
        # Parse the whole token list and return the top-level node list.
        self.i = 0
        n = len(tokens)
        while n > self.i:
            self.parse_statement(tokens)
        return self.body
    def parse_statement(self, tokens):
        # Dispatch on the sentinel token at the cursor; anything that is
        # not a known sentinel is literal template text.
        t = tokens[self.i]
        if t is begin_doc:
            self.parse_doc(tokens)
        elif t is comment:
            self.parse_comment(tokens)
        elif t is begin_code:
            self.parse_code(tokens)
        elif t is begin_if:
            self.parse_if(tokens)
        elif t is begin_elif:
            self.parse_elif(tokens)
        elif t is begin_else:
            self.parse_else(tokens)
        elif t is end_if:
            self.parse_end_if(tokens)
        elif t is begin_for:
            self.parse_for(tokens)
        elif t is end_for:
            self.parse_end_for(tokens)
        elif t is begin_def:
            self.parse_def(tokens)
        else:
            self.parse_text(tokens)
    def parse_doc(self, tokens):
        # <%doc> ... </%doc>  ->  multiline Doc node.
        self.i += 1  # skip begin_doc
        body = []
        while tokens[self.i] is not end_doc:
            body.append(tokens[self.i])
            self.i += 1
        self.i += 1  # skip end_doc
        self.frame.append(Doc(body, multiline=True))
    def parse_comment(self, tokens):
        # ## comment  ->  single-line Doc node.
        self.i += 1  # skip comment sentinel
        self.frame.append(Doc([tokens[self.i]], multiline=False))
        self.i += 1
    def parse_code(self, tokens):
        # <% ... %> / <%! ... %>  ->  Code node.  The token after
        # begin_code is the '!' flag captured by the lexer ('' or '!'),
        # so a truthy value means module-level code.
        self.i += 1  # skip begin_code
        is_module_level = bool(tokens[self.i])
        self.i += 1  # skip the '!' flag token
        body = []
        # NOTE: the lexer emits end_doc for '%>' — terminate on that.
        while tokens[self.i] is not end_doc:
            body.append(tokens[self.i])
            self.i += 1
        self.i += 1  # skip terminator
        body = "\n".join(body)
        ast_node = ast.parse(body)
        declared = collect_variable_name(ast_node)
        self.frame.append(Code(body,
                               ast_node,
                               declared=declared,
                               is_module_level=is_module_level))
    def parse_def(self, tokens):
        # <%def name="name(args)"> ... </%def>  ->  Def node whose body is
        # one Text node of (chunk, is_emitting_var) pairs.
        self.i += 1  # skip begin_def
        body = []
        arguments = tokens[self.i]  # the captured 'name(args)' signature
        name = arguments.split("(", 1)[0]
        # NOTE(review): with an empty arg list 'name()' this yields [''] —
        # confirm downstream tolerates it.
        args = [e.strip() for e in arguments[len(name) + 1:-1].split(",")]
        self.i += 1
        while tokens[self.i] is not end_def:
            body.append(tokens[self.i])
            self.i += 1
        self.i += 1  # skip end_def
        parsedbody = []
        for token, is_emitting_var in split_with(self.emit_var_rx, "\n".join(body)):
            if is_emitting_var:
                token = token[2:-1]  # ${foo} -> foo
                token_with_filter = [e.strip(" ") for e in token.split("|")]  # foo|bar|boo -> [foo, bar, boo]
                token = token_with_filter[0]
                token = self.parse_expr(token, token_with_filter[1:])
            parsedbody.append((token, is_emitting_var))
        self.frame.append(Def([Text(parsedbody)], name, args, declared=set([name])))
    def parse_if(self, tokens):
        # Push the ('if', cond) header, then open a nested frame that
        # collects the branch body until %elif/%else/%endif closes it.
        self.i += 1  # skip begin_if
        self.frame.append(("if", self.parse_expr(tokens[self.i].strip(": "))))
        self.i += 1
        self.push_frame()
        self.parse_statement(tokens)
    def _create_if_block(self, tokens):
        # Close the open branch: pop its body frame and the pending
        # (keyword, cond) header, replacing both with one If node.
        self.pop_frame()
        body = self.frame.pop()
        keyword, cond = self.frame.pop()
        self.frame.append(If(keyword, cond, body))
    def parse_elif(self, tokens):
        self._create_if_block(tokens)
        self.i += 1  # skip begin_elif
        self.frame.append(("elif", self.parse_expr(tokens[self.i].strip(": "))))
        self.i += 1
        self.push_frame()
        self.parse_statement(tokens)
    def parse_else(self, tokens):
        self._create_if_block(tokens)
        self.i += 1  # skip begin_else
        self.frame.append(("else", None))  # else has no condition
        self.i += 1
        self.push_frame()
        self.parse_statement(tokens)
    def parse_end_if(self, tokens):
        self._create_if_block(tokens)
        self.i += 1
    def parse_for(self, tokens):
        # %for <target> in <source>:  — split on the *last* ' in ' so the
        # target expression may itself contain ' in '.
        self.i += 1  # skip begin_for
        expr, src = [e.strip(" ") for e in tokens[self.i].rsplit(" in ", 1)]
        expr = self.parse_expr(expr.strip(" "), is_declared=True)
        src = self.parse_expr(src.rstrip(": "))
        self.frame.append(("for", expr, src))
        self.i += 1
        self.push_frame()
        self.parse_statement(tokens)
    def parse_end_for(self, tokens):
        # Close the loop: pop the body frame and the pending header,
        # replacing both with one For node.
        self.pop_frame()
        body = self.frame.pop()
        keyword, expr, src = self.frame.pop()
        self.frame.append(For(keyword, expr, src, body))
        self.i += 1  # skip end_for
    # Matches a ${...} emission marker inside literal text (crude).
    emit_var_rx = re.compile("\${[^}]+}")
    def parse_text(self, tokens):
        # Literal line, possibly containing ${expr|filter|...} emissions.
        body = []
        for token, is_emitting_var in split_with(self.emit_var_rx, tokens[self.i]):
            if is_emitting_var:
                token = token[2:-1]  # ${foo} -> foo
                token_with_filter = [e.strip(" ") for e in token.split("|")]  # foo|bar|boo -> [foo, bar, boo]
                token = token_with_filter[0]
                token = self.parse_expr(token, token_with_filter[1:])
            body.append((token, is_emitting_var))
        self.frame.append(Text(body))
        self.i += 1
def split_with(rx, sentence):
    """Split ``sentence`` on matches of compiled regex ``rx``.

    Returns a list of ``(chunk, is_match)`` pairs covering the whole string
    in order; ``is_match`` is True for spans matched by ``rx``.  An empty
    sentence yields an empty list.
    """
    chunks = []
    while sentence:
        m = rx.search(sentence)
        if m is None:
            chunks.append((sentence, False))
            break
        if m.start() > 0:
            chunks.append((sentence[:m.start()], False))
        chunks.append((m.group(0), True))
        sentence = sentence[m.end():]
    return chunks
class _DeclaredStore(object):
def __init__(self):
self.stack = [set()]
def __contains__(self, k):
return any(k in frame for frame in self.stack)
def push_frame(self, s):
self.stack.append(s)
def pop_frame(self):
self.stack.pop()
class Optimizer(object):
    """Merge consecutive Text tokens into as few Text nodes as possible,
    recursing into If/For/Def bodies.

    ``text`` is a running accumulator of (chunk, is_emitting) pairs; it is
    flushed into ``result`` whenever a literal/emitted-expression boundary
    or a non-Text token is reached.  Output is wrapped in Optimized so the
    Compiler knows per-line newlines are already folded in.
    """
    def optimize(self, tokens, text, result):
        last_is_text = False
        for t in tokens:
            if isinstance(t, Text):
                emitting_status = False
                for pair in t.body:
                    if pair[1] == emitting_status:  # same kind: keep accumulating
                        text.body.append(pair)
                    else:
                        # literal/expression boundary: flush and restart
                        # the accumulator with this pair
                        emitting_status = not emitting_status
                        self.compact(text)
                        result.append(text)
                        text = Text([pair])
                # each source Text node ended a template line: append the
                # newline to a literal run, or flush and start a new run
                if text.body[-1][1] is False:
                    text.body.append(("\n", False))
                else:
                    self.compact(text)
                    result.append(text)
                    text = Text([("\n", False)])
                last_is_text = True
            else:
                if last_is_text:
                    # leaving a text run: flush before the structural node
                    self.compact(text)
                    result.append(text)
                    text = Text([("", False)])
                    last_is_text = False
                if isinstance(t, If):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(If(t.keyword, t.expr, body))
                elif isinstance(t, For):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(For(t.keyword, t.expr, t.src, body))
                elif isinstance(t, Def):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(Def(body, t.name, t.args, t.declared))
                else:
                    result.append(t)
        if last_is_text:
            # flush whatever is still accumulated at the end
            self.compact(text)
            result.append(text)
    def compact(self, text):
        # Join a run of literal chunks into one string chunk (in place).
        if text.body[0][1] is False:  # literal text run
            body = "".join(pair[0] for pair in text.body)
            text.body.clear()
            text.body.append((body, False))
        # Drop the empty-literal placeholder a fresh accumulator starts with.
        if text.body[0][0] == "":
            text.body.pop(0)
    def __call__(self, tokens):
        r = []
        self.optimize(tokens, Text([("", False)]), r)
        self.body = Optimized(r)
        return self.body
class Compiler(object):
    def __init__(self, m=None, default="''", getter="c[{!r}]", default_decorators=["str"]):
        # NOTE(review): mutable default for `default_decorators` is shared
        # across instances — harmless while never mutated, but confirm.
        self.depth = 0
        self.m = m or PythonModule()
        self.variables = None  # submodule holding per-render setup (e.g. write = io.write)
        self.toplevel = None   # submodule receiving '<%!' module-level code
        self.default = default
        self.getter = getter   # format template used to read names from the context dict
        self.declaredstore = _DeclaredStore()
        self.default_decorators = default_decorators
        self.optimized = False
    def __call__(self, tokens, name="render", args="io, **c"):
        """Compile parsed tokens into the source of a render function.

        from: ${x}
        create:
            def render(io, **c):
                write(str(c['x']))
        """
        if isinstance(tokens, Optimized):
            # Optimizer output: newlines are already folded into the Text
            # nodes, so visit_text must not add its own.
            tokens = tokens.tokens
            self.optimized = True
        self.toplevel = self.m.submodule()
        with self.m.def_(name, args):
            self.variables = self.m.submodule()
            self.variables.stmt("write = io.write")
            # self.variables.stmt("get = c.get")
            # self.variables.stmt("M = object()")
            for t in tokens:
                self.visit(t)
        self.optimized = False
        return self.m
    def visit(self, t):
        # Dispatch on node class name: Text -> visit_text, If -> visit_if, ...
        method = getattr(self, "visit_{}".format(t.__class__.__name__.lower()))
        method(t)
    def visit_text(self, node):
        # Emit write() calls: repr'd literals, or evaluated+decorated
        # expressions for ${...} chunks.
        for token, is_visit_var in node.body:
            if is_visit_var:
                self.m.stmt("write({})".format(self.calc_expr(token, emit=True)))
            else:
                self.m.stmt("write({!r})".format(token))
        if not self.optimized:
            # unoptimized Text nodes are one template line each
            self.m.stmt("write('\\n')")
    def visit_doc(self, doc):
        # Render docs/comments as '#' comment lines, framed when multiline.
        if doc.multiline:
            self.m.stmt("########################################")
        for line in doc.body:
            self.m.stmt("# {}".format(line))
        if doc.multiline:
            self.m.stmt("########################################")
        self.m.sep()
    def visit_code(self, code):
        # '<%!' code goes to the module top level, plain '<%' inline.
        m = self.toplevel if code.is_module_level else self.m
        for line in code.body.split("\n"):
            m.stmt(line)
        self.declaredstore.stack[-1].update(code.declared)
        m.sep()
    def visit_def(self, node):
        # Emit a nested function for <%def>; its arguments form a new
        # declared-names scope for the duration of the body.
        self.declaredstore.stack[-1].update(node.declared)
        with self.m.def_(node.name, *node.args):
            try:
                self.declaredstore.push_frame(set(node.args))
                for text in node.body:
                    self.visit_text(text)
                # NOTE(review): returns the literal '' rather than
                # self.default — confirm whether `default` should apply here.
                self.m.return_("''")
            finally:
                self.declaredstore.pop_frame()
    def calc_expr(self, expr, emit=False):
        # Render an Expr to python source, routing undeclared names through
        # the context getter.  Decorators (filters) are only applied when
        # the value is being emitted.
        io = StringIO()
        v = WithContextExprVistor(io, self.declaredstore, getter=self.getter)
        v.visit(expr.ast)
        result = io.getvalue()
        if emit:
            if expr.decorators:
                for f in expr.decorators:
                    result = "{}({})".format(f, result)
            for f in self.default_decorators:
                result = "{}({})".format(f, result)
        return result
    def visit_if(self, node):
        # One branch per If node; 'else' has no condition.
        if node.expr is None:  # else
            self.m.stmt("{}:".format(node.keyword))
        else:
            self.m.stmt("{} {}:".format(node.keyword, self.calc_expr(node.expr)))
        with self.m.scope():
            self._visit_children(node.body)
def visit_for(self, node):
self.m.stmt("{} {} | |
actual_type = type + "_tileset2"
elif ram_address == 0x02002F00: # BG1 8x8 tile mapping
actual_type = type + "_mapping1"
elif ram_address == 0x02019EE0: # BG2 8x8 tile mapping
actual_type = type + "_mapping2"
elif ram_address == 0x0600F000: # BG3 8x8 tile mapping
actual_type = type + "_mapping3"
elif ram_address == 0x02010654: # BG1 tileset tile type data
actual_type = type + "_tile_types1"
elif ram_address == 0x0202AEB4: # BG2 tileset tile type data
actual_type = type + "_tile_types2"
elif ram_address == 0x02027EB4: # BG2 collision layer data
actual_type = type + "_collision"
else:
actual_type = type + "_unknown"
self.assets.append(Asset(symbol.name + '_' + str(i), actual_type, asset_offset, data_length, compressed))
#print(hex(asset_offset), compressed, hex(ram_address), hex(data_length))
i += 1
def extract_room_properties(self, symbol_name: str) -> None:
symbol = self.current_controller.symbols.find_symbol_by_name(symbol_name)
if symbol is None:
self.api.show_error(self.name, f'Could not find symbol {symbol_name}')
return
reader = self.get_reader_for_symbol(symbol)
entity_list_1 = self.read_symbol(reader)
entity_list_2 = self.read_symbol(reader)
enemy_list = self.read_symbol(reader)
tile_entity_list = self.read_symbol(reader)
unknown_func_1 = self.read_symbol(reader)
unknown_func_2 = self.read_symbol(reader)
unknown_func_3 = self.read_symbol(reader)
state_changing_func = self.read_symbol(reader)
room_name = symbol_name[5:]
if entity_list_1:
self.replacements.append(f'{entity_list_1.name},Entities_{room_name}_0\n')
if entity_list_2:
self.replacements.append(f'{entity_list_2.name},Entities_{room_name}_1\n')
if enemy_list:
self.replacements.append(f'{enemy_list.name},Enemies_{room_name}\n')
if tile_entity_list:
self.replacements.append(f'{tile_entity_list.name},TileEntities_{room_name}\n')
if unknown_func_1:
self.replacements.append(f'{unknown_func_1.name},sub_unk1_{room_name}\n')
if unknown_func_2:
self.replacements.append(f'{unknown_func_2.name},sub_unk2_{room_name}\n')
if unknown_func_3:
self.replacements.append(f'{unknown_func_3.name},sub_unk3_{room_name}\n')
if state_changing_func:
self.replacements.append(f'{state_changing_func.name},sub_StateChange_{room_name}\n')
#print('ETTTT')
self.extract_entity_list(entity_list_1)
self.extract_entity_list(entity_list_2)
self.extract_entity_list(enemy_list)
#print('TILES')
self.extract_tile_entity_list(tile_entity_list)
#print(entity_list_1, entity_list_2, enemy_list, tile_entity_list, unknown_func_1, unknown_func_2, unknown_func_3, state_changing_func)
add_cnt = 0
while reader.cursor < symbol.length:
additional_entity_list = self.read_symbol(reader)
#print(additional_entity_list)
if additional_entity_list:
self.replacements.append(f'{additional_entity_list.name},gUnk_additional{add_cnt}_{room_name}\n')
# TODO detect delayed entity lists
# TODO also detect other non-list pointers?
# self.extract_entity_list(additional_entity_list)
add_cnt += 1
def slot_extract_current_entity_list(self) -> None:
symbol_name = QApplication.clipboard().text()
symbol = self.current_controller.symbols.find_symbol_by_name(symbol_name)
#symbol = self.current_controller.symbols.get_symbol_at(self.current_controller.address_resolver.to_local(self.current_controller.cursor))
try:
self.extract_entity_list(symbol)
except Exception:
traceback.print_exc()
self.api.show_error(self.name, 'Error in extracting entity list')
    def extract_entity_list(self, symbol: Symbol) -> list[str]:
        """Disassemble the entity list at ``symbol`` into entity macro lines.

        The generated assembly is copied to the clipboard and also returned.
        Returns None (not []) when ``symbol`` is None.
        """
        if symbol is None:
            return
        # Read 0x100 bytes past the symbol end so a list that overruns its
        # symbol can still be decoded; emission below stays within length.
        data = self.current_controller.rom.get_bytes(symbol.address, symbol.address+symbol.length + 0x100)
        reader = Reader(data, self.current_controller.symbols)
        lines = []
        # Each entry is 16 bytes; stop when fewer than 16 bytes remain.
        while reader.cursor + 15 < symbol.length:
            type_and_unknowns = reader.read_u8()
            type = type_and_unknowns & 0x0F              # low nibble: entity kind
            collision = (type_and_unknowns & 0xF0) >> 4  # high nibble
            unknowns = reader.read_u8()
            unknown_2 = unknowns & 0x0F         # currently unused below
            unknown_3 = (unknowns & 0xF0) >> 4  # currently unused below
            subtype = reader.read_u8()
            params_a = reader.read_u8()
            params_b = reader.read_u32()
            x = reader.read_u16()
            y = reader.read_u16()
            params_c = reader.read_u32()
            if type_and_unknowns == 0xff:  # End of list
                lines.append('\tentity_list_end')
                if reader.cursor == symbol.length:
                    break
                else:
                    # terminator mid-symbol: a further list follows
                    lines.append('\n')
                    continue
            line = ''
            # opt_param emits ', name=value' only when value differs from
            # the given default, keeping the output minimal.
            if type == 9:  # manager
                line = f'\tmanager subtype={hex(subtype)}'
                line += opt_param('x', '0x0', hex(x))
                line += opt_param('y', '0x0', hex(y))
                line += opt_param('unknown', '0xf', hex(unknowns))
                line += opt_param('collision', '0', str(collision))
                line += opt_param('paramA', '0x0', hex(params_a))
                line += opt_param('paramB', '0x0', hex(params_b))
                line += opt_param('paramC', '0x0', hex(params_c))
            elif type == 6:  # object
                line = f'\tobject_raw subtype={hex(subtype)}'
                line += opt_param('x', '0x0', hex(x))
                line += opt_param('y', '0x0', hex(y))
                line += opt_param('unknown', '0xf', hex(unknowns))
                line += opt_param('collision', '0', str(collision))
                line += opt_param('paramA', '0x0', hex(params_a))
                line += opt_param('paramB', '0x0', hex(params_b))
                # paramC may be a script pointer; emit the symbol name when
                # it resolves exactly to a known symbol
                script_symbol = self.current_controller.symbols.get_symbol_at(params_c-ROM_OFFSET)
                if script_symbol and script_symbol.address+ROM_OFFSET == params_c:
                    line += f', paramC={script_symbol.name}' # script pointer in object 0x6A
                else:
                    line += opt_param('paramC', '0x0', hex(params_c))
            elif type == 3:  # enemy
                line = f'\tenemy_raw subtype={hex(subtype)}'
                line += opt_param('x', '0x0', hex(x))
                line += opt_param('y', '0x0', hex(y))
                line += opt_param('unknown', '0xf', hex(unknowns))
                line += opt_param('collision', '0', str(collision))
                line += opt_param('paramA', '0x0', hex(params_a))
                line += opt_param('paramB', '0x0', hex(params_b))
                line += opt_param('paramC', '0x0', hex(params_c))
            elif type == 7:  # npc
                script = hex(params_c)
                script_symbol = self.current_controller.symbols.get_symbol_at(params_c-ROM_OFFSET)
                if script_symbol and script_symbol.address+ROM_OFFSET == params_c:
                    script = script_symbol.name
                line = f'\tnpc_raw subtype={hex(subtype)}'
                line += opt_param('x', '0x0', hex(x))
                line += opt_param('y', '0x0', hex(y))
                line += opt_param('unknown', '0x4f', hex(unknowns))
                line += opt_param('collision', '0', str(collision))
                line += opt_param('paramA', '0x0', hex(params_a))
                line += opt_param('paramB', '0x0', hex(params_b))
                line += f', script={script}'
            else:
                # generic fallback: always emit unknown verbatim
                line = f'\tentity_raw type={hex(type)}, subtype={hex(subtype)}'
                line += opt_param('x', '0x0', hex(x))
                line += opt_param('y', '0x0', hex(y))
                line += f', unknown={hex(unknowns)}'
                line += opt_param('collision', '0', str(collision))
                line += opt_param('paramA', '0x0', hex(params_a))
                line += opt_param('paramB', '0x0', hex(params_b))
                line += opt_param('paramC', '0x0', hex(params_c))
            lines.append(line + '\n')
        if reader.cursor < symbol.length:
            # leftover bytes that did not form a full 16-byte entry
            lines.append('@ unaccounted bytes\n')
            while reader.cursor < symbol.length:
                lines.append(f'\t.byte {reader.read_u8()}\n')
        QApplication.clipboard().setText(''.join(lines))
        return lines
def slot_extract_current_tile_entity_list(self) -> None:
symbol = self.current_controller.symbols.get_symbol_at(self.current_controller.address_resolver.to_local(self.current_controller.cursor))
try:
self.extract_tile_entity_list(symbol)
except Exception:
traceback.print_exc()
self.api.show_error(self.name, 'Error in extracting tile entity list')
    def extract_tile_entity_list(self, symbol: Symbol) -> list[str]:
        """Disassemble the tile entity list at ``symbol`` into tile_entity lines.

        The generated assembly is printed, copied to the clipboard and
        returned.  Returns None (not []) when ``symbol`` is None.
        """
        if symbol is None:
            return
        print('tile entity list ', symbol)
        # Read 0x100 bytes past the symbol end; decoding stays within length.
        data = self.current_controller.rom.get_bytes(symbol.address, symbol.address+symbol.length + 0x100)
        reader = Reader(data, self.current_controller.symbols)
        lines = []
        # Each entry is 8 bytes; type 0 terminates the list.
        while reader.cursor < symbol.length:
            type = reader.read_u8()
            params_a = reader.read_u8()
            params_b = reader.read_u16()
            params_c = reader.read_u16()
            params_d = reader.read_u16()
            if type == 0:
                lines.append('\ttile_entity_list_end')
                break
            # opt_param emits ', name=value' only when value differs from default
            line = f'\ttile_entity type={hex(type)}'
            line += opt_param('paramA', '0x0', hex(params_a))
            line += opt_param('paramB', '0x0', hex(params_b))
            line += opt_param('paramC', '0x0', hex(params_c))
            line += opt_param('paramD', '0x0', hex(params_d))
            lines.append(line + '\n')
        if reader.cursor < symbol.length:
            # leftover bytes after the terminator
            lines.append('@ unaccounted bytes\n')
            while reader.cursor < symbol.length:
                lines.append(f'\t.byte {reader.read_u8()}\n')
        print()
        print (''.join(lines))
        QApplication.clipboard().setText(''.join(lines))
        return lines
def slot_extract_current_delayed_entity_list(self) -> None:
symbol = self.current_controller.symbols.get_symbol_at(self.current_controller.address_resolver.to_local(self.current_controller.cursor))
try:
self.extract_delayed_entity_list(symbol)
except Exception:
traceback.print_exc()
self.api.show_error(self.name, 'Error in extracting delayed entity list')
    def extract_delayed_entity_list(self, symbol: Symbol) -> list[str]:
        """Disassemble the delayed entity list at ``symbol`` into macro lines.

        The generated assembly is printed, copied to the clipboard and
        returned.  Returns None (not []) when ``symbol`` is None.
        """
        if symbol is None:
            return
        # Read 0x100 bytes past the symbol end; decoding stays within length.
        data = self.current_controller.rom.get_bytes(symbol.address, symbol.address+symbol.length + 0x100)
        reader = Reader(data, self.current_controller.symbols)
        lines = []
        # Each entry is 16 bytes; stop when fewer than 16 bytes remain.
        while reader.cursor + 15 < symbol.length:
            subtype = reader.read_u8()
            params_a = reader.read_u8()
            params_b = reader.read_u8()
            layer = reader.read_u8()
            x = reader.read_u16()
            y = reader.read_u16()
            params_c = reader.read_u32()
            params_d = reader.read_u16()
            conditions = reader.read_u16()
            if subtype == 0xff:  # End of list
                lines.append('\tentity_list_end')
                if reader.cursor == symbol.length:
                    break
                else:
                    # terminator mid-symbol: a further list follows
                    lines.append('\n')
                    continue
            # opt_param emits ', name=value' only when value differs from default
            line = f'\tdelayed_entity_raw subtype={hex(subtype)}'
            line += opt_param('x', '0x0', hex(x))
            line += opt_param('y', '0x0', hex(y))
            line += opt_param('layer', '0', str(layer))
            line += opt_param('paramA', '0x0', hex(params_a))
            line += opt_param('paramB', '0x0', hex(params_b))
            # paramC may be a script pointer; emit the symbol name when it
            # resolves exactly to a known symbol
            script_symbol = self.current_controller.symbols.get_symbol_at(params_c-ROM_OFFSET)
            if script_symbol and script_symbol.address+ROM_OFFSET == params_c:
                line += f', paramC={script_symbol.name}' # script pointer in object 0x6A
            else:
                line += opt_param('paramC', '0x0', hex(params_c))
            line += opt_param('paramD', '0x0', hex(params_d))
            line += opt_param('conditions', '0x0', hex(conditions))
            lines.append(line + '\n')
        if reader.cursor < symbol.length:
            # leftover bytes that did not form a full 16-byte entry
            lines.append('@ unaccounted bytes\n')
            while reader.cursor < symbol.length:
                lines.append(f'\t.byte {reader.read_u8()}\n')
        print()
        print (''.join(lines))
        QApplication.clipboard().setText(''.join(lines))
        return lines
def slot_extract_current_exit_region_list(self) -> None:
symbol_name = QApplication.clipboard().text()
symbol = self.current_controller.symbols.find_symbol_by_name(symbol_name)
#symbol = self.current_controller.symbols.get_symbol_at(self.current_controller.address_resolver.to_local(self.current_controller.cursor))
try:
self.extract_exit_region_list(symbol)
except Exception:
traceback.print_exc()
self.api.show_error(self.name, 'Error in extracting exit region list')
    def extract_exit_region_list(self, symbol: Symbol) -> list[str]:
        """Disassemble the exit region list at ``symbol`` into macro lines.

        The generated assembly is printed, copied to the clipboard and
        returned.  Returns None (not []) when ``symbol`` is None.
        """
        if symbol is None:
            return
        # Read 0x100 bytes past the symbol end; decoding stays within length.
        data = self.current_controller.rom.get_bytes(symbol.address, symbol.address+symbol.length + 0x100)
        reader = Reader(data, self.current_controller.symbols)
        lines = []
        # Each entry is 8 bytes; stop when fewer than 8 bytes remain.
        while reader.cursor + 7 < symbol.length:
            center_x = reader.read_u16()
            center_y = reader.read_u16()
            half_width = reader.read_u8()
            half_height = reader.read_u8()
            exit_pointer_property_index = reader.read_u8()
            bitfield = reader.read_u8()
            if center_x == 0xffff:  # End of list
                lines.append('\texit_region_list_end')
                if reader.cursor == symbol.length:
                    break
                else:
                    # terminator mid-symbol: a further list follows
                    lines.append('\n')
                    continue
            # opt_param emits ', name=value' only when value differs from default
            line = f'\texit_region_raw centerX={hex(center_x)}, centerY={hex(center_y)}'
            line += opt_param('halfWidth', '0x0', hex(half_width))
            line += opt_param('halfHeight', '0x0', hex(half_height))
            line += opt_param('exitIndex', '0x0', hex(exit_pointer_property_index))
            line += opt_param('bitfield', '0x0', hex(bitfield))
            lines.append(line + '\n')
        if reader.cursor < symbol.length:
            # leftover bytes that did not form a full 8-byte entry
            lines.append('@ unaccounted bytes\n')
            while reader.cursor < symbol.length:
                lines.append(f'\t.byte {reader.read_u8()}\n')
        print()
        print (''.join(lines))
        QApplication.clipboard().setText(''.join(lines))
        return lines
def slot_extract_current_exit(self) -> None:
symbol_name = QApplication.clipboard().text()
symbol = self.current_controller.symbols.find_symbol_by_name(symbol_name)
#symbol = self.current_controller.symbols.get_symbol_at(self.current_controller.address_resolver.to_local(self.current_controller.cursor))
try:
self.extract_exit(symbol)
except Exception:
traceback.print_exc()
self.api.show_error(self.name, 'Error in extracting exit region list')
def extract_exit(self, symbol: Symbol) -> list[str]:
if symbol is None:
return
data = self.current_controller.rom.get_bytes(symbol.address, symbol.address+symbol.length + 0x100)
reader = Reader(data, self.current_controller.symbols)
lines = []
transition_type = reader.read_u16()
x_pos = reader.read_u16()
y_pos = reader.read_u16()
dest_x = reader.read_u16()
dest_y = reader.read_u16()
screen_edge = reader.read_u8()
dest_area = reader.read_u8()
dest_room = reader.read_u8()
unknown_2 = reader.read_u8()
unknown_3 = reader.read_u8()
unknown_4 = reader.read_u8()
unknown_5 = reader.read_u16()
padding_1 = reader.read_u16()
assert(padding_1 == 0)
line = f'\texit_raw transition={hex(transition_type)}'
line += opt_param('x', '0x0', hex(x_pos))
line += opt_param('y', '0x0', hex(y_pos))
line += opt_param('destX', '0x0', hex(dest_x))
line += opt_param('destY', '0x0', hex(dest_y))
line += opt_param('screenEdge', '0x0', hex(screen_edge))
line += opt_param('destArea', '0x0', hex(dest_area))
line += opt_param('destRoom', '0x0', hex(dest_room))
line += opt_param('unknownA', '0x0', hex(unknown_2))
line += opt_param('unknownB', '0x0', hex(unknown_3))
line += opt_param('unknownC', '0x0', hex(unknown_4))
line += opt_param('unknownD', '0x0', hex(unknown_5))
lines.append(line)
if reader.cursor < symbol.length:
lines.append('@ unaccounted bytes\n')
while reader.cursor < symbol.length:
lines.append(f'\t.byte {reader.read_u8()}\n')
print()
print (''.join(lines))
| |
'61746933':{'en': 'Pittsworth'},
'61746428':{'en': 'Elbow Valley'},
'61746429':{'en': 'Freestone'},
'61746426':{'en': 'Allora'},
'61746427':{'en': 'Cunningham'},
'61746424':{'en': 'Ravensbourne'},
'61746425':{'en': 'Toowoomba'},
'61746422':{'en': 'Toowoomba'},
'61746423':{'en': 'Pittsworth'},
'61746420':{'en': 'Toowoomba'},
'61746421':{'en': 'Toowoomba'},
'61746938':{'en': '<NAME>'},
'61746939':{'en': 'Brookstead'},
'6138629':{'en': 'Melbourne'},
'6138628':{'en': 'Melbourne'},
'6138625':{'en': 'Melbourne'},
'6138624':{'en': 'Melbourne'},
'6138627':{'en': 'Melbourne'},
'6138626':{'en': 'Melbourne'},
'6138621':{'en': 'Melbourne'},
'6138620':{'en': 'Melbourne'},
'6138623':{'en': 'Melbourne'},
'6138622':{'en': 'Melbourne'},
'6139237':{'en': 'Croydon'},
'61745219':{'en': 'Bimbadeen/<NAME>'},
'61745214':{'en': 'Jundah/<NAME>/Macalister'},
'61745215':{'en': 'Longreach/Maranoa/Meandarra'},
'61745216':{'en': 'Muttaburra/Miamba/Miles'},
'61745217':{'en': 'Thomson/Millmerran/Mitchell'},
'61745210':{'en': 'Diamantina/Kilbeggan/Killarney'},
'61745211':{'en': 'Galilee/Kumbarilla/Kupunn'},
'61745212':{'en': 'Isisford/Legume/Leyburn'},
'61745213':{'en': 'Jericho/Liston/Longreach'},
'61361326':{'en': 'Huonville'},
'61361327':{'en': 'Strathgordon'},
'61361324':{'en': '<NAME>'},
'61361325':{'en': 'Margate'},
'61361322':{'en': 'Kempton'},
'61361323':{'en': 'Brighton'},
'61361320':{'en': 'Maydena'},
'61361321':{'en': 'Maydena'},
'61361328':{'en': 'Tarraleah'},
'61361329':{'en': 'Ouse'},
'61745715':{'en': 'Helidon'},
'61745714':{'en': 'Toowoomba'},
'6138481':{'en': 'Melbourne'},
'6138480':{'en': 'Melbourne'},
'61745717':{'en': 'Toowoomba'},
'6138486':{'en': 'Melbourne'},
'61745716':{'en': 'Millmerran'},
'61745637':{'en': 'Arcadia Valley'},
'61745711':{'en': 'Greenmount'},
'61745246':{'en': 'Pikedale/Toobeah'},
'6139684':{'en': 'Melbourne'},
'6139685':{'en': 'Melbourne'},
'6139686':{'en': 'Melbourne'},
'6139687':{'en': 'Melbourne'},
'6139680':{'en': 'Melbourne'},
'6139681':{'en': 'Melbourne'},
'6139682':{'en': 'Melbourne'},
'6139683':{'en': 'Melbourne'},
'61745712':{'en': 'Toowoomba'},
'6139688':{'en': 'Melbourne'},
'6139689':{'en': 'Melbourne'},
'6139230':{'en': 'Melbourne'},
'61745248':{'en': 'Bringalily/Valley Downs'},
'6139233':{'en': 'Melbourne'},
'61740068':{'en': 'Innot Hot Springs'},
'61740069':{'en': 'Torres'},
'6139538':{'en': 'Clayton'},
'6139539':{'en': 'Melbourne'},
'6139536':{'en': 'Melbourne'},
'6139537':{'en': 'Melbourne'},
'6139534':{'en': 'Melbourne'},
'6139535':{'en': 'Clayton'},
'6139532':{'en': 'Melbourne'},
'6139533':{'en': 'Melbourne'},
'61740062':{'en': 'Peninsula'},
'6139531':{'en': 'Melbourne'},
'61743295':{'en': 'Yandaran'},
'61362079':{'en': 'Brighton'},
'61362078':{'en': 'Richmond'},
'61362077':{'en': 'Richmond'},
'61362076':{'en': 'Richmond'},
'61362075':{'en': 'Ouse'},
'61362074':{'en': 'Ouse'},
'61362073':{'en': 'Ouse'},
'61362072':{'en': 'Orford'},
'61362071':{'en': 'Orford'},
'61362070':{'en': 'Orford'},
'61741718':{'en': 'Monogorilby'},
'61741911':{'en': 'Maryborough'},
'6138900':{'en': 'Dandenong'},
'6138901':{'en': 'Dandenong'},
'61364668':{'en': 'Burnie'},
'61364669':{'en': 'Currie'},
'61743269':{'en': 'Gooroolba'},
'61743268':{'en': '<NAME>'},
'61743267':{'en': 'Gayndah'},
'61743266':{'en': 'Bundaberg'},
'61743265':{'en': 'Bundaberg'},
'61743264':{'en': 'Bundaberg'},
'61743263':{'en': 'Bundaberg'},
'6138458':{'en': 'Melbourne'},
'61743261':{'en': 'Bundaberg'},
'61743260':{'en': 'Gaeta'},
'61742130':{'en': 'Thursday Island'},
'61742131':{'en': 'Cairns'},
'61742132':{'en': 'Mossman'},
'6138459':{'en': 'Melbourne'},
'61742134':{'en': 'Tully'},
'61742135':{'en': 'Innisfail'},
'61742136':{'en': 'Weipa'},
'61742137':{'en': 'Babinda'},
'61742138':{'en': 'Mossman'},
'61742139':{'en': 'Mossman'},
'61361520':{'en': 'Swansea'},
'61361521':{'en': 'Tarraleah'},
'61361522':{'en': 'Woodbury'},
'6173429':{'en': 'Brisbane'},
'6173422':{'en': 'Brisbane'},
'6173423':{'en': 'Brisbane'},
'6173420':{'en': 'Brisbane'},
'6173421':{'en': 'Brisbane'},
'6173426':{'en': 'Brisbane'},
'6173427':{'en': 'Beenleigh'},
'6173424':{'en': 'Ipswich'},
'6173425':{'en': 'Dayboro'},
'61741916':{'en': 'Maryborough'},
'61743083':{'en': 'Johnstown West'},
'61743082':{'en': 'Yandaran'},
'61743081':{'en': 'Eidsvold'},
'61743080':{'en': 'Manumbar'},
'61743087':{'en': 'Redridge'},
'61743086':{'en': 'Blackbutt'},
'61743085':{'en': 'Brooklands'},
'61743084':{'en': 'Kingaroy'},
'61743089':{'en': 'Murgon'},
'61743088':{'en': 'Gin Gin'},
'6174549':{'en': 'Toowoomba'},
'61742354':{'en': 'Thursday Island'},
'6136331':{'en': 'Launceston'},
'61742356':{'en': 'Kowanyama'},
'61742357':{'en': 'Atherton'},
'6136278':{'en': 'Hobart'},
'6136279':{'en': 'Hobart'},
'61742352':{'en': 'Walsh River'},
'6173386':{'en': 'Beenleigh'},
'6136274':{'en': 'Hobart'},
'6136275':{'en': 'Hobart'},
'6136277':{'en': 'Hobart'},
'6136270':{'en': 'Hobart'},
'6136271':{'en': 'Hobart'},
'6136272':{'en': 'Hobart'},
'6136273':{'en': 'Hobart'},
'6173384':{'en': 'Redcliffe'},
'6173383':{'en': 'Cleveland'},
'6173382':{'en': 'Beenleigh'},
'6173381':{'en': 'Ipswich'},
'6173380':{'en': 'Beenleigh'},
'6173288':{'en': 'Ipswich'},
'6173289':{'en': 'Samford'},
'6173286':{'en': 'Cleveland'},
'6173287':{'en': 'Beenleigh'},
'6173284':{'en': 'Redcliffe'},
'6173285':{'en': 'Redcliffe'},
'6173282':{'en': 'Ipswich'},
'6173283':{'en': 'Redcliffe'},
'6173280':{'en': 'Ipswich'},
'6173281':{'en': 'Ipswich'},
'61740747':{'en': 'Torres'},
'61363809':{'en': 'Launceston'},
'61363808':{'en': 'Launceston'},
'61363801':{'en': 'George Town'},
'61363800':{'en': 'George Town'},
'61363803':{'en': 'George Town'},
'61363802':{'en': 'George Town'},
'61363805':{'en': 'George Town'},
'61363804':{'en': 'George Town'},
'61363807':{'en': 'Launceston'},
'61363806':{'en': 'Launceston'},
'6139058':{'en': 'Melbourne'},
'6139051':{'en': 'Melbourne'},
'6139050':{'en': 'Melbourne'},
'6139052':{'en': 'Melbourne'},
'6139235':{'en': 'Melbourne'},
'6139234':{'en': 'Melbourne'},
'61393053':{'en': 'Craigieburn'},
'6139236':{'en': 'Melbourne'},
'61393055':{'en': 'Craigieburn'},
'61393054':{'en': 'Craigieburn'},
'61393057':{'en': 'Craigieburn'},
'6139940':{'en': 'Melbourne'},
'61393059':{'en': 'Craigieburn'},
'61393058':{'en': 'Melbourne'},
'6139239':{'en': 'Clayton'},
'6139238':{'en': 'Dandenong'},
'6139949':{'en': 'Melbourne'},
'6139948':{'en': 'Melbourne'},
'61387409':{'en': 'Dandenong'},
'61387408':{'en': 'Croydon'},
'61387403':{'en': 'Croydon'},
'61387402':{'en': 'Dandenong'},
'61387401':{'en': 'Croydon'},
'61387400':{'en': 'Croydon'},
'61387407':{'en': 'Whittlesea'},
'61387406':{'en': 'Kalkallo'},
'61387405':{'en': 'Sunbury'},
'61387404':{'en': 'Werribee'},
'6174050':{'en': 'Cairns'},
'61367051':{'en': 'Whitemark'},
'61740473':{'en': 'Cooktown'},
'61740472':{'en': 'Coen'},
'61740471':{'en': 'Chillagoe'},
'61740470':{'en': 'Cairns'},
'61740477':{'en': 'Cairns'},
'61740476':{'en': 'Cairns'},
'61740475':{'en': 'Cairns'},
'61740474':{'en': 'Cairns'},
'61740479':{'en': 'Cairns'},
'61740478':{'en': 'Cairns'},
'6138544':{'en': 'Clayton'},
'6138545':{'en': 'Clayton'},
'6138546':{'en': 'Clayton'},
'6138547':{'en': 'Melbourne'},
'6138540':{'en': 'Clayton'},
'6138541':{'en': 'Clayton'},
'6138542':{'en': 'Clayton'},
'6138543':{'en': 'Clayton'},
'6138548':{'en': 'Melbourne'},
'6138549':{'en': 'Clayton'},
'61364053':{'en': 'Wynyard'},
'61364052':{'en': 'Waratah'},
'61364051':{'en': 'Savage River'},
'61364050':{'en': 'Burnie'},
'61364057':{'en': 'Ulverstone'},
'61364056':{'en': 'Sheffield'},
'61364055':{'en': 'Devonport'},
'61364054':{'en': 'Yolla'},
'61745531':{'en': 'Allora'},
'61745530':{'en': 'Allora'},
'61364059':{'en': 'Yambacoona'},
'61364058':{'en': 'Currie'},
'61745535':{'en': 'Guluguba'},
'61745534':{'en': 'Guluguba'},
'61745537':{'en': 'Miamba'},
'61745536':{'en': 'Miamba'},
'61745027':{'en': 'Bowenville'},
'61745026':{'en': 'Warrego'},
'61745025':{'en': 'Thargomindah'},
'61745024':{'en': 'Tambo'},
'61745023':{'en': 'Quilpie'},
'61745022':{'en': 'Paroo'},
'61745021':{'en': 'Morven'},
'61745020':{'en': 'Haddon'},
'61745029':{'en': 'Bunya Mountains'},
'61745028':{'en': 'Brigalow'},
'61740217':{'en': 'Kowanyama'},
'61740216':{'en': 'Kidston'},
'61740215':{'en': 'Innot Hot Springs'},
'61740214':{'en': 'Innisfail'},
'61740213':{'en': 'Hopevale'},
'61740212':{'en': 'Herberton'},
'61740211':{'en': 'Gordonvale'},
'61740210':{'en': 'Georgetown'},
'61740219':{'en': 'Lakeland'},
'61740218':{'en': 'Kuranda'},
'61740741':{'en': 'Mutchilba'},
'61742620':{'en': 'Mount Garnet'},
'61742621':{'en': 'Mount Surprise'},
'61742622':{'en': 'Mutchilba'},
'61742623':{'en': 'Peninsula'},
'61742156':{'en': 'Torres'},
'61742157':{'en': 'Tully'},
'61742626':{'en': 'South Johnstone'},
'61742627':{'en': 'Thursday Island'},
'61742628':{'en': 'Torres'},
'61742629':{'en': 'Tully'},
'61742158':{'en': 'Euramo'},
'61362194':{'en': 'Tarraleah'},
'61362195':{'en': 'Tarraleah'},
'61362196':{'en': 'Woodbury'},
'61362197':{'en': 'Woodbury'},
'61362190':{'en': 'Swansea'},
'61362191':{'en': 'Swansea'},
'61362192':{'en': 'Swansea'},
'61362193':{'en': 'Tarraleah'},
'61746109':{'en': 'Omanama'},
'61362198':{'en': 'Woodbury'},
'61362199':{'en': 'Geeveston'},
'61746108':{'en': 'Oakey'},
'61363584':{'en': 'Glengarry'},
'61363585':{'en': 'Launceston'},
'61363586':{'en': 'Lilydale'},
'61363587':{'en': 'Longford'},
'61363580':{'en': '<NAME>'},
'61363581':{'en': 'Evandale'},
'61363582':{'en': 'Exeter'},
'61363583':{'en': '<NAME>'},
'61363588':{'en': 'Rossarden'},
'61363589':{'en': 'Targa'},
'61741457':{'en': 'Gaeta'},
'61740745':{'en': 'South Johnstone'},
'61363058':{'en': 'Mathinna'},
'61363059':{'en': 'Emita'},
'61740744':{'en': 'Silkwood'},
'61363052':{'en': 'Lilydale'},
'61363053':{'en': 'Longford'},
'61363050':{'en': 'Lilydale'},
'61363051':{'en': 'Lilydale'},
'61363056':{'en': 'Mathinna'},
'61363057':{'en': 'Mathinna'},
'61363054':{'en': 'Longford'},
'61363055':{'en': 'Longford'},
'61740746':{'en': 'Thursday Island'},
'61742441':{'en': 'Cairns'},
'61367218':{'en': 'Westbury'},
'61367219':{'en': 'Whitemark'},
'61740743':{'en': 'Ravenshoe'},
'61740742':{'en': 'Peninsula'},
'61399729':{'en': 'Melbourne'},
'61399728':{'en': 'Melbourne'},
'61747040':{'en': 'Cape River'},
'61745062':{'en': 'Thallon'},
'61399721':{'en': 'Melbourne'},
'61399720':{'en': 'Sydenham'},
'61399723':{'en': 'Melbourne'},
'61399722':{'en': 'Melbourne'},
'61399725':{'en': 'Melbourne'},
'61399724':{'en': 'Melbourne'},
'61399727':{'en': 'Melbourne'},
'61399726':{'en': 'Melbourne'},
'61745060':{'en': 'Dirranbandi'},
'6174081':{'en': 'Cairns'},
'6174080':{'en': 'Cairns'},
'61745067':{'en': '<NAME>'},
'61745066':{'en': 'Yetman'},
'61367212':{'en': 'Rossarden'},
'61367213':{'en': 'Scottsdale'},
'61745807':{'en': 'Isisford'},
'61745806':{'en': 'Helidon'},
'61741678':{'en': 'Moonford'},
'61741823':{'en': '<NAME>'},
'61741822':{'en': 'Chahpingah'},
'61741821':{'en': 'Kingaroy'},
'61741820':{'en': 'Kingaroy'},
'61741827':{'en': 'Nanango'},
'61741826':{'en': 'Maidenwell'},
'61741825':{'en': 'Kumbia'},
'61741824':{'en': 'Kingaroy'},
'6138809':{'en': 'Melbourne'},
'61741829':{'en': 'Biggenden'},
'61741828':{'en': 'Yarraman'},
'6138808':{'en': 'Melbourne'},
'61741679':{'en': 'Moonford'},
'61746444':{'en': 'Cottonvale'},
'6138805':{'en': 'Ringwood'},
'61746446':{'en': 'Pikedale'},
'61746447':{'en': 'Stanthorpe'},
'61746440':{'en': 'Haden'},
'61746441':{'en': 'Helidon'},
'61746442':{'en': 'Yuleba'},
'6138804':{'en': 'Melbourne'},
'61746448':{'en': 'Bringalily'},
'6138807':{'en': 'Melbourne'},
'61741799':{'en': 'Murgon'},
'61741798':{'en': 'Murgon'},
'61398817':{'en': 'Ringwood'},
'61393608':{'en': 'Melbourne'},
'61393609':{'en': 'Melbourne'},
'61393604':{'en': 'Sydenham'},
'61393605':{'en': 'Sydenham'},
'61393606':{'en': 'Melbourne'},
'61393607':{'en': 'Melbourne'},
'61393600':{'en': 'Melbourne'},
'61393601':{'en': 'Melbourne'},
'61393602':{'en': 'Melbourne'},
'61393603':{'en': 'Melbourne'},
'61745803':{'en': 'Toowoomba'},
'6138802':{'en': 'Melbourne'},
'61745232':{'en': 'Durham Downs/Southwood'},
'61745233':{'en': 'Injune/St George'},
'61745230':{'en': 'Wandoan/<NAME>'},
'61745231':{'en': 'Arcadia Valley/Roma'},
'61361348':{'en': 'Hobart'},
'61361349':{'en': 'Hobart'},
'61745234':{'en': 'Maranoa/Stanthorpe'},
'61745235':{'en': 'Mitchell/Surat'},
'61361344':{'en': 'Colebrook'},
'61361345':{'en': 'Southwest'},
'61361346':{'en': 'Little Swanport'},
'61361347':{'en': 'Swansea'},
'61361340':{'en': 'Tarraleah'},
'61361341':{'en': 'Miena'},
'61361342':{'en': 'Swansea'},
'61361343':{'en': 'Gretna'},
'61741674':{'en': 'Gaeta'},
'61745226':{'en': 'Miamba/Pikedale'},
'61741675':{'en': 'Mulgildie'},
'61734096':{'en': 'Dunwich'},
'61734097':{'en': 'Dunwich'},
'61363614':{'en': 'Scottsdale'},
'61363615':{'en': 'Waterhouse'},
'61363616':{'en': 'Winnaleah'},
'61363617':{'en': 'Bicheno'},
'61363610':{'en': 'Westbury'},
'61363611':{'en': 'Bridport'},
'61363612':{'en': 'Gladstone'},
'61363613':{'en': 'Ringarooma'},
'61363618':{'en': 'Fingal'},
'61363619':{'en': 'Mathinna'},
'61734098':{'en': 'Dunwich'},
'61745942':{'en': 'Roma'},
'61734099':{'en': 'Dunwich'},
'61745944':{'en': 'Moonie'},
'61745945':{'en': 'Dalby'},
'61745946':{'en': 'Dalby'},
'61745947':{'en': 'Dalby'},
'61745948':{'en': 'Dalby'},
'61745949':{'en': 'Dalby'},
'61746490':{'en': 'Galilee'},
'61745923':{'en': 'Toowoomba'},
'6139558':{'en': 'Clayton'},
'6139559':{'en': 'Melbourne'},
'61740040':{'en': '<NAME>'},
'61740041':{'en': '<NAME>'},
'61740046':{'en': 'Euramo'},
'61740047':{'en': 'Euramo'},
'61740044':{'en': 'Etheridge'},
'61740045':{'en': 'Etheridge'},
'6139550':{'en': 'Clayton'},
'6139551':{'en': 'Clayton'},
'6139552':{'en': 'Clayton'},
'6139553':{'en': 'Melbourne'},
'6139554':{'en': 'Dandenong'},
'6139555':{'en': 'Melbourne'},
'6139556':{'en': 'Melbourne'},
'6139557':{'en': 'Melbourne'},
'61745920':{'en': 'Roma'},
'61362055':{'en': 'Maydena'},
'61362054':{'en': 'Maydena'},
'61362057':{'en': 'Miena'},
'61362056':{'en': 'Miena'},
'61362051':{'en': 'Margate'},
'61362050':{'en': 'Margate'},
'61362053':{'en': 'Maydena'},
'61362052':{'en': 'Margate'},
'61385773':{'en': 'Clayton'},
'61385772':{'en': 'Clayton'},
'61385771':{'en': 'Clayton'},
'61385770':{'en': 'Clayton'},
'61362059':{'en': 'Richmond'},
'61362058':{'en': 'Miena'},
'61385775':{'en': 'Clayton'},
'61385774':{'en': 'Clayton'},
'6138202':{'en': 'Croydon'},
'6138201':{'en': 'Croydon'},
'6138200':{'en': 'Croydon'},
'61734445':{'en': '<NAME>'},
'61734444':{'en': 'Ipswich'},
'61734447':{'en': 'Beenleigh'},
'61734446':{'en': 'Beenleigh'},
'61734441':{'en': 'Ipswich'},
'61734440':{'en': 'Beenleigh'},
'61734443':{'en': 'Ipswich'},
'61734442':{'en': 'Ipswich'},
'61364646':{'en': 'Stanley'},
'61364647':{'en': 'Ulverstone'},
'61364644':{'en': 'Yambacoona'},
'61364645':{'en': 'Yolla'},
'61364642':{'en': 'Burnie'},
'61364643':{'en': 'Ulverstone'},
'61364640':{'en': 'Burnie'},
'61364641':{'en': 'Burnie'},
'61745925':{'en': 'Toowoomba'},
'61361506':{'en': '<NAME>'},
'61361507':{'en': 'Margate'},
'61361504':{'en': 'Huonville'},
'61361505':{'en': 'Kempton'},
'61361502':{'en': 'Hermitage'},
'61361503':{'en': 'Hobart'},
'61361500':{'en': 'Geeveston'},
'61361501':{'en': 'Gretna'},
'61361508':{'en': 'Maydena'},
'61361509':{'en': 'Miena'},
'61747098':{'en': 'Halifax'},
'61747099':{'en': 'The Monument'},
'61747094':{'en': 'Gununa'},
'61747095':{'en': 'Gununa'},
'61747096':{'en': 'Halifax'},
'61747097':{'en': 'Halifax'},
'61747090':{'en': 'Gunpowder'},
'61747091':{'en': 'Gunpowder'},
'61747092':{'en': 'Gunpowder'},
'61747093':{'en': 'Gununa'},
'61741730':{'en': 'Nanango'},
'61741195':{'en': 'Mulgildie'},
'61741736':{'en': '<NAME>'},
'61741498':{'en': 'Windera'},
'61741499':{'en': 'Wondai'},
'61741492':{'en': 'Brooweena'},
'61741493':{'en': 'Boondooma'},
'61741490':{'en': 'Biggenden'},
'61741491':{'en': 'Booyal'},
'61741496':{'en': 'Proston'},
'61741497':{'en': 'Tansey'},
'61741494':{'en': 'Manumbar'},
'61741495':{'en': 'Murgon'},
'61731879':{'en': 'Brisbane'},
'61742378':{'en': 'Mossman'},
'61742379':{'en': 'Etheridge'},
'61742372':{'en': 'Malanda'},
'61742373':{'en': 'Georgetown'},
'61742370':{'en': '<NAME>'},
'61742371':{'en': 'Gordonvale'},
'61742376':{'en': 'Mount Surprise'},
'61742377':{'en': 'Daintree'},
'61742374':{'en': 'Herberton'},
'61742375':{'en': 'Maryfarms'},
'6139033':{'en': 'Melbourne'},
'6139032':{'en': 'Melbourne'},
'6139031':{'en': 'Melbourne'},
'6139030':{'en': 'Melbourne'},
'6139037':{'en': 'Melbourne'},
'6139036':{'en': 'Melbourne'},
'61363869':{'en': 'Waterhouse'},
'61363868':{'en': 'Waterhouse'},
'61363867':{'en': '<NAME>'},
'61363866':{'en': '<NAME>'},
'61363865':{'en': 'St Helens'},
'61363864':{'en': 'St Helens'},
'61363863':{'en': 'Pyengana'},
'61363862':{'en': 'Pyengana'},
'61363861':{'en': 'Emita'},
'61363860':{'en': 'Emita'},
'61742241':{'en': 'Herberton'},
'6136432':{'en': 'Burnie'},
'6136433':{'en': 'Burnie'},
'6136430':{'en': 'Burnie'},
'6136431':{'en': 'Burnie'},
'6136436':{'en': 'Burnie'},
'6136437':{'en': 'Burnie'},
'6136434':{'en': 'Burnie'},
'6136435':{'en': 'Burnie'},
'61741899':{'en': 'Kingaroy'},
'6136438':{'en': 'Yolla'},
'6136439':{'en': 'Waratah'},
'6139963':{'en': 'Melbourne'},
'6139789':{'en': 'Dandenong'},
'6139788':{'en': 'Dandenong'},
'6139785':{'en': 'Dandenong'},
'6139784':{'en': 'Dandenong'},
'6139787':{'en': 'Dandenong'},
'6139786':{'en': 'Dandenong'},
'6139781':{'en': 'Dandenong'},
'6139780':{'en': 'Croydon'},
'6139783':{'en': 'Dandenong'},
'6139782':{'en': 'Dandenong'},
'61362886':{'en': 'Hobart'},
'61362887':{'en': 'Hobart'},
'61362884':{'en': 'Maydena'},
'61362885':{'en': 'Hobart'},
'61362882':{'en': 'Maydena'},
'61362883':{'en': 'Maydena'},
'61362880':{'en': 'Maydena'},
'61362881':{'en': 'Maydena'},
'61362888':{'en': 'Hobart'},
'61362889':{'en': 'Maydena'},
'6173004':{'en': 'Brisbane'},
'6173005':{'en': 'Brisbane'},
'6173006':{'en': 'Brisbane'},
'6173007':{'en': 'Brisbane'},
'6173000':{'en': 'Brisbane'},
'6173001':{'en': 'Brisbane'},
'6173002':{'en': 'Brisbane'},
'6173003':{'en': 'Brisbane'},
'6173008':{'en': 'Brisbane'},
'6173009':{'en': 'Brisbane'},
'6174529':{'en': 'Toowoomba'},
'6138783':{'en': 'Dandenong'},
'6138781':{'en': 'Dandenong'},
'6138786':{'en': 'Dandenong'},
'6138787':{'en': 'Dandenong'},
'6138785':{'en': 'Dandenong'},
'6138788':{'en': 'Dandenong'},
'6138789':{'en': 'Dandenong'},
'61361281':{'en': 'Sorell'},
'61361280':{'en': '<NAME>'},
'61361283':{'en': 'Hobart'},
'61361282':{'en': 'Nubeena'},
'61361285':{'en': 'Maydena'},
'61361284':{'en': 'Dover'},
'61361287':{'en': 'Brighton'},
'61361286':{'en': 'Kempton'},
'61361289':{'en': 'Miena'},
'61361288':{'en': '<NAME>'},
'61740187':{'en': 'Hopevale'},
'61740186':{'en': 'Herberton'},
'61740185':{'en': 'Gordonvale'},
'61740184':{'en': 'Georgetown'},
'61740183':{'en': 'Euramo'},
'61740182':{'en': 'Etheridge'},
'61740181':{'en': '<NAME>'},
'61740180':{'en': 'Dimbulah'},
'61740189':{'en': 'Aurukun'},
'61740188':{'en': 'Innisfail'},
'61364079':{'en': 'Yolla'},
'61364078':{'en': 'Wynyard'},
'6138568':{'en': 'Melbourne'},
'6138569':{'en': 'Clayton'},
'61364071':{'en': 'Queenstown'},
'61364070':{'en': 'Rosebery'},
'61364073':{'en': 'Smithton'},
'61364072':{'en': 'Stanley'},
'61364075':{'en': 'Burnie'},
'61364074':{'en': 'Marrawah'},
'61364077':{'en': 'Waratah'},
'61364076':{'en': '<NAME>'},
'61745837':{'en': '<NAME>'},
'61745009':{'en': 'Charleville'},
'61745008':{'en': 'Augathella'},
'61745559':{'en': 'Liston'},
'61745558':{'en': | |
# repository: oliviazz/noteable
from flask import request, jsonify, Blueprint
from flask_login import login_required, login_user, current_user, logout_user
from models import User
from database import Database
import requests
from bs4 import BeautifulSoup
import json
import unicodedata
import datetime
from urllib import urlopen
bp = Blueprint('blueprint', __name__, template_folder='templates')
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
# HELPER FUNCTIONS #
# #
# #
# #
# #
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
def displayArticlesHelper(article_query_results):
    """Convert raw article rows into a dict keyed by article URL.

    Rows follow the ``articles`` table column order:
      0: articleID, 1: title, 2: icon, 3: blurb, 4: author,
      5: date, 6: URL, 7: numUses, 8: comma-separated tags

    Returns a dict mapping article URL -> metadata dict for the client.
    """
    print("RESULTS, " + str(article_query_results), " these are the results")
    formatted_results = {}
    for row in article_query_results:
        article_url = str(row[6])
        formatted_results[article_url] = {
            'title': row[1],
            'icon': row[2],
            # BUG FIX: 'date' previously read index 6 (the URL); the date
            # lives in column 5 per the column map above.
            'blurb': row[3],
            'author': row[4],
            'date': row[5],
            'tag': row[8],
            'url': row[6]
        }
    #ormatted_results = sorted(formatted_results, key=formatted_results['date'])
    return formatted_results
def modularAddArticle(json_payload, username):
    """Scrape Open Graph metadata for an article URL and store the article.

    Shared by the per-user and per-group "add article" routes, so `username`
    may be either a user name or a group name.  Returns a (response, status)
    tuple: 200 after a successful insert, 400 on any error.
    """
    article = str(json_payload['article_url'])
    tags = str(json_payload['tags'])
    print 'Verifying that article and tags parsed', article, tags
    database = Database()
    database.connect()
    # Metadata defaults; filled in below from the page's <meta> tags.
    my_info = {'title':'', 'url':'', 'descrip':'', 'image':'', 'author':''}
    # True once at least an og:title was found; reported back to the caller.
    working = False
    try:
        #trimmed_url = article[1:-1].encode('ascii', errors='ignore')
        # Strip stray quote characters the client may have wrapped the URL in.
        trimmed_url = article.replace('"', '').replace("'", '')
        print "trimmed_url: " + str(trimmed_url)
        trimmed_url = trimmed_url.encode('ascii', errors='ignore')
        print "trimmed_url 2: " + str(trimmed_url)
        # Fetch the page and look for Open Graph <meta property="og:..."> tags.
        soup = BeautifulSoup(urlopen(trimmed_url).read(), "lxml")
        title = soup.find("meta", property="og:title")
        print title
        url = soup.find("meta", property="og:url")
        descrip = soup.find("meta", property="og:description")
        image = soup.find("meta", property="og:image")
        # 'byl' is a byline meta tag -- presumably targets nytimes-style pages.
        author = soup.find('meta', {'name': 'byl'})
        if title:
            my_info['title'] = title['content'].encode('ascii', errors='ignore')
            working = True
        if url: my_info['url'] = url['content'].encode('ascii', errors='ignore')
        if descrip: my_info['descrip'] = descrip['content'].encode('ascii', errors='ignore')
        if image: my_info['image'] = image['content'].encode('ascii', errors='ignore')
        if author:my_info['author'] = author['content'].encode('ascii', errors='ignore')
        # Stamp the article with today's date (server-local time, YYYY-MM-DD).
        time = datetime.datetime.today().strftime('%Y-%m-%d').encode('ascii', errors='ignore')
        database.insertArticle(username, articleTitle=my_info['title'], articleIcon=my_info['image'],
                               articleBlurb=my_info['descrip'], articleAuthor=my_info['author'], articleDate=time,
                               articleURL=my_info['url'], tags=tags)
        print(database.userTagArticles(username, ""), "LEt's see if this worked!")
        print('above are my tags')
        print('verifiting that the info was successfully parsed \n\n ', my_info, username)
        database.disconnect()
        return jsonify(message="Posted article: " + article + '; Metadata collection: ' + str(working) ), 200
    except Exception as e:
        # Any scrape/parse/DB failure lands here; connection is still closed.
        print(e)
        database.disconnect()
        return jsonify(message='Error!'), 400
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
# LOGIN/USER #
# #
# #
# #
# #
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
@bp.route("/", methods=["GET"])
def index():
    # Unauthenticated health-check / landing endpoint.
    response = jsonify(message="Hello World!")
    return response, 200
@bp.route("/login", methods=["POST"])
def login():
    """Log a user in by username/password.

    Returns 200 with the login state on success, 403 otherwise.
    """
    json_payload = request.get_json()
    # BUG FIX: the lookup below was commented out (with username hard-coded
    # to 'livz'), leaving `user_entry` undefined -- every request raised a
    # NameError.  Restore the real lookup.
    user_entry = User.get(json_payload['username'].replace('\"', ''))
    if (user_entry):
        user = User(*user_entry)
        if (user.password == json_payload['password']):  # not for prod: plaintext compare
            login_user(user)
            return jsonify(isLoggedIn=current_user.is_authenticated), 200
    return jsonify(authorization=False), 403
@bp.route("/protected", methods=["GET"])
@login_required
def protected():
    # Authenticated ping endpoint; @login_required rejects anonymous callers.
    response = jsonify(message="Hello Protected World!")
    return response, 200
@bp.route("/checkuserexists", methods=["POST"])
def checkuserexists():
    """Report whether the (hashed) client-supplied user id already exists."""
    json_payload = request.get_json()
    preUserId = str(json_payload['pre_user_Id'])
    # NOTE(review): built-in hash() of a str is randomized per process on
    # Python 3 (PYTHONHASHSEED), so these ids are not stable across restarts
    # -- confirm intent or switch to a stable digest (hashlib).
    userId = hash(preUserId)
    database = Database()
    database.connect()
    userExists = database.checkUser(userId)
    # BUG FIX: the connection was previously leaked; close it like the
    # other handlers do.
    database.disconnect()
    return jsonify(exists = userExists), 200
@bp.route("/createuser", methods=["POST"])
def createuser():
    """Create a new user record from the signup payload."""
    json_payload = request.get_json()
    user_data = json_payload['data']
    print(user_data, ' \n\nuser data')
    database = Database()
    database.connect()
    username = user_data['username'].replace('\"', '')
    database.insertUser(str(user_data['firstName']), str(user_data['lastName']), str(username))
    # BUG FIX: the connection was previously leaked; close it before returning.
    database.disconnect()
    # BUG FIX: the message lacked spaces ("Userlivzsuccessfully entered").
    return jsonify(message=("User " + username + " successfully entered")), 200
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
# GROUP CODE #
# #
# #
# #
# #
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
@bp.route("/creategroup", methods=["POST"])
def creategroup():
    # Create a new (empty) group with the supplied name.
    payload = request.get_json()
    groupname = payload['groupname'].replace('\"', '')
    db = Database()
    db.connect()
    db.insertGroup(groupname)
    db.disconnect()
    return jsonify(message="Created group: " + groupname), 200
@bp.route("/joingroup", methods=["POST"])
def joingroup():
    # Add the requesting user to an existing group.
    payload = request.get_json()
    member = str(payload['username'].replace('\"', ''))
    group = str(payload['groupname'].replace('\"', ''))
    print("username: " + member)
    print("groupname: " + group)
    db = Database()
    db.connect()
    db.addUserToGroup(member, group)
    db.disconnect()
    return jsonify(message="Added: " + member + " to group " + group), 200
@bp.route("/displayallgroups", methods=["POST"])
def displayallgroups():
    # List every group known to the database, commas stripped from names.
    db = Database()
    db.connect()
    groups = db.allGroups()
    print(groups)
    formatted_results = {
        idx: {'groupname': row[0].replace(",", "")}
        for idx, row in enumerate(groups)
    }
    db.disconnect()
    return jsonify(results=formatted_results), 200
@bp.route("/displaymygroups", methods=["POST"])
def displaymygroups():
    # List the groups that the given user belongs to.
    db = Database()
    db.connect()
    payload = request.get_json()
    member = str(payload['username'].replace('\"', ''))
    groups = db.displayAllGroupsFromUsername(member)
    formatted_results = {
        idx: {'groupname': row[0]}
        for idx, row in enumerate(groups)
    }
    db.disconnect()
    return jsonify(results=formatted_results), 200
@bp.route("/displaygrouparticles", methods=["POST"])
def displaygrouparticles():
    """Return every article posted to a group, formatted for the client."""
    database = Database()
    database.connect()
    json_payload = request.get_json()
    print(json_payload, "payload in get articles")
    # PUT THIS BACK LATER
    groupname = json_payload['groupname'].replace('\"', '')
    tags = ""
    article_query_results = database.userTagArticles(groupname, tags)
    print(article_query_results)
    formatted_results = displayArticlesHelper(article_query_results)
    # BUG FIX: the connection was previously leaked; close it before
    # returning, matching the other handlers.
    database.disconnect()
    print('all done')
    return jsonify(results=formatted_results)
@bp.route("/leavegroup", methods=["POST"])
def leavegroup():
    # Remove the requesting user from a group.
    payload = request.get_json()
    member = str(payload['username'].replace('\"', ''))
    group = str(payload['groupname'].replace('\"', ''))
    db = Database()
    db.connect()
    db.deleteUserFromGroup(member, group)
    db.disconnect()
    return jsonify(message="Removed: " + member + " from group " + group), 200
@bp.route("/addarticletogroup", methods=["POST"])
def addarticletogroup():
    # Post an article to a group's feed (groups reuse the user article storage).
    payload = request.get_json()
    print(payload, " addarticle json payload")
    # UPDATE LATER
    group = str(payload['groupname'].replace('\"', ''))
    return modularAddArticle(payload, group)
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
# FRIEND FUNCTIONS #
# #
# #
# #
# #
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
@bp.route("/addfriend", methods=["POST"])
def addfriend():
    # Record a friend link from `username` to `friendname`.
    payload = request.get_json()
    requester = str(payload['username'].replace('\"', ''))
    target = str(payload['friendname'].replace('\"', ''))
    db = Database()
    db.connect()
    db.addFriend(requester, target)
    db.disconnect()
    return "addedfriend"
@bp.route("/removefriend", methods=["POST"])
def removefriend():
    # Delete the friend link from `username` to `friendname`.
    payload = request.get_json()
    requester = str(payload['username'].replace('\"', ''))
    target = str(payload['friendname'].replace('\"', ''))
    db = Database()
    db.connect()
    db.deleteFriend(requester, target)
    db.disconnect()
    return "deletedfriend"
@bp.route("/displaypending", methods=["POST"])
def displaypending():
    # List pending friend requests for the given user.
    payload = request.get_json()
    db = Database()
    db.connect()
    member = str(payload['username'].replace('\"', ''))
    pending = db.displayPending(member)
    formatted_results = {
        idx: {'firstname': row[0], 'lastname': row[1], 'username': row[2]}
        for idx, row in enumerate(pending)
    }
    db.disconnect()
    return jsonify(results=formatted_results), 200
@bp.route("/checkfriends", methods=["POST"])
def checkfriends():
    """Report whether two users are friends."""
    json_payload = request.get_json()
    username = json_payload['username'].replace('\"', '')
    friendname = json_payload['friendname'].replace('\"', '')
    database = Database()
    database.connect()
    friendsLogic = database.checkFriends(username, friendname)
    # BUG FIX: the connection was previously leaked; close it like the
    # other handlers do.
    database.disconnect()
    return jsonify(results=friendsLogic), 200
@bp.route("/friendarticles", methods=["POST"])
def friendarticles():
    """Return a friend's articles, but only when the two users are friends.

    NOTE(review): unlike the sibling handlers, `friendname` is not stripped
    of quote characters here -- confirm whether clients quote it.
    """
    json_payload = request.get_json()
    # PUT THIS BACK LATER
    username = json_payload['username'].replace('\"', '')
    friendname = json_payload['friendname']
    tags = ""
    database = Database()
    database.connect()
    friendsLogic = database.checkFriends(username, friendname)
    if friendsLogic == True:
        article_query_results = database.userTagArticles(username, tags)
        print(article_query_results)
        formatted_results = displayArticlesHelper(article_query_results)
        # BUG FIX: connection was previously leaked on this branch.
        database.disconnect()
        print('all done')
        return jsonify(results=formatted_results)
    else:
        # BUG FIX: connection was previously leaked on this branch too.
        database.disconnect()
        print("you are not friendos")
        return "Would you like to add this user as a friend?"
@bp.route("/allfriends", methods=["POST"])
def allfriends():
    # List every friend of the given user.
    payload = request.get_json()
    # PUT THIS BACK LATER
    member = payload['username'].replace('\"', '')
    db = Database()
    db.connect()
    friends = db.allUserFriends(member)
    formatted_results = {
        idx: {'firstname': row[0], 'lastname': row[1], 'username': row[2]}
        for idx, row in enumerate(friends)
    }
    db.disconnect()
    return jsonify(results=formatted_results), 200
@bp.route("/allusers", methods=["POST"])
def allusers():
    """Return first name, last name and username for every known user."""
    db = Database()
    db.connect()
    everyone = db.allUsers()
    formatted_results = {
        idx: {'firstname': person[0], 'lastname': person[1], 'username': person[2]}
        for idx, person in enumerate(everyone)
    }
    db.disconnect()
    return jsonify(results=formatted_results), 200
@bp.route("/alltags", methods=["POST"])
def alltags():
    """Return every tag name known to the database."""
    db = Database()
    db.connect()
    tags = db.allTags()
    print(tags, "all tags")
    # Each row's first column is the tag name.
    formatted_results = dict(
        (idx, {'tagname': row[0]}) for idx, row in enumerate(tags))
    print(formatted_results, "FORMATTED RESULTS")
    db.disconnect()
    return jsonify(results=formatted_results), 200
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
# ARTICLE FUNCTIONS #
# #
# #
# #
# #
# ---------------------------------------------------------------------------------------------------------------------------------------------------#
@bp.route("/feed", methods=["POST"])
def feed():
    """Return the article feed for a (currently hard-coded) user."""
    database = Database()
    database.connect()
    json_payload = request.get_json()
    print(json_payload, "payload in get articles")
    # PUT THIS BACK LATER
    # username = json_payload['username'].replace('\"', '')
    username = 'livz'  # TODO: read from the payload once auth is restored
    tags = ""
    article_query_results = database.feed(username, tags)
    print(article_query_results)  # was a Python-2-only print statement
    formatted_results = displayArticlesHelper(article_query_results)
    database.disconnect()  # fix: connection previously leaked
    print('all done')
    return jsonify(results=formatted_results)
@bp.route("/addarticle", methods=["POST"])
def addarticle():
    """Add an article for the posted username via modularAddArticle."""
    json_payload = request.get_json()
    print(json_payload, " addarticle json payload")
    # UPDATE LATER
    username = str(json_payload['username'].replace('\"', ''))
    print("now, testing that it exists after adding")  # was a Python-2-only print statement
    result = modularAddArticle(json_payload, username)
    return result
@bp.route("/deletearticle", methods=["POST"])
def deletearticle():
    """Delete the posted article URL for the given (or default) user.

    Returns 200 with a confirmation message, or 400 on failure.
    """
    json_payload = request.get_json()
    print("%s %s" % (json_payload, "json payload delete article"))  # was a py2 print statement
    if 'username' in json_payload:
        username = str(json_payload['username'].replace('\"', ''))
    else:
        username = "<EMAIL>"
    article = str(json_payload['article_url'])
    database = Database()
    database.connect()
    try:
        # NOTE(review): hash() of a str is randomized per-process on
        # Python 3, so this ID is not stable across runs -- confirm the
        # insertion path derives article IDs the same way.
        articleID = hash(article)
        print('--------------')
        #print(database.userTagArticles(username,""))
        database.deleteArticle(username=username, articleID=articleID)
        #print(database.userTagArticles(username, ""))
        print('--------------')
        database.disconnect()
        return jsonify(message="Deleted article: " + article), 200
    except Exception as err:  # fix: `except Exception, err` is Python-2-only syntax
        print('Error in deleting article: ', str(err))
        database.disconnect()
        # Fix: message previously read "deleting article article".
        return jsonify(message="Error in deleting article!"), 400
@bp.route("/getarticles", methods=["POST"])
def getarticles():
    """Return all tagged articles for the posted username."""
    database = Database()
    database.connect()
    json_payload = request.get_json()
    print("%s %s" % (json_payload, "payload for get articles"))  # was a py2 print statement
    # PUT THIS BACK LATER
    username = json_payload['username'].replace('\"', '')
    tags = ""
    article_query_results = database.userTagArticles(username, tags)
    print("Article Query Results:  %s" % (article_query_results,))  # was a py2 print statement
    formatted_results = displayArticlesHelper(article_query_results)
    database.disconnect()
    print('all done')
    return jsonify(results=formatted_results)
@bp.route("/getgrouparticles", methods=["POST"])
def getgrouparticles():
    """Return the articles shared inside the posted group."""
    database = Database()
    database.connect()
    json_payload = request.get_json()
    print("%s %s" % (json_payload, "payload for get group articles"))  # was a py2 print statement
    # PUT THIS BACK LATER
    groupname = json_payload['groupname'].replace('\"', '')
    tags = ""
    print(groupname, "yooo!!")
    article_query_results = database.getArticlesFromGroup(groupname)
    print("fdafsa--------------------\n", article_query_results)
    formatted_results = displayArticlesHelper(article_query_results)
    database.disconnect()
    print('all done')
    return jsonify(results=formatted_results)
# @bp.route("/getarticlesinfo", methods=["GET"])
# def getarticlesinfo():
# json_payload = request.get_json()
# #articles = json.loads(json_payload['articles'])
# articles = ['https://www.vogue.com/article/slam-jam-luca-benini-interview-pitti-uomo']
# # check if article url has an entry
# print("HEY")
# article_full_info = {}
# for article in articles:
# # if (has_info(article)):
# # continue
# # article = str(article)
# my_info = {'title': '', 'url':'', 'descrip':'', 'image':''}
# try:
# proxyurl = "https://cors-anywhere.herokuapp.com/";
# # headers = {'x-requested-with': 'XMLHttpRequest'}
# # response = requests.get(proxyurl+article, headers=headers)
# print('here>>')
# soup = BeautifulSoup(urlopen(article).read())
# title = soup.find("meta", property="og:title")
# url = soup.find("meta", property="og:url")
# descrip = soup.find("meta", property="og:description")
# image = soup.find("meta", property="og:image")
# if title: my_info['title'] = title['content']
# if url: my_info['url'] = url['content']
# if descrip: my_info['descrip'] = descrip['content']
# if image: my_info['image'] = image['content']
# print(title, url, descrip, image)
| |
# repo: kevin-ci/advent-of-code-2020 (0 stars)
import re
input = """iyr:2010 ecl:gry hgt:181cm
pid:591597745 byr:1920 hcl:#6b5442 eyr:2029 cid:123
cid:223 byr:1927
hgt:177cm hcl:#602927 iyr:2016 pid:404183620
ecl:amb
eyr:2020
byr:1998
ecl:hzl
cid:178 hcl:#a97842 iyr:2014 hgt:166cm pid:594143498 eyr:2030
ecl:hzl
pid:795349208 iyr:2018
eyr:2024 hcl:#de745c hgt:157cm
hgt:159cm pid:364060467 eyr:2025 byr:1978 iyr:2018 cid:117
ecl:hzl
hcl:#18171d
hcl:#cfa07d
ecl:amb iyr:2012
hgt:182cm cid:338
eyr:2020
pid:374679609 byr:1925
eyr:2021 byr:1981
hcl:#623a2f cid:195 iyr:2010
pid:579769934 ecl:grn hgt:192cm
byr:1970
ecl:oth
eyr:2025
pid:409994798 iyr:2018 hgt:189cm
hgt:153cm pid:817651329 iyr:2019
eyr:2029
hcl:#623a2f byr:1920
ecl:gry
iyr:2011 ecl:amb hcl:#a97842 byr:1965 pid:648375525 eyr:2028 hgt:177cm cid:287
iyr:2012 pid:369979235 hcl:#c0946f
ecl:amb hgt:178cm
byr:1927 ecl:brn hgt:178cm eyr:2026 hcl:#efcc98
iyr:2011 pid:770851101
eyr:2028
ecl:oth cid:298
byr:1943
hgt:168cm iyr:2018 hcl:#ceb3a1 pid:116783406
eyr:2027 hgt:175cm hcl:#733820
ecl:gry cid:349 iyr:2017 byr:1960
pid:257797292
cid:66 ecl:amb
eyr:2030
iyr:2026 byr:2024
hcl:a22966 hgt:179cm pid:155cm
eyr:2023 hcl:#c0946f pid:081232570 ecl:hzl
iyr:2010 hgt:158cm byr:1969
byr:1958
ecl:grn hcl:#ceb3a1
hgt:173cm
pid:600039004
cid:107 iyr:2012 eyr:2027
ecl:amb pid:021066381
hcl:#ceb3a1 byr:1982 iyr:2017
hgt:167cm eyr:2025 cid:61
hcl:#341e13
cid:268
pid:358390884 hgt:188cm byr:1961 iyr:2014 eyr:2027 ecl:blu
ecl:brn eyr:2020
pid:607203641
hcl:#fffffd iyr:2011
byr:1962
hgt:156cm
iyr:2018
hcl:#b6652a
byr:1942 ecl:blu eyr:2029 hgt:154cm pid:649263319
ecl:oth hgt:73in iyr:2012 hcl:#888785 eyr:2020
pid:147939289
byr:1961
ecl:oth iyr:2015
hgt:189cm hcl:#341e13 pid:686943691 eyr:2023 byr:1987
pid:568844323
eyr:2023 byr:1921 hgt:167cm cid:154 hcl:#b6652a
ecl:gry iyr:2020
eyr:2023 byr:1994
iyr:1937 hgt:177cm hcl:#c0946f pid:686240814 cid:231 ecl:#a8ba32
hcl:#b6652a
byr:1946 pid:543383899 iyr:2013 hgt:153cm ecl:hzl cid:238 eyr:2023
eyr:2028 ecl:blu
hgt:154cm cid:252
pid:196374590
byr:1987 iyr:2011
hcl:#7d3b0c
iyr:2013
ecl:amb cid:187
hgt:187cm pid:593027548 byr:1963
eyr:2024 hcl:#fffffd
pid:588211492 hgt:156cm
iyr:2021 eyr:2021 ecl:gry hcl:z byr:1928
ecl:amb hcl:#888785 hgt:180cm eyr:2022 byr:1923 pid:490291639 cid:173 iyr:2015
iyr:2014 cid:211 pid:404157420 hcl:#602927
ecl:oth byr:1946 eyr:2030 hgt:175cm
hcl:z byr:2026
pid:61805448
hgt:125 iyr:2025
eyr:2028
hgt:156cm
hcl:#341e13 cid:103 ecl:amb iyr:2017 byr:1937 pid:320691739
hgt:185cm
pid:440489464 byr:1929 ecl:amb iyr:2011 eyr:2021 cid:327 hcl:#341e13
byr:1988 ecl:grn
pid:062728732 iyr:2013
hgt:181cm
hcl:#18171d
eyr:2026
pid:000647617
eyr:2029 byr:1937
ecl:gry hcl:#e8eff3 hgt:164cm cid:151
iyr:2016
hgt:179cm
byr:1949
eyr:2029 pid:459190453
ecl:grn iyr:2020 hcl:#c0946f
hgt:160cm pid:476613532 cid:190 iyr:2016 hcl:#4657e5
byr:1929
eyr:2028
ecl:grn
eyr:2027 byr:1982
hcl:#18171d
pid:630408328
cid:65 iyr:2020
hgt:161cm
pid:752776254
hcl:#888785
hgt:189cm
eyr:2027 iyr:2020 ecl:hzl
cid:194 byr:1934
iyr:2015 hgt:167cm byr:1977
eyr:2021 hcl:#14564f pid:504471386 ecl:oth
hgt:84 pid:168cm
hcl:8532fb eyr:2023
iyr:2012 ecl:xry byr:2008 cid:288
cid:323 eyr:2024
iyr:2019
pid:495737304 byr:1966 hcl:#7d3b0c ecl:hzl
hgt:73in
iyr:2020 byr:1953 ecl:hzl hcl:#efcc98 hgt:174cm eyr:2026 pid:546906638
pid:839249028
hcl:z byr:2024
hgt:145 eyr:2034 iyr:2021 ecl:#891c47
eyr:2036
ecl:#89d2ae
cid:183 byr:2014
hcl:b3af0f
pid:12086913 iyr:1981
hgt:61cm
ecl:brn eyr:2030 pid:083487445 byr:1929 hcl:z iyr:2021
hgt:182 cid:318
eyr:2020
pid:188609216 hcl:#341e13
iyr:2012 hgt:179cm
eyr:2029
hcl:#888785 pid:704026565 hgt:173cm iyr:2020 ecl:blu byr:1950 cid:237
ecl:grn
eyr:2030
byr:1961 pid:695808266
iyr:2012 cid:56
hgt:155cm
iyr:2011 ecl:amb
byr:1986 pid:243061330 hgt:163cm eyr:2021
eyr:2030 hcl:#623a2f hgt:170cm ecl:hzl
pid:694575319 iyr:2011
byr:1939
iyr:2014 pid:184152121
hcl:#c0946f hgt:163cm
eyr:2028 byr:1992 cid:114
ecl:hzl
hgt:75in cid:233
hcl:#866857 pid:269157261 iyr:2020
byr:1973 eyr:2029
hgt:174cm
hcl:#f86751 iyr:2016
pid:904779190
ecl:brn eyr:2024 byr:1950
cid:123 iyr:2019
eyr:2030 pid:402585706
ecl:brn byr:1995 hcl:#4ff7fa
hgt:65in
ecl:grn eyr:2029
pid:083364259 iyr:2013 cid:50 byr:1938 hgt:187cm
hcl:#a97842
hcl:#6b5442 cid:101 iyr:2011
ecl:amb eyr:2029 byr:1963 pid:664573740
eyr:2025 hcl:#602927
hgt:188cm
iyr:2019
pid:521514539 byr:1940 ecl:gry
hcl:dc0449 eyr:1981 pid:188cm
cid:151 iyr:1979 hgt:61cm ecl:dne
byr:2028
iyr:2017 byr:1924
hgt:163cm eyr:2024 hcl:#ceb3a1 pid:424127124
ecl:amb
eyr:2039 pid:7837217107 hcl:z byr:2005
iyr:1989 ecl:#d95f4d hgt:190in
ecl:#329eb1 cid:178 hgt:192
eyr:2020 iyr:2012
hcl:#602927
byr:2028 pid:7175349420
ecl:gry byr:1931
hgt:162cm iyr:2014
eyr:2030 cid:50
hcl:#cfa07d pid:653585396
eyr:2025 hgt:177cm
ecl:gry hcl:#efcc98
iyr:2015
byr:1942
pid:388475446
hcl:#efcc98 ecl:grn
hgt:185cm
byr:1921 pid:253592171
eyr:2031 cid:220 iyr:2024
byr:1950
hgt:158cm ecl:gry iyr:2015 hcl:#18171d
eyr:2023
pid:151cm
byr:1957
hcl:z
eyr:2026
ecl:grn
iyr:1971 hgt:192in pid:5479810865
hgt:161cm pid:473851111 iyr:2018
ecl:brn byr:1982
eyr:2029
pid:136216608 byr:1958
cid:226 eyr:2023 hcl:#866857 iyr:2017 ecl:hzl hgt:159cm
byr:1993 hcl:#866857 hgt:169cm pid:488392920
cid:109 iyr:2017 ecl:oth eyr:2029
cid:248 ecl:amb eyr:2025 iyr:2017 byr:1951 hcl:#ceb3a1 pid:731763175 hgt:162cm
hcl:#835e79
eyr:2021
ecl:oth pid:617055193 byr:1997 iyr:2010
hgt:173cm
eyr:2024 pid:257895944
hcl:#ceb3a1
hgt:165cm
ecl:oth iyr:2020
byr:1958
pid:438580092
ecl:grt byr:2025
hcl:z iyr:2000 eyr:1952
cid:271 hgt:170in
iyr:2010 hcl:#6b5442 hgt:156cm
eyr:2026 ecl:grn pid:409793041 byr:1941
pid:076486440
hgt:177cm hcl:#888785 ecl:blu iyr:2017 eyr:2029
eyr:2028 ecl:amb hgt:186cm hcl:#1d5836 pid:563307670 iyr:2019 byr:1950
byr:1939 ecl:hzl hgt:193cm pid:329759796
hcl:#cfa07d eyr:2025 iyr:2011 cid:73
byr:1995
hgt:188cm eyr:2028
ecl:blu
iyr:2016 hcl:#888785 pid:459613739 cid:115
hcl:#623a2f
eyr:2021 cid:197 hgt:187cm ecl:oth
byr:1969
iyr:2010 pid:385660251
hgt:192cm cid:143 byr:1995 hcl:#fffffd
iyr:2017 ecl:oth
eyr:2020 pid:087661720
ecl:oth
byr:1994 hgt:183cm
eyr:2020 iyr:2020 pid:448389966 cid:92 hcl:#866857
pid:088166852 hgt:155cm cid:307 byr:1940
hcl:#7d3b0c
ecl:#af542f eyr:2023 iyr:2014
byr:2026 eyr:2039 hcl:5449b3
ecl:hzl hgt:176in
iyr:1962 pid:177cm
iyr:2020 ecl:amb hgt:164cm hcl:#c0946f
pid:931543453 eyr:2024 byr:2001
iyr:2010 eyr:2023 hgt:188cm
hcl:#866857 ecl:hzl pid:866631112 byr:1997
byr:1958 hgt:184cm
cid:117 hcl:#7d3b0c iyr:2019 pid:615734013 eyr:2028 ecl:gry
hgt:86 iyr:1935 ecl:grt pid:#af8e67 eyr:2031
byr:2018 hcl:6a2940
hgt:73in eyr:2022 pid:580461358 byr:1962
cid:129 iyr:2015 hcl:#7d3b0c
iyr:2019 hcl:#b6652a hgt:172cm ecl:blu pid:077121198 eyr:2021
byr:1995
hcl:#ceb3a1 cid:253
iyr:2015 hgt:177cm byr:1973
ecl:hzl pid:311289324 eyr:2025
iyr:2017 hcl:#efcc98
cid:57 byr:1940 ecl:blu
eyr:2025 hgt:157cm pid:827480048
eyr:2028 hgt:189cm
iyr:2016 byr:1978 ecl:hzl pid:127497651 cid:87
hcl:#623a2f
hcl:#341e13 byr:2015
ecl:brn hgt:187in
pid:82075551
eyr:1936
cid:200
iyr:1939
ecl:grn byr:1962
iyr:2011 hgt:169cm
pid:661559147
hcl:#623a2f eyr:2023
ecl:gry
hcl:#efcc98 eyr:2009 byr:2028
hgt:170in
cid:129 pid:161cm iyr:2018
pid:098090405 hcl:#623a2f byr:1943 ecl:hzl
hgt:152cm iyr:2013 eyr:2029
pid:495271053 iyr:2011 ecl:gry hcl:#623a2f cid:285
byr:1925 eyr:2024 hgt:187cm
cid:306
hgt:73in
iyr:2010 hcl:#448fd7
byr:1946
ecl:grn pid:137146932 eyr:2021
eyr:2020 hgt:159cm cid:90 iyr:2010 ecl:brn hcl:#341e13 byr:1955
hcl:#18171d iyr:2017 ecl:amb
pid:168517472
eyr:2021 hgt:181cm byr:1942
cid:325 eyr:2022 pid:947158470 byr:1994 iyr:2019 ecl:grn hgt:172cm hcl:#ec63ce
iyr:2011
pid:243339529
ecl:amb
hgt:169cm
byr:1967
eyr:2025 hcl:#b6652a
pid:664966826 eyr:2036 iyr:2015 byr:1972 hgt:68in
hcl:z
ecl:#038105
eyr:2021 pid:236054221
hgt:179cm
hcl:#b6652a iyr:2020 ecl:blu
ecl:grn
iyr:2010
pid:870519416 byr:1945 hcl:#a97842
hgt:176cm eyr:2030
hcl:#3318db eyr:2022
byr:1966
ecl:grn iyr:2013
cid:349
hgt:168cm pid:827688488
pid:124116963
hcl:#866857 eyr:2026
iyr:2013 ecl:grn byr:1983 hgt:183cm
iyr:2017 byr:1993
hcl:#18171d ecl:utc hgt:68in cid:168 eyr:2030 pid:#2fd9f2
ecl:blu cid:134 eyr:2025 pid:588957573
iyr:2017
hgt:151cm byr:1942 hcl:#4280c1
hcl:#51b593
iyr:2013
ecl:amb pid:668244584
cid:282
byr:1936
eyr:1985 hgt:161cm
pid:494051052
hgt:185cm byr:1996 eyr:2028 iyr:2018
ecl:amb
hcl:#efcc98
ecl:brn
eyr:2025
iyr:2011
hgt:163cm hcl:#a97842
byr:1989 pid:557549000
pid:828235468 cid:55
iyr:2010 byr:1926 eyr:2029 hgt:153cm hcl:#cfa07d
ecl:blu
hgt:158cm iyr:2015 pid:957913612 ecl:grn eyr:2020 byr:1984 cid:76 hcl:#6b5442
ecl:amb eyr:2020 pid:596116320
byr:1936
hcl:#cfa07d
hgt:165cm cid:86 iyr:2014
iyr:2012
cid:278 hcl:#602927
eyr:2020 ecl:hzl
hgt:176cm byr:1987 pid:594817909
iyr:2011 byr:1929 pid:073211525 eyr:2022
hgt:188cm
ecl:blu
hcl:#733820
hcl:#602927 hgt:187cm
pid:706155322 cid:203
ecl:brn byr:1952 iyr:2017 eyr:2020
hcl:bcb5f7
byr:2002 eyr:2029 pid:850069752 iyr:2019 ecl:hzl
hgt:167cm
hcl:#b6652a hgt:72in iyr:2013
ecl:grn eyr:2024 byr:1920 cid:114
pid:983486664
byr:1931 iyr:2020 pid:182737852 hgt:162cm
ecl:grn hcl:#888785 eyr:2028
eyr:2035
byr:1962 iyr:2012 cid:120
ecl:xry
hgt:61cm hcl:ce89a8 pid:335540582
pid:#05153d iyr:1990
eyr:1927 hgt:71cm
byr:2019 cid:346 ecl:#e38688
hcl:c6abd9
ecl:#cd58d8 pid:166cm iyr:2012
hcl:0d1b02 hgt:68
eyr:1958
pid:976419172 byr:1922 cid:345 hcl:#6b5442 iyr:2010 eyr:2026
ecl:grn hgt:155cm
ecl:gry hcl:#1bbadc hgt:168cm
eyr:2028
byr:1984 cid:179 iyr:2013 pid:706186218
ecl:blu hgt:188cm
pid:764775319 byr:1936 hcl:#7d3b0c iyr:2020
hcl:#623a2f
iyr:2012
pid:382832140 ecl:gry
eyr:2026
cid:350
hgt:165cm byr:1968
hcl:0b87a1 byr:2020 pid:4365879329
cid:110 ecl:grn
eyr:2032 hgt:155cm
iyr:2018
hgt:193cm eyr:2029 hcl:#733820 pid:081071142 byr:1929 ecl:oth
ecl:brn
eyr:2023 pid:876924536 cid:165
hcl:#efcc98 hgt:151cm byr:1972
iyr:2020
hgt:186cm eyr:2022
ecl:grn
byr:1972 pid:997639611 hcl:#ceb3a1 iyr:2013
byr:1926
pid:808460262
iyr:2012 eyr:2031 hcl:#a97842 ecl:amb
hgt:190cm
hgt:163cm
hcl:#ceb3a1 eyr:2028
ecl:grn
byr:1944 pid:381144425 iyr:2012
hcl:#95a232 pid:015229624 byr:1947 iyr:2013 hgt:66cm ecl:gry eyr:2027
hcl:z byr:1965 iyr:2013 hgt:157cm ecl:#8b12fb cid:246 pid:283039791 eyr:2023
ecl:gry byr:1950
hcl:#623a2f cid:276 iyr:2013 eyr:2030 pid:798610943 hgt:189in
eyr:2030 cid:52 hcl:#fffffd pid:041625574 ecl:amb iyr:2016 byr:1944
hgt:191cm
byr:1995
iyr:2015 cid:221 pid:279080024
eyr:2022
hgt:181cm ecl:brn hcl:#888785
hcl:z
ecl:blu
iyr:1970
eyr:2022
hgt:193cm pid:#540e31 cid:95 byr:1952
hcl:z eyr:2024 ecl:hzl
byr:2028
cid:323 pid:1949331457
hgt:69
eyr:2030 hcl:#866857
cid:173 iyr:2017
hgt:190cm byr:1941
ecl:blu
pid:269015932
hcl:#b6652a
iyr:2018
eyr:2022 ecl:brn hgt:185cm pid:456195468
hcl:#6b5442 hgt:188cm
iyr:2019 byr:1966 cid:298
pid:050653473
ecl:gry eyr:2028
cid:208
ecl:amb eyr:2023 hgt:176cm byr:1971 hcl:#7d3b0c pid:650190272 iyr:2018
hgt:68in pid:615309584
iyr:2011 byr:1950
hcl:#efcc98 ecl:oth
eyr:2024
eyr:2022 iyr:2011 hcl:#623a2f ecl:amb byr:1955
hgt:190cm
pid:244918527
iyr:2013 hcl:#ceb3a1 eyr:2029 hgt:164cm
ecl:oth
byr:1928 pid:337615663
hcl:#ceb3a1 pid:#ae7eea byr:2027
cid:254
hgt:125
iyr:1940
ecl:zzz
pid:033663619 iyr:2012 byr:1989 eyr:2030 ecl:hzl
hcl:#b6652a hgt:154cm
hgt:175cm byr:1929 pid:100788192
ecl:#92b14c
iyr:1940 hcl:#ceb3a1 eyr:2033
eyr:2029
pid:357835141 ecl:oth iyr:2019 hcl:#866857 hgt:154cm byr:1954
pid:895992818 byr:1965 iyr:2017 hcl:#efcc98 ecl:amb hgt:153cm eyr:2025
byr:1928 ecl:amb hgt:168cm pid:346938111 eyr:2025 iyr:2014
hcl:#cfa07d
hcl:#b6652a pid:825661608 eyr:2020 iyr:2019 byr:1974
hgt:180cm ecl:amb
byr:1970 hgt:159cm hcl:#733820 pid:101838832 iyr:2015 eyr:2027 ecl:blu
byr:1941 ecl:amb
eyr:2024 pid:015890498
hgt:175cm
iyr:2018 hcl:#cfa07d
hgt:67in
pid:404983369 eyr:2023 iyr:2018 byr:1974 hcl:#602927
ecl:blu
byr:1957
hcl:#fcc940 pid:615831236
iyr:2018 eyr:2020 ecl:brn hgt:181cm cid:218
hcl:#fffffd ecl:grn pid:271614109
eyr:2028 hgt:184cm byr:1974 iyr:2015
ecl:#e45ee0 pid:151cm cid:127 iyr:2014 byr:2022 hcl:973bc1 eyr:2033 hgt:181in
hcl:#6b5442 pid:502739402 eyr:2020 byr:1926 ecl:brn
iyr:2010
ecl:xry hgt:169cm byr:2023
iyr:1973 pid:4137668
eyr:2037 hcl:z
ecl:#3a8c46 hcl:43730a pid:57210146 eyr:2031 cid:117 iyr:2013 byr:2010
hcl:#341e13 cid:237 hgt:150cm iyr:2016 byr:1967 ecl:blu
pid:674080319 eyr:2024
iyr:2011 hcl:#866857 pid:111247018
byr:1920 hgt:192in ecl:#8bf268 eyr:2021
iyr:2022 hcl:z ecl:gry
hgt:159cm
pid:#88e8df
byr:2026 eyr:2032 cid:221
hgt:156cm eyr:2026
ecl:blu
hcl:#192dea cid:280 pid:788808021 byr:1980
iyr:2013
hgt:156in
byr:2024 hcl:4e4dd6
eyr:2030
iyr:2028 pid:35683378
ecl:#3a9fba
pid:081236370 cid:150 hcl:d15b43 byr:2029 hgt:118 iyr:2026 eyr:2038
ecl:grt
eyr:2034 pid:186cm
ecl:utc cid:300 iyr:2009 byr:2018 hcl:163913 hgt:74cm
ecl:hzl
pid:249858519 byr:1936 hgt:182cm
cid:343 iyr:2013 eyr:2030 hcl:#7d3b0c
cid:168
ecl:hzl
hgt:174cm iyr:2020
pid:446135799 hcl:#888785
eyr:2024 byr:1998
pid:545342162
hcl:#5cd3bd cid:126
eyr:2024
iyr:2012 ecl:grn
pid:104835585
byr:1989 hcl:#733820 ecl:oth eyr:2024 iyr:2017
hgt:180cm
hgt:184cm byr:2001 pid:199216567 ecl:gry
eyr:2022
cid:185 hcl:#7d3b0c
iyr:2019
byr:1996 eyr:2022 pid:503963080 ecl:grn iyr:2010 hcl:#fffffd
eyr:2030 iyr:2017
pid:472300557 hcl:#a97842
ecl:grn hgt:190cm
byr:1994
ecl:#2a8a59
eyr:2027
iyr:2015 byr:2021 hgt:158cm pid:365979521 hcl:z cid:242
ecl:gry
iyr:2020 hcl:#866857
pid:363851353 cid:319 hgt:154cm eyr:2027
byr:1953
ecl:grn hgt:165cm eyr:2026
pid:443722683 hcl:#341e13
iyr:2018 byr:1923
byr:1920 ecl:blu
cid:193 hgt:153cm hcl:#341e13 iyr:2010 pid:934896568
eyr:2021
eyr:2025
pid:524699651 cid:92
hcl:#602927 byr:1999
iyr:2011 ecl:brn hgt:164cm
eyr:2030 pid:739947771 iyr:2018
byr:1990
hgt:185cm hcl:#602927 ecl:gry
byr:1967 ecl:amb iyr:2020 hcl:#341e13
hgt:165cm
pid:681478012 eyr:2028
pid:807715479 ecl:blu byr:1955 eyr:1972 iyr:2018 hcl:#a97842 hgt:151
pid:635008585 cid:97
hgt:186cm hcl:#b6652a iyr:2015 eyr:2020 ecl:gry byr:1959
iyr:2017
cid:155 byr:1999 pid:550276277
hcl:#18171d
eyr:2020 hgt:164cm ecl:amb
byr:1977 hcl:#6b5442 ecl:grn iyr:2012 hgt:156cm
eyr:2028 pid:125635376
hgt:65in pid:042700658 byr:1962 iyr:2020
hcl:#888785 eyr:2021 ecl:gry
ecl:blu iyr:2017 hcl:#efcc98 pid:447451869 hgt:176cm
byr:1958
eyr:2024
ecl:amb hgt:155cm eyr:2022 hcl:#efcc98
pid:614496034 byr:1957
iyr:2016
cid:99
eyr:2020
ecl:amb iyr:2017
hgt:163cm pid:128207503 byr:1977
hcl:#866857
ecl:amb cid:342 eyr:2026 hgt:172cm pid:317675262
byr:1942 hcl:#a97842 iyr:2010
ecl:grn pid:077163993
hgt:187cm hcl:#341e13 iyr:2012 byr:1934 eyr:2024
pid:423538706 hgt:156cm
ecl:oth hcl:#341e13 iyr:2016 eyr:2028
iyr:2030 ecl:#faff64
byr:2012
pid:734434105 hgt:164in hcl:z eyr:2023
hgt:150in iyr:2016 pid:173cm hcl:db675a cid:219 eyr:2032 byr:1958
ecl:xry
pid:087437383
eyr:2025 hgt:178cm ecl:gry byr:1954
cid:227 hcl:#fffffd
iyr:2018
pid:152cm
iyr:2030 eyr:2030
byr:2010 hcl:z
hgt:155cm
ecl:amb
byr:1934
hcl:#341e13 hgt:167cm
pid:#7356dd ecl:amb
iyr:2011
eyr:2030
cid:123
eyr:2027
byr:2005
hgt:173cm cid:174 hcl:#ceb3a1 iyr:2018 ecl:amb pid:179cm
iyr:2019 ecl:grn eyr:2023
hgt:162cm
pid:649681621 hcl:#4ee6d2 byr:1955
hgt:165cm byr:1929 ecl:blu pid:839016251 iyr:2017 hcl:#c0946f
eyr:2020
eyr:2020
iyr:2017 hcl:#c7ed42 ecl:blu byr:1928
hgt:74in pid:112604496
eyr:2026 hgt:184 cid:113
byr:1933
pid:952646285
iyr:2019 hcl:#fffffd ecl:gry
pid:455008820 byr:1982 eyr:2030 ecl:gry iyr:2020 cid:103 hcl:#733820 hgt:184cm
hcl:#733820 iyr:2020 hgt:182cm ecl:grn
cid:226 pid:081011361 eyr:2022 byr:1995
iyr:1999
hcl:#18171d pid:9252198900
ecl:amb byr:1999 hgt:175cm eyr:2021
iyr:2020 hgt:165cm
ecl:blu
eyr:2023 pid:760213482
byr:1968
hcl:#c0946f
pid:242381670 ecl:amb
hgt:172cm byr:1980 eyr:2020 iyr:2014 hcl:#866857
byr:2021 pid:#a94a22 hcl:#cfa07d iyr:1969 eyr:2030 ecl:zzz
hgt:76cm
ecl:oth cid:168
byr:1954 pid:079481919 eyr:2025 hcl:#c0946f hgt:172cm
hgt:171cm
eyr:2030
byr:1969 cid:170
pid:164128658 ecl:amb
hcl:#c2265e iyr:2019
byr:1983
cid:163
eyr:2020 pid:232659795 iyr:2013 hcl:#888785 hgt:162cm
ecl:blu
ecl:gry hcl:#7d3b0c
pid:001171231 eyr:2020
byr:1935 hgt:160cm
iyr:2011
iyr:2012 hcl:#a97842
eyr:2029 pid:809880438 hgt:164cm cid:83 byr:1961 ecl:hzl
cid:288 eyr:2027
hgt:181cm byr:1955
iyr:2020
ecl:oth pid:754135833 hcl:#c0946f
iyr:2012 pid:053980893
cid:54 byr:1961 ecl:gry hcl:#602927 eyr:2020 hgt:167cm
iyr:2013
eyr:2025
hgt:176cm pid:169006156 cid:270 ecl:oth byr:2001
cid:244 pid:914067457
iyr:2017 byr:1926 hcl:#733820 ecl:brn hgt:187cm
eyr:2030
ecl:oth byr:1942
hgt:176cm iyr:2020 eyr:2027
hcl:#efcc98
pid:688816242
hgt:177cm hcl:#efcc98 eyr:2030 pid:888703414
iyr:2010 byr:1973 ecl:gry
cid:257 eyr:2030
ecl:brn
pid:359774824
byr:1988 hcl:#6b5442 iyr:2013 hgt:187cm
iyr:2011 hgt:173cm cid:290 byr:2000 ecl:gry
hcl:#7d3b0c
pid:743371399 eyr:2029
cid:162
eyr:1920 byr:2010 pid:#69d6ba hgt:74 hcl:z ecl:#d256f3 iyr:1933
pid:435518624 byr:1938 eyr:2027 iyr:2016 hcl:#18171d
hgt:161cm
ecl:gry
ecl:gry eyr:2027 hcl:#7d3b0c hgt:170cm
pid:928345976 iyr:2020
hcl:#5f4023 ecl:blu
pid:024527693
eyr:1932 iyr:2023 hgt:154cm byr:1948
cid:284 iyr:2011 byr:1920 eyr:2024 ecl:blu hgt:153cm
hcl:#602927 pid:005741906
iyr:2029 hgt:108 byr:2029 hcl:c8b25d
pid:522512400 eyr:2038 ecl:zzz cid:163
pid:371295649
eyr:2022 ecl:hzl
iyr:2019 hgt:153cm byr:1961
hcl:z
eyr:2027 iyr:2020 pid:619653661 byr:1968 hcl:#b6652a cid:62 ecl:hzl
hgt:186cm
iyr:1931
pid:565552342 ecl:#af97bb hcl:c92cd6 eyr:1931 byr:2025 hgt:184in
hgt:187cm
ecl:grn
byr:1954 cid:145
iyr:2016
hcl:#efcc98 eyr:2030 pid:202254357
cid:177
iyr:2013 byr:1926 hcl:#efcc98
pid:298693475 hgt:181cm eyr:2023 ecl:dne
byr:2014
cid:255
iyr:1951 hgt:72in
hcl:#efcc98 eyr:2039 pid:135688013
ecl:grn
byr:2019 eyr:1971 pid:#a95cb4
hcl:#ceb3a1 ecl:#6f919c
hgt:193cm iyr:2012
pid:497726268
ecl:grn
eyr:2025 hcl:#efcc98 iyr:2019 hgt:170cm byr:1970
byr:1939 hcl:#18171d cid:250
iyr:2011 ecl:blu pid:216607711
hgt:158cm eyr:2029
byr:1937
eyr:1931
hcl:#5ee898
pid:#876b1a hgt:190cm
cid:277 ecl:#5f0f80 iyr:2013
ecl:oth hgt:191cm eyr:2025 byr:1978 pid:271136754 hcl:#888785
iyr:2012
hcl:#6b5442
iyr:2015 byr:1958 pid:510020331 hgt:158cm eyr:2024 ecl:blu
byr:1998 cid:142 eyr:2026 iyr:2015 hcl:#733820
pid:671943334 hgt:186cm ecl:oth
eyr:2025 ecl:brn hcl:#7d3b0c pid:000803215
byr:1947
iyr:2017 hgt:168cm cid:230
pid:612432109 hgt:186cm byr:1963 ecl:hzl iyr:2019 eyr:2027
hcl:#efcc98
cid:148
hcl:#c0946f pid:846986027 eyr:2025 byr:1941
cid:154 hgt:158cm iyr:2012
ecl:brn
ecl:gry hgt:186cm
iyr:2015 hcl:#602927 byr:1923 eyr:2023
pid:48544569
pid:857428120 hgt:158cm hcl:#e4a267 iyr:2014 eyr:2020 byr:1975 ecl:blu
ecl:blu pid:559783197 byr:1935 cid:119 iyr:2017 hgt:157cm hcl:#6b5442 eyr:2020
ecl:oth pid:724332293 hcl:#602927
cid:77 iyr:2019
byr:2001 hgt:192cm eyr:2024
ecl:hzl eyr:2031
hcl:#efcc98 byr:2011 cid:280 iyr:2017
pid:377875085
hgt:172cm
byr:1947 hgt:174cm ecl:amb iyr:2018 cid:94 hcl:#a97842 eyr:2026 pid:286225332
hgt:85 ecl:xry eyr:2033 iyr:1952 pid:92902290
hcl:a6f86d
byr:2013
byr:1935 hcl:#c0946f pid:368741489 ecl:blu
eyr:2020 hgt:164cm
iyr:2018
cid:196
pid:718568707
ecl:oth byr:2003 hcl:#a97842 iyr:2010 hgt:168cm eyr:2025 cid:261
hcl:#6b5442
pid:675429853
hgt:62in ecl:grn iyr:2016
eyr:2027 byr:1932
byr:1978
pid:080846464 hcl:#ceb3a1 ecl:gry iyr:2015 hgt:190cm eyr:2029
pid:1756319674
iyr:2010 byr:1998 hcl:#866857 cid:259
eyr:2025 hgt:73in ecl:hzl
eyr:2035
hcl:z hgt:61cm
pid:3267812127
cid:230
byr:2029 iyr:2028 ecl:lzr
hgt:161cm ecl:hzl byr:1934 iyr:2011 eyr:2025 hcl:#cfa07d pid:354474868
pid:727482965
hcl:#623a2f iyr:2010 hgt:156cm eyr:2020 cid:68 ecl:grn byr:1950
pid:040800697 hgt:186cm
hcl:#341e13 iyr:2030 ecl:hzl
byr:1937 eyr:2020
iyr:2013 byr:1928 pid:752644096 eyr:2030 hgt:191cm ecl:hzl
cid:93 hcl:#a97842
pid:022267155 hcl:#cfa07d eyr:2026
ecl:hzl
hgt:187cm iyr:2014 cid:347
hgt:73in
eyr:2021 pid:054367702 ecl:amb hcl:#18171d byr:1965
iyr:2020 cid:267
eyr:2022
cid:140 pid:189859171 byr:1984 iyr:2020 ecl:brn hgt:166cm hcl:#623a2f
byr:1971 iyr:2015
hgt:168cm
eyr:2020 pid:650970816 hcl:#341e13
ecl:grn
cid:168
hcl:#c0946f byr:1948 hgt:189cm
pid:868785851
cid:194 ecl:amb eyr:2024 iyr:2011
eyr:2040
byr:2030 hcl:afde59
hgt:172cm pid:72468598 iyr:1990 cid:165 ecl:#896a8e
iyr:2009 hcl:#6b5442
eyr:2028
cid:53 ecl:hzl
hgt:165cm byr:1999 pid:844037301
cid:281 eyr:2022
iyr:2020 byr:1976 hgt:176cm hcl:#6b5442 ecl:amb pid:755280305
hgt:154cm iyr:2013
pid:059284139 byr:1992
cid:215 ecl:blu eyr:2025 hcl:#b6652a
ecl:grn
cid:308
hgt:187cm pid:009080324 eyr:2027
iyr:2012 byr:1955
pid:083241291 hcl:#7c1810 eyr:2030 iyr:2019 byr:1950 ecl:brn hgt:72in
cid:148 byr:1953 hcl:#623a2f
pid:076848285 hgt:175cm iyr:2017
eyr:2022
ecl:oth
iyr:2020
hgt:160cm
eyr:2028 cid:312 ecl:brn hcl:#888785 pid:681067688 byr:1986
iyr:1972 cid:170 eyr:2023
pid:21811501 ecl:#17c6e8
hgt:158in byr:2015 hcl:5b7956
pid:720571739 cid:304 byr:1951 hgt:191cm
eyr:2025 hcl:#341e13
iyr:2011
eyr:2020 ecl:blu hcl:#cfa07d pid:097863725
hgt:150cm
byr:1951
cid:143 iyr:2013
eyr:2027 iyr:2019 ecl:#a0eeca hcl:#c0946f pid:724783488 byr:1943 cid:282 hgt:124
byr:2012
iyr:2013 eyr:2036 hcl:z hgt:97
pid:#677847 ecl:dne
pid:341708492 hgt:190cm
byr:1988 hcl:#888785
ecl:hzl
iyr:2015 eyr:2029
iyr:2020 byr:1968
ecl:gry
eyr:2030 hcl:#1976b0
cid:127 pid:701862616
hgt:161cm"""
# Split the puzzle input into one record per passport; records are
# separated by blank lines.  (The identity comprehension that used to
# wrap this call was redundant -- split() already returns a list.)
# NOTE(review): the upstream variable `input` shadows the builtin.
inputs = input.split("\n\n")
"""one"""
# Part one: a passport is valid when every required field is present
# ('cid' is deliberately optional).
requirements = ['ecl', 'pid', 'eyr', 'hcl', 'byr', 'iyr', 'hgt']
total = len(inputs)
valid = []
# repo: ntiufalara/openerp7 -- file: openerp/addons/event/event.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
class event_type(osv.osv):
    """ Event Type """
    _name = 'event.type'
    _description = __doc__
    # Defaults that pre-fill a new event created with this type: reply-to
    # address, confirmation e-mail templates, and registration bounds.
    _columns = {
        'name': fields.char('Event Type', size=64, required=True),
        'default_reply_to': fields.char('Default Reply-To', size=64,help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one." ),
        'default_email_event': fields.many2one('email.template','Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event"),
        'default_email_registration': fields.many2one('email.template','Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event"),
        'default_registration_min': fields.integer('Default Minimum Registration', help="It will select this default minimum value when you choose this event"),
        'default_registration_max': fields.integer('Default Maximum Registration', help="It will select this default maximum value when you choose this event"),
    }
    # 0/0 means "no minimum, unlimited maximum" by convention in this module.
    _defaults = {
        'default_registration_min': 0,
        'default_registration_max': 0,
    }
# Legacy OpenERP (<= 7.0) idiom: instantiating the class registers the model.
event_type()
class event_event(osv.osv):
"""Event"""
_name = 'event.event'
_description = __doc__
_order = 'date_begin'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
date = record.date_begin.split(" ")[0]
date_end = record.date_end.split(" ")[0]
if date != date_end:
date += ' - ' + date_end
display_name = record.name + ' (' + date + ')'
res.append((record['id'], display_name))
return res
def copy(self, cr, uid, id, default=None, context=None):
""" Reset the state and the registrations while copying an event
"""
if not default:
default = {}
default.update({
'state': 'draft',
'registration_ids': False,
})
return super(event_event, self).copy(cr, uid, id, default=default, context=context)
    def button_draft(self, cr, uid, ids, context=None):
        """Reset the given events to the 'draft' state."""
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
registration = self.pool.get('event.registration')
reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context)
for event_reg in registration.browse(cr,uid,reg_ids,context=context):
if event_reg.state == 'done':
raise osv.except_osv(_('Error!'),_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.") )
registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context)
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
    def button_done(self, cr, uid, ids, context=None):
        """Mark the given events as 'done'."""
        return self.write(cr, uid, ids, {'state': 'done'}, context=context)
    def check_registration_limits(self, cr, uid, ids, context=None):
        """Validate that each event's confirmed registrations fall inside
        its configured min/max bounds, raising otherwise."""
        # WARNING: the loop variable is bound to ``self.event`` (not a
        # local).  confirm_event() reads ``self.event`` afterwards, so
        # rewriting this loop with a local variable would break that
        # (fragile) coupling -- do not "fix" without updating both methods.
        for self.event in self.browse(cr, uid, ids, context=context):
            total_confirmed = self.event.register_current
            # Precedence note: ``and`` binds tighter than ``or`` -- fails when
            # below the minimum, or when above a non-zero maximum.
            if total_confirmed < self.event.register_min or total_confirmed > self.event.register_max and self.event.register_max!=0:
                raise osv.except_osv(_('Error!'),_("The total of confirmed registration for the event '%s' does not meet the expected minimum/maximum. Please reconsider those limits before going further.") % (self.event.name))
def check_registration_limits_before(self, cr, uid, ids, no_of_registration, context=None):
for event in self.browse(cr, uid, ids, context=context):
available_seats = event.register_avail
if available_seats and no_of_registration > available_seats:
raise osv.except_osv(_('Warning!'),_("Only %d Seats are Available!") % (available_seats))
elif available_seats == 0:
raise osv.except_osv(_('Warning!'),_("No Tickets Available!"))
    def confirm_event(self, cr, uid, ids, context=None):
        """Move the events to 'confirm', e-mailing already-confirmed
        registrants when a confirmation template is set."""
        # NOTE(review): this reads ``self.event``, which is only populated as
        # a side effect of check_registration_limits() iterating with
        # ``for self.event in ...``.  Calling confirm_event() on its own
        # would fail -- confirm before refactoring either method.
        register_pool = self.pool.get('event.registration')
        if self.event.email_confirmation_id:
            #send reminder that will confirm the event for all the people that were already confirmed
            reg_ids = register_pool.search(cr, uid, [
                               ('event_id', '=', self.event.id),
                               ('state', 'not in', ['draft', 'cancel'])], context=context)
            register_pool.mail_user_confirm(cr, uid, reg_ids)
        return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
def button_confirm(self, cr, uid, ids, context=None):
""" Confirm Event and send confirmation email to all register peoples
"""
if isinstance(ids, (int, long)):
ids = [ids]
self.check_registration_limits(cr, uid, ids, context=context)
return self.confirm_event(cr, uid, ids, context=context)
def _get_register(self, cr, uid, ids, fields, args, context=None):
"""Get Confirm or uncofirm register value.
@param ids: List of Event registration type's id
@param fields: List of function fields(register_current and register_prospect).
@param context: A standard dictionary for contextual values
@return: Dictionary of function fields value.
"""
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = {}
reg_open = reg_done = reg_draft =0
for registration in event.registration_ids:
if registration.state == 'open':
reg_open += registration.nb_register
elif registration.state == 'done':
reg_done += registration.nb_register
elif registration.state == 'draft':
reg_draft += registration.nb_register
for field in fields:
number = 0
if field == 'register_current':
number = reg_open
elif field == 'register_attended':
number = reg_done
elif field == 'register_prospect':
number = reg_draft
elif field == 'register_avail':
#the number of ticket is unlimited if the event.register_max field is not set.
#In that cas we arbitrary set it to 9999, it is used in the kanban view to special case the display of the 'subscribe' button
number = event.register_max - reg_open if event.register_max != 0 else 9999
res[event.id][field] = number
return res
def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None):
"""This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids)
"""
register_pool = self.pool.get('event.registration')
res = {}
for event in self.browse(cr, uid, ids, context=context):
res[event.id] = False
curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)])
if curr_reg_id:
for reg in register_pool.browse(cr, uid, curr_reg_id, context=context):
if reg.state in ('open','done'):
res[event.id]= True
continue
return res
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}),
'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}),
'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}),
'register_max': fields.integer('Maximum Registrations', help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'register_min': fields.integer('Minimum Registrations', help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )", readonly=True, states={'draft': [('readonly', False)]}),
'register_current': fields.function(_get_register, string='Confirmed Registrations', multi='register_numbers'),
'register_avail': fields.function(_get_register, string='Available Registrations', multi='register_numbers',type='integer'),
'register_prospect': fields.function(_get_register, string='Unconfirmed Registrations', multi='register_numbers'),
'register_attended': fields.function(_get_register, string='# of Participations', multi='register_numbers'),
'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}),
'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')],
'Status', readonly=True, required=True,
track_visibility='onchange',
help='If event is created, the status is \'Draft\'.If event is confirmed for the particular dates the status is set to \'Confirmed\'. If the event is over, the status is set to \'Done\'.If event is cancelled the status is set to \'Cancelled\'.'),
'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'),
'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help="If you set an email template, each participant will receive this email announcing the confirmation of the event."),
'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one."),
'main_speaker_id': fields.many2one('res.partner','Main Speaker', readonly=False, states={'done': [('readonly', True)]}, help="Speaker who will be giving speech at the event."),
'address_id': fields.many2one('res.partner','Location Address', readonly=False, states={'done': [('readonly', True)]}),
'street': fields.related('address_id','street',type='char',string='Street'),
'street2': fields.related('address_id','street2',type='char',string='Street2'),
'state_id': fields.related('address_id','state_id',type='many2one', relation="res.country.state", string='State'),
'zip': fields.related('address_id','zip',type='char',string='zip'),
'city': fields.related('address_id','city',type='char',string='city'),
'speaker_confirmed': fields.boolean('Speaker Confirmed', readonly=False, states={'done': [('readonly', True)]}),
'country_id': fields.related('address_id', 'country_id',
type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}),
'note': fields.text('Description', readonly=False, states={'done': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}),
'is_subscribed' : fields.function(_subscribe_fnc, type="boolean", string='Subscribed'),
}
_defaults = {
'state': 'draft',
'company_id': | |
# Repository: TobyChen0106/DeepQ_Final_B05901170 (GitHub stars: 0)
import torch.nn as nn
import torch
import math
import time
import torch.utils.model_zoo as model_zoo
from utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes
from anchors import Anchors
import losses
from lib.nms.pth_nms import pth_nms
def nms(dets, thresh):
    """Dispatch to either the CPU or GPU NMS implementation.

    Thin wrapper around ``lib.nms.pth_nms``.  Accepts ``dets`` as a tensor of
    boxes with their scores; the caller (``ResNet.forward``) uses the result
    as indices of the detections kept after non-maximum suppression at IoU
    threshold ``thresh``.
    """
    return pth_nms(dets, thresh)
# ImageNet-pretrained backbone weight URLs, fetched via
# ``torch.utils.model_zoo`` by the ``pretrained=True`` constructors below.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
class PyramidFeatures(nn.Module):
    """FPN neck: builds pyramid levels P2-P7 from backbone stages C2-C5.

    Lateral 1x1 convolutions project each backbone stage down to
    ``feature_size`` channels; a top-down pathway adds the (nearest-neighbor)
    upsampled coarser level before a 3x3 smoothing convolution.  P2 is an
    extra high-resolution level merged into C2 (project customization); P6/P7
    are produced by stride-2 convolutions as in the RetinaNet paper.
    """

    def __init__(self, C2_size, C3_size, C4_size, C5_size, feature_size=256):
        super(PyramidFeatures, self).__init__()

        # P5: lateral projection of C5, upsampled copy feeds P4.
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # P4: lateral projection of C4 merged with upsampled P5.
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # P3: lateral projection of C3 merged with upsampled P4.
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # P2: extra fine level merged into C2 (project customization).
        self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P2_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # "P6 is obtained via a 3x3 stride-2 conv on C5".
        self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)

        # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6".
        self.P7_1 = nn.ReLU()
        self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)

    def forward(self, C2, C3, C4, C5):
        # Top-down pathway with lateral connections.
        p5 = self.P5_1(C5)
        p5_up = self.P5_upsampled(p5)
        p5 = self.P5_2(p5)

        p4 = self.P4_1(C4) + p5_up
        p4_up = self.P4_upsampled(p4)
        p4 = self.P4_2(p4)

        p3 = self.P3_1(C3) + p4_up
        p3_up = self.P3_upsampled(p3)
        p3 = self.P3_2(p3)

        # Extra high-resolution output merged into C2.
        p2 = self.P2_2(self.P2_1(C2) + p3_up)

        # Coarse levels derived directly from C5.
        p6 = self.P6(C5)
        p7 = self.P7_2(self.P7_1(p6))

        return [p2, p3, p4, p5, p6, p7]
class RegressionModel(nn.Module):
    """Box-regression subnet: predicts 4 offsets per anchor per location.

    Four shared 3x3 conv + ReLU stages followed by a 3x3 conv that emits
    ``4 * num_anchors`` channels; the output is reshaped to one 4-vector per
    anchor, i.e. ``(batch, H * W * num_anchors, 4)``.
    """

    def __init__(self, num_features_in, num_anchors=9, feature_size=256):
        super(RegressionModel, self).__init__()
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchors*4, kernel_size=3, padding=1)

    def forward(self, x):
        feat = x
        for conv, act in ((self.conv1, self.act1), (self.conv2, self.act2),
                          (self.conv3, self.act3), (self.conv4, self.act4)):
            feat = act(conv(feat))
        feat = self.output(feat)
        # (B, 4*A, H, W) -> (B, H, W, 4*A) -> (B, H*W*A, 4)
        feat = feat.permute(0, 2, 3, 1)
        return feat.contiguous().view(feat.shape[0], -1, 4)
class ClassificationModel(nn.Module):
    """Classification subnet: per-anchor class probabilities via sigmoid.

    Four shared 3x3 conv + ReLU stages, with dropout inserted after the 3rd
    and 4th stages (project customization), followed by a 3x3 conv emitting
    ``num_anchors * num_classes`` channels.  Output shape is
    ``(batch, H * W * num_anchors, num_classes)`` with values in [0, 1].
    """

    def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256, dropout1=0.25, dropout2=0.25):
        super(ClassificationModel, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        # Dropout after the 3rd stage (project customization).
        self.dropout1 = nn.Dropout(p=dropout1)
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        # Dropout after the 4th stage (project customization).
        self.dropout2 = nn.Dropout(p=dropout2)
        self.output = nn.Conv2d(feature_size, num_anchors*num_classes, kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, x):
        feat = self.act1(self.conv1(x))
        feat = self.act2(self.conv2(feat))
        feat = self.dropout1(self.act3(self.conv3(feat)))
        feat = self.dropout2(self.act4(self.conv4(feat)))
        feat = self.output_act(self.output(feat))
        # (B, A*K, H, W) -> (B, H, W, A*K) -> (B, H*W*A, K)
        feat = feat.permute(0, 2, 3, 1).contiguous()
        return feat.view(x.shape[0], -1, self.num_classes)
class ResNet(nn.Module):
    """RetinaNet-style detector: ResNet/ResNeXt backbone + FPN neck +
    per-level classification and box-regression subnets.

    In training mode ``forward`` expects ``(img_batch, annotations)`` and
    returns the focal loss; in eval mode it expects ``img_batch`` alone and
    returns ``[scores, classes, boxes]`` after score thresholding and NMS.
    """

    def __init__(self, num_classes, block, layers, groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 dropout1=0.25, dropout2=0.25, alpha=0.25, gamma=2.0,
                 loss_with_no_bboxes=False, no_bboxes_alpha=0.5, no_bboxes_gamma=2.0):
        # Backbone follows the torchvision ResNet/ResNeXt construction
        # (groups / width_per_group / dilation support customized by
        # Yu Han Huang).
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # Each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead.
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        # Channel counts of the C2..C5 stages feeding the FPN.  BasicBlock's
        # last conv is conv2; Bottleneck's is conv3.
        if block == BasicBlock:
            fpn_sizes = [self.layer1[layers[0]-1].conv2.out_channels, self.layer2[layers[1]-1].conv2.out_channels,
                         self.layer3[layers[2]-1].conv2.out_channels, self.layer4[layers[3]-1].conv2.out_channels]
        elif block == Bottleneck:
            # BUG FIX: this branch previously re-tested ``block == BasicBlock``
            # and was therefore unreachable, leaving ``fpn_sizes`` undefined
            # for Bottleneck backbones (UnboundLocalError below).
            fpn_sizes = [self.layer1[layers[0]-1].conv3.out_channels, self.layer2[layers[1]-1].conv3.out_channels,
                         self.layer3[layers[2]-1].conv3.out_channels, self.layer4[layers[3]-1].conv3.out_channels]
        else:
            raise ValueError("block should be BasicBlock or Bottleneck, got {}".format(block))
        self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2], fpn_sizes[3])
        self.regressionModel = RegressionModel(256)
        self.classificationModel = ClassificationModel(256, num_classes=num_classes, dropout1=dropout1, dropout2=dropout2)
        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.focalLoss = losses.FocalLoss(alpha=alpha, gamma=gamma, loss_with_no_bboxes=loss_with_no_bboxes, no_bboxes_alpha=no_bboxes_alpha, no_bboxes_gamma=no_bboxes_gamma)
        # He-style init for convolutions, constant init for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Bias the classification output so initial predictions are ~``prior``
        # (focal-loss initialization from the RetinaNet paper).
        prior = 0.01
        self.classificationModel.output.weight.data.fill_(0)
        self.classificationModel.output.bias.data.fill_(-math.log((1.0-prior)/prior))
        self.regressionModel.output.weight.data.fill_(0)
        self.regressionModel.output.bias.data.fill_(0)
        self.freeze_bn()

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks for one backbone stage,
        optionally replacing the stride with dilation."""
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the spatial size or channels change.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation,))
        return nn.Sequential(*layers)

    def freeze_bn(self):
        '''Freeze BatchNorm layers (keeps their running statistics fixed).'''
        for layer in self.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eval()

    def forward(self, inputs):
        if self.training:
            img_batch, annotations = inputs
        else:
            img_batch = inputs
        # Backbone: stem then the four residual stages (C2..C5).
        x = self.conv1(img_batch)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        # FPN + subnets on every pyramid level, concatenated over anchors.
        features = self.fpn(x1, x2, x3, x4)
        regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)
        classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)
        anchors = self.anchors(img_batch)
        if self.training:
            return self.focalLoss(classification, regression, anchors, annotations)
        else:
            # Decode boxes, drop low-score anchors, then NMS.
            # NOTE(review): the [0, ...] indexing assumes batch size 1 at
            # inference time.
            transformed_anchors = self.regressBoxes(anchors, regression)
            transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)
            scores = torch.max(classification, dim=2, keepdim=True)[0]
            scores_over_thresh = (scores>0.05)[0, :, 0]
            if scores_over_thresh.sum() == 0:
                # No boxes to NMS, just return empty results.
                return [torch.zeros(0), torch.zeros(0), torch.zeros(0, 4)]
            classification = classification[:, scores_over_thresh, :]
            transformed_anchors = transformed_anchors[:, scores_over_thresh, :]
            scores = scores[:, scores_over_thresh, :]
            anchors_nms_idx = nms(torch.cat([transformed_anchors, scores], dim=2)[0, :, :], 0.5)
            nms_scores, nms_class = classification[0, anchors_nms_idx, :].max(dim=1)
            return [nms_scores, nms_class, transformed_anchors[0, anchors_nms_idx, :]]
#Arguments: dropout1, dropout2, alpha, gamma,loss_with_no_bboxes, no_bboxes_alpha, no_bboxes_gamma are added into all of the retinanet model with different backbone
#(customized by <NAME>)
def resnet18(num_classes, pretrained=False, dropout1=0.25, dropout2=0.25, alpha=0.25, gamma=2.0,
             loss_with_no_bboxes=False, no_bboxes_alpha=0.5, no_bboxes_gamma=2, **kwargs):
    """Constructs a RetinaNet with a ResNet-18 backbone.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        dropout1: The dropout rate for the 3rd layer of the Classification Model
        dropout2: The dropout rate for the 4th layer of the Classification Model
        alpha: Alpha in focal loss
        gamma: Gamma in focal loss
        loss_with_no_bboxes: If True, picture with no bboxes will be taken into account
        no_bboxes_alpha: Alpha in focal loss for picture with no bboxes
        no_bboxes_gamma: Gamma in focal loss for picture with no bboxes
    """
    # BUG FIX: the original passed hard-coded literals (0.25, 2.0, ...) to
    # ResNet, silently ignoring every hyperparameter argument of this
    # function.  Forward the caller's values instead.
    model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2],
                   dropout1=dropout1, dropout2=dropout2, alpha=alpha, gamma=gamma,
                   loss_with_no_bboxes=loss_with_no_bboxes,
                   no_bboxes_alpha=no_bboxes_alpha, no_bboxes_gamma=no_bboxes_gamma, **kwargs)
    if pretrained:
        # strict=False: the detection heads have no pretrained weights.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)
    return model
def resnet34(num_classes, pretrained=False, dropout1=0.25, dropout2=0.25, alpha=0.25, gamma=2.0,
loss_with_no_bboxes=False, no_bboxes_alpha=0.5, no_bboxes_gamma=2, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
dropout1: The dropout rate for the 3rd layer of the Classification Model
dropout2: The dropout rate for the 4th layer of the Classification Model
alpha: Alpha in focal loss
gamma: Gamma in focal loss
loss_with_no_bboxes: If True, picture with no bboxes will be taken into account
no_bboxes_alpha: Alpha in focal loss for picture with no bboxes
no_bboxes_gamma: | |
# GitHub stars: 1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module computes the neutral and ionized populations of H in the
upper atmosphere.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
import astropy.units as u
import astropy.constants as c
from scipy.integrate import simps, solve_ivp, cumtrapz
from scipy.interpolate import interp1d
from p_winds import parker, tools, microphysics
__all__ = ["radiative_processes_exact", "radiative_processes",
"radiative_processes_mono", "recombination", "ion_fraction"]
# Exact calculation of hydrogen photoionization
def radiative_processes_exact(spectrum_at_planet, r_grid, density, f_r,
                              h_fraction):
    """
    Calculate the photoionization rate of hydrogen as a function of radius based
    on the EUV spectrum arriving at the planet and the neutral H density
    profile.
    Parameters
    ----------
    spectrum_at_planet (``dict``):
        Spectrum of the host star arriving at the planet covering fluxes at
        least up to the wavelength corresponding to the energy to ionize
        hydrogen (13.6 eV, or 911.65 Angstrom).
    r_grid (``numpy.ndarray``):
        Radius grid for the calculation, in units of cm.
    density (``numpy.ndarray``):
        Number density profile for the atmosphere, in units of 1 / cm ** 3.
    f_r (``numpy.ndarray`` or ``float``):
        Ionization fraction profile for the atmosphere.
    h_fraction (``float``):
        Hydrogen number fraction of the outflow.
    Returns
    -------
    phi_prime (``numpy.ndarray``):
        Ionization rate of hydrogen for each point on r_grid in unit of 1 / s.
    """
    # Convert the spectrum to plain floats in Angstrom / erg s-1 cm-2 A-1.
    wavelength = (spectrum_at_planet['wavelength'] *
                  spectrum_at_planet['wavelength_unit']).to(u.angstrom).value
    flux_lambda = (spectrum_at_planet['flux_lambda'] * spectrum_at_planet[
        'flux_unit']).to(u.erg / u.s / u.cm ** 2 / u.angstrom).value
    energy = (c.h * c.c).to(u.erg * u.angstrom).value / wavelength
    # Wavelength corresponding to the energy to ionize H
    wl_break = 911.65 # angstrom
    # Index of the lambda_0 in the wavelength array
    i_break = tools.nearest_index(wavelength, wl_break)
    # Auxiliary definitions: keep only the ionizing part of the spectrum.
    wavelength_cut = wavelength[:i_break + 1]
    flux_lambda_cut = flux_lambda[:i_break + 1]
    energy_cut = energy[:i_break + 1]
    # 2d grid of radius and wavelength
    xx, yy = np.meshgrid(wavelength_cut, r_grid)
    # Photoionization cross-section in function of wavelength
    a_lambda = microphysics.hydrogen_cross_section(wavelength=xx)
    # Optical depth to hydrogen photoionization
    m_h = 1.67262192E-24 # Proton mass in unit of kg
    # NOTE(review): the comment above says kg but the CGS value (grams) is
    # what is used — the number is the proton mass in g, consistent with the
    # cm-based densities; confirm against the rest of the package.
    r_grid_temp = r_grid[::-1]
    # We assume that the atmosphere is made of only H + He
    he_fraction = 1 - h_fraction
    f_he_to_h = he_fraction / h_fraction
    mu = (1 + 4 * f_he_to_h) / (1 + f_r + f_he_to_h)
    n_tot = density / mu / m_h
    n_htot = 1 / (1 + f_r + f_he_to_h) * n_tot
    n_h = n_htot * (1 - f_r)
    n_hetot = n_htot * f_he_to_h
    # NOTE(review): assumes the He ion fraction equals the H ion fraction
    # f_r — confirm this approximation against the helium module.
    n_he = n_hetot * (1 - f_r)
    # Column densities are accumulated from the top of the grid inward
    # (hence the [::-1] reversals and the sign flip).
    n_h_temp = n_h[::-1]
    column_h = cumtrapz(n_h_temp, r_grid_temp, initial=0)
    column_density_h = -column_h[::-1]
    tau_rnu = column_density_h[:, None] * a_lambda
    # Optical depth to helium photoionization
    n_he_temp = n_he[::-1]
    column_he = cumtrapz(n_he_temp, r_grid_temp, initial=0)
    column_density_he = -column_he[::-1]
    a_lambda_he = microphysics.helium_total_cross_section(wavelength=xx)
    tau_rnu += column_density_he[:, None] * a_lambda_he
    # Finally calculate the photoionization rate: integrate the attenuated
    # flux times the cross-section over wavelength at each radius.  abs()
    # guards against Simpson's rule returning small negative values.
    phi_prime = abs(simps(flux_lambda_cut * a_lambda / energy_cut *
                          np.exp(-tau_rnu), wavelength_cut, axis=-1))
    return phi_prime
# Stellar flux-average calculation of hydrogen photoionization
def radiative_processes(spectrum_at_planet):
    """
    Calculate the hydrogen photoionization rate at null optical depth and the
    flux-averaged photoionization cross-section from the stellar EUV spectrum
    arriving at the planet.
    Parameters
    ----------
    spectrum_at_planet (``dict``):
        Spectrum of the host star arriving at the planet covering fluxes at
        least up to the wavelength corresponding to the energy to ionize
        hydrogen (13.6 eV, or 911.65 Angstrom).
    Returns
    -------
    phi (``float``):
        Ionization rate of hydrogen at null optical depth in unit of 1 / s.
    a_0 (``float``):
        Flux-averaged photoionization cross-section of hydrogen in unit of
        cm ** 2.
    """
    # Convert the spectrum to plain floats in Angstrom / erg s-1 cm-2 A-1.
    wl = (spectrum_at_planet['wavelength'] *
          spectrum_at_planet['wavelength_unit']).to(u.angstrom).value
    f_lambda = (spectrum_at_planet['flux_lambda'] * spectrum_at_planet[
        'flux_unit']).to(u.erg / u.s / u.cm ** 2 / u.angstrom).value
    photon_energy = (c.h * c.c).to(u.erg * u.angstrom).value / wl

    # Only photons blueward of the H ionization edge (911.65 Angstrom,
    # 13.6 eV) contribute; truncate the arrays there.
    i_edge = tools.nearest_index(wl, 911.65)
    wl_cut = wl[:i_edge + 1]
    f_cut = f_lambda[:i_edge + 1]
    e_cut = photon_energy[:i_edge + 1]

    # Photoionization cross-section as a function of wavelength.
    sigma_lambda = microphysics.hydrogen_cross_section(wavelength=wl_cut)

    # Note: Simpson's rule in ``scipy`` may yield negative results when the
    # flux varies by a few orders of magnitude at the integration edges, so
    # we take the absolute values of a_0 and phi.
    a_0 = abs(simps(f_cut * sigma_lambda, wl_cut) /
              simps(f_cut, wl_cut))
    phi = abs(simps(f_cut * sigma_lambda / e_cut, wl_cut))
    return phi, a_0
# Hydrogen photoionization if you have only a monochromatic channel flux
def radiative_processes_mono(flux_euv, average_photon_energy=20.):
    """
    Calculate the photoionization rate of hydrogen at null optical depth based
    on the monochromatic EUV flux arriving at the planet.
    Parameters
    ----------
    flux_euv (``float``):
        Monochromatic extreme-ultraviolet (0 - 912 Angstrom) flux arriving at
        the planet in unit of erg / s / cm ** 2.
    average_photon_energy (``float``, optional):
        Average energy of the photons ionizing H in unit of eV. Default is 20 eV
        (as in Murray-Clay et al 2009, Allan & Vidotto 2019).
    Returns
    -------
    phi (``float``):
        Ionization rate of hydrogen at null optical depth in unit of 1 / s.
    a_0 (``float``):
        Flux-averaged photoionization cross-section of hydrogen in unit of
        cm ** 2.
    """
    # Cross-section scales as (E / 13.6 eV)^-3, anchored at 6.3e-18 cm ** 2
    # for photons at the ionization threshold.
    a_0 = 6.3E-18 * (average_photon_energy / 13.6) ** (-3)
    # Monochromatic ionization rate: photon number flux times cross-section.
    erg_to_ev = 6.24150907E+11
    phi = flux_euv * erg_to_ev * a_0 / average_photon_energy
    return phi, a_0
# Case-B hydrogen recombination
def recombination(temperature):
    """
    Calculates the case-B hydrogen recombination rate for a gas at a certain
    temperature.
    Parameters
    ----------
    temperature (``float``):
        Isothermal temperature of the upper atmosphere in unit of Kelvin.
    Returns
    -------
    alpha_rec (``float``):
        Recombination rate of hydrogen in units of cm ** 3 / s.
    """
    # Power-law fit anchored at 2.59e-13 cm**3/s for T = 1e4 K.
    scaled_temperature = temperature / 1E4
    return 2.59E-13 * scaled_temperature ** (-0.7)
# Fraction of ionized hydrogen vs. radius profile
def ion_fraction(radius_profile, planet_radius, temperature, h_fraction,
mass_loss_rate, planet_mass, mean_molecular_weight_0=1.0,
spectrum_at_planet=None, flux_euv=None, initial_f_ion=0.0,
relax_solution=False, convergence=0.01, max_n_relax=10,
exact_phi=False, return_mu=False, **options_solve_ivp):
"""
Calculate the fraction of ionized hydrogen in the upper atmosphere in
function of the radius in unit of planetary radius.
Parameters
----------
radius_profile (``numpy.ndarray``):
Radius in unit of planetary radii.
planet_radius (``float``):
Planetary radius in unit of Jupiter radius.
temperature (``float``):
Isothermal temperature of the upper atmosphere in unit of Kelvin.
h_fraction (``float``):
Total (ion + neutral) H number fraction of the atmosphere.
mass_loss_rate (``float``):
Mass loss rate of the planet in units of g / s.
planet_mass (``float``):
Planetary mass in unit of Jupiter mass.
mean_molecular_weight_0 (``float``):
Initial mean molecular weight of the atmosphere in unit of proton mass.
Default value is 1.0 (100% neutral H). Since its final value depend on
the H ion fraction itself, the mean molecular weight can be
self-consistently calculated by setting `relax_solution` to `True`.
spectrum_at_planet (``dict``, optional):
Spectrum of the host star arriving at the planet covering fluxes at
least up to the wavelength corresponding to the energy to ionize
hydrogen (13.6 eV, or 911.65 Angstrom). Can be generated using
``tools.make_spectrum_dict``. If ``None``, then ``flux_euv`` must be
provided instead. Default is ``None``.
flux_euv (``float``, optional):
Extreme-ultraviolet (0-911.65 Angstrom) flux arriving at the planet in
units of erg / s / cm ** 2. If ``None``, then ``spectrum_at_planet``
must be provided instead. Default is ``None``.
initial_f_ion (``float``, optional):
The initial ionization fraction at the layer near the surface of the
planet. Default is 0.0, i.e., 100% neutral.
relax_solution (``bool``, optional):
The first solution is calculating by initially assuming the entire
atmosphere is in neutral state. If ``True``, the solution will be
re-calculated in a loop until it converges to a delta_f of 1%, or for a
maximum of 10 loops (default parameters). Default is ``False``.
convergence (``float``, optional):
Value of delta_f at which to stop the relaxation of the solution for
``f_r``. Default is 0.01.
max_n_relax (``int``, optional):
Maximum number of loops to perform the relaxation of the solution for
``f_r``. Default is 10.
return_mu (``bool``, optional):
If ``True``, then this function returns a second variable ``mu_bar``,
which is the self-consistent, density-averaged mean molecular weight of
the atmosphere. Equivalent to the ``mu_bar`` of Eq. A.3 in | |
= Constraint(expr=m.x949*m.x949 - m.x3979*m.b3010 <= 0)
# Constraints c4051-c4262: for each continuous variable x_i (i = 950..1161),
# enforce x_i**2 <= y_j * b_k, where y_j = x_{i + 3030} and b_k is the binary
# variable of the hundred-block that x_i belongs to (b3010 for x950-x1000,
# b3011 for x1001-x1100, b3012 for x1101-x1161).
for _ci in range(4051, 4263):
    _xi = _ci - 3101
    _x = getattr(m, 'x%d' % _xi)
    _y = getattr(m, 'x%d' % (_ci - 71))
    _b = getattr(m, 'b%d' % (3000 + (_xi + 99) // 100))
    setattr(m, 'c%d' % _ci, Constraint(expr=_x*_x - _y*_b <= 0))
del _ci, _xi, _x, _y, _b
m.c4263 = Constraint(expr=m.x1162*m.x1162 | |
#
# Copyright (c) 2017-2019 AutoDeploy AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function
import sys
import json
import os
import pandas as pd
import numpy as np
# Mining-function labels describing what kind of problem a model solves.
FUNCTION_NAME_CLASSIFICATION = 'classification'
FUNCTION_NAME_REGRESSION = 'regression'
FUNCTION_NAME_CLUSTERING = 'clustering'
# Fallback label when the function cannot be determined from the model/data.
FUNCTION_NAME_UNKNOWN = 'unknown'
# Note: 'unknown' is deliberately excluded from the supported set.
SUPPORTED_FUNCTION_NAMES = (FUNCTION_NAME_CLASSIFICATION, FUNCTION_NAME_REGRESSION, FUNCTION_NAME_CLUSTERING)
# Serialization format identifiers -- NOTE(review): not referenced in this
# chunk; presumably validated by callers elsewhere in the package.
SUPPORTED_SERIALIZATIONS = ('pickle', 'joblib', 'spark', 'hdf5', 'xgboost', 'lightgbm', 'pmml', 'onnx', 'pt')
class BaseModel(object):
    """Base wrapper that extracts deployment metadata from a trained model.

    Subclasses recognize one framework each (PMML, ONNX, custom, ...) and
    implement the abstract hooks; this class supplies shared helpers to
    describe test data and compute evaluation metrics.
    """

    def __init__(self, model):
        # The raw, framework-specific model object (or its serialized form).
        self.model = model

    def is_support(self):
        """Return True when this wrapper recognizes ``self.model``."""
        raise NotImplementedError()

    def model_type(self):
        """Return a short framework identifier (e.g. 'PMML', 'ONNX')."""
        raise NotImplementedError()

    def model_version(self):
        """Return the framework version string, or None when unknown."""
        raise NotImplementedError()

    def mining_function(self, y_test):
        """Return the mining function name; unknown unless overridden."""
        return FUNCTION_NAME_UNKNOWN

    def serialization(self):
        """Return the serialization format used to persist the model."""
        raise NotImplementedError()

    def runtime(self):
        """Return the serving runtime name, e.g. 'Python37'."""
        return 'Python{major}{minor}'.format(major=sys.version_info[0], minor=sys.version_info[1])

    def algorithm(self):
        """Return the algorithm name; defaults to the model's class name."""
        return self.model.__class__.__name__

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Return a dict of evaluation metrics for the given test data."""
        raise NotImplementedError()

    def _describe_fields(self, data, column_prefix, tensor_name):
        # Shared implementation behind predictors()/targets(): describe each
        # column of tabular data, or a >2-D array as a single tensor field.
        if data is None:
            return []
        result = []
        if isinstance(data, np.ndarray) and data.ndim <= 2:
            # 1-D/2-D arrays are treated as tabular data with generated names.
            data = pd.DataFrame(data)
            data.columns = [column_prefix + str(i) for i in range(len(data.columns))]
        data = self._series_to_dataframe(data)
        if isinstance(data, pd.DataFrame):
            # Round-trip through JSON to coerce values to plain Python types.
            row = json.loads(data.iloc[0].to_json())
            for name in row.keys():
                result.append({
                    'name': name,
                    'sample': row[name],
                    'type': type(row[name]).__name__
                })
        else:  # numpy array with more than two dimensions
            row = data[0]
            result.append({
                'name': tensor_name,
                'sample': row.tolist(),
                'type': data.dtype.name,
                'shape': self._normalize_np_shape(data.shape)
            })
        return result

    def predictors(self, x_test, data_test):
        """Describe the input fields using the first row of ``x_test``."""
        return self._describe_fields(x_test, 'x', 'tensor_input')

    def targets(self, y_test, data_test):
        """Describe the target fields using the first row of ``y_test``."""
        return self._describe_fields(y_test, 'y', 'tensor_target')

    def outputs(self, y_test, data_test, **kwargs):
        """Describe model output fields; none by default."""
        return []

    @staticmethod
    def extract_major_minor_version(version):
        """Reduce a version 'X.Y.Z...' to 'X.Y'; shorter forms pass through."""
        elements = version.split('.')
        if len(elements) > 2:
            return '{major}.{minor}'.format(major=elements[0], minor=elements[1])
        return version

    @staticmethod
    def evaluate_metrics_by_sklearn(wrapped_model, x_test, y_test, input_function_name):
        """Best-effort scoring of sklearn-compatible models.

        Returns {'accuracy': ...} for classification,
        {'explainedVariance': ...} for regression, and {} otherwise or on any
        scoring failure (metrics are optional metadata).
        """
        if x_test is None or y_test is None:
            return {}
        try:
            function_name = input_function_name if input_function_name else wrapped_model.mining_function(y_test)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                y_pred = wrapped_model.model.predict(x_test)
                return {'accuracy': accuracy_score(y_test, y_pred)}
            if function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                y_pred = wrapped_model.model.predict(x_test)
                return {'explainedVariance': explained_variance_score(y_test, y_pred)}
            return {}
        except Exception:  # was a bare except; don't mask SystemExit et al.
            return {}

    @staticmethod
    def _normalize_np_shape(shape):
        """Return ``shape`` as a list with a wildcard (None) batch dimension,
        or None for 0/1-dimensional shapes."""
        if shape is None or len(shape) <= 1:
            return None
        return [None] + list(shape[1:])

    @staticmethod
    def _series_to_dataframe(data):
        """Promote a pandas Series to a single-column DataFrame."""
        if isinstance(data, pd.Series):
            return pd.DataFrame(data)
        return data

    def _test_data_to_ndarray(self, x_y_test, data_test):
        """Return the test data as a numpy array (via DataFrame conversion)."""
        data = self._to_dataframe(x_y_test, data_test)
        if isinstance(data, pd.DataFrame):
            return data.values
        return data

    @staticmethod
    def _to_ndarray(data):
        """Unwrap pandas objects to their underlying numpy values."""
        return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

    @staticmethod
    def _to_dataframe(x_y_test, data_test):
        """Return the test data as a DataFrame; fall back to the first row of
        a Spark DataFrame when no local test data was given."""
        if x_y_test is None and data_test is not None:
            x_y_test = data_test.limit(1).toPandas()
        if isinstance(x_y_test, pd.Series):
            x_y_test = pd.DataFrame(x_y_test)
        return x_y_test

    def _infer_mining_function(self, y_test):
        """Guess classification vs. regression from the target values."""
        if y_test is None:
            return FUNCTION_NAME_UNKNOWN
        y_test = self._to_ndarray(y_test)
        if y_test.ndim >= 2:
            # Multi-column targets (e.g. one-hot labels) imply classification.
            return FUNCTION_NAME_CLASSIFICATION if y_test.shape[y_test.ndim - 1] > 1 else FUNCTION_NAME_REGRESSION
        # float numbers are treated as a regression problem
        return FUNCTION_NAME_REGRESSION if y_test.dtype.kind in 'fc' else FUNCTION_NAME_CLASSIFICATION

    @staticmethod
    def _compatible_shape(shape1, shape2):
        """True when two shapes match, ignoring the batch (first) dimension."""
        if len(shape1) != len(shape2):
            return False
        # could be tuple and list
        shape1 = list(shape1)
        shape2 = list(shape2)
        if len(shape1) > 1:
            return shape1[1:] == shape2[1:]
        return shape1 == shape2
class CustomModel(BaseModel):
    """Fallback wrapper for arbitrary user-defined model objects, persisted
    with pickle."""

    def __init__(self, model):
        BaseModel.__init__(self, model)

    def is_support(self):
        # Raw text/bytes content is not treated as a usable model object;
        # anything else is assumed to be a live Python model.
        raw_content = isinstance(self.model, (str, bytes, bytearray))
        return not raw_content

    def model_type(self):
        return 'Custom'

    def model_version(self):
        return 'unknown'

    def serialization(self):
        return 'pickle'

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        # There is no generic way to score an arbitrary custom model.
        return {}
class PMMLModel(BaseModel):
    """Wrapper that recognizes and introspects PMML models through PyPMML."""

    def __init__(self, model):
        BaseModel.__init__(self, model)
        # Parsed pypmml Model instance; populated by is_support().
        self.pmml_model = None

    def __del__(self):
        # Release the PyPMML backend once a model has been loaded.
        # NOTE(review): pypmml's Model.close() appears to shut the backend
        # down globally rather than per-instance -- confirm intended.
        if self.pmml_model:
            try:
                from pypmml import Model
                Model.close()
            except Exception:  # was a bare except; don't mask SystemExit
                pass

    def is_support(self):
        """Try to load ``self.model`` as PMML; return True on success.

        Accepts a file-like object, bytes, a file path, or a PMML XML string.
        """
        try:
            from pypmml import Model
            model_content = self.model
            # A file-like object: read its content first.
            if hasattr(self.model, 'read') and callable(self.model.read):
                model_content = self.model.read()
            if isinstance(model_content, (bytes, bytearray)):
                model_content = model_content.decode('utf-8')
            if isinstance(model_content, str):
                # Check if a file path
                if os.path.exists(model_content):
                    self.pmml_model = Model.fromFile(model_content)
                else:
                    self.pmml_model = Model.fromString(model_content)
                return True
            else:
                Model.close()
                return False
        except Exception:  # unused ``as e`` removed; load failure means "not PMML"
            return False

    def model_type(self):
        """Return the framework identifier, 'PMML'."""
        return 'PMML'

    def model_version(self):
        """Version is not tracked for PMML models; always None."""
        return None

    def mining_function(self, y_test):
        """Return the function name declared by the PMML document itself."""
        return self.pmml_model.functionName

    def serialization(self):
        """Return the serialization format identifier, 'pmml'."""
        return 'pmml'

    def runtime(self):
        """Return the serving runtime name for PMML models."""
        return 'PyPMML'

    def algorithm(self):
        """Return the PMML model element name."""
        return self.pmml_model.modelElement

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort scoring of the PMML model on the test data.

        Returns {'accuracy': ...} for classification,
        {'explainedVariance': ...} for regression, and {} when the model
        cannot be scored.
        """
        prediction_col = self.get_prediction_col()
        if prediction_col is None:
            return {}
        # Convert spark df to Pandas
        if data_test is not None:
            try:
                label_col = self.pmml_model.targetName
                if not label_col:
                    return {}
                pandas_data_test = data_test.toPandas()
                y_test = pandas_data_test[label_col]
                x_test = pandas_data_test
            except Exception:  # was a bare except
                return {}
        if x_test is not None and y_test is not None:
            try:
                function_name = input_function_name if input_function_name else self.mining_function(y_test)
                if function_name == FUNCTION_NAME_CLASSIFICATION:
                    from sklearn.metrics import accuracy_score
                    y_pred = self.pmml_model.predict(x_test)
                    return {'accuracy': accuracy_score(y_test, y_pred[prediction_col])}
                if function_name == FUNCTION_NAME_REGRESSION:
                    from sklearn.metrics import explained_variance_score
                    y_pred = self.pmml_model.predict(x_test)
                    return {'explainedVariance': explained_variance_score(y_test, y_pred[prediction_col])}
                return {}
            except Exception:  # was a bare except
                return {}
        return {}

    def get_prediction_col(self):
        """Return the name of the predicted-value output field, or None."""
        for field in self.pmml_model.outputFields:
            if field.feature == 'predictedValue':
                return field.name
        return None

    def predictors(self, x_test, data_test):
        """Describe the PMML input fields, sampling values from the test data."""
        row = None
        x_test = self._to_dataframe(x_test, data_test)
        if isinstance(x_test, pd.DataFrame):
            row = json.loads(x_test.iloc[0].to_json())
        result = []
        for field in self.pmml_model.inputFields:
            result.append({
                'name': field.name,
                'sample': row.get(field.name) if row is not None else None,
                'type': field.dataType
            })
        return result

    def targets(self, y_test, data_test):
        """Describe the PMML target fields, sampling values from the test data."""
        row = None
        y_test = self._to_dataframe(y_test, data_test)
        if isinstance(y_test, pd.DataFrame):
            row = json.loads(y_test.iloc[0].to_json())
        result = []
        for field in self.pmml_model.targetFields:
            result.append({
                'name': field.name,
                'sample': row.get(field.name) if row is not None else None,
                'type': field.dataType
            })
        return result

    def outputs(self, y_test, data_test, **kwargs):
        """Describe the PMML output fields (names and types only)."""
        return [{'name': field.name, 'type': field.dataType}
                for field in self.pmml_model.outputFields]
class ONNXModel(BaseModel):
def __init__(self, model):
super(ONNXModel, self).__init__(model)
self.onnx_model = None
self.sess = None
self._algorithm = None
def is_support(self):
try:
import onnx
if isinstance(self.model, onnx.ModelProto):
self.onnx_model = self.model
return True
if isinstance(self.model, (bytes, bytearray)):
onnx_model = onnx.load_model_from_string(self.model)
else:
# could be either readable or a file path
onnx_model = onnx.load_model(self.model)
onnx.checker.check_model(onnx_model)
self.onnx_model = onnx_model
return True
except Exception:
return False
    def model_type(self):
        """Return the framework identifier, 'ONNX'."""
        return 'ONNX'
    def model_version(self):
        """Version is not tracked for ONNX models; always None."""
        return None
def mining_function(self, y_test):
algorithm = self.algorithm()
if algorithm is not None:
if algorithm in ('LinearClassifier', 'SVMClassifier', 'TreeEnsembleClassifier'):
return FUNCTION_NAME_CLASSIFICATION
if algorithm in ('LinearRegressor', 'SVMRegressor', 'TreeEnsembleRegressor'):
return FUNCTION_NAME_REGRESSION
return self._infer_mining_function(y_test)
    def serialization(self):
        """Return the serialization format identifier, 'onnx'."""
        return 'onnx'
    def runtime(self):
        """Return the serving runtime name for ONNX models."""
        return 'ONNX Runtime'
def algorithm(self):
if self._algorithm is None:
use_onnx_ml = False
if self.onnx_model is not None:
graph = self.onnx_model.graph
for node in graph.node:
if node.domain == 'ai.onnx.ml':
use_onnx_ml = True
if node.op_type in ('LinearClassifier', 'LinearRegressor', 'SVMClassifier', 'SVMRegressor',
'TreeEnsembleClassifier', 'TreeEnsembleRegressor'):
self._algorithm = node.op_type
break
if self._algorithm is None and not use_onnx_ml:
self._algorithm = 'NeuralNetwork'
return self._algorithm
def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
if x_test is None or y_test is None:
return {}
try:
function_name = input_function_name if input_function_name else self.mining_function(y_test)
# convert to numpy array if not
x_test = self._to_ndarray(x_test)
y_test = self._to_ndarray(y_test)
shape = y_test.shape
if len(shape) > 1 and shape[1] > 1:
y_test = np.argmax(y_test, axis=1)
sess = self._get_inference_session()
y_pred = None
if function_name in (FUNCTION_NAME_CLASSIFICATION, FUNCTION_NAME_REGRESSION) and len(
sess.get_inputs()) == 1:
input_name = sess.get_inputs()[0].name
y_pred = sess.run([sess.get_outputs()[0].name], {input_name: x_test.astype(np.float32)})[0]
y_pred = np.asarray(y_pred)
shape = y_pred.shape
if len(shape) > 1 and shape[1] > 1:
y_pred = np.argmax(y_pred, axis=1)
if y_pred is not None:
if function_name == FUNCTION_NAME_CLASSIFICATION:
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, y_pred)
return {
'accuracy': accuracy
}
elif function_name == FUNCTION_NAME_REGRESSION:
from sklearn.metrics import explained_variance_score
explained_variance = explained_variance_score(y_test, y_pred)
return {
'explainedVariance': explained_variance
}
else:
return {}
except Exception as e:
return {}
def predictors(self, x_test, data_test):
result = []
sess = self._get_inference_session()
for x in sess.get_inputs():
result.append({
'name': x.name,
'type': x.type,
'shape': x.shape
})
# suppose there is only | |
self._client.get_data_feed_ingestion_progress(data_feed_id=data_feed_id, **kwargs)
@distributed_trace_async
async def refresh_data_feed_ingestion(
self,
data_feed_id: str,
start_time: Union[str, datetime.datetime],
end_time: Union[str, datetime.datetime],
**kwargs: Any
) -> None:
"""Refreshes data ingestion by data feed to backfill data.
:param data_feed_id: The data feed unique id.
:type data_feed_id: str
:param start_time: The start point of time range to refresh data ingestion.
:type start_time: Union[str, ~datetime.datetime]
:param end_time: The end point of time range to refresh data ingestion.
:type end_time: Union[str, ~datetime.datetime]
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_ingestion_async.py
:start-after: [START refresh_data_feed_ingestion_async]
:end-before: [END refresh_data_feed_ingestion_async]
:language: python
:dedent: 4
:caption: Refresh data feed ingestion over a period of time
"""
return await self._client.refresh_data_feed_ingestion(
data_feed_id=data_feed_id, start_time=start_time, end_time=end_time, **kwargs
)
@distributed_trace_async
async def delete_alert_configuration(self, *alert_configuration_id: str, **kwargs: Any) -> None:
"""Delete an anomaly alert configuration by its ID.
:param str alert_configuration_id: anomaly alert configuration unique id.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_alert_configuration_async.py
:start-after: [START delete_alert_config_async]
:end-before: [END delete_alert_config_async]
:language: python
:dedent: 4
:caption: Delete an anomaly alert configuration by its ID
"""
return await self._client.delete_alert_configuration(*alert_configuration_id, **kwargs)
@distributed_trace_async
async def delete_detection_configuration(self, *detection_configuration_id: str, **kwargs: Any) -> None:
"""Delete an anomaly detection configuration by its ID.
:param str detection_configuration_id: anomaly detection configuration unique id.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_detection_configuration_async.py
:start-after: [START delete_detection_config_async]
:end-before: [END delete_detection_config_async]
:language: python
:dedent: 4
:caption: Delete an anomaly detection configuration by its ID
"""
return await self._client.delete_detection_configuration(*detection_configuration_id, **kwargs)
@distributed_trace_async
async def delete_data_feed(self, *data_feed_id: str, **kwargs: Any) -> None:
"""Delete a data feed by its ID.
:param str data_feed_id: The data feed unique id.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_data_feeds_async.py
:start-after: [START delete_data_feed_async]
:end-before: [END delete_data_feed_async]
:language: python
:dedent: 4
:caption: Delete a data feed by its ID
"""
return await self._client.delete_data_feed(*data_feed_id, **kwargs)
@distributed_trace_async
async def delete_hook(self, *hook_id: str, **kwargs: Any) -> None:
    """Remove a web or email notification hook, looked up by its unique ID.

    :param str hook_id: Unique ID of the hook to delete.
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_hooks_async.py
            :start-after: [START delete_hook_async]
            :end-before: [END delete_hook_async]
            :language: python
            :dedent: 4
            :caption: Delete a hook by its ID
    """
    # Thin async passthrough to the underlying client implementation.
    return await self._client.delete_hook(
        *hook_id, **kwargs
    )
@distributed_trace_async
async def update_data_feed(self, data_feed: Union[str, models.DataFeed], **kwargs: Any) -> models.DataFeed:
    """Update an existing data feed.

    Accepts either a complete DataFeed object carrying the desired changes, or the
    data feed's ID with the changes supplied as keyword arguments. When both a
    DataFeed object and keyword arguments are provided, the keyword arguments win.

    :param data_feed: The data feed with updates or the data feed ID.
    :type data_feed: ~azure.ai.metricsadvisor.models.DataFeed or str
    :keyword str name: New name for the data feed.
    :keyword str timestamp_column: User-defined timestamp column name.
    :keyword ~datetime.datetime ingestion_begin_time: Ingestion start time.
    :keyword int data_source_request_concurrency: Maximum number of concurrent data-ingestion
        queries against the user's data source. Zero (0) means no limit.
    :keyword int ingestion_retry_delay: Minimum retry interval for failed data-ingestion
        tasks, in seconds.
    :keyword int ingestion_start_offset: Delay applied to the start of every data slice's
        ingestion task, in seconds.
    :keyword int stop_retry_after: Stop retrying data ingestion this many seconds after the
        data slice's first scheduled time.
    :keyword str rollup_identification_value: Identification value for the row holding the
        calculated all-up value.
    :keyword rollup_type: Whether the data feed needs rollup. Possible values include: "NoRollup",
        "AutoRollup", "AlreadyRollup". Default value: "AutoRollup".
    :paramtype rollup_type: str or ~azure.ai.metricsadvisor.models.DataFeedRollupType
    :keyword list[str] auto_rollup_group_by_column_names: Roll up columns.
    :keyword rollup_method: Roll up method. Possible values include: "None", "Sum", "Max", "Min",
        "Avg", "Count".
    :paramtype rollup_method: str or ~azure.ai.metricsadvisor.models.DataFeedAutoRollupMethod
    :keyword fill_type: How missing points are filled for anomaly detection. Possible
        values include: "SmartFilling", "PreviousValue", "CustomValue", "NoFilling". Default value:
        "SmartFilling".
    :paramtype fill_type: str or ~azure.ai.metricsadvisor.models.DatasourceMissingDataPointFillType
    :keyword float custom_fill_value: Fill value used for missing points when the
        "CustomValue" fill type is selected.
    :keyword list[str] admins: Data feed administrators.
    :keyword str data_feed_description: Data feed description.
    :keyword list[str] viewers: Data feed viewers.
    :keyword access_mode: Data feed access mode. Possible values include:
        "Private", "Public". Default value: "Private".
    :paramtype access_mode: str or ~azure.ai.metricsadvisor.models.DataFeedAccessMode
    :keyword str action_link_template: Action link for alerts.
    :keyword status: Data feed status. Possible values include: "Active", "Paused".
    :paramtype status: str or ~azure.ai.metricsadvisor.models.DataFeedStatus
    :keyword source: The source of the data feed for update.
    :paramtype source: Union[AzureApplicationInsightsDataFeedSource, AzureBlobDataFeedSource,
        AzureCosmosDbDataFeedSource, AzureDataExplorerDataFeedSource, AzureDataLakeStorageGen2DataFeedSource,
        AzureTableDataFeedSource, AzureLogAnalyticsDataFeedSource, InfluxDbDataFeedSource, MySqlDataFeedSource,
        PostgreSqlDataFeedSource, SqlServerDataFeedSource, MongoDbDataFeedSource, AzureEventHubsDataFeedSource]
    :rtype: ~azure.ai.metricsadvisor.models.DataFeed
    :raises ~azure.core.exceptions.HttpResponseError:

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_data_feeds_async.py
            :start-after: [START update_data_feed_async]
            :end-before: [END update_data_feed_async]
            :language: python
            :dedent: 4
            :caption: Update an existing data feed
    """
    # Thin async passthrough; all object-vs-kwargs merge logic lives in the
    # underlying client implementation.
    return await self._client.update_data_feed(
        data_feed=data_feed, **kwargs
    )
@distributed_trace_async
async def update_alert_configuration(
    self, alert_configuration: Union[str, models.AnomalyAlertConfiguration], **kwargs: Any
) -> models.AnomalyAlertConfiguration:
    """Update an anomaly alerting configuration.

    Accepts either a complete AnomalyAlertConfiguration object carrying the desired
    changes, or the configuration's ID with the changes supplied as keyword arguments.
    When both the object and keyword arguments are provided, the keyword arguments win.

    :param alert_configuration: AnomalyAlertConfiguration object or the ID to the alert configuration.
    :type alert_configuration: str or ~azure.ai.metricsadvisor.models.AnomalyAlertConfiguration
    :keyword str name: Name for the anomaly alert configuration.
    :keyword metric_alert_configurations: Anomaly alert configurations.
    :paramtype metric_alert_configurations: list[~azure.ai.metricsadvisor.models.MetricAlertConfiguration]
    :keyword list[str] hook_ids: Unique hook IDs.
    :keyword cross_metrics_operator: Cross-metrics operator; required when setting up multiple
        metric alert configurations. Possible values include: "AND", "OR", "XOR".
    :paramtype cross_metrics_operator: str or
        ~azure.ai.metricsadvisor.models.MetricAnomalyAlertConfigurationsOperator
    :keyword str description: Anomaly alert configuration description.
    :rtype: ~azure.ai.metricsadvisor.models.AnomalyAlertConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_alert_configuration_async.py
            :start-after: [START update_alert_config_async]
            :end-before: [END update_alert_config_async]
            :language: python
            :dedent: 4
            :caption: Update an existing anomaly alert configuration
    """
    # Thin async passthrough; all object-vs-kwargs merge logic lives in the
    # underlying client implementation.
    return await self._client.update_alert_configuration(
        alert_configuration=alert_configuration, **kwargs
    )
@distributed_trace_async
async def update_detection_configuration(
    self, detection_configuration: Union[str, models.AnomalyDetectionConfiguration], **kwargs: Any
) -> models.AnomalyDetectionConfiguration:
    """Update an anomaly metric detection configuration.

    Accepts either a complete AnomalyDetectionConfiguration object carrying the desired
    changes, or the configuration's ID with the changes supplied as keyword arguments.
    When both the object and keyword arguments are provided, the keyword arguments win.

    :param detection_configuration: AnomalyDetectionConfiguration object or the ID to the detection
        configuration.
    :type detection_configuration: str or ~azure.ai.metricsadvisor.models.AnomalyDetectionConfiguration
    :keyword str name: The name for the anomaly detection configuration.
    :keyword str metric_id: Metric unique ID.
    :keyword whole_series_detection_condition: Required.
        Conditions to detect anomalies in all time series of a metric.
    :paramtype whole_series_detection_condition: ~azure.ai.metricsadvisor.models.MetricDetectionCondition
    :keyword str description: Anomaly detection configuration description.
    :keyword series_group_detection_conditions: Detection configuration for series groups.
    :paramtype series_group_detection_conditions:
        list[~azure.ai.metricsadvisor.models.MetricSeriesGroupDetectionCondition]
    :keyword series_detection_conditions: Detection configuration for specific series.
    :paramtype series_detection_conditions:
        list[~azure.ai.metricsadvisor.models.MetricSingleSeriesDetectionCondition]
    :return: AnomalyDetectionConfiguration
    :rtype: ~azure.ai.metricsadvisor.models.AnomalyDetectionConfiguration
    :raises ~azure.core.exceptions.HttpResponseError:

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_detection_configuration_async.py
            :start-after: [START update_detection_config_async]
            :end-before: [END update_detection_config_async]
            :language: python
            :dedent: 4
            :caption: Update an existing anomaly detection configuration
    """
    # Thin async passthrough; all object-vs-kwargs merge logic lives in the
    # underlying client implementation.
    return await self._client.update_detection_configuration(
        detection_configuration=detection_configuration, **kwargs
    )
@distributed_trace_async
async def update_hook(
self, hook: Union[str, models.EmailNotificationHook, models.WebNotificationHook], **kwargs: Any
) -> Union[models.NotificationHook, models.EmailNotificationHook, models.WebNotificationHook]:
"""Update a hook. Either pass the entire EmailNotificationHook or WebNotificationHook object with the
chosen updates, or the ID to your hook configuration with the updates passed via keyword arguments.
If you pass both the hook object and keyword arguments, the keyword arguments will take precedence.
:param hook: An email or web hook or the ID to the hook. If an ID is passed, you must pass `hook_type`.
:type hook: Union[str, ~azure.ai.metricsadvisor.models.EmailNotificationHook,
~azure.ai.metricsadvisor.models.WebNotificationHook]
:keyword str hook_type: The hook type. Possible values are "Email" or "Web". Must be passed if only the
hook ID is provided.
:keyword str name: Hook unique name.
:keyword str description: Hook description.
:keyword str external_link: Hook external link.
:keyword list[str] emails_to_alert: Email TO: list. Only should be passed to update EmailNotificationHook.
:keyword str endpoint: API address, will be called when alert is triggered, only support
POST method via SSL. Only should be passed to update WebNotificationHook.
:keyword str username: basic authentication. Only should be passed to update WebNotificationHook.
:keyword str password: basic authentication. Only should be passed to update WebNotificationHook.
:keyword str certificate_key: client certificate. Only should be passed to update WebNotificationHook.
:keyword str certificate_password: <PASSWORD>. Only should be passed | |
class get_cells_serialized_result:
    """Thrift result struct for the get_cells_serialized RPC.

    NOTE(review): the original source line lost its ``class`` keyword
    (truncation/corruption left ``get_cells_serialized_result:``, a syntax
    error); restored here.

    Attributes:
     - success: serialized cell data returned on success (binary string)
     - e: ClientException raised by the server, if any
    """
    # Field metadata used by the (de)serializers; tuple index == field ID.
    thrift_spec = (
        (0, TType.STRING, 'success', None, None, ),  # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encode.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: only set (non-None) fields are written.  Identity
        # comparison ("is not None") replaces the generated "!= None".
        oprot.writeStructBegin('get_cells_serialized_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no field validation for this struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class refresh_shared_mutator_args:
    """Thrift argument struct for the refresh_shared_mutator RPC.

    Attributes:
     - ns: namespace ID (i64)
     - table_name: name of the table whose shared mutator to refresh
     - mutate_spec: MutateSpec identifying the shared mutator
    """
    # Field metadata used by the (de)serializers; tuple index == field ID.
    thrift_spec = (
        None, # 0
        (1, TType.I64, 'ns', None, None, ), # 1
        (2, TType.STRING, 'table_name', None, None, ), # 2
        (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
    )

    def __init__(self, ns=None, table_name=None, mutate_spec=None,):
        self.ns = ns
        self.table_name = table_name
        self.mutate_spec = mutate_spec

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.ns = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.table_name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.mutate_spec = MutateSpec()
                    self.mutate_spec.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encode.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: only set (non-None) fields are written, in field-ID order.
        oprot.writeStructBegin('refresh_shared_mutator_args')
        if self.ns != None:
            oprot.writeFieldBegin('ns', TType.I64, 1)
            oprot.writeI64(self.ns)
            oprot.writeFieldEnd()
        if self.table_name != None:
            oprot.writeFieldBegin('table_name', TType.STRING, 2)
            oprot.writeString(self.table_name)
            oprot.writeFieldEnd()
        if self.mutate_spec != None:
            oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
            self.mutate_spec.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no field validation for this struct.
        return

    def __repr__(self):
        # Python 2 idiom (iteritems); this generated module targets py2.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class refresh_shared_mutator_result:
    """Thrift result struct for the refresh_shared_mutator RPC (void return).

    Attributes:
     - e: ClientException raised by the server, if any
    """
    # Field metadata used by the (de)serializers; tuple index == field ID.
    thrift_spec = (
        None, # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encode.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: only set (non-None) fields are written.
        oprot.writeStructBegin('refresh_shared_mutator_result')
        if self.e != None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no field validation for this struct.
        return

    def __repr__(self):
        # Python 2 idiom (iteritems); this generated module targets py2.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class offer_cells_args:
    """Thrift argument struct for the offer_cells RPC.

    Attributes:
     - ns: namespace ID (i64)
     - table_name: name of the table to mutate
     - mutate_spec: MutateSpec describing the shared mutator to use
     - cells: list of Cell structs to offer to the shared mutator
    """
    # Field metadata used by the (de)serializers; tuple index == field ID.
    thrift_spec = (
        None, # 0
        (1, TType.I64, 'ns', None, None, ), # 1
        (2, TType.STRING, 'table_name', None, None, ), # 2
        (3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
        (4, TType.LIST, 'cells', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 4
    )

    def __init__(self, ns=None, table_name=None, mutate_spec=None, cells=None,):
        self.ns = ns
        self.table_name = table_name
        self.mutate_spec = mutate_spec
        self.cells = cells

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.ns = iprot.readI64();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.table_name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.mutate_spec = MutateSpec()
                    self.mutate_spec.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    # Decode list<Cell>: each element is a full Cell struct.
                    self.cells = []
                    (_etype133, _size130) = iprot.readListBegin()
                    for _i134 in xrange(_size130):
                        _elem135 = Cell()
                        _elem135.read(iprot)
                        self.cells.append(_elem135)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encode.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: only set (non-None) fields are written, in field-ID order.
        oprot.writeStructBegin('offer_cells_args')
        if self.ns != None:
            oprot.writeFieldBegin('ns', TType.I64, 1)
            oprot.writeI64(self.ns)
            oprot.writeFieldEnd()
        if self.table_name != None:
            oprot.writeFieldBegin('table_name', TType.STRING, 2)
            oprot.writeString(self.table_name)
            oprot.writeFieldEnd()
        if self.mutate_spec != None:
            oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
            self.mutate_spec.write(oprot)
            oprot.writeFieldEnd()
        if self.cells != None:
            oprot.writeFieldBegin('cells', TType.LIST, 4)
            oprot.writeListBegin(TType.STRUCT, len(self.cells))
            for iter136 in self.cells:
                iter136.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no field validation for this struct.
        return

    def __repr__(self):
        # Python 2 idiom (iteritems); this generated module targets py2.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class offer_cells_result:
    """Thrift result struct for the offer_cells RPC (void return).

    Attributes:
     - e: ClientException raised by the server, if any
    """
    # Field metadata used by the (de)serializers; tuple index == field ID.
    thrift_spec = (
        None, # 0
        (1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
    )

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        # Fast path: C-accelerated decode when the accelerated binary protocol,
        # a C-readable transport and the fastbinary module are all available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: generic field-by-field decode; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.e = ClientException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: C-accelerated encode.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        # Slow path: only set (non-None) fields are written.
        oprot.writeStructBegin('offer_cells_result')
        if self.e != None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no field validation for this struct.
        return

    def __repr__(self):
        # Python 2 idiom (iteritems); this generated module targets py2.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class offer_cells_as_arrays_args:
"""
Attributes:
- ns
- table_name
- mutate_spec
- cells
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'ns', None, None, ), # 1
(2, TType.STRING, 'table_name', None, None, ), # 2
(3, TType.STRUCT, 'mutate_spec', (MutateSpec, MutateSpec.thrift_spec), None, ), # 3
(4, TType.LIST, 'cells', (TType.LIST,(TType.STRING,None)), None, ), # 4
)
def __init__(self, ns=None, table_name=None, mutate_spec=None, cells=None,):
    # ns: namespace ID (i64); table_name: target table;
    # mutate_spec: shared-mutator spec; cells: list of cells in array form,
    # i.e. each cell is itself a list of strings.
    self.ns = ns
    self.table_name = table_name
    self.mutate_spec = mutate_spec
    self.cells = cells
def read(self, iprot):
    """Deserialize this struct from the given input protocol."""
    # Fast path: C-accelerated decode when the accelerated binary protocol,
    # a C-readable transport and the fastbinary module are all available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
        return
    # Slow path: generic field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            if ftype == TType.I64:
                self.ns = iprot.readI64();
            else:
                iprot.skip(ftype)
        elif fid == 2:
            if ftype == TType.STRING:
                self.table_name = iprot.readString();
            else:
                iprot.skip(ftype)
        elif fid == 3:
            if ftype == TType.STRUCT:
                self.mutate_spec = MutateSpec()
                self.mutate_spec.read(iprot)
            else:
                iprot.skip(ftype)
        elif fid == 4:
            if ftype == TType.LIST:
                # Decode list<list<string>>: outer list of cells, each cell
                # an inner list of strings.
                self.cells = []
                (_etype140, _size137) = iprot.readListBegin()
                for _i141 in xrange(_size137):
                    _elem142 = []
                    (_etype146, _size143) = iprot.readListBegin()
                    for _i147 in xrange(_size143):
                        _elem148 = iprot.readString();
                        _elem142.append(_elem148)
                    iprot.readListEnd()
                    self.cells.append(_elem142)
                iprot.readListEnd()
            else:
                iprot.skip(ftype)
        else:
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('offer_cells_as_arrays_args')
if self.ns != None:
oprot.writeFieldBegin('ns', TType.I64, 1)
oprot.writeI64(self.ns)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, 2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.mutate_spec != None:
oprot.writeFieldBegin('mutate_spec', TType.STRUCT, 3)
self.mutate_spec.write(oprot)
oprot.writeFieldEnd()
if self.cells != None:
oprot.writeFieldBegin('cells', TType.LIST, 4)
oprot.writeListBegin(TType.LIST, len(self.cells))
for iter149 in self.cells:
oprot.writeListBegin(TType.STRING, len(iter149))
for iter150 in iter149:
oprot.writeString(iter150)
| |
number of local MEPs disabled due to operational errors
**type**\: int
**range:** 0..4294967295
.. attribute:: peer_meps
The number of peer MEPs
**type**\: int
**range:** 0..4294967295
.. attribute:: operational_peer_meps
The number of operational peer MEPs recorded in the CFM database
**type**\: int
**range:** 0..4294967295
.. attribute:: peer_meps_with_defects
The number of peer MEPs with defects
**type**\: int
**range:** 0..4294967295
.. attribute:: peer_meps_without_defects
The number of peer MEPs without defects
**type**\: int
**range:** 0..4294967295
.. attribute:: peer_meps_timed_out
The number of peer MEPs that have timed out
**type**\: int
**range:** 0..4294967295
.. attribute:: mips
The number of MIPs
**type**\: int
**range:** 0..4294967295
.. attribute:: interfaces
The number of interfaces running CFM
**type**\: int
**range:** 0..4294967295
.. attribute:: bridge_domains_and_xconnects
Number or bridge domains and crossconnects
**type**\: int
**range:** 0..4294967295
.. attribute:: traceroute_cache_entries
Number of traceroute cache entries
**type**\: int
**range:** 0..4294967295
.. attribute:: traceroute_cache_replies
Number of traceroute cache replies
**type**\: int
**range:** 0..4294967295
.. attribute:: ccm_learning_db_entries
Number of entries in the CCM learning database
**type**\: int
**range:** 0..4294967295
.. attribute:: issu_role
ISSU Role of CFM\-D, if any
**type**\: :py:class:`CfmBagIssuRole <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagIssuRole>`
.. attribute:: bnm_enabled_links
Number of BNM Enabled Links
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
    """Initialise the CFM node-summary entity and register its YANG leaf metadata."""
    super(Cfm.Nodes.Node.Summary, self).__init__()
    self.yang_name = "summary"
    self.yang_parent_name = "node"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # "summary" has no child containers or lists.
    self._child_classes = OrderedDict([])
    # Python attribute name -> (YLeaf descriptor, accepted Python types).
    self._leafs = OrderedDict([
        ('domains', (YLeaf(YType.uint32, 'domains'), ['int'])),
        ('services', (YLeaf(YType.uint32, 'services'), ['int'])),
        ('ccm_rate', (YLeaf(YType.uint32, 'ccm-rate'), ['int'])),
        ('local_meps', (YLeaf(YType.uint32, 'local-meps'), ['int'])),
        ('operational_local_meps', (YLeaf(YType.uint32, 'operational-local-meps'), ['int'])),
        ('down_meps', (YLeaf(YType.uint32, 'down-meps'), ['int'])),
        ('up_meps', (YLeaf(YType.uint32, 'up-meps'), ['int'])),
        ('offloaded', (YLeaf(YType.uint32, 'offloaded'), ['int'])),
        ('offloaded_at3_3ms', (YLeaf(YType.uint32, 'offloaded-at3-3ms'), ['int'])),
        ('offloaded_at10ms', (YLeaf(YType.uint32, 'offloaded-at10ms'), ['int'])),
        ('disabled_misconfigured', (YLeaf(YType.uint32, 'disabled-misconfigured'), ['int'])),
        ('disabled_out_of_resources', (YLeaf(YType.uint32, 'disabled-out-of-resources'), ['int'])),
        ('disabled_operational_error', (YLeaf(YType.uint32, 'disabled-operational-error'), ['int'])),
        ('peer_meps', (YLeaf(YType.uint32, 'peer-meps'), ['int'])),
        ('operational_peer_meps', (YLeaf(YType.uint32, 'operational-peer-meps'), ['int'])),
        ('peer_meps_with_defects', (YLeaf(YType.uint32, 'peer-meps-with-defects'), ['int'])),
        ('peer_meps_without_defects', (YLeaf(YType.uint32, 'peer-meps-without-defects'), ['int'])),
        ('peer_meps_timed_out', (YLeaf(YType.uint32, 'peer-meps-timed-out'), ['int'])),
        ('mips', (YLeaf(YType.uint32, 'mips'), ['int'])),
        ('interfaces', (YLeaf(YType.uint32, 'interfaces'), ['int'])),
        ('bridge_domains_and_xconnects', (YLeaf(YType.uint32, 'bridge-domains-and-xconnects'), ['int'])),
        ('traceroute_cache_entries', (YLeaf(YType.uint32, 'traceroute-cache-entries'), ['int'])),
        ('traceroute_cache_replies', (YLeaf(YType.uint32, 'traceroute-cache-replies'), ['int'])),
        ('ccm_learning_db_entries', (YLeaf(YType.uint32, 'ccm-learning-db-entries'), ['int'])),
        ('issu_role', (YLeaf(YType.enumeration, 'issu-role'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagIssuRole', '')])),
        ('bnm_enabled_links', (YLeaf(YType.uint32, 'bnm-enabled-links'), ['int'])),
    ])
    # All leaf values start unset (None); writes are routed through
    # __setattr__ for validation once the entity is frozen below.
    self.domains = None
    self.services = None
    self.ccm_rate = None
    self.local_meps = None
    self.operational_local_meps = None
    self.down_meps = None
    self.up_meps = None
    self.offloaded = None
    self.offloaded_at3_3ms = None
    self.offloaded_at10ms = None
    self.disabled_misconfigured = None
    self.disabled_out_of_resources = None
    self.disabled_operational_error = None
    self.peer_meps = None
    self.operational_peer_meps = None
    self.peer_meps_with_defects = None
    self.peer_meps_without_defects = None
    self.peer_meps_timed_out = None
    self.mips = None
    self.interfaces = None
    self.bridge_domains_and_xconnects = None
    self.traceroute_cache_entries = None
    self.traceroute_cache_replies = None
    self.ccm_learning_db_entries = None
    self.issu_role = None
    self.bnm_enabled_links = None
    self._segment_path = lambda: "summary"
    # Must be last: freezing enables attribute validation in __setattr__.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through ydk's leaf-validation machinery."""
    leaf_names = [
        u'domains', u'services', u'ccm_rate', u'local_meps',
        u'operational_local_meps', u'down_meps', u'up_meps', u'offloaded',
        u'offloaded_at3_3ms', u'offloaded_at10ms', u'disabled_misconfigured',
        u'disabled_out_of_resources', u'disabled_operational_error',
        u'peer_meps', u'operational_peer_meps', u'peer_meps_with_defects',
        u'peer_meps_without_defects', u'peer_meps_timed_out', u'mips',
        u'interfaces', u'bridge_domains_and_xconnects',
        u'traceroute_cache_entries', u'traceroute_cache_replies',
        u'ccm_learning_db_entries', u'issu_role', u'bnm_enabled_links',
    ]
    self._perform_setattr(Cfm.Nodes.Node.Summary, leaf_names, name, value)
class CcmLearningDatabases(Entity):
    """
    CCMLearningDatabase table

    .. attribute:: ccm_learning_database

    	CCM Learning Database entry
    	**type**\: list of :py:class:`CcmLearningDatabase <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Nodes.Node.CcmLearningDatabases.CcmLearningDatabase>`
    """

    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        """Initialise the list container and bind its child entry class."""
        super(Cfm.Nodes.Node.CcmLearningDatabases, self).__init__()

        self.yang_name = "ccm-learning-databases"
        self.yang_parent_name = "node"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # YANG child name -> (Python attribute, entry class).
        self._child_classes = OrderedDict([("ccm-learning-database", ("ccm_learning_database", Cfm.Nodes.Node.CcmLearningDatabases.CcmLearningDatabase))])
        # Pure container: no leaves of its own.
        self._leafs = OrderedDict()

        self.ccm_learning_database = YList(self)
        self._segment_path = lambda: "ccm-learning-databases"
        # Must be last: freezing enables attribute validation in __setattr__.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No leaves here, so the validated-name list is empty.
        self._perform_setattr(Cfm.Nodes.Node.CcmLearningDatabases, [], name, value)


    class CcmLearningDatabase(Entity):
        """
        CCM Learning Database entry

        .. attribute:: domain  (key)

        	Maintenance Domain
        	**type**\: str

        	**length:** 1..79

        .. attribute:: service  (key)

        	Service (Maintenance Association)
        	**type**\: str

        	**length:** 1..79

        .. attribute:: mac_address  (key)

        	MAC Address
        	**type**\: str

        	**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

        .. attribute:: domain_xr

        	Maintenance domain name
        	**type**\: str

        .. attribute:: level

        	Maintenance level
        	**type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`

        .. attribute:: service_xr

        	Maintenance association name
        	**type**\: str

        .. attribute:: source_mac_address

        	Source MAC address
        	**type**\: str

        	**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

        .. attribute:: ingress_interface

        	The XID of the ingress interface for the CCM
        	**type**\: int

        	**range:** 0..4294967295

        .. attribute:: stale

        	The XID is stale and may have been reused for a different interface
        	**type**\: bool

        .. attribute:: ingress_interface_string

        	String representation of the Bridge Domain or Cross\-Connect associated with the ingress XID
        	**type**\: str
        """

        _prefix = 'ethernet-cfm-oper'
        _revision = '2017-10-06'

        def __init__(self):
            """Initialise the list entry and register its keys and leaves."""
            super(Cfm.Nodes.Node.CcmLearningDatabases.CcmLearningDatabase, self).__init__()

            self.yang_name = "ccm-learning-database"
            self.yang_parent_name = "ccm-learning-databases"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # YANG list keys, in key order; they drive the segment path below.
            self.ylist_key_names = ['domain','service','mac_address']
            self._child_classes = OrderedDict([])
            # Python attribute name -> (YLeaf descriptor, accepted Python types).
            self._leafs = OrderedDict([
                ('domain', (YLeaf(YType.str, 'domain'), ['str'])),
                ('service', (YLeaf(YType.str, 'service'), ['str'])),
                ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
                ('domain_xr', (YLeaf(YType.str, 'domain-xr'), ['str'])),
                ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
                ('service_xr', (YLeaf(YType.str, 'service-xr'), ['str'])),
                ('source_mac_address', (YLeaf(YType.str, 'source-mac-address'), ['str'])),
                ('ingress_interface', (YLeaf(YType.uint32, 'ingress-interface'), ['int'])),
                ('stale', (YLeaf(YType.boolean, 'stale'), ['bool'])),
                ('ingress_interface_string', (YLeaf(YType.str, 'ingress-interface-string'), ['str'])),
            ])
            # All leaf values start unset (None).
            self.domain = None
            self.service = None
            self.mac_address = None
            self.domain_xr = None
            self.level = None
            self.service_xr = None
            self.source_mac_address = None
            self.ingress_interface = None
            self.stale = None
            self.ingress_interface_string = None
            # Segment path embeds the current key values at evaluation time.
            self._segment_path = lambda: "ccm-learning-database" + "[domain='" + str(self.domain) + "']" + "[service='" + str(self.service) + "']" + "[mac-address='" + str(self.mac_address) + "']"
            # Must be last: freezing enables attribute validation in __setattr__.
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Route writes through ydk's leaf-validation machinery.
            self._perform_setattr(Cfm.Nodes.Node.CcmLearningDatabases.CcmLearningDatabase, ['domain', 'service', 'mac_address', 'domain_xr', 'level', 'service_xr', 'source_mac_address', 'ingress_interface', 'stale', 'ingress_interface_string'], name, value)
class Global(Entity):
"""
Global operational data
.. attribute:: incomplete_traceroutes
Incomplete Traceroute table
**type**\: :py:class:`IncompleteTraceroutes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes>`
.. attribute:: maintenance_points
Maintenance Points table
**type**\: :py:class:`MaintenancePoints <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.MaintenancePoints>`
.. attribute:: global_configuration_errors
Global configuration errors table
**type**\: :py:class:`GlobalConfigurationErrors <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.GlobalConfigurationErrors>`
.. attribute:: mep_configuration_errors
MEP configuration errors table
**type**\: :py:class:`MepConfigurationErrors <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.MepConfigurationErrors>`
.. attribute:: traceroute_caches
Traceroute Cache table
**type**\: :py:class:`TracerouteCaches <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches>`
.. attribute:: local_meps
Local MEPs table
**type**\: :py:class:`LocalMeps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.LocalMeps>`
.. attribute:: peer_me_pv2s
Peer MEPs table Version 2
**type**\: :py:class:`PeerMePv2s <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.PeerMePv2s>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
        def __init__(self):
            """Initialise the CFM global operational container and eagerly
            construct its seven child table containers."""
            super(Cfm.Global, self).__init__()
            # Names used when this entity is serialised to XML/JSON payloads.
            self.yang_name = "global"
            self.yang_parent_name = "cfm"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            # YANG child element name -> (attribute name, child class).
            self._child_classes = OrderedDict([("incomplete-traceroutes", ("incomplete_traceroutes", Cfm.Global.IncompleteTraceroutes)), ("maintenance-points", ("maintenance_points", Cfm.Global.MaintenancePoints)), ("global-configuration-errors", ("global_configuration_errors", Cfm.Global.GlobalConfigurationErrors)), ("mep-configuration-errors", ("mep_configuration_errors", Cfm.Global.MepConfigurationErrors)), ("traceroute-caches", ("traceroute_caches", Cfm.Global.TracerouteCaches)), ("local-meps", ("local_meps", Cfm.Global.LocalMeps)), ("peer-me-pv2s", ("peer_me_pv2s", Cfm.Global.PeerMePv2s))])
            self._leafs = OrderedDict()
            # Instantiate each child container, parent it to this node and record
            # its YANG element name for serialisation.
            self.incomplete_traceroutes = Cfm.Global.IncompleteTraceroutes()
            self.incomplete_traceroutes.parent = self
            self._children_name_map["incomplete_traceroutes"] = "incomplete-traceroutes"
            self.maintenance_points = Cfm.Global.MaintenancePoints()
            self.maintenance_points.parent = self
            self._children_name_map["maintenance_points"] = "maintenance-points"
            self.global_configuration_errors = Cfm.Global.GlobalConfigurationErrors()
            self.global_configuration_errors.parent = self
            self._children_name_map["global_configuration_errors"] = "global-configuration-errors"
            self.mep_configuration_errors = Cfm.Global.MepConfigurationErrors()
            self.mep_configuration_errors.parent = self
            self._children_name_map["mep_configuration_errors"] = "mep-configuration-errors"
            self.traceroute_caches = Cfm.Global.TracerouteCaches()
            self.traceroute_caches.parent = self
            self._children_name_map["traceroute_caches"] = "traceroute-caches"
            self.local_meps = Cfm.Global.LocalMeps()
            self.local_meps.parent = self
            self._children_name_map["local_meps"] = "local-meps"
            self.peer_me_pv2s = Cfm.Global.PeerMePv2s()
            self.peer_me_pv2s.parent = self
            self._children_name_map["peer_me_pv2s"] = "peer-me-pv2s"
            self._segment_path = lambda: "global"
            self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/%s" % self._segment_path()
            # Must stay last: freezes the attribute set (see __setattr__).
            self._is_frozen = True
        def __setattr__(self, name, value):
            # No leafs on this container; _perform_setattr still enforces the
            # frozen attribute set after __init__ completes.
            self._perform_setattr(Cfm.Global, [], name, value)
class IncompleteTraceroutes(Entity):
"""
Incomplete Traceroute table
.. attribute:: incomplete_traceroute
Information about a traceroute operation that has not yet timed out
**type**\: list of :py:class:`IncompleteTraceroute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
            def __init__(self):
                """Initialise the incomplete-traceroutes table container; entries
                live in the ``incomplete_traceroute`` YList."""
                super(Cfm.Global.IncompleteTraceroutes, self).__init__()
                self.yang_name = "incomplete-traceroutes"
                self.yang_parent_name = "global"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = []
                # YANG child element name -> (attribute name, list entry class).
                self._child_classes = OrderedDict([("incomplete-traceroute", ("incomplete_traceroute", Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute))])
                self._leafs = OrderedDict()
                # List of IncompleteTraceroute entries, managed by YDK.
                self.incomplete_traceroute = YList(self)
                self._segment_path = lambda: "incomplete-traceroutes"
                self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/%s" % self._segment_path()
                # Must stay last: freezes the attribute set (see __setattr__).
                self._is_frozen = True
            def __setattr__(self, name, value):
                # No leafs on this container; _perform_setattr still enforces the
                # frozen attribute set after __init__ completes.
                self._perform_setattr(Cfm.Global.IncompleteTraceroutes, [], name, value)
class IncompleteTraceroute(Entity):
"""
Information about a traceroute operation that
has not yet timed out
.. attribute:: domain (key)
Maintenance Domain
**type**\: str
**length:** 1..79
.. attribute:: service (key)
Service (Maintenance Association)
**type**\: str
**length:** 1..79
.. attribute:: mep_id (key)
MEP ID
**type**\: int
**range:** 1..8191
.. attribute:: interface (key)
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: transaction_id (key)
Transaction ID
**type**\: int
**range:** 0..4294967295
.. attribute:: traceroute_information
Information about the traceroute operation
**type**\: :py:class:`TracerouteInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation>`
.. attribute:: time_left
Time (in seconds) before the traceroute completes
**type**\: int
**range:** 0..18446744073709551615
| |
args=(self.superuser.pk,))
self.assertContains(
response,
'<div class="readonly"><a href="%s">super</a></div>' % user_url,
html=True,
)
# Related ForeignKey with the string primary key registered in admin.
language_url = reverse(
'admin:admin_views_language_change',
args=(quote(language.pk),),
)
self.assertContains(
response,
'<div class="readonly"><a href="%s">_40</a></div>' % language_url,
html=True,
)
# Related ForeignKey object not registered in admin.
self.assertContains(response, '<div class="readonly">Chapter 1</div>', html=True)
def test_readonly_manytomany_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name='Salami')
pizza = Pizza.objects.create(name='Americano')
pizza.toppings.add(topping)
response = self.client.get(reverse('admin:admin_views_topping_add'))
self.assertEqual(response.status_code, 200)
def test_readonly_manytomany_forwards_ref(self):
topping = Topping.objects.create(name='Salami')
pizza = Pizza.objects.create(name='Americano')
pizza.toppings.add(topping)
response = self.client.get(reverse('admin:admin_views_pizza_change', args=(pizza.pk,)))
self.assertContains(response, '<label>Toppings:</label>', html=True)
self.assertContains(response, '<div class="readonly">Salami</div>', html=True)
def test_readonly_onetoone_backwards_ref(self):
"""
Can reference a reverse OneToOneField in ModelAdmin.readonly_fields.
"""
v1 = Villain.objects.create(name='Adam')
pl = Plot.objects.create(name='Test Plot', team_leader=v1, contact=v1)
pd = PlotDetails.objects.create(details='Brand New Plot', plot=pl)
response = self.client.get(reverse('admin:admin_views_plotproxy_change', args=(pl.pk,)))
field = self.get_admin_readonly_field(response, 'plotdetails')
pd_url = reverse('admin:admin_views_plotdetails_change', args=(pd.pk,))
self.assertEqual(field.contents(), '<a href="%s">Brand New Plot</a>' % pd_url)
# The reverse relation also works if the OneToOneField is null.
pd.plot = None
pd.save()
response = self.client.get(reverse('admin:admin_views_plotproxy_change', args=(pl.pk,)))
field = self.get_admin_readonly_field(response, 'plotdetails')
self.assertEqual(field.contents(), '-') # default empty value
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(reverse('admin:admin_views_fieldoverridepost_change', args=(p.pk,)))
self.assertContains(response, '<div class="help">Overridden help text for the date</div>')
self.assertContains(response, '<label for="id_public">Overridden public label:</label>', html=True)
self.assertNotContains(response, 'Some help text for the date (with Unicode ŠĐĆŽćžšđ)')
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name='<a>evil</a>')
response = self.client.get(reverse('admin:admin_views_section_change', args=(section.pk,)))
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
def test_label_suffix_translated(self):
pizza = Pizza.objects.create(name='Americano')
url = reverse('admin:admin_views_pizza_change', args=(pizza.pk,))
with self.settings(LANGUAGE_CODE='fr'):
response = self.client.get(url)
self.assertContains(response, '<label>Toppings\u00A0:</label>', html=True)
@override_settings(ROOT_URLCONF='admin_views.urls')
class LimitChoicesToInAdminTest(TestCase):
    """The admin honours a callable ``limit_choices_to`` (ticket #2445)."""

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_limit_choices_to_as_callable(self):
        """Test for ticket 2445 changes to admin."""
        threepwood = Character.objects.create(
            username='threepwood',
            last_action=datetime.datetime.today() + datetime.timedelta(days=1),
        )
        marley = Character.objects.create(
            username='marley',
            last_action=datetime.datetime.today() - datetime.timedelta(days=1),
        )
        response = self.client.get(reverse('admin:admin_views_stumpjoke_add'))
        # The allowed character shows up twice on the page; the character
        # excluded by limit_choices_to must not appear at all.
        self.assertContains(response, threepwood.username, count=2)
        self.assertNotContains(response, marley.username)
@override_settings(ROOT_URLCONF='admin_views.urls')
class RawIdFieldsTest(TestCase):
    """Tests for raw_id_fields lookup popups and limit_choices_to filters."""

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')

    def setUp(self):
        self.client.force_login(self.superuser)

    def _popup_url(self, response, lookup_id):
        """Extract the raw-id lookup popup URL for *lookup_id* from the page,
        unescape it and resolve it against the request path."""
        m = re.search(
            br'<a href="([^"]*)"[^>]* id="%s"' % lookup_id.encode(),
            response.content,
        )
        self.assertTrue(m)  # Got a match
        # Fixed: the href is HTML-escaped in the page source, so '&amp;' must
        # be turned back into '&' for the query string to parse; the previous
        # .replace('&', '&') was a no-op.
        popup_url = m[1].decode().replace('&amp;', '&')
        # Handle relative links.
        return urljoin(response.request['PATH_INFO'], popup_url)

    def test_limit_choices_to(self):
        """Regression test for 14880"""
        actor = Actor.objects.create(name="Palin", age=27)
        Inquisition.objects.create(expected=True,
                                   leader=actor,
                                   country="England")
        Inquisition.objects.create(expected=False,
                                   leader=actor,
                                   country="Spain")
        response = self.client.get(reverse('admin:admin_views_sketch_add'))
        # Get the popup and verify the correct objects show up in the resulting
        # page. This step also tests integers, strings and booleans in the
        # lookup query string; in model we define inquisition field to have a
        # limit_choices_to option that includes a filter on a string field
        # (inquisition__actor__name), a filter on an integer field
        # (inquisition__actor__age), and a filter on a boolean field
        # (inquisition__expected).
        response2 = self.client.get(self._popup_url(response, 'lookup_id_inquisition'))
        self.assertContains(response2, "Spain")
        self.assertNotContains(response2, "England")

    def test_limit_choices_to_isnull_false(self):
        """Regression test for 20182"""
        Actor.objects.create(name="Palin", age=27)
        Actor.objects.create(name="Kilbraken", age=50, title="Judge")
        response = self.client.get(reverse('admin:admin_views_sketch_add'))
        # Get the popup and verify the correct objects show up in the resulting
        # page. This step tests field__isnull=0 gets parsed correctly from the
        # lookup query string; in model we define defendant0 field to have a
        # limit_choices_to option that includes "actor__title__isnull=False".
        response2 = self.client.get(self._popup_url(response, 'lookup_id_defendant0'))
        self.assertContains(response2, "Kilbraken")
        self.assertNotContains(response2, "Palin")

    def test_limit_choices_to_isnull_true(self):
        """Regression test for 20182"""
        Actor.objects.create(name="Palin", age=27)
        Actor.objects.create(name="Kilbraken", age=50, title="Judge")
        response = self.client.get(reverse('admin:admin_views_sketch_add'))
        # Get the popup and verify the correct objects show up in the resulting
        # page. This step tests field__isnull=1 gets parsed correctly from the
        # lookup query string; in model we define defendant1 field to have a
        # limit_choices_to option that includes "actor__title__isnull=True".
        response2 = self.client.get(self._popup_url(response, 'lookup_id_defendant1'))
        self.assertNotContains(response2, "Kilbraken")
        self.assertContains(response2, "Palin")

    def test_list_display_method_same_name_as_reverse_accessor(self):
        """
        Should be able to use a ModelAdmin method in list_display that has the
        same name as a reverse model field ("sketch" in this case).
        """
        actor = Actor.objects.create(name="Palin", age=27)
        Inquisition.objects.create(expected=True, leader=actor, country="England")
        response = self.client.get(reverse('admin:admin_views_inquisition_changelist'))
        self.assertContains(response, 'list-display-sketch')
@override_settings(ROOT_URLCONF='admin_views.urls')
class UserAdminTest(TestCase):
    """
    Tests user CRUD functionality.
    """

    @classmethod
    def setUpTestData(cls):
        # NOTE(review): '<PASSWORD>'/'<EMAIL>'/'<NAME>' are anonymisation
        # placeholders left in the fixture data; restore real literals when
        # syncing with upstream.
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
        cls.adduser = User.objects.create_user(username='adduser', password='<PASSWORD>', is_staff=True)
        cls.changeuser = User.objects.create_user(username='changeuser', password='<PASSWORD>', is_staff=True)
        cls.s1 = Section.objects.create(name='Test section')
        cls.a1 = Article.objects.create(
            content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.a2 = Article.objects.create(
            content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.a3 = Article.objects.create(
            content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
        cls.per1 = Person.objects.create(name='<NAME>', gender=1, alive=True)
        cls.per2 = Person.objects.create(name='<NAME>', gender=1, alive=False)
        cls.per3 = Person.objects.create(name='<NAME>', gender=1, alive=True)

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_save_button(self):
        """Plain save creates the user and redirects to its change page."""
        user_count = User.objects.count()
        response = self.client.post(reverse('admin:auth_user_add'), {
            'username': 'newuser',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
        })
        new_user = User.objects.get(username='newuser')
        self.assertRedirects(response, reverse('admin:auth_user_change', args=(new_user.pk,)))
        self.assertEqual(User.objects.count(), user_count + 1)
        self.assertTrue(new_user.has_usable_password())

    def test_save_continue_editing_button(self):
        """'Save and continue' redirects back to the change form with a
        success message."""
        user_count = User.objects.count()
        response = self.client.post(reverse('admin:auth_user_add'), {
            'username': 'newuser',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
            '_continue': '1',
        })
        new_user = User.objects.get(username='newuser')
        new_user_url = reverse('admin:auth_user_change', args=(new_user.pk,))
        self.assertRedirects(response, new_user_url, fetch_redirect_response=False)
        self.assertEqual(User.objects.count(), user_count + 1)
        self.assertTrue(new_user.has_usable_password())
        response = self.client.get(new_user_url)
        self.assertContains(
            response,
            '<li class="success">The user “<a href="%s">'
            '%s</a>” was added successfully. You may edit it again below.</li>'
            % (new_user_url, new_user),
            html=True,
        )

    def test_password_mismatch(self):
        """Differing password1/password2 re-renders the form with an error."""
        # Fixed: both fields previously posted the same placeholder value,
        # which can never trigger the mismatch validation being asserted
        # below. The two values must differ.
        response = self.client.post(reverse('admin:auth_user_add'), {
            'username': 'newuser',
            'password1': 'mismatch1',
            'password2': 'mismatch2',
        })
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'adminform', 'password', [])
        self.assertFormError(response, 'adminform', 'password2', ['The two password fields didn’t match.'])

    def test_user_fk_add_popup(self):
        """User addition through a FK popup should return the appropriate JavaScript response."""
        response = self.client.get(reverse('admin:admin_views_album_add'))
        self.assertContains(response, reverse('admin:auth_user_add'))
        self.assertContains(response, 'class="related-widget-wrapper-link add-related" id="add_id_owner"')
        response = self.client.get(reverse('admin:auth_user_add') + '?_popup=1')
        # Popup forms must not offer the continue/add-another buttons.
        self.assertNotContains(response, 'name="_continue"')
        self.assertNotContains(response, 'name="_addanother"')
        data = {
            'username': 'newuser',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
            '_popup': '1',
            '_save': '1',
        }
        response = self.client.post(reverse('admin:auth_user_add') + '?_popup=1', data, follow=True)
        self.assertContains(response, '"obj": "newuser"')

    def test_user_fk_change_popup(self):
        """User change through a FK popup should return the appropriate JavaScript response."""
        response = self.client.get(reverse('admin:admin_views_album_add'))
        self.assertContains(response, reverse('admin:auth_user_change', args=('__fk__',)))
        self.assertContains(response, 'class="related-widget-wrapper-link change-related" id="change_id_owner"')
        user = User.objects.get(username='changeuser')
        url = reverse('admin:auth_user_change', args=(user.pk,)) + '?_popup=1'
        response = self.client.get(url)
        self.assertNotContains(response, 'name="_continue"')
        self.assertNotContains(response, 'name="_addanother"')
        data = {
            'username': 'newuser',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'last_login_0': '2007-05-30',
            'last_login_1': '13:20:10',
            'date_joined_0': '2007-05-30',
            'date_joined_1': '13:20:10',
            '_popup': '1',
            '_save': '1',
        }
        response = self.client.post(url, data, follow=True)
        self.assertContains(response, '"obj": "newuser"')
        self.assertContains(response, '"action": "change"')

    def test_user_fk_delete_popup(self):
        """User deletion through a FK popup should return the appropriate JavaScript response."""
        response = self.client.get(reverse('admin:admin_views_album_add'))
        self.assertContains(response, reverse('admin:auth_user_delete', args=('__fk__',)))
        self.assertContains(response, 'class="related-widget-wrapper-link change-related" id="change_id_owner"')
        user = User.objects.get(username='changeuser')
        url = reverse('admin:auth_user_delete', args=(user.pk,)) + '?_popup=1'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        data = {
            'post': 'yes',
            '_popup': '1',
        }
        response = self.client.post(url, data, follow=True)
        self.assertContains(response, '"action": "delete"')

    def test_save_add_another_button(self):
        """'Save and add another' creates the user and redirects to the add form."""
        user_count = User.objects.count()
        response = self.client.post(reverse('admin:auth_user_add'), {
            'username': 'newuser',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
            '_addanother': '1',
        })
        new_user = User.objects.order_by('-id')[0]
        self.assertRedirects(response, reverse('admin:auth_user_add'))
        self.assertEqual(User.objects.count(), user_count + 1)
        self.assertTrue(new_user.has_usable_password())

    def test_user_permission_performance(self):
        """The user change page stays within its query budget."""
        u = User.objects.all()[0]
        # Don't depend on a warm cache, see #17377.
        ContentType.objects.clear_cache()
        with self.assertNumQueries(10):
            response = self.client.get(reverse('admin:auth_user_change', args=(u.pk,)))
            self.assertEqual(response.status_code, 200)

    def test_form_url_present_in_context(self):
        """A custom form_url passed by the admin site reaches the context."""
        u = User.objects.all()[0]
        response = self.client.get(reverse('admin3:auth_user_password_change', args=(u.pk,)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['form_url'], 'pony')
@override_settings(ROOT_URLCONF='admin_views.urls')
class GroupAdminTest(TestCase):
    """
    Tests group CRUD functionality.
    """

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_save_button(self):
        """Saving a new group redirects to the changelist and adds one row."""
        count_before = Group.objects.count()
        response = self.client.post(reverse('admin:auth_group_add'), {
            'name': 'newgroup',
        })
        # Fetch the most recently added group (result intentionally unused).
        Group.objects.order_by('-id')[0]
        self.assertRedirects(response, reverse('admin:auth_group_changelist'))
        self.assertEqual(Group.objects.count(), count_before + 1)

    def test_group_permission_performance(self):
        """The group change page stays within its query budget."""
        group = Group.objects.create(name="test_group")
        # Ensure no queries are skipped due to cached content type for Group.
        ContentType.objects.clear_cache()
        with self.assertNumQueries(8):
            response = self.client.get(reverse('admin:auth_group_change', args=(group.pk,)))
            self.assertEqual(response.status_code, 200)
@override_settings(ROOT_URLCONF='admin_views.urls')
class CSSTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create the superuser plus a section, three articles and one
        pre-populated post used by the CSS assertions in this class."""
        # NOTE(review): '<PASSWORD>'/'<EMAIL>' are anonymisation placeholders.
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
        cls.s1 = Section.objects.create(name='Test section')
        cls.a1 = Article.objects.create(
            content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.a2 = Article.objects.create(
            content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.a3 = Article.objects.create(
            content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
        )
        cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
    def setUp(self):
        # Every test in this class runs as the superuser.
        self.client.force_login(self.superuser)
def test_field_prefix_css_classes(self):
"""
Fields have a CSS class name with | |
or # noqa: E501
local_var_params['entity_set_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_set_id` when calling `execute_entity_neighbor_search`") # noqa: E501
# verify the required parameter 'entity_key_id' is set
if self.api_client.client_side_validation and ('entity_key_id' not in local_var_params or # noqa: E501
local_var_params['entity_key_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity_key_id` when calling `execute_entity_neighbor_search`") # noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_id' in local_var_params:
path_params['entitySetId'] = local_var_params['entity_set_id'] # noqa: E501
if 'entity_key_id' in local_var_params:
path_params['entityKeyId'] = local_var_params['entity_key_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/search/{entitySetId}/{entityKeyId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[NeighborEntityDetails]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
    def execute_entity_neighbor_search_bulk(self, entity_set_id, request_body, **kwargs):  # noqa: E501
        """Executes a search for all neighbors of multiple entities of the same entity set that are connected by an association  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.execute_entity_neighbor_search_bulk(entity_set_id, request_body, async_req=True)
        >>> result = thread.get()
        :param entity_set_id: (required)
        :type entity_set_id: str
        :param request_body: (required)
        :type request_body: list[str]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: dict(str, list[NeighborEntityDetails])
        """
        # Thin convenience wrapper: delegate to the *_with_http_info variant
        # and return only the deserialized body (status/headers dropped).
        kwargs['_return_http_data_only'] = True
        return self.execute_entity_neighbor_search_bulk_with_http_info(entity_set_id, request_body, **kwargs)  # noqa: E501
    def execute_entity_neighbor_search_bulk_with_http_info(self, entity_set_id, request_body, **kwargs):  # noqa: E501
        """Executes a search for all neighbors of multiple entities of the same entity set that are connected by an association  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.execute_entity_neighbor_search_bulk_with_http_info(entity_set_id, request_body, async_req=True)
        >>> result = thread.get()
        :param entity_set_id: (required)
        :type entity_set_id: str
        :param request_body: (required)
        :type request_body: list[str]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(dict(str, list[NeighborEntityDetails]), status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of named arguments + kwargs. The dict keys must match the
        # parameter names, so locals above this line must not be renamed.
        local_var_params = locals()
        all_params = [
            'entity_set_id',
            'request_body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # parameter dict so the lookups below are uniform.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method execute_entity_neighbor_search_bulk" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'entity_set_id' is set
        if self.api_client.client_side_validation and ('entity_set_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['entity_set_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `entity_set_id` when calling `execute_entity_neighbor_search_bulk`")  # noqa: E501
        # verify the required parameter 'request_body' is set
        if self.api_client.client_side_validation and ('request_body' not in local_var_params or  # noqa: E501
                                                        local_var_params['request_body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `request_body` when calling `execute_entity_neighbor_search_bulk`")  # noqa: E501
        collection_formats = {}
        # Substitute the entity set id into the URL path template.
        path_params = {}
        if 'entity_set_id' in local_var_params:
            path_params['entitySetId'] = local_var_params['entity_set_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # The entity key ids travel as the JSON request body.
        body_params = None
        if 'request_body' in local_var_params:
            body_params = local_var_params['request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501
        return self.api_client.call_api(
            '/datastore/search/{entitySetId}/neighbors', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, list[NeighborEntityDetails])',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
    def execute_entity_set_collection_search(self, search_term, **kwargs):  # noqa: E501
        """Executes a search over all EntitySetCollections to find ones that match the given search term  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.execute_entity_set_collection_search(search_term, async_req=True)
        >>> result = thread.get()
        :param search_term: (required)
        :type search_term: SearchTerm
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: SearchResult
        """
        # Thin convenience wrapper: delegate to the *_with_http_info variant
        # and return only the deserialized body (status/headers dropped).
        kwargs['_return_http_data_only'] = True
        return self.execute_entity_set_collection_search_with_http_info(search_term, **kwargs)  # noqa: E501
def execute_entity_set_collection_search_with_http_info(self, search_term, **kwargs):  # noqa: E501
    """Executes a search over all EntitySetCollections to find ones that match the given search term # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.execute_entity_set_collection_search_with_http_info(search_term, async_req=True)
    >>> result = thread.get()

    :param search_term: (required)
    :type search_term: SearchTerm
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(SearchResult, status_code(int), headers(HTTPHeaderDict))
    """
    # Capture all arguments (self, search_term, kwargs) so the known
    # keyword options can be looked up uniformly below.
    local_var_params = locals()

    all_params = [
        'search_term'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, folding the known ones into
    # local_var_params. (Python 3 idiom: dict.items() replaces the
    # former six.iteritems call; behaviour is identical.)
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method execute_entity_set_collection_search" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'search_term' is set
    if self.api_client.client_side_validation and ('search_term' not in local_var_params or  # noqa: E501
                                                   local_var_params['search_term'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `search_term` when calling `execute_entity_set_collection_search`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The search term is sent as the JSON request body.
    body_params = None
    if 'search_term' in local_var_params:
        body_params = local_var_params['search_term']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501

    return self.api_client.call_api(
        '/datastore/search/entity_sets/collections', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SearchResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def execute_entity_set_data_query(self, entity_set_id, search_term, **kwargs): # noqa: E501
"""Executes a search over the data of a given entity set to find rows that match the search term # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_entity_set_data_query(entity_set_id, search_term, async_req=True)
>>> result = thread.get()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------
# Penji OpDev Fall 2019
# GS Run Wrapper
# Author: <NAME>
# Updated:
# ------------------------
# General
import os
import argparse
import pandas as pd
# For Google Sheets
import pygsheets
# Local
import core.utils as utils
from core import logger
import core.gs_api_utils as gs_api_
from core.config import cfg
from core.gs_parent import GoogleSheetsParent
class GoogleSheetsPullFromArchive(GoogleSheetsParent):
    """Copy archived professor/course data into the current-term CSVs.

    Rows in the current-term files are matched against the archive files
    (professors by 'Full Name', courses by 'Course Code') and the archived
    email / response / demand values are carried over.
    """

    def __init__(self, args):
        GoogleSheetsParent.__init__(self, args)
        # Ensure all files are created and saved properly
        self._setup_all()
        self.df_ct_prof = self._load('ct_prof')
        self.df_ct_courses = self._load('ct_courses')
        self.df_arch_prof = self._load('archive_prof')
        self.df_arch_courses = self._load('archive_courses')

    def run(self, *args, **kwargs):
        # Pull data for prof file
        self.df_ct_prof = self.df_ct_prof.apply(self.pull_arch_prof, axis=1)
        # Pull data for courses file
        #self.df_ct_courses = self.df_ct_courses.apply(self.pull_arch_courses, axis=1)
        print(self.df_ct_prof)
        if self.args.save:
            self._save(self.df_ct_prof, 'ct_prof')
            self._save(self.df_ct_courses, 'ct_courses')

    def pull_arch_prof(self, row):
        """Fill Email / Previous Response / Term Last Sent for one professor
        row from the first archive row with a matching name and a valid
        (contains '@') email. Positional columns: ir[1]=name, ir[5]=email,
        ir[6]=previous response, ir[7]=term last sent."""
        try:
            for ir in self.df_arch_prof.itertuples():
                if ir[1] == row['Full Name'] and '@' in str(ir[5]):
                    print(ir[1], ir[5])
                    row['Email'] = ir[5]
                    row['Previous Response'] = ir[6]
                    row['Term Last Sent'] = ir[7]
                    break
        except Exception:
            # Narrowed from a bare except: (which also swallowed
            # KeyboardInterrupt/SystemExit). Triggered e.g. when the
            # archive file is empty/malformed.
            logger.warn('Empty Archive Professor CSV')
        return row

    def pull_arch_courses(self, row):
        """Fill 'Archive Demand In' for one course row from the archive
        (ir[1]=course code, ir[4]=archived demand)."""
        try:
            for ir in self.df_arch_courses.itertuples():
                if ir[1] == row['Course Code'] and not pd.isna(ir[4]):
                    print(ir[1], ir[4])
                    row['Archive Demand In'] = ir[4]
                    break
        except Exception:
            logger.warn('Empty Archive Course CSV')
        return row
class GoogleSheetsPrep(GoogleSheetsParent):
    """Compute demand, ranking and cross-reference columns on the
    current-term CSVs so they are ready for Google Sheets upload."""

    def __init__(self, args):
        GoogleSheetsParent.__init__(self, args)
        self.df_ct = self._load('ct')
        self.df_ct_prof = self._load('ct_prof')
        self.df_ct_courses = self._load('ct_courses')
        self.df_arch_courses = self._load('archive_courses')

    def run(self, *args, **kwargs):
        """ Sets up Professor df for google sheets upload
        Needs demand and ranking in order to deduct desired course
        Professor Row Reference #s
        """
        # Process Current term CSV: Demand, Ranking, Professor Row #
        self.df_ct = self.df_ct.apply(self.process_cur_term_csv, axis=1)
        # Process Professor CSV: Demand, Ranking, Professor Row #
        self.df_ct_prof = self.df_ct_prof.apply(self.process_prof_courses, axis=1)
        # Clear out those temporary values
        self.df_ct = self.df_ct.apply(self.clear_temp_values, axis=1)
        if self.args.save:
            self._save(self.df_ct, 'ct')
            self._save(self.df_ct_prof, 'ct_prof')
        else:
            print(self.df_ct)
            print(self.df_ct_prof)

    def clear_temp_values(self, row):
        # Demand/Ranking are only needed while deriving the desired
        # course; blank them before the frame is saved.
        row['Demand'], row['Ranking'] = None, None
        return row

    def process_cur_term_csv(self, row):
        """Attach Demand, Ranking and 'Professor Row #' to one course row."""
        # Term Sheet: Demand Column
        demand = 3  # Default
        try:
            for ir in self.df_ct_courses.itertuples():
                if ir[1] == row['Course Code'] and not pd.isna(ir[6]):
                    print(ir[1], ir[6])
                    demand = ir[6]
                    break
        except Exception:
            # Narrowed from a bare except: — fires e.g. on an empty CSV.
            logger.warn('Empty Archive Course CSV')
        # Ranking = demand plus a small tiebreak on class size.
        ranking = demand + (row['# Students'] / 100)
        # Term Sheet: Professor Row Reference #
        # +2 converts a 0-based df index to a 1-based sheet row under a
        # header row.
        row_references = []
        if isinstance(row['Professor'], str):
            prof_names_in = row['Professor'].split(', ')
            for ir in self.df_ct_prof.itertuples():
                row_references.extend(ir[0] + 2 for name in prof_names_in if ir[1] == name)
            assert len(prof_names_in) == len(row_references), \
                f'ERROR: prof names {prof_names_in} != {row_references} row references'
        row['Demand'], row['Ranking'], row['Professor Row #'] = demand, ranking, row_references
        return row

    def process_prof_courses(self, row):
        """Pick each professor's desired course: the highest-ranked of the
        courses referencing this professor's sheet row."""
        # Professor Sheet: All Courses
        # Don't select a class if no email available
        all_courses = []  # entries: (Course Code, Course Row #, Ranking)
        best_course = (None, None, 0)
        if '@' in str(row['Email']):
            # NOTE(review): '<NAME>' looks like a redacted column key and
            # the value is never used -- confirm the intended column.
            prof_name = row['<NAME>']
            for ir in self.df_ct.itertuples():
                # ir[15] holds the stringified list of professor row refs.
                if ir[15] and str(row.name + 2) in str(ir[15])[1:-1].split(', '):
                    all_courses.append((ir[1], ir[0] + 2, ir[11]))
            if all_courses:
                # Find their course with the highest ranking
                for course in all_courses:
                    if course[2] > best_course[2]:
                        best_course = course
            else:
                all_courses = None
        row['Desired Course Code'] = best_course[0]
        row['Desired Course Row #'] = int(best_course[1]) if best_course[1] else best_course[1]
        row['All Courses'] = all_courses
        return row
class GoogleSheetsUpload(GoogleSheetsParent):
    """Upload every prepared CSV into one Google Sheets workbook, then
    install array formulas and apply formatting.

    The code relies on a fixed worksheet index layout: 0 current term,
    1 current professors, 2 current courses, 3-5 the previous-term
    equivalents, 6 tutors, 8 archive courses (index 7 is presumably the
    archive professors sheet -- confirm against self.file_keys).
    """

    def __init__(self, args):
        GoogleSheetsParent.__init__(self, args)
        # Labels for the colour-coded professor-contact "Status" column.
        self.status_arr = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']

    def run(self):
        """Create/refresh the workbook: upload data, formulas, formatting."""
        # Create a new sheet in folder
        sh = self._connect_google_sheet()
        # Make sure the sheets are setup properly
        gs_df_arr = self._load_all()
        self.num_wks = len(gs_df_arr)
        # TODO: Professor row Reference #s add 2
        # Find number of rows and columns for each
        shapes = []
        # One formula-setup callback per worksheet index (None = no formulas).
        setup_formulas = [self.setup_term_formulas, self.setup_professor_formulas, self.setup_course_formulas,
                          None, None, None, None, None, self.setup_arch_course_formulas]
        for idx in range(len(gs_df_arr)):
            # load csv as pd df and upload it
            gs_df = gs_df_arr[idx]
            shapes.append(gs_df.shape)
            # Create new sheets
            if self.reset_gs:
                wks = sh.add_worksheet(self.files[self.file_keys[idx]][1], rows=shapes[idx][0]+10, cols=shapes[idx][1], index=idx)
                if idx == 0:
                    # Drop pygsheets' default empty worksheet once ours exists.
                    sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
            else:
                wks = sh[idx]
            # Upload the data
            if self.args.data:
                wks.set_dataframe(gs_df, (1, 1))
                wks.replace('NaN', '')
            # Add The Formulas
            if self.args.formulas and setup_formulas[idx]:
                # Worksheets 3-5 hold the previous term's data.
                term = self.pterm if idx in (3,4,5) else self.cterm
                setup_formulas[idx](wks, term)
        if self.args.format:
            self.format_sheet(sh, shapes)

    def format_sheet(self, sh, shapes):
        """Apply visual formatting to every worksheet (tutor columns,
        frozen header rows, editable-column highlights, bold headers,
        colour-coded status columns)."""
        # Format Tutor Columns
        gs_api_.format_tutor_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=10)  # Current Term
        gs_api_.format_tutor_col(sh=sh, wks=sh[2], shape=shapes[2], col_idx=7)   # Current Courses
        gs_api_.format_tutor_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=10)  # Prev Term
        gs_api_.format_tutor_col(sh=sh, wks=sh[5], shape=shapes[5], col_idx=7)   # Prev Courses
        gs_api_.format_tutor_col(sh=sh, wks=sh[8], shape=shapes[8], col_idx=6)   # Archive Courses
        # Freeze first row of each wks
        [gs_api_.freeze_row(sh=sh, wks=sh[i]) for i in range(self.num_wks)]
        # Headers of editable columns: Add blue background
        editable_col_cells = [sh[1].cell('G1'), sh[1].cell('H1'), sh[1].cell('I1'),
                              sh[1].cell('J1'), sh[1].cell('K1'), sh[1].cell('L1'), sh[2].cell('E1'),
                              sh[4].cell('G1'), sh[4].cell('H1'), sh[4].cell('I1'),
                              sh[4].cell('J1'), sh[4].cell('K1'), sh[4].cell('L1'), sh[5].cell('E1')]
        for cell in editable_col_cells:
            # Light blue, RGBA in 0-1 floats.
            cell.color = (207/255, 226/255, 243/255, 1.0)
        tutors_range = sh[6].get_values('A1', 'O1', returnas='range')
        for cell in tutors_range[0]:
            cell.color = (207/255, 226/255, 243/255, 1.0)
        # All Headers: Set Bold
        # Current Term
        [cell.set_text_format('bold', True) for cell in sh[0].get_values('A1', 'Q1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[1].get_values('A1', 'P1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[2].get_values('A1', 'G1', returnas='range')[0]]
        # Previous Term
        [cell.set_text_format('bold', True) for cell in sh[3].get_values('A1', 'Q1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[4].get_values('A1', 'P1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[5].get_values('A1', 'G1', returnas='range')[0]]
        # Tutors & Archive
        [cell.set_text_format('bold', True) for cell in sh[6].get_values('A1', 'O1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[7].get_values('A1', 'G1', returnas='range')[0]]
        [cell.set_text_format('bold', True) for cell in sh[8].get_values('A1', 'F1', returnas='range')[0]]
        # Format Status Column
        gs_api_.format_status_col(sh=sh, wks=sh[0], shape=shapes[0], col_idx=17, stat_arr=self.status_arr)
        gs_api_.format_status_col(sh=sh, wks=sh[3], shape=shapes[3], col_idx=17, stat_arr=self.status_arr)

    def setup_term_formulas(self, wks, term):
        """Install the array formulas for a term worksheet (demand,
        previous response, tutor counts, ranking, status)."""
        # Demand
        wks.cell('B1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Demand", VLOOKUP(A1:A, ' + f"'Courses {term[2]}'" + '!$A:$D, 4, FALSE)))'
        # Previous Response
        wks.cell('H1').formula = 'ArrayFormula(IF(ROW(C:C)=1,"Previous Response",IF(ISBLANK(G1:G), "", ' \
                                 'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + '!$A:$F, 6, False))))'
        # # Tutors
        wks.cell('J1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", COUNTIFS(' \
                                 f'Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&C1:C&D1:D&"*", Tutors!I:I, "TRUE", Tutors!J:J,"YES"))))'
        # Ranking
        wks.cell('K1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"Ranking", IF(ISBLANK(A1:A), "", B1:B+(I1:I/100))))'
        # Course Status: color coded professor info
        # (also refreshes self.status_arr -- duplicates the list set in
        # __init__; the formula text embeds these exact labels)
        self.status_arr = stat = ['No', 'Sent for different course', 'Match Error', 'Awaiting Response', 'Yes']
        wks.cell('Q1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"Status", IF(ISBLANK(A1:A), "", ' \
            f'IFERROR(IF((O1:O="[]") + (VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False) = "") > 0, "{stat[2]}", ' \
            f'IFERROR(IFS(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="No", "{stat[0]}",' \
            f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 9, False)="Yes", "{stat[4]}", ' \
            f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="No", "{stat[0]}", ' \
            f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 8, False)="Yes", "{stat[4]}", ' \
            f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="No", "{stat[0]}", ' \
            f'VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 6, False)="Yes", "{stat[4]}" ), ' \
            f'IF(NE(A1:A, VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 14, False)), "{stat[1]}", ' \
            f'IF(VLOOKUP(G1:G, ' + f"'Professors {term[2]}'" + f'!$A:$N, 12, False)="Fall 19","{stat[3]}",)))),"{stat[2]}" ))))'

    def setup_professor_formulas(self, wks, term):
        """Install the 'To Send' formula on the professor worksheet."""
        # Previous Response
        # To Send
        wks.cell('M1').formula = 'ArrayFormula(IF(ROW(A:A)=1,"To Send",IF(ISBLANK(A:A),"", ' \
                                 'IF(RegExMatch(E1:E,"@"), ' \
                                 'IFERROR(' \
                                 'IFS(L1:L="Fall 19", "No",F1:F="No", "No",H1:H="No", "No",I1:I="No", "No"),' \
                                 ' "Yes"), "No"))))'

    def setup_course_formulas(self, wks, term):
        """Install demand-in/out and tutor-count formulas on the course
        worksheet."""
        # Demand out
        wks.cell('D1').formula = 'ArrayFormula(IF(ROW(F:F)=1,"Demand Out", IFS(' \
                                 'IF((F1:F), F1:F+E1:E, 3+E1:E)>5, 5, ' \
                                 'IF((F1:F), F1:F+E1:E, 3+E1:E)<0, 0, ' \
                                 'IF((F1:F), F1:F+E1:E, 3+E1:E)<5, IF((F1:F), F1:F+E1:E, 3+E1:E))))'
        # Demand in
        wks.cell('F1').formula = 'ArrayFormula(IF(ROW(E:E)=1,"Archive Demand In", ' \
                                 'IFERROR(VLOOKUP(A1:A, '+"'Spring 19'"+'!$A:$B, 2, FALSE), )))'
        # # Tutors
        wks.cell('G1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
                                 f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
                                 f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'

    def setup_arch_course_formulas(self, wks, term):
        """Install the tutor-count formula on the archive course worksheet."""
        # # Tutors
        wks.cell('F1').formula = f'ArrayFormula(IF(ROW(A:A)=1,"# Tutors", IF(ISBLANK(A1:A), "", ' \
                                 f'COUNTIFS(Tutors!E:E, "{self.school_config.NICE_NAME}", Tutors!L:L, "*"&SUBSTITUTE(A1:A," ","")&"*", ' \
                                 f'Tutors!I:I, "TRUE", Tutors!J:J, "YES"))))'
class GoogleSheetsUploadStudentOrgs(GoogleSheetsParent):
    """Upload the student-orgs CSV into its own Google Sheet."""

    def __init__(self, args):
        GoogleSheetsParent.__init__(self, args)

    def run(self):
        """Create/refresh the '<school> Student Orgs' sheet: upload the
        CSV and optionally bold the header row."""
        # Create a new sheet in folder
        sh = self._connect_google_sheet(sheet_name_in=f'{self.school_config.NICE_NAME} Student Orgs')
        gs_df = self._load(file_name_key='student_orgs')
        shape = gs_df.shape
        if self.reset_gs:
            # Fresh workbook: add our worksheet, drop pygsheets' default one.
            wks = sh.add_worksheet(self.files['student_orgs'][1], rows=shape[0] + 10, cols=shape[1], index=0)
            sh.del_worksheet(sh.worksheet_by_title('Sheet1'))
        else:
            wks = sh[0]
        # Upload the data
        if self.args.data:
            wks.set_dataframe(gs_df, (1, 1))
            wks.replace('NaN', '')
        if self.args.format:
            # Bold the header row (A1:C1). Plain loop instead of a
            # side-effect list comprehension.
            for cell in sh[0].get_values('A1', 'C1', returnas='range')[0]:
                cell.set_text_format('bold', True)
class GoogleSheetsDownload:
def __init__(self, args):
self.args = args
def run(self):
""" Pulls Data From GS Previous Term and saves to proper csv format
"""
config = cfg[self.args.school.upper()]
sheet_name = f'{config.NICE_NAME} Class List' # f'{config.NICE_NAME} Course List'
gc = pygsheets.authorize(service_file='core/credentials/penji_dev_key.json')
try:
sh = gc.open(sheet_name)
logger.info(f'Found {sheet_name} in google drive, downloading sheet')
except pygsheets.exceptions.SpreadsheetNotFound:
logger.error(f'Could not find {sheet_name} | |
<filename>sporco/admm/bpdn.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2019 by <NAME> <<EMAIL>>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Classes for ADMM algorithm for the BPDN problem"""
from __future__ import division, absolute_import
import copy
import numpy as np
from sporco.admm import admm
import sporco.linalg as sl
import sporco.prox as sp
from sporco.util import u
__author__ = """<NAME> <<EMAIL>>"""
class GenericBPDN(admm.ADMMEqual):
    r"""
    Base class for ADMM algorithm for solving variants of the
    Basis Pursuit DeNoising (BPDN) :cite:`chen-1998-atomic` problem.

    |

    .. inheritance-diagram:: GenericBPDN
       :parts: 2

    |

    The generic problem form is

    .. math::
       \mathrm{argmin}_\mathbf{x} \;
       (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + g(\mathbf{x}) \;\;,

    where :math:`g(\cdot)` is a penalty term or the indicator function
    of a constraint, and is solved via the ADMM problem

    .. math::
       \mathrm{argmin}_{\mathbf{x}, \mathbf{y}} \;
       (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + g(\mathbf{y})
       \quad \text{such that} \quad \mathbf{x} = \mathbf{y} \;\;.

    After termination of the :meth:`solve` method, attribute
    :attr:`itstat` is a list of tuples representing statistics of each
    iteration. The fields of the named tuple ``IterationStats`` are:

       ``Iter`` : Iteration number

       ``ObjFun`` : Objective function value

       ``DFid`` : Value of data fidelity term
       :math:`(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2`

       ``Reg`` : Value of regularisation term

       ``PrimalRsdl`` : Norm of primal residual

       ``DualRsdl`` : Norm of dual residual

       ``EpsPrimal`` : Primal residual stopping tolerance
       :math:`\epsilon_{\mathrm{pri}}`

       ``EpsDual`` : Dual residual stopping tolerance
       :math:`\epsilon_{\mathrm{dua}}`

       ``Rho`` : Penalty parameter

       ``Time`` : Cumulative run time
    """

    class Options(admm.ADMMEqual.Options):
        """GenericBPDN algorithm options

        Options include all of those defined in
        :class:`.admm.ADMMEqual.Options`, together with
        additional options:

          ``AuxVarObj`` : Flag indicating whether the objective
          function should be evaluated using variable X (``False``) or
          Y (``True``) as its argument. Setting this flag to ``True``
          often gives a better estimate of the objective function.

          ``LinSolveCheck`` : Flag indicating whether to compute
          relative residual of X step solver.

          ``NonNegCoef`` : If ``True``, force solution to be non-negative.
        """

        defaults = copy.deepcopy(admm.ADMMEqual.Options.defaults)
        # Warning: although __setitem__ below takes care of setting
        # 'fEvalX' and 'gEvalY' from the value of 'AuxVarObj', this
        # cannot be relied upon for initialisation since the order of
        # initialisation of the dictionary keys is not deterministic;
        # if 'AuxVarObj' is initialised first, the other two keys are
        # correctly set, but this setting is overwritten when 'fEvalX'
        # and 'gEvalY' are themselves initialised
        defaults.update({'AuxVarObj': True, 'fEvalX': False,
                         'gEvalY': True, 'ReturnX': False,
                         'LinSolveCheck': False, 'RelaxParam': 1.8,
                         'NonNegCoef': False})
        defaults['AutoRho'].update({'Enabled': True, 'Period': 10,
                                    'AutoScaling': True, 'Scaling': 1000.0,
                                    'RsdlRatio': 1.2})

        def __init__(self, opt=None):
            """
            Parameters
            ----------
            opt : dict or None, optional (default None)
              GenericBPDN algorithm options
            """

            if opt is None:
                opt = {}
            admm.ADMMEqual.Options.__init__(self, opt)

        def __setitem__(self, key, value):
            """Set options 'fEvalX' and 'gEvalY' appropriately when
            option 'AuxVarObj' is set.
            """

            admm.ADMMEqual.Options.__setitem__(self, key, value)

            # Keep the derived evaluation flags consistent with the
            # user-facing 'AuxVarObj' switch.
            if key == 'AuxVarObj':
                if value is True:
                    self['fEvalX'] = False
                    self['gEvalY'] = True
                else:
                    self['fEvalX'] = True
                    self['gEvalY'] = False

    # Names/headers used by the base class when recording and printing
    # per-iteration statistics.
    itstat_fields_objfn = ('ObjFun', 'DFid', 'Reg')
    itstat_fields_extra = ('XSlvRelRes',)
    hdrtxt_objfn = ('Fnc', 'DFid', 'Reg')
    hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid', 'Reg': 'Reg'}

    def __init__(self, D, S, opt=None):
        """
        Parameters
        ----------
        D : array_like, shape (N, M)
          Dictionary matrix
        S : array_like, shape (N, K)
          Signal vector or matrix
        opt : :class:`BPDN.Options` object
          Algorithm options
        """

        # Problem dimensions: M dictionary atoms, K signal vectors.
        Nc = D.shape[1]
        Nm = S.shape[1]
        if opt is None:
            opt = GenericBPDN.Options()
        super(GenericBPDN, self).__init__((Nc, Nm), S.dtype, opt)

        self.S = np.asarray(S, dtype=self.dtype)
        self.setdict(D)

    def setdict(self, D):
        """Set dictionary array."""

        self.D = np.asarray(D, dtype=self.dtype)
        # Cache D^T s, which is constant across iterations.
        self.DTS = self.D.T.dot(self.S)
        # Factorise dictionary for efficient solves
        self.lu, self.piv = sl.cho_factor(self.D, self.rho)
        self.lu = np.asarray(self.lu, dtype=self.dtype)

    def getcoef(self):
        """Get final coefficient array."""

        return self.Y

    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """

        # Solve (D^T D + rho I) x = D^T s + rho (y - u) using the cached
        # factorisation from setdict()/rhochange().
        self.X = np.asarray(sl.cho_solve_ATAI(
            self.D, self.rho, self.DTS + self.rho * (self.Y - self.U),
            self.lu, self.piv), dtype=self.dtype)

        if self.opt['LinSolveCheck']:
            # Relative residual of the linear solve, recorded in itstat.
            b = self.DTS + self.rho * (self.Y - self.U)
            ax = self.D.T.dot(self.D.dot(self.X)) + self.rho*self.X
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None

    def ystep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{y}`. If this method is not overridden, the
        problem is solved without any regularisation other than the
        option enforcement of non-negativity of the solution. When it
        is overridden, it should be explicitly called at the end of
        the overriding method.
        """

        if self.opt['NonNegCoef']:
            self.Y[self.Y < 0.0] = 0.0

    def eval_objfn(self):
        """Compute components of objective function as well as total
        contribution to objective function.
        """

        dfd = self.obfn_dfd()
        reg = self.obfn_reg()
        obj = dfd + reg[0]
        return (obj, dfd) + reg[1:]

    def obfn_dfd(self):
        r"""Compute data fidelity term :math:`(1/2) \| D \mathbf{x} -
        \mathbf{s} \|_2^2`.
        """

        return 0.5*np.linalg.norm((self.D.dot(self.obfn_fvar()) - self.S))**2

    def obfn_reg(self):
        """Compute regularisation term(s) and contribution to objective
        function.
        """

        # Subclasses must supply the penalty term g(.).
        raise NotImplementedError()

    def itstat_extra(self):
        """Non-standard entries for the iteration stats record tuple."""

        return (self.xrrs,)

    def rhochange(self):
        """Re-factorise matrix when rho changes."""

        # The cached factorisation depends on rho, so it must be rebuilt.
        self.lu, self.piv = sl.cho_factor(self.D, self.rho)
        self.lu = np.asarray(self.lu, dtype=self.dtype)
class BPDN(GenericBPDN):
r"""
ADMM algorithm for the Basis Pursuit DeNoising (BPDN)
:cite:`chen-1998-atomic` problem.
|
.. inheritance-diagram:: BPDN
:parts: 2
|
Solve the Single Measurement Vector (SMV) BPDN problem
.. math::
\mathrm{argmin}_\mathbf{x} \;
(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x}
\|_1
via the ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{x}, \mathbf{y}} \;
(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{y}
\|_1 \quad \text{such that} \quad \mathbf{x} = \mathbf{y} \;\;.
The Multiple Measurement Vector (MMV) BPDN problem
.. math::
\mathrm{argmin}_X \;
(1/2) \| D X - S \|_F^2 + \lambda \| X \|_1
is also supported.
After termination of the :meth:`solve` method, attribute
:attr:`itstat` is a list of tuples representing statistics of each
iteration. The fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value
``DFid`` : Value of data fidelity term :math:`(1/2) \| D
\mathbf{x} - \mathbf{s} \|_2^2`
``RegL1`` : Value of regularisation term :math:`\| \mathbf{x}
\|_1`
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``Time`` : Cumulative run time
"""
class Options(GenericBPDN.Options):
r"""BPDN algorithm options
Options include all of those defined in
:class:`.GenericBPDN.Options`, together with additional
options:
``L1Weight`` : An array of weights for the :math:`\ell_1`
norm. The array shape must be such that the array is
compatible for multiplication with the X/Y variables. If this
option is defined, the regularization term is :math:`\lambda
\| \mathbf{w} \odot \mathbf{x} \|_1` where :math:`\mathbf{w}`
denotes the weighting array.
"""
defaults = copy.deepcopy(GenericBPDN.Options.defaults)
defaults.update({'L1Weight': 1.0})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
BPDN algorithm options
"""
if opt is None:
opt = {}
GenericBPDN.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1')
hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'))
hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid', u('Regℓ1'): 'RegL1'}
def __init__(self, D, S, lmbda=None, opt=None):
"""
|
**Call graph**
.. image:: ../_static/jonga/bpdn_init.svg
:width: 20%
:target: ../_static/jonga/bpdn_init.svg
|
Parameters
----------
D : array_like, shape (N, M)
Dictionary matrix
S : array_like, shape (N, K)
Signal vector or matrix
lmbda : float
Regularisation parameter
opt : :class:`BPDN.Options` object
Algorithm options
"""
# Set default options if necessary
if opt is None:
opt = BPDN.Options()
# Set dtype attribute based on S.dtype and opt['DataType']
self.set_dtype(opt, S.dtype)
# Set default lambda value if not specified
if lmbda is None:
DTS = D.T.dot(S)
lmbda = 0.1 * abs(DTS).max()
# Set l1 term scaling and weight array
self.lmbda = self.dtype.type(lmbda)
self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)
# Set penalty parameter
self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),
dtype=self.dtype)
# Set rho_xi attribute (see Sec. VI.C of wohlberg-2015-adaptive)
if self.lmbda != 0.0:
rho_xi = float((1.0 + (18.3)**(np.log10(self.lmbda) + 1.0)))
else:
rho_xi = 1.0
self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=rho_xi,
dtype=self.dtype)
super(BPDN, self).__init__(D, S, opt)
def uinit(self, ushape):
"""Return initialiser for working variable U"""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is | |
get wcs, no header yet
if self.meta is None:
self.logger.warning("Cannot get WCS, no header yet")
return None
try:
self._wcs = WCS(self.meta)
return self._wcs
except:
self.logger.warning("Problem with WCS")
return None
@property
def exptime(self):
    """Exposure time (EXPTIME header keyword), cached after first lookup.

    Returns None (with a warning) when no header has been loaded yet or
    the keyword is absent.
    """
    # We have it already, just return it
    if self._exptime is not None:
        return self._exptime
    # Can't get exptime, no header yet
    if self.meta is None:
        self.logger.warning("Cannot get EXPTIME, no header yet")
        return None
    # Pull EXPTIME from the header, cache it and return.
    # (Fixed stale copy-paste comments that referred to rdnoise.)
    if 'EXPTIME' in self.meta:
        self._exptime = self.meta['EXPTIME']
        return self._exptime
    # Use the logger for consistency with the sibling properties
    # instead of a bare print().
    self.logger.warning('No EXPTIME found')
    return None
@property
def instrument(self):
    """Instrument code ('c4d', 'k4m' or 'ksb'), cached after first lookup."""
    # Cached value wins.
    if self._instrument is not None:
        return self._instrument
    # Without a header we cannot classify the instrument.
    if self.meta is None:
        self.logger.warning("Cannot get INSTRUMENT, no header yet")
        return None
    # Classify from the DTINSTRU header keyword, e.g.
    #   DTINSTRU = 'mosaic3 '  (with DTTELESC = 'kp4m')  -> k4m
    #   DTINSTRU = '90prime'   (Bok 90Prime)             -> ksb
    # Anything unrecognised is treated as DECam ('c4d').
    dtinstru = self.meta.get("DTINSTRU")
    if dtinstru == 'mosaic3':
        code = 'k4m'
    elif dtinstru == '90prime':
        code = 'ksb'
    else:
        code = 'c4d'
    self._instrument = code
    return self._instrument
@property
def plver(self):
    """Pipeline version (PLVER header keyword), cached; defaults to 'V1.0'."""
    # We have it already, just return it
    if self._plver is not None:
        return self._plver
    # Can't get plver, no header yet
    if self.meta is None:
        self.logger.warning("Cannot get PLVER, no header yet")
        return None
    plver = self.meta.get('PLVER')
    if plver is None:
        # BUG FIX: previously the 'V1.0' default written to self._plver
        # was immediately overwritten with None; now the default is
        # applied to the local value before caching.
        plver = 'V1.0'
    self._plver = plver
    return self._plver
@property
def cpfwhm(self):
    """Seeing FWHM from the CP header, cached; 1.5 when no estimate exists."""
    if self._cpfwhm is not None:
        return self._cpfwhm
    if self.meta is None:
        self.logger.warning("Cannot get CPFWHM, no header yet")
        return None
    # FWHM values are ONLY in the extension headers.
    # c4d uses FWHM scaled by 0.27 (presumably pixels -> arcsec -- TODO
    # confirm); k4m/ksb use SEEING1 directly.
    decam_fwhm = self.meta.get('FWHM')
    seeing1 = self.meta.get('SEEING1')
    per_instrument = {
        'c4d': 1.5 if decam_fwhm is None else decam_fwhm * 0.27,
        'k4m': 1.5 if seeing1 is None else seeing1,
        'ksb': 1.5 if seeing1 is None else seeing1,
    }
    self._cpfwhm = per_instrument[self.instrument]
    return self._cpfwhm
@property
def maglim(self):
    """Magnitude limit: the DAOPHOT estimate if set, else SExtractor's."""
    for candidate in (self._daomaglim, self._sexmaglim):
        if candidate is not None:
            return candidate
    self.logger.warning('Maglim not set yet')
    return None
# Write SE catalog in DAO format
def sextodao(self, cat=None, outfile=None, format="coo"):
    """Convert an SE catalog to a DAOPHOT-format file.

    cat     : catalog to convert (defaults to self.sexcat)
    outfile : output file name (defaults to '<daofile base>.coo')
    format  : DAOPHOT output format (default "coo")
    """
    daobase = os.path.basename(self.daofile)
    daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
    if outfile is None: outfile = daobase + ".coo"
    if cat is None: cat = self.sexcat
    # BUG FIX: pass the (possibly caller-supplied) 'cat' instead of
    # unconditionally passing self.sexcat, which made the 'cat'
    # argument a no-op.
    sextodao(cat, self.meta, outfile=outfile, format=format, logger=self.logger)
# Run Source Extractor
#---------------------
def runsex(self, outfile=None):
    """Run Source Extractor on the exposure and cache the results.

    Sets self.sexcat, self._sexmaglim and self.meta['FWHM'].
    NOTE(review): the 'outfile' parameter is currently unused -- confirm
    whether it should override the hard-coded catalog file name.
    """
    basedir, tmpdir = getnscdirs(self.nscversion)
    configdir = basedir + "config/"
    sexcatfile = "flux_sex.cat.fits"
    sexcat, maglim = runsex(self.fluxfile, self.wtfile, self.maskfile, self.meta,
                            sexcatfile, configdir, logger=self.logger)
    # Keep the in-memory catalog. (The transient assignment of the file
    # name string to self.sexcat was dead code and has been removed.)
    self.sexcat = sexcat
    self._sexmaglim = maglim
    # Set the FWHM as well
    fwhm = sexfwhm(sexcat, logger=self.logger)
    self.meta['FWHM'] = fwhm
# Determine FWHM using SE catalog
#--------------------------------
def sexfwhm(self):
    """Measure the seeing FWHM from the SE catalog, cache it on
    self.seeing and return it."""
    measured = sexfwhm(self.sexcat)
    self.seeing = measured
    return measured
# Pick PSF candidates using SE catalog
#-------------------------------------
def sexpickpsf(self, nstars=100):
    """Select up to `nstars` PSF candidate stars from the SE catalog,
    writing them to '<sexfile base>.lst'."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.sexfile))[0])[0]
    # Reuse a cached seeing estimate when available.
    if self.seeing is None:
        fwhm = self.sexfwhm()
    else:
        fwhm = self.seeing
    sexpickpsf(self.sexcat, fwhm, self.meta, root + ".lst",
               nstars=nstars, logger=self.logger)
# Make DAOPHOT option files
#--------------------------
def mkopt(self):
    """Create the DAOPHOT option files for this exposure."""
    optbase = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    mkopt(optbase, self.meta, logger=self.logger)
# Make image ready for DAOPHOT
def mkdaoim(self):
    """Build the DAOPHOT-ready image (self.daofile) from the flux,
    weight and mask files."""
    mkdaoim(self.fluxfile, self.wtfile, self.maskfile,
            self.meta, self.daofile, logger=self.logger)
# DAOPHOT detection
#----------------------
def daofind(self):
    """Run DAOPHOT FIND on the DAO image, writing the '.coo' detection
    list."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    daofind(self.daofile, outfile=root + ".coo", logger=self.logger)
# DAOPHOT aperture photometry
#----------------------------
def daoaperphot(self):
    """Run DAOPHOT aperture photometry on the detection list and cache
    the derived magnitude limit on self._daomaglim."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    apcat, maglim = daoaperphot(self.daofile, root + ".coo",
                                outfile=root + ".ap", logger=self.logger)
    self._daomaglim = maglim
# Pick PSF stars using DAOPHOT
#-----------------------------
def daopickpsf(self, maglim=None, nstars=100):
    """Run DAOPHOT PICKPSF to select up to `nstars` PSF stars brighter
    than `maglim` (defaults to self.maglim), writing the '.lst' file."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    if maglim is None:
        maglim = self.maglim
    daopickpsf(self.daofile, root + ".ap", maglim, root + ".lst",
               nstars, logger=self.logger)
# Run DAOPHOT PSF
#-------------------
def daopsf(self, verbose=False):
    """Run the DAOPHOT PSF task on the PSF star list, writing the
    '.psf' file."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    daopsf(self.daofile, root + ".lst", outfile=root + ".psf",
           verbose=verbose, logger=self.logger)
# Subtract neighbors of PSF stars
#--------------------------------
def subpsfnei(self):
    """Subtract the neighbours of the PSF stars, producing the
    '<base>a.fits' neighbour-subtracted image."""
    root = os.path.splitext(os.path.splitext(os.path.basename(self.daofile))[0])[0]
    subpsfnei(self.daofile, root + ".lst", root + ".nei",
              root + "a.fits", logger=self.logger)
# Create DAOPHOT PSF
#-------------------
    def createpsf(self,listfile=None,apfile=None,doiter=True,maxiter=5,minstars=6,subneighbors=True,verbose=False):
        """Iteratively create the DAOPHOT PSF from the picked star list.

        NOTE(review): none of the keyword arguments (listfile, apfile,
        doiter, maxiter, minstars, subneighbors, verbose) are forwarded to
        the module-level createpsf() call below -- confirm whether they
        should be passed through.
        """
        daobase = os.path.basename(self.daofile)
        daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
        createpsf(daobase+".fits",daobase+".ap",daobase+".lst",meta=self.meta,logger=self.logger)
# Run ALLSTAR
#-------------
    def allstar(self,psffile=None,apfile=None,subfile=None):
        """Run ALLSTAR PSF photometry, producing ``<base>.als``.

        NOTE(review): the psffile/apfile/subfile arguments are accepted but
        never forwarded to the module-level allstar() call below -- confirm
        whether they should be passed through.
        """
        daobase = os.path.basename(self.daofile)
        daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
        alscat = allstar(daobase+".fits",daobase+".psf",daobase+".ap",outfile=daobase+".als",meta=self.meta,logger=self.logger)
# Get aperture correction
#------------------------
    def getapcor(self):
        """Compute the aperture correction from the neighbor-subtracted
        image, storing it in ``self.apcorr`` and recording it (with a
        descriptive comment) in the ``self.meta`` header under 'apcor'."""
        daobase = os.path.basename(self.daofile)
        daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
        apcorr = apcor(daobase+"a.fits",daobase+".lst",daobase+".psf",self.meta,optfile=daobase+'.opt',alsoptfile=daobase+".als.opt",logger=self.logger)
        self.apcorr = apcorr
        self.meta['apcor'] = (apcorr,"Aperture correction in mags")
# Combine SE and DAOPHOT catalogs
#--------------------------------
def finalcat(self,outfile=None,both=True,sexdetect=True):
# both Only keep sources that have BOTH SE and ALLSTAR information
# sexdetect SE catalog was used for DAOPHOT detection list
self.logger.info("-- Creating final combined catalog --")
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if outfile is None: outfile=self.base+".cat.fits"
# Check that we have the SE and ALS information
if (self.sexcat is None) | (os.path.exists(daobase+".als") is None):
self.logger.warning("SE catalog or ALS catalog NOT found")
return
# Load ALS catalog
als = Table(daoread(daobase+".als"))
nals = len(als)
# Apply aperture correction
if self.apcorr is None:
self.logger.error("No aperture correction available")
return
als['MAG'] -= self.apcorr
# Just add columns to the SE catalog
ncat = len(self.sexcat)
newcat = self.sexcat.copy()
alsnames = ['X','Y','MAG','ERR','SKY','ITER','CHI','SHARP']
newnames = ['XPSF','YPSF','MAGPSF','ERRPSF','SKY','ITER','CHI','SHARP','RAPSF','DECPSF']
newtypes = ['float64','float64','float','float','float','float','float','float','float64','float64']
nan = float('nan')
newvals = [nan, nan, nan, nan ,nan, nan, nan, nan, nan, nan]
# DAOPHOT detection list used, need ALS ID
if not sexdetect:
alsnames = ['ID']+alsnames
newnames = ['ALSID']+newnames
newtypes = ['int32']+newtypes
newvals = [-1]+newvals
newcols = []
for n,t,v in zip(newnames,newtypes,newvals):
col = Column(name=n,length=ncat,dtype=t)
col[:] = v
newcols.append(col)
newcat.add_columns(newcols)
# Match up with IDs if SE list used by DAOPHOT
if sexdetect:
mid, ind1, ind2 = np.intersect1d(newcat["NUMBER"],als["ID"],return_indices=True)
for id1,id2 in zip(newnames,alsnames):
newcat[id1][ind1] = als[id2][ind2]
# Only keep sources that have SE+ALLSTAR information
# trim out ones that don't have ALS
if (both is True) & (nals<ncat): newcat = newcat[ind1]
# Match up with coordinates, DAOPHOT detection list used
else:
print("Need to match up with coordinates")
# Only keep sources that have SE+ALLSTAR information
# trim out ones that don't have ALS
if (both is True) & (nals<ncat): newcat = newcat[ind1]
# Add RA, DEC
r,d = self.wcs.all_pix2world(newcat["XPSF"],newcat["YPSF"],1)
newcat['RAPSF'] = r
newcat['DECPSF'] = d
# Write to file
self.logger.info("Final catalog = "+outfile)
fits.writeto(outfile,None,self.meta,overwrite=True) # meta in PDU header
# append the table in extension 1
hdulist = fits.open(outfile)
hdu = fits.table_to_hdu(newcat)
hdulist.append(hdu)
hdulist.writeto(outfile,overwrite=True)
hdulist.close()
#newcat.write(outfile,overwrite=True)
#fits.append(outfile,0,self.meta) # meta is header of 2nd extension
# Process a single chip
#----------------------
    def process(self):
        """Run the full single-chip reduction in order: SExtractor
        detection, DAOPHOT setup, aperture photometry, PSF construction,
        ALLSTAR photometry, aperture correction, and the final catalog.
        The call order matters: each step consumes files written by the
        previous one."""
        self.runsex()
        self.logger.info("-- Getting ready to run DAOPHOT --")
        self.mkopt()
        self.mkdaoim()
        #self.daodetect()
        # Create DAOPHOT-style coo file
        # Need to use SE positions
        self.sextodao(outfile="flux_dao.coo")
        self.daoaperphot()
        self.daopickpsf()
        self.createpsf()
        self.allstar()
        self.getapcor()
        self.finalcat()
# Do I need to rerun daoaperphot to get aperture
# photometry at the FINAL allstar positions??
# Is there a way to reduce the number of iterations needed to create the PSF?
# what do the ?, * mean anyway?
# maybe just remove the worse 10% of stars or something
# Put all of the daophot-running into separate function (maybe separate module)
# same for sextractor
# Maybe make my own xmatch function that does one-to-one matching
# Clean up the files
#--------------------
def cleanup(self):
self.logger.info("Copying final files to output directory "+self.outdir)
base = os.path.basename(self.fluxfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
# Copy the files we want to keep
# final combined catalog, logs
outcatfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".fits"
if os.path.exists(outcatfile): os.remove(outcatfile)
shutil.copyfile("flux.cat.fits",outcatfile)
# Copy DAOPHOT opt files
outoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".opt"
if os.path.exists(outoptfile): os.remove(outoptfile)
shutil.copyfile(daobase+".opt",outoptfile)
outalsoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".als.opt"
if os.path.exists(outalsoptfile): os.remove(outalsoptfile)
shutil.copyfile(daobase+".als.opt",outalsoptfile)
# Copy DAOPHOT PSF star list
outlstfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf.lst"
if os.path.exists(outlstfile): os.remove(outlstfile)
shutil.copyfile(daobase+".lst",outlstfile)
# Copy DAOPHOT PSF file
outpsffile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf"
if os.path.exists(outpsffile): os.remove(outpsffile)
shutil.copyfile(daobase+".psf",outpsffile)
# Copy DAOPHOT .apers file??
# copy Allstar PSF subtracted file to output dir
outsubfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+"s.fits"
if os.path.exists(outsubfile): os.remove(outsubfile)
shutil.copyfile(daobase+"s.fits",outsubfile)
# Copy SE config file
outconfigfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".sex.config"
if os.path.exists(outconfigfile): os.remove(outconfigfile)
shutil.copyfile("default.config",outconfigfile)
# Combine all the log files
logfiles = glob.glob(base+"*.log")
loglines = []
for logfil in logfiles:
loglines += ["==> "+logfil+" <==\n"]
f = open(logfil,'r')
lines = f.readlines()
f.close()
loglines += lines
loglines += ["\n"]
f = open(base+".logs","w")
f.writelines("".join(loglines))
f.close()
outlogfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".logs"
if os.path.exists(outlogfile): os.remove(outlogfile)
shutil.copyfile(base+".logs",outlogfile)
# Delete temporary directory/files
self.logger.info(" Cleaning up")
files1 = glob.glob("flux*")
files2 = glob.glob("default*")
files = files1+files2+["flux.fits","wt.fits","mask.fits","daophot.opt","allstar.opt"]
for f in files:
if os.path.exists(f): os.remove(f)
# Main command-line program
if __name__ == "__main__":
# Version
verdir = ""
if len(sys.argv) > 4:
version = sys.argv[4]
verdir | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 08:59:01 2021
@author: alexa
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import time
import math
from numpy import pi as pi
from scipy import optimize
#par = [MASSA, MASSB, d_1, d_2, SADDLE_E]
def upo_analytical(total_energy, t, par):
    """
    Analytical solution of the unstable periodic orbit at a given energy.

    Parameters
    ----------
    total_energy : float
        Total energy of the unstable periodic orbit
    t : 1d numpy array
        vector of time points at which the solution is evaluated
    par : float (list)
        model parameters; par[3] is the d_2 coefficient

    Returns
    -------
    2d numpy array
        rows [y(t), p_y(t)] evaluated at the time points
    """
    excess = total_energy - 1
    y_t = np.cos(t) * np.sqrt(excess / (par[3] * pi))
    py_t = np.sin(t) * np.sqrt(2 * excess)
    return np.array([y_t, py_t])
def init_guess_eqpt_uncoupled(eqNum, par):
    """
    Returns guess for solving configuration space coordinates of the equilibrium points.
    For the uncoupled model only the saddle point (eqNum = 1) has a guess defined.

    Parameters
    ----------
    eqNum : int
        = 1 for the saddle equilibrium point
    par : float (list)
        model parameters (unused here; kept for interface consistency
        with the other *_uncoupled functions)

    Returns
    -------
    x0 : float (list of size 2)
        configuration space coordinates of the guess: [x, y]

    Raises
    ------
    ValueError
        If eqNum is not 1.  (The original code silently fell off the end
        of the function for other values, yielding no usable guess.)
    """
    if eqNum == 1:
        return [1, 0]
    raise ValueError("init_guess_eqpt_uncoupled: only eqNum == 1 (saddle) is supported")
def grad_pot_uncoupled(x, par):
    """
    Negative gradient of the potential energy (i.e. the force).

    Parameters
    ----------
    x : float (list of size 2)
        configuration space coordinates: [x, y]
    par : float (list)
        model parameters; par[3] is the d_2 coefficient

    Returns
    -------
    F : float (list of size 2)
        force components [-dV/dx, -dV/dy]
    """
    dVdx = -2 * pi * np.sin(2 * pi * x[0])
    dVdy = 2 * par[3] * pi * x[1]
    return [-dVdx, -dVdy]
def pot_energy_uncoupled(x, y, par):
    """
    Potential energy V(x, y) = cos(2*pi*x) + d2*pi*y**2.

    Parameters
    ----------
    x, y : float
        configuration space coordinates
    par : float (list)
        model parameters; par[3] is the d_2 coefficient

    Returns
    -------
    float
        potential energy of the configuration
    """
    periodic_term = np.cos(2 * pi * x)
    harmonic_term = par[3] * pi * y ** 2
    return periodic_term + harmonic_term
def eigvector_uncoupled(par):
    """
    Flags enabling the eigenvector correction factors for the linear guess
    of the unstable periodic orbit.

    Parameters
    ----------
    par : float (list)
        model parameters (unused; both flags are always enabled)

    Returns
    -------
    (correcx, correcy) : tuple of int
        1/0 flags for the x and y eigenvector components (both 1 here)
    """
    return 1, 1
def guess_lin_uncoupled(eqPt, Ax, par):
    """
    Initial guess for the differential-correction iteration, based on the
    linearization at the saddle equilibrium point.

    Parameters
    ----------
    eqPt : float (list of size 2)
        configuration space coordinates of the equilibrium point
        (unused: the guess is hardcoded at the saddle x = 1)
    Ax : float
        small amplitude displacement from the equilibrium point
    par : float (list)
        model parameters (unused)

    Returns
    -------
    float (list of size 4)
        phase space coordinates [x, y, px, py] of the initial guess
    """
    # The eigenvector correction flags returned by eigvector_uncoupled()
    # were computed but never used, so that dead call has been removed.
    return [1, Ax, 0, 0]
def jacobian_uncoupled(eqPt, par):
    """
    Jacobian of the Hamiltonian vector field evaluated at a phase-space point.

    Parameters
    ----------
    eqPt : float (list of size 4)
        phase space coordinates [x, y, px, py] of the evaluation point
    par : float (list)
        model parameters; par[0], par[1] are the mass factors and par[3]
        is the d_2 coefficient

    Returns
    -------
    Df : 2d numpy array
        4x4 Jacobian matrix
    """
    x, y, px, py = eqPt[0:4]  # px, py unused: the Jacobian only depends on position
    # Second derivatives of the potential; the cross terms vanish for the
    # uncoupled model.
    Vxx = -4 * (pi ** 2) * np.cos(2 * pi * x)
    Vyy = 2 * par[3] * pi
    Df = np.array([[0.0, 0.0, par[0], 0.0],
                   [0.0, 0.0, 0.0, par[1]],
                   [-Vxx, 0.0, 0.0, 0.0],
                   [0.0, -Vyy, 0.0, 0.0]])
    return Df
def variational_eqns_uncoupled(t, PHI, par):
    """
    Right-hand side for the state transition matrix ODE
    d PHI(t, t0)/dt = Df(t) * PHI(t, t0), together with the phase-space flow.

    Parameters
    ----------
    t : float
        solution time (unused: the system is autonomous)
    PHI : 1d array of length 20
        first 16 entries: flattened state transition matrix;
        last 4 entries: phase space coordinates [x, y, px, py]
    par : float (list)
        model parameters; par[0], par[1] mass factors, par[3] = d_2

    Returns
    -------
    PHIdot : float (list of size 20)
        time derivative of PHI
    """
    phimatrix = np.reshape(PHI[0:16], (4, 4))
    x, y, px, py = PHI[16:20]
    # First derivatives of the potential drive the momentum equations.
    dVdx = -2 * pi * np.sin(2 * pi * x)
    dVdy = 2 * par[3] * pi * y
    # Second derivatives build the Jacobian; cross terms vanish for the
    # uncoupled model.
    d2Vdx2 = -4 * (pi ** 2) * np.cos(2 * pi * x)
    d2Vdy2 = 2 * par[3] * pi
    Df = np.array([[0.0, 0.0, par[0], 0.0],
                   [0.0, 0.0, 0.0, par[1]],
                   [-d2Vdx2, 0.0, 0.0, 0.0],
                   [0.0, -d2Vdy2, 0.0, 0.0]])
    PHIdot = np.zeros(20)
    # Variational equation for the flattened STM.
    PHIdot[0:16] = np.matmul(Df, phimatrix).reshape(16)
    # Hamilton's equations for the trajectory itself.
    PHIdot[16:20] = [px / par[0], py / par[1], -dVdx, -dVdy]
    return list(PHIdot)
def diffcorr_setup_uncoupled():
    """
    Iteration settings for the differential-correction procedure.

    Returns
    -------
    [drdot1, correctr0, MAXdrdot1] : list
        drdot1 = 1 selects x-dot as the event coordinate for stopping the
        periodic-orbit integration; correctr0 = 0 selects which
        configuration coordinate to correct; MAXdrdot1 is the convergence
        tolerance on |x-dot|.
    """
    # Stop integration on x-dot crossings and iterate until |x-dot| falls
    # below the tolerance.
    return [1, 0, 1.e-10]
def conv_coord_uncoupled(x1, y1, dxdot1, dydot1):
    """
    Coordinate held fixed during differential correction.

    Parameters
    ----------
    x1, y1 : float
        configuration space coordinates (unused)
    dxdot1, dydot1 : float
        velocity coordinates (only dxdot1 is used)

    Returns
    -------
    float
        the x-dot coordinate, which the uncoupled model drives to zero
    """
    return dxdot1
def get_coord_uncoupled(x, y, E, par):
    """
    V(x, y) - E: zero on the isopotential contour at total energy E.
    Used as the residual function for numerical root solvers.

    Parameters
    ----------
    x, y : float
        configuration space coordinates
    E : float
        total energy
    par : float (list)
        model parameters; par[3] is the d_2 coefficient

    Returns
    -------
    float
        potential energy minus the target energy
    """
    potential = np.cos(2 * pi * x) + par[3] * pi * y ** 2
    return potential - E
def diffcorr_acc_corr_uncoupled(coords, phi_t1, x0, par):
    """
    Apply the leading-order differential correction to the initial guess.

    For the uncoupled model the y coordinate of the guess is corrected
    (the x correction is intentionally disabled, as in the original).

    Parameters
    ----------
    coords : float (list of size 4)
        phase space coordinates (x1, y1, dxdot1, dydot1) at the crossing
    phi_t1 : 2d numpy array
        state transition matrix evaluated at time t1
    x0 : float (list)
        initial condition before the correction (modified in place)
    par : float (list)
        model parameters; par[3] is the d_2 coefficient

    Returns
    -------
    x0 : float (list)
        initial condition after the correction
    """
    x1, y1, dxdot1, dydot1 = coords
    # Accelerations from the potential gradient at the crossing point.
    vxdot1 = 2 * pi * np.sin(2 * pi * x1)   # = -dV/dx
    vydot1 = -2 * par[3] * pi * y1          # = -dV/dy
    # Leading-order correction to the initial y0.
    denom = phi_t1[3, 1] - phi_t1[2, 1] * vydot1 * (1 / vxdot1)
    x0[1] -= dydot1 / denom
    return x0
def configdiff_uncoupled(guess1, guess2, ham2dof_model, \
half_period_model, n_turn, par):
"""
Returns the difference of x(or y) coordinates of the guess initial condition and the ith turning point
Used by turning point based on configuration difference method | |
#!/usr/bin/env python
# coding: utf-8
# # Notebook script for generation of training dataset (supports n phase material)
#
# ## For case of one or two phase, GUI works
#
# ## Different steps of data generation is outlined in this notebook (LaueToolsNN GUI does the same thing)
#
# ### Define material of interest
# ### Generate class hkl data for Neural Network model (these are the output neurons)
# ### Clean up generated dataset
# In[1]:
if __name__ == '__main__': #enclosing required because of multiprocessing
## Import modules used for this Notebook
import os
import numpy as np
import _pickle as cPickle
import itertools
from keras.callbacks import EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
## if LaueToolsNN is properly installed
try:
from lauetoolsnn.utils_lauenn import generate_classHKL, generate_multimat_dataset, \
rmv_freq_class_MM, get_multimaterial_detail,\
array_generator, array_generator_verify, vali_array
except:
# else import from a path where LaueToolsNN files are
import sys
sys.path.append(r"C:\Users\purushot\Desktop\github_version_simple\lauetoolsnn")
from utils_lauenn import generate_classHKL, generate_multimat_dataset, \
rmv_freq_class_MM, get_multimaterial_detail,\
array_generator, array_generator_verify, vali_array
# ## step 1: define material and other parameters for simulating Laue patterns
# In[2]:
# =============================================================================
## User Input dictionary with parameters
## In case of only one phase/material, keep same value for material_ and material1_ key
# =============================================================================
input_params = {
# =============================================================================
# GENERATION OF DATASET
# =============================================================================
"material_": ["Cu","Si","Ge","GaN"], ## same key as used in dict_LaueTools
"prefix" : "", ## prefix for the folder to be created for training dataset
"symmetry": ["cubic","cubic","cubic","hexagonal"], ## crystal symmetry of material_
"SG": [225,230,230,186], ## Space group of material_ (None if not known)
"hkl_max_identify" : [3,3,3,5], ## Maximum hkl index to classify in a Laue pattern
"maximum_angle_to_search":90, ## Angle of radial distribution to reconstruct the histogram (in deg)
"step_for_binning" : 0.1, ## bin widht of angular radial distribution in degree
"nb_grains_per_lp" : [1,1,1,1], ## max grains to be generated in a Laue Image
"grains_nb_simulate" : 100, ## Number of orientations to generate (takes advantage of crystal symmetry)
# =============================================================================
# Detector parameters (roughly) of the Experimental setup
# =============================================================================
## Sample-detector distance, X center, Y center, two detector angles
"detectorparameters" : [79.553,979.32,932.31,0.37,0.447],
"pixelsize" : 0.0734, ## Detector pixel size
"dim1":2018, ## Dimensions of detector in pixels
"dim2":2016,
"emin" : 5, ## Minimum and maximum energy to use for simulating Laue Patterns
"emax" : 22,
# =============================================================================
# Training paarmeters
# =============================================================================
"freq_rmv_classhkl" : [100,100,100,100],
"keep_length_classhkl" : ["all","all","all","all"],
"batch_size":50, ## batches of files to use while training
"epochs":8,
}
generate_data = True
train_model = True
# ### number of files it will generate fro training
# nb_grains_list = []
# for ino, imat in enumerate(input_params["material_"]):
# nb_grains_list.append(list(range(input_params["nb_grains_per_lp"][ino]+1)))
# list_permute = list(itertools.product(*nb_grains_list))
# list_permute.pop(0)
# print(len(list_permute)*input_params["grains_nb_simulate"])
# ## Step 2: Get material parameters
# ### Generates a folder with material name and gets material unit cell parameters and symmetry object
# from the get_material_detail function
# In[3]:
material_= input_params["material_"]
n = input_params["hkl_max_identify"]
maximum_angle_to_search = input_params["maximum_angle_to_search"]
step_for_binning = input_params["step_for_binning"]
nb_grains_per_lp = input_params["nb_grains_per_lp"]
grains_nb_simulate = input_params["grains_nb_simulate"]
detectorparameters = input_params["detectorparameters"]
pixelsize = input_params["pixelsize"]
emax = input_params["emax"]
emin = input_params["emin"]
symm_ = input_params["symmetry"]
SG = input_params["SG"]
if len(material_) > 1:
prefix_mat = material_[0]
for ino, imat in enumerate(material_):
if ino == 0:
continue
prefix_mat = prefix_mat + "_" + imat
else:
prefix_mat = material_
save_directory = os.getcwd()+"//"+prefix_mat+input_params["prefix"]
print("save directory is : "+save_directory)
if not os.path.exists(save_directory):
os.makedirs(save_directory)
## get unit cell parameters and other details required for simulating Laue patterns
rules, symmetry, lattice_material, \
crystal, SG = get_multimaterial_detail(material_, SG, symm_)
# ## Step 3: Generate Neural network output classes (Laue spot hkls) using the generate_classHKL function
# In[4]:
if generate_data:
### generate_classHKL_multimat
## procedure for generation of GROUND TRUTH classes
# general_diff_cond = True will eliminate the hkl index that does not satisfy the general reflection conditions
for ino, imat in enumerate(material_):
generate_classHKL(n[ino], rules[ino], lattice_material[ino], \
symmetry[ino], material_[ino], \
crystal=crystal[ino], SG=SG[ino], general_diff_cond=False,
save_directory=save_directory, write_to_console=print, \
ang_maxx = maximum_angle_to_search, \
step = step_for_binning)
# ## Step 4: Generate Training and Testing dataset only for the output classes (Laue spot hkls) calculated in the Step 3
# ### Uses multiprocessing library
# In[5]:
############ GENERATING MULTI MATERIAL TRAINING DATA ##############
# data_realism =True ; will introduce noise and partial Laue patterns in the training dataset
# modelp can have either "random" for random orientation generation or "uniform" for uniform orientation generation
# include_scm (if True; misorientation_angle parameter need to be defined): this parameter introduces misoriented crystal of
# specific angle along a crystal axis in the training dataset
generate_multimat_dataset(material_=material_,
ang_maxx=maximum_angle_to_search,
step=step_for_binning,
nb_grains=nb_grains_per_lp,
grains_nb_simulate=grains_nb_simulate,
data_realism = True,
detectorparameters=detectorparameters,
pixelsize=pixelsize,
type_="training_data",
var0 = 1,
dim1=input_params["dim1"],
dim2=input_params["dim2"],
removeharmonics=1,
save_directory=save_directory,
write_to_console=print,
emin=emin,
emax=emax,
modelp = "random",
general_diff_rules = False,
crystal = crystal,)
############ GENERATING TESTING DATA ##############
factor = 5 # validation split for the training dataset --> corresponds to 20% of total training dataset
generate_multimat_dataset(material_=material_,
ang_maxx=maximum_angle_to_search,
step=step_for_binning,
nb_grains=nb_grains_per_lp,
grains_nb_simulate=grains_nb_simulate//factor,
data_realism = True,
detectorparameters=detectorparameters,
pixelsize=pixelsize,
type_="testing_data",
var0 = 1,
dim1=input_params["dim1"],
dim2=input_params["dim2"],
removeharmonics=1,
save_directory=save_directory,
write_to_console=print,
emin=emin,
emax=emax,
modelp = "random",
general_diff_rules = False,
crystal = crystal,)
#%%# Updating the ClassHKL list by removing the non-common HKL or less frequent HKL from the list
## The non-common HKL can occur as a result of the detector position and energy used
# freq_rmv: remove output hkl if the training dataset has less tha 100 occurances of the considered hkl (freq_rmv1 for second phase)
# Weights (penalty during training) are also calculated based on the occurance
freq_rmv = input_params["freq_rmv_classhkl"]
elements = input_params["keep_length_classhkl"]
rmv_freq_class_MM(freq_rmv = freq_rmv, elements = elements,
save_directory = save_directory, material_ = material_,
write_to_console = print, progress=None, qapp=None)
## End of data generation for Neural network training: all files are saved in the same folder
## to be later used for training and prediction
# ## Step 2: Load the necessary files generated in Step 1 script
# ### Loading the Output class and ground truth
# In[3]:
if train_model:
classhkl = np.load(save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_0"]
angbins = np.load(save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_1"]
loc_new = np.load(save_directory+"//MOD_grain_classhkl_angbin.npz")["arr_2"]
with open(save_directory+"//class_weights.pickle", "rb") as input_file:
class_weights = cPickle.load(input_file)
class_weights = class_weights[0]
n_bins = len(angbins)-1
n_outputs = len(classhkl)
print(n_bins, n_outputs)
# ## Step 3: Defining a neural network architecture
# In[4]:
import tensorflow as tf
import keras
from keras.regularizers import l2
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
metricsNN = [
keras.metrics.FalseNegatives(name="fn"),
keras.metrics.FalsePositives(name="fp"),
keras.metrics.TrueNegatives(name="tn"),
keras.metrics.TruePositives(name="tp"),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="accuracy"),
]
def model_arch_general_optimized(n_bins, n_outputs, kernel_coeff = 0.0005, bias_coeff = 0.0005, lr=None, verbose=1,
write_to_console=None):
"""
Very simple and straight forward Neural Network with few hyperparameters
straighforward RELU activation strategy with cross entropy to identify the HKL
Tried BatchNormalization --> no significant impact
Tried weighted approach --> not better for HCP
Trying Regularaization
l2(0.001) means that every coefficient in the weight matrix of the layer
will add 0.001 * weight_coefficient_value**2 to the total loss of the network
1e-3,1e-5,1e-6
"""
if n_outputs >= n_bins:
param = n_bins
if param*15 < (2*n_outputs): ## quick hack; make Proper implementation
param = (n_bins + n_outputs)//2
else:
param = n_outputs*2 ## More reasonable ???
model = Sequential()
model.add(keras.Input(shape=(n_bins,)))
## Hidden layer 1
model.add(Dense(n_bins, kernel_regularizer=l2(kernel_coeff), bias_regularizer=l2(bias_coeff)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3)) ## Adding dropout as we introduce some uncertain data with noise
## Hidden layer 2
model.add(Dense(((param)*15 + n_bins)//2, kernel_regularizer=l2(kernel_coeff), bias_regularizer=l2(bias_coeff)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
## Hidden layer 3
model.add(Dense((param)*15, kernel_regularizer=l2(kernel_coeff), bias_regularizer=l2(bias_coeff)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
## Output layer
model.add(Dense(n_outputs, activation='softmax'))
## Compile model
if lr != None:
otp = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='categorical_crossentropy', optimizer=otp, metrics=[metricsNN])
else:
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=[metricsNN])
if verbose == 1:
model.summary()
stringlist = []
model.summary(print_fn=lambda x: stringlist.append(x))
short_model_summary = "\n".join(stringlist)
if write_to_console!=None:
write_to_console(short_model_summary)
| |
<reponame>joezuntz/MockMPI<filename>mockmpi/comm.py
# Copyright (c) <NAME> and other collaborators
# See https://github.com/rmjarvis/MockMPI/LICENSE for license information.
import numpy as np
# This constant seems to have the same value in MPICH and OpenMPI
# so we reproduce it here since it can be quite important.
IN_PLACE = 1
class MockComm(object):
"""A class to mock up the MPI Comm API using a multiprocessing Pipe.
"""
    def __init__(self, rank, size, pipes, barrier):
        """Build a mock communicator.

        rank/size mirror the MPI rank and communicator size; pipes is the
        per-rank list of connection objects used for point-to-point
        messages; barrier is the shared barrier used by Barrier().
        """
        self.rank = rank
        self.size = size
        self.pipes = pipes
        self.barrier = barrier
    def __bool__(self):
        # A communicator with zero ranks is falsy.
        return self.size > 0
    def Get_rank(self):
        """Return this process's rank (mpi4py-compatible accessor)."""
        return self.rank
    def Get_size(self):
        """Return the communicator size (mpi4py-compatible accessor)."""
        return self.size
def send(self, msg, dest):
if dest != self.rank:
self.pipes[dest].send(msg)
else:
self.msg = msg
def Send(self, msg, dest):
if not isinstance(msg, np.ndarray):
raise ValueError(
"Can only use Send with numpy arrays "
"(Mocking code does not handle general buffers)"
)
self.send(msg, dest)
def recv(self, source):
if source != self.rank:
msg = self.pipes[source].recv()
else:
msg = self.msg
return msg
def Recv(self, buffer, source):
msg = self.recv(source)
buffer[:] = msg
    def Barrier(self):
        # Delegate to the shared barrier object supplied at construction.
        self.barrier.wait()
def bcast(self, msg, root=0):
if root == self.rank:
for p in range(self.size):
self.send(msg, p)
msg = self.recv(root)
return msg
def Bcast(self, msg, root=0):
if root == self.rank:
for p in range(self.size):
self.Send(msg, p)
self.Recv(msg, root)
def scatter(self, data, root=0):
if root == self.rank:
for p in range(self.size):
self.send(data[p], p)
data = self.recv(root)
return data
def gather(self, data, root=0):
self.send(data, root)
if root == self.rank:
new_data = []
for p in range(self.size):
new_data.append(self.recv(p))
return new_data
else:
return None
def alltoall(self, data=0):
for p in range(self.size):
self.send(data[p], p)
new_data = []
for p in range(self.size):
new_data.append(self.recv(p))
return new_data
def reduce(self, sendobj, op=None, root=0):
if op is not None:
raise NotImplementedError("Not implemented non-sum reductions in mock MPI")
new_data = self.gather(sendobj, root)
if root == self.rank:
d = new_data[0]
for d2 in new_data[1:]:
d = d + d2
return d
else:
return None
def allreduce(self, sendobj, op=None):
d = self.reduce(sendobj, op)
d = self.bcast(d)
return d
def Reduce(self, sendbuf, recvbuf, op=None, root=0):
if sendbuf is IN_PLACE:
sendbuf = recvbuf.copy()
if not isinstance(sendbuf, np.ndarray):
raise ValueError(
"Cannot use Reduce with non-arrays. "
"(Mocking code does not handle general buffers)"
)
r = self.reduce(sendbuf, op=op, root=root)
if self.rank == root:
recvbuf[:] = r
    def Allreduce(self, sendbuf, recvbuf, op=None):
        """Buffer-style allreduce: Reduce to rank 0, then Bcast the result
        into every rank's recvbuf."""
        self.Reduce(sendbuf, recvbuf, op)
        self.Bcast(recvbuf)
# Instance methods not implemented
    def Abort(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Abort' is not implemented in mockmpi")
    def Accept(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Accept' is not implemented in mockmpi")
    def Allgather(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Allgather' is not implemented in mockmpi"
        )
    def Allgatherv(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Allgatherv' is not implemented in mockmpi"
        )
    def Alltoall(self, *args, **kwargs):
        # Buffer-style variant; only the lowercase alltoall() is mocked.
        raise NotImplementedError(
            "The method 'Alltoall' is not implemented in mockmpi"
        )
    def Alltoallv(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Alltoallv' is not implemented in mockmpi"
        )
    def Alltoallw(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Alltoallw' is not implemented in mockmpi"
        )
    def Bsend(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Bsend' is not implemented in mockmpi")
    def Bsend_init(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Bsend_init' is not implemented in mockmpi"
        )
    def Call_errhandler(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Call_errhandler' is not implemented in mockmpi"
        )
    def Cart_map(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Cart_map' is not implemented in mockmpi"
        )
    def Clone(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Clone' is not implemented in mockmpi")
    def Connect(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Connect' is not implemented in mockmpi")
    def Create(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError("The method 'Create' is not implemented in mockmpi")
    def Create_cart(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_cart' is not implemented in mockmpi"
        )
    def Create_dist_graph(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_dist_graph' is not implemented in mockmpi"
        )
    def Create_dist_graph_adjacent(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_dist_graph_adjacent' is not implemented in mockmpi"
        )
    def Create_graph(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_graph' is not implemented in mockmpi"
        )
    def Create_group(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_group' is not implemented in mockmpi"
        )
    def Create_intercomm(self, *args, **kwargs):
        # Part of the mpi4py Comm API that this mock does not support.
        raise NotImplementedError(
            "The method 'Create_intercomm' is not implemented in mockmpi"
        )
def Delete_attr(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Delete_attr' is not implemented in mockmpi"
)
def Disconnect(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Disconnect' is not implemented in mockmpi"
)
def Dup(self, *args, **kwargs):
raise NotImplementedError("The method 'Dup' is not implemented in mockmpi")
def Dup_with_info(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Dup_with_info' is not implemented in mockmpi"
)
def Exscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Exscan' is not implemented in mockmpi")
def Free(self, *args, **kwargs):
raise NotImplementedError("The method 'Free' is not implemented in mockmpi")
def Gather(self, *args, **kwargs):
raise NotImplementedError("The method 'Gather' is not implemented in mockmpi")
def Gatherv(self, *args, **kwargs):
raise NotImplementedError("The method 'Gatherv' is not implemented in mockmpi")
def Get_attr(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_attr' is not implemented in mockmpi"
)
def Get_errhandler(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_errhandler' is not implemented in mockmpi"
)
def Get_group(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_group' is not implemented in mockmpi"
)
def Get_info(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_info' is not implemented in mockmpi"
)
def Get_name(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_name' is not implemented in mockmpi"
)
def Get_topology(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_topology' is not implemented in mockmpi"
)
def Graph_map(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Graph_map' is not implemented in mockmpi"
)
def Iallgather(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallgather' is not implemented in mockmpi"
)
def Iallgatherv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallgatherv' is not implemented in mockmpi"
)
def Iallreduce(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallreduce' is not implemented in mockmpi"
)
def Ialltoall(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoall' is not implemented in mockmpi"
)
def Ialltoallv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoallv' is not implemented in mockmpi"
)
def Ialltoallw(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoallw' is not implemented in mockmpi"
)
def Ibarrier(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ibarrier' is not implemented in mockmpi"
)
def Ibcast(self, *args, **kwargs):
raise NotImplementedError("The method 'Ibcast' is not implemented in mockmpi")
def Ibsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Ibsend' is not implemented in mockmpi")
def Idup(self, *args, **kwargs):
raise NotImplementedError("The method 'Idup' is not implemented in mockmpi")
def Iexscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Iexscan' is not implemented in mockmpi")
def Igather(self, *args, **kwargs):
raise NotImplementedError("The method 'Igather' is not implemented in mockmpi")
def Igatherv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Igatherv' is not implemented in mockmpi"
)
def Improbe(self, *args, **kwargs):
raise NotImplementedError("The method 'Improbe' is not implemented in mockmpi")
def Iprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'Iprobe' is not implemented in mockmpi")
def Irecv(self, *args, **kwargs):
raise NotImplementedError("The method 'Irecv' is not implemented in mockmpi")
def Ireduce(self, *args, **kwargs):
raise NotImplementedError("The method 'Ireduce' is not implemented in mockmpi")
def Ireduce_scatter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ireduce_scatter' is not implemented in mockmpi"
)
def Ireduce_scatter_block(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ireduce_scatter_block' is not implemented in mockmpi"
)
def Irsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Irsend' is not implemented in mockmpi")
def Is_inter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Is_inter' is not implemented in mockmpi"
)
def Is_intra(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Is_intra' is not implemented in mockmpi"
)
def Iscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Iscan' is not implemented in mockmpi")
def Iscatter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iscatter' is not implemented in mockmpi"
)
def Iscatterv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iscatterv' is not implemented in mockmpi"
)
def Isend(self, *args, **kwargs):
raise NotImplementedError("The method 'Isend' is not implemented in mockmpi")
def Issend(self, *args, **kwargs):
raise NotImplementedError("The method 'Issend' is not implemented in mockmpi")
def Mprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'Mprobe' is not implemented in mockmpi")
def Probe(self, *args, **kwargs):
raise NotImplementedError("The method 'Probe' is not implemented in mockmpi")
def Recv_init(self, *args, **kwargs):
| |
<gh_stars>0
#!/usr/bin/env python
# -*-mode: python; coding: utf-8 -*-
#
# Inspired from svn-import.py by <EMAIL> (ref :
# http://svn.haxx.se/users/archive-2006-10/0857.shtml)
#
# svn-merge-vendor.py (v1.0.1) - Import a new release, such as a vendor drop.
#
# The "Vendor branches" chapter of "Version Control with Subversion"
# describes how to do a new vendor drop with:
#
# >The goal here is to make our current directory contain only the
# >libcomplex 1.1 code, and to ensure that all that code is under version
# >control. Oh, and we want to do this with as little version control
# >history disturbance as possible.
#
# This utility tries to take you to this goal - automatically. Files
# new in this release is added to version control, and files removed
# in this new release are removed from version control. It will
# detect the moved files by looking in the svn log to find the
# "copied-from" path !
#
# Compared to svn_load_dirs.pl, this utility:
#
# * DETECTS THE MOVED FILES !!
# * Does not hard-code commit messages
# * Allows you to fine-tune the import before commit, which
# allows you to turn adds+deletes into moves.
#
# TODO :
# * support --username and --password
#
# This tool is provided under GPL license. Please read
# http://www.gnu.org/licenses/gpl.html for the original text.
#
# $HeadURL$
# $LastChangedRevision$
# $LastChangedDate$
# $LastChangedBy$
import os
import re
import tempfile
import atexit
import subprocess
import shutil
import sys
import getopt
import logging
import string
from StringIO import StringIO
# lxml module can be found here : http://codespeak.net/lxml/
from lxml import etree
import types
prog_name = os.path.basename(sys.argv[0])
orig_svn_subroot = None
base_copied_paths = []
r_from = None
r_to = None
log_tree = None
entries_to_treat = []
entries_to_delete = []
added_paths = []
logger = None
def del_temp_tree(tmpdir):
    """Delete the temporary tree `tmpdir`, leaving it first.

    We chdir to "/" so the removal cannot fail because the process is
    standing inside the tree; a failure is logged, not fatal.
    """
    global logger
    logger.info("Deleting tmpdir "+tmpdir)
    os.chdir("/")
    try:
        shutil.rmtree(tmpdir)
    except OSError:
        # Bug fix: the old code did `print logger.warn(...)`, which logged the
        # message and then also printed the call's return value ("None").
        logger.warn("Couldn't delete tmpdir %s. Don't forget to remove it manually." % (tmpdir))
def checkout(url, revision=None):
    """Check out `url` (pinned at `revision` when given, else HEAD) into a
    fresh temporary directory; return the working-copy path, or None on
    failure."""
    global logger
    # The working copy lives in a throw-away directory, removed at exit.
    wc_dir = tempfile.mkdtemp(prefix=prog_name)
    atexit.register(del_temp_tree, wc_dir)
    if revision:
        url = url + "@" + revision
    logger.info("Checking out "+url+" to "+wc_dir)
    if call_cmd(["svn", "checkout", url, wc_dir]) == 1:
        return None
    return wc_dir
def merge(wc_dir, revision_from, revision_to):
    """svn-merge the revision range revision_from:revision_to into wc_dir."""
    global logger
    logger.info("Merging between revisions %s and %s into %s" % (revision_from, revision_to, wc_dir))
    os.chdir(wc_dir)
    rev_range = revision_from + ":" + revision_to
    return call_cmd(["svn", "merge", "-r", rev_range, wc_dir])
def treat_status(wc_dir_orig, wc_dir):
    """Copies modification from official vendor branch to wc"""
    # Walk `svn status --xml` of the pristine vendor checkout and replay each
    # change (add/copy/modify/delete) into the vendor-branch working copy.
    # check_exit (defined elsewhere in this file) presumably aborts on a bad
    # return code -- TODO confirm.
    global logger
    logger.info("Copying modification from official vendor branch %s to wc %s" % (wc_dir_orig, wc_dir))
    os.chdir(wc_dir_orig)
    status_tree = call_cmd_xml_tree_out(["svn", "status", "--xml"])
    global entries_to_treat, entries_to_delete
    entries_to_treat = status_tree.xpath("/status/target/entry")
    entries_to_delete = []
    # Deletions are deferred: copy() may consume a matching delete entry and
    # turn an add+delete pair into an svn move.
    while len(entries_to_treat) > 0:
        entry = entries_to_treat.pop(0)
        entry_type = get_entry_type(entry)
        file = get_entry_path(entry)
        if entry_type == 'added':
            if is_entry_copied(entry):
                check_exit(copy(wc_dir_orig, wc_dir, file), "Error during copy")
            else:
                check_exit(add(wc_dir_orig, wc_dir, file), "Error during add")
        elif entry_type == 'deleted':
            entries_to_delete.append(entry)
        elif entry_type == 'modified' or entry_type == 'replaced':
            check_exit(update(wc_dir_orig, wc_dir, file), "Error during update")
        elif entry_type == 'normal':
            logger.info("File %s has a 'normal' state (unchanged). Ignoring." % (file))
        else:
            logger.error("Status not understood : '%s' not supported (file : %s)" % (entry_type, file))
    # We then treat the left deletions
    for entry in entries_to_delete:
        check_exit(delete(wc_dir_orig, wc_dir, get_entry_path(entry)), "Error during delete")
    return 0
def get_entry_type(entry):
    # Status item of a <entry> node, e.g. 'added', 'deleted', 'modified'.
    return get_xml_text_content(entry, "wc-status/@item")
def get_entry_path(entry):
    # Path of the status entry, relative to the working-copy root.
    return get_xml_text_content(entry, "@path")
def is_entry_copied(entry):
    # True when svn marked the added entry as copied (copy history attached).
    return get_xml_text_content(entry, "wc-status/@copied") == 'true'
def copy(wc_dir_orig, wc_dir, file):
    """Replay a copied/moved ('A+') entry into the working copy.

    The copy source is recovered from the vendor trunk's svn log so that the
    file's history is preserved; add+delete pairs are turned into moves.
    Falls back to a plain add() when no usable origin can be found.
    """
    global logger
    logger.info("A+ %s" % (file))
    # Retrieving the original URL
    os.chdir(wc_dir_orig)
    info_tree = call_cmd_xml_tree_out(["svn", "info", "--xml", os.path.join(wc_dir_orig, file)])
    url = get_xml_text_content(info_tree, "/info/entry/url")
    # Detecting original svn root
    global orig_svn_subroot
    if not orig_svn_subroot:
        orig_svn_root = get_xml_text_content(info_tree, "/info/entry/repository/root")
        #print >>sys.stderr, "url : %s" % (url)
        sub_url = url.split(orig_svn_root)[-1]
        sub_url = os.path.normpath(sub_url)
        #print >>sys.stderr, "sub_url : %s" % (sub_url)
        if sub_url.startswith(os.path.sep):
            sub_url = sub_url[1:]
        orig_svn_subroot = '/'+sub_url.split(file)[0].replace(os.path.sep, '/')
        #print >>sys.stderr, "orig_svn_subroot : %s" % (orig_svn_subroot)
    global log_tree
    if not log_tree:
        # Detecting original file copy path
        # (the verbose log is fetched once and cached for all later lookups)
        os.chdir(wc_dir_orig)
        orig_svn_root_subroot = get_xml_text_content(info_tree, "/info/entry/repository/root") + orig_svn_subroot
        real_from = str(int(r_from)+1)
        logger.info("Retrieving log of the original trunk %s between revisions %s and %s ..." % (orig_svn_root_subroot, real_from, r_to))
        log_tree = call_cmd_xml_tree_out(["svn", "log", "--xml", "-v", "-r", "%s:%s" % (real_from, r_to), orig_svn_root_subroot])
    # Detecting the path of the original moved or copied file
    # (follow the copyfrom-path chain backwards to the earliest source)
    orig_url_file = orig_svn_subroot+file.replace(os.path.sep, '/')
    orig_url_file_old = None
    #print >>sys.stderr, " orig_url_file : %s" % (orig_url_file)
    while orig_url_file:
        orig_url_file_old = orig_url_file
        orig_url_file = get_xml_text_content(log_tree, "//path[(@action='R' or @action='A') and text()='%s']/@copyfrom-path" % (orig_url_file))
        logger.debug("orig_url_file : %s" % (orig_url_file))
    orig_url_file = orig_url_file_old
    # Getting the relative url for the original url file
    if orig_url_file:
        orig_file = convert_relative_url_to_path(orig_url_file)
    else:
        orig_file = None
    global base_copied_paths, added_paths
    # If there is no "moved origin" for that file, or the origin doesn't exist in the working directory, or the origin is the same as the given file, or the origin is an added file
    if not orig_url_file or (orig_file and (not os.path.exists(os.path.join(wc_dir, orig_file)) or orig_file == file or orig_file in added_paths)):
        # Check if the file is within a recently copied path
        for path in base_copied_paths:
            if file.startswith(path):
                logger.warn("The path %s to add is a sub-path of recently copied %s. Ignoring the A+." % (file, path))
                return 0
        # Simple add the file
        logger.warn("Log paths for the file %s don't correspond with any file in the wc. Will do a simple A." % (file))
        return add(wc_dir_orig, wc_dir, file)
    # We catch the relative URL for the original file
    orig_file = convert_relative_url_to_path(orig_url_file)
    # Detect if it's a move
    cmd = 'copy'
    global entries_to_treat, entries_to_delete
    if search_and_remove_delete_entry(entries_to_treat, orig_file) or search_and_remove_delete_entry(entries_to_delete, orig_file):
        # It's a move, removing the delete, and treating it as a move
        cmd = 'move'
    logger.info("%s from %s" % (cmd, orig_url_file))
    returncode = call_cmd(["svn", cmd, os.path.join(wc_dir, orig_file), os.path.join(wc_dir, file)])
    if returncode == 0:
        if os.path.isdir(os.path.join(wc_dir, orig_file)):
            base_copied_paths.append(file)
        else:
            # Copy the last version of the file from the original repository
            shutil.copy(os.path.join(wc_dir_orig, file), os.path.join(wc_dir, file))
    return returncode
def search_and_remove_delete_entry(entries, orig_file):
    """Remove the first 'deleted' status entry matching orig_file from
    `entries`; return True when one was found and removed."""
    match = next((e for e in entries
                  if get_entry_type(e) == 'deleted'
                  and get_entry_path(e) == orig_file), None)
    if match is None:
        return False
    entries.remove(match)
    return True
def convert_relative_url_to_path(url):
    # Strip the vendor-trunk prefix (orig_svn_subroot) from a repository URL,
    # yielding a normalized path relative to the working-copy root.
    global orig_svn_subroot
    return os.path.normpath(url.split(orig_svn_subroot)[-1])
def new_added_path(returncode, file):
    """Record `file` as freshly added when the svn call succeeded."""
    if is_returncode_bad(returncode):
        return
    global added_paths
    added_paths.append(file)
def add(wc_dir_orig, wc_dir, file):
    """Replay an 'added' entry into the working copy (svn mkdir for
    directories, copy + svn add for files); falls back to a plain modify
    when the target already exists. Returns the svn exit status."""
    global logger
    logger.info("A %s" % (file))
    if os.path.exists(os.path.join(wc_dir, file)):
        logger.warn("Target file %s already exists. Will do a simple M" % (file))
        return update(wc_dir_orig, wc_dir, file)
    os.chdir(wc_dir)
    if os.path.isdir(os.path.join(wc_dir_orig, file)):
        returncode = call_cmd(["svn", "mkdir", file])
    else:
        shutil.copy(os.path.join(wc_dir_orig, file), os.path.join(wc_dir, file))
        returncode = call_cmd(["svn", "add", file])
    # common tail of both branches: remember the freshly added path
    new_added_path(returncode, file)
    return returncode
def delete(wc_dir_orig, wc_dir, file):
    """Replay a deletion into the working copy; missing files are ignored."""
    global logger
    logger.info("D %s" % (file))
    os.chdir(wc_dir)
    if os.path.exists(file):
        return call_cmd(["svn", "delete", file])
    logger.warn("File %s doesn't exist. Ignoring D." % (file))
    return 0
def update(wc_dir_orig, wc_dir, file):
    """Overwrite the working-copy file with the vendor version (an svn 'M');
    directories are skipped."""
    global logger
    logger.info("M %s" % (file))
    src = os.path.join(wc_dir_orig, file)
    if os.path.isdir(src):
        logger.warn("%s is a directory. Ignoring M." % (file))
        return 0
    shutil.copy(src, os.path.join(wc_dir, file))
    return 0
def fine_tune(wc_dir):
    """Gives the user a chance to fine-tune"""
    # Pause before commit so the user can hand-edit the merged working copy
    # (e.g. turn remaining add+delete pairs into moves).
    alert(["If you want to fine-tune import, do so in working copy located at : %s" % (wc_dir),
"When done, press Enter to commit, or Ctrl-C to abort."])
def alert(messages):
    """Wait the user to <ENTER> or abort the program"""
    # Python-2-style print to stderr; blocks on stdin until the user hits
    # Enter, exiting cleanly on Ctrl-C.
    for message in messages:
        print >> sys.stderr, message
    try:
        return sys.stdin.readline()
    except KeyboardInterrupt:
        sys.exit(0)
def commit(wc_dir, message):
    """Commit the working copy, using `message` when given (else svn will
    prompt interactively)."""
    os.chdir(wc_dir)
    cmd = ["svn", "commit"]
    if message:
        cmd.extend(["-m", message])
    return call_cmd(cmd)
def tag_wc(repo_url, current, tag, message):
    """Server-side copy of repo_url/current to repo_url/tag (an svn 'tag')."""
    cmd = ["svn", "copy"]
    if message:
        cmd.extend(["-m", message])
    cmd.append(repo_url + "/" + current)
    cmd.append(repo_url + "/" + tag)
    return call_cmd(cmd)
def call_cmd(cmd):
    # Run `cmd`, discarding its stdout; returns the process exit status.
    # NOTE(review): DEVNULL is presumably a file handle defined elsewhere in
    # this file (this code predates subprocess.DEVNULL) -- confirm.
    global logger
    logger.debug(string.join(cmd, ' '))
    return subprocess.call(cmd, stdout=DEVNULL, stderr=sys.stderr)#subprocess.STDOUT)
def call_cmd_out(cmd):
    # Run `cmd` and return its stdout as a readable file-like object; the
    # caller is responsible for reading and closing it.
    global logger
    logger.debug(string.join(cmd, ' '))
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=sys.stderr).stdout
def call_cmd_str_out(cmd):
    """Run `cmd` and return its entire stdout as one string."""
    out = call_cmd_out(cmd)
    try:
        # read() replaces the old readlines()+string-concatenation loop,
        # which was quadratic in the size of the output.
        return out.read()
    finally:
        out.close()
def call_cmd_xml_tree_out(cmd):
    # Parse the command's stdout (expected to be XML, e.g. `svn ... --xml`)
    # into an lxml element tree.
    return etree.parse(StringIO(call_cmd_str_out(cmd)))
def get_xml_text_content(xml_doc, xpath):
    """Evaluate `xpath` on `xml_doc`; return the first result as text
    (attribute results are plain strings, element results yield .text),
    or None when nothing matches."""
    nodes = xml_doc.xpath(xpath)
    if not nodes:
        return None
    first = nodes[0]
    if type(first) == types.StringType:
        return first
    return first.text
def usage(error = None):
"""Print usage message and exit"""
print >>sys.stderr, """%s: Merges the difference between two revisions of the original repository of the vendor, to the vendor branch
usage: %s [options] REPO_URL CURRENT_PATH ORIGINAL_REPO_URL -r N:M
- REPO_URL : repository URL for the vendor branch (i.e: http://svn.example.com/repos/vendor/libcomplex)
- CURRENT_PATH : relative path | |
import os
import sys
import time
import pprint
import signal
import random
import inspect
import pkgutil
import traceback
import importlib.util
import threading as mt
from .ids import generate_id
from .threads import get_thread_name
# ------------------------------------------------------------------------------
#
# Base `ps` invocation used by the stacktrace helpers below; `--forest`
# (a GNU ps option, unavailable on macOS) renders the process tree.
_ps_cmd = 'ps -efw'
if sys.platform != 'darwin':
    _ps_cmd += ' --forest'
# ------------------------------------------------------------------------------
#
def get_trace():
    """Return the traceback of the exception being handled as a string, or,
    outside of exception handling, the current call stack (without this
    helper's own frame)."""
    trace = sys.exc_info()[2]
    if trace:
        frames = traceback.extract_tb(trace)
        return ''.join(traceback.format_list(frames))
    frames = traceback.extract_stack()
    return ''.join(traceback.format_list(frames)[:-1])
# ------------------------------------------------------------------------------
#
# pylint: disable=unused-argument
def print_stacktraces(signum=None, sigframe=None):
    '''
    signum, sigframe exist to satisfy signal handler signature requirements
    '''
    # Dump a stacktrace for every live thread (plus lock ownership info when
    # available) to stdout; typically installed as a SIGUSR1 handler.
    this_tid = mt.currentThread().ident
    # if multiple processes (ie. a process group) get the signal, then all
    # traces are mixed together. Thus we waid 'pid%100' milliseconds, in
    # the hope that this will stagger the prints.
    pid = int(os.getpid())
    time.sleep((pid % 100) / 1000)
    out = '=========================================================\n'
    out += 'RADICAL Utils -- Debug Helper -- Stacktraces\n'
    out += os.popen("%s | grep ' %s ' | grep -v grep" % (_ps_cmd, pid)).read()
    # NOTE(review): _debug_helper appears to be a module-level DebugHelper
    # instance defined elsewhere in this file -- confirm before relying on it.
    if _debug_helper:
        out += '---------------------------------------------------------\n'
        if _debug_helper.locks:
            out += 'Locks:\n'
            for name, lock in _debug_helper.locks.items():
                owner = lock.owner
                waits = lock.waits
                if not owner: owner = '-'
                out += ' %-60s: %s %s\n' % (name, owner, waits)
        if _debug_helper.rlocks:
            out += 'RLocks:\n'
            for name, rlock in _debug_helper.rlocks.items():
                owner = rlock.owner
                waits = rlock.waits
                if not owner: owner = '-'
                out += ' %-60s: %s %s\n' % (name, owner, waits)
        out += '---------------------------------------------------------\n'
    try:
        info = get_stacktraces()
    except Exception as e:
        out += 'skipping frame (%s)' % e
        info = None
    if info:
        for tid,tname in info:
            if tid == this_tid: marker = '[active]'
            else : marker = ''
            out += '---------------------------------------------------------\n'
            out += 'Thread: %s %s\n' % (tname, marker)
            out += ' PID : %s \n' % os.getpid()
            out += ' TID : %s \n' % tid
            for fname,line,func,code in info[tid,tname]:
                if code: code = code.strip()
                else : code = '<no code>'
                # # [:-1]: .py vs. .pyc:/
                # if not (__file__[:-1] in fname and \
                # func in ['get_stacktraces', 'print_stacktraces']):
                # (skip this helper's own frames in the report)
                if func not in ['get_stacktraces', 'print_stacktraces']:
                    out += ' File: %s, line %d, in %s\n' % (fname, line, func)
                    out += ' %s\n' % code
    out += '========================================================='
    sys.stdout.write('%s\n' % out)
    # also persist the dump when debugging is enabled
    if 'RADICAL_DEBUG' in os.environ:
        with open('/tmp/ru.stacktrace.%s.log' % pid, 'w') as f:
            f.write('%s\n' % out)
# ------------------------------------------------------------------------------
#
def get_stacktraces():
    """Snapshot the stack of every live thread, keyed by (thread-id, name)."""
    names = {t.ident: t.name for t in mt.enumerate()}
    frames = sys._current_frames()                  # pylint: disable=W0212
    return {(tid, names.get(tid, 'noname')): traceback.extract_stack(frame)
            for tid, frame in list(frames.items())}
# ------------------------------------------------------------------------------
#
def print_stacktrace(msg=None, _stack=None):
    """Print the current (or the given) stacktrace to stdout, prefixed with
    process/thread info and the matching `ps` output line.

    :param msg:    optional message printed in the header
    :param _stack: pre-formatted stack lines; defaults to the caller's stack
    """
    if not msg:
        msg = ''
    # current_thread(): mt.currentThread() is a deprecated alias (Python 3.10+)
    tname = mt.current_thread().name
    pid = os.getpid()
    out = '--------------\n'
    out += 'RADICAL Utils -- Stacktrace [%s] [%s]\n' % (pid, tname)
    out += '%s\n' % msg
    out += os.popen("%s | grep ' %s ' | grep -v grep" % (_ps_cmd, pid)).read()
    if not _stack:
        _stack = get_stacktrace()
    for line in _stack:
        out += line.strip()
        out += '\n'
    out += '--------------\n'
    sys.stdout.write(out)
# ------------------------------------------------------------------------------
#
def print_exception_trace(msg=None):
    # Convenience wrapper: print the traceback of the exception currently
    # being handled via print_stacktrace().
    print_stacktrace(msg=msg, _stack=traceback.format_exc().split('\n'))
# ------------------------------------------------------------------------------
#
def get_stacktrace():
    """Return the formatted call stack, excluding this helper's own frame."""
    frames = traceback.format_stack()
    return frames[:-1]
# ------------------------------------------------------------------------------
#
def get_caller_name(skip=2):
    '''
    Return the caller's name as ``module.class.method``.

    `skip` selects how many stack levels to walk up: ``skip=1`` names the
    direct caller, ``skip=2`` the caller's caller, etc. An empty string is
    returned when the requested level exceeds the stack height.
    Kudos: http://stackoverflow.com/questions/2654113/ \
            python-how-to-get-the-callers-method-name-in-the-called-method
    '''
    stack = inspect.stack()
    idx = 0 + skip
    if len(stack) <= idx:
        return ''
    frame = stack[idx][0]
    parts = list()
    module = inspect.getmodule(frame)
    # `module` can be None when the frame runs directly in a console
    # TODO(techtonik): consider using __main__
    if module:
        parts.append(module.__name__)
    # bound method? then include the class name
    if 'self' in frame.f_locals:
        parts.append(frame.f_locals['self'].__class__.__name__)
    codename = frame.f_code.co_name
    if codename != '<module>':      # '<module>' marks top-level code
        parts.append(codename)
    del frame
    return '.'.join(parts)
# ------------------------------------------------------------------------------
#
# raise_on() support: verbose tracing is enabled via RADICAL_DEBUG; the
# per-tag counters are process-global and guarded by a lock so that threads
# of the same process share them safely.
_verb = False
if 'RADICAL_DEBUG' in os.environ:
    _verb = True
_raise_on_state = dict()
_raise_on_lock = mt.Lock()
# ------------------------------------------------------------------------------
#
def raise_on(tag, log=None, msg=None):
    '''
    The purpose of this method is to artificially trigger error conditions for
    testing purposes, for example when handling the n'th unit, getting the n'th
    heartbeat signal, etc.
    The tag parameter is interpreted as follows: on the `n`'th invocation of
    this method with any given `tag`, an exception is raised, and the counter
    for that tag is reset.
    The limit `n` is set via an environment variable `RU_RAISE_ON_<tag>`, with
    `tag` in upper casing. The environment will only be inspected during the
    first invocation of the method with any given tag. The tag counter is
    process-local, but is shared amongst threads of that process.
    '''
    global _raise_on_state                           # pylint: disable=W0603
    global _raise_on_lock                            # pylint: disable=W0603
    with _raise_on_lock:
        if tag not in _raise_on_state:
            env = os.environ.get('RU_RAISE_ON_%s' % tag.upper())
            if env and env.startswith('RANDOM_'):
                # env is rnd spec: raise with `rate` percent probability
                rate = int(env[7:])
                limit = 1
            elif env:
                # env is int: raise on every `limit`'th invocation
                rate = 1
                limit = int(env)
            else:
                # no env set: never raise
                rate = 1
                limit = 0
            _raise_on_state[tag] = {'count': 0,
                                    'rate' : rate,
                                    'limit': limit}
        _raise_on_state[tag]['count'] += 1
        count = _raise_on_state[tag]['count']
        limit = _raise_on_state[tag]['limit']
        rate = _raise_on_state[tag]['rate']
        # Bug fix: `info` used to remain undefined when `msg` was unset and
        # RADICAL_DEBUG was off, so `log.debug(..., info)` raised NameError.
        if msg : info = '%s [%2d / %2d] [%s]' % (tag, count, limit, msg)
        else : info = '%s [%2d / %2d]' % (tag, count, limit )
        if log : log.debug('raise_on checked %s' , info)
        elif _verb: print('raise_on checked %s' % info)
        if limit and count == limit:
            _raise_on_state[tag]['count'] = 0
            if rate == 1:
                val = limit
            else:
                val = random.randint(0, 100)
                if val > rate:
                    if log: log.warning('raise_on ignored %s [%2d / %2d]',
                                        tag, val, rate)
                    elif _verb: print('raise_on ignored %s [%2d / %2d]'
                                     % (tag, val, rate))
                    return
            if log: log.warning('raise_on triggered %s [%2d / %2d]',
                                tag, val, rate)
            elif _verb: print('raise_on triggered %s [%2d / %2d]'
                             % (tag, val, rate))
            # reset counter and raise exception
            raise RuntimeError('raise_on for %s [%s]' % (tag, val))
# ------------------------------------------------------------------------------
#
def attach_pudb(log=None):
    """Open a remote pudb debugging session, reachable via telnet."""
    # need to move here to avoid circular import
    from .threads import gettid
    host = '127.0.0.1'
    # host = gethostip()
    tid = gettid()
    # derive the port from the thread id so concurrent sessions don't collide
    port = tid + 10000
    if log:
        log.info('debugger open: telnet %s %d', host, port)
    else:
        print('debugger open: telnet %s %d' % (host, port))
    try:
        import pudb # pylint: disable=E0401
        from pudb.remote import set_trace # pylint: disable=E0401
        pudb.DEFAULT_SIGNAL = signal.SIGALRM
        set_trace(host=host, port=port, term_size=(200, 50))
    except Exception as e:
        # pudb is an optional dependency; failure to attach is non-fatal
        if log:
            log.warning('failed to attach pudb (%s)', e)
# ------------------------------------------------------------------------------
#
_SNIPPET_PATHS = ['%s/.radical/snippets/' % os.environ.get('HOME', '/tmp')]

def add_snippet_path(path):
    '''
    Extend the search path used by `ru.get_snippet()` for dynamically loaded
    python snippets. Only active when RADICAL_DEBUG is set; duplicate paths
    are ignored.
    '''
    if 'RADICAL_DEBUG' not in os.environ:
        return
    global _SNIPPET_PATHS                            # pylint: disable=W0603
    if path not in _SNIPPET_PATHS:
        _SNIPPET_PATHS.append(path)
# ------------------------------------------------------------------------------
#
def get_snippet(sid):
    '''
    RU exposes small python snippets for runtime code insertion. The usage is
    intended as follows:
    * a programmer implements a class
    * for some experiment or test, that class's behavior must be controled at
    runtime.
    * in all places where such an adaptation is expected to take place, the
    programmer inserts a hook like this:
    exec(ru.get_snippet('my_class.init_hook'))
    * this will trigger RU to search for python files of the name
    `my_class.init_hook.py` in `$HOME/.radical/snippets/' (default), and
    return their content for injection.
    The snippet search path can be extended by calling.
    ru.add_snippet_path(path)
    The `RADICAL_DEBUG` environment variable needs to be set for this method to
    do anything. A snippet can use the following literal strings which will be
    replaced by their actual values:
    '###SNIPPET_FILE###' - filename from which snippet was loaded
    '###SNIPPET_PATH###' - path in which the snippet file is located
    '###SNIPPET_ID###' - the sid string used to identify the snippet
    '''
    if 'RADICAL_DEBUG' in os.environ:
        for path in _SNIPPET_PATHS:
            fname = '%s/%s.py' % (path, sid)
            try:
                with open(fname, 'r') as fin:
                    snippet = fin.read()
                    snippet = snippet.replace('###SNIPPET_FILE###', fname)
                    snippet = snippet.replace('###SNIPPET_PATH###', path)
                    snippet = snippet.replace('###SNIPPET_ID###', sid)
                    return snippet
            except:
                # best effort: unreadable or missing candidates are skipped
                pass
    # NOTE: the literal string 'None' (not the None object) is returned, so
    # the result is always safe to pass to exec()
    return 'None'
# ------------------------------------------------------------------------------
#
class DebugHelper(object):
'''
When instantiated, and when 'RADICAL_DEBUG' is set in the environment, this
class will install a signal handler for SIGUSR1. When that signal is
received, a stacktrace for all threads is printed to stdout.
We also check if SIGINFO | |
<gh_stars>100-1000
# Import required libraries
import os
import pickle
import copy
import datetime as dt
import math
import requests
import pandas as pd
from flask import Flask
import dash
import dash_daq as daq
import dash_table
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import dash_dangerously_set_inner_html
import numpy as np
import os
from os import path
from model.base_model import *
from model.R0forecast import *
import train_R0forecaster
# Google-font stylesheets injected into the Dash page <head>.
external_styles = [
    {
        "href": "https://fonts.googleapis.com/css2?family=Open+Sans+Condensed:ital,wght@0,300;0,700;1,300&display=swap",
        "rel": "stylesheet"
    },
    {
        "href": "https://fonts.googleapis.com/css2?family=Noto+Sans+JP:wght@100;300;400;500;700;900&display=swap",
        "rel": "stylesheet"
    },
    {
        "href": "https://fonts.googleapis.com/css2?family=Ubuntu&display=swap",
        "rel": "stylesheet"
    }
]
app = dash.Dash(__name__, external_stylesheets=external_styles)
server = app.server  # underlying Flask server, exposed for WSGI deployment
app.title = "COVID-19 PIP"
POP_UP = app.get_asset_url("transparent_PIP_logo.png")
# Define theme color codes
LIGHT_PINK = "#FF60AA"
DARK_GRAY = "#323232"
GRAY = "#808080"
CYAN = "#95E3FA"
PURPLE_COLOR = "#AF1CF7"
DARK_PINK = "#CA1A57"
# sub-directory (relative to cwd) holding the pickled model snapshot in use
model_dates = "/2020-10-08"
#-------------------------------------------------------
'''
Helper functions for style formating and data processing
List of helper functions >>
---------------------------
_get_input_HTML_format :: returns cell formating for
html/dcc numerical input
_get_radioItems_HTML_format :: returns a radio items
list for display
'''
#-------------------------------------------------------
# TO DO: Attack rate plot
'''
COUNTRIES = ["United States", "United Kingdom", "Italy", "Germany", "Spain",
"Australia", "Brazil", "Canada", "Sweden", "Norway", "Finland",
"Estonia", "Egypt", "Japan", "Croatia"]
'''
COUNTRIES = ["United Kingdom"] #["United States", "United Kingdom", "Italy", "Germany", "Brazil", "Japan"] #, "Egypt"]
# Load (or build and cache) models, country data and projections. Each stanza
# first probes for a pickled cache and only rebuilds on a miss.
if path.exists(os.getcwd() + "/PIPmodels/global_models"):
    global_models = pickle.load(open(os.getcwd() + "/PIPmodels/global_models", 'rb'))
else:
    global_models = dict.fromkeys(COUNTRIES)
    for country in COUNTRIES:
        global_models[country] = pickle.load(open(os.getcwd() + model_dates + "/models/" + country, 'rb'))
    pickle.dump(global_models, open(os.getcwd() + "/PIPmodels/global_models", 'wb'))
if path.exists(os.getcwd() + "/PIPmodels/country_data" + "_" + str(dt.date.today())):
    country_data = pickle.load(open(os.getcwd() + "/PIPmodels/country_data" + "_" + str(dt.date.today()), 'rb'))
else:
    country_data = get_COVID_DELVE_data(COUNTRIES)
    # Bug fix: cache under the dated name the exists() check above probes for;
    # it used to be written without the date suffix, so the cache never hit.
    pickle.dump(country_data, open(os.getcwd() + "/PIPmodels/country_data" + "_" + str(dt.date.today()), 'wb'))
if path.exists(os.getcwd() + "/PIPmodels/projections" + "_" + str(dt.date.today())):
    global_projections = pickle.load(open(os.getcwd() + "/PIPmodels/projections" + "_" + str(dt.date.today()), 'rb'))
else:
    global_projections = dict.fromkeys(COUNTRIES)
    for country in COUNTRIES:
        global_projections[country] = pickle.load(open(os.getcwd() + model_dates + "/projections/" + country, 'rb'))
    # Bug fix: this used to dump `global_models` (the wrong object) to
    # ".../global_projections", a path the exists() check never consults.
    pickle.dump(global_projections, open(os.getcwd() + "/PIPmodels/projections" + "_" + str(dt.date.today()), 'wb'))
npi_model = pickle.load(open(os.getcwd() + "/PIPmodels/R0Forecaster", 'rb'))
# Forecast targets; the list index doubles as the dropdown option value.
TARGETS = ["Daily Deaths", "Cumulative Deaths", "Reproduction Number"]
# Iterate the countries directly rather than via range(len(...)).
COUNTRY_LIST = [{'label': country, 'value': country, "style": {"margin-top": "-.3em", "align": "center"}}
                for country in COUNTRIES]
#TARGET_LIST = [{'label': TARGETS[k], 'value': k, "style":{"margin-top":"-.3em", "align": "center"}} for k in range(len(TARGETS))]
# Cumulative Deaths (index 1) is deliberately excluded from the selectable targets.
TARGET_LIST = [{'label': TARGETS[0], 'value': 0, "style":{"margin-top":"-.3em", "align": "center"}},
               {'label': TARGETS[2], 'value': 2, "style":{"margin-top":"-.3em", "align": "center"}}]
# Shared look-and-feel constants for the panels below.
BOX_SHADOW = "1px 2px 3px 4px #ccc"
MARGIN_INPUT = "20px"
PANEL_COLOR = "#FBF8F8"
# Text styles for panel titles, subtitles and captions.
TITLE_STYLE = {"marginBottom": ".25em", "margin-top": "1em", "margin-left": MARGIN_INPUT, "color":DARK_GRAY, "font-weight": "bold",
               "font-size": "12", "font-family": "Noto Sans JP"}
SUBTITLE_STYLE = {"color":DARK_PINK, "font-size": 13}
SUBTITLE_STYLE_ = {"margin-top":"10px", "color":DARK_PINK, "font-size": 13}
PANEL_TEXT_STYLE = {"marginBottom": ".25em", "margin-top": "0em", "margin-left": MARGIN_INPUT, "color":GRAY, "font-size": "11px",
                    "font-style": "italic", "font-family":"Noto Sans JP"}
PANEL_TEXT_STYLE2 = {"marginBottom": ".25em", "margin-top": "0em", "margin-left": MARGIN_INPUT, "color":GRAY, "font-size": "12px",
                     "font-family":"Noto Sans JP"}
PANEL_TEXT_STYLE3 = {"marginBottom": ".25em", "margin-top": "0em", "margin-left": MARGIN_INPUT, "color":GRAY, "font-size": "12px",
                     "font-family":"Noto Sans JP", "font-weight":"bold"}
PANEL_TEXT_STYLE4 = {"marginBottom": ".25em", "margin-top": "0em", "margin-left": MARGIN_INPUT, "margin-right": MARGIN_INPUT, "color":GRAY,
                     "font-size": "12px", "font-family":"Noto Sans JP", "font-weight":"bold"}
PANEL_TEXT_STYLE_ = {"marginBottom": "0em", "margin-top": "0em", "color":DARK_GRAY, "font-size": "13px", "font-family":"Open Sans Condensed"}
CAPTION_STYLE = {"color":"#4E4646", "font-size": 10}
# Coloured square bullets used as legend markers.
BULLET_STYLE_0 = {"color":"#4E4646", "text-shadow":"#4E4646", "background-color":"#4E4646", "border-radius": "10%", "font-size": 10, "width":"7px", "margin-right":"10px"}
BULLET_STYLE_1 = {"color":"#4F27EC", "text-shadow":"#4F27EC", "background-color":"#4F27EC", "border-radius": "10%", "font-size": 10, "width":"7px", "margin-right":"10px"}
BULLET_STYLE_2 = {"color":"#AF1CF7", "text-shadow":"#AF1CF7", "background-color":"#AF1CF7", "border-radius": "10%", "font-size": 10, "width":"7px", "margin-right":"10px"}
BULLET_STYLE_3 = {"color":"#F71C93", "text-shadow":"#F71C93", "background-color":"#F71C93", "border-radius": "10%", "font-size": 10, "width":"7px", "margin-right":"10px"}
# Input/label styling; the `dict({...})` wrappers previously here only copied
# the literal dicts, so plain literals are used instead.
name_style = {"color": "#4E4646", 'fontSize': 13, "width": "150px", "marginBottom": ".5em", "textAlign": "left", "font-family": "Noto Sans JP"}
name_style_ = {"color": "#4E4646", 'fontSize': 13, "width": "250px", "marginBottom": ".5em", "textAlign": "left", "font-family": "Noto Sans JP"}
input_style = {"width": "100px", "height": "30px", "columnCount": 1, "textAlign": "center", "marginBottom": "1em", "font-size":12, "border-color":LIGHT_PINK}
form_style = {'width' : '10%', 'margin' : '0 auto'}
radio_style = {"width": "150px", "color": "#524E4E", "columnCount": 3, "display": "inline-block", "font-size":11, "border-color":LIGHT_PINK}
radio_style_short = {"width": "110px", "color": "#524E4E", "columnCount": 3, "display": "inline-block", "font-size":11}
radio_style_long = {"width": "450px", "color": GRAY, "columnCount": 6, "display": "inline-block", "font-size":11, "font-family": "Noto Sans JP"}
name_style_long = {"color": "#4E4646", 'fontSize': 13, "width": "450px", "columnCount": 3, "marginBottom": ".5em", "textAlign": "left"}
radio_style_her2 = {"width": "150px", "color": "#524E4E", "columnCount": 3, "display": "inline-block", "font-size":11}
name_style_her2 = {"color": "#4E4646", 'fontSize': 13, "width": "120px", "columnCount": 1, "marginBottom": ".5em", "textAlign": "left"}
# Column names of the NPI features fed to the R0 forecaster.
npi_variables = ["npi_workplace_closing", "npi_school_closing", "npi_cancel_public_events",
                 "npi_gatherings_restrictions", "npi_close_public_transport", "npi_stay_at_home",
                 "npi_internal_movement_restrictions", "npi_international_travel_controls", "npi_masks"]
def _get_input_HTML_format(name, ID, name_style, input_range, input_step, placeholder, input_style):
    """Return an html.P containing a styled label and a bounded numeric dcc.Input."""
    label = html.Div(name, style=name_style)
    number_field = dcc.Input(placeholder=placeholder, type='number',
                             min=input_range[0], max=input_range[1],
                             step=input_step, style=input_style, id=ID)
    return html.P(children=[label, number_field])
def _get_radioItems_HTML_format(name, ID, name_style, options, radio_style):
    """Return an html.P containing a styled label and a dcc.RadioItems group (default value 1)."""
    label = html.Div(name, style=name_style)
    radio_group = dcc.RadioItems(options=options, value=1, style=radio_style, id=ID)
    return html.P(children=[label, radio_group])
def _get_toggle_switch(name, name_style, color_style, ID):
    """Return an html.P containing a styled label and a No/Yes daq.ToggleSwitch (default True)."""
    label = html.Div(name, style=name_style)
    switch = daq.ToggleSwitch(color=color_style, size=30, value=True,
                              label=['No', 'Yes'],
                              style={"font-size":9, "font-family": "Noto Sans JP", "color":GRAY},
                              id=ID)
    return html.P(children=[label, switch], style={"width": "100px", "font-size":9})
def HORIZONTAL_SPACE(space_size):
    """Return a full-width spacer row whose bottom margin is `space_size` em."""
    spacer = html.Div(" ", style={"marginBottom": str(space_size) + "em"})
    return dbc.Row(dbc.Col(spacer))
def VERTICAL_SPACE(space_size):
    """Return a fixed-width spacer column that is `space_size` pixels wide."""
    spacer = html.Div(" ")
    return dbc.Col(spacer, style={"width": str(space_size) + "px"})
#-------------------------------------------------------
'''
App layout components
List of layout components >>
---------------------------
HEADER :: Logo display and navigation buttons on the app
header area
PATIENT_INFO_FORM :: form that reads patient information
to compute and display risk
'''
#-------------------------------------------------------
# Create the **header** with logo and navigation buttons
#-------------------------------------------------------
# Navigation buttons: each dbc.Button is wrapped in an html.A anchor so the
# whole button surface follows the link.
LEARN_BUTTON = html.A(dbc.Button("Learn More", style={"bgcolor": "gray"}), href="https://www.vanderschaar-lab.com/policy-impact-predictor-for-covid-19/", className="two columns")
WEBSITE_BUTTON = html.A(dbc.Button("Go back to website", style={"bgcolor": "gray"}), href="https://www.vanderschaar-lab.com/policy-impact-predictor-for-covid-19/", className="two columns")
FEEDBACK_BUTTON = html.A(dbc.Button("Send Feedback", style={"bgcolor": "gray"}), href="https://www.vanderschaar-lab.com/contact-us/", className="two columns")
# NOTE(review): GITHUB_BUTTON links to the contact-us page, not a GitHub
# repository URL -- confirm the intended destination.
GITHUB_BUTTON = html.A(dbc.Button("GitHub", style={"bgcolor": "gray"}), href="https://www.vanderschaar-lab.com/contact-us/", className="two columns")
# Reset button; presumably handled by a callback registered on id
# "updatebutton" elsewhere in the file -- verify against the callbacks.
UPDATE_BUTTON = dbc.Button("Reset to Current Policy", style={"bgcolor": "gray"}, id="updatebutton")
# Header bar: lab logo on the left, then the navigation buttons separated by
# fixed-width spacer columns.
HEADER = html.Div([
    html.Div(
        [
            dbc.Row([dbc.Col(html.Img(src=app.get_asset_url("logo.png"), id="adjutorium-logo", style={"height": "100px", 'textAlign': 'left',
                                                                                                      "width": "auto",})),
                     VERTICAL_SPACE(325),
                     dbc.Col(LEARN_BUTTON),
                     VERTICAL_SPACE(20),
                     dbc.Col(WEBSITE_BUTTON),
                     VERTICAL_SPACE(20),
                     dbc.Col(FEEDBACK_BUTTON),
                     VERTICAL_SPACE(20),
                     dbc.Col(GITHUB_BUTTON)]),
        ], style={"margin-left":"5ex"}, className="header"),
    ],
)
# Create the *Patient Information form* for app body
# --------------------------------------------------
# Input, name & HTML form styling dictionaries
# Location selectors. The region dropdown is disabled; only country-level
# selection is active here.
COUNTRY_DROPMENU = dcc.Dropdown(id='country', options= COUNTRY_LIST, value="United Kingdom",
                                placeholder=" ", style={"width":"150px", "height": "30px", "font-size": 11, "border-color":GRAY, "color":GRAY,
                                                        "font-color":GRAY, "margin-top":"-.1em", "textAlign": "left", "font-family": "Noto Sans JP",
                                                        "vertical-align":"top", "display": "inline-block"})
REGION_DROPMENU = dcc.Dropdown(id='region', options= COUNTRY_LIST, disabled=True,
                               placeholder=" ", style={"width":"150px", "height": "30px", "font-size": 11, "border-color":GRAY, "color":GRAY,
                                                       "font-color":GRAY, "margin-top":"-.1em", "textAlign": "left", "font-family": "Noto Sans JP",
                                                       "vertical-align":"top", "display": "inline-block"})
# Forecast target selector (values are indices into TARGETS).
TARGET_DROPMENU = dcc.Dropdown(id='target', options= TARGET_LIST, value=0,
                               placeholder=" ", style={"width":"150px", "height": "30px", "font-size": 11, "border-color":GRAY, "color":GRAY,
                                                       "font-color":GRAY, "margin-top":"-.1em", "textAlign": "left", "font-family": "Noto Sans JP",
                                                       "vertical-align":"top", "display": "inline-block"})
# Forecast horizon in days, from one week to three months.
HORIZON_SLIDER = dcc.Slider(id='horizonslider', marks={7: "1w", 30: "1m", 60: "2m", 90: "3m"}, min=7,
                            max=90, value=30, step=1, updatemode="drag", tooltip={"always_visible":False})
# Mask policy strictness levels (0 = none ... 3 = universal).
MASK_SLIDER = dcc.RadioItems(id='maskslider',
                             options=[{'label': 'No policy measures', 'value': 0},
                                      {'label': 'Recommended', 'value': 1},
                                      {'label': 'Limited mandate', 'value': 2},
                                      {'label': 'Universal', 'value': 3}], value=1,
                             labelStyle={"display": "inline-block", "font-size": 11,
                                         "font-family": "Noto Sans JP", "color":GRAY, "width":"50%"},
                             inputStyle={"color":CYAN})
# Multi-select social-distancing NPIs; value 5 (internal movement
# restrictions) is commented out of the selectable options.
SOCIAL_DIST_OPT = dcc.Checklist(id='socialdistance',
                                options=[{'label': 'Workplace closure', 'value': 0},
                                         {'label': 'Public events cancellation', 'value': 1},
                                         {'label': 'Public transport closure', 'value': 2},
                                         {'label': 'Gatherings restrictions', 'value': 3},
                                         {'label': 'Shelter-in-place' , 'value': 4},
                                         #{'label': 'Internal movement restrictions' , 'value': 5},
                                         {'label': 'Travel restrictions' , 'value': 6}],
                                value=[0],
                                labelStyle={"display": "inline-block", "font-size": 11,
                                            "font-family": "Noto Sans JP", "color":GRAY, "width":"50%"})
# Plot display toggles: model fit, confidence intervals and log scale.
DISPLAY_LIST_2 = dcc.Checklist(options=[{'label': 'Show PIP model fit', 'value': 1}],
                               labelStyle={"font-size": 11, "font-family": "Noto Sans JP", "color":GRAY, 'display': 'inline-block'},
                               id="pipfit")
DISPLAY_LIST_3 = dcc.Checklist(options=[{'label': 'Show confidence intervals', 'value': 1}],
                               value=[1],
                               labelStyle={"font-size": 11, "font-family": "Noto Sans JP", "color":GRAY, 'display': 'inline-block'},
                               id="confidenceint")
DISPLAY_LIST_4 = dcc.Checklist(options=[{'label': 'Logarithmic scale', 'value': 1}],
                               labelStyle={"font-size": 11, "font-family": "Noto Sans JP", "color":GRAY, 'display': 'inline-block'},
                               id="logarithmic")
# Days elapsed since Jan 1st 2020; used as the upper bound of the date slider.
Num_days = (dt.date.today() - dt.date(2020, 1, 1)).days
BEGIN_DATE = dcc.Slider(id='dateslider', marks={0: "Jan 1st, 2020", Num_days: "Today"},
                        min=0, max=Num_days, value=0, step=1, updatemode="drag", tooltip={"always_visible":False})
HORIZON_NOTE = "*w = week, m = month."
REQUEST_NOTE = "Select a geographical location and the required forecast."
REQUEST_NOTE_2 = "Select the non-pharmaceutical interventions (NPIs) to be applied in the geographical area selected above."
# Labelled wrappers pairing each control with its caption.
COUNTRY_SELECT = html.P(children=[html.Div("Country", style=name_style), COUNTRY_DROPMENU])
REGION_SELECT = html.P(children=[html.Div("Region", style=name_style), REGION_DROPMENU])
TARGET_SELECT = html.P(children=[html.Div("Forecast Target", style=name_style), TARGET_DROPMENU])
HORIZON_SELECT = html.P(children=[html.Div("Forecast Days*", style=name_style), HORIZON_SLIDER])
MASK_SELECT = html.P(children=[html.Div("Mask Policy", style=name_style), MASK_SLIDER])
SOCIAL_SELECT = html.P(children=[html.Div("Social Distancing Measures", style=name_style_), SOCIAL_DIST_OPT])
BEGIN_SELECT = html.P(children=[html.Div("View from", style=PANEL_TEXT_STYLE4), BEGIN_DATE])
SCHOOL_CLOSURE = _get_toggle_switch(name="School Closure ", name_style=name_style, color_style=CYAN, ID="school_closure")
PATIENT_INFO_FORM = html.Div(
[
html.Div(
[
dbc.Row(dbc.Col(html.Div("Forecast Settings", style={"marginBottom": "0.5em", "margin-top": "1em", "margin-left": MARGIN_INPUT,
"color":DARK_GRAY, "font-weight": "bold", "font-size": "11", "font-family": 'Noto Sans JP'}))),
dbc.Row(dbc.Col(html.Div(REQUEST_NOTE, style=PANEL_TEXT_STYLE2))),
HORIZONTAL_SPACE(1),
dbc.Row(
[
dbc.Col(COUNTRY_SELECT),
VERTICAL_SPACE(40),
dbc.Col(REGION_SELECT),
], style={"margin-left": "40px"}
),
HORIZONTAL_SPACE(1),
dbc.Row(
[ dbc.Col(TARGET_SELECT),
VERTICAL_SPACE(40),
dbc.Col(HORIZON_SELECT),
], style={"margin-left": "40px"}
),
HORIZONTAL_SPACE(.5),
dbc.Row([VERTICAL_SPACE(200), dbc.Col(html.Div(HORIZON_NOTE, style=PANEL_TEXT_STYLE))]),
HORIZONTAL_SPACE(1),
], style={"box-shadow": BOX_SHADOW, "margin": MARGIN_INPUT, "background-color": PANEL_COLOR, "width": "450px"}),
html.Div(
[
dbc.Row(dbc.Col(html.Div("Policy Scenario", style={"marginBottom": "1em", "margin-top": "1em", "margin-left": MARGIN_INPUT,
"color":DARK_GRAY, "font-weight": "bold", "font-size": "11", "font-family":'Noto Sans JP'}))),
dbc.Row(dbc.Col(html.Div(REQUEST_NOTE_2, style=PANEL_TEXT_STYLE2))),
HORIZONTAL_SPACE(1),
dbc.Row(
[
VERTICAL_SPACE(40),
dbc.Col(SCHOOL_CLOSURE),
VERTICAL_SPACE(60),
dbc.Col(MASK_SELECT),
], style={"margin-left": MARGIN_INPUT}
),
HORIZONTAL_SPACE(.5),
dbc.Row(
[
VERTICAL_SPACE(25),
dbc.Col(SOCIAL_SELECT),
], style={"margin-left": MARGIN_INPUT}
),
HORIZONTAL_SPACE(1),
dbc.Row(
[
VERTICAL_SPACE(80),
dbc.Col(UPDATE_BUTTON),
], style={"margin-left": MARGIN_INPUT}
),
HORIZONTAL_SPACE(1.5),
], style={"box-shadow": BOX_SHADOW, "margin": MARGIN_INPUT, | |
-d '/' -f 2 slices the line by '/' and selects the second field resulting in: QtWebKit.framework
otool_command = "otool -L '%s' | cut -d ' ' -f 1 | grep @rpath.*Qt | cut -d '/' -f 2" % (os.path.join(src, darwin_adjusted_name))
output = subprocess.check_output(otool_command, shell=True)
qt_dependent_libs = re.split("\s+", output.strip())
for lib in qt_dependent_libs:
qt_frameworks_to_copy.add(lib)
for framework_name in qt_frameworks_to_copy:
src_node = qt_libs_source_node.make_node(framework_name)
src = src_node.abspath()
dst = frameworks_node.make_node(framework_name).abspath()
if os.path.islink(dst):
os.unlink(dst)
if os.path.isdir(dst):
shutil.rmtree(dst)
Logs.info("Copying Qt Framework {} to {}".format(src, dst))
self.bld.create_symlink_or_copy(src_node, dst)
if not os.path.islink(dst):
post_copy_cleanup(frameworks_node.make_node(framework_name))
def process_resources(self):
    """
    Installs the task's resource files and links/copies any extra resource directories.
    Files listed in the 'resources' attribute are installed into the platform's
    resource location; each directory named in 'dir_resources' is symlinked or
    copied next to the packaged executable.
    """
    resources_dest_node = get_resource_node(self.bld.platform, self.executable_name, self.destination_node)
    resources = getattr(self, 'resources', None)
    if resources:
        resources_dest_node.mkdir()
        self.bld.install_files(resources_dest_node.abspath(), resources)
    executable_source_location_node = self.inputs[0].parent
    executable_dest_node = self.outputs[0].parent
    # Renamed from 'dir', which shadowed the builtin of the same name.
    for resource_dir in getattr(self, 'dir_resources', []):
        Logs.debug("package: extra directory to link/copy into the package is: {}".format(resource_dir))
        self.bld.create_symlink_or_copy(executable_source_location_node.make_node(resource_dir),
                                        executable_dest_node.make_node(resource_dir).abspath(),
                                        postpone=False)
def process_assets(self):
    """
    Packages any assets.
    Assets come from the 'assets_path' attribute, or from 'pak_file_path' when
    'use_pak_files' has been specified. If neither yields an existing location,
    the project's standard generated pak directory is used instead.
    """
    assets_source = getattr(self, 'assets_path', None)
    if getattr(self, 'use_pak_files', False):
        assets_source = getattr(self, 'pak_file_path', None)
        if assets_source and not os.path.exists(assets_source):
            Logs.warn("Specified pak file location {} does not exist. Defaulting to use the standard generated file path".format(assets_source))
            assets_source = None
    if not assets_source:
        # Fall back to the generated "<project>_<platform>_paks" directory.
        assets_platform = self.bld.get_bootstrap_assets(self.bld.platform)
        pak_file_dir = self.bld.project + "_" + assets_platform + "_paks"
        pak_file_dir = pak_file_dir.lower()
        assets_source = self.bld.Path(pak_file_dir)
    game_assets_node = get_game_assets_node(self.bld.platform, self.executable_name, self.destination_node)
    Logs.debug("package: source {} dest {}".format(assets_source, game_assets_node.abspath()))
    if assets_source:
        if not os.path.exists(assets_source):
            Logs.warn("[WARNING] Asset source location {} does not exist on the file system. Creating the assets source folder.".format(assets_source))
            try:
                os.makedirs(assets_source)
            except OSError as e:
                # Best effort: without a source folder there is nothing to package.
                Logs.warn("[WARNING] Creating the assets source folder failed, no assets will be put into the package. {}".format(e))
                return
        # An existing destination may be a junction/symlink from a previous run
        # in the other mode (link vs copy); clean it up before re-creating.
        if os.path.isdir(game_assets_node.abspath()):
            if should_copy_and_not_link(self.bld):
                if os.path.islink(game_assets_node.abspath()):
                    # Need to remove the junction as rmtree does not do that and
                    # fails if there is a junction
                    remove_junction(game_assets_node.abspath())
            else:
                # Going from a copy to a link so remove the directory if it exists
                if not os.path.islink(game_assets_node.abspath()):
                    shutil.rmtree(game_assets_node.abspath())
        # Make the parent directory so that when we either make a link or copy
        # assets the parent directory is there and waf install/symlink commands
        # will work correctly
        game_assets_node.parent.mkdir()
        Logs.info("Putting assets into folder {}".format(game_assets_node.abspath()))
        self.bld.create_symlink_or_copy(self.bld.root.find_node(assets_source), game_assets_node.abspath(), postpone=False)
def execute(self):
    """
    Extended Context.execute to perform packaging on games and tools.
    For an executable package to be processed by this context the wscript file must implement the package_[platform] function (i.e. package_darwin_x64), which can call the package_game or package_tool methods on this context. Those functions will create the necessary package_task objects that will be executed after all directories have been recursed through. The package_game/tool functions accept keyword arguments that define how the package_task should package executable, resources, and assets that are needed. For more information about valid keyword arguments look at the package_task.__init__ method.
    """
    # When the package_* functions are called they will set the group to
    # packaging then back to build. This way we can filter out the package
    # tasks and only execute them and not the build task_generators that will
    # be added as we recurse through the directories
    self.add_group('build')
    self.add_group('packaging')
    self.set_group('build')
    self.project = self.get_bootstrap_game()
    self.restore()
    if not self.all_envs:
        self.load_envs()
    # The package command may be executed before SetupAssistant is executed to
    # configure the project, which is valid. If that is the case an exception
    # will be thrown by lumberyard.py to indicate this. Catch the exception and
    # return so that builds can complete correctly.
    try:
        self.recurse([self.run_dir])
    except Exception as the_error:
        Logs.info("Could not run the package command: {}.".format(the_error))
        return
    # display the time elapsed in the progress bar
    self.timer = Utils.Timer()
    group = self.get_group('packaging')
    # Generating the xcode project should only be done on macOS and if we actually have something to package (len(group) > 0)
    if len(group) > 0 and self.is_option_true('run_xcode_for_packaging') and self.platform in ['darwin_x64', 'ios', 'appletv']:
        Logs.debug("package: checking for xcode project... ")
        platform = self.platform
        if 'darwin' in platform:
            # The xcode command/option names use "mac" rather than "darwin_x64".
            platform = "mac"
        # Check if the Xcode solution exists. We need it to perform bundle
        # stuff (processing Info.plist and icon assets...)
        project_name_and_location = "/{}/{}.xcodeproj".format(getattr(self.options, platform + "_project_folder", None), getattr(self.options, platform + "_project_name", None))
        if not os.path.exists(self.path.abspath() + project_name_and_location):
            Logs.debug("package: running xcode_{} command to generate the project {}".format(platform, self.path.abspath() + project_name_and_location))
            run_command('xcode_' + platform)
    # Run only the entries that expose the Task interface; the attribute
    # lookups double as an existence probe (rs is not otherwise used).
    for task_generator in group:
        try:
            rs = task_generator.runnable_status
            scan = task_generator.scan
            run = task_generator.run
        except AttributeError:
            pass
        else:
            scan()
            run()
def package_game(self, **kw):
    """
    Packages a game by creating and configuring a package_task object.
    Called from wscript files to package a game into its final form; see
    package_task.__init__ for the keyword arguments that configure the
    packaging task. Release configurations always enable pak files.
    """
    if not is_valid_package_request(self, **kw):
        return
    # Release builds must ship with pak files regardless of the caller's choice.
    if 'release' in self.config:
        kw['use_pak_files'] = True
    create_package_task(self, **kw)
def package_tool(self, **kw):
    """
    Packages a tool by creating and configuring a package_task object.
    Called from wscript files to package a tool into its final form; see
    package_task.__init__ for the keyword arguments that configure the
    packaging task. Pak files do not apply to tools, so the option is
    always forced off here.
    """
    if not is_valid_package_request(self, **kw):
        return
    # Tools never use pak files; warn and override if the caller asked for them.
    if kw.get('use_pak_files', False):
        Logs.info("package: Using pak files not supported for tools. Ignoring the option.")
        kw['use_pak_files'] = False
    create_package_task(self, **kw)
def is_valid_package_request(pkg, **kw):
    """Returns if the platform and configuration specified for the package_task match what the package context has been created for"""
    executable_name = kw.get('target', None)
    if not executable_name:
        Logs.info("Skipping package because no target was specified.")
        return False
    # Platform gate: the context platform must start with one of the requested
    # platforms, unless 'all' was requested.
    platform_supported = any(pkg.platform.startswith(requested)
                             for requested in kw['platforms'])
    if 'all' not in kw['platforms'] and not platform_supported:
        Logs.info("Skipping packaging {} because the host platform {} is not supported".format(executable_name, pkg.platform))
        return False
    # Configuration gate.
    if 'all' not in kw['configurations'] and pkg.config not in kw['configurations']:
        Logs.info("Skipping packaging {} because the configuration {} is not supported".format(executable_name, pkg.config))
        return False
    # Spec gate: when a project spec is active, the task generator must be in it.
    if pkg.options.project_spec:
        task_gen_name = kw.get('task_gen_name', executable_name)
        modules_in_spec = pkg.loaded_specs_dict[pkg.options.project_spec]['modules']
        if task_gen_name not in modules_in_spec:
            Logs.info("Skipping packaging {} because it is not part of the spec {}".format(executable_name, pkg.options.project_spec))
            return False
    # Targets gate: when explicit targets were given, the executable must be one.
    if pkg.options.targets and executable_name not in pkg.options.targets.split(','):
        Logs.debug("package: Skipping packaging {} because it is not part of the specified targets {}".format(executable_name, pkg.options.targets))
        return False
    return True
def create_package_task(self, **kw):
    """
    Creates a package_task for the given target and queues it in the 'packaging' group.
    Skips the task when the current platform or configuration is not in the
    requested sets, resolves the executable's source and destination nodes,
    and warns when neither the built executable nor a previously packaged
    artifact exists.
    """
    executable_name = kw.get('target', None)
    Logs.debug("package: create_package_task {}".format(executable_name))
    # NOTE(review): this check uses substring containment while
    # is_valid_package_request uses startswith -- confirm the difference is intended.
    has_valid_platform = any(platform in self.platform for platform in kw['platforms'])
    if (not has_valid_platform and 'all' not in kw['platforms']):
        Logs.info("Skipping packaging {} because the host platform {} is not supported".format(executable_name, self.platform))
        return
    if (self.config not in kw['configurations'] and 'all' not in kw['configurations']):
        Logs.info("Skipping packaging {} because the configuration {} is not supported".format(executable_name, self.config))
        return
    kw['bld'] = self # Needed for when we build the task
    task_gen_name = kw.get('task_gen_name', executable_name)
    executable_task_gen = self.get_tgen_by_name(task_gen_name)
    # Prefer the task generator's explicit output folder; otherwise use the
    # default output folder for this platform/configuration.
    if executable_task_gen and getattr(executable_task_gen, 'output_folder', None):
        executable_source_node = self.srcnode.make_node(executable_task_gen.output_folder)
    else:
        executable_source_node = self.srcnode.make_node(self.get_output_folders(self.platform, self.config)[0].name)
    destination_node = getattr(self, 'destination', None)
    if not destination_node:
        destination_node = executable_source_node
    executable_dest_node = get_path_to_executable_package_location(self.platform, executable_name, destination_node)
    executable_source_node = executable_source_node.make_node(executable_name)
    if os.path.exists(executable_source_node.abspath()):
        new_task = package_task(env=self.env, **kw)
        new_task.set_inputs(executable_source_node)
        new_task.set_outputs(executable_dest_node.make_node(executable_name))
        self.add_to_group(new_task, 'packaging')
    else:
        if os.path.exists(executable_dest_node.make_node(executable_name).abspath()):
            Logs.info("Final package output already exists, skipping packaging of %s" % executable_source_node.abspath())
        else:
            # Typo fix in the warning: "package ommand" -> "package command".
            Logs.warn("[WARNING] Source executable %s does not exist and final package artifact does not exist either. Did you run the build command before the package command?" % executable_source_node.abspath())
def symlink_libraries(self, source, destination):
""" Creates a smybolic link for libraries.
An ant_glob is executed on the source node using "*" + result of get_dynamic_lib_extension to determine all the libraries that need to be linked into the destination. If the package is being built for a release configuration or the platform does not support symbolic links a copy will be made of the library.
:param source: Source of the libraries
:type source: waflib.Node
:param destination: Location/path to create the link | |
the top down
if keyLoc[0] == 't':
markers.reverse()
# Construct ctioga2 command for each key
for i, key in enumerate(markers):
if key[0] == 'Bullet' or key[0] == 'BulletOpen':
key[2] /= 1.5
if key[2] > 1.0:
key[2] = 1.0
# Write the extra marker overlay for the reference point
if len(key) == 7:
keyString += ' --draw-marker '+str(xVals[0])+','+str(yVals[i])+' '+key[4]+' /color \'' +\
key[5]+'\' /scale '+str(key[6]*key[2])+'\\\n'
# Write the main marker
keyString += ' --draw-marker '+str(xVals[0])+','+str(
yVals[i])+' '+key[0]+' /color \''+key[1]+'\' /scale '+str(key[2])+'\\\n'
# Write the key text
keyString += ' --draw-text ' + \
str(xVals[1])+','+str(yVals[i])+' \''+key[3] + \
'\' /color \''+colours.value.keyTextColour1D
keyString += '\' /justification left /scale 0.75 /alignment center \\\n'
# Open plotting shell script file for writing
outfile = smart_open(currentBase+'_post1D.bsh', 'w')
outfile.write('#!/usr/bin/env bash\n')
outfile.write('# This plot script created by pippi '+pippiVersion +
' on '+datetime.datetime.now().strftime('%c')+'\n')
outfile.write('ctioga2\\\n')
outfile.write(' --name '+currentBaseMinimal+'_post1D')
outfile.write(' --plot-scale \''+str(plot_scale)+'\'\\\n')
outfile.write(' --page-size \''+plotSizeInternal+'\'\\\n')
outfile.write(' --frame-margins '+str(left_margin)+','
+ str(right_margin)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
outfile.write(' --xrange '+str(xtrema[0])+':'+str(xtrema[1])+'\\\n')
outfile.write(' --yrange 0:1\\\n')
outfile.write(
' --ylabel \'Relative probability $P/P_\mathrm{max}$\' /shift 2.1\\\n')
outfile.write(' --xlabel \''+labels.value[plot]+'\'\\\n')
outfile.write(
' --label-style x /scale 1.0 /shift 0.15 --label-style y /scale 1.0 /shift 0.15')
if yAxisAngle.value is not None:
outfile.write(' /angle '+str(yAxisAngle.value))
outfile.write('\\\n')
if contours1D is not None:
for i, contour in enumerate(mainContourLevels):
outfile.write(' --draw-line '+str(xtrema[0])+','+contour+' '+str(xtrema[1])+','+contour+' /color \''+colours.value.mainPostColour1D +
'\' /style Dashes /width '+str(float(colours.value.lineWidth1D)*0.5)+'\\\n')
outfile.write(' --draw-text '+str(xtrema[0]+0.045*(xtrema[1]-xtrema[0]))+','+str(float(contour)+0.005)+' \''+str(contours1D.value[i]) +
'\%CR\' /color \''+colours.value.mainPostColour1D+'\' /scale 0.5 /justification left /alignment bottom\\\n')
if doComparison.value:
# Do everything for comparison chain
if contours1D is not None:
for i, contour in enumerate(secContourLevels):
outfile.write(' --draw-line '+str(xtrema[0])+','+contour+' '+str(xtrema[1])+','+contour+' /color \''+colours.value.comparisonPostColour1D +
'\' /style Dashes /width '+str(float(colours.value.lineWidth1D)*0.5)+'\\\n')
outfile.write(' --draw-text '+str(xtrema[0]+0.045*(xtrema[1]-xtrema[0]))+','+str(float(contour)+0.005)+' \''+str(contours1D.value[i]) +
'\%CR\' /color \''+colours.value.comparisonPostColour1D+'\' /scale 0.5 /justification left /alignment bottom\\\n')
outfile.write(' --plot '+currentSecParse+'_post1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D +
' /fill-color '+colours.value.comparisonPostColour1D+' /color '+colours.value.comparisonPostColour1D +
' /line-style '+colours.value.comparison1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n')
if bestFitOnPost.value and colours.value.comparisonBestFitMarker is not None:
# Get best-fit point and plot it
bestFit = getCentralVal(dirs.secParseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit)+','+str(yRange*colours.value.comparisonBestFitMarkerScale/40.0)+' ' +
colours.value.comparisonBestFitMarker+' /color \''+colours.value.comparisonBestFitColour +
'\' /scale '+str(colours.value.comparisonBestFitMarkerScale)+' \\\n')
if postMeanOnPost.value and colours.value.comparisonPostMeanMarker is not None:
# Get posterior mean and plot it
postMean = getCentralVal(
dirs.secParseFilename, plot, 'post', lookupKeys)
if not postMean:
sys.exit(
'Error: plot_posterior_mean_on_posterior_pdf = T but no multiplicity given!')
outfile.write(' --draw-marker '+str(postMean)+','+str(yRange*colours.value.comparisonPostMeanMarkerScale/40.0)+' ' +
colours.value.comparisonPostMeanMarker+' /color \''+colours.value.comparisonPostMeanColour +
'\' /scale '+str(colours.value.comparisonPostMeanMarkerScale)+' \\\n')
outfile.write(' --plot '+currentParse+'_post1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D +
' /fill-color '+colours.value.mainPostColour1D+' /color '+colours.value.mainPostColour1D +
' /line-style '+colours.value.main1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n')
if doLegend1D.value is not None and plot in doLegend1D.value:
# Write legend
try:
legendLocation = legendLoc1D.value[plot]
except (KeyError, TypeError):
legendLocation = defaultLegendLocation
outfile.write(' --legend-inside \''+legendLocation +
'\' /scale 1.0 /vpadding 0.1\\\n')
if legendLines.value is not None:
for x in legendLines.value:
outfile.write(' --legend-line \''+x+'\' /color \'' +
colours.value.legendTextColour1D+'\'\\\n')
outfile.write(' --legend-line \'Marg.~posterior\' /color \'' +
colours.value.legendTextColour1D+'\'\\\n')
if bestFitOnPost.value:
# Get best-fit point and plot it
bestFit = getCentralVal(dirs.parseFilename, plot, 'like', lookupKeys)
outfile.write(' --draw-marker '+str(bestFit)+','+str(yRange*colours.value.mainBestFitMarkerScale/40.0)+' ' +
colours.value.mainBestFitMarker+' /color \''+colours.value.mainBestFitColour1D +
'\' /scale '+str(colours.value.mainBestFitMarkerScale)+' \\\n')
if postMeanOnPost.value:
# Get posterior mean and plot it
postMean = getCentralVal(dirs.parseFilename, plot, 'post', lookupKeys)
if not postMean:
sys.exit(
'Error: plot_posterior_mean_on_posterior_pdf = T but no multiplicity given!')
outfile.write(' --draw-marker '+str(postMean)+','+str(yRange*colours.value.mainPostMeanMarkerScale/40.0)+' ' +
colours.value.mainPostMeanMarker+' /color \''+colours.value.mainPostMeanColour1D +
'\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n')
# Plot reference point
if plotRef:
outfile.write(refString)
# Draw key
outfile.write(keyString)
# Write credits
if blame.value is not None:
blameYCoordinate = str(
blameFractionalVerticalOffset * yRange + ytrema[1])
outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate +
' \''+blame.value+'\' /scale 0.5 /justification right\\\n')
# Add logo
if logoFile.value is not None:
outfile.write(
' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n')
# Set axis colours
for x in ['top', 'bottom', 'left', 'right']:
outfile.write(' --axis-style '+x+' /stroke_color \'' +
colours.value.axisColour1D+'\'\\\n')
outfile.close
subprocess.call('chmod +x '+currentBase+'_post1D.bsh', shell=True)
# Make profile-posterior comparison plotting scripts
if doProfile.value and doPosterior.value:
bestFitData = [colours.value.mainBestFitMarker, colours.value.mainBestFitColour1D,
colours.value.mainBestFitMarkerScale, colours.value.mainProfColour1D]
postMeanData = [colours.value.mainPostMeanMarker, colours.value.mainPostMeanColour1D,
colours.value.mainPostMeanMarkerScale, colours.value.mainPostColour1D]
# Work out which is the main and which is the comparison
if PosteriorIsMainInComboPlot:
[main, sec] = ['post', 'like']
[mainData, secData] = [postMeanData, bestFitData]
else:
[main, sec] = ['like', 'post']
[mainData, secData] = [bestFitData, postMeanData]
# Get contours
if contours1D.value is not None:
mainContourLevels = getContours(dirs.parseFilename, plot, main)
secContourLevels = getContours(dirs.parseFilename, plot, sec)
# Determine keys
keyString = ''
if doKey1D.value is not None and plot in doKey1D.value:
markers = []
# Get details of key for reference point
if plotRef:
markers.append([colours.value.referenceMarkerOuter, colours.value.referenceMarkerOuterColour,
colours.value.referenceMarkerOuterScale, refText, colours.value.referenceMarkerInner,
colours.value.referenceMarkerInnerColour, colours.value.referenceMarkerInnerScale /
colours.value.referenceMarkerOuterScale])
# Get details of key for posterior mean
markers.append(
[postMeanData[0], postMeanData[1], postMeanData[2], 'Mean'])
# Get details of key for best fit
markers.append([bestFitData[0], bestFitData[1],
bestFitData[2], 'Best fit'])
# Reverse vertical ordering if keys are to be placed at the top of the page, so as to fill from the top down
if keyLoc[0] == 't':
markers.reverse()
# Construct ctioga2 command for each key
for i, key in enumerate(markers):
if key[0] == 'Bullet' or key[0] == 'BulletOpen':
key[2] /= 1.5
if key[2] > 1.0:
key[2] = 1.0
# Write the extra marker overlay for the reference point
if len(key) == 7:
keyString += ' --draw-marker '+str(xVals[0])+','+str(yVals[i])+' '+key[4]+' /color \'' +\
key[5]+'\' /scale '+str(key[6]*key[2])+'\\\n'
# Write the main marker
keyString += ' --draw-marker '+str(xVals[0])+','+str(
yVals[i])+' '+key[0]+' /color \''+key[1]+'\' /scale '+str(key[2])+'\\\n'
# Write the key text
keyString += ' --draw-text ' + \
str(xVals[1])+','+str(yVals[i])+' \''+key[3] + \
'\' /color \''+colours.value.keyTextColour1D
keyString += '\' /justification left /scale 0.75 /alignment center \\\n'
# Open plotting shell script file for writing
outfile = smart_open(currentBase+'_combo1D.bsh', 'w')
outfile.write('#!/usr/bin/env bash\n')
outfile.write('# This plot script created by pippi '+pippiVersion +
' on '+datetime.datetime.now().strftime('%c')+'\n')
outfile.write('ctioga2\\\n')
outfile.write(' --name '+currentBaseMinimal+'_combo1D')
outfile.write(' --plot-scale \''+str(plot_scale)+'\'\\\n')
outfile.write(' --page-size \''+plotSizeInternal+'\'\\\n')
outfile.write(' --frame-margins '+str(left_margin)+','
+ str(right_margin)+','
+ str(top_margin)+','
+ str(bottom_margin)+'\\\n')
outfile.write(' --xrange '+str(xtrema[0])+':'+str(xtrema[1])+'\\\n')
outfile.write(' --yrange 0:1\\\n')
outfile.write(
' --ylabel \'Relative probability $P/P_\mathrm{max}$\' /shift 2.1\\\n')
outfile.write(' --xlabel \''+labels.value[plot]+'\'\\\n')
outfile.write(
' --label-style x /scale 1.0 /shift 0.15 --label-style y /scale 1.0 /shift 0.15')
if yAxisAngle.value is not None:
outfile.write(' /angle '+str(yAxisAngle.value))
outfile.write('\\\n')
if contours1D is not None:
if main == 'like':
main_colour = colours.value.mainProfColour1D
main_text = 'CL'
sec_colour = colours.value.mainPostColour1D
sec_text = 'CR'
else:
main_colour = colours.value.mainPostColour1D
main_text = 'CR'
sec_colour = colours.value.mainProfColour1D
sec_text = 'CL'
for i, contour in enumerate(mainContourLevels):
outfile.write(' --draw-line '+str(xtrema[0])+','+contour+' '+str(xtrema[1])+','+contour+' /color \''+main_colour +
'\' /style Dashes /width '+str(float(colours.value.lineWidth1D)*0.5)+'\\\n')
outfile.write(' --draw-text '+str(xtrema[0]+0.045*(xtrema[1]-xtrema[0]))+','+str(float(contour)+0.005)+' \''+str(contours1D.value[i]) +
'\%'+main_text+'\' /color \''+main_colour+'\' /scale 0.5 /justification left /alignment bottom\\\n')
for i, contour in enumerate(secContourLevels):
outfile.write(' --draw-line '+str(xtrema[0])+','+contour+' '+str(xtrema[1])+','+contour+' /color \''+sec_colour +
'\' /style Dashes /width '+str(float(colours.value.lineWidth1D)*0.5)+'\\\n')
outfile.write(' --draw-text '+str(xtrema[0]+0.045*(xtrema[1]-xtrema[0]))+','+str(float(contour)+0.005)+' \''+str(contours1D.value[i]) +
'\%'+sec_text+'\' /color \''+sec_colour+'\' /scale 0.5 /justification left /alignment bottom\\\n')
# Plot comparison distribution
outfile.write(' --plot '+currentParse+'_'+sec+'1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D +
' /fill-color '+secData[3]+' /color '+secData[3] +
' /line-style '+colours.value.comparison1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n')
# Plot main distribution
outfile.write(' --plot '+currentParse+'_'+main+'1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D +
' /fill-color '+mainData[3]+' /color '+mainData[3] +
' /line-style '+colours.value.main1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n')
if doLegend1D.value is not None and plot in doLegend1D.value:
# Write legend
try:
legendLocation = legendLoc1D.value[plot]
except (KeyError, TypeError):
legendLocation = defaultLegendLocation
outfile.write(' --legend-inside \''+legendLocation +
'\' /scale 1.0 /vpadding 0.1\\\n')
if legendLines.value is not None:
for x in legendLines.value:
outfile.write(' --legend-line \''+x+'\' /color \'' +
colours.value.legendTextColour1D+'\'\\\n')
outfile.write(' --legend-line \'Like vs. Posterior\' /color \'' +
colours.value.legendTextColour1D+'\'\\\n')
# Get best-fit point
bestFit = getCentralVal(dirs.parseFilename, plot, 'like', lookupKeys)
# Get posterior mean
postMean = getCentralVal(dirs.parseFilename, plot, 'post', lookupKeys)
# Always plot both best fit and posterior mean on comparison plot
outfile.write(' --draw-marker '+str(bestFit)+','+str(yRange*bestFitData[2]/40.0)+' '+bestFitData[0]+' /color \''+bestFitData[1] +
'\' /scale '+str(bestFitData[2])+' \\\n')
if postMean:
outfile.write(' --draw-marker '+str(postMean)+','+str(yRange*postMeanData[2]/40.0)+' '+postMeanData[0]+' /color \''+postMeanData[1] +
'\' /scale '+str(postMeanData[2])+' \\\n')
# Plot reference point
if plotRef:
outfile.write(refString)
# Draw key
outfile.write(keyString)
# Write credits
if blame.value is not None:
blameYCoordinate = str(
blameFractionalVerticalOffset * yRange + ytrema[1])
outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate +
' \''+blame.value+'\' /scale 0.5 /justification right\\\n')
# Add logo
if logoFile.value is not None:
outfile.write(
' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n')
# Set axis colours
for x in ['top', 'bottom', 'left', 'right']:
outfile.write(' --axis-style '+x+' /stroke_color \'' +
colours.value.axisColour1D+'\'\\\n')
outfile.close
subprocess.call('chmod +x '+currentBase+'_combo1D.bsh', shell=True)
def scriptTwoDplots(dirs):
# Loop over requested plots
for plot in twoDplots.value:
print(' Writing scripts for | |
from _PhylogenyExt import *
class Split(SplitBase):
    #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
    """
    Encapsulates a split, or taxon bipartition: a division of the taxa into
    two mutually exclusive groups, stored as a bit vector with one bit per
    taxon. Thin Python wrapper around the C++ SplitBase extension class.
    """
    def __init__(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Initializes data members and sets up the Split for the 4-taxon case.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.setBits((0,1))
        >>> print s.createPatternRepresentation()
        **--
        """
        SplitBase.__init__(self)  # delegate to the C++ base-class constructor
    def copy(self, other):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Makes this split a copy of other.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---*')
        >>> r = Split()
        >>> r.copy(s)
        >>> print r.createPatternRepresentation()
        -*--*---*
        """
        SplitBase.copy(self, other)  # delegate to the C++ base implementation
    def reset(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Sets all bits to 0, but does not change anything else.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---*')
        >>> s.countOnBits()
        3
        >>> s.reset()
        >>> s.countOnBits()
        0
        """
        SplitBase.reset(self)  # delegate to the C++ base implementation
    def setBit(self, b):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Sets bit b, where 0 <= b < number of taxa. The following example sets
        the first two bits (out of 4, which is the default number of taxa).
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.setBit(0)
        >>> s.setBit(1)
        >>> print s.createPatternRepresentation()
        **--
        """
        SplitBase.setBit(self, b)  # delegate to the C++ base implementation
    def setBits(self, bits_to_set):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Sets bits in tuple bits_to_set, where each value b in bits_to_set
        obeys 0 <= b < number of taxa. The following example sets the first
        two bits (out of 4, which is the default number of taxa).
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.setBits((0,1))
        >>> print s.createPatternRepresentation()
        **--
        """
        SplitBase.setBits(self, bits_to_set)  # delegate to the C++ base implementation
    def unsetBit(self, taxon):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Unsets (clears) bit b, where 0 <= b < number of taxa. The following
        example sets all 4 bits (4 is the default number of taxa), then
        clears bits 0 and 3.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.setBit(0)
        >>> s.setBit(1)
        >>> s.setBit(2)
        >>> s.setBit(3)
        >>> print s.createPatternRepresentation()
        ****
        >>> s.unsetBit(0)
        >>> s.unsetBit(3)
        >>> print s.createPatternRepresentation()
        -**-
        """
        # NOTE(review): the parameter is a bit index; it is named 'taxon'
        # because each taxon owns exactly one bit in the split.
        SplitBase.unsetBit(self, taxon)
    def unsetBits(self, bits_to_unset):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Clears (unsets) bits in tuple bits_to_unset, where each value b in
        bits_to_unset obeys 0 <= b < number of taxa. The following example
        sets all 4 bits (4 is the default number of taxa), then clears bits
        0 and 3.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.setBits((0,1,2,3))
        >>> print s.createPatternRepresentation()
        ****
        >>> s.unsetBits((0,3))
        >>> print s.createPatternRepresentation()
        -**-
        """
        SplitBase.unsetBits(self, bits_to_unset)  # delegate to the C++ base implementation
    def isBitSet(self, b):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Allows one to query whether a particular bit b is set, where
        0 <= b < number of taxa. Returns a bool.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> print s.isBitSet(0)
        False
        >>> s.setBit(0)
        >>> print s.isBitSet(0)
        True
        """
        return SplitBase.isBitSet(self, b)  # delegate to the C++ base implementation
    def invertSplit(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Clears (unsets) all bits that are currently set, and sets all bits
        that are currently not set. Note: this function does not return a
        value.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---*')
        >>> s.invertSplit()
        >>> print s.createPatternRepresentation()
        *-**-***-
        """
        SplitBase.invertSplit(self)  # delegate to the C++ base implementation
    def calcComplexity(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns the minimum of n and m, where n is the number of taxa on one
        side of the split and m is the number on the other side. Trivial
        splits have m = 1 or n = 1, and thus have complexity 1, whereas the
        most complex split has complexity split_ntax/2 (note that this maximum
        holds whether or not the number of taxa is even or odd).
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---*')
        >>> print s.calcComplexity()
        3
        """
        return SplitBase.calcComplexity(self)  # delegate to the C++ base implementation
    def countOnBits(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns the number of bits that are currently set.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---***-*')
        >>> print s.countOnBits()
        6
        """
        return SplitBase.countOnBits(self)  # delegate to the C++ base implementation
    def countOffBits(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns the number of bits that are currently unset. This function
        will be slower than countOnBits if some bits have been excluded.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---***-*')
        >>> print s.countOffBits()
        7
        """
        return SplitBase.countOffBits(self)  # delegate to the C++ base implementation
    def createNewickRepresentation(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Creates a newick-style (nested-parenthetical) tree description from
        the split. Such a tree, when displayed, would have two internal nodes
        connected by a single internal edge. Attached to one of the nodes are
        all taxa present on one side of the split, with the remaining taxa
        attached to the other internal node. In the tree description, bits
        that are on are listed first, and are 1-offset (i.e. the first bit
        is represented by 1, not 0).
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---****')
        >>> print s.createNewickRepresentation()
        (2,5,9,10,11,12,(1,3,4,6,7,8))
        """
        # NOTE(review): meaning of the hard-coded False flag passed to the
        # C++ base is not visible here — TODO confirm against SplitBase docs.
        return SplitBase.createNewickRepresentation(self, False)
    def createPatternRepresentation(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Creates a string representing the split as a series of characters. By
        default, the set bits are represented by '*' and unset bits by '-'
        although this can be changed through the use of the functions
        SetOnSymbol and SetOffSymbol, respectively.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---****')
        >>> print s.createPatternRepresentation()
        -*--*---****
        """
        return SplitBase.createPatternRepresentation(self)  # delegate to the C++ base
    def equals(self, other_split):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns True if this split equals other_split, and False otherwise.
        To be equal, the two splits must have exactly the same pattern of
        set bits.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---****')
        >>> r = Split()
        >>> r.createFromPattern('-*--*---****')
        >>> print r.equals(s)
        True
        >>> t = Split()
        >>> t.createFromPattern('-*--*---***-')
        >>> print t.equals(s)
        False
        """
        return SplitBase.equals(self, other_split)  # delegate to the C++ base
    def cmp(self, other_split):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns -1 if this split is less than other_split, 0 if this split
        equals other_split, and 1 if this split is greater than other_split.
        To be equal, the two splits must have exactly the same pattern of
        set bits. One split is less than another if the sum of its component
        bit fields is smaller.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-*--*---****')
        >>> r = Split()
        >>> r.createFromPattern('--*-*---****')
        >>> print r.cmp(s)
        -1
        >>> t = Split()
        >>> t.createFromPattern('*---*---***-')
        >>> print t.cmp(s)
        1
        >>> print s.cmp(s)
        0
        """
        return SplitBase.cmp(self, other_split)  # delegate to the C++ base
    def isCompatible(self, other_split):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns true if other_split is compatible with this split. The two
        splits a and b are compatible if: (1) the set of "on" bits in a is a
        subset of the set of "on" bits in b; (2) the set of "on" bits in b is
        a subset of the set of "on" bits in a; or (3) the intersection of the
        sets of "on" bits in a and b is the empty set. For example
        split a: -***---*--
        split b: ----***--*
        are compatible, because of reason (3) above. The two splits below are
        also compatible because b is a subset of (i.e. subsumed in) a:
        split a: -****-*---
        split b: --**--*---
        These two splits, on the other hand, are not compatible because the
        intersection is not empty and is also not equal to either a or b:
        split a: -***---*--
        split b: ---***---*
        >>> from phycas.phylogeny import *
        >>> a = Split()
        >>> a.createFromPattern('-***---*--')
        >>> b = Split()
        >>> b.createFromPattern('---***---*')
        >>> print a.isCompatible(b)
        False
        """
        return SplitBase.isCompatible(self, other_split)  # delegate to the C++ base
    def subsumedIn(self, other_split):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns True if the intersection of this split and other_split is
        non-empty and equal to this split. See the documentation for the
        function isCompatible for more information.
        >>> from phycas.phylogeny import *
        >>> a = Split()
        >>> a.createFromPattern('-****-*---')
        >>> b = Split()
        >>> b.createFromPattern('--**--*---')
        >>> print a.subsumedIn(b)
        False
        >>> print b.subsumedIn(a)
        True
        """
        # NOTE(review): meaning of the trailing 0 argument is not visible
        # here — TODO confirm against the SplitBase C++ signature.
        return SplitBase.subsumedIn(self, other_split, 0)
    def createFromPattern(self, pattern_string):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Sets this split's bits from pattern_string, in which (per the
        default symbols described in createPatternRepresentation) '*' marks
        a set bit and '-' an unset bit; the string length determines the
        number of taxa.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-****-*---')
        >>> print s.createPatternRepresentation()
        -****-*---
        """
        SplitBase.createFromPattern(self, pattern_string)  # delegate to the C++ base
    def getOnList(self):
        #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
        """
        Returns a list of bits that are currently set.
        >>> from phycas.phylogeny import *
        >>> s = Split()
        >>> s.createFromPattern('-****-*---')
        >>> print s.getOnList()
        [1, 2, 3, 4, 6]
        """
        # Wrap in list() so callers get a plain Python list rather than the
        # sequence type returned by the C++ extension.
        return list(SplitBase.getOnList(self))
def getOffList(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns a list of bits that are currently not set.
>>> from phycas.phylogeny import *
| |
const [] Ap, npy_int32 const [] Ai,
unsigned short const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned short const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned short [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi, int const [] Bx,
npy_int32 [] Cp, npy_int32 [] Ci, int [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned int const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned int [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, long long [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
unsigned long long [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
float const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
float const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, float [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
double const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, double [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
long double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
long double const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, long double [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_cfloat_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_cfloat_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_cfloat_wrapper [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_cdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_cdouble_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_cdouble_wrapper [] Cx)
csc_plus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_clongdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_clongdouble_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_clongdouble_wrapper [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_bool_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_bool_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_bool_wrapper [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
signed char const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
signed char const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, signed char [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned char const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned char const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned char [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
short const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
short const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, short [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned short const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned short const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned short [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
int const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi, int const [] Bx,
npy_int64 [] Cp, npy_int64 [] Ci, int [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned int const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned int const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, unsigned int [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
long long const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
long long const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, long long [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
unsigned long long const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
unsigned long long const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
unsigned long long [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
float const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
float const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, float [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
double const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
double const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, double [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
long double const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
long double const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci, long double [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_cfloat_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_cfloat_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_cfloat_wrapper [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_cdouble_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_cdouble_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_cdouble_wrapper [] Cx)
csc_plus_csc(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Ai,
npy_clongdouble_wrapper const [] Ax, npy_int64 const [] Bp, npy_int64 const [] Bi,
npy_clongdouble_wrapper const [] Bx, npy_int64 [] Cp, npy_int64 [] Ci,
npy_clongdouble_wrapper [] Cx)
"""
return _csc.csc_plus_csc(*args)
def csc_minus_csc(*args):
"""
csc_minus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
npy_bool_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
npy_bool_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci,
npy_bool_wrapper [] Cx)
csc_minus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
signed char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
signed char const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, signed char [] Cx)
csc_minus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
unsigned char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
unsigned char const [] Bx, npy_int32 [] Cp, npy_int32 [] Ci, unsigned char [] Cx)
csc_minus_csc(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Ai,
short const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bi,
short const [] Bx, npy_int32 [] Cp, | |
# Repository: rbrutherford3/ASCII-Chess
#################################################################
# #
# ASCII-Chess, written by <NAME> in 2021 #
# #
#################################################################
from piece import *
# Yield the opposite player
def opponent(player):
    """Return the number of the opposing player (1 -> 2, 2 -> 1).

    Raises ValueError for anything other than 1 or 2.
    """
    if player not in (1, 2):
        raise ValueError("Players are either 1 or 2")
    return 3 - player
# Class that represents one square on a board (note that indexing
# in Python is from 0, so translation is required)
class Square(object):
    row: int
    column: int
    piece: Piece

    # Can be initiated with or without an occupying piece
    def __init__(self, thisRow, thisColumn, thisPiece=None):
        """Remember the square's coordinates and its (optional) occupant."""
        self.row = thisRow
        self.column = thisColumn
        self.piece = thisPiece

    def __str__(self):
        """Render the square for the ASCII board: '---' if empty, else 'N_S'
        where N is the owning player and S is the piece symbol."""
        occupant = self.piece
        if occupant is None:
            return "---"
        return "%s_%s" % (occupant.player, occupant.symbol)

    def setPiece(self, piece: Piece):
        """Place *piece* on this square after construction."""
        self.piece = piece
# Board class, which holds most functioning and is composed of 64 square objects
def isSquare(row: int, column: int) -> bool:
    """Return True when (row, column) is a valid 0-indexed board coordinate."""
    if row < 0 or row > 7:
        return False
    return 0 <= column <= 7
# Board class, holding most of the game state: an 8x8 grid of Square objects.
class Board(object):
    # grid is a list[list[Square]] indexed as grid[row][column];
    # row 0 is player 1's back rank, row 7 is player 2's.
    grid: list
    numRows: int = 8
    numColumns: int = 8
    opponent = opponent  # expose the module-level helper on the class
# Begin by initiating grid
def __init__(self):
self.grid = [[
Square(gridRow, gridColumn)
for gridColumn in range(self.numColumns)
] for gridRow in range(self.numRows)]
self.king1 = King(1)
self.king2 = King(2)
# Need quick access to the king for each side to speed up program
def getKing(self, player) -> King:
if player == 1:
return self.king1
elif player == 2:
return self.king2
else:
raise ValueError("Player can be either 1 or 2")
# This method sets up each individual piece on the 'grid'
def setup(self):
[self.grid[1][column].setPiece(Pawn(1))
for column in range(self.numColumns)]
[self.grid[6][column].setPiece(Pawn(2))
for column in range(self.numColumns)]
[self.grid[0][column].setPiece(Rook(1)) for column in [0, 7]]
[self.grid[7][column].setPiece(Rook(2)) for column in [0, 7]]
[self.grid[0][column].setPiece(Knight(1)) for column in [1, 6]]
[self.grid[7][column].setPiece(Knight(2)) for column in [1, 6]]
[self.grid[0][column].setPiece(Bishop(1)) for column in [2, 5]]
[self.grid[7][column].setPiece(Bishop(2)) for column in [2, 5]]
self.grid[0][3].setPiece(Queen(1))
self.grid[7][3].setPiece(Queen(2))
self.grid[0][4].setPiece(self.king1)
self.king1.location = self.grid[0][4]
self.grid[7][4].setPiece(self.king2)
self.king2.location = self.grid[7][4]
# Draw the board onto the screen with unicode ASCII characters (old school, yes)
def draw(self, player: int):
if player == 1:
print("")
print(" a b c d e f g h")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 8 | " + str(self.grid[7][0]) + " |//" + str(self.grid[7][1]) +
"//| " + str(self.grid[7][2]) + " |//" + str(self.grid[7][3]) +
"//| " + str(self.grid[7][4]) + " |//" + str(self.grid[7][5]) +
"//| " + str(self.grid[7][6]) + " |//" + str(self.grid[7][7]) + "//| 8")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 7 |//" + str(self.grid[6][0]) + "//| " + str(self.grid[6][1]) +
" |//" + str(self.grid[6][2]) + "//| " + str(self.grid[6][3]) +
" |//" + str(self.grid[6][4]) + "//| " + str(self.grid[6][5]) +
" |//" + str(self.grid[6][6]) + "//| " + str(self.grid[6][7]) + " | 7")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 6 | " + str(self.grid[5][0]) + " |//" + str(self.grid[5][1]) +
"//| " + str(self.grid[5][2]) + " |//" + str(self.grid[5][3]) +
"//| " + str(self.grid[5][4]) + " |//" + str(self.grid[5][5]) +
"//| " + str(self.grid[5][6]) + " |//" + str(self.grid[5][7]) + "//| 6")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 5 |//" + str(self.grid[4][0]) + "//| " + str(self.grid[4][1]) +
" |//" + str(self.grid[4][2]) + "//| " + str(self.grid[4][3]) +
" |//" + str(self.grid[4][4]) + "//| " + str(self.grid[4][5]) +
" |//" + str(self.grid[4][6]) + "//| " + str(self.grid[4][7]) + " | 5")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 4 | " + str(self.grid[3][0]) + " |//" + str(self.grid[3][1]) +
"//| " + str(self.grid[3][2]) + " |//" + str(self.grid[3][3]) +
"//| " + str(self.grid[3][4]) + " |//" + str(self.grid[3][5]) +
"//| " + str(self.grid[3][6]) + " |//" + str(self.grid[3][7]) + "//| 4")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 3 |//" + str(self.grid[2][0]) + "//| " + str(self.grid[2][1]) +
" |//" + str(self.grid[2][2]) + "//| " + str(self.grid[2][3]) +
" |//" + str(self.grid[2][4]) + "//| " + str(self.grid[2][5]) +
" |//" + str(self.grid[2][6]) + "//| " + str(self.grid[2][7]) + " | 3")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 2 | " + str(self.grid[1][0]) + " |//" + str(self.grid[1][1]) +
"//| " + str(self.grid[1][2]) + " |//" + str(self.grid[1][3]) +
"//| " + str(self.grid[1][4]) + " |//" + str(self.grid[1][5]) +
"//| " + str(self.grid[1][6]) + " |//" + str(self.grid[1][7]) + "//| 2")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 1 |//" + str(self.grid[0][0]) + "//| " + str(self.grid[0][1]) +
" |//" + str(self.grid[0][2]) + "//| " + str(self.grid[0][3]) +
" |//" + str(self.grid[0][4]) + "//| " + str(self.grid[0][5]) +
" |//" + str(self.grid[0][6]) + "//| " + str(self.grid[0][7]) + " | 1")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" a b c d e f g h")
print("")
# Show board from the other side for player 2
elif player == 2:
print("")
print(" h g f e d c b a")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 1 | " + str(self.grid[0][7]) + " |//" + str(self.grid[0][6]) +
"//| " + str(self.grid[0][5]) + " |//" + str(self.grid[0][4]) +
"//| " + str(self.grid[0][3]) + " |//" + str(self.grid[0][2]) +
"//| " + str(self.grid[0][1]) + " |//" + str(self.grid[0][0]) + "//| 1")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 2 |//" + str(self.grid[1][7]) + "//| " + str(self.grid[1][6]) +
" |//" + str(self.grid[1][5]) + "//| " + str(self.grid[1][4]) +
" |//" + str(self.grid[1][3]) + "//| " + str(self.grid[1][2]) +
" |//" + str(self.grid[1][1]) + "//| " + str(self.grid[1][0]) + " | 2")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 3 | " + str(self.grid[2][7]) + " |//" + str(self.grid[2][6]) +
"//| " + str(self.grid[2][5]) + " |//" + str(self.grid[2][4]) +
"//| " + str(self.grid[2][3]) + " |//" + str(self.grid[2][2]) +
"//| " + str(self.grid[2][1]) + " |//" + str(self.grid[2][0]) + "//| 3")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 4 |//" + str(self.grid[3][7]) + "//| " + str(self.grid[3][6]) +
" |//" + str(self.grid[3][5]) + "//| " + str(self.grid[3][4]) +
" |//" + str(self.grid[3][3]) + "//| " + str(self.grid[3][2]) +
" |//" + str(self.grid[3][1]) + "//| " + str(self.grid[3][0]) + " | 4")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 5 | " + str(self.grid[4][7]) + " |//" + str(self.grid[4][6]) +
"//| " + str(self.grid[4][5]) + " |//" + str(self.grid[4][4]) +
"//| " + str(self.grid[4][3]) + " |//" + str(self.grid[4][2]) +
"//| " + str(self.grid[4][1]) + " |//" + str(self.grid[4][0]) + "//| 5")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 6 |//" + str(self.grid[5][7]) + "//| " + str(self.grid[5][6]) +
" |//" + str(self.grid[5][5]) + "//| " + str(self.grid[5][4]) +
" |//" + str(self.grid[5][3]) + "//| " + str(self.grid[5][2]) +
" |//" + str(self.grid[5][1]) + "//| " + str(self.grid[5][0]) + " | 6")
print(" |///////| |///////| |///////| |///////| |")
print(" -----------------------------------------------------------------")
print(" | |///////| |///////| |///////| |///////|")
print(" 7 | " + str(self.grid[6][7]) + " |//" + str(self.grid[6][6]) +
"//| " + str(self.grid[6][5]) + " |//" + str(self.grid[6][4]) +
"//| " + str(self.grid[6][3]) + " |//" + str(self.grid[6][2]) +
"//| " + str(self.grid[6][1]) + " |//" + str(self.grid[6][0]) + "//| 7")
print(" | |///////| |///////| |///////| |///////|")
print(" -----------------------------------------------------------------")
print(" |///////| |///////| |///////| |///////| |")
print(" 8 |//" + | |
__repr__(self):
return '<IrcCommands(%s)>' % ', '.join(map(repr, self.commands))
# -----------------------------------------------------------------------------
# User/Mask classes

# RFC 1459 case mapping table: ASCII uppercase plus "\[]" fold to
# lowercase and "|{}" respectively (IRC treats those pairs as equal).
_rfc1459trans = string.maketrans(string.ascii_uppercase + r'\[]',
                                 string.ascii_lowercase + r'|{}')
def IRClower(s):
return s.translate(_rfc1459trans)
class CaseInsensibleString(str):
    """A str subclass that compares and hashes using IRC (RFC 1459) case folding."""
    def __init__(self, s=''):
        # Cache the case-folded form once; all comparisons use it.
        self.lowered = IRClower(s)

    def lower(self):
        return self.lowered

    def translate(self, trans):
        # The supplied table is ignored: folding is always RFC 1459.
        return self.lowered

    def __eq__(self, s):
        return self.lowered == IRClower(s)

    def __ne__(self, s):
        return not self == s

    def __hash__(self):
        return hash(self.lowered)
def caseInsensibleKey(k):
    """Normalise *k* for case-insensitive storage.

    Strings become CaseInsensibleString, tuples are normalised element-wise
    (recursively), anything else passes through unchanged.
    """
    if isinstance(k, tuple):
        return tuple(caseInsensibleKey(item) for item in k)
    if isinstance(k, str):
        return CaseInsensibleString(k)
    return k
class CaseInsensibleDict(dict):
    """A dict whose keys are compared case-insensitively (RFC 1459 rules).

    Every key is normalised through caseInsensibleKey() both when stored
    and when looked up, so 'Nick', 'nick' and 'NICK' address the same entry.
    """
    key = staticmethod(caseInsensibleKey)

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            self[k] = v

    def __setitem__(self, k, v):
        dict.__setitem__(self, self.key(k), v)

    def __getitem__(self, k):
        return dict.__getitem__(self, self.key(k))

    def __delitem__(self, k):
        dict.__delitem__(self, self.key(k))

    def __contains__(self, k):
        return dict.__contains__(self, self.key(k))

    def pop(self, k, *args):
        # FIX: accept dict.pop()'s optional default, so d.pop(k, default)
        # no longer raises TypeError (previously only pop(k) was accepted).
        return dict.pop(self, self.key(k), *args)
class CaseInsensibleDefaultDict(defaultdict, CaseInsensibleDict):
    # defaultdict contributes the default_factory machinery; CaseInsensibleDict
    # contributes the case-folding __setitem__/__getitem__ overrides via the MRO.
    pass
class CaseInsensibleSet(set):
    """A set whose members are compared case-insensitively (RFC 1459 rules)."""
    normalize = staticmethod(caseInsensibleKey)

    def __init__(self, iterable=()):
        set.__init__(self, map(self.normalize, iterable))

    def __contains__(self, v):
        return set.__contains__(self, self.normalize(v))

    def update(self, L):
        set.update(self, [self.normalize(item) for item in L])

    def add(self, v):
        set.add(self, self.normalize(v))

    def remove(self, v):
        set.remove(self, self.normalize(v))
class ChannelWatchlistSet(CaseInsensibleSet):
    # Set of (server, channel) pairs the script should watch, lazily loaded
    # from the plugin's "watchlist.*" options the first time membership is
    # tested.
    _updated = False
    def __contains__(self, v):
        if not self._updated:
            self.__updateFromConfig()
        return CaseInsensibleSet.__contains__(self, v)
    def __updateFromConfig(self):
        # Read every plugins.var.python.<script>.watchlist.<server> option and
        # register its comma-separated channels as (server, channel) pairs.
        self._updated = True
        infolist = Infolist('option', 'plugins.var.python.%s.watchlist.*' %SCRIPT_NAME)
        n = len('python.%s.watchlist.' %SCRIPT_NAME)
        while infolist.next():
            name = infolist['option_name']
            value = infolist['value']
            server = name[n:]  # the option-name suffix is the server name
            if value:
                channels = value.split(',')
            else:
                channels = []
            self.update([ (server, channel) for channel in channels ])
chanopChannels = ChannelWatchlistSet()
class ServerChannelDict(CaseInsensibleDict):
    """Dict keyed by (server, channel) tuples, compared case-insensitively."""

    def getChannels(self, server, item=None):
        """Return a list of channels that match server and has item if given"""
        if item:
            return [ chan for serv, chan in self if serv == server and item in self[serv, chan] ]
        else:
            return [ chan for serv, chan in self if serv == server ]

    def purge(self):
        """Drop entries for channels no longer in the watchlist, then purge the rest."""
        # FIX: snapshot keys/values before mutating.  Deleting while iterating
        # a live dict view raises RuntimeError on Python 3 (the old code relied
        # on Python 2's keys() returning a list copy).
        for key in list(self.keys()):
            if key not in chanopChannels:
                debug('removing %s mask list, not in watchlist.', key)
                del self[key]
        for data in list(self.values()):
            data.purge()
# -----------------------------------------------------------------------------
# Channel Modes (bans)
class MaskObject(object):
    """One channel mask entry (ban/quiet) plus metadata about who set it, when,
    and which user hostmask(s) it was generated from.

    :param mask:     the mask itself, e.g. ``*!*@host``
    :param hostmask: a single hostmask string or a list of hostmasks
                     (default: none known)
    :param operator: usermask of the operator that set the mask
    :param date:     unix timestamp when set; falsy means "now"
    :param expires:  unix timestamp when it expires; 0 means never
    """
    def __init__(self, mask, hostmask=None, operator='', date=0, expires=0):
        self.mask = mask
        self.operator = operator
        if date:
            date = int(date)
        else:
            date = now()
        self.date = date
        # FIX: the default used to be a shared mutable list (hostmask=[]),
        # so every MaskObject created without a hostmask aliased — and
        # mutated — the very same list object.  Use None as the sentinel
        # and build a fresh list per instance instead.
        if hostmask is None:
            hostmask = []
        elif isinstance(hostmask, str):
            hostmask = [ hostmask ]
        self.hostmask = hostmask
        self.expires = int(expires)

    def serialize(self):
        """Pack metadata into a single 'operator;date;expires;hostmasks' string."""
        data = ';'.join([ self.operator,
                          str(self.date),
                          str(self.expires),
                          ','.join(self.hostmask) ])
        return data

    def deserialize(self, data):
        """Restore metadata from a serialize()d string, validating each field."""
        op, date, expires, hostmasks = data.split(';')
        assert op and date, "Error reading chanmask option %s, missing operator or date" % self.mask
        if not is_hostmask(op):
            raise Exception('Error reading chanmask option %s, invalid usermask %r' \
                            % (self.mask, op))
        self.operator = op
        try:
            self.date = int(date)
        except ValueError:
            # Older versions stored a human-readable date string; convert it.
            self.date = int(time.mktime(time.strptime(date, '%Y-%m-%d %H:%M:%S')))
        if expires:
            self.expires = int(expires)
        else:
            self.expires = 0
        if hostmasks:
            hostmasks = hostmasks.split(',')
            if not all(map(is_hostmask, hostmasks)):
                raise Exception('Error reading chanmask option %s, a hostmask is invalid: %s' \
                                % (self.mask, hostmasks))
            self.hostmask = hostmasks

    def __repr__(self):
        return "<MaskObject(%s)>" % self.mask
class MaskList(CaseInsensibleDict):
    """Single list of masks for one (server, channel) pair."""
    def __init__(self, server, channel):
        # FIX: server and channel were accepted but silently discarded,
        # leaving self.server / self.channel undefined for code that
        # expects them (e.g. the disabled nick-search helper).
        self.server = server
        self.channel = channel
        self.synced = 0  # timestamp of the last completed mask-list sync

    def add(self, mask, **kwargs):
        """Insert *mask*, or complete missing attributes of an existing entry."""
        if mask in self:
            # mask exists: fill in only attributes that are still unset,
            # so previously known data is never overwritten.
            ban = self[mask]
            for attr, value in kwargs.items():
                if value and not getattr(ban, attr):
                    setattr(ban, attr, value)
        else:
            ban = self[mask] = MaskObject(mask, **kwargs)
        return ban

    def search(self, pattern, reverseMatch=False):
        """Return masks matching *pattern*; with reverseMatch, masks that
        (used as patterns) match *pattern*."""
        if reverseMatch:
            L = [ mask for mask in self if hostmask_match(mask, pattern) ]
        else:
            L = pattern_match_list(pattern, self.keys())
        return L

    def purge(self):
        # Nothing to expire locally; hook called by ServerChannelDict.purge().
        pass
class MaskCache(ServerChannelDict):
    """Cache of MaskList objects, one per (server, channel) pair."""

    def add(self, server, channel, mask, **kwargs):
        """Add a ban to the (server, channel) banlist and return it."""
        key = (server, channel)
        if key not in self:
            self[key] = MaskList(server, channel)
        return self[key].add(mask, **kwargs)

    def remove(self, server, channel, mask=None):
        """Forget one mask, or the whole list when mask is None."""
        key = (server, channel)
        try:
            if mask is None:
                del self[key]
            else:
                del self[key][mask]
        except KeyError:
            # Nothing cached for that server/channel/mask: ignore silently.
            pass
class ChanopCache(Shelf):
    # Persistent shelve-backed cache stored in the weechat home directory.
    def __init__(self, filename):
        # writeback=True so in-place mutations of stored mutable objects
        # are written back when the shelf is synced/closed.
        path = os.path.join(weechat.info_get('weechat_dir', ''), filename)
        Shelf.__init__(self, path, writeback=True)
class ModeCache(ChanopCache):
    """Persistent cache of channel mode lists (one MaskCache per mode char)."""

    def __init__(self, filename):
        ChanopCache.__init__(self, filename)
        self.modes = set()               # canonical mode characters
        self.map = CaseInsensibleDict()  # mode char or alias -> canonical char
        # Forget sync timestamps persisted from a previous session.
        for cache in self.values():
            for masklist in cache.values():
                masklist.synced = 0

    def registerMode(self, mode, *args):
        """Register a mode character together with any alias names for it."""
        if mode not in self:
            self[mode] = MaskCache()
        if mode not in self.modes:
            self.modes.add(mode)
        self.map[mode] = mode
        for alias in args:
            self.map[alias] = mode

    def __getitem__(self, mode):
        """Index by canonical mode char, falling back to registered aliases."""
        try:
            return ChanopCache.__getitem__(self, mode)
        except KeyError:
            return ChanopCache.__getitem__(self, self.map[mode])

    def add(self, server, channel, mode, mask, **kwargs):
        assert mode in self.modes
        cache = self[mode]
        cache.add(server, channel, mask, **kwargs)

    def remove(self, server, channel, mode, mask):
        cache = self[mode]
        cache.remove(server, channel, mask)

    def purge(self):
        for cache in self.values():
            cache.purge()
class MaskSync(object):
    """Fetch and synchronise mask lists (bans/quiets) of any channel and mode."""
    __name__ = ''
    # While True, the raw 367/368/728/729 server messages are swallowed so a
    # fetch triggered by this script doesn't spam the user's server buffer.
    _hide_msg = False
    _hook_mask = ''
    _hook_end = ''
    # freenode new signals for list quiet messages
    _hook_quiet_mask = ''
    _hook_quiet_end = ''
    # sync queue stuff
    queue = []  # pending (server, channel, mode) fetches, processed in order
    _maskbuffer = CaseInsensibleDefaultDict(list)  # masks received so far, per (server, channel)
    _callback = CaseInsensibleDict()  # functions to run once a fetch for (server, channel) completes
    def hook(self):
        # 367 - ban mask
        # 368 - end of ban list
        # 728 - quiet mask
        # 729 - end of quiet list
        self.unhook()
        self._hook_mask = \
                weechat.hook_modifier('irc_in_367', callback(self._maskCallback), '')
        self._hook_end = \
                weechat.hook_modifier('irc_in_368', callback(self._endCallback), '')
        self._hook_quiet_mask = \
                weechat.hook_modifier('irc_in_728', callback(self._maskCallback), '')
        self._hook_quiet_end = \
                weechat.hook_modifier('irc_in_729', callback(self._endCallback), '')
    def unhook(self):
        # Remove any modifier hooks installed by hook() and reset the handles.
        for hook in ('_hook_mask',
                     '_hook_end',
                     '_hook_quiet_mask',
                     '_hook_quiet_end'):
            attr = getattr(self, hook)
            if attr:
                weechat.unhook(attr)
                setattr(self, hook, '')
    def fetch(self, server, channel, mode, callback=None):
        """Fetches masks for a given server and channel."""
        buffer = weechat.buffer_search('irc', 'server.%s' %server)
        if not buffer or not weechat.info_get('irc_is_channel', channel):
            # invalid server or channel
            return
        # check modes
        if mode not in supported_modes(server):
            return
        maskCache = modeCache[mode]
        key = (server, channel)
        # check the last time we did this
        try:
            masklist = maskCache[key]
            if (now() - masklist.synced) < 60:
                # don't fetch again
                return
        except KeyError:
            pass
        # An empty queue means no fetch in flight: start one immediately.
        # Otherwise just enqueue (skipping duplicates); _endCallback will
        # start the next fetch when the current one finishes.
        if not self.queue:
            self.queue.append((server, channel, mode))
            self._fetch(server, channel, mode)
        elif (server, channel, mode) not in self.queue:
            self.queue.append((server, channel, mode))
        if callback:
            self._callback[server, channel] = callback
    def _fetch(self, server, channel, mode):
        # Issue the actual /mode query; replies arrive via the 367/728 hooks.
        buffer = weechat.buffer_search('irc', 'server.%s' %server)
        if not buffer:
            return
        cmd = '/mode %s %s' %(channel, mode)
        self._hide_msg = True
        weechat_command(buffer, cmd)
    def _maskCallback(self, data, modifier, modifier_data, string):
        """callback for store a single mask."""
        # NOTE: the *string* parameter shadows the stdlib `string` module
        # inside this method (the raw IRC line is being processed here).
        #debug("MASK %s: %s %s", modifier, modifier_data, string)
        args = string.split()
        if self.queue:
            server, channel, _ = self.queue[0]
        else:
            # Unsolicited reply: derive server/channel from the message itself.
            server, channel = modifier_data, args[3]
        if modifier == 'irc_in_367':
            try:
                mask, op, date = args[4:]
            except IndexError:
                # NOTE(review): a short 367 line raises ValueError on the
                # unpack above, not IndexError — confirm which the server
                # actually produces; this handler may never fire.
                mask = args[4]
                op = date = None
        elif modifier == 'irc_in_728':
            mask, op, date = args[5:]
        # store temporally until "end list" msg
        self._maskbuffer[server, channel].append((mask, op, date))
        if self._hide_msg:
            string = ''
        return string
    def _endCallback(self, data, modifier, modifier_data, string):
        """callback for end of channel's mask list."""
        #debug("MASK END %s: %s %s", modifier, modifier_data, string)
        if self.queue:
            server, channel, mode = self.queue.pop(0)
        else:
            # Unsolicited end-of-list: recover server/channel/mode from the line.
            args = string.split()
            server, channel = modifier_data, args[3]
            if modifier == 'irc_in_368':
                mode = args[7]
            elif modifier == 'irc_in_729':
                mode = args[4]
            else:
                return string
        maskCache = modeCache[mode]
        # delete old masks in cache
        if (server, channel) in maskCache:
            masklist = maskCache[server, channel]
            banmasks = [ L[0] for L in self._maskbuffer[server, channel] ]
            # NOTE(review): deleting while iterating keys() is only safe on
            # Python 2, where keys() returns a list copy.
            for mask in masklist.keys():
                if mask not in banmasks:
                    del masklist[mask]
        # Flush the buffered masks collected by _maskCallback into the cache.
        for banmask, op, date in self._maskbuffer[server, channel]:
            maskCache.add(server, channel, banmask, operator=op, date=date)
        del self._maskbuffer[server, channel]
        try:
            maskList = maskCache[server, channel]
        except KeyError:
            maskList = maskCache[server, channel] = MaskList(server, channel)
        maskList.synced = now()
        # run hooked functions if any
        if (server, channel) in self._callback:
            self._callback[server, channel]()
            del self._callback[server, channel]
        if self._hide_msg:
            string = ''
        # Start the next queued fetch, or reset state when the queue drains.
        if self.queue:
            next = self.queue[0]
            self._fetch(*next)
        else:
            assert not self._maskbuffer, "mask buffer not empty: %s" % self._maskbuffer.keys()
            self._hide_msg = False
        return string
maskSync = MaskSync()
# -----------------------------------------------------------------------------
# User cache
class UserObject(object):
    # Tracks one IRC user: nick, known hostmasks (most recent last), the
    # last-seen timestamp, and a channel reference count.
    def __init__(self, nick, hostmask=None):
        self.nick = nick
        if hostmask:
            self._hostmask = [ hostmask ]
        else:
            self._hostmask = []
        self.seen = now()
        self._channels = 0  # presumably a watched-channel refcount — managed elsewhere
    @property
    def hostmask(self):
        # Most recently seen hostmask, or '' when none is known yet.
        try:
            return self._hostmask[-1]
        except IndexError:
            return ''
    def update(self, hostmask=None):
        # Record a hostmask (moving a re-seen one to the end, so the
        # `hostmask` property always yields the latest) and refresh `seen`.
        if hostmask and hostmask != self.hostmask:
            if hostmask in self._hostmask:
                del self._hostmask[self._hostmask.index(hostmask)]
            self._hostmask.append(hostmask)
        self.seen = now()
| |
"""
Configuration of pytest for agent tests
"""
from pathlib import Path
from textwrap import dedent
from unittest.mock import patch
import httpx
import respx
from pytest import fixture
from lm_agent.backend_utils import BackendConfigurationRow
from lm_agent.config import settings
MOCK_BIN_PATH = Path(__file__).parent / "mock_tools"
@fixture(autouse=True)
def mock_cache_dir(tmp_path):
    """Patch the agent's CACHE_DIR to a fresh, not-yet-created temp directory."""
    cache_dir = tmp_path / "license-manager-cache"
    assert not cache_dir.exists()
    with patch("lm_agent.config.settings.CACHE_DIR", new=cache_dir):
        yield cache_dir
@fixture
def license_servers():
    """Two fake license-server 'host port' strings for server-list tests."""
    return ["192.168.127.12 2345", "172.16.31.10 2345"]
@fixture
def respx_mock():
    """
    Run a test in the respx context (similar to respx decorator, but it's a fixture).
    Mocks the auth0 route used to secure a token.
    """
    with respx.mock as mock:
        # Any token request to the configured AUTH0 domain succeeds with a
        # canned dummy token, so tests never hit the network.
        respx.post(f"https://{settings.AUTH0_DOMAIN}/oauth/token").mock(
            return_value=httpx.Response(status_code=200, json=dict(access_token="dummy-token"))
        )
        yield mock
@fixture
def one_configuration_row_flexlm():
    """A single backend configuration row for a FlexLM-served product."""
    return BackendConfigurationRow(
        product="testproduct",
        features={"testfeature": 10},
        license_servers=["flexlm:127.0.0.1:2345"],
        license_server_type="flexlm",
        grace_time=10000,
    )
@fixture
def one_configuration_row_rlm():
    """A single backend configuration row for an RLM-served product."""
    return BackendConfigurationRow(
        product="converge",
        features={"converge_super": 10},
        license_servers=["rlm:127.0.0.1:2345"],
        license_server_type="rlm",
        grace_time=10000,
    )
@fixture
def one_configuration_row_lsdyna():
    """A single backend configuration row for an LS-Dyna-served product."""
    return BackendConfigurationRow(
        product="mppdyna",
        features={"mppdyna": 500},
        license_servers=["lsdyna:127.0.0.1:2345"],
        license_server_type="lsdyna",
        grace_time=10000,
    )
@fixture
def one_configuration_row_lmx():
    """A single backend configuration row for an LM-X-served product."""
    return BackendConfigurationRow(
        product="hyperworks",
        features={"hyperworks": 1000000},
        license_servers=["lmx:127.0.0.1:2345"],
        license_server_type="lmx",
        grace_time=10000,
    )
@fixture
def lmstat_output_bad():
    """
    Some unparseable lmstat output
    """
    # Simulates an unreachable license server; parsers should fail gracefully.
    return dedent(
        """\
        lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
        Flexible License Manager status on Wed 03/31/2021 09:12
        Error getting status: Cannot connect to license server (-15,570:111 "Connection refused")
        """
    )
@fixture
def lmstat_output():
    """
    Some lmstat output to parse
    """
    # Three checkouts (29 + 27 + 37 = 93) against 1000 issued licenses; the
    # per-user lines are appended via implicit string concatenation.
    return dedent(
        """\
        lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
        ...
        Users of TESTFEATURE: (Total of 1000 licenses issued; Total of 93 licenses in use)
        ...
        """
        " jbemfv myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
        "start Thu 10/29 8:09, 29 licenses\n"
        " cdxfdn myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
        "start Thu 10/29 8:09, 27 licenses\n"
        " jbemfv myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
        "start Thu 10/29 8:09, 37 licenses\n"
    )
@fixture
def lmstat_output_no_licenses():
    """
    Some lmstat output with no licenses in use to parse
    """
    return dedent(
        """\
        lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
        ...
        Users of TESTFEATURE: (Total of 1000 licenses issued; Total of 0 licenses in use)
        ...
        """
    )
@fixture
def rlm_output_bad():
    """
    Some unparseable rlm output
    """
    # Simulates an unreachable RLM server; parsers should fail gracefully.
    return dedent(
        """\
        rlmutil v12.2
        Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
        Error connecting to "rlm" server
        Connection attempted to host: "" on port 5053
        No error
        """
    )
@fixture
def rlm_output():
    """
    Some rlm output to parse
    """
    # converge_super shows 93 of 1000 licenses in use (29 + 27 + 37 in the
    # usage section at the bottom).
    return dedent(
        """\
        Setting license file path to <EMAIL>
        rlmutil v12.2
        Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
	rlm status on licserv.com (port 35015), up 99d 11:08:25
	rlm software version v12.2 (build:2)
	rlm comm version: v1.2
	Startup time: Tue Oct 19 01:40:13 2021
	Todays Statistics (13:48:32), init time: Tue Nov 2 23:00:06 2021
	Recent Statistics (00:16:08), init time: Wed Nov 3 12:32:30 2021
	Recent Stats Todays Stats Total Stats
	 00:16:08 13:48:32 15d 11:08:25
	Messages: 582 (0/sec) 28937 (0/sec) 777647 (0/sec)
	Connections: 463 (0/sec) 23147 (0/sec) 622164 (0/sec)
	 --------- ISV servers ----------
	 Name Port Running Restarts
	 csci 63133 Yes 0
	 ------------------------
	 csci ISV server status on licserv.server.com (port 63133), up 99d 11:08:18
	 csci software version v12.2 (build: 2)
	 csci comm version: v1.2
	 csci Debug log filename: F:\RLM\Logs\csci.dlog
	 csci Report log filename: F:\RLM\logs\Reportlogs\CSCILOG.rl
	 Startup time: Tue Oct 19 01:40:20 2021
	 Todays Statistics (13:48:32), init time: Tue Nov 2 23:00:06 2021
	 Recent Statistics (00:16:08), init time: Wed Nov 3 12:32:30 2021
	 Recent Stats Todays Stats Total Stats
	 00:16:08 13:48:32 15d 11:08:18
	Messages: 991 (0/sec) 34770 (0/sec) 935961 (0/sec)
	Connections: 945 (0/sec) 17359 (0/sec) 466699 (0/sec)
	Checkouts: 0 (0/sec) 1 (0/sec) 937 (0/sec)
	Denials: 0 (0/sec) 0 (0/sec) 0 (0/sec)
	Removals: 0 (0/sec) 0 (0/sec) 0 (0/sec)
	 ------------------------
	csci license pool status on licser.server.com (port 63133)
	converge v3.0
	 count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 0
	converge_gui v1.0
	 count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 26
	converge_gui_polygonica v1.0
	 count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 26
	converge_super v3.0
	 count: 1000, # reservations: 0, inuse: 93, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 169
	converge_tecplot v1.0
	 count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 16
	 ------------------------
	csci license usage status on licser.server.com (port 63133)
	converge_super v3.0: <EMAIL> 29/0 at 11/01 09:01 (handle: 15a)
	converge_super v3.0: <EMAIL> 27/0 at 11/03 10:38 (handle: 128)
	converge_super v3.0: <EMAIL> 37/0 at 11/01 09:01 (handle: 15a)
        """
    )
@fixture
def rlm_output_no_licenses():
    """
    Some rlm output with no licenses in use to parse
    """
    # Identical shape to rlm_output, but every pool reports inuse: 0.
    return dedent(
        """\
        Setting license file path to <EMAIL>:<EMAIL>
        rlmutil v12.2
        Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
	rlm status on licserv0011.com (port 35015), up 20d 13:21:16
	rlm software version v12.2 (build:2)
	rlm comm version: v1.2
	Startup time: Tue Oct 19 03:40:13 2021
	Todays Statistics (16:01:23), init time: Mon Nov 8 00:00:06 2021
	Recent Statistics (00:28:35), init time: Mon Nov 8 15:32:54 2021
	 Recent Stats Todays Stats Total Stats
	 00:28:35 16:01:23 20d 13:21:16
	Messages: 997 (0/sec) 33562 (0/sec) 1033736 (0/sec)
	Connections: 797 (0/sec) 26849 (0/sec) 827039 (0/sec)
	 --------- ISV servers ----------
	 Name Port Running Restarts
	 csci 63133 Yes 0
	 ------------------------
	 csci ISV server status on licserv0011.com (port 63133), up 20d 13:21:09
	 csci software version v12.2 (build: 2)
	 csci comm version: v1.2
	 csci Debug log filename: F:\RLM\Logs\csci.dlog
	 csci Report log filename: F:\RLM\logs\Reportlogs\CSCILOG.rl
	 Startup time: Tue Oct 19 03:40:20 2021
	 Todays Statistics (16:01:23), init time: Mon Nov 8 00:00:06 2021
	 Recent Statistics (00:28:35), init time: Mon Nov 8 15:32:54 2021
	 Recent Stats Todays Stats Total Stats
	 00:28:35 16:01:23 20d 13:21:09
	Messages: 1196 (0/sec) 40276 (0/sec) 1243764 (0/sec)
	Connections: 598 (0/sec) 20138 (0/sec) 620365 (0/sec)
	Checkouts: 0 (0/sec) 0 (0/sec) 262 (0/sec)
	Denials: 0 (0/sec) 0 (0/sec) 0 (0/sec)
	Removals: 0 (0/sec) 0 (0/sec) 0 (0/sec)
	 ------------------------
	csci license pool status on licserv0011.com (port 63133)
	converge v3.0
	 count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 0
	converge_gui v1.0
	 count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 26
	converge_gui_polygonica v1.0
	 count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 26
	converge_super v3.0
	 count: 1000, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 189
	converge_tecplot v1.0
	 count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
	 obsolete: 0, min_remove: 120, total checkouts: 21
        """
    )
@fixture
def lsdyna_output_bad():
    """
    Some unparseable lsdyna output
    """
    # Simulates an unreachable LS-Dyna server; parsers should fail gracefully.
    return dedent(
        """\
        Using default server 31010@localhost
        *** ERROR failed to open server localhost
        """
    )
@fixture
def lsdyna_output():
    """
    Some lsdyna output to parse
    """
    # MPPDYNA shows per-user checkouts (80*5 + 40 = 440 used, 60 free of 500).
    return dedent(
        """\
        Using user specified server <EMAIL>
        LICENSE INFORMATION
        PROGRAM EXPIRATION CPUS USED FREE MAX | QUEUE
        ---------------- ---------- ----- ------ ------ | -----
        MPPDYNA 12/30/2022 - 60 500 | 0
        fane8y <EMAIL> 80
        ssskmj <EMAIL> 80
        ssskmj <EMAIL> 80
        ywazrn <EMAIL> 80
        ywazrn <EMAIL> 80
        ndhtw9 <EMAIL> 40
        MPPDYNA_971 12/30/2022 0 60 500 | 0
        MPPDYNA_970 12/30/2022 0 60 500 | 0
        MPPDYNA_960 12/30/2022 0 60 500 | 0
        LS-DYNA 12/30/2022 0 60 500 | 0
        LS-DYNA_971 12/30/2022 0 60 500 | 0
        LS-DYNA_970 12/30/2022 0 60 500 | 0
        LS-DYNA_960 12/30/2022 0 60 500 | 0
        LICENSE GROUP 440 60 500 | 0
        """
    )
@fixture
def lsdyna_output_no_licenses():
    """
    Some lsdyna output with no licenses in use to parse
    """
    return dedent(
        """\
        Using user specified server <EMAIL>
        LICENSE INFORMATION
        PROGRAM EXPIRATION CPUS USED FREE MAX | QUEUE
        ---------------- ---------- ----- ------ ------ | -----
        MPPDYNA 12/30/2022 0 500 500 | 0
        MPPDYNA_971 12/30/2022 0 500 500 | 0
        MPPDYNA_970 12/30/2022 0 000 500 | 0
        MPPDYNA_960 12/30/2022 0 000 500 | 0
        LS-DYNA 12/30/2022 0 000 500 | 0
        LS-DYNA_971 12/30/2022 0 000 500 | 0
        LS-DYNA_970 12/30/2022 0 000 500 | 0
        LS-DYNA_960 12/30/2022 0 000 500 | 0
        LICENSE GROUP 0 000 500 | 0
        """
    )
@fixture
def lmx_output_bad():
    """
    Some unparseable output
    """
    # Simulates an LM-X utility run with no reachable license servers.
    return dedent(
        """\
        LM-X End-user Utility v3.32
        Copyright (C) 2002-2010 X-Formation. All rights reserved.
        ++++++++++++++++++++++++++++++++++++++++
        LM-X license server(s):
        ----------------------------------------
        There are no license server(s) available.
        """
    )
@fixture
def lmx_output():
"""
Some LM-X output | |
# Copyright 2019-21 by <NAME>. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""SCADIO: write OpenSCAD program to create protein structure 3D model.
3D printing a protein structure is a non-trivial exercise due to the
overall complexity and the general requirement for supporting overhang regions
while printing. This software is a path to generating a model for printing
(e.g. an STL file), and does not address the issues around converting the
model to a physical product. OpenSCAD <http://www.openscad.org/> can create
a printable model from the script this software produces. MeshMixer
<http://www.meshmixer.com/>, various slicer software, and the 3D printer
technology available to you provide options for addressing the problems around
physically rendering the model.
The model generated here consists of OpenSCAD primitives, e.g. spheres and
cylinders, representing individual atoms and bonds in an explicit model of a
protein structure. The benefit is that individual atoms/bonds may be selected
for specific print customizations relevant to 3D printing (such as rotatable
bond mechanisms or hydrogen bond magnets). Alternatively, use e.g. Chimera to
render a structure as ribbons or similar for printing as a single object.
I suggest generating your initial model using the OpenSCAD script provided
here, then modifying that script according to your needs. Changing the
atomScale and bondRadius values can simplify the model by removing gaps and
the corresponding need for supports, or you may wish to modify the
hedronDispatch() routine to select residues or chain sections for printing
separately and subsequently joining with rotatable bonds. During this
development phase you will likely have your version include only the data
matrices generated here, by using the `includeCode=False` option to
write_SCAD(). An example project using rotatable backbone and magnetic
hydrogen bonds is at <https://www.thingiverse.com/thing:3957471>.
"""
# import re
from Bio.File import as_handle
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.internal_coords import IC_Residue, IC_Chain
# from Bio.PDB.Structure import Structure
# from Bio.PDB.Residue import Residue
from Bio.PDB.vectors import homog_scale_mtx
import numpy as np # type: ignore
def _scale_residue(res, scale, scaleMtx):
if res.internal_coord:
res.internal_coord.applyMtx(scaleMtx)
if res.internal_coord.gly_Cbeta:
res.internal_coord.scale = scale
def write_SCAD(
    entity,
    file,
    scale=None,
    pdbid=None,
    backboneOnly=False,
    includeCode=True,
    maxPeptideBond=None,
    start=None,
    fin=None,
    handle="protein",
):
    """Write hedron assembly to file as OpenSCAD matrices.

    This routine calls both :meth:`.IC_Chain.internal_to_atom_coordinates` and
    :meth:`.IC_Chain.atom_to_internal_coordinates` due to requirements for
    scaling, explicit bonds around rings, and setting the coordinate space of
    the output model.

    Output data format is primarily:

    - matrix for each hedron:
        len1, angle2, len3, atom covalent bond class, flags to indicate
        atom/bond represented in previous hedron (OpenSCAD very slow with
        redundant overlapping elements), flags for bond features
    - transform matrices to assemble each hedron into residue dihedra sets
    - transform matrices for each residue to position in chain

    OpenSCAD software is included in this Python file to process these
    matrices into a model suitable for a 3D printing project.

    :param entity: Biopython PDB :class:`.Structure` entity
        structure data to export
    :param file: Biopython :func:`.as_handle` filename or open file pointer
        file to write data to
    :param float scale:
        units (usually mm) per angstrom for STL output, written in output
    :param str pdbid:
        PDB idcode, written in output. Defaults to '0PDB' if not supplied
        and no 'idcode' set in entity
    :param bool backboneOnly: default False.
        Do not output side chain data past Cbeta if True
    :param bool includeCode: default True.
        Include OpenSCAD software (inline below) so output file can be loaded
        into OpenSCAD; if False, output data matrices only
    :param float maxPeptideBond: Optional default None.
        Override the cut-off in IC_Chain class (default 1.4) for detecting
        chain breaks. If your target has chain breaks, pass a large number
        here to create a very long 'bond' spanning the break.
    :param int start,fin: default None
        Parameters for internal_to_atom_coords() to limit chain segment.
    :param str handle: default 'protein'
        name for top level of generated OpenSCAD matrix structure

    See :meth:`.IC_Residue.set_flexible` to set flags for specific residues to
    have rotatable bonds, and :meth:`.IC_Residue.set_hbond` to include cavities
    for small magnets to work as hydrogen bonds.
    See <https://www.thingiverse.com/thing:3957471> for implementation example.

    The OpenSCAD code explicitly creates spheres and cylinders to
    represent atoms and bonds in a 3D model. Options are available
    to support rotatable bonds and magnetic hydrogen bonds.

    Matrices are written to link, enumerate and describe residues,
    dihedra, hedra, and chains, mirroring contents of the relevant IC_*
    data structures.

    The OpenSCAD matrix of hedra has additional information as follows:

    * the atom and bond state (single, double, resonance) are logged
      so that covalent radii may be used for atom spheres in the 3D models
    * bonds and atoms are tracked so that each is only created once
    * bond options for rotation and magnet holders for hydrogen bonds
      may be specified (see :meth:`.IC_Residue.set_flexible` and
      :meth:`.IC_Residue.set_hbond` )

    Note the application of :data:`Bio.PDB.internal_coords.IC_Chain.MaxPeptideBond`
    : missing residues may be linked (joining chain segments with arbitrarily
    long bonds) by setting this to a large value.

    Note this uses the serial assembly per residue, placing each residue at
    the origin and supplying the coordinate space transform to OpenSCAD

    All ALTLOC (disordered) residues and atoms are written to the output
    model. (see :data:`Bio.PDB.internal_coords.IC_Residue.no_altloc`)
    """
    if maxPeptideBond is not None:
        # Temporarily override the class-level chain-break cut-off; restored
        # at the end of this function.
        mpbStash = IC_Chain.MaxPeptideBond
        IC_Chain.MaxPeptideBond = float(maxPeptideBond)
    # step one need IC_Residue atom_coords loaded in order to scale
    # so if no internal_coords, initialise from Atom coordinates
    added_IC_Atoms = False
    if "S" == entity.level or "M" == entity.level:
        for chn in entity.get_chains():
            if not chn.internal_coord:
                chn.internal_coord = IC_Chain(chn)
                added_IC_Atoms = True
    elif "C" == entity.level:
        if not entity.internal_coord:
            entity.internal_coord = IC_Chain(entity)
            added_IC_Atoms = True
    else:
        raise PDBException("level not S, M or C: " + str(entity.level))
    if added_IC_Atoms:
        # if loaded pdb, need to scale, and asm, gen atomArray
        entity.atom_to_internal_coordinates()
    else:
        # if loaded pic file and need to scale, generate atom coords
        entity.internal_to_atom_coordinates(None)
    if scale is not None:
        # Scale every atom coordinate by the homogeneous scale matrix and
        # mark hydrogens for recomputation.
        scaleMtx = homog_scale_mtx(scale)
        if "C" == entity.level:
            entity.internal_coord.atomArray = np.dot(
                entity.internal_coord.atomArray[:], scaleMtx
            )
            entity.internal_coord.hAtoms_needs_update[:] = True
            entity.internal_coord.scale = scale
        else:
            for chn in entity.get_chains():
                if hasattr(chn.internal_coord, "atomArray"):
                    chn.internal_coord.atomArray = np.dot(
                        chn.internal_coord.atomArray[:], scaleMtx
                    )
                    chn.internal_coord.hAtoms_needs_update[:] = True
                    chn.internal_coord.scale = scale
    # generate internal coords for scaled entity
    # (hedron bond lengths have changed if scaled)
    # if not scaling, still need to generate internal coordinate
    # bonds for ring sidechains
    # AllBonds is a class attribute for IC_Residue.atom_to_internal_coordinates
    # to generate explicit hedra covering all bonds
    allBondsStash = IC_Residue._AllBonds
    IC_Residue._AllBonds = True
    # trigger rebuild of hedra for AllBonds
    if "C" == entity.level:
        entity.internal_coord.ordered_aa_ic_list[0].hedra = {}
        delattr(entity.internal_coord, "hAtoms_needs_update")
        delattr(entity.internal_coord, "hedraLen")
    else:
        for chn in entity.get_chains():
            chn.internal_coord.ordered_aa_ic_list[0].hedra = {}
            delattr(chn.internal_coord, "hAtoms_needs_update")
            delattr(chn.internal_coord, "hedraLen")
    entity.atom_to_internal_coordinates()
    IC_Residue._AllBonds = allBondsStash
    # rebuild atom coordinates now with chain starting at origin: in OpenSCAD
    # code, each residue model is transformed to N-Ca-C start position instead
    # of updating transform matrix along chain
    entity.internal_to_atom_coordinates()
    with as_handle(file, "w") as fp:
        if includeCode:
            fp.write(peptide_scad)
        if not pdbid and hasattr(entity, "header"):
            pdbid = entity.header.get("idcode", None)
        if pdbid is None or "" == pdbid:
            pdbid = "0PDB"
        # FIX: use the `handle` parameter for the top-level OpenSCAD variable
        # name as documented; it was previously accepted but ignored in
        # favour of the hard-coded literal 'protein'.  The default value
        # produces output identical to before.
        fp.write(
            handle + ' = [ "' + pdbid + '", ' + str(scale) + ", // ID, protein_scale\n"
        )
        if "S" == entity.level or "M" == entity.level:
            for chn in entity.get_chains():
                fp.write(" [\n")
                chn.internal_coord._write_SCAD(
                    fp, backboneOnly=backboneOnly, start=start, fin=fin
                )
                fp.write(" ]\n")
        elif "C" == entity.level:
            fp.write(" [\n")
            entity.internal_coord._write_SCAD(
                fp, backboneOnly=backboneOnly, start=start, fin=fin
            )
            fp.write(" ]\n")
        elif "R" == entity.level:
            raise NotImplementedError("writescad single residue not yet implemented.")
        fp.write("\n];\n")
    if maxPeptideBond is not None:
        IC_Chain.MaxPeptideBond = mpbStash
peptide_scad = """
/*
//
// peptide.scad
// Copyright (c) 2019 <NAME>. All rights reserved.
// This file is part of the Biopython distribution and governed by your
// choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
// Please see the LICENSE file that should have been included as part of this
// package.
//
// This is the support file to build an OpenSCAD (http://www.openscad.org/) model
// of a protein from internal coordinates. The resulting model may be constructed
// on a 3D printer.
//
// data matrices should be appended below to form a program ready | |
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
names (list[str], optional):
Performs the operation on the unique name specified. Enter multiple names in
comma-separated format. For example, `name01,name02`.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
ids=ids,
names=names,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._syslog_api.api20_syslog_servers_delete_with_http_info
_process_references(references, ['ids', 'names'], kwargs)
return self._call_api(endpoint, kwargs)
def get_syslog_servers(
    self,
    references=None,  # type: List[models.ReferenceType]
    continuation_token=None,  # type: str
    filter=None,  # type: str
    ids=None,  # type: List[str]
    limit=None,  # type: int
    names=None,  # type: List[str]
    offset=None,  # type: int
    sort=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.SyslogServerGetResponse
    """Return a list of configured syslog servers.

    Args:
        references (list[FixedReference], optional):
            References to query for; overrides the `ids` and `names`
            keyword arguments.
        continuation_token (str, optional):
            Opaque token used to iterate over a collection of resources.
        filter (Filter, optional):
            Include only resources that match the specified criteria.
        ids (list[str], optional):
            Resource IDs; every listed ID must match a resource after
            filtering. Cannot be combined with `name` or `names`.
        limit (int, optional):
            Maximum number of resources in the response (defaults to 1000).
        names (list[str], optional):
            Resource names, comma-separated (e.g. `name01,name02`).
        offset (int, optional):
            Offset of the first resource to return from a collection.
        sort (list[Property], optional):
            Properties to sort the response by; may be a single element.
        async_req (bool, optional):
            Run the request in a separate thread and return a
            multiprocessing.pool.ApplyResult.
        _return_http_data_only (bool, optional):
            Return only the data field.
        _preload_content (bool, optional):
            Convert the response into objects.
        _request_timeout (int, optional):
            Total request timeout in seconds.

    Returns:
        ValidResponse: If the call was successful.
        ErrorResponse: If the call was not successful.

    Raises:
        PureError: If calling the API fails.
        ValueError: If a parameter is of an invalid type.
        TypeError: If invalid or missing parameters are used.
    """
    # Collect every query parameter, then drop the unset ones so only
    # caller-supplied values reach the generated client.
    raw_kwargs = {
        'continuation_token': continuation_token,
        'filter': filter,
        'ids': ids,
        'limit': limit,
        'names': names,
        'offset': offset,
        'sort': sort,
        'async_req': async_req,
        '_return_http_data_only': _return_http_data_only,
        '_preload_content': _preload_content,
        '_request_timeout': _request_timeout,
    }
    kwargs = {key: value for key, value in raw_kwargs.items() if value is not None}
    _process_references(references, ['ids', 'names'], kwargs)
    return self._call_api(
        self._syslog_api.api20_syslog_servers_get_with_http_info, kwargs)
def patch_syslog_servers(
    self,
    references=None,  # type: List[models.ReferenceType]
    syslog_server=None,  # type: models.SyslogServerPostOrPatch
    ids=None,  # type: List[str]
    names=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.SyslogServerResponse
    """Modify the URI of a configured syslog server.

    Args:
        references (list[FixedReference], optional):
            References to query for; overrides the `ids` and `names`
            keyword arguments.
        syslog_server (SyslogServerPostOrPatch, required):
            The new syslog server attributes.
        ids (list[str], optional):
            Resource IDs; every listed ID must match a resource after
            filtering. Cannot be combined with `name` or `names`.
        names (list[str], optional):
            Resource names, comma-separated (e.g. `name01,name02`).
        async_req (bool, optional):
            Run the request in a separate thread and return a
            multiprocessing.pool.ApplyResult.
        _return_http_data_only (bool, optional):
            Return only the data field.
        _preload_content (bool, optional):
            Convert the response into objects.
        _request_timeout (int, optional):
            Total request timeout in seconds.

    Returns:
        ValidResponse: If the call was successful.
        ErrorResponse: If the call was not successful.

    Raises:
        PureError: If calling the API fails.
        ValueError: If a parameter is of an invalid type.
        TypeError: If invalid or missing parameters are used.
    """
    # Prune unset parameters before handing off to the generated client.
    raw_kwargs = {
        'syslog_server': syslog_server,
        'ids': ids,
        'names': names,
        'async_req': async_req,
        '_return_http_data_only': _return_http_data_only,
        '_preload_content': _preload_content,
        '_request_timeout': _request_timeout,
    }
    kwargs = {key: value for key, value in raw_kwargs.items() if value is not None}
    _process_references(references, ['ids', 'names'], kwargs)
    return self._call_api(
        self._syslog_api.api20_syslog_servers_patch_with_http_info, kwargs)
def post_syslog_servers(
    self,
    references=None,  # type: List[models.ReferenceType]
    syslog_server=None,  # type: models.SyslogServerPostOrPatch
    names=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.SyslogServerResponse
    """Configure a new syslog server.

    Transmission of syslog messages is enabled immediately.

    Args:
        references (list[FixedReference], optional):
            References to query for; overrides the `names` keyword argument.
        syslog_server (SyslogServerPostOrPatch, required):
            The syslog server attributes to create.
        names (list[str], optional):
            Resource names, comma-separated (e.g. `name01,name02`).
        async_req (bool, optional):
            Run the request in a separate thread and return a
            multiprocessing.pool.ApplyResult.
        _return_http_data_only (bool, optional):
            Return only the data field.
        _preload_content (bool, optional):
            Convert the response into objects.
        _request_timeout (int, optional):
            Total request timeout in seconds.

    Returns:
        ValidResponse: If the call was successful.
        ErrorResponse: If the call was not successful.

    Raises:
        PureError: If calling the API fails.
        ValueError: If a parameter is of an invalid type.
        TypeError: If invalid or missing parameters are used.
    """
    # Prune unset parameters before handing off to the generated client.
    raw_kwargs = {
        'syslog_server': syslog_server,
        'names': names,
        'async_req': async_req,
        '_return_http_data_only': _return_http_data_only,
        '_preload_content': _preload_content,
        '_request_timeout': _request_timeout,
    }
    kwargs = {key: value for key, value in raw_kwargs.items() if value is not None}
    _process_references(references, ['names'], kwargs)
    return self._call_api(
        self._syslog_api.api20_syslog_servers_post_with_http_info, kwargs)
def get_syslog_servers_settings(
self,
references=None, # type: List[models.ReferenceType]
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.SyslogServerSettingsGetResponse
"""
List the certificate or certificate group associated with the syslog servers.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides ids and names keyword arguments.
continuation_token (str, optional):
An opaque token to iterate over a collection of resources.
filter (Filter, optional):
A filter to include only resources that match the specified criteria.
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
limit (int, optional):
Limit the number of resources in the response. If not specified, defaults to
1000.
names (list[str], optional):
A list of resource names. If there is not at least one resource that matches
each of the elements of `names`, then an error is returned.
offset (int, optional):
The offset of the first resource to return from a collection.
sort (list[Property], optional):
Sort the response by the specified Properties. Can also be a single element.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
continuation_token=<PASSWORD>,
filter=filter,
ids=ids,
limit=limit,
names=names,
offset=offset,
sort=sort,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._syslog_api.api20_syslog_servers_settings_get_with_http_info
_process_references(references, ['ids', | |
"""Reader for the GMSH file format."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2009 <NAME>, <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from six.moves import range
from functools import reduce
import numpy as np
#import numpy.linalg as la
from pytools import memoize_method, Record
from meshpy.gmsh import ( # noqa
ScriptSource, LiteralSource, FileSource, ScriptWithFilesSource)
__doc__ = """
.. exception:: GmshFileFormatError
Element types
-------------
.. autoclass:: GmshElementBase
Simplex Elements
^^^^^^^^^^^^^^^^
.. autoclass:: GmshSimplexElementBase
.. autoclass:: GmshPoint
.. autoclass:: GmshIntervalElement
.. autoclass:: GmshTriangularElement
.. autoclass:: GmshIncompleteTriangularElement
.. autoclass:: GmshTetrahedralElement
Tensor Product Elements
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: GmshTensorProductElementBase
.. autoclass:: GmshQuadrilateralElement
.. autoclass:: GmshHexahedralElement
Receiver interface
------------------
.. autoclass:: GmshMeshReceiverBase
Receiver example implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: GmshMeshReceiverNumPy
Reader
------
.. autoclass:: ScriptSource
.. autoclass:: FileSource
.. autoclass:: ScriptWithFilesSource
.. autofunction:: read_gmsh
.. autofunction:: generate_gmsh
"""
# {{{ tools
def generate_triangle_vertex_tuples(order):
    """Yield the three corner-node tuples of an order-*order* triangle."""
    for corner in ((0, 0), (order, 0), (0, order)):
        yield corner
def generate_triangle_edge_tuples(order):
    """Yield the edge-interior node tuples of an order-*order* triangle.

    Edges are walked in gmsh order: bottom, hypotenuse, then left side.
    """
    for step in range(1, order):
        yield (step, 0)
    for step in range(1, order):
        yield (order - step, step)
    for step in range(1, order):
        yield (0, order - step)
def generate_triangle_volume_tuples(order):
    """Yield the interior node tuples of an order-*order* triangle."""
    for row in range(1, order):
        for col in range(1, order - row):
            yield (col, row)
def generate_quad_vertex_tuples(dim, order):
    """Yield the corner-node tuples of a *dim*-dimensional quad/hex element."""
    from pytools import \
            generate_nonnegative_integer_tuples_below
    for corner in generate_nonnegative_integer_tuples_below(2, dim):
        # Scale each 0/1 corner coordinate up to the element order.
        yield tuple(order * component for component in corner)
class LineFeeder:
    """Line iterator with single-line lookahead.

    Wraps a line iterable so callers may peek (:meth:`has_next_line`)
    before consuming (:meth:`get_next_line`); returned lines are stripped.
    """

    def __init__(self, line_iterable):
        self.line_iterable = iter(line_iterable)
        # Buffered lookahead line, or None when nothing is buffered.
        self.next_line = None

    def has_next_line(self):
        """Return True if another line is available, buffering it if so."""
        if self.next_line is not None:
            return True
        try:
            self.next_line = next(self.line_iterable)
        except StopIteration:
            return False
        return True

    def get_next_line(self):
        """Return the next line, stripped.

        Raises GmshFileFormatError when the input is exhausted.
        """
        buffered = self.next_line
        if buffered is not None:
            self.next_line = None
            return buffered.strip()
        try:
            buffered = next(self.line_iterable)
        except StopIteration:
            raise GmshFileFormatError("unexpected end of file")
        return buffered.strip()
# }}}
# {{{ element info
class GmshElementBase(object):
    """Common base class for gmsh element descriptors.

    .. automethod:: vertex_count
    .. automethod:: node_count
    .. automethod:: lexicographic_node_tuples
    .. automethod:: get_lexicographic_gmsh_node_indices
    .. method:: equidistant_unit_nodes

        (Implemented by subclasses)
    """

    def __init__(self, order):
        # Polynomial order of the element.
        self.order = order
# {{{ simplices
class GmshSimplexElementBase(GmshElementBase):
    """Base class for simplex (interval/triangle/tetrahedron) elements."""

    def vertex_count(self):
        """Return the number of vertices of the simplex: dimensions + 1."""
        return self.dimensions + 1

    @memoize_method
    def node_count(self):
        """Return the number of interpolation nodes in this element.

        This is the binomial coefficient C(order + d, d), computed with
        pure integer arithmetic.
        """
        d = self.dimensions
        o = self.order
        from operator import mul
        from pytools import factorial
        # Floor division is exact here (the product of d consecutive
        # integers is divisible by d!) and avoids the float rounding that
        # int(a / b) could introduce for large node counts under the
        # __future__ true-division import at the top of this file.
        # NOTE(review): assumes pytools.factorial returns an int — confirm.
        return reduce(mul, (o + 1 + i for i in range(d)), 1) // factorial(d)

    @memoize_method
    def lexicographic_node_tuples(self):
        """Generate tuples enumerating the node indices present
        in this element. Each tuple has a length equal to the dimension
        of the element. The tuples constituents are non-negative integers
        whose sum is less than or equal to the order of the element.
        """
        from pytools import \
                generate_nonnegative_integer_tuples_summing_to_at_most
        result = list(
            generate_nonnegative_integer_tuples_summing_to_at_most(
                self.order, self.dimensions))

        assert len(result) == self.node_count()
        return result

    @memoize_method
    def get_lexicographic_gmsh_node_indices(self):
        """Return an index array mapping lexicographic node order to the
        order used by gmsh (see :meth:`gmsh_node_tuples`)."""
        gmsh_tup_to_index = dict(
            (tup, i)
            for i, tup in enumerate(self.gmsh_node_tuples()))

        return np.array([gmsh_tup_to_index[tup]
                         for tup in self.lexicographic_node_tuples()],
                        dtype=np.intp)
class GmshPoint(GmshSimplexElementBase):
    """Zero-dimensional (point) element."""

    dimensions = 0

    @memoize_method
    def gmsh_node_tuples(self):
        """A point has exactly one node, identified by the empty tuple."""
        return [()]
class GmshIntervalElement(GmshSimplexElementBase):
    """One-dimensional interval element."""

    dimensions = 1

    @memoize_method
    def gmsh_node_tuples(self):
        """Gmsh lists the two endpoints first, then the interior nodes."""
        endpoints = [(0,), (self.order,)]
        interior = [(i,) for i in range(1, self.order)]
        return endpoints + interior
class GmshIncompleteTriangularElement(GmshSimplexElementBase):
    """Serendipity triangle: vertex and edge nodes only, no interior nodes."""

    dimensions = 2

    # The redundant __init__ override was removed: it only duplicated
    # GmshElementBase.__init__ (both just stored self.order = order).

    @memoize_method
    def gmsh_node_tuples(self):
        """Return node tuples in gmsh ordering: vertices first, then edges."""
        return (list(generate_triangle_vertex_tuples(self.order))
                + list(generate_triangle_edge_tuples(self.order)))
class GmshTriangularElement(GmshSimplexElementBase):
    """Full triangle element, including interior nodes."""

    dimensions = 2

    @memoize_method
    def gmsh_node_tuples(self):
        """Return gmsh's hard-coded node ordering for this element order.

        Only orders 1 through 5 are tabulated; any other order raises
        KeyError.
        """
        node_tables = {
            1: [
                (0, 0), (1, 0), (0, 1),
                ],
            2: [
                (0, 0), (2, 0), (0, 2), (1, 0), (1, 1), (0, 1),
                ],
            3: [
                (0, 0), (3, 0), (0, 3), (1, 0), (2, 0), (2, 1), (1, 2), (0, 2),
                (0, 1), (1, 1),
                ],
            4: [
                (0, 0), (4, 0), (0, 4), (1, 0), (2, 0), (3, 0), (3, 1), (2, 2),
                (1, 3), (0, 3), (0, 2), (0, 1), (1, 1), (2, 1), (1, 2),
                ],
            5: [
                (0, 0), (5, 0), (0, 5), (1, 0), (2, 0), (3, 0), (4, 0), (4, 1),
                (3, 2), (2, 3), (1, 4), (0, 4), (0, 3), (0, 2), (0, 1), (1, 1),
                (3, 1), (1, 3), (2, 1), (2, 2), (1, 2),
                ],
            }
        return node_tables[self.order]
class GmshTetrahedralElement(GmshSimplexElementBase):
    """Full tetrahedral element, including interior nodes."""

    dimensions = 3

    @memoize_method
    def gmsh_node_tuples(self):
        """Return gmsh's hard-coded (and rather idiosyncratic) node
        ordering for this element order.

        Only orders 1 through 5 are tabulated; any other order raises
        KeyError.
        """
        node_tables = {
            1: [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)],
            2: [
                (0, 0, 0), (2, 0, 0), (0, 2, 0), (0, 0, 2), (1, 0, 0), (1, 1, 0),
                (0, 1, 0), (0, 0, 1), (0, 1, 1), (1, 0, 1)],
            3: [
                (0, 0, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3), (1, 0, 0), (2, 0, 0),
                (2, 1, 0), (1, 2, 0), (0, 2, 0), (0, 1, 0), (0, 0, 2), (0, 0, 1),
                (0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 0, 1), (1, 1, 0), (1, 0, 1),
                (0, 1, 1), (1, 1, 1)],
            4: [
                (0, 0, 0), (4, 0, 0), (0, 4, 0), (0, 0, 4), (1, 0, 0), (2, 0, 0),
                (3, 0, 0), (3, 1, 0), (2, 2, 0), (1, 3, 0), (0, 3, 0), (0, 2, 0),
                (0, 1, 0), (0, 0, 3), (0, 0, 2), (0, 0, 1), (0, 1, 3), (0, 2, 2),
                (0, 3, 1), (1, 0, 3), (2, 0, 2), (3, 0, 1), (1, 1, 0), (1, 2, 0),
                (2, 1, 0), (1, 0, 1), (2, 0, 1), (1, 0, 2), (0, 1, 1), (0, 1, 2),
                (0, 2, 1), (1, 1, 2), (2, 1, 1), (1, 2, 1), (1, 1, 1)],
            5: [
                (0, 0, 0), (5, 0, 0), (0, 5, 0), (0, 0, 5), (1, 0, 0), (2, 0, 0),
                (3, 0, 0), (4, 0, 0), (4, 1, 0), (3, 2, 0), (2, 3, 0), (1, 4, 0),
                (0, 4, 0), (0, 3, 0), (0, 2, 0), (0, 1, 0), (0, 0, 4), (0, 0, 3),
                (0, 0, 2), (0, 0, 1), (0, 1, 4), (0, 2, 3), (0, 3, 2), (0, 4, 1),
                (1, 0, 4), (2, 0, 3), (3, 0, 2), (4, 0, 1), (1, 1, 0), (1, 3, 0),
                (3, 1, 0), (1, 2, 0), (2, 2, 0), (2, 1, 0), (1, 0, 1), (3, 0, 1),
                (1, 0, 3), (2, 0, 1), (2, 0, 2), (1, 0, 2), (0, 1, 1), (0, 1, 3),
                (0, 3, 1), (0, 1, 2), (0, 2, 2), (0, 2, 1), (1, 1, 3), (3, 1, 1),
                (1, 3, 1), (2, 1, 2), (2, 2, 1), (1, 2, 2), (1, 1, 1), (2, 1, 1),
                (1, 2, 1), (1, 1, 2)],
            }
        return node_tables[self.order]
# }}}
# {{{ tensor product elements
class GmshTensorProductElementBase(GmshElementBase):
    """Base class for tensor-product (quadrilateral/hexahedral) elements."""

    def vertex_count(self):
        """Return the number of corner vertices, 2**dimensions."""
        return 2**self.dimensions

    @memoize_method
    def node_count(self):
        """Return the number of interpolation nodes, (order+1)**dimensions."""
        return (self.order + 1) ** self.dimensions

    @memoize_method
    def lexicographic_node_tuples(self):
        """Generate tuples enumerating the node indices present
        in this element. Each tuple has a length equal to the dimension
        of the element; each entry ranges from 0 to the element order.
        """
        from pytools import \
                generate_nonnegative_integer_tuples_below
        tuples = list(
            generate_nonnegative_integer_tuples_below(
                self.order + 1, self.dimensions))

        assert len(tuples) == self.node_count()
        return tuples

    @memoize_method
    def get_lexicographic_gmsh_node_indices(self):
        """Return an index array mapping lexicographic node order to the
        order used by gmsh (see :meth:`gmsh_node_tuples`)."""
        index_by_tuple = dict(
            (tup, i)
            for i, tup in enumerate(self.gmsh_node_tuples()))

        return np.array([index_by_tuple[tup]
                         for tup in self.lexicographic_node_tuples()],
                        dtype=np.intp)
class GmshQuadrilateralElement(GmshTensorProductElementBase):
dimensions = 2
@memoize_method
def gmsh_node_tuples(self):
# gmsh's node ordering is still on crack
return {
1: [(0, 0), (0, 1), (1, 1), (1, 0), ],
2: [
# start index 0: vertices
(0, 0), (0, 2), (2, 2), (2, 0),
# start index 4: edges
(1, 0), (2, 1), (1, 2), (0, 1),
# start index 8: volume
(1, 1)],
| |
Caesar(_Cipher):
"""
The Caesar Cipher
"""
class Tools:
@staticmethod
def _encDec(text, key, alphabet, isEncrypt):
ans = ""
for char in text:
try:
alphIndex = alphabet.index(char)
except ValueError:
raise Exception("Can't find char '" + char + "' of text in alphabet!")
alphIndex = (alphIndex + isEncrypt * key) % len(alphabet)
ans += alphabet[alphIndex]
return ans
@staticmethod
def encrypt(text, key, alphabet=LOWERS):
"""
Encryption method
:param text: Text to encrypt
:param key: Encryption key
:param alphabet: Alphabet which will be used,
if there is no a value, English is used
:type text: string
:type key: integer
:type alphabet: string
:return: encrypted text
:rtype: string
"""
return Caesar.Tools._encDec(text, key, alphabet, 1)
@staticmethod
def decrypt(text, key, alphabet=LOWERS):
"""
Decryption method
:param text: Text to decrypt
:param key: Decryption key
:param alphabet: Alphabet which will be used,
if there is no a value, English is used
:type text: string
:type key: integer
:type alphabet: string
:return: decrypted text
:rtype: string
"""
return Caesar.Tools._encDec(text, key, alphabet, -1)
class Chao(_Cipher):
    """The Chaocipher.

    key: a shuffled copy of the alphabet (the cipher disk); the plain
    alphabet serves as the plain disk. Both disks are permuted after
    every character, so repeated plaintext letters encrypt differently.
    """

    class Tools:
        @staticmethod
        def permuteAlphabet(alphabet, i, isCrypt):
            """Rotate the disk so position *i* leads, then re-seat one char
            at the nadir (midpoint), per the Chaocipher stepping rules.

            isCrypt selects the cipher-disk permutation (True) or the
            plain-disk permutation (False).
            """
            # Bring the just-used position to the front.
            alphabet = alphabet[i:] + alphabet[:i]
            nadir = len(alphabet) / 2
            half = int(nadir)
            if isCrypt:
                # Cipher disk: pull out position 1, reinsert at the nadir.
                alphabet = (alphabet[0] + alphabet[2:half + 1]
                            + alphabet[1] + alphabet[half + 1:])
            else:
                # Plain disk: rotate one extra step, then move position 2
                # to the nadir.
                alphabet = alphabet[1:] + alphabet[0]
                alphabet = (alphabet[:2] + alphabet[3:half + 1]
                            + alphabet[2] + alphabet[half + 1:])
            return alphabet

        @staticmethod
        def _EncDec(text, key, tp_alphabet, isEncrypt):
            """Run the Chaocipher over *text*, permuting both disks per char."""
            result = ''
            for ch in text:
                try:
                    if isEncrypt:
                        pos = tp_alphabet.index(ch)
                        result += key[pos]
                    else:
                        pos = key.index(ch)
                        result += tp_alphabet[pos]
                except ValueError:
                    # NOTE(review): on Python 3 this str+bytes concatenation
                    # raises TypeError instead of the intended Exception —
                    # preserved as original behavior.
                    wrchar = ch.encode('utf-8')
                    raise Exception("Can't find char '" + wrchar + "' of text in alphabet!")
                key = Chao.Tools.permuteAlphabet(key, pos, True)
                tp_alphabet = Chao.Tools.permuteAlphabet(tp_alphabet, pos, False)
            return result

        @staticmethod
        def encrypt(text, key, alphabet=ALL_CHARS):
            """
            Encryption method

            :param text: Text to encrypt
            :param key: Encryption key (a shuffled alphabet)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: encrypted text
            :rtype: string
            """
            return Chao.Tools._EncDec(text, key, alphabet, True)

        @staticmethod
        def decrypt(text, key, alphabet=ALL_CHARS):
            """
            Decryption method

            :param text: Text to decrypt
            :param key: Decryption key (a shuffled alphabet)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: decrypted text
            :rtype: string
            """
            return Chao.Tools._EncDec(text, key, alphabet, False)
class ColumnarTransposition(_Cipher):
    """The Columnar Transposition Cipher.

    Only the relative order of the key's characters matters, not their
    values. The text is padded with the alphabet's last character up to a
    multiple of len(key), so ideally len(text) % len(key) == 0.
    """

    class Tools:
        @staticmethod
        def get_index_in_alphabet(char, alphabet):
            """Return the index of the alphabet group containing *char*.

            NOTE(review): if *char* occurs in no group, this silently
            returns the last group's index (original behavior) — confirm
            callers never pass out-of-alphabet key characters.
            """
            for j, group in enumerate(alphabet):
                if char in group:
                    return j
            return j

        @staticmethod
        def encrypt(text, key, alphabet=ALL_CHARS):
            """
            Encryption method

            :param text: Text to encrypt
            :param key: Encryption key
            :param alphabet: Alphabet which will be used, if there is no a value,
                             English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: text
            :rtype: string
            """
            # Pad the text with the alphabet's last character so it fills
            # a whole number of rows.
            remainder = len(text) % len(key)
            if remainder != 0:
                text += alphabet[-1] * (len(key) - remainder)
            ranks = [ColumnarTransposition.Tools.get_index_in_alphabet(c, alphabet)
                     for c in key]
            # Read the columns in the order given by the sorted key ranks.
            order = sorted(enumerate(ranks), key=lambda pair: pair[1])
            return u"".join(text[column::len(key)] for column, _ in order)

        @staticmethod
        def decrypt(text, key, alphabet=ALL_CHARS):
            """
            Decryption method

            :param text: Text to decrypt
            :param key: Decryption key
            :param alphabet: Alphabet which will be used, if there is no a value,
                             English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: text
            :rtype: string
            """
            ranks = [ColumnarTransposition.Tools.get_index_in_alphabet(c, alphabet)
                     for c in key]
            order = sorted(enumerate(ranks), key=lambda pair: pair[1])
            rows = int(len(text) / len(key))
            # Slice the ciphertext back into columns, each placed at its
            # original key position.
            columns = [0] * len(key)
            for i, start in enumerate(range(0, len(text), rows)):
                columns[order[i][0]] = text[start: start + rows]
            # Read the reconstructed grid row by row.
            return u"".join(columns[c][r]
                            for r in range(rows)
                            for c in range(len(columns)))
# KEY=???
class FourSquare(_Cipher):
    """The Four-Square Cipher.

    key is a pair of keywords; each keys one of the two cipher squares,
    while two plain Polybius squares hold the unkeyed alphabet.
    """

    class Tools:
        @staticmethod
        def _enc(text, key, alphabet, isEncrypt):
            """Transform *text* pairwise through the four Polybius squares.

            isEncrypt True encrypts (plain squares -> keyed squares);
            False decrypts (keyed squares -> plain squares).
            """
            keyed_a = _PolybiusSquare(alphabet, key[0])
            keyed_b = _PolybiusSquare(alphabet, key[1])
            plain = _PolybiusSquare(alphabet, "")
            # Pad to an even length with the alphabet's last character.
            if len(text) % 2:
                text += alphabet[-1][0]
            odd = text[1::2]
            even = text[::2]
            out = u""
            if isEncrypt:
                for first, second in zip(even, odd):
                    coords = plain.get_coordinates(first)
                    row_a, col_a = coords[0], coords[1]
                    coords = plain.get_coordinates(second)
                    row_b, col_b = coords[0], coords[1]
                    # Swap columns across the pair, reading from the
                    # keyed squares.
                    out += keyed_a.get_char(row_a, col_b)
                    out += keyed_b.get_char(row_b, col_a)
            else:
                for first, second in zip(even, odd):
                    coords = keyed_a.get_coordinates(first)
                    row_a, col_a = coords[0], coords[1]
                    coords = keyed_b.get_coordinates(second)
                    row_b, col_b = coords[0], coords[1]
                    out += plain.get_char(row_a, col_b)
                    out += plain.get_char(row_b, col_a)
            return out

        @staticmethod
        def encrypt(text, key, alphabet=ALL_CHARS):
            """
            Encryption method

            :param text: Text to encrypt
            :param key: Encryption key (pair of keywords)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: sequence of two strings
            :type alphabet: string
            :return: text
            :rtype: string
            """
            return FourSquare.Tools._enc(text, key, alphabet, True)

        @staticmethod
        def decrypt(text, key, alphabet=ALL_CHARS):
            """
            Decryption method

            :param text: Text to decrypt
            :param key: Decryption key (pair of keywords)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: sequence of two strings
            :type alphabet: string
            :return: text
            :rtype: string
            """
            return FourSquare.Tools._enc(text, key, alphabet, False)
class Gronsfeld(_Cipher):
    """The Gronsfeld Cipher.

    A Vigenere variant: the key is a sequence of integers, and each key
    element shifts the corresponding text character within *alphabet*.
    (The FIXME markers previously attached to this class referred to
    encrypt() passing its arguments to _EncDec in the wrong order; that
    is fixed below.)
    """

    class Tools:
        @staticmethod
        def _EncDec(text, key, alphabet, isEncrypt):
            """Shift every character of *text* by the cyclically repeating
            integer *key*; isEncrypt is +1 to encrypt, -1 to decrypt."""
            ans = ""
            for i in range(len(text)):
                char = text[i]
                # The key repeats cyclically over the text.
                keyi = key[i % len(key)]
                try:
                    alphIndex = (alphabet.index(char) +
                                 isEncrypt * keyi) % len(alphabet)
                except ValueError:
                    # NOTE(review): on Python 3 this str+bytes concatenation
                    # raises TypeError instead of the intended Exception —
                    # preserved as original behavior.
                    wrchar = char.encode('utf-8')
                    raise Exception("Can't find char '" + wrchar + "' of text in alphabet!")
                ans += alphabet[alphIndex]
            return ans

        @staticmethod
        def encrypt(text, key, alphabet=ALL_CHARS):
            """
            Encryption method

            :param text: Text to encrypt
            :param key: Encryption key (sequence of integers)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: sequence of integers
            :type alphabet: string
            :return: text
            :rtype: string
            """
            # BUG FIX: arguments were previously passed as
            # (alphabet, key, text, 1), swapping text and alphabet relative
            # to _EncDec's (text, key, alphabet, isEncrypt) signature;
            # decrypt() already used the correct order.
            return Gronsfeld.Tools._EncDec(text, key, alphabet, 1)

        @staticmethod
        def decrypt(text, key, alphabet=ALL_CHARS):
            """
            Decryption method

            :param text: Text to decrypt
            :param key: Decryption key (sequence of integers)
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: sequence of integers
            :type alphabet: string
            :return: text
            :rtype: string
            """
            return Gronsfeld.Tools._EncDec(text, key, alphabet, -1)
class Keyword(_Cipher):
    """The Keyword Cipher.

    The key (duplicates removed) is prepended to the alphabet to form the
    substitution alphabet; ideally the key is a shuffle of the alphabet.
    """

    class Tools:
        @staticmethod
        def _encDec(alphabet, key, text, isEncrypt):
            """Substitute *text* between *alphabet* and the keyed alphabet.

            isEncrypt is 1 to encrypt; any other value decrypts.
            """
            # Deduplicate the key, keeping first occurrences, then append
            # the remaining alphabet to build the substitution string.
            deduped_key = "".join(OrderedDict.fromkeys(key))
            substitution = "".join(
                OrderedDict.fromkeys(deduped_key + "".join(alphabet)))
            out = ""
            for symbol in text:
                try:
                    if isEncrypt == 1:
                        out += substitution[alphabet.index(symbol)]
                    else:
                        out += alphabet[substitution.index(symbol)]
                except ValueError:
                    raise Exception("Can't find char '" + symbol + "' of text in alphabet!")
            return out

        @staticmethod
        def encrypt(text, key, alphabet=ALL_CHARS):
            """
            Encryption method

            :param text: Text to encrypt
            :param key: Encryption key
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: text
            :rtype: string
            """
            return Keyword.Tools._encDec(alphabet, key, text, 1)

        @staticmethod
        def decrypt(text, key, alphabet=ALL_CHARS):
            """
            Decryption method

            :param text: Text to decrypt
            :param key: Decryption key
            :param alphabet: Alphabet which will be used,
                             if there is no a value, English is used
            :type text: string
            :type key: string
            :type alphabet: string
            :return: text
            :rtype: string
            """
            return Keyword.Tools._encDec(alphabet, key, text, -1)
# KEY=??? (key handling unclear: repeated runs give sometimes-equal, sometimes-different results!)
class Myszkowski(_Cipher):
"""
-> Transposition
The Myszkowski Transposition Cipher
Key should be iterable and len(key) should be <= len(text)
"""
class Tools:
@staticmethod
def _get_index_in_alphabet(char, alphabet):
for j in range(len(alphabet)):
try:
alphabet[j].index(char)
break
except ValueError:
pass
| |
# --- Spycloud API test fixtures --------------------------------------------
# Canned watchlist response: a single active, verified domain identifier.
WATCHLIST_RESPONSE_MOCK = {
    "cursor": "",
    "hits": 1,
    "results": [
        {
            "status": "ACTIVE",
            "infected_user_record_count": 115,
            "last_discovered": "2020-04-30T00:09:27Z",
            "verification_secret": "",
            "verified": "YES",
            "infected_employee_record_count": 60,
            "identifier_type": "domain",
            "infected_consumer_record_count": 55,
            "corporate_record_count": 11000,
            "identifier_name": "example.org"
        }
    ]
}

# Canned breach-data response for an email lookup: two records, one each
# from breach sources 17551 and 17494.
# NOTE(review): the "<EMAIL>"/"<PASSWORD>" values are secret-scrubbing
# placeholders from the repository history, not real fixture data —
# restore realistic values if a test ever asserts on them.
BREACH_EMAIL_RESPONSE_MOCK = {
    "cursor": "",
    "hits": 2,
    "results": [
        {
            "city": "()",
            "domain": "example.org",
            "infected_path": "C:/Users/Usama/AppData/Local"
                             "/Temp/6210230526.exe",
            "infected_time": "2020-03-26 16:56:49",
            "country": "()",
            "isp": "()",
            "email_username": "admin",
            "email": "<EMAIL>",
            "spycloud_publish_date": "2020-04-16T00:00:00Z",
            "password": "<PASSWORD>",
            "target_url": "http://localhost:3000/en/session",
            "sighting": 1,
            "email_domain": "example.org",
            "source_id": 17551,
            "infected_machine_id": "0bb046a5-2ef2-4ec3-abbe-ba76f5b78e6c",
            "password_plaintext": "<PASSWORD>",
            "password_type": "<PASSWORD>",
            "document_id": "90f9c47d-c86d-400f-9a57-38fed22b5fad",
            "severity": 25
        },
        {
            "domain": "example.org",
            "target_url": "192.168.127.12",
            "infected_machine_id": "2fc6eba4-738d-11ea-807f-0a7889d429db",
            "user_browser": "Google Chrome New",
            "country": "India",
            "ip_addresses": [
                "192.168.127.12"
            ],
            "email_username": "admin",
            "email": "<EMAIL>",
            "spycloud_publish_date": "2020-04-02T00:00:00Z",
            "password": "<PASSWORD>",
            "sighting": 6,
            "email_domain": "example.org",
            "source_id": 17494,
            "password_plaintext": "<PASSWORD>",
            "password_type": "plaintext",
            "document_id": "d27e8c1e-dd07-4237-bf51-40bcb5744fcc",
            "severity": 25
        }
    ]
}
# Canned breach-catalog response for source id 17551 ("Vidar Stealer").
# BUG FIX: the "password" asset count had been destroyed by a
# secret-scrubbing pass (it read the bare token <PASSWORD>, which is a
# Python syntax error). It is restored below with a plausible integer so
# the module parses again — confirm the real count if exact numbers matter.
CATALOG_17551_RESPONSE_MOCK = {
    "cursor": "",
    "hits": 1,
    "results": [
        {
            "confidence": 3,
            "description": "test description for 17551",
            "title": "Vidar Stealer",
            "type": "PRIVATE",
            "acquisition_date": "2020-04-15T00:00:00Z",
            "site": "n/a",
            "spycloud_publish_date": "2020-04-16T00:00:00Z",
            "site_description": "test site description for 17551",
            "uuid": "3a7fc3d4-2f57-4076-951c-287332a4d1f8",
            "num_records": 1107872,
            "id": 17551,
            "assets": {
                "username": 540183,
                "city": 1107503,
                "target_url": 1107872,
                "infected_time": 1107503,
                "user_browser": 93,
                "country": 1107503,
                "ip_addresses": 1097638,
                "isp": 1107503,
                "infected_path": 1107503,
                "postal_code": 791450,
                "password": 1107503,  # placeholder: original count redacted
                "email": 567689,
                "infected_machine_id": 1107503
            }
        }
    ]
}
# Canned breach-catalog response for source id 17494
# ("Russian Password Stealer").
# BUG FIX: the "password" asset count had been destroyed by a
# secret-scrubbing pass (it read the bare token <PASSWORD>, which is a
# Python syntax error). It is restored below with a plausible integer so
# the module parses again — confirm the real count if exact numbers matter.
CATALOG_17494_RESPONSE_MOCK = {
    "cursor": "",
    "hits": 1,
    "results": [
        {
            "confidence": 3,
            "description": "test description for 17494",
            "title": "Russian Password Stealer",
            "type": "PRIVATE",
            "acquisition_date": "2020-04-01T00:00:00Z",
            "site": "n/a",
            "spycloud_publish_date": "2020-04-02T00:00:00Z",
            "site_description": "test site description for 17494",
            "uuid": "1293a093-5b3b-42c5-aa90-d5784ea8374f",
            "num_records": 3142306,
            "id": 17494,
            "assets": {
                "username": 1563727,
                "city": 330,
                "target_url": 3142306,
                "infected_machine_id": 3142306,
                "user_browser": 3142279,
                "country": 3142289,
                "ip_addresses": 3049094,
                "infected_path": 3856,
                "password": 3142306,  # placeholder: original count redacted
                "email": 1578579
            }
        }
    ]
}
# Catalog lookup that matches nothing (empty result set).
CATALOG_PASS_RESPONSE_MOCK = {
    "cursor": "",
    "hits": 0,
    "results": []
}

# Raw Spycloud error payloads returned for unauthorized/forbidden requests.
SPYCLOUD_401_RESPONSE = {
    "message": "Unauthorized"
}

SPYCLOUD_403_RESPONSE = {
    "message": "Forbidden"
}

# Error envelopes the module under test is expected to emit for the
# corresponding upstream HTTP status codes.
EXPECTED_RESPONSE_401_ERROR = {
    'errors': [
        {
            'code': 'authorization error',
            'message': 'Authorization failed: Unauthorized',
            'type': 'fatal'
        }
    ]
}

EXPECTED_RESPONSE_403_ERROR = {
    'errors': [
        {
            'code': 'authorization error',
            'message': 'Authorization failed: Forbidden',
            'type': 'fatal'
        }
    ]
}

EXPECTED_RESPONSE_404_ERROR = {
    'errors': [
        {
            'code': 'not found',
            'message': 'The Spycloud not found.',
            'type': 'fatal'
        }
    ]
}

EXPECTED_RESPONSE_500_ERROR = {
    'errors': [
        {
            'code': 'internal error',
            'message': 'The Spycloud internal error.',
            'type': 'fatal'
        }
    ]
}
EXPECTED_SUCCESS_RESPONSE = {
'data': {
'indicators': {
'count': 2,
'docs': [
{
'confidence': 'Low',
'description': 'test description for 17551',
'external_ids': [
'17551', '3a7fc3d4-2f57-4076-951c-287332a4d1f8'
],
'external_references': [
{
'description': 'test site description for 17551',
'source_name': 'Spycloud',
'url': 'n/a'
},
{
'description': 'test description for 17551',
'external_id':
'3a7fc3d4-2f57-4076-951c-287332a4d1f8',
'source_name': 'Spycloud',
'url': 'https://portal.spycloud.com/breach/catalog'
'/3a7fc3d4-2f57-4076-951c-287332a4d1f8'
}
],
'producer': 'Spycloud',
'schema_version': '1.0.17',
'short_description': 'Vidar Stealer',
'tags': [
'username', 'city', 'target_url', 'infected_time',
'user_browser', 'country', 'ip_addresses', 'isp',
'infected_path', 'postal_code', 'password', 'email',
'infected_machine_id'
],
'title': 'Vidar Stealer',
'type': 'indicator',
'valid_time': {
'start_time': '2020-04-16T00:00:00.000000Z'
}
},
{
'confidence': 'Low',
'description': 'test description for 17494',
'external_ids': [
'17494', '1293a093-5b3b-42c5-aa90-d5784ea8374f'
],
'external_references': [
{
'description': 'test site description for 17494',
'source_name': 'Spycloud',
'url': 'n/a'
},
{
'description': 'test description for 17494',
'external_id':
'1293a093-5b3b-42c5-aa90-d5784ea8374f',
'source_name': 'Spycloud',
'url': 'https://portal.spycloud.com/breach/catalog'
'/1293a093-5b3b-42c5-aa90-d5784ea8374f'
}
],
'producer': 'Spycloud',
'schema_version': '1.0.17',
'short_description': 'Russian Password Stealer',
'tags': [
'username', 'city', 'target_url',
'infected_machine_id', 'user_browser', 'country',
'ip_addresses', 'infected_path', 'password', 'email'
],
'title': 'Russian Password Stealer',
'type': 'indicator',
'valid_time': {
'start_time': '2020-04-02T00:00:00.000000Z'
}
}
]
},
'sightings': {
'count': 2,
'docs': [
{
'confidence': 'High',
'count': 1,
'data': {
'columns': [
{
'name': 'city',
'type': 'string'
},
{
'name': 'infected_path',
'type': 'string'
},
{
'name': 'infected_time',
'type': 'string'
},
{
'name': 'country',
'type': 'string'
},
{
'name': 'isp',
'type': 'string'
},
{
'name': 'email',
'type': 'string'
},
{
'name': 'password',
'type': 'string'
},
{
'name': 'target_url',
'type': 'string'
},
{
'name': 'password_plaintext',
'type': 'string'
},
{
'name': 'password_type',
'type': 'string'
}
],
'rows': [
[
'()',
'C:/Users/Usama/AppData/Local/Temp'
'/6210230526.exe',
'2020-03-26 16:56:49',
'()',
'()',
'<EMAIL>',
'test123',
'http://localhost:3000/en/session',
'test123',
'plaintext'
]
]
},
'description': 'Present in Vidar Stealer',
'external_ids': [
'90f9c47d-c86d-400f-9a57-38fed22b5fad',
'17551',
'0bb046a5-2ef2-4ec3-abbe-ba76f5b78e6c'
],
'internal': False,
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-16T00:00:00.000000Z',
'end_time': '2020-04-16T00:00:00.000000Z'
},
'relations': [],
'schema_version': '1.0.17',
'severity': 'High',
'source': 'Spycloud',
'source_uri': 'https://portal.spycloud.com/breach/catalog/'
'3a7fc3d4-2f57-4076-951c-287332a4d1f8',
'targets': [
{
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-16T00:00:00.000000Z',
'end_time': '2020-04-16T00:00:00.000000Z'
},
'type': 'email'
}
],
'title': 'Reported to Spycloud',
'type': 'sighting'
},
{
'confidence': 'High',
'count': 6,
'data': {
'columns': [
{
'name': 'target_url',
'type': 'string'
},
{
'name': 'user_browser',
'type': 'string'
},
{
'name': 'country',
'type': 'string'
},
{
'name': 'ip_addresses',
'type': 'string'
},
{
'name': 'email',
'type': 'string'
},
{
'name': 'password',
'type': 'string'
},
{
'name': 'password_plaintext',
'type': 'string'
},
{
'name': 'password_type',
'type': 'string'
}
],
'rows': [
[
'192.168.127.12',
'Google Chrome New',
'India',
['192.168.127.12'],
'<EMAIL>',
'Admin@1234',
'Admin@<PASSWORD>',
'plaintext'
]
]
},
'description': 'Present in Russian Password Stealer',
'external_ids': [
'd27e8c1e-dd07-4237-bf51-40bcb5744fcc',
'17494',
'2fc6eba4-738d-11ea-807f-0a7889d429db'
],
'internal': False,
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-02T00:00:00.000000Z',
'end_time': '2020-04-02T00:00:00.000000Z'
},
'relations': [],
'schema_version': '1.0.17',
'severity': 'High',
'source': 'Spycloud',
'source_uri': 'https://portal.spycloud.com/breach/catalog/'
'1293a093-5b3b-42c5-aa90-d5784ea8374f',
'targets': [
{
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-02T00:00:00.000000Z',
'end_time': '2020-04-02T00:00:00.000000Z'
},
'type': 'email'
}
],
'title': 'Reported to Spycloud',
'type': 'sighting'
}
]
}
}
}
EXPECTED_SUCCESS_RESPONSE_WITHOUT_1_CATALOG = {
'data': {
'indicators': {
'count': 1,
'docs': [
{
'confidence': 'Low',
'description': 'test description for 17551',
'external_ids': [
'17551',
'3a7fc3d4-2f57-4076-951c-287332a4d1f8'
],
'external_references': [
{
'description': 'test site description for 17551',
'source_name': 'Spycloud',
'url': 'n/a'
},
{
'description': 'test description for 17551',
'external_id':
'3a7fc3d4-2f57-4076-951c-287332a4d1f8',
'source_name': 'Spycloud',
'url':
'https://portal.spycloud.com/breach/catalog/'
'3a7fc3d4-2f57-4076-951c-287332a4d1f8'
}
],
'producer': 'Spycloud',
'schema_version': '1.0.17',
'short_description': 'Vidar Stealer',
'tags': [
'username',
'city',
'target_url',
'infected_time',
'user_browser',
'country',
'ip_addresses',
'isp',
'infected_path',
'postal_code',
'password',
'email',
'infected_machine_id'
],
'title': 'Vidar Stealer',
'type': 'indicator',
'valid_time': {
'start_time': '2020-04-16T00:00:00.000000Z'
}
}
]
},
'sightings': {
'count': 2,
'docs': [
{
'confidence': 'High',
'count': 1,
'data': {
'columns': [
{
'name': 'city',
'type': 'string'
},
{
'name': 'infected_path',
'type': 'string'
},
{
'name': 'infected_time',
'type': 'string'
},
{
'name': 'country',
'type': 'string'
},
{
'name': 'isp',
'type': 'string'
},
{
'name': 'email',
'type': 'string'
},
{
'name': 'password',
'type': 'string'
},
{
'name': 'target_url',
'type': 'string'
},
{
'name': '<PASSWORD>',
'type': 'string'
},
{
'name': '<PASSWORD>',
'type': 'string'
}
],
'rows': [
[
'()',
'C:/Users/Usama/AppData/Local/'
'Temp/6210230526.exe',
'2020-03-26 16:56:49',
'()',
'()',
'<EMAIL>',
'test123',
'http://localhost:3000/en/session',
'test123',
'plaintext'
]
]
},
'description': 'Present in V<NAME>',
'external_ids': [
'90f9c47d-c86d-400f-9a57-38fed22b5fad',
'17551',
'0bb046a5-2ef2-4ec3-abbe-ba76f5b78e6c'
],
'internal': False,
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-16T00:00:00.000000Z',
'end_time': '2020-04-16T00:00:00.000000Z'
},
'relations': [],
'schema_version': '1.0.17',
'severity': 'High',
'source': 'Spycloud',
'source_uri': 'https://portal.spycloud.com/breach/catalog/'
'3a7fc3d4-2f57-4076-951c-287332a4d1f8',
'targets': [
{
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-16T00:00:00.000000Z',
'end_time': '2020-04-16T00:00:00.000000Z'
},
'type': 'email'
}
],
'title': 'Reported to Spycloud',
'type': 'sighting'
},
{
'confidence': 'High',
'count': 6,
'data': {
'columns': [
{
'name': 'target_url',
'type': 'string'
},
{
'name': 'user_browser',
'type': 'string'
},
{
'name': 'country',
'type': 'string'
},
{
'name': 'ip_addresses',
'type': 'string'
},
{
'name': 'email',
'type': 'string'
},
{
'name': 'password',
'type': 'string'
},
{
'name': '<PASSWORD>',
'type': 'string'
},
{
'name': 'password_type',
'type': 'string'
}
],
'rows': [
[
'192.168.127.12',
'Google Chrome New',
'India',
['192.168.127.12'],
'<EMAIL>',
'Admin@1234',
'Admin@1234',
'plaintext'
]
]
},
'description': 'Present in breach',
'external_ids': [
'd27e8c1e-dd07-4237-bf51-40bcb5744fcc',
'17494',
'2fc6eba4-738d-11ea-807f-0a7889d429db'
],
'internal': False,
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-02T00:00:00.000000Z',
'end_time': '2020-04-02T00:00:00.000000Z'
},
'relations': [],
'schema_version': '1.0.17',
'severity': 'High',
'source': 'Spycloud',
'targets': [
{
'observables': [
{
'type': 'email',
'value': '<EMAIL>'
}
],
'observed_time': {
'start_time': '2020-04-02T00:00:00.000000Z',
'end_time': '2020-04-02T00:00:00.000000Z'
},
'type': 'email'
}
],
'title': 'Reported to Spycloud',
'type': 'sighting'
}
]
}
},
'errors': [
{
'code': 'not found',
'message': 'SpyCloud did not return results for 17494',
'type': 'warning'
}
]
}
# Expected envelope when the upstream TLS handshake fails verification
# (note the implicit string-literal concatenation in 'message').
EXPECTED_RESPONSE_SSL_ERROR = {
    'errors': [
        {
            'code': 'unknown',
            'message': 'Unable to verify SSL certificate: self signed '
                       'certificate',
            'type': 'fatal'
        }
    ]
}
# Expected envelope when the request carries no Authorization header at all.
EXPECTED_AUTHORIZATION_HEADER_ERROR = {
    'errors': [
        {
            'code': 'authorization error',
            'message': 'Authorization failed: Authorization header is missing',
            'type': 'fatal'
        }
    ]
}
EXPECTED_AUTHORIZATION_TYPE_ERROR = {
'errors': [
{
'code': 'authorization error',
'message': 'Authorization failed: Wrong authorization | |
for i in xrange(1, len(k_i)):
sk_i = k_i[i:]
sk_j = k_j[:-i]
if sk_i == sk_j:
return i
return len(k_i)
# init
self.alignment_table_sequence = ''
p_kmer = ''
self.alignment_table_sequence = self.twoD_alignment_table[0][2]
p_kmer = self.twoD_alignment_table[0][2]
for t, c, kmer in self.twoD_alignment_table:
if kmer != p_kmer:
i = find_kmer_overlap(p_kmer, kmer)
self.alignment_table_sequence += kmer[-i:]
p_kmer = kmer
else:
continue
return
    def get_strand_event_map(self):
        """Maps the events from the template and complement strands to their base called kmers.

        The map generated by this function is called the "strand_event_map" because it
        only works for mapping a strand read (1D read) to its own events.
        """
        def make_map(events):
            # event_map[b] = index of the event assigned to called base b.
            event_map = [0]
            previous_prob = 0
            # start at row 1; row 0 seeded the map above
            for i, line in islice(enumerate(events), 1, None):
                move = line[6]       # basecaller 'move' value (column 6)
                this_prob = line[8]  # event probability (column 8) -- presumably posterior; TODO confirm
                if move == 1:
                    # advanced one base: this event calls the next base
                    event_map.append(i)
                if move > 1:
                    # skipped bases: back-fill them with the previous event,
                    # then assign this event to the current base
                    for skip in xrange(move - 1):
                        event_map.append(i - 1)
                    event_map.append(i)
                if move == 0:
                    # stay: keep whichever event had the higher probability
                    if this_prob > previous_prob:
                        event_map[-1] = i
                    previous_prob = this_prob
            final_event_index = [event_map[-1]]
            padding = final_event_index * 5  # make this a kmer-measured thing
            event_map = event_map + padding
            return event_map
        self.template_strand_event_map = make_map(self.template_event_table)
        self.complement_strand_event_map = make_map(self.complement_event_table)
        return
    def get_twoD_event_map(self):
        """Maps the kmers in the alignment table sequence read to events in the template
        and complement strand reads.

        Populates self.template_event_map and self.complement_event_map (one entry per
        base of self.alignment_table_sequence) and returns True; returns False when 2D
        initialization fails or no 2D alignment table is present.
        """
        # initialize
        alignment_row = 0
        prev_alignment_kmer = ''
        nb_template_gaps = 0
        previous_complement_event = None
        previous_template_event = None
        twoD_init = self.initialize_twoD()
        if twoD_init is False:
            return False
        if not self.has2D_alignment_table:
            return False
        self.get_alignment_sequence()
        # go thought the kmers in the read sequence and match up the events
        for i, seq_kmer in enumerate(kmer_iterator(self.alignment_table_sequence, self.kmer_length)):
            # assign the current row's kmer
            current_alignment_kmer = self.twoD_alignment_table[alignment_row][2]
            # in the situation where there is a repeat kmer in the alignment then
            # we want to pick the best event to kmer alignment, TODO implement this
            # right now we just use the first alignment
            while current_alignment_kmer == prev_alignment_kmer:
                alignment_row += 1
                current_alignment_kmer = self.twoD_alignment_table[alignment_row][2]
            # a match
            if seq_kmer == current_alignment_kmer:
                # columns 0/1 of the alignment table: template/complement event
                # indices; -1 marks a gap on that strand
                template_event = self.twoD_alignment_table[alignment_row][0]
                complement_event = self.twoD_alignment_table[alignment_row][1]
                # handle template event
                # if there is a gap, count it and don't add anything to the map
                if template_event == -1:
                    nb_template_gaps += 1
                # if there is an aligned event
                if template_event != -1:
                    # if it is an aligned event and there are no gaps, add it to the map
                    if nb_template_gaps == 0:
                        self.template_event_map.append(template_event)
                        # update
                        previous_template_event = template_event
                    # if there were gaps in the alignment we have to add 'best guess'
                    # event alignments to the map which is the current aligned event
                    if nb_template_gaps > 0:
                        self.template_event_map += [template_event] * (nb_template_gaps + 1)
                        # reset template gaps
                        nb_template_gaps = 0
                        # update
                        previous_template_event = template_event
                # handle complement event
                # if there is a gap, add the last aligned complement event to the map
                if complement_event == -1:
                    self.complement_event_map.append(previous_complement_event)
                # if there is an aligned complement event add it to the map
                if complement_event != -1:
                    self.complement_event_map.append(complement_event)
                    # update the most recent aligned complement event
                    previous_complement_event = complement_event
                # update previous alignment kmer and increment alignment row
                prev_alignment_kmer = current_alignment_kmer
                alignment_row += 1
                continue
            # not a match, meaning that this kmer in the read sequence is not
            # in the event alignment but we need to assign an event to it so
            # we use the heuristic that we use the alignment of the most
            # recent aligned events to this base
            if seq_kmer != current_alignment_kmer:
                self.template_event_map.append(previous_template_event)
                self.complement_event_map.append(previous_complement_event)
                continue
        # fill in the final events for the partial last kmer
        for _ in xrange(self.kmer_length - 1):
            self.template_event_map += [previous_template_event] * (nb_template_gaps + 1)
            self.complement_event_map.append(previous_complement_event)
            nb_template_gaps = 0
        # check that we have mapped all of the bases in the 2D read
        assert(len(self.template_event_map) == len(self.alignment_table_sequence))
        assert(len(self.complement_event_map) == len(self.alignment_table_sequence))
        return True
def transform_events(self, events, drift):
"""Adjust event means by drift
"""
if (events == None or drift == None):
return False
# transform events by time
# events have format [[mean], [start_time], [std_dev], [length]]
# get the start time of the first event
start_time = events[0][1]
for event in events:
# time since first event
delta_time = event[1] - start_time
# drift adjust
event[0] -= (delta_time * drift)
return True
def get_template_events(self):
#template_event_table_address = '/Analyses/Basecall_2D_000/BaseCalled_template/Events'
if self.template_event_table_address in self.fastFive:
self.template_event_table = self.fastFive[self.template_event_table_address]
# maybe move to transform function
self.template_events = [[e[0], e[1], e[2], e[3]] # mean, start, stdev, length
for e in self.template_event_table]
return True
if self.template_event_table_address not in self.fastFive:
return False
def get_complement_events(self):
#complement_event_table_address = '/Analyses/Basecall_2D_000/BaseCalled_complement/Events'
if self.complement_event_table_address in self.fastFive:
#self.has_complement_events = True
self.complement_event_table = self.fastFive[self.complement_event_table_address]
self.complement_events = [[e[0], e[1], e[2], e[3]] # mean, start, stdev, length
for e in self.complement_event_table]
return True
if self.complement_event_table_address not in self.fastFive:
return False
def get_template_model_adjustments(self):
#template_model_address = "/Analyses/Basecall_2D_000/BaseCalled_template/Model"
if self.template_model_address in self.fastFive:
self.has_template_model = True
self.template_scale = self.fastFive[self.template_model_address].attrs["scale"]
self.template_shift = self.fastFive[self.template_model_address].attrs["shift"]
self.template_drift = self.fastFive[self.template_model_address].attrs["drift"]
self.template_var = self.fastFive[self.template_model_address].attrs["var"]
self.template_scale_sd = self.fastFive[self.template_model_address].attrs["scale_sd"]
self.template_var_sd = self.fastFive[self.template_model_address].attrs["var_sd"]
return True
if self.template_model_address not in self.fastFive:
self.has_template_model = False
return False
def get_complement_model_adjustments(self):
#complement_model_address = "/Analyses/Basecall_2D_000/BaseCalled_complement/Model"
if self.complement_model_address in self.fastFive:
self.has_complement_model = True
self.complement_scale = self.fastFive[self.complement_model_address].attrs["scale"]
self.complement_shift = self.fastFive[self.complement_model_address].attrs["shift"]
self.complement_drift = self.fastFive[self.complement_model_address].attrs["drift"]
self.complement_var = self.fastFive[self.complement_model_address].attrs["var"]
self.complement_scale_sd = self.fastFive[self.complement_model_address].attrs["scale_sd"]
self.complement_var_sd = self.fastFive[self.complement_model_address].attrs["var_sd"]
return True
if self.complement_model_address not in self.fastFive:
self.has_complement_model = False
return False
@staticmethod
def calculate_lambda(noise_mean, noise_stdev):
return (np.power(noise_mean, 3)) / (np.power(noise_stdev, 2))
    def export_model(self, skip_bins, model_address, destination):
        """Exports the model to a file. Format:
        line 1: [correlation coefficient] [level_mean] [level_sd] [noise_mean]
                [noise_sd] [noise_lambda ] (.../kmer) \n
        line 2: skip bins \n
        line 3: [correlation coefficient] [level_mean] [level_sd, scaled]
                [noise_mean] [noise_sd] [noise_lambda ] (.../kmer) \n

        Returns True if the model table exists at *model_address*, else False.
        *destination* is a writable file-like object (used via print(file=...)).
        """
        assert self.is_open
        lambdas = []
        if model_address in self.fastFive:
            model = self.fastFive[model_address]
            # line 1
            print("0", end=' ', file=destination)  # placeholder for correlation parameter
            for kmer, level_mean, level_sd, noise_mean, noise_sd, weight in model:
                lam = self.calculate_lambda(noise_mean, noise_sd)
                # cache lambdas so line 3 does not recompute them
                lambdas.append(lam)
                print(level_mean, level_sd, noise_mean, noise_sd, lam, end=' ', file=destination)
            print("", end="\n", file=destination)
            # line 2
            for p in skip_bins:
                print(p, end=' ', file=destination)
            print("", end="\n", file=destination)
            # line 3
            print("0", end=' ', file=destination)  # placeholder for correlation parameter
            i = 0
            for kmer, level_mean, level_sd, noise_mean, noise_sd, weight in model:
                #lam = self.calculate_lambda(noise_mean, noise_sd)
                lam = lambdas[i]
                # NOTE(review): the 1.75 level_sd scaling on line 3 looks
                # deliberate ("level_sd, scaled" in the format) -- confirm
                print(level_mean, (level_sd * 1.75), noise_mean, noise_sd, lam, end=' ', file=destination)
                i += 1
            print("", end="\n", file=destination)
            return True
        else:
            return False
def export_template_model(self, destination):
template_model_address = "/Analyses/Basecall_2D_000/BaseCalled_template/Model"
t_skip_prob_bins = [0.487, 0.412, 0.311, 0.229, 0.174, 0.134, 0.115, 0.103, 0.096, 0.092,
0.088, 0.087, 0.084, 0.085, 0.083, 0.082, 0.085, 0.083, 0.084, 0.082,
0.080, 0.085, 0.088, 0.086, 0.087, 0.089, 0.085, 0.090, 0.087, 0.096]
got_model = self.export_model(t_skip_prob_bins, template_model_address, destination)
return got_model
def export_complement_model(self, destination):
complement_model_address = "/Analyses/Basecall_2D_000/BaseCalled_complement/Model"
c_skip_prob_bins = [0.531, 0.478, 0.405, 0.327, 0.257, 0.207, 0.172, 0.154, 0.138, 0.132,
0.127, 0.123, 0.117, 0.115, 0.113, 0.113, 0.115, 0.109, 0.109, 0.107,
0.104, 0.105, 0.108, 0.106, 0.111, 0.114, 0.118, 0.119, 0.110, 0.119]
got_model = self.export_model(c_skip_prob_bins, complement_model_address, destination)
return got_model
def get_model_id(self, address):
if address in self.fastFive:
model_name = self.fastFive[address].attrs["model_file"]
model_name = model_name.split('/')[-1]
return model_name
else:
return None
    def close(self):
        """Close the underlying h5py file handle."""
        self.fastFive.close()
class NanoporeModel(object):
    def __init__(self, fast5File):
        """Open *fast5File* (HDF5) read-only and initialize model state."""
        self.fastFive = h5py.File(fast5File, "r")
        self.stay_prob = 0
        self.skip_prob_bins = []
        self.model_name = ''
        # set by child classes; export_model warns when this is still None
        self.model = None
def export_model(self, destination_path):
"""Exports the model to a file. Format:
line 1: [correlation coefficient] [level_mean] [level_sd] [noise_mean]
[noise_sd] [noise_lambda ] (.../kmer) \n
line 2: skip bins \n
line 3: [correlation coefficient] [level_mean] [level_sd, scaled]
[noise_mean] [noise_sd] [noise_lambda ] (.../kmer) \n
"""
def calculate_lambda(noise_mean, noise_stdev):
return (np.power(noise_mean, 3)) / (np.power(noise_stdev, 2))
if self.model is None:
print("This method is meant to be used as part of the child class TemplateModel or ComplementModel",
file=sys.stderr)
# output the model for cPecan to a file
model_path = destination_path + self.model_name
out_file = open(model_path, 'w')
# line 1
print("0", end=' ', file=out_file) # placeholder for correlation parameter
for kmer, level_mean, level_stdev, sd_mean, sd_stdev, weight in self.model:
lam = calculate_lambda(sd_mean, sd_stdev)
print(level_mean, level_stdev, sd_mean, sd_stdev, lam, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 2
for _ in self.skip_prob_bins:
print(_, end=' ', | |
<reponame>dlens/dlxapi
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dlxapi.api_client import ApiClient
class ProjectsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_contributing_users_for_project(self, id, **kwargs): # noqa: E501
"""Add users to a project. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_contributing_users_for_project(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: project id (required)
:param AddUsersRequest body: Email ids and personal message
:return: list[PortfolioPlanUser]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_contributing_users_for_project_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.add_contributing_users_for_project_with_http_info(id, **kwargs) # noqa: E501
return data
    def add_contributing_users_for_project_with_http_info(self, id, **kwargs):  # noqa: E501
        """Add users to a project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_contributing_users_for_project_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: project id (required)
        :param AddUsersRequest body: Email ids and personal message
        :return: list[PortfolioPlanUser]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated idiom: snapshot locals() so 'id' and all kwargs can be
        # validated and accessed through one flat params dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_contributing_users_for_project" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `add_contributing_users_for_project`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # request body is the optional AddUsersRequest payload
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['OAuth2']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{id}/users', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[PortfolioPlanUser]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_project(self, portfolio_id, project, **kwargs): # noqa: E501
"""Creates a new project. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project(portfolio_id, project, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str portfolio_id: Portfolio id (required)
:param Project project: Project to create (required)
:return: Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_project_with_http_info(portfolio_id, project, **kwargs) # noqa: E501
else:
(data) = self.create_project_with_http_info(portfolio_id, project, **kwargs) # noqa: E501
return data
    def create_project_with_http_info(self, portfolio_id, project, **kwargs):  # noqa: E501
        """Creates a new project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_project_with_http_info(portfolio_id, project, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str portfolio_id: Portfolio id (required)
        :param Project project: Project to create (required)
        :return: Project
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['portfolio_id', 'project']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated idiom: snapshot locals() so the positional arguments and
        # all kwargs can be validated and accessed through one flat dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_project" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'portfolio_id' is set
        if self.api_client.client_side_validation and ('portfolio_id' not in params or
                                                       params['portfolio_id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `portfolio_id` when calling `create_project`")  # noqa: E501
        # verify the required parameter 'project' is set
        if self.api_client.client_side_validation and ('project' not in params or
                                                       params['project'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `project` when calling `create_project`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        # portfolio_id travels as the 'portfolioId' query parameter
        if 'portfolio_id' in params:
            query_params.append(('portfolioId', params['portfolio_id']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'project' in params:
            body_params = params['project']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['OAuth2']  # noqa: E501
        return self.api_client.call_api(
            '/projects', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Project',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_project(self, id, **kwargs): # noqa: E501
"""Delete a project. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Project id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_project_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_project_with_http_info(id, **kwargs) # noqa: E501
return data
    def delete_project_with_http_info(self, id, **kwargs):  # noqa: E501
        """Delete a project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_project_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: Project id (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated idiom: snapshot locals() so 'id' and all kwargs can be
        # validated and accessed through one flat params dict.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_project" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `delete_project`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # DELETE request carries no body
        body_params = None
        # Authentication setting
        auth_settings = ['OAuth2']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_projects(self, project_ids, **kwargs): # noqa: E501
"""Delete projects. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_projects(project_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] project_ids: Project ids (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_projects_with_http_info(project_ids, **kwargs) # noqa: E501
else:
(data) = self.delete_projects_with_http_info(project_ids, **kwargs) # noqa: E501
return data
def delete_projects_with_http_info(self, project_ids, **kwargs): # noqa: E501
"""Delete projects. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_projects_with_http_info(project_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] project_ids: Project ids (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['project_ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_projects" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'project_ids' is set
if self.api_client.client_side_validation and ('project_ids' not in params or
params['project_ids'] is None): # noqa: E501
raise ValueError("Missing the required | |
will
match receipts on amount only. "notification_order" will match
receipts in order of payment notifications received by signal.
"confirmation_order" will match payments in order of confirmation on
the Mobilecoin blockchain
Returns:
bool: boolean representing if expected receipts match actual receipts
"""
paid = self.payment_receipts
expected = self.expected_receipts
if len(paid) != len(expected):
logging.warning(f"expected {len(expected)} payments, received {len(paid)}")
return False
if strategy == "notification_order":
paid = sorted(paid, key=lambda x: x.signal_timestamp or 0)
if strategy == "amount":
paid = sorted(paid, key=lambda x: x.amount or 0)
expected = sorted(expected, key=lambda x: x[1].amount or 0)
result = [paid[i] == expected[i][1] for i in range(len(paid))]
if result:
return all(result)
return False
def create_test_definition_file(test: Test) -> JSON:
    """
    Serialize a Test definition to a JSON-compatible dict so it can be
    stored for re-use (e.g. written to disk or a database) and later used
    to re-create the test.

    Args:
        test (Test): test definition dataclass instance

    Returns:
        JSON: dict representation produced by :func:`dataclasses.asdict`
    """
    # asdict recursively converts the dataclass (and nested dataclasses)
    # into plain dicts/lists suitable for json.dumps.
    return asdict(test)
def send_n_messages(  # pylint: disable=too-many-arguments
    name: str,
    description: str,
    recipient: str,
    amount: int,
    message: str,
    expected_response: Optional[str] = None,
    delay: float = 1.0,
    order: str = "sequential",
    validate_responses: bool = False,
    timeout: float = 360.0,
) -> Test:
    """
    Auto-definition of a test that sends ``amount`` copies of ``message``
    to ``recipient``.

    When ``expected_response`` equals ``message`` (an echo bot), a 1-based
    counter is appended to each outgoing message so that every expected
    echo is unique. This function is a prototype for defining future tests.
    """
    steps = []
    for i in range(amount):
        sender_message = message
        response = None
        if expected_response:
            if message == expected_response:
                # Echo case: disambiguate each round trip with a counter
                # so replies can be matched to the step that sent them.
                response_message = message + " " + str(i + 1)
                sender_message = response_message
                response = TestMessage("tester", response_message, sender=recipient)
            else:
                response = TestMessage("tester", expected_response, sender=recipient)
        steps.append(
            TestStep(
                uid=f"{name}-{i+1}",
                description=f"send message: {sender_message}",
                message=TestMessage(recipient, sender_message),
                expected_response=response,
                delay=delay,
            )
        )
    return Test(name, description, recipient, steps, order, validate_responses, timeout)
def script_test(name: str, recipient: str, script: list[tuple[str, str]]) -> Test:
    """
    Build a response-validating Test from a script of
    ``(message, expected_reply)`` tuples.
    """
    steps = []
    for call, response in script:
        step = TestStep(
            uid=f"{name}-{call[:4]}",
            description=f"send message: {call}",
            message=TestMessage(recipient, call),
            expected_response=TestMessage("tester", response, sender=recipient),
            delay=0.2,
        )
        steps.append(step)
    return Test(
        name,
        name,
        recipient,
        steps=steps,
        validate_responses=True,
    )
def payments_test(
    name: str,
    recipient: str,
    script: list[
        tuple[
            tuple[str, Optional[int]],
            tuple[Optional[str], Optional[int], Optional[str]],
        ]
    ],
) -> Test:
    """
    Build a payment-exercising Test from a script of
    ``((message, amount_to_send), (reply, amount_to_receive, note))`` tuples.
    """
    steps = []
    for (message, send_amount), (response, receive_amount, note) in script:
        expected_receipt = None
        if receive_amount:
            # A truthy receive amount means this step should produce an
            # incoming payment receipt from the bot under test.
            expected_receipt = PaymentReceipt(
                sender=recipient, recipient="tester", amount=receive_amount, note=note
            )
        outgoing_payment = (recipient, send_amount) if send_amount else None
        steps.append(
            TestStep(
                uid=f"{name}-{message}",
                description=f"send message: {message}",
                message=TestMessage(recipient, message, payment=outgoing_payment),
                expected_response=TestMessage("tester", response, sender=recipient),
                expected_receipt=expected_receipt,
                delay=4,
            )
        )
    return Test(name, name, recipient, steps=steps)
# Test targets: the imogen image bot and the echopay payment bot.
imogen = "+12406171474"  # "+12406171657"
echopay = get_secret("ECHOPAY")

# Basic liveness check: ping/pong round trips.
ping_test = script_test(
    "ping", imogen, [("/ping", "/pong"), ("/ping 1", "/pong 1"), ("/pong", "OK")]
)

# Exercise payment send/receive against echopay.
pay_test = payments_test(
    "echopay_test",
    echopay,
    [
        (("/ping", None), ("/pong", None, None)),
        (("/pong", None), ("OK", None, None)),
        (
            ("/pay", None),
            (
                "receipt sent!",
                1000000000,
                "check out this java-free payment notification",
            ),
        ),
    ],
)

# Exercise imogen's redis-backed queue commands.
redis_test = script_test(
    "redis",
    imogen,
    [
        ("/imagine_nostart foo", "you are #1 in line"),
        ("/list_queue", "foo"),
        ("/dump_queue", "foo"),
        ("/list_queue", "queue empty"),
    ],
)

# todo: /send <number> across two contactbot instances or one with multiple accounts,
# check for reaction
# maybe a signal-cli based test for groups

# Fire-and-forget load test (no response validation).
load_test = send_n_messages(
    name="send_3_messages",
    # Bug fix: description previously said "send 20 messages", which
    # contradicted name="send_3_messages" and amount=3.
    description="send 3 messages",
    recipient="+12406171615",
    amount=3,
    message="it's okay to be broken",
    delay=3.5,
    timeout=30 * 3.5,
)

# Echo-correctness acceptance test: every message must be echoed back.
acceptance_test = send_n_messages(
    name="test_echobot",
    description="test echobot for correct behavior",
    recipient="+12406171615",
    amount=3,
    message="it's okay to be broken",
    expected_response="it's okay to be broken",
    delay=3.5,
    validate_responses=True,
    timeout=20 * 3.5,
)
class Tiamat(PayBot):
"""
Bot for running acceptance and load tests of other bots.
Attributes:
available_tests (dict[str, Test]): set of available tests to Tiamat
test (Test): test specification object for current test, should be
reset to None after each test
test_result (TestResult): TestResult object that stores results of
current test, should be reset to None after each test
test_running (bool): indicates whether test is running
test_admin (str): signal number of primary test admin
secondary_admins (list[str]): list of signal numbers that can also manage
Tiamat tests
test_result_log (list[TestResult]): list of TestResult objects from past
tests
pending_step_results (Queue[StepResult]): FIFO Queue containing
StepResult objects to be compared against actual messages received by
bots being tested
response_queue (Queue[tuple[Message, Test, float]]): Queue containing
messages received by bot being tested.
payment_tasks (list[Task]): List of record_payment tasks
monitor (Task): response_monitor task that reads incoming messages
from bot
test_launcher (Task): test launcher task
"""
def __init__(
    self,
    admin: str,
    available_tests: list[Test],
    secondary_admins: Optional[list[str]] = None,
) -> None:
    """
    Args:
        admin (str): signal number of the primary admin
        available_tests (list[Test]): test specifications that can be run
        secondary_admins (Optional[list[str]]): additional numbers
            authorized to run tests with Tiamat
    """
    super().__init__()
    # Index the test specifications by name for lookup on request.
    self.available_tests: dict[str, Test] = {
        spec.name: spec for spec in available_tests
    }
    # Who may start/stop tests.
    self.test_admin: str = admin
    self.secondary_admins: Optional[list[str]] = secondary_admins
    # Per-run state; reset after every test.
    self.test: Optional[Test] = None
    self.test_result: Optional[TestResult] = None
    self.test_running: bool = False
    self.test_result_log: list[TestResult] = []
    # Work queues and background tasks.
    self.pending_step_results: Queue[StepResult] = Queue()
    self.response_queue: Queue[tuple[Message, Test, float]] = Queue()
    self.payment_tasks: list[Task] = []
    self.monitor: Optional[Task] = None
    self.test_launcher: Optional[Task] = None
@staticmethod
def is_data_message(response: Message) -> bool:
    """Return True if *response* carries a Signal dataMessage payload."""
    source = response.blob.get("content", {}).get("source", {})
    return bool(source.get("dataMessage"))
async def set_profile(self) -> None:
    """Publish Tiamat's own Signal profile via a raw auxin JSON-RPC call."""
    # setProfile request payload; MOBADDRESS is the bot's MobileCoin
    # address pulled from the secrets store.
    profile = {
        "jsonrpc": "2.0",
        "method": "setProfile",
        "id": 666,
        "params": {
            "profile_fields": {
                "name": {"givenName": "tiamat", "familyName": ""},
                "mobilecoinAddress": get_secret("MOBADDRESS"),
                "about": "The many headed dragon helps",
                "about_emoji": "\N{Rainbow}",
            }
        },
    }
    # Enqueue for the outbound JSON-RPC writer task.
    await self.outbox.put(profile)
    logging.info(profile)
async def handle_message(self, message: Message) -> Union[Response, None]:
    """
    Handles messages when they arrive. If a test is active and the message
    is from the bot being tested it will be put into a queue to be processed
    by the response_monitor task. If a payment is received by the bot being
    tested, it will launch a record_payment task to verify it.

    It will also listen for messages/payments from test admins and process
    those normally. Messages from any other users are not responded to.

    Args:
        message (Message): Message received by bot framework

    Returns:
        Union[Response, None]: A Response typed object that is processed
        and sent via the auxin signal client to the sender of the message
    """
    if (
        isinstance(self.test, Test)
        and self.test_running
        and self.test.validate_responses
        and message.source == self.test.recipient
        and self.is_data_message(message)
    ):
        if message.payment:
            logging.info(f"payment message received: {message}")
            # Verify the payment in the background; keep a reference so
            # the task is not garbage-collected mid-flight.
            payment_task = create_task(
                self.record_payment(message, self.test.payment_timeout)
            )
            self.payment_tasks.append(payment_task)
        else:
            # Timestamp on arrival so latency can be computed later.
            await self.response_queue.put((message, self.test, time.time()))
    # If you're admin, respond, else, blackhole
    if self.is_admin(message.source):
        logging.info(message)
        if message.payment:
            logging.info(f"payment received - {message}")
            return await super().handle_payment(message)
        return await super().handle_message(message)
    return None
async def configure_test(self, test: Test) -> None:
    """
    Prepare test configuration by setting new Test definition and TestResult
    objects within the class. Refuses to replace a test that is already
    loaded or running.

    Args:
        test (Test): test definition object
    """
    logging.info(f"attempting to load {test.name}")
    if self.test_running or self.test:
        message = "existing test running, please wait"
        logging.warning(message)
        # Bug fix: previously this fell through and clobbered the active
        # test despite warning the caller to wait; abort instead.
        return
    self.test = test
    self.test_result = TestResult(test=test, test_account=self.bot_number)
    message = f"{test.name} configured, steps: {test.steps} ready to run"
    logging.info(message)
def is_test_ready(self) -> bool:
    """
    Perform checks prior to launching test to ensure Tiamat is configured
    correctly to launch the test and the test is valid.

    Returns:
        bool: Boolean flag indicating proper test configuration
    """
    # Only one test may run at a time.
    if self.test_running:
        logging.warning("Existing test running, aborting run attempt")
        return False
    # configure_test() must have loaded a definition...
    if not isinstance(self.test, Test):
        logging.warning("No currently loaded test, aborting run attempt")
        return False
    # ...and created the matching result container.
    if not isinstance(self.test_result, TestResult):
        logging.warning(
            "Test result object must be configured prior to launching test, aborting"
        )
        return False
    # Test definitions self-validate; ValueError signals a bad definition.
    try:
        self.test.validate_self()
    except ValueError:
        logging.warning("Test definition is invalid, please reconfigure")
        return False
    return True
async def send_sequential_messages(self) -> None:
    """
    Executes sending of messages within the loaded test definition.

    For each step: wait the configured delay, send the message (or
    payment), record send timing, then hand the StepResult either to the
    response monitor (when responses are validated) or straight to the
    test result.
    """
    assert self.test
    assert self.test_result
    for step in self.test.steps:
        await asyncio.sleep(step.delay)
        logging.debug(f"starting step: {step}")
        step_result = StepResult(
            uid=step.uid,
            message_sent=step.message,
            expected_response=step.expected_response,
        )
        step_result.python_timestamp = time.time()
        if step.message.payment:
            recipient, amount = step.message.payment
            assert amount
            send_receipt = await self.send_payment(
                recipient, amount, ""
            )  # Type checked when test created
        else:
            rpc_id = await self.send_message(
                recipient=step.message.recipient,
                msg=step.message.message,
                group=step.message.group,
                endsession=step.message.endsession,
                attachments=step.message.attachments,  # type: ignore
            )
            # Block until auxin acknowledges the send for this rpc id.
            send_receipt = await self.pending_requests[rpc_id]
        logging.info(f"send receipt is {send_receipt}")
        if isinstance(send_receipt, Message):
            # auxin timestamps are in milliseconds; convert to seconds to
            # compare with time.time().
            step_result.auxin_timestamp = send_receipt.timestamp / 1000
            step_result.auxin_roundtrip_latency = (
                step_result.auxin_timestamp - step_result.python_timestamp
            )
        if self.test.validate_responses:
            # The response monitor pairs this with the bot's reply.
            await self.pending_step_results.put(step_result)
        else:
            self.test_result.step_results.append(step_result)
    logging.info(f"all steps in {self.test.name} executed")
async def launch_test(self) -> Optional[TestResult]:
"""
Coroutine that launches a defined test by executing the steps defined
by that test.
This will begin sending a sequence of messages (potentially
| |
id, unique_database, unique_table):
uri = '/tmp'
try:
# Grant server privileges and verify
admin_client.execute("grant all on server to {0} {1}".format(kw, id), user=ADMIN)
result = self.client.execute("show grant {0} {1} on server".format(kw, id))
TestRanger._check_privileges(result, [
[kw, id, "", "", "", "*", "", "all", "false"],
[kw, id, "*", "", "", "", "*", "all", "false"],
[kw, id, "*", "*", "*", "", "", "all", "false"]])
# Revoke server privileges and verify
admin_client.execute("revoke all on server from {0} {1}".format(kw, id))
result = self.client.execute("show grant {0} {1} on server".format(kw, id))
TestRanger._check_privileges(result, [])
# Grant uri privileges and verify
admin_client.execute("grant all on uri '{0}' to {1} {2}"
.format(uri, kw, id))
result = self.client.execute("show grant {0} {1} on uri '{2}'"
.format(kw, id, uri))
TestRanger._check_privileges(result, [
[kw, id, "", "", "", "{0}{1}".format(NAMENODE, uri), "", "all", "false"]])
# Revoke uri privileges and verify
admin_client.execute("revoke all on uri '{0}' from {1} {2}"
.format(uri, kw, id))
result = self.client.execute("show grant {0} {1} on uri '{2}'"
.format(kw, id, uri))
TestRanger._check_privileges(result, [])
# Grant database privileges and verify
admin_client.execute("grant select on database {0} to {1} {2}"
.format(unique_database, kw, id))
result = self.client.execute("show grant {0} {1} on database {2}"
.format(kw, id, unique_database))
TestRanger._check_privileges(result, [
[kw, id, unique_database, "", "", "", "*", "select", "false"],
[kw, id, unique_database, "*", "*", "", "", "select", "false"]])
# Revoke database privileges and verify
admin_client.execute("revoke select on database {0} from {1} {2}"
.format(unique_database, kw, id))
result = self.client.execute("show grant {0} {1} on database {2}"
.format(kw, id, unique_database))
TestRanger._check_privileges(result, [])
# Grant table privileges and verify
admin_client.execute("grant select on table {0}.{1} to {2} {3}"
.format(unique_database, unique_table, kw, id))
result = self.client.execute("show grant {0} {1} on table {2}.{3}"
.format(kw, id, unique_database, unique_table))
TestRanger._check_privileges(result, [
[kw, id, unique_database, unique_table, "*", "", "", "select", "false"]])
# Revoke table privileges and verify
admin_client.execute("revoke select on table {0}.{1} from {2} {3}"
.format(unique_database, unique_table, kw, id))
result = self.client.execute("show grant {0} {1} on table {2}.{3}"
.format(kw, id, unique_database, unique_table))
TestRanger._check_privileges(result, [])
# Grant column privileges and verify
admin_client.execute("grant select(x) on table {0}.{1} to {2} {3}"
.format(unique_database, unique_table, kw, id))
result = self.client.execute("show grant {0} {1} on column {2}.{3}.x"
.format(kw, id, unique_database, unique_table))
TestRanger._check_privileges(result, [
[kw, id, unique_database, unique_table, "x", "", "", "select", "false"]])
# Revoke column privileges and verify
admin_client.execute("revoke select(x) on table {0}.{1} from {2} {3}"
.format(unique_database, unique_table, kw, id))
result = self.client.execute("show grant {0} {1} on column {2}.{3}.x"
.format(kw, id, unique_database, unique_table))
TestRanger._check_privileges(result, [])
finally:
admin_client.execute("revoke all on server from {0} {1}".format(kw, id))
admin_client.execute("revoke all on uri '{0}' from {1} {2}"
.format(uri, kw, id))
admin_client.execute("revoke select on database {0} from {1} {2}"
.format(unique_database, kw, id))
admin_client.execute("revoke select on table {0}.{1} from {2} {3}"
.format(unique_database, unique_table, kw, id))
admin_client.execute("revoke select(x) on table {0}.{1} from {2} {3}"
.format(unique_database, unique_table, kw, id))
@CustomClusterTestSuite.with_args(
    impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
def test_grant_revoke_ranger_api(self, unique_name):
    """Grant/revoke privileges directly through the Ranger REST API and
    verify Impala only sees the changes after REFRESH AUTHORIZATION."""
    # This test fails due to bumping up the Ranger to a newer version.
    # TODO(fangyu.rao): Fix in a follow up commit.
    pytest.xfail("failed due to bumping up the Ranger to a newer version")
    user = getuser()
    admin_client = self.create_impala_client()
    unique_db = unique_name + "_db"
    resource = {
        "database": unique_db,
        "column": "*",
        "table": "*"
    }
    access = ["select", "create"]
    try:
        # Create the test database
        admin_client.execute("drop database if exists {0} cascade".format(unique_db),
                             user=ADMIN)
        admin_client.execute("create database {0}".format(unique_db), user=ADMIN)
        # Grant privileges via Ranger REST API
        TestRanger._grant_ranger_privilege(user, resource, access)
        # Privileges should be stale before a refresh
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [])
        # Refresh and check updated privileges
        admin_client.execute("refresh authorization")
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [
            ["USER", user, unique_db, "*", "*", "", "", "create", "false"],
            ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
        ])
        # Revoke privileges via Ranger REST API
        TestRanger._revoke_ranger_privilege(user, resource, access)
        # Privileges should be stale before a refresh
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [
            ["USER", user, unique_db, "*", "*", "", "", "create", "false"],
            ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
        ])
        # Refresh and check updated privileges
        admin_client.execute("refresh authorization")
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [])
    finally:
        # Best-effort cleanup of anything the test granted/created.
        admin_client.execute("revoke all on database {0} from user {1}"
                             .format(unique_db, user))
        admin_client.execute("drop database if exists {0} cascade".format(unique_db),
                             user=ADMIN)
@CustomClusterTestSuite.with_args(
    impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
def test_show_grant_hive_privilege(self, unique_name):
    """Verify a Hive-only privilege (lock) is kept in Ranger but not shown
    by Impala, and that revoking select in Impala leaves lock in place."""
    # This test fails due to bumping up the Ranger to a newer version.
    # TODO(fangyu.rao): Fix in a follow up commit.
    pytest.xfail("failed due to bumping up the Ranger to a newer version")
    user = getuser()
    admin_client = self.create_impala_client()
    unique_db = unique_name + "_db"
    resource = {
        "database": unique_db,
        "column": "*",
        "table": "*"
    }
    access = ["lock", "select"]
    try:
        TestRanger._grant_ranger_privilege(user, resource, access)
        admin_client.execute("drop database if exists {0} cascade".format(unique_db),
                             user=ADMIN)
        admin_client.execute("create database {0}".format(unique_db), user=ADMIN)
        admin_client.execute("refresh authorization")
        # Impala only surfaces the privileges it understands (select).
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [
            ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
        ])
        # Assert that lock, select privilege exists in Ranger server
        assert "lock" in TestRanger._get_ranger_privileges_db(user, unique_db)
        assert "select" in TestRanger._get_ranger_privileges_db(user, unique_db)
        admin_client.execute("revoke select on database {0} from user {1}"
                             .format(unique_db, user))
        # Assert that lock is still present and select is revoked in Ranger server
        assert "lock" in TestRanger._get_ranger_privileges_db(user, unique_db)
        assert "select" not in TestRanger._get_ranger_privileges_db(user, unique_db)
        admin_client.execute("refresh authorization")
        result = self.client.execute("show grant user {0} on database {1}"
                                     .format(user, unique_db))
        TestRanger._check_privileges(result, [])
    finally:
        admin_client.execute("drop database if exists {0} cascade".format(unique_db),
                             user=ADMIN)
        TestRanger._revoke_ranger_privilege(user, resource, access)
@staticmethod
def _grant_ranger_privilege(user, resource, access):
    """Grant the *access* types on *resource* to *user* by POSTing to the
    Ranger grant REST endpoint directly (bypassing Impala)."""
    data = {
        "grantor": ADMIN,
        "grantorGroups": [],
        "resource": resource,
        "users": [user],
        "groups": [],
        "accessTypes": access,
        "delegateAdmin": "false",
        "enableAudit": "true",
        "replaceExistingPermissions": "false",
        "isRecursive": "false",
        "clusterName": "server1"
    }
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    r = requests.post("{0}/service/plugins/services/grant/test_impala?pluginId=impala"
                      .format(RANGER_HOST),
                      auth=RANGER_AUTH, json=data, headers=headers)
    # Any 2xx status counts as success.
    assert 200 <= r.status_code < 300
@staticmethod
def _revoke_ranger_privilege(user, resource, access):
    """Revoke the *access* types on *resource* from *user* by POSTing to
    the Ranger revoke REST endpoint directly (bypassing Impala)."""
    data = {
        "grantor": ADMIN,
        "grantorGroups": [],
        "resource": resource,
        "users": [user],
        "groups": [],
        "accessTypes": access,
        "delegateAdmin": "false",
        "enableAudit": "true",
        "replaceExistingPermissions": "false",
        "isRecursive": "false",
        "clusterName": "server1"
    }
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    r = requests.post("{0}/service/plugins/services/revoke/test_impala?pluginId=impala"
                      .format(RANGER_HOST),
                      auth=RANGER_AUTH, json=data, headers=headers)
    # Any 2xx status counts as success.
    assert 200 <= r.status_code < 300
@staticmethod
def _get_ranger_privileges_db(user, db):
    """Collect the access types granted to *user* on database *db* across
    all Ranger policies."""
    return [
        access["type"]
        for policy in TestRanger._get_ranger_privileges(user)
        if "database" in policy["resources"]
        and db in policy["resources"]["database"]["values"]
        for item in policy["policyItems"]
        if user in item["users"]
        for access in item["accesses"]
    ]
@staticmethod
def _get_ranger_privileges(user):
    """Fetch the full list of policies from the Ranger server.

    Note: the *user* argument is not used in the request; callers filter
    the returned policies themselves.
    """
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    r = requests.get("{0}/service/plugins/policies"
                     .format(RANGER_HOST),
                     auth=RANGER_AUTH, headers=headers)
    return json.loads(r.content)["policies"]
def _add_ranger_user(self, user):
    """Create a Ranger user via the REST API and return its numeric id."""
    # NOTE(review): "<PASSWORD>" looks like a redacted placeholder, not a
    # real credential -- confirm before relying on this helper.
    data = {"name": user, "password": "<PASSWORD>", "userRoleList": ["ROLE_USER"]}
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    r = requests.post("{0}/service/xusers/secure/users".format(RANGER_HOST),
                      auth=RANGER_AUTH,
                      json=data, headers=headers)
    return json.loads(r.content)["id"]
def _remove_ranger_user(self, id):
    """Force-delete a Ranger user by numeric *id* via the REST API."""
    r = requests.delete("{0}/service/xusers/users/{1}?forceDelete=true"
                        .format(RANGER_HOST, id), auth=RANGER_AUTH)
    # Consistency: same 2xx check style as the other Ranger helpers
    # (was written as `300 > r.status_code >= 200`).
    assert 200 <= r.status_code < 300
@staticmethod
def _check_privileges(result, expected):
    """Assert that the rows of a SHOW GRANT *result* equal *expected*.

    The trailing column of each tab-separated row (grant time) is dropped
    before comparison.
    """
    def columns(row):
        cols = row.split("\t")
        return cols[0:len(cols) - 1]
    # Bug fix: on Python 3 `map()` returns an iterator, so the previous
    # `assert map(columns, result.data) == expected` compared an iterator
    # to a list and was always False; materialize the list first.
    assert list(map(columns, result.data)) == expected
def _refresh_authorization(self, client, statement):
    """Run the given refresh *statement* (if any), expecting success."""
    if statement is None:
        return
    self.execute_query_expect_success(client, statement)
def _run_query_as_user(self, query, username, expect_success):
    """Helper to run an input query as a given user."""
    client = self.create_impala_client()
    if not expect_success:
        return self.execute_query_expect_failure(client, query, user=username)
    # sync_ddl so DDL effects are visible cluster-wide before returning.
    return self.execute_query_expect_success(
        client, query, user=username, query_options={'sync_ddl': 1})
@CustomClusterTestSuite.with_args(
    impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
def test_unsupported_sql(self):
    """Tests unsupported SQL statements when running with Ranger."""
    user = "admin"
    impala_client = self.create_impala_client()
    error_msg = "UnsupportedFeatureException: {0} is not supported by Ranger."
    unsupported = [
        ("show roles", error_msg.format("SHOW ROLES")),
        ("show current roles", error_msg.format("SHOW CURRENT ROLES")),
        ("create role foo", error_msg.format("CREATE ROLE")),
        ("drop role foo", error_msg.format("DROP ROLE")),
        ("grant select on database functional to role foo",
         error_msg.format("GRANT <privilege> TO ROLE")),
        ("revoke select on database functional from role foo",
         error_msg.format("REVOKE <privilege> FROM ROLE")),
        ("show grant role foo", error_msg.format("SHOW GRANT ROLE")),
        ("show role grant group foo",
         error_msg.format("SHOW ROLE GRANT GROUP")),
    ]
    # Each statement must fail with the matching UnsupportedFeature error.
    for sql, expected_error in unsupported:
        result = self.execute_query_expect_failure(impala_client, sql, user=user)
        assert expected_error in str(result)
@CustomClusterTestSuite.with_args(
impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
def test_grant_revoke_invalid_principal(self):
"""Tests grant/revoke to/from invalid principal should return more readable
error messages."""
valid_user = "admin"
invalid_user = "invalid_user"
invalid_group = "invalid_group"
# TODO(IMPALA-8640): Create two different Impala clients because the users to
# workaround the bug.
invalid_impala_client = self.create_impala_client()
valid_impala_client = self.create_impala_client()
for statement in ["grant select on table functional.alltypes to | |
optional
Prefix for HOLE output files.
write_input_files: bool, optional
Whether to write out the input HOLE text as files.
Files are called `hole.inp`.
Returns
-------
dict
A dictionary of :class:`numpy.recarray`\ s, indexed by frame.
.. versionadded:: 1.0
"""
# Filename templates for the per-frame HOLE input, output, and sphere
# (SPHPDB) files; formatted with prefix= and i=<frame number>.
# Bug fix: these three templates were assigned twice in a row; the
# redundant duplicate block has been removed.
input_file = '{prefix}hole{i:03d}.inp'
output_file = '{prefix}hole{i:03d}.out'
sphpdb_file = '{prefix}hole{i:03d}.sph'

# Header of the generated HOLE input file; '{}' is filled with the
# trajectory filenames in __init__, '{{i}}' with the frame per step.
hole_header = textwrap.dedent("""
! Input file for <NAME>'s HOLE program
! written by MDAnalysis.analysis.hole2.HoleAnalysis
! for a Universe
! u = mda.Universe({}
! )
! Frame {{i}}
""")

# Body of the HOLE input file; single-brace fields are filled once in
# _prepare(), double-brace fields per frame in _single_frame().
hole_body = textwrap.dedent("""
COORD {{coordinates}}
RADIUS {radius}
SPHPDB {{sphpdb}}
SAMPLE {sample:f}
ENDRAD {end_radius:f}
IGNORE {ignore}
SHORTO {output_level:d}
""")

_guess_cpoint = False
# Per-run state populated by _prepare()/_single_frame().
sphpdbs = None
outfiles = None
frames = None
profiles = None
def __init__(self, universe,
             select='protein',
             verbose=False,
             ignore_residues=IGNORE_RESIDUES,
             vdwradii_file=None,
             executable='hole',
             sos_triangle='sos_triangle',
             sph_process='sph_process',
             tmpdir=os.path.curdir,
             cpoint=None,
             cvect=None,
             sample=0.2,
             end_radius=22,
             output_level=0,
             prefix=None,
             write_input_files=False):
    """Set up HOLE analysis of *universe*.

    Stores the HOLE parameters and locates the ``hole``,
    ``sos_triangle`` and ``sph_process`` executables up front so that
    missing binaries fail early with a clear error.
    """
    super(HoleAnalysis, self).__init__(universe.universe.trajectory,
                                       verbose=verbose)
    if output_level > 3:
        msg = 'output_level ({}) needs to be < 3 in order to extract a HOLE profile!'
        warnings.warn(msg.format(output_level))

    if prefix is None:
        prefix = ''

    if isinstance(cpoint, str):
        if 'geometry' in cpoint.lower():
            # Defer the guess: the actual centre of geometry is computed
            # per frame and substituted into this template.
            self._guess_cpoint = True
            self.cpoint = '{cpoint[0]:.10f} {cpoint[1]:.10f} {cpoint[2]:.10f}'
    else:
        self._guess_cpoint = False
        self.cpoint = cpoint

    self.prefix = prefix
    self.cvect = cvect
    self.sample = sample
    self.end_radius = end_radius
    self.output_level = output_level
    self.write_input_files = write_input_files
    self.select = select
    self.ag = universe.select_atoms(select, updating=True)
    self.universe = universe
    self.tmpdir = tmpdir
    self.ignore_residues = ignore_residues

    # --- finding executables ----
    hole = util.which(executable)
    if hole is None:
        # Bug fix: previously `name=hole`, which is None on this branch
        # and produced a useless "None not found" message; report the
        # requested executable name instead.
        raise OSError(errno.ENOENT, exe_err.format(name=executable,
                                                   kw='executable'))
    self.base_path = os.path.dirname(hole)

    sos_triangle_path = util.which(sos_triangle)
    if sos_triangle_path is None:
        # Fall back to looking next to the hole executable.
        path = os.path.join(self.base_path, sos_triangle)
        sos_triangle_path = util.which(path)
    if sos_triangle_path is None:
        raise OSError(errno.ENOENT, exe_err.format(name=sos_triangle,
                                                   kw='sos_triangle'))

    sph_process_path = util.which(sph_process)
    if sph_process_path is None:
        path = os.path.join(self.base_path, 'sph_process')
        sph_process_path = util.which(path)
    if sph_process_path is None:
        raise OSError(errno.ENOENT, exe_err.format(name=sph_process,
                                                   kw='sph_process'))

    self.exe = {
        'hole': hole,
        'sos_triangle': sos_triangle_path,
        'sph_process': sph_process_path
    }

    # --- setting up temp files ----
    self.tmp_files = []
    if vdwradii_file is not None:
        self.vdwradii_file = check_and_fix_long_filename(vdwradii_file,
                                                         tmpdir=self.tmpdir)
        # Only clean up the symlink we created, never the user's file.
        if os.path.islink(self.vdwradii_file):
            self.tmp_files.append(self.vdwradii_file)
    else:
        self.vdwradii_file = write_simplerad2()
        self.tmp_files.append(self.vdwradii_file)

    # --- setting up input header ----
    filenames = [universe.filename]
    try:
        filenames.extend(universe.trajectory.filenames)
    except AttributeError:
        filenames.append(universe.trajectory.filename)
    hole_filenames = '\n! '.join(filenames)
    self._input_header = self.hole_header.format(hole_filenames)
def run(self, start=None, stop=None, step=None, verbose=None,
        random_seed=None):
    """
    Perform the calculation

    Parameters
    ----------
    start : int, optional
        start frame of analysis
    stop : int, optional
        stop frame of analysis
    step : int, optional
        number of frames to skip between each analysed frame
    verbose : bool, optional
        Turn on verbosity
    random_seed : int, optional
        seed for :program:`hole`'s random number generator; by default
        the time of day is used. Set an integer for reproducible runs
        (e.g. for testing). Default: ``None``
    """
    # Stash the seed so _prepare() can inject it into the input file.
    self.random_seed = random_seed
    return super(HoleAnalysis, self).run(
        start=start, stop=stop, step=step, verbose=verbose)
def _prepare(self):
    """Set up containers and generate input file text"""
    # set up containers (indexed by frame position, 0..n_frames-1)
    self.sphpdbs = np.zeros(self.n_frames, dtype=object)
    self.outfiles = np.zeros(self.n_frames, dtype=object)
    self.frames = np.zeros(self.n_frames, dtype=int)
    self.profiles = {}
    # generate input file; per-frame fields (coordinates, sphpdb, and
    # cpoint when guessed) remain as '{...}' placeholders that
    # _single_frame() fills in with str.format.
    body = set_up_hole_input('',
                             infile_text=self.hole_body,
                             infile=None,
                             vdwradii_file=self.vdwradii_file,
                             tmpdir=self.tmpdir,
                             sample=self.sample,
                             end_radius=self.end_radius,
                             cpoint=self.cpoint,
                             cvect=self.cvect,
                             random_seed=self.random_seed,
                             ignore_residues=self.ignore_residues,
                             output_level=self.output_level,
                             dcd=None)
    self.infile_text = self._input_header + body
def guess_cpoint(self):
    """Guess a point inside the pore.

    This method simply uses the center of geometry of the selection as a
    guess.

    Returns
    -------
    float:
        center of geometry of selected AtomGroup
    """
    guess = self.ag.center_of_geometry()
    return guess
def _single_frame(self):
    """Run HOLE analysis and collect profiles"""
    # set up files
    frame = self._ts.frame
    i = self._frame_index
    outfile = self.output_file.format(prefix=self.prefix, i=frame)
    sphpdb = self.sphpdb_file.format(prefix=self.prefix, i=frame)
    self.sphpdbs[i] = sphpdb
    self.outfiles[i] = outfile
    if outfile not in self.tmp_files:
        self.tmp_files.append(outfile)
    if sphpdb not in self.tmp_files:
        self.tmp_files.append(sphpdb)
    else:
        # Re-running the same frame: track the '.old' backup as well.
        self.tmp_files.append(sphpdb + '.old')
    self.frames[i] = frame
    # temp pdb
    logger.info('HOLE analysis frame {}'.format(frame))
    fd, pdbfile = tempfile.mkstemp(suffix='.pdb')
    os.close(fd)  # close immediately (Issue 129)
    # get infile text: fill the per-frame placeholders left by _prepare()
    fmt_kwargs = {'i': frame, 'coordinates': pdbfile, 'sphpdb': sphpdb}
    if self._guess_cpoint:
        fmt_kwargs['cpoint'] = self.guess_cpoint()
    infile_text = self.infile_text.format(**fmt_kwargs)
    if self.write_input_files:
        infile = self.input_file.format(prefix=self.prefix, i=frame)
        with open(infile, 'w') as f:
            f.write(infile_text)
    try:
        self.ag.write(pdbfile)
        run_hole(outfile=outfile, infile_text=infile_text,
                 executable=self.exe['hole'])
    finally:
        # Always remove the temporary PDB, even if HOLE failed.
        try:
            os.unlink(pdbfile)
        except OSError:
            pass
    recarrays = collect_hole(outfile=outfile)
    try:
        self.profiles[frame] = recarrays[0]
    except KeyError:
        # NOTE(review): assumes collect_hole returns a mapping keyed by
        # profile id; if it returns a list, the missing-profile case
        # would raise IndexError instead -- confirm.
        msg = 'No profile found in HOLE output. Output level: {}'
        logger.info(msg.format(self.output_level))
def create_vmd_surface(self, filename='hole.vmd', dot_density=15,
                       no_water_color='red', one_water_color='green',
                       double_water_color='blue'):
    """Process HOLE output to create a smooth pore surface suitable for VMD.

    Takes the ``sphpdb`` file for each frame and feeds it to `sph_process
    <http://www.holeprogram.org/doc/old/hole_d04.html#sph_process>`_
    and `sos_triangle
    <http://www.holeprogram.org/doc/old/hole_d04.html#sos_triangle>`_
    as described under `Visualization of HOLE results
    <http://www.holeprogram.org/doc/index.html>`_.

    Load the output file *filename* into VMD in Extensions > Tk Console ::

        source hole.vmd

    The level of detail is determined by ``dot_density``.
    The surface will be colored by ``no_water_color``, ``one_water_color``,
    and ``double_water_color``. You can change these in the Tk Console::

        set no_water_color blue

    Parameters
    ----------
    filename: str, optional
        file to write the pore surfaces to.
    dot_density: int, optional
        density of facets for generating a 3D pore representation.
        The number controls the density of dots that will be used.
        A sphere of dots is placed on each centre determined in the
        Monte Carlo procedure. The actual number of dots written is
        controlled by ``dot_density`` and the ``sample`` level of the
        original analysis. ``dot_density`` should be set between 5
        (few dots per sphere) and 35 (many dots per sphere).
    no_water_color: str, optional
        Color of the surface where the pore radius is too tight for a
        water molecule.
    one_water_color: str, optional
        Color of the surface where the pore can fit one water molecule.
    double_water_color: str, optional
        Color of the surface where the radius is at least double the
        minimum radius for one water molecule.

    Returns
    -------
    str
        ``filename`` with the pore surfaces.
    """
    if self.sphpdbs is None or len(self.sphpdbs) == 0:
        raise ValueError('No sphpdb files to read. Try calling run()')
    frames = []
    # NOTE(review): self.frames holds frame *numbers* while sphpdbs is
    # indexed by frame *position*; this fancy index is only correct when
    # frames == [0, 1, ..., n_frames-1] (start=0, step=1) -- confirm.
    for i, sphpdb in zip(self.frames, self.sphpdbs[self.frames]):
        # Resolves to the module-level create_vmd_surface helper (the
        # method name only shadows it as a class attribute, not in the
        # global namespace used for this lookup).
        tmp_tri = create_vmd_surface(sphpdb=sphpdb,
                                     sph_process=self.exe['sph_process'],
                                     sos_triangle=self.exe['sos_triangle'],
                                     dot_density=dot_density)
        # Bucket triangles by the color sos_triangle assigned:
        # red / green / blue -> shapes[0] / shapes[1] / shapes[2].
        shapes = [[], [], []]
        with open(tmp_tri) as f:
            for line in f:
                if line.startswith('draw color'):
                    color = line.split()[-1].lower()
                    if color == 'red':
                        dest = shapes[0]
                    elif color == 'green':
                        dest = shapes[1]
                    elif color == 'blue':
                        dest = shapes[2]
                    else:
                        msg = 'Encountered unknown color {}'
                        raise ValueError(msg.format(color))
                if line.startswith('draw trinorm'):
                    # NOTE(review): str.strip removes a *character set*,
                    # not the literal prefix; this works while the
                    # coordinate text never starts/ends with one of the
                    # characters in 'draw trinorm' -- removeprefix would
                    # be safer.
                    line = line.strip('draw trinorm').strip()
                    dest.append('{{ {} }}'.format(line))
        try:
            os.unlink(tmp_tri)
        except OSError:
            pass
        # Assemble a Tcl nested-list literal of the three color groups.
        tri = '{ { ' + ' } { '.join(list(map(' '.join, shapes))) + ' } }'
        frames.append('set triangles({i}) '.format(i=i) + tri)
    trinorms = '\n'.join(frames)
    vmd_1 = vmd_script_array.format(no_water_color=no_water_color,
                                    one_water_color=one_water_color,
                                    double_water_color=double_water_color)
    vmd_text = vmd_1 + trinorms + vmd_script_function
    with open(filename, 'w') as f:
        f.write(vmd_text)
    return filename
def min_radius(self):
    """Return the minimum pore radius of every profile, keyed by frame.

    Returns
    -------
    numpy.ndarray
        One ``[q, min_radius]`` row per stored profile.

    Raises
    ------
    ValueError
        If :meth:`run` has not populated ``self.profiles`` yet.
    """
    if not self.profiles:
        raise ValueError('No profiles available. Try calling run()')
    rows = []
    for frame_key, profile in self.profiles.items():
        rows.append([frame_key, profile.radius.min()])
    return np.array(rows)
def min_radius(self):
    """Return the minimum radius over all profiles as a function of q"""
    # NOTE(review): this method is a byte-for-byte duplicate of the
    # ``min_radius`` definition immediately above it. The later ``def``
    # silently rebinds the same attribute name, so behaviour is unchanged,
    # but the redundant copy should be deleted.
    if not self.profiles:
        # self.profiles is filled in by run(); empty means it never ran
        raise ValueError('No profiles available. Try calling run()')
    return np.array([[q, p.radius.min()] for q, p in self.profiles.items()])
def delete_temporary_files(self):
    """Remove every tracked temporary file and reset the bookkeeping lists."""
    while self.tmp_files:
        path = self.tmp_files.pop()
        try:
            os.unlink(path)
        except OSError:
            # file already gone (or never written) -- nothing to clean up
            pass
    self.tmp_files = []
    self.outfiles = []
    self.sphpdbs = []
def __enter__(self):
    """Enter the context manager protocol, yielding the analysis object."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Clean up temporary files when the ``with`` block ends.

    Implicitly returns ``None``, so exceptions are never suppressed.
    """
    self.delete_temporary_files()
def _process_plot_kwargs(self, frames=None,
                         color=None, cmap='viridis',
                         linestyle='-'):
    """Resolve the frames, colors, and linestyles used for plotting.

    Parameters
    ----------
    frames: array-like, optional
        Frames to plot; ``None`` selects every analysed frame.
        Default: ``None``
    color: str or array-like, optional
        Explicit color(s) for the plot. When ``None``, colors are
        sampled from ``cmap``. Default: ``None``
    cmap: str, optional
        Name of a ``matplotlib.pyplot.cm`` colormap, used only when
        ``color`` is not given. Default: 'viridis'
    linestyle: str or array-like, optional
        Line style(s) cycled over the frames. Default: '-'

    Returns
    -------
    (array-like, array-like, array-like)
        ``(frames, colors, linestyles)``
    """
    if frames is None:
        frames = self.frames
    else:
        frames = util.asiterable(frames)
    if color is not None:
        # explicit colors: repeat them over however many frames we plot
        colors = itertools.cycle(util.asiterable(color))
    else:
        # map each frame number onto the colormap, normalized over the
        # selected frame range
        colormap = plt.cm.get_cmap(cmap)
        norm = matplotlib.colors.Normalize(vmin=min(frames),
                                           vmax=max(frames))
        colors = colormap(norm(frames))
    linestyles = itertools.cycle(util.asiterable(linestyle))
    return frames, colors, linestyles
def plot(self, frames=None,
color=None, cmap='viridis',
linestyle='-', y_shift=0.0,
label=True, ax=None,
| |
= request.form['title']
unsigned_credential["credentialSubject"]["description"] = request.form['description']
unsigned_credential["credentialSubject"]["startDate"] = request.form['start_date']
unsigned_credential["credentialSubject"]["endDate"] = request.form['end_date']
unsigned_credential["credentialSubject"]["skills"] = list()
for skill in request.form['skills'].split(',') :
unsigned_credential["credentialSubject"]["skills"].append(
{
"@type": "Skill",
"description": skill
})
unsigned_credential["credentialSubject"]['issuedBy']["logo"] = mode.ipfs_gateway + session['issuer_explore']['picture']
unsigned_credential["credentialSubject"]['issuedBy']["name"] = session['issuer_explore']['name']
unsigned_credential["credentialSubject"]['signatureLines']["name"] = ""
# update local issuer database
manager_username = ns.get_data_from_username(request.form['reviewer_username'] + '.' + session['credential_issuer_username'], mode)['referent']
credential = company.Credential(session['credential_issuer_username'], mode)
credential.add(session['username'],
request.form['reviewer_username'],
manager_username,
"drafted",
unsigned_credential["id"],
json.dumps(unsigned_credential, ensure_ascii=False),
session['reference'])
# send an email to reviewer for workflow
reviewer_email = ns.get_data_from_username(request.form['reviewer_username'] + '.' + session['credential_issuer_username'], mode)['email']
subject = _('You have received a request for professional experience assessment from ')+ session['name'] + _(' to review')
try :
Talao_message.messageHTML(subject, reviewer_email, 'request_certificate', {'name' : session['name'], 'link' : 'https://talao.co'}, mode)
except :
logging.error('email failed')
# send email to user
flash(_('Your request for an experience professional assessment has been registered for review.'), 'success')
# clean up and return
del session['select']
return redirect (mode.server + 'user/issuer_explore/?issuer_username=' + session['credential_issuer_username'])
def request_skill_credential(mode) :
    """Basic request for skill credential.

    Builds an unsigned ProfessionalSkillAssessment verifiable credential
    from the submitted form, records it as "drafted" in the local issuer
    database, notifies the reviewer by email (best effort) and redirects
    back to the issuer-explore page.
    """
    check_login()
    # load JSON-LD model for ProfessionalSkillAssessment
    # (context manager fixes the file-handle leak of json.load(open(...)))
    with open('./verifiable_credentials/ProfessionalSkillAssessment.jsonld', 'r') as f :
        unsigned_credential = json.load(f)
    # update credential with form data
    unsigned_credential["id"] = "urn:uuid:" + str(uuid.uuid1())
    unsigned_credential["credentialSubject"]["id"] = ns.get_did(session['workspace_contract'], mode)
    unsigned_credential["credentialSubject"]["familyName"] = session['personal']["lastname"]['claim_value']
    unsigned_credential["credentialSubject"]["givenName"] = session['personal']['firstname']['claim_value']
    unsigned_credential["credentialSubject"]["skills"] = [
        {
            "@type": "Skill",
            "description": skill
        }
        for skill in request.form['skills'].split(',')
    ]
    unsigned_credential["credentialSubject"]['issuedBy']["logo"] = mode.ipfs_gateway + session['issuer_explore']['picture']
    unsigned_credential["credentialSubject"]['issuedBy']["name"] = session['issuer_explore']['name']
    unsigned_credential["credentialSubject"]['signatureLines']["name"] = ""
    # look up the reviewer record once: both the referent (manager) and the
    # email below come from the same username lookup
    reviewer_data = ns.get_data_from_username(
        request.form['reviewer_username'] + '.' + session['credential_issuer_username'], mode)
    manager_username = reviewer_data['referent']
    # update local issuer database
    credential = company.Credential(session['credential_issuer_username'], mode)
    credential.add(session['username'],
                   request.form['reviewer_username'],
                   manager_username,
                   "drafted",
                   unsigned_credential["id"],
                   json.dumps(unsigned_credential, ensure_ascii=False),
                   session['reference'])
    # send an email to reviewer for workflow (best effort: a mail failure
    # must not abort the request, but we do want the traceback in the log)
    subject = _('You have received a request for a professional skill assessment from ')+ session['name'] + _(' to review')
    try :
        Talao_message.messageHTML(subject, reviewer_data['email'], 'request_certificate', {'name' : session['name'], 'link' : 'https://talao.co'}, mode)
    except Exception :
        logging.exception('email failed')
    # notify user in the UI
    flash(_('Your request for a skill professional assessment has been registered for review.'), 'success')
    # clean up and return
    del session['select']
    return redirect (mode.server + 'user/issuer_explore/?issuer_username=' + session['credential_issuer_username'])
def company_dashboard(mode) :
    """
    # @route /company/dashboard/

    Render the company credential dashboard. GET initializes the filters
    from the caller's role; POST re-renders with the submitted filters.
    """
    check_login()
    # credential rows: created, user_name, reviewer_name, issuer_name,
    # status, credential, id
    employee = company.Employee(session['host'], mode)
    issuer_select = ""
    for entry in employee.get_list('issuer', 'all') :
        short = entry['username'].split('.')[0]
        issuer_select += "<option value=" + short + ">" + short + "</option>"
    reviewer_select = ""
    for entry in employee.get_list('reviewer', 'all') :
        short = entry['username'].split('.')[0]
        reviewer_select += "<option value=" + short + ">" + short + "</option>"
    if request.method == 'GET' :
        role = session['role']
        # initial issuer/reviewer filters depend on the caller's role
        if role == 'reviewer' :
            issuer_query, reviewer_query = 'all', session['employee']
        elif role == 'issuer' :
            issuer_query, reviewer_query = session['employee'], 'all'
        else :
            issuer_query, reviewer_query = 'all', 'all'
        # initial credential-status checkboxes and status filter
        signed = drafted = reviewed = ''
        if role == 'issuer' :
            reviewed = 'checked'
            status = ('reviewed','','')
        elif role == 'reviewer' :
            drafted = 'checked'
            status = ('drafted','','')
        else :
            drafted = reviewed = 'checked'
            status = ('drafted', 'reviewed', '')
        # display dashboard
        credential_list = credential_list_html(session['host'], issuer_query, reviewer_query, status, mode)
        return render_template('./issuer/company_dashboard.html',
                               **session['menu'],
                               credential_list=credential_list,
                               drafted=drafted,
                               reviewed=reviewed,
                               signed=signed,
                               reviewer_select=reviewer_select,
                               manager_select=issuer_select)
    if request.method == 'POST' :
        # refresh the dashboard according to the submitted filters
        status = (request.form.get('draftedbox', ""),
                  request.form.get('reviewedbox', ""),
                  request.form.get('signedbox', ""))
        drafted = "checked" if request.form.get('draftedbox') else ""
        signed = "checked" if request.form.get('signedbox') else ""
        reviewed = "checked" if request.form.get('reviewedbox') else ""
        if session['role'] == 'reviewer' :
            issuer_query = 'all'
            reviewer_query = session['employee']
        else :
            issuer_query = request.form['issuer']
            reviewer_query = request.form['reviewer']
        credential_list = credential_list_html(session['host'], issuer_query, reviewer_query, status, mode)
        return render_template('./issuer/company_dashboard.html',
                               **session['menu'],
                               credential_list=credential_list,
                               drafted=drafted,
                               reviewed=reviewed,
                               signed=signed,
                               reviewer_select=reviewer_select,
                               manager_select=issuer_select)
def credential_list_html(host, issuer_username, reviewer_username, status, mode) :
    """Build the html list.

    Returns the table rows to display in the dashboard, in html.

    Parameters
    ----------
    host : str
        Company username owning the credentials.
    issuer_username, reviewer_username : str
        Filters for the credential query ('all' disables a filter).
    status : tuple
        Credential statuses to include, e.g. ('drafted', 'reviewed', '').
    mode : object
        Global configuration (server URL, etc.).

    Returns
    -------
    str
        Concatenated ``<tr>...</tr>`` rows.
    """
    check_login()
    credential = company.Credential(host, mode)
    mylist = credential.get(issuer_username, reviewer_username, status)
    credential_list = ""
    for mycredential in mylist :
        # parse the stored JSON once per row instead of once per field
        subject = json.loads(mycredential[5])['credentialSubject']
        # every branch builds the same resume link
        subject_link = mode.server + 'resume/?did=' + subject['id']
        subject_type = subject.get('type')
        if subject_type == "ProfessionalExperienceAssessment" :
            title = subject['title'][:20]
            description = subject['description'][:200] + "..."
            row_type = "ProfessionalExperienceAssessment"
            name = subject.get('givenName', "deprecated") + ' ' + subject.get('familyName', "deprecated")
        elif subject_type == "ProfessionalSkillAssessment" :
            title = "N/A"
            description = "N/A"
            row_type = "ProfessionalSkillAssessment"
            name = subject.get('givenName', "deprecated") + ' ' + subject.get('familyName', "deprecated")
        elif subject_type == "IdentityPass" :
            row_type = "IdentityPass"
            title = description = "N/A"
            name = subject['recipient']['givenName'] + ' ' + subject['recipient']['familyName']
        elif subject_type == "CertificateOfEmployment" :
            row_type = "CertificateOfEmployment"
            title = description = "N/A"
            name = subject['givenName'] + ' ' + subject['familyName']
        else :
            row_type = _("Not Supported")
            title = description = "N/A"
            name = 'N/A'
        # NOTE(review): name/title/description are interpolated into HTML
        # without escaping; they come from stored credentials, so consider
        # markupsafe.escape() if those values can ever be user-controlled.
        row = """<tr>
        <td><a href=/company/issue_credential_workflow?id=""" + mycredential[6] + """> """ + mycredential[6][:2] + '...' + mycredential[6][-2:] + """</a></td>
        <!-- <td><a href=""" + subject_link + """>""" + name + """</a></td> -->
        <td>""" + name + """</td>
        <td>""" + mycredential[7] + """</td>
        <td>""" + title + """...</td>
        <td>""" + description + """</td>
        <td>""" + mycredential[0][:10] + """</td>
        <td>""" + row_type + """</td>
        <td>""" + mycredential[2] + """</td>
        <td>""" + mycredential[3] + """ </td>
        <td>""" + mycredential[4] + """</td>
        </tr>"""
        credential_list += row
    return credential_list
def issue_credential_workflow(mode) :
"""
call = (created, user_name, reviewer_name, issuer_name, status, credential, id)
update = update_verifiable_credential(id, host_name, reviewer_username, issuer_username, status, credential, mode)
"""
check_login()
if request.method == 'GET' :
session['credential_id'] = request.args['id']
credential = company.Credential(session['host'], mode)
session['call'] = credential.get_by_id(session['credential_id'])
# credential cannot be updated if already signed
field = "disabled" if session['call'][4] == 'signed' or session['role'] in ['admin'] else ""
# credential is loaded as dict
my_credential = json.loads(session['call'][5])['credentialSubject']
# switch
if my_credential['type'] == "ProfessionalExperienceAssessment" :
skills_str = ""
for skill in my_credential['skills'] :
skills_str += skill['description'] + ','
reviewRecommendation, reviewDelivery, reviewSchedule, reviewCommunication = 0,1,2,3
for i in [0,1] :
if my_credential["review"][reviewRecommendation]["reviewBody"][i]["@language"] == session['language'] :
questionRecommendation = my_credential["review"][reviewRecommendation]["reviewBody"][i]['@value']
break
for i in [0,1] :
if my_credential["review"][reviewDelivery]["reviewBody"][i]["@language"] == session['language'] :
questionDelivery = my_credential["review"][reviewDelivery]["reviewBody"][i]['@value']
break
for i in [0,1] :
if my_credential["review"][reviewSchedule]["reviewBody"][i]["@language"] == session['language'] :
questionSchedule = my_credential["review"][reviewSchedule]["reviewBody"][i]['@value']
break
for i in [0,1] :
if my_credential["review"][reviewCommunication]["reviewBody"][i]["@language"] == session['language'] :
questionCommunication = my_credential["review"][reviewCommunication]["reviewBody"][i]['@value']
break
return render_template ('./issuer/issue_experience_credential_workflow.html',
credential_id=request.args['id'],
picturefile = mode.ipfs_gateway + session['picture'],
clipboard = mode.server + "board/?did=" + session['did'],
**my_credential,
recipient_name = my_credential["givenName"] + ' ' + my_credential["familyName"],
author_name = my_credential["issuedBy"]["name"],
signer_name = my_credential["signatureLines"]["name"],
scoreRecommendation = my_credential["review"][reviewRecommendation]["reviewRating"]["ratingValue"],
questionRecommendation = questionRecommendation,
scoreSchedule = my_credential["review"][reviewSchedule]["reviewRating"]["ratingValue"],
questionSchedule = questionSchedule,
scoreCommunication = my_credential["review"][reviewCommunication]["reviewRating"]["ratingValue"],
questionCommunication = questionCommunication,
scoreDelivery = my_credential["review"][reviewDelivery]["reviewRating"]["ratingValue"],
questionDelivery = questionDelivery,
skills_str= skills_str,
field= field)
elif my_credential['type'] == "ProfessionalSkillAssessment" :
skill_html = ""
for count,skill in enumerate(my_credential['skills']):
skill_count = 'skill_' + str(count)
skill_html += """
<div class="form-row">
<div class="col">
<div class="form-group">
<label><strong>""" + _('Skill') + """ : </strong>""" + skill['description'] + """</label>
<input class="form-control" placeholder='""" + _("Draft an assessment of this skill") + """' type="text" name='""" + skill_count + """' required>
</div>
</div>
</div>
"""
return render_template('./issuer/issue_skill_credential.html',
credential_id=request.args['id'],
picturefile = mode.ipfs_gateway + session['picture'],
reference= session['call'][6],
skill_html = skill_html,
signer_name = my_credential["signatureLines"]["name"],
givenName = my_credential.get("givenName"),
familyName = my_credential.get("familyName"),
clipboard = mode.server + "board/?did=" + session['did'],
#image = my_credential["recipient"].get("image"),
field= field)
elif my_credential['type'] == "IdentityPass" :
return render_template('./issuer/issue_identity_credential.html',
credential_id=request.args['id'],
picturefile = mode.ipfs_gateway + session['picture'],
reference= session['call'][6],
clipboard = mode.server + "board/?did=" + session['did'],
jobTitle = my_credential["recipient"].get("jobTitle"),
givenName = my_credential["recipient"].get("givenName"),
familyName = my_credential["recipient"].get("familyName"),
address = my_credential["recipient"].get("address"),
birthDate = my_credential["recipient"].get("birthDate"),
email = my_credential["recipient"].get("email"),
telephone = my_credential["recipient"].get("telephone"),
gender = my_credential["recipient"].get("gender"),
image = my_credential["recipient"].get("image"),
field= field)
elif my_credential['type'] == "CertificateOfEmployment" :
return render_template('./issuer/issue_work_credential.html',
credential_id=request.args['id'],
picturefile = mode.ipfs_gateway + session['picture'],
reference= session['call'][6],
clipboard = mode.server + "board/?did=" + session['did'],
jobTitle = my_credential.get("jobTitle"),
givenName = my_credential.get("givenName"),
familyName = my_credential.get("familyName"),
startDate = my_credential.get("startDate"),
employmentType = my_credential.get("employmentType"),
baseSalary = my_credential.get("baseSalary"),
field= field)
else :
flash(_('view not yet available.'), 'warning')
return redirect (mode.server +'company/dashboard/')
if request.method == 'POST' :
if request.form['exit'] == 'delete' :
# credential is removed from database
credential = company.Credential(session['host'], mode)
credential.delete(session['credential_id'])
del session['credential_id']
del session['call']
return redirect (mode.server +'company/dashboard/')
if request.form['exit'] == 'back' :
# | |
n(T-1) x d0 x d1 x ...)```,
this splits values into a TensorArray with T tensors.
TensorArray index t will be the subtensor of values with starting position
```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
and having size
```nt x d0 x d1 x ...```
Args:
handle: A `Tensor` of type `resource`. The handle to a TensorArray.
value: A `Tensor`. The concatenated tensor to write to the TensorArray.
lengths: A `Tensor` of type `int64`.
The vector of lengths, how to split the rows of value into the
TensorArray.
flow_in: A `Tensor` of type `float32`.
A float scalar that enforces proper chaining of operations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "TensorArraySplitV3", name,
tld.op_callbacks, handle, value, lengths, flow_in)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_array_split_v3_eager_fallback(
handle, value, lengths, flow_in, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TensorArraySplitV3", handle=handle, value=value, lengths=lengths,
flow_in=flow_in, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TensorArraySplitV3", _inputs_flat, _attrs, _result)
_result, = _result
return _result
# Expose the op under tf.raw_ops; to_raw_op wraps the generated function so
# it dispatches between the graph and eager paths.
TensorArraySplitV3 = tf_export("raw_ops.TensorArraySplitV3")(_ops.to_raw_op(tensor_array_split_v3))
def tensor_array_split_v3_eager_fallback(handle, value, lengths, flow_in, name, ctx):
  """Eager-mode fallback for TensorArraySplitV3 (machine-generated).

  Converts the inputs to tensors of the fixed dtypes the op expects,
  executes the op directly, and records a gradient when a tape is active.
  """
  # `value` is the only polymorphic input; infer its dtype attribute T.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx)
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, value, lengths, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArraySplitV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArraySplitV3", _inputs_flat, _attrs, _result)
  # Op has exactly one output; unwrap the singleton list.
  _result, = _result
  return _result
def tensor_array_unpack(handle, value, flow_in, name=None):
  r"""TODO: add doc.

  Args:
    handle: A `Tensor` of type mutable `string`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs (mutable string handle) cannot exist in eager mode.
    raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayUnpack", handle=handle, value=value, flow_in=flow_in,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayUnpack", _inputs_flat, _attrs, _result)
  # Single output op; unwrap the list.
  _result, = _result
  return _result

# Public raw-op wrapper registered under tf.raw_ops.
TensorArrayUnpack = tf_export("raw_ops.TensorArrayUnpack")(_ops.to_raw_op(tensor_array_unpack))
def tensor_array_unpack_eager_fallback(handle, value, flow_in, name, ctx):
  """Eager fallback stub: this ref-typed op can never execute eagerly."""
  message = "tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref."
  raise RuntimeError(message)
def tensor_array_v2(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None):
  r"""Deprecated. Use TensorArrayV3

  Args:
    size: A `Tensor` of type `int32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    dynamic_size: An optional `bool`. Defaults to `False`.
    clear_after_read: An optional `bool`. Defaults to `True`.
    tensor_array_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "TensorArrayV2", name,
        tld.op_callbacks, size, "dtype", dtype, "element_shape",
        element_shape, "dynamic_size", dynamic_size, "clear_after_read",
        clear_after_read, "tensor_array_name", tensor_array_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; try the slower eager fallback.
      pass
    try:
      return tensor_array_v2_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize attr defaults (generated pattern; the `None -> None` branch is
  # a no-op emitted by the code generator).
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayV2", size=size, dtype=dtype, element_shape=element_shape,
                         dynamic_size=dynamic_size,
                         clear_after_read=clear_after_read,
                         tensor_array_name=tensor_array_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op._get_attr_bool("dynamic_size"), "clear_after_read",
              _op._get_attr_bool("clear_after_read"), "tensor_array_name",
              _op.get_attr("tensor_array_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayV2", _inputs_flat, _attrs, _result)
  # Single output op; unwrap the list.
  _result, = _result
  return _result

# Public raw-op wrapper registered under tf.raw_ops.
TensorArrayV2 = tf_export("raw_ops.TensorArrayV2")(_ops.to_raw_op(tensor_array_v2))
def tensor_array_v2_eager_fallback(size, dtype, element_shape, dynamic_size, clear_after_read, tensor_array_name, name, ctx):
  """Eager-mode fallback for TensorArrayV2 (machine-generated).

  Normalizes all attrs, converts `size` to an int32 tensor, and executes
  the op directly, recording a gradient when a tape is active.
  """
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  _inputs_flat = [size]
  _attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
  dynamic_size, "clear_after_read", clear_after_read, "tensor_array_name",
  tensor_array_name)
  _result = _execute.execute(b"TensorArrayV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayV2", _inputs_flat, _attrs, _result)
  # Single output op; unwrap the list.
  _result, = _result
  return _result
# Structured return value of TensorArrayV3: a resource `handle` to the
# array plus the float32 `flow` scalar used to chain operations.
_TensorArrayV3Output = collections.namedtuple(
    "TensorArrayV3",
    ["handle", "flow"])
def tensor_array_v3(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None):
  r"""An array of Tensors of given size.

  Write data via Write and read via Read or Pack.

  Args:
    size: A `Tensor` of type `int32`. The size of the array.
    dtype: A `tf.DType`. The type of the elements on the tensor_array.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known. Used to
      validate the shapes of TensorArray elements. If this shape is not
      fully specified, gathering zero-size TensorArrays is an error.
    dynamic_size: An optional `bool`. Defaults to `False`.
      A boolean that determines whether writes to the TensorArray
      are allowed to grow the size. By default, this is not allowed.
    clear_after_read: An optional `bool`. Defaults to `True`.
      If true (default), Tensors in the TensorArray are cleared
      after being read. This disables multiple read semantics but allows early
      release of memory.
    identical_element_shapes: An optional `bool`. Defaults to `False`.
      If true (default is false), then all
      elements in the TensorArray will be expected to have identical shapes.
      This allows certain behaviors, like dynamically checking for
      consistent shapes on write, and being able to fill in properly
      shaped zero tensors on stack -- even if the element_shape attribute
      is not fully defined.
    tensor_array_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary tensor_array
      resource. Default value is the name of the 'TensorArray' op (which
      is guaranteed unique).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (handle, flow).

    handle: A `Tensor` of type `resource`.
    flow: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "TensorArrayV3", name,
        tld.op_callbacks, size, "dtype", dtype, "element_shape",
        element_shape, "dynamic_size", dynamic_size, "clear_after_read",
        clear_after_read, "identical_element_shapes",
        identical_element_shapes, "tensor_array_name", tensor_array_name)
      _result = _TensorArrayV3Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; try the slower eager fallback.
      pass
    try:
      return tensor_array_v3_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          identical_element_shapes=identical_element_shapes,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize attr defaults (generated pattern; the `None -> None` branch is
  # a no-op emitted by the code generator).
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if identical_element_shapes is None:
    identical_element_shapes = False
  identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayV3", size=size, dtype=dtype, element_shape=element_shape,
                         dynamic_size=dynamic_size,
                         clear_after_read=clear_after_read,
                         identical_element_shapes=identical_element_shapes,
                         tensor_array_name=tensor_array_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op._get_attr_bool("dynamic_size"), "clear_after_read",
              _op._get_attr_bool("clear_after_read"),
              "identical_element_shapes",
              _op._get_attr_bool("identical_element_shapes"),
              "tensor_array_name", _op.get_attr("tensor_array_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayV3", _inputs_flat, _attrs, _result)
  # Two outputs (handle, flow); wrap them in the namedtuple.
  _result = _TensorArrayV3Output._make(_result)
  return _result

# Public raw-op wrapper registered under tf.raw_ops.
TensorArrayV3 = tf_export("raw_ops.TensorArrayV3")(_ops.to_raw_op(tensor_array_v3))
def tensor_array_v3_eager_fallback(size, dtype, element_shape, dynamic_size, clear_after_read, identical_element_shapes, tensor_array_name, name, ctx):
  """Eager-mode fallback for TensorArrayV3 (machine-generated).

  Normalizes all attrs, converts `size` to an int32 tensor, executes the
  op, and wraps the two outputs in `_TensorArrayV3Output`.
  """
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if identical_element_shapes is None:
    identical_element_shapes = False
  identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  _inputs_flat = [size]
  _attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
  dynamic_size, "clear_after_read", clear_after_read,
  "identical_element_shapes", identical_element_shapes, "tensor_array_name",
  tensor_array_name)
  _result = _execute.execute(b"TensorArrayV3", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayV3", _inputs_flat, _attrs, _result)
  # Two outputs (handle, flow); wrap them in the namedtuple.
  _result = _TensorArrayV3Output._make(_result)
  return _result
def tensor_array_write(handle, index, value, flow_in, name=None):
r"""TODO: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.