input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*-coding: utf8-*-
"""symboldict module
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import functools
from importlib import import_module
import sys
from .version import __version__
import warnings
def deprecated(func):
    '''Decorator marking *func* as deprecated.

    A :class:`DeprecationWarning` is emitted each time the wrapped function
    is called; the call is then forwarded to *func* unchanged.

    :param func: the callable to mark as deprecated.
    :return: a wrapper with the same signature, name and docstring as *func*.

    ref:
    https://wiki.python.org/moin/PythonDecoratorLibrary#Generating_Deprecation_Warnings
    '''
    # BUG FIX: functools.wraps must be *called* with the wrapped function;
    # the bare ``@functools.wraps`` form returned a partial instead of the
    # wrapper, breaking every decorated function.
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)  # attribute the warning to the caller
        return func(*args, **kwargs)
    return new_func
class VoidValueError(ValueError):
    """ValueError subclass raised when no value is available for a Symbol."""
# Python < 3.4 has no stdlib ``enum`` module: fall back to a plain class so
# the Rule definition below still works, and accept the raw ints 0..3 as
# valid rules on that fallback.
if sys.version_info < (3, 4):
    Enum = object
    _isrule = (0, 1, 2, 3).__contains__
else:
    from enum import Enum
    # Rule is defined further down; the lambda binds the name late, so
    # referencing it here is safe.
    _isrule = lambda x: isinstance(x, Rule)
class Rule(Enum):
    """Enumerated rules for :meth:`SymbolControl.getvalue()`.

    The available values are

    Attributes:
        Rule.DONT_LOAD
        Rule.TRY_LOAD_ONCE
        Rule.TRY_LOAD_EACH
        Rule.FORCE_RELOAD
    """
    # Semantics as implemented by _getvalue() below:
    DONT_LOAD = 0       # never import; only return an already cached value
    TRY_LOAD_ONCE = 1   # import if needed, but never retry after a failure
    TRY_LOAD_EACH = 2   # import if needed, retrying after earlier failures
    FORCE_RELOAD = 3    # always re-run the import/attribute lookup
# Short module-private aliases for the Rule members, used in _getvalue().
_DONT = Rule.DONT_LOAD
_EACH = Rule.TRY_LOAD_EACH
_FORCE = Rule.FORCE_RELOAD
_ONCE = Rule.TRY_LOAD_ONCE
# str() keeps the separator a native str despite the unicode_literals import
# (relevant on Python 2).
_DOT = str('.')
class Symbol(object):
    """Symbol(*parts) -> new Symbol instance

    Args:
        parts(convertibles to str): A sequence of :class:`str` or objects
            convertible to :class:`str`. These strings are joined by :code:`'.'` to
            create the instance's path.

    Example:
        >>> Symbol('spam.ham', 'eggs')
        Symbol('spam.ham.eggs')
        >>> Symbol()
        Symbol('')
        >>> Symbol(None, 12)
        Symbol('None.12')

    This is the type of objects used as values in :class:`SymbolDict`
    instances. Their role is mainly to wrap a dot-separated path to a python
    object, which may exist or not, with a convenient interface.

    Example:
        >>> Symbol('os.path.isfile')
        >>> Symbol('telnetlib.Telnet')
        >>> Symbol('complex.conjugate')

    Defining an instance does *not* trigger an attempt to retrieve the
    indicated python object by importing modules or accessing attributes.
    This is the role of the :meth:`SymbolControl.getvalue` or
    :meth:`SymbolDict.__getattr__` methods.
    """
    # Slot layout. State is read/written ONLY through the module-level bound
    # descriptors (_readpath, _storpath, ...) defined after the class,
    # because __getattribute__ below hijacks every ordinary attribute access:
    #   _path: the dot-separated path string
    #   _has:  True when a fetched value is cached in _val
    #   _val:  the cached value when _has is True; otherwise a bool that is
    #          True iff a previous fetch attempt failed (see _getvalue)
    __slots__ = ("_path", "_has", "_val")

    def __init__(self, *parts):
        """x.__init__(...) initializes x; see help(type(x)) for signature"""
        # Coerce every part to str, drop empty parts, join with dots.
        parts = [str(x) for x in parts]
        s = _DOT.join(x for x in parts if x)
        _storpath(self, s)
        _storhas(self, False)
        _storval(self, False)  # no failed load

    def __getattribute__(self, attr):
        """Overridden attribute access creates a new :class:`Symbol`.

        Args:
            attr(str): new path element

        Example:
            >>> Symbol('spam').eggs
            Symbol('spam.eggs')
        """
        # Any attribute access (including would-be method names) extends the
        # path.  The internal path is read via the class descriptor directly
        # to bypass this very method.
        return Symbol(Symbol._path.__get__(self), attr)

    def __call__(self):
        """Calling a :class:`Symbol` instance wraps it into a :class:`SymbolControl` object.

        Returns:
            SymbolControl: a lightweight object wrapping the :class:`Symbol`.

        This allows to bypass the overloading of the dot operator to
        access some methods.

        Example:
            >>> s = Symbol('os.path.isfile')
            >>> s().getvalue()
            <function isfile at ...>
        """
        return SymbolControl(self)

    def __setattr__(self, attr, value):
        """Attribute setting is disabled for :class:`Symbol` instances.

        Raises:
            TypeError: this exception is always raised.

        Example:
            >>> s = Symbol('spam')
            >>> s.ham = 'eggs'
            Traceback ...
            TypeError: Attribute setting is disabled for Symbol instances
        """
        raise TypeError('Attribute setting is disabled for Symbol instances')

    def __str__(self):
        """Converting a :class:`Symbol` to :class:`str` returns the wrapped path.

        Returns:
            str: the string stored in the Symbol instance.

        Example:
            >>> s = Symbol('spam.ham').eggs
            >>> s
            Symbol('spam.ham.eggs')
            >>> str(s)
            'spam.ham.eggs'
        """
        return _readpath(self)

    def __repr__(self):
        # repr() of the path keeps the output a valid constructor expression.
        return "Symbol({})".format(repr(_readpath(self)))

    def __eq__(self, other):
        """Equality relation with another instance.

        A :class:`Symbol` :code:`x` is equal to a python object :code:`y` if
        they have the same type and the same path.

        Example:
            >>> x = Symbol('spam.ham')
            >>> y = Symbol('spam.ham')
            >>> x == y
            True
            >>> x == 'spam.ham'
            False

        The relations :code:`!=, <, >, <=, >=` are defined as well.
        """
        # _cid compares id(type(...)): equal only for same-typed operands.
        return (_cid(self) == _cid(other)) and (_readpath(self) == _readpath(other))

    def __ne__(self, other):
        return not (self == other)

    def __le__(self, other):
        return not (self > other)

    def __lt__(self, other):
        # Order primarily by type identity, then by path.  Ordering against
        # non-Symbol operands is therefore arbitrary but consistent within
        # a single run.
        return (_cid(self) < _cid(other)) or (
            (_cid(self) == _cid(other)) and (_readpath(self) < _readpath(other)))

    def __ge__(self, other):
        return not (self < other)

    def __gt__(self, other):
        return (_cid(self) > _cid(other)) or (
            (_cid(self) == _cid(other)) and (_readpath(self) > _readpath(other)))

    def __hash__(self):
        """Computes a hash value for the Symbol instance.

        Returns:
            int: the instance's hash value

        :class:`Symbol` instances are hashable objects which
        can be used as dictionary keys or as set elements.

        Example:
            >>> S = set([Symbol('spam.ham'), Symbol('eggs')])
            >>> Symbol('eggs') in S
            True
        """
        # Consistent with __eq__: hash over (type identity, path).
        return hash((_cid(self), _readpath(self)))
# Bound slot-descriptor accessors for Symbol's internal state.  They are
# required because Symbol.__getattribute__ turns *every* attribute access on
# an instance into a new Symbol, so ordinary ``self._path`` reads would not
# reach the slots.
_readpath = Symbol._path.__get__
_storpath = Symbol._path.__set__
_readhas = Symbol._has.__get__
_storhas = Symbol._has.__set__
_readval = Symbol._val.__get__
_storval = Symbol._val.__set__


def _cid(obj):
    # Identity of the object's concrete type, used by Symbol comparisons;
    # ``obj.__class__`` would be hijacked by Symbol.__getattribute__.
    return id(type(obj))  # cannot use .__class__
symbol = Symbol()
"""An instance of :class:`Symbol` with empty path.
Used to build other Symbols by the means of attribute access.
Example:
>>> symbol.spam.ham
Symbol('spam.ham')
"""
def _getvalue(symb, rule):
    """Attempts to return the python object referenced symbolically by this instance.

    Args:
        symb(Symbol): the symbol whose value is requested.
        rule(Rule): a rule specifying how to obtain the object's value.

    Returns:
        any: a python object referenced by this instance,
            if such a value can be found.

    Raises:
        Exception
            met while trying to
            obtain the value when it does not exist.

    see :meth:`SymbolControl.getvalue()` for description.
    """
    # Decision table (state on the left, action per rule):
    #                   value cached   never fetched   previous fetch failed
    #   TRY_LOAD_ONCE   return cache   fetch           raise VoidValueError
    #   TRY_LOAD_EACH   return cache   fetch           fetch (retry)
    #   DONT_LOAD       return cache   raise           raise
    #   FORCE_RELOAD    fetch          fetch           fetch
    if _readhas(symb):
        if rule is _ONCE:
            return _readval(symb)
        elif rule is _DONT or rule is _EACH:
            return _readval(symb)
        # else fetch (FORCE_RELOAD falls through)
    elif (rule is _ONCE and _readval(symb)) or (rule is _DONT):
        # _val doubles as the "previous fetch failed" flag when nothing is
        # cached (see the fetch-failure branch below).
        raise VoidValueError
        # else fetch
    # fetch starts here
    if not _isrule(rule):  # check only when load is needed
        raise TypeError(('Need symboldict.Rule,', type(rule), 'found'))
    try:
        # Resolve the dotted path: import the first component (or look it up
        # among the builtins), then walk the remaining attributes.
        L = _readpath(symb).split('.')
        acc = L[0]
        try:
            # may raise ValueError if s is empty string
            v = import_module(acc)
        except ImportError:
            # this section may raise AttributeError for example
            # NOTE(review): assumes __builtins__ is a dict, which holds for
            # imported modules (in __main__ it is the builtins module) --
            # confirm this module is never executed as a script.
            if acc in __builtins__:
                v = __builtins__[acc]
            else:
                raise
            for attr in L[1:]:
                v = getattr(v, attr)
        else:
            # First component imported as a module: walk attributes, falling
            # back to importing "pkg.sub" when an attribute is missing.
            for attr in L[1:]:
                acc = acc + _DOT + attr
                try:
                    v = getattr(v, attr)
                except AttributeError:
                    v = import_module(acc)
    except Exception:
        # Record the failure so TRY_LOAD_ONCE will not retry.
        _storhas(symb, False)
        _storval(symb, True)  # FETCH FAILED
        raise
    else:
        # Cache the freshly fetched value.
        _storhas(symb, True)
        _storval(symb, v)
    return v
class SymbolControl(object):
"""SymbolControl(symb) -> new SymbolControl instance
Args:
symb(Symbol): a symbol referenced by the SymbolControl object
This is a class of lightweight wrappers around :class:`Symbol` instances
used to bypass the overriding of the dot operator in this class.
SymbolControl objects are returned by the :meth:`Symbol.__call__()`
method. Their main purpose is to hold methods to manipulate Symbols.
Example:
>>> s = Symbol('os.path.isfile')
>>> s()
<symboldict.SymbolControl object ...>
>>> s().hasvalue()
True
>>> s().path()
'os.path.isfile'
>>> s().getvalue()
<function isfile ...>
"""
__slots__ = ('__symb',)
    def __init__(self, symb):
        """x.__init__(...) initializes x; see help(type(x)) for signature"""
        # symb: the Symbol this controller wraps; stored name-mangled so the
        # wrapped Symbol's own attribute machinery is never involved.
        self.__symb = symb
    def hasvalue(self, rule=Rule.TRY_LOAD_ONCE):
        """Returns a boolean indicating if a value is available for the python object referenced symbolically by this instance.

        Args:
            rule(Rule): a rule specifying how to obtain the object's value.
                It defaults to ``Rule.TRY_LOAD_ONCE``.

        Returns:
            bool: a boolean indicating if a value is available for this instance.

        This method returns True if the corresponding call to :meth:`getvalue()`
        would succeed, and returns False if the call to :meth:`getvalue()` would
        fail. In any case, it does not raise an exception.

        The rule argument has the same meaning as in :meth:`Symbol.getvalue()`.

        Example:
            >>> a = Symbol('wave.Error')
            >>> a().hasvalue()
            True
            >>> b = Symbol('spam.ham')
            >>> b().hasvalue()
            False
        """
        # Broad Exception catch is deliberate: this predicate must never
        # raise; any failure in _getvalue simply means "no value".
        try:
            _getvalue(self.__symb, rule)
        except Exception:
            return False
        else:
            return True
def getvalue(self, rule=Rule.TRY_LOAD_ONCE):
"""Attempts to return the python object referenced symbolically by this instance.
Args:
rule(Rule): a rule specifying how to obtain the object's value.
It defaults to ``Rule.TRY_LOAD_ONCE``.
Returns:
any: a python object referenced by this instance,
if such a value can be found.
Raises:
Exception
met while trying to
obtain the value when it does not exist.
This method tries to obtain a value by importing modules and taking
attributes according to the dotted path of the contained :class:`Symbol`
instance. In this path, the word before the first dot can be the
name of an importable module or that of a builtin python object
in the `__builtins__` dictionnary.
If the object can not be found, an exception is raised.
Example:
>>> a = Symbol('wave.Error')
>>> a().getvalue()
<class 'wave.Error'>
The `rule` parameter can be used to specify the policy
with respect | |
<reponame>CopticScriptorium/gitdox
#!/usr/bin/python
# -*- coding: utf-8 -*-
#print("Content-type:text/html\r\n\r\n")
from six import iteritems
import cgi, cgitb
import os, shutil
import sys, traceback
from modules.logintools import login
import urllib
from modules.gitdox_sql import *
from modules.gitdox_git import *
from modules.configobj import ConfigObj
import requests
from requests.auth import HTTPBasicAuth
import platform, re
from paths import ether_url, get_menu, get_nlp_credentials
from modules.ether import make_spreadsheet, delete_spreadsheet, sheet_exists, get_socialcalc, ether_to_sgml, \
build_meta_tag, get_ether_stylesheets, get_file_list, postprocess_sgml
from modules.renderer import render
import modules.redis_cache as cache
# Support IIS site prefix on Windows
if platform.system() == "Windows":
    prefix = "transc\\"
else:
    prefix = ""

# Read configuration from users/config.ini next to this script.
scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
userdir = scriptpath + "users" + os.sep
templatedir = scriptpath + "templates" + os.sep
config = ConfigObj(userdir + 'config.ini')
project = config["project"]
editor_help_link = config["editor_help_link"]

# Captions and API URLs for NLP buttons
xml_nlp_button = config["xml_nlp_button"]
spreadsheet_nlp_button = config["spreadsheet_nlp_button"]
xml_nlp_api = config["xml_nlp_api"]
spreadsheet_nlp_api = config["spreadsheet_nlp_api"]

# GitHub two-factor auth code; populated per request in load_page().
code_2fa = None
def harvest_meta(sgml):
    """
    Get metadata key value pairs from <meta> element in imported SGML file

    :param sgml: TT SGML as string
    :return: dictionary of key value pairs (empty if no leading <meta> tag)
    """
    sgml = sgml.replace("\r", "").strip()
    meta = {}
    if not sgml.startswith("<meta "):
        return meta
    tag_match = re.search(r'<meta ([^\n]*)>', sgml)
    if tag_match is None:  # malformed/unterminated meta tag: nothing to harvest
        return meta
    metatag = tag_match.group(1)
    for key, value in re.findall(r'([^ =>]+?)="([^"]+)"', metatag):
        # BUG FIX: undo the XML escaping of angle brackets in attribute
        # values; the previous .replace("<","<") calls were no-ops.
        meta[key.strip()] = value.strip().replace("&lt;", "<").replace("&gt;", ">")
    return meta
def serialize_file(text_content, file_name):
    """Write *text_content* to *file_name* under the platform prefix dir."""
    with open(prefix + file_name, 'w') as out_handle:
        out_handle.write(text_content)  # .encode("utf8"))
def get_user_list():
    """Return user names from the .ini files in the users directory.

    Special entries (admin, default, config) are excluded and the ``.ini``
    extension is stripped from each name.
    """
    # Removed the unused ``user_list = []`` local that was never populated.
    scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
    userdir = scriptpath + "users" + os.sep
    return get_file_list(userdir, "ini", forbidden=["admin", "default", "config"], hide_extension=True)
def load_page(user, admin, theform):
    """Handle one editor-page request: apply form edits to the DB, run any
    requested NLP / git-push / upload actions, and render the editor template.

    :param user: logged-in user name ("demo" is treated as read-only throughout)
    :param admin: admin level as a string ("0".."3")
    :param theform: CGI form object (FieldStorage-like) with submitted values
    :return: rendered HTML for the "editor" template
    """
    global ether_url
    global code_2fa

    # Optional GitHub two-factor code supplied with the request.
    if theform.getvalue("2fa"):
        code_2fa = theform.getvalue("2fa")
    else:
        code_2fa = ""

    max_id = generic_query("SELECT MAX(id) AS max_id FROM docs","")[0][0]
    if not max_id:  # This is for the initial case after init db
        max_id = 0

    # Defaults used until a document is selected/loaded.
    text_content = ""
    repo_name = ""
    corpus = ""
    status = ""
    assignee = ""
    mode = "xml"
    schema = ""
    doc_id = ""  # Should only remain so if someone navigated directly to editor.py
    docname = ""
    old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema = ["", "", "", "", "", "", ""]

    # Only admins get git credentials for pushing.
    if int(admin) > 0:
        git_username, git_token, git_2fa = get_git_credentials(user, admin, code_2fa)
    else:
        git_username, git_token, git_2fa = (None, None, None)

    # dict of variables we'll need to render the html
    render_data = {}

    if theform.getvalue('id'):
        doc_id = theform.getvalue('id')
        if int(doc_id) > int(max_id):
            # Creating new doc case, assign some default values
            docname = "new_document"
            repo_name = "account/repo_name"
            status = "editing"
            assignee = "default_user"
            corpus = "default_corpus"
            schema = ""
            text_content = ""
            # If one of the four forms is edited or we're cloning a doc, then we create the doc,
            # otherwise nothing happens (user cannot fill in nothing and create the doc)
            if theform.getvalue('edit_docname') and user != "demo":
                # NOTE(review): unlike the edit_filename branch below, docname is
                # never reassigned from the form here, so the inner test can
                # never be True -- looks like a lost assignment; confirm upstream.
                if docname != 'new_document':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_docname(doc_id, docname)
            if theform.getvalue('edit_filename') and user != "demo":
                repo_name = theform.getvalue('edit_filename')
                if repo_name != 'account/repo_name':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_filename(doc_id, repo_name)
            if theform.getvalue('edit_corpusname') and user != "demo":
                corpus = theform.getvalue('edit_corpusname')
                if corpus != 'default_corpus':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_corpus(doc_id, corpus)
            if theform.getvalue('edit_status') and user != "demo":
                status = theform.getvalue('edit_status')
                if status != 'editing':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_status(doc_id, status)
            if theform.getvalue('edit_assignee') and user != "demo":
                assignee = theform.getvalue('edit_assignee')
                if assignee != "default_user":
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_assignee(doc_id, assignee)
            # cloning metadata from an existing doc into a new doc
            if theform.getvalue('source_doc'):
                source_meta = get_doc_meta(theform.getvalue('source_doc'))
                if doc_id > max_id:
                    create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                    max_id = doc_id
                for meta in source_meta:
                    m_key, m_val = meta[2:4]
                    # .decode() implies the DB layer returns byte strings (Python 2).
                    save_meta(int(doc_id), m_key.decode("utf8"), m_val.decode("utf8"))
                cache.invalidate_by_doc(doc_id, "meta")
        else:
            # Get previous values from DB
            old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema = get_doc_info(doc_id)
            # Assume new values are same, overwrite with different form values and update DB if new values found
            docname, corpus, repo_name, status, assignee, mode, schema = old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema
            docname = old_docname
            # Handle switch to spreadsheet mode if NLP spreadsheet service is called
            if theform.getvalue('nlp_spreadsheet') == "do_nlp_spreadsheet" and mode == "xml" and user != "demo":
                data_to_process = generic_query("SELECT content FROM docs WHERE id=?",(doc_id,))[0][0]
                api_call = spreadsheet_nlp_api
                if api_call != "":
                    nlp_user, nlp_password = get_nlp_credentials()
                    data = {"data":data_to_process, "lb":"line", "format":"sgml_no_parse"}
                    resp = requests.post(api_call, data, auth=HTTPBasicAuth(nlp_user,nlp_password))
                    sgml = resp.text.encode("utf8")
                    postproc = config["nlp_postprocessing"] if "nlp_postprocessing" in config else None
                    sgml = postprocess_sgml(sgml,postproc)
                else:
                    # No NLP service configured: push the raw content to ether.
                    sgml = data_to_process.encode("utf8")
                out, err = make_spreadsheet(sgml, ether_url + "_/gd_" + corpus + "_" + docname, "sgml")
                mode = "ether"
            # handle copying metadata
            if theform.getvalue('source_doc'):
                source_meta = get_doc_meta(theform.getvalue('source_doc'))
                existing_meta_keys = [x[2] for x in get_doc_meta(doc_id)]
                # don't overwrite existing keys
                meta_to_write = [x for x in source_meta if x[2] not in existing_meta_keys]
                for meta in meta_to_write:
                    m_key, m_val = meta[2], meta[3]
                    save_meta(int(doc_id), m_key, m_val)
                cache.invalidate_by_doc(doc_id, "meta")
            if theform.getvalue('edit_docname'):
                docname = theform.getvalue('edit_docname')
            elif old_docname != "":
                docname = old_docname
            if theform.getvalue('edit_corpusname'):
                corpus = theform.getvalue('edit_corpusname')
            elif old_corpus != "":
                corpus = old_corpus

    if theform.getvalue('id'):
        if int(doc_id) <= int(max_id):
            # After clicking edit in landing page, editing existing doc case, get the values from the db.
            # pull the content from db to be displayed in the editor window.
            if theform.getvalue('edit_docname'):
                docname = theform.getvalue('edit_docname')
                if docname != old_docname and user != "demo":
                    update_docname(doc_id,docname)
            if theform.getvalue('edit_filename'):
                repo_name=theform.getvalue('edit_filename')
                if repo_name != old_repo and user != "demo":
                    update_filename(doc_id,repo_name)
            if theform.getvalue('edit_corpusname'):
                corpus = theform.getvalue('edit_corpusname')
                if corpus != old_corpus and user != "demo":
                    update_corpus(doc_id,corpus)
            if theform.getvalue('edit_status'):
                status = theform.getvalue('edit_status')
                if status != old_status and user != "demo":
                    update_status(doc_id,status)
            if theform.getvalue('edit_assignee'):
                assignee = theform.getvalue('edit_assignee')
                if assignee != old_assignee and user != "demo":
                    update_assignee(doc_id,assignee)
            if theform.getvalue('edit_mode'):
                mode = theform.getvalue('edit_mode')
                if mode != old_mode and user != "demo":
                    update_mode(doc_id,mode)
            if theform.getvalue('nlp_spreadsheet') == "do_nlp_spreadsheet":  # mode has been changed to spreadsheet via NLP
                update_mode(doc_id, "ether")
                mode = "ether"
            # Doc or corpus renamed: migrate the EtherCalc sheet to the new name.
            if old_docname != docname or old_corpus != corpus:
                old_sheet_name = "gd" + "_" + old_corpus + "_" + old_docname
                if sheet_exists(ether_url, old_sheet_name):  # Check if there is an ether sheet to copy
                    old_socialcalc = get_socialcalc(ether_url, old_sheet_name)
                    out, err = make_spreadsheet(old_socialcalc, ether_url + "_/gd_" + corpus + "_" + docname, "socialcalc")
                    if out == "OK":
                        delete_spreadsheet(ether_url,old_sheet_name)
            text_content = generic_query("SELECT content FROM docs WHERE id=?",(doc_id,))[0][0]

    # In the case of reloading after hitting 'save', either create new doc into db, or update db
    # CodeMirror sends the form with its code content in it before 'save' so we just display it again
    if theform.getvalue('code'):
        text_content = theform.getvalue('code')
        text_content = text_content.replace("\r","")
        # NOTE(review): the replacement string below looks garbled in this copy --
        # per the trailing comment it should presumably be the escaped ampersand
        # entity, not a bare '&' (which makes this a no-op); confirm upstream.
        text_content = re.sub(r'&(?!amp;)',r'&',text_content)  # Escape unescaped XML &
        text_content = unicode(text_content.decode("utf8"))  # Python 2 only: bytes -> unicode
        if user != "demo":
            if int(doc_id)>int(max_id):
                create_document(doc_id, docname,corpus,status,assignee,repo_name,text_content)
            else:
                save_changes(doc_id,text_content)
                cache.invalidate_by_doc(doc_id, "xml")

    git_status=False
    commit_message = ""
    if theform.getvalue('commit_msg'):
        commit_message = theform.getvalue('commit_msg')

    # Serialize the document and push it to GitHub on request.
    if theform.getvalue('push_git') == "push_git":
        repo_name = generic_query("SELECT filename FROM docs WHERE id=?", (doc_id,))[0][0]
        file_name = generic_query("SELECT name FROM docs WHERE id=?", (doc_id,))[0][0]
        repo_info = repo_name.split('/')
        git_account, git_repo = repo_info[0], repo_info[1]
        if len(repo_info) > 2:
            subdir = '/'.join(repo_info[2:]) + "/"
        else:
            subdir = ""
        # The user will indicate the subdir in the repo_name stored in the db.
        # Therefore, a file may be associated with the target repo subdir zangsir/coptic-xml-tool/uploaded_commits,
        # and that is fine, but we will need to make this uploaded_commits subdir first to create our file.
        if not os.path.isdir(prefix + subdir) and subdir != "":
            dirs = subdir.split(os.sep)[:-1]
            path_so_far = ""
            for dir in dirs:
                if not os.path.isdir(prefix + path_so_far + dir + os.sep):
                    os.mkdir(prefix + path_so_far + dir + os.sep, 0755)  # NOTE: 0755 octal literal = Python 2 syntax
                path_so_far += dir + os.sep
        if mode == "xml":
            text_content = generic_query("SELECT content FROM docs WHERE id=?", (doc_id,))[0][0]
            serializable_content = build_meta_tag(doc_id) + text_content.strip() + "\n</meta>\n"
            serializable_content = serializable_content.encode('utf8')
            file_name = file_name.replace(" ","_") + ".xml"
        else:  # (mode == "ether")
            text_content = ether_to_sgml(get_socialcalc(ether_url, "gd" + "_" + corpus + "_" + docname),doc_id)
            serializable_content = text_content
            file_name = file_name.replace(" ","_") + "_ether.sgml"
        saved_file = subdir + file_name
        serialize_file(serializable_content, saved_file)
        git_status = push_update_to_git(git_username, git_token, saved_file, git_account, git_repo, commit_message)
        # File system cleanup
        if subdir == "":
            # Delete a file
            os.remove(prefix+file_name)
        else:
            # Delete a subdirectory
            shutil.rmtree(prefix+subdir)

    # Run the XML NLP service over the current content if requested.
    if theform.getvalue('nlp_xml') == "do_nlp_xml" and mode == "xml":
        api_call = xml_nlp_api
        if api_call != "":
            nlp_user, nlp_password = get_nlp_credentials()
            data = {"data":text_content, "format":"pipes"}
            resp = requests.post(api_call, data, auth=HTTPBasicAuth(nlp_user,nlp_password))
            text_content=resp.text

    # Editing options
    # Docname
    # Filename
    status_list = open(prefix+"status.tab").read().replace("\r","").split("\n")
    render_data['status_options'] = [{'text': x, 'selected': x == status} for x in status_list]
    render_data['assignee_options'] = [{'text': x, 'selected': x == assignee} for x in get_user_list()]
    render_data['mode_options'] = [{'text': x, 'selected': x == mode} for x in ["xml", "ether"]]
    render_data['nlp_service'] = {'xml_button_html': xml_nlp_button.decode("utf8"),
                                  'spreadsheet_button_html': spreadsheet_nlp_button.decode("utf8"),
                                  'disabled': user == "demo" or mode == "ether"}
    render_data['git_2fa'] = git_2fa == "true"
    if git_status:
        # NOTE(review): stripping angle brackets from the git response may be a
        # garbled HTML-escaping call in this copy -- confirm upstream intent.
        render_data['git_commit_response'] = git_status.replace('<','').replace('>','')

    # prepare embedded editor html
    if mode == "ether":
        render_data['ether_mode'] = True
        ether_url += "gd_" + corpus + "_" + docname
        render_data['ether_url'] = ether_url
        render_data['ether_stylesheets'] = get_ether_stylesheets()
        if "file" in theform and user != "demo":
            fileitem = theform["file"]
            if len(fileitem.filename) > 0:
                # strip leading path from file name to avoid directory traversal attacks
                fn = os.path.basename(fileitem.filename)
                if fn.endswith(".xls") or fn.endswith(".xlsx"):
                    # NOTE(review): the hard-coded URL below appears redacted/garbled
                    # in this copy; presumably it should be built from ether_url --
                    # confirm against the upstream source.
                    make_spreadsheet(fileitem.file.read(),"https://etheruser:<EMAIL>pass<EMAIL>/ethercalc/_/gd_" + corpus + "_" + docname,"excel")
                else:
                    sgml = fileitem.file.read()
                    meta_key_val = harvest_meta(sgml)
                    # NOTE(review): same redacted/garbled URL concern as above.
                    make_spreadsheet(sgml,"https://etheruser:etherpass@<EMAIL>.georget<EMAIL>.<EMAIL>/ethercalc/_/gd_" + corpus + "_" + docname)
                    for (key, value) in iteritems(meta_key_val):
                        key = key.replace("@","_")
                        save_meta(int(doc_id),key.decode("utf8"),value.decode("utf8"))
                    cache.invalidate_by_doc(doc_id, "meta")
    else:
        render_data['ether_mode'] = False

    # stop here if no doc selected
    if doc_id:
        render_data['doc_is_selected'] = len(doc_id) != 0
    else:
        return render("editor", render_data)

    render_data['id'] = doc_id
    render_data['mode'] = mode
    render_data['schema'] = schema
    render_data['docname'] = docname
    render_data['corpusname'] = corpus
    render_data['text_content'] = text_content
    render_data['repo'] = repo_name
    render_data["admin_gt_zero"] = int(admin) > 0
    render_data["admin_eq_three"] = admin == "3"

    # handle clone meta button, and allow github pushing
    if int(admin) > 0:
        doc_list = generic_query("SELECT id,corpus,name,status,assignee_username,mode FROM docs ORDER BY corpus, name COLLATE NOCASE",())
        render_data["docs"] = []
        for doc in doc_list:
            doc_vars = {}
            doc_vars["id"] = str(doc[0])
            doc_vars["corpus"] = doc[1]
            doc_vars["name"] = doc[2]
            render_data['docs'].append(doc_vars)

    render_data["can_save"] = not (int(admin) < 3)
    render_data["editor_help_link_html"] = editor_help_link
    render_data["first_load"] = len(theform.keys()) == 1
    return render("editor", render_data)
def open_main_server():
thisscript = os.environ.get('SCRIPT_NAME', '')
action | |
from ..base import OptionsGroup
from ..exceptions import ConfigurationError
from ..typehints import Strlist, Strint
from ..utils import KeyValue
class MasterProcess(OptionsGroup):
"""Master process is a separate process offering mentoring capabilities
for other processes. Only one master process per uWSGI instance.
uWSGI's built-in prefork+threading multi-worker management mode,
activated by flicking the master switch on.
.. note:: For all practical serving deployments it's not really a good idea not to use master mode.
"""
    def set_basic_params(
            self,
            enable: bool = None,
            name: str = None,
            no_orphans: bool = None,
            as_root: bool = None,
            subproc_check_interval: int = None,
            fifo_file: str = None
    ):
        """Sets the basic master process options.

        :param enable: Enable uWSGI master process.

        :param name: Set master process name to given value.

        :param no_orphans: Automatically kill workers if master dies (can be dangerous for availability).

        :param as_root: Leave master process running as root.

        :param subproc_check_interval: Set the interval (in seconds) of master checks. Default: 1
            The master process makes a scan of subprocesses, etc. every N seconds.

            .. warning:: You can increase this time if you need to, but it's DISCOURAGED.

        :param fifo_file: Enables the master FIFO.

            .. note:: Placeholders can be used to build paths, e.g.: {project_runtime_dir}.fifo
                See ``Section.project_name`` and ``Section.runtime_dir``.

            Instead of signals, you can tell the master to create a UNIX named pipe (FIFO)
            that you may use to issue commands to the master.

            Up to 10 different FIFO files supported. By default the first specified is bound (mapped as '0').

            * http://uwsgi.readthedocs.io/en/latest/MasterFIFO.html#the-master-fifo

            .. note:: Since 1.9.17

        """
        self._set('master', enable, cast=bool)
        self._set('procname-master', name)
        self._set('no-orphans', no_orphans)
        self._set('master-as-root', as_root)
        self._set('check-interval', subproc_check_interval)
        # Placeholders (e.g. {project_runtime_dir}) are expanded before the
        # option is stored; multi=True because several FIFOs may be declared.
        self._set('master-fifo', self._section.replace_placeholders(fifo_file), multi=True)

        # Returning the section allows fluent chaining of option groups.
        return self._section
    def set_exit_events(self, no_workers: bool = None, idle: bool = None, reload: bool = None, sig_term: bool = None):
        """Do exit on certain events.

        :param bool no_workers: Shutdown uWSGI when no workers are running.

        :param bool idle: Shutdown uWSGI when idle.

        :param bool reload: Force exit even if a reload is requested.

        :param bool sig_term: Exit on SIGTERM instead of brutal workers reload.

            .. note:: Before 2.1 SIGTERM reloaded the stack while SIGINT/SIGQUIT shut it down.

        """
        self._set('die-on-no-workers', no_workers, cast=bool)
        self._set('exit-on-reload', reload, cast=bool)
        self._set('die-on-term', sig_term, cast=bool)
        # The idle flag maps to 'die-on-idle', which set_idle_params() owns.
        self.set_idle_params(exit=idle)

        return self._section
    def set_exception_handling_params(
            self,
            handler: Strlist = None,
            catch: bool = None,
            no_write_exception: bool = None
    ):
        """Exception handling related params.

        :param handler: Register one or more exception handling C-functions.

        :param catch: Catch exceptions and report them as http output (including stack trace and env params).

            .. warning:: Use only for testing purposes.

        :param no_write_exception: Disable exception generation on write()/writev().

            .. note:: This can be combined with ``logging.set_filters(write_errors=False, sigpipe=False)``.

            .. note: Currently available for Python.

        """
        # multi=True: several handlers may be registered.
        self._set('exception-handler', handler, multi=True)
        self._set('catch-exceptions', catch, cast=bool)
        self._set('disable-write-exception', no_write_exception, cast=bool)

        return self._section
    def set_idle_params(self, timeout: int = None, exit: bool = None):
        """Activate idle mode - put uWSGI in cheap mode after inactivity timeout.

        :param timeout: Inactivity timeout in seconds.

        :param exit: Shutdown uWSGI when idle.

        """
        self._set('idle', timeout)
        self._set('die-on-idle', exit, cast=bool)

        return self._section
    def set_reload_params(self, mercy: int = None, exit: bool = None):
        """Set reload related params.

        :param mercy: Set the maximum time (in seconds) we wait
            for workers and other processes to die during reload/shutdown.

        :param exit: Force exit even if a reload is requested.

        """
        self._set('reload-mercy', mercy)
        # The exit flag maps to 'exit-on-reload', owned by set_exit_events().
        self.set_exit_events(reload=exit)

        return self._section
    def add_cron_task(
            self,
            command: str,
            weekday: Strint = None,
            month: Strint = None,
            day: Strint = None,
            hour: Strint = None,
            minute: Strint = None,
            legion: str = None,
            unique: bool = None,
            harakiri: int = None
    ):
        """Adds a cron task running the given command on the given schedule.

        http://uwsgi.readthedocs.io/en/latest/Cron.html

        HINTS:
            * Use negative values to say `every`:
                hour=-3 stands for `every 3 hours`

            * Use - (minus) to make interval:
                minute='13-18' stands for `from minute 13 to 18`

        .. note:: We use cron2 option available since 1.9.11.

        :param command: Command to execute on schedule (with or without path).

        :param weekday: Day of a the week number. Defaults to `each`.
            0 - Sunday  1 - Monday  2 - Tuesday  3 - Wednesday
            4 - Thursday  5 - Friday  6 - Saturday

        :param month: Month number 1-12. Defaults to `each`.

        :param day: Day of the month number 1-31. Defaults to `each`.

        :param hour: Hour 0-23. Defaults to `each`.

        :param minute: Minute 0-59. Defaults to `each`.

        :param legion: Set legion (cluster) name to use this cron command against.
            Such commands are only executed by legion lord node.

        :param unique: Marks command as unique. Default to not unique.
            Some commands can take a long time to finish or just hang doing their thing.
            Sometimes this is okay, but there are also cases when running multiple instances
            of the same command can be dangerous.

        :param harakiri: Enforce a time limit (in seconds) on executed commands.
            If a command is taking longer it will be killed.

        """
        # KeyValue reads the *current local names* via locals(): renaming any
        # parameter of this method would silently change the emitted option.
        rule = KeyValue(
            locals(),
            keys=['weekday', 'month', 'day', 'hour', 'minute', 'harakiri', 'legion', 'unique'],
            aliases={'weekday': 'week'},
            bool_keys=['unique'],
        )
        self._set('cron2', (f'{rule} {command}').strip(), multi=True)

        return self._section
    def attach_process_classic(
            self,
            command_or_pid_path: str,
            background: bool,
            control: bool = False,
            for_legion: bool = False
    ):
        """Attaches a command/daemon to the master process optionally managed by a pidfile.

        This will allow the uWSGI master to control/monitor/respawn this process.

        .. note:: This uses old classic uWSGI means of process attaching.
            To have more control use ``.attach_process()`` method (requires uWSGI 2.0+)

        http://uwsgi-docs.readthedocs.io/en/latest/AttachingDaemons.html

        :param command_or_pid_path: Command line to run, or a path containing ``.pid``
            for a pidfile-governed daemon (see the heuristic below).

        :param background: Must indicate whether process is in background.

        :param control: Consider this process a control: when the daemon dies, the master exits.

            .. note:: pidfile managed processed not supported.

        :param for_legion: Legion daemons will be executed only on the legion lord node,
            so there will always be a single daemon instance running in each legion.
            Once the lord dies a daemon will be spawned on another node.

            .. note:: uWSGI 1.9.9+ required.

        """
        # Legion-only daemons use a 'legion-' prefixed option name.
        prefix = 'legion-' if for_legion else ''

        # Heuristic: a '.pid' substring means "pidfile-governed daemon".
        if '.pid' in command_or_pid_path:
            if background:
                # Attach a command/daemon to the master process managed by a pidfile (the command must daemonize)
                self._set(f'{prefix}smart-attach-daemon', command_or_pid_path, multi=True)
            else:
                # Attach a command/daemon to the master process managed by a pidfile (the command must NOT daemonize)
                self._set(f'{prefix}smart-attach-daemon2', command_or_pid_path, multi=True)
        else:
            if background:
                raise ConfigurationError('Background flag is only supported for pid-governed commands')

            if control:
                # todo needs check
                # NOTE(review): this branch ignores ``prefix``, so for_legion has
                # no effect on control daemons -- confirm this is intended.
                self._set('attach-control-daemon', command_or_pid_path, multi=True)
            else:
                # Attach a command/daemon to the master process (the command has to remain in foreground)
                self._set(f'{prefix}attach-daemon', command_or_pid_path, multi=True)

        return self._section
def attach_process(
self,
command: str,
for_legion: bool = False,
broken_counter: int = None,
pidfile: str = None,
control: bool = None,
daemonize: bool = None,
touch_reload: Strlist = None,
signal_stop: int = None,
signal_reload: int = None,
honour_stdin: bool = None,
uid: Strint = None,
gid: Strint = None,
new_pid_ns: bool = None,
change_dir: str = None
):
"""Attaches a command/daemon to the master process.
This will allow the uWSGI master to control/monitor/respawn this process.
http://uwsgi-docs.readthedocs.io/en/latest/AttachingDaemons.html
:param command: The command line to execute.
:param for_legion: Legion daemons will be executed only on the legion lord node,
so there will always be a single daemon instance running in each legion.
Once the lord dies a daemon will be spawned on another node.
:param broken_counter: Maximum attempts before considering a daemon "broken".
:param pidfile: The pidfile path to check (enable smart mode).
:param control: If True, the daemon becomes a `control` one:
if it dies the whole uWSGI instance dies.
:param daemonize: Daemonize the process (enable smart2 mode).
:param touch_reload: List of files to check:
whenever they are 'touched', the daemon is restarted
:param signal_stop: The signal number to send to the daemon when uWSGI is stopped.
:param signal_reload: The signal number to send to the daemon when uWSGI is reloaded.
:param honour_stdin: Whether the daemon keeps (honours) the original stdin instead of
    having it remapped. TODO(review): description was a copy-paste of ``signal_reload``;
    confirm wording against the uWSGI attach-daemon2 documentation.
:param uid: Drop privileges to the specified uid.
.. note:: Requires master running as root.
:param gid: Drop privileges to the specified gid.
.. note:: Requires master running as root.
:param | |
interface to %s_automation.c (in %s)' % (self.m_name, path.abspath(path.curdir)) )
generatorDir = path.dirname(sys.argv[0])
arduinoConfig = path.abspath(path.join(generatorDir, '..', '..', '..', 'libraries', 'SequantoAutomation', 'utility', 'src', 'config.h'))
fp = open ( '%s_automation.c' % self.m_name, 'w' )
fp.write ( '/*\n' )
fp.write ( ' * DO NOT EDIT THIS FILE IT IS AUTO GENERATED!!!\n' )
fp.write ( ' *\n' )
fp.write ( ' * Generated by %s\n' % sys.argv[0] )
fp.write ( ' * on %s\n' % datetime.datetime.now() )
fp.write ( ' * Using command line %s\n' % ' '.join(sys.argv) )
fp.write ( ' */\n' )
fp.write ( '\n' )
if self.m_arduino:
fp.write ( '#include "%s"\n' % arduinoConfig )
fp.write ( '#include <string.h>\n' )
fp.write ( '#ifdef HAVE_STDINT_H\n' )
fp.write ( '#include <stdint.h>\n' )
fp.write ( '#endif\n' )
fp.write ( '#include <sequanto/automation.h>\n' )
fp.write ( '\n' )
fp.write ( '#include "%s_automation.h"\n' % (self.m_name) )
fp.write ( '\n' )
fp.write ( '#ifndef _SQ_CONFIG_H_\n' )
fp.write ( '#include "config.h"\n' )
fp.write ( '#endif\n' )
fp.write ( '#ifndef SQ_MAX_PARAMETERS\n' )
fp.write ( '#error SQ_MAX_PARAMETERS not defined!\n' )
fp.write ( '#endif\n' )
fp.write ( '/* Check that no automated function takes more paramters than is defined as the maximum in config.h */\n' )
fp.write ( '#if(SQ_MAX_PARAMETERS < %i)\n' % self.m_maxNumberOfParameters )
fp.write ( '#error The number of parameters of one of the automated functions takes more than SQ_MAX_PARAMETERS\n' )
fp.write ( '#endif\n' )
fp.write ( 'static const char NEWLINE[] SQ_CONST_VARIABLE = "\\r\\n";\n' )
fp.write ( 'static const char UPDATE_START[] SQ_CONST_VARIABLE = "!UPDATE ";\n' )
fp.write ( 'typedef enum _SQInfoType { INFO_TYPE_LIST = 0, INFO_TYPE_PROPERTY = 1, INFO_TYPE_CALLABLE = 2, INFO_TYPE_MONITOR = 3, INFO_TYPE_BRANCH = 4 } SQInfoType;\n' )
fp.write ( 'typedef struct _SQInfo { const char * name; SQInfoType type; int index; } SQInfo;\n' )
fp.write ( '\n' )
fp.write ( 'static const char ROOT[] SQ_CONST_VARIABLE = "/";\n' )
i = 0
for objectPath, type, index in self.m_objectPaths:
fp.write ( 'static const char NAME%i[] SQ_CONST_VARIABLE = "%s";\n' % (i, objectPath) )
i += 1
fp.write ( 'static const SQInfo INFO_LIST[] SQ_CONST_VARIABLE = {\n' )
fp.write ( ' { ROOT, INFO_TYPE_LIST, -1},\n' )
i = 0
for objectPath, type, index in self.m_objectPaths:
fp.write ( ' { NAME%i, %s, %s},\n' % (i, type, index) )
i += 1
fp.write ( '};' )
fp.write ( '\n' )
fp.write ( 'static const int NUMBER_OF_INFO SQ_CONST_VARIABLE = %i;\n' % (len(self.m_objectPaths) + 1) )
fp.write ( '\n' )
for branch in self.m_foundBranches:
fp.write ( 'static const char %s[] SQ_CONST_VARIABLE = "%s";\n' % (branch.generatedName, branch.objectPath) )
fp.write ( '\n' )
fp.write ( 'static const int NUMBER_OF_BRANCHES SQ_CONST_VARIABLE = %i;\n' % (len(self.m_foundBranches)) )
fp.write ( '\n' )
fp.write ( 'typedef SQBool (*SQBranchInfoHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( 'typedef SQBool (*SQBranchListHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( 'typedef SQBool (*SQBranchGetHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( 'typedef SQBool (*SQBranchSetHandler) ( SQStream * _stream, const char * _objectPath, const SQValue * const _value );\n' )
fp.write ( 'typedef SQBool (*SQBranchEnableHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( 'typedef SQBool (*SQBranchDisableHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( 'typedef SQBool (*SQBranchCallHandler) ( SQStream * _stream, const char * _objectPath, const SQValue * const _values, int _numberOfValues );\n' )
fp.write ( 'typedef SQBool (*SQBranchDumpHandler) ( SQStream * _stream, const char * _objectPath );\n' )
fp.write ( '\n' )
fp.write ( 'typedef struct _SQBranch\n' )
fp.write ( '{\n' )
fp.write ( ' const char * name;\n' )
fp.write ( ' size_t length;\n' )
fp.write ( ' SQBranchInfoHandler info_handler;\n' )
fp.write ( ' SQBranchListHandler list_handler;\n' )
fp.write ( ' SQBranchGetHandler get_handler;\n' )
fp.write ( ' SQBranchSetHandler set_handler;\n' )
fp.write ( ' SQBranchEnableHandler enable_handler;\n' )
fp.write ( ' SQBranchDisableHandler disable_handler;\n' )
fp.write ( ' SQBranchCallHandler call_handler;\n' )
fp.write ( ' SQBranchDumpHandler dump_handler;\n' )
fp.write ( '} SQBranch;\n' )
fp.write ( '\n' )
fp.write ( 'static const SQBranch BRANCH_LIST[] SQ_CONST_VARIABLE = {\n' )
if len(self.m_foundBranches) > 0:
for branch in self.m_foundBranches:
fp.write ( ' { %s, %i, %s, %s, %s, %s, %s, %s, %s, %s },\n' % (branch.generatedName, len(branch.objectPath), branch.infoHandlerFunction, branch.listHandlerFunction,
branch.getHandlerFunction, branch.setHandlerFunction, branch.enableHandlerFunction, branch.disableHandlerFunction,
branch.callHandlerFunction, branch.dumpHandlerFunction) )
else:
fp.write ( ' { NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL },\n' )
fp.write ( '};\n' )
for lineNumber, name in self.m_enums:
enum = self.m_parser.getEnum(name)
fp.write ( 'enum %s\n' % (name) )
fp.write ( '{\n' )
first = True
for key, value in enum.values:
if first:
first = False
else:
fp.write ( ', ' )
if value is None:
fp.write ( '%s\n' % (key) )
else:
fp.write ( '%s = %s\n' % (key, value) )
fp.write ( '};\n' )
for enum in self.m_foundEnums:
fp.write ( 'void %s ( SQStream * _stream )\n' % (enum.getFunction.name) )
fp.write ( '{\n' )
self.writeSuccessMessageWithValue ( fp, enum.type, enum.automationType, enum.valueName )
fp.write ( '}\n' )
for property in self.m_foundProperties:
translatedObjectPath = property.translatedObjectPath
if property.writeUpdateFunction:
if property.smart:
fp.write ( 'void %s ( %s, %s _value )\n' % (property.updateFunctionName, property.additionalSmartParameters, self.getRecognizedCType(property.type)) )
else:
fp.write ( 'void %s ( %s _value )\n' % (property.updateFunctionName, self.getRecognizedCType(property.type)) )
fp.write ( '{\n' )
fp.write ( ' SQStream * stream = NULL;\n' )
fp.write ( ' SQServer * server = sq_server_get_instance ();\n' )
fp.write ( ' stream = server->m_stream;\n' )
fp.write ( ' sq_stream_enter_write ( stream );\n' )
fp.write ( ' sq_stream_write_string ( stream, sq_get_constant_string( UPDATE_START ) );\n' )
if property.smart:
parts = property.normalizedSmartObjectPath.split('/%s')
initialPart = parts[0]
fp.write ( ' sq_stream_write_string ( stream, sq_get_constant_string( NAME%i ) );\n' % self.findObjectPathIndex(initialPart) )
for i in range(1, len(parts)):
fp.write ( ' sq_stream_write_string ( stream, sq_get_constant_string(ROOT) );\n' )
parameter = property.getFunction.parameters[i - 1]
if self.getAutomationType(parameter.type) == "integer":
fp.write ( ' sq_protocol_write_integer ( stream, %s );\n' % parameter.name )
else:
fp.write ( ' sq_stream_write_string ( stream, %s );\n' % parameter.name )
if parts[i] != '':
fp.write ( ' sq_stream_write_string ( stream, "%s" );\n' % parts[i] )
else:
fp.write ( ' sq_stream_write_string ( stream, sq_get_constant_string( NAME%i ) );\n' % self.findObjectPathIndex(property.objectPath) )
fp.write ( ' sq_stream_write_byte ( stream, \' \' );\n' )
if property.automationType == 'byte_array':
fp.write ( ' sq_protocol_write_%s ( stream, _value->m_start, _value->m_start + _value->m_length );\n' % property.automationType )
elif property.type == 'SQStringOut':
fp.write ( ' sq_protocol_write_string_out ( stream, &_value );\n' )
elif property.type == 'SQStringOut *':
fp.write ( ' sq_protocol_write_string_out ( stream, _value );\n' )
elif property.automationType == 'float' and property.type != 'float':
fp.write ( ' sq_protocol_write_float ( stream, (float) _value );\n' )
else:
fp.write ( ' sq_protocol_write_%s ( stream, _value );\n' % property.automationType )
fp.write ( ' sq_stream_write_string ( stream, sq_get_constant_string( NEWLINE ) );\n' )
fp.write ( ' sq_stream_exit_write ( stream );\n' )
fp.write ( '}\n' )
fp.write ( '\n' )
if property.generateGetFunction:
if property.smart:
if property.firstSmartObjectPath:
fp.write ( '%s %s ( %s );\n' % (self.getRecognizedCType(property.type), property.getFunction.name, property.additionalSmartParameters) )
else:
fp.write ( '%s %s ( void );\n' % (self.getRecognizedCType(property.type), property.getFunction.name) )
fp.write ( 'void sq_generated_property_%s%s ( SQStream * _stream, SQBool _justValue )\n' % (property.getFunction.name, property.additionalSmartName) )
fp.write ( '{\n' )
if property.smart:
fp.write ( ' %s value = %s( %s );\n' % (self.getRecognizedCType(property.type), property.getFunction.name, ', '.join(property.smartValues)) )
else:
fp.write ( ' %s value = %s();\n' % (self.getRecognizedCType(property.type), property.getFunction.name) )
assert property.automationType == self.getAutomationType(property.type)
fp.write ( ' if ( _justValue == SQ_TRUE )\n' )
fp.write ( ' { \n' )
self.writeValue ( fp, property.type, property.automationType, 'value' )
fp.write ( ' }\n' )
fp.write ( ' else\n' )
fp.write ( ' {\n ' )
self.writeSuccessMessageWithValue ( fp, property.type, property.automationType, 'value' )
fp.write ( ' }\n' )
fp.write ( '}\n' )
fp.write ( '\n' )
if property.setFunction is not None:
fp.write ( 'void %s ( %s );\n' % (property.setFunction.name, ', '.join(['%s %s' % (self.getRecognizedCType(parameter.type), parameter.name) for parameter in property.setFunction.parameters])) )
fp.write ( 'void sq_generated_property_%s%s ( const SQValue * const _value )\n' % (property.setFunction.name, | |
'''
Dataset and Dataloader classes for both numerical test cases
=====
Distributed by: <NAME> SCAI Lab (MIT License)
- Associated publication:
url: http://aimsciences.org//article/id/3a9f3d14-3421-4947-a45f-a9cc74edd097
doi: https://dx.doi.org/10.3934/fods.2020019
github: https://github.com/zabaras/deep-turbulence
=====
'''
from torch.utils.data import Dataset, DataLoader, TensorDataset
from utils.log import Log
from collections import OrderedDict
import torch
import torch.nn.functional as F
import numpy as np
import random
import os
class TrainingDataset(Dataset):
    '''
    Training data-set with additive Gaussian noise option for inputs and targets
    '''
    def __init__(self, inputs, targets, lstm_seeds, input_noise_std=0.01, tar_noise_std=0.0):
        """
        Training data-set for TM-Glow

        Args:
            inputs (torch.Tensor): [b, ...] tensor of low-fidelity inputs
            targets (torch.Tensor): [b, ...] tensor of high-fidelity targets
            lstm_seeds (torch.Tensor): [b] tensor of per-sample recurrent-state seeds
            input_noise_std (float): std of Gaussian noise added to each input sample
            tar_noise_std (float): std of Gaussian noise added to each target sample
        """
        assert inputs.size(0) == targets.size(0), 'inputs and target tensors must have same batch dimension size'
        assert inputs.size(0) == lstm_seeds.size(0), 'inputs and LSTM seed tensors must have same batch dimension size'
        self.inputs = inputs
        self.targets = targets
        self.lstm_seeds = lstm_seeds
        self.target_noise = tar_noise_std
        self.input_noise = input_noise_std

    def __len__(self):
        # Number of samples in the data-set (batch dimension)
        return len(self.inputs)

    def __getitem__(self, idx):
        # Fresh noise is drawn on every access, so repeated epochs see different jitter.
        # NOTE(review): noise for *both* tensors is cast to the targets' dtype —
        # assumes inputs and targets share a dtype; confirm at the call site.
        input0 = self.inputs[idx] + self.input_noise*torch.randn(self.inputs[idx].size()).type(self.targets.type())
        target0 = self.targets[idx] + self.target_noise*torch.randn(self.targets[idx].size()).type(self.targets.type())
        lstm_seed0 = self.lstm_seeds[idx]
        return input0, target0, lstm_seed0
class TMGLowDataLoader(object):
    '''
    Parent class for TM-Glow dataloader creators

    Note: These are not used as actual data loaders, they create data loaders

    Args:
        training_dir (string): directory of the training data
        testing_dir (string): directory of the testing data
        log (Log): logging class (a new one is created when None)
    '''
    def __init__(self, training_dir='.', testing_dir='.', log=None):
        super().__init__()
        # Directory of data
        self.training_dir = training_dir
        self.testing_dir = testing_dir
        if log is None:
            self.log = Log()
        else:
            self.log = log
        # Channel-wise normalization constants; None until computed or loaded
        self.input_mean = None
        self.output_mean = None
        self.input_std = None
        self.output_std = None

    def readFluidData(self, input_file_name: str, target_file_name: str, fStride=1, cStride=1):
        '''
        Reads a low-fidelity/high-fidelity pair of flow-field .npz files
        from the training directory, dropping the z-velocity channel.

        Args:
            input_file_name (string): file name of the low-fidelity (coarse) data
            target_file_name (string): file name of the high-fidelity (fine) data
            fStride (int): time-step stride applied to the high-fidelity data
            cStride (int): time-step stride applied to the low-fidelity data

        Returns:
            (np.array, np.array): input and target arrays; either is None
            when its file could not be found
        '''
        # Read in low-fidelity (input) data
        coarse_file = os.path.join(self.training_dir, input_file_name)
        try:
            data_npz = np.load(coarse_file)
            self.log.log('Reading file {:s}.'.format(input_file_name), rec=False)
            # Remove z-velocity as it is not needed (keep channels 0,1 and 3+)
            inputData = np.concatenate([data_npz['data'][::cStride,:2,:,:], data_npz['data'][::cStride,3:,:,:]], axis=1)
        except FileNotFoundError:
            self.log.error("Uh-oh, seems a low-fidelity data file couldn't be found!")
            self.log.error('Check this file exists: {}'.format(coarse_file))
            inputData = None
        # Read in high-fidelity (target) data
        fine_file = os.path.join(self.training_dir, target_file_name)
        try:
            data_npz = np.load(fine_file)
            self.log.log('Reading file {:s}.'.format(target_file_name), rec=False)
            # Remove z-velocity as it is not needed
            targetData = np.concatenate([data_npz['data'][::fStride,:2,:,:], data_npz['data'][::fStride,3:,:,:]], axis=1)
        except FileNotFoundError:
            self.log.error("Uh-oh, seems a high-fidelity data file couldn't be found!")
            self.log.error('Check this file exists: {}'.format(fine_file))
            targetData = None
        return inputData, targetData

    def calcNormalizingParams(self, inputData, targetData):
        '''
        Calculates the hyper-parameters used for normalizing the
        training input/output data. Normalizes data to a standard unit Gaussian.

        Args:
            inputData (tensor): [b,t,c,d1,d2] tensor of low-fidelity inputs
            targetData (tensor): [b,t,c,d1*,d2*] tensor of high-fidelity target
        '''
        self.log.warning('Calculating normalizing constants')
        self.input_mean = torch.zeros(3)
        self.output_mean = torch.zeros(3)
        self.input_std = torch.ones(3)
        self.output_std = torch.ones(3)
        # Statistics over the first three channels (x-vel, y-vel, pressure)
        for c in range(3):
            self.input_mean[c] = torch.mean(inputData[:, :, c])
            self.output_mean[c] = torch.mean(targetData[:, :, c])
            self.input_std[c] = torch.std(inputData[:, :, c])
            self.output_std[c] = torch.std(targetData[:, :, c])

    def setNormalizingParams(self, model):
        '''
        Given a PyTorch model this sets the normalizing parameters of
        the loader class using what is stored in the model. This is done
        to save normalizing constants between runs.

        Args:
            model: PyTorch model holding normalizing constants
                (in_mu, out_mu, in_std, out_std)
        '''
        # Copy the constants to CPU-side tensors
        self.input_mean = model.in_mu.cpu()
        self.output_mean = model.out_mu.cpu()
        self.input_std = model.in_std.cpu()
        self.output_std = model.out_std.cpu()

    def transferNormalizingParams(self, model):
        '''
        Given a PyTorch model this gets the calculated normalizing
        parameters and assigns them to registered parameters of
        the model. This is done to save normalizing constants between runs.

        Args:
            model: PyTorch model whose normalizing-constant params are set
        '''
        device = next(model.parameters()).device  # Model's device
        model.in_mu = self.input_mean.to(device)
        model.out_mu = self.output_mean.to(device)
        model.in_std = self.input_std.to(device)
        model.out_std = self.output_std.to(device)

    def normalizeInputData(self, inputData):
        '''
        Normalize the input tensor on each channel (x-vel, y-vel, pressure).
        Modifies the tensor in place and returns it.
        '''
        # Normalize training data to unit Gaussian: subtract mean, then divide by std
        for c in range(3):
            inputData[:, :, c] = inputData[:, :, c] - self.input_mean[c]
            inputData[:, :, c] = inputData[:, :, c] / self.input_std[c]
        return inputData

    def normalizeTargetData(self, targetData):
        '''
        Normalize the target tensor on each channel (x-vel, y-vel, pressure).
        Modifies the tensor in place and returns it.
        '''
        for c in range(3):
            targetData[:, :, c] = targetData[:, :, c] - self.output_mean[c]
            targetData[:, :, c] = targetData[:, :, c] / self.output_std[c]
        return targetData
# /=================================================================================/
class BackwardStepLoader(TMGLowDataLoader):
    '''
    Class used for creating data loaders for the backwards step numerical example

    Args:
        training_dir (string): path of the training numpy data files
        testing_dir (string): path of the testing numpy data files
        shuffle (boolean): shuffle the training data or not
        log (Log): logging class
    '''
    def __init__(self, training_dir, testing_dir, shuffle=True, log=None):
        super().__init__(training_dir, testing_dir, log)
        self.shuffle = shuffle

    def createTrainingLoader(self, ntrain, u0, tSplit=1, inUpscale=1, batch_size=32, tar_noise_std=0):
        '''
        Creates the training loader

        Args:
            ntrain (np.array): Numpy array of training-indexes
            u0 (np.array): Input velocity for each training index (used for normalizing)
            tSplit (int): Number of times to split the simulation data into smaller time-series for training
            tar_noise_std (float): Std of random noise to add on the target
            inUpscale (int): Initial upscaling, used to just make the architecture similar
            batch_size (int): Training batch-size

        Returns:
            DataLoader: training loader
        '''
        self.log.log('Creating backwards step training loader.')
        # After time-splitting there are len(ntrain) * tSplit samples.
        # (Fixes the previous `len(ntrain*tSplit)`, which multiplied the
        # index array element-wise and so never scaled the count.)
        if batch_size > len(ntrain) * tSplit:
            self.log.warning('Lowering mini-batch size to match training cases.')
            batch_size = len(ntrain) * tSplit

        inputData = []
        targetData = []
        u0Data = []
        # Loop through cases and read in each file
        for i, idx in enumerate(ntrain):
            inputData0, targetData0 = self.readFluidData("backwardStepCoarse{:d}-[U,p].npz".format(idx), "backwardStepFine{:d}-[U,p].npz".format(idx))
            inputData.append(inputData0)
            targetData.append(targetData0)
            u0Data.append(u0[idx])
        # Stack into single tensor.
        inputData = torch.Tensor(np.stack(inputData, axis=0))
        targetData = torch.Tensor(np.stack(targetData, axis=0))
        u0Data = torch.Tensor(u0Data)

        # Scale input if needed through interpolation (per time-step)
        inputData0 = []
        for i in range(inputData.size(1)):
            inputStep = F.interpolate(inputData[:, i], scale_factor=inUpscale, mode='bilinear', align_corners=True)
            inputData0.append(inputStep)
        inputData = torch.stack(inputData0, dim=1)

        # Normalize by the inlet velocity; the pressure channel scales with u0**2
        u0 = u0Data.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        u0 = torch.cat((u0, u0, u0**2), dim=2)
        inputData = inputData/u0
        targetData = targetData/u0
        # Add inlet velocity as an additional input channel
        u0 = u0Data.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        u0 = u0.expand(u0.size(0), inputData.size(1), 1, inputData.size(-2), inputData.size(-1))
        inputData = torch.cat([inputData, u0], dim=2)

        # If normalizing parameters are not present, calculate them
        if self.input_mean is None or self.input_std is None:
            self.calcNormalizingParams(inputData, targetData)
        # Normalize the data by a unit Gaussian
        inputData = self.normalizeInputData(inputData)
        targetData = self.normalizeTargetData(targetData)

        # Split time-series into sub series
        self.log.log('Splitting time-series into {:d} chunks.'.format(tSplit))
        input_idx = int(inputData.size(1)//tSplit)
        target_idx = int(targetData.size(1)//tSplit)
        input_splits = []
        target_splits = []
        for i in range(tSplit):
            input_splits.append(inputData[:, i*input_idx:(i+1)*input_idx])
            target_splits.append(targetData[:, i*target_idx:(i+1)*target_idx])
        inputData = torch.cat(input_splits, dim=0)
        targetData = torch.cat(target_splits, dim=0)

        # Model is very sensitive to these initial states, thus they must
        # be the same between starting and stopping the training of the model
        c0Seeds = torch.LongTensor(inputData.size(0)).random_(0, 1000)
        # Pass the noise level by keyword: the 4th positional slot of
        # TrainingDataset is input_noise_std, which previously (and silently)
        # received the target noise level instead.
        dataset = TrainingDataset(inputData, targetData, c0Seeds, tar_noise_std=tar_noise_std)

        training_loader = DataLoader(dataset, batch_size=batch_size, shuffle=self.shuffle, drop_last=False)
        return training_loader

    def createTestingLoader(self, ntest, u0, inUpscale=1, batch_size=32):
        '''
        Creates the testing loader

        Args:
            ntest (np.array): Numpy array of testing-indexes
            u0 (np.array): Input velocity for each testing index (used for normalizing)
            inUpscale (int): Initial upscaling, used to just make the architecture similar
            batch_size (int): Testing batch-size

        Returns:
            DataLoader: testing loader
        '''
        self.log.log('Creating backwards step testing loader.')
        if batch_size > len(ntest):
            self.log.warning('Lowering mini-batch size to match training cases.')
            batch_size = len(ntest)

        inputData = []
        targetData = []
        u0Data = []
        # Loop through cases and read in each file
        for i, idx in enumerate(ntest):
            inputData0, targetData0 = self.readFluidData("backwardStepCoarse{:d}-[U,p].npz".format(idx), "backwardStepFine{:d}-[U,p].npz".format(idx))
            inputData.append(inputData0)
            targetData.append(targetData0)
            u0Data.append(u0[idx])
        # Stack into single tensor.
        inputData = torch.Tensor(np.stack(inputData, axis=0))
        targetData = torch.Tensor(np.stack(targetData, axis=0))
        u0Data = torch.Tensor(u0Data)

        # Scale input if needed through interpolation
        inputData0 = []
        for i in range(inputData.size(1)):
            inputStep = F.interpolate(inputData[:, i], scale_factor=inUpscale, mode='bilinear', align_corners=True)
            inputData0.append(inputStep)
        inputData = torch.stack(inputData0, dim=1)

        # Normalize by the inlet velocity
        u0 = u0Data.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        u0 = torch.cat((u0, u0, u0**2), dim=2)
        inputData = inputData/u0
        targetData = targetData/u0
        # Add inlet velocity as an input channel
        u0 = u0Data.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        u0 = u0.expand(u0.size(0), inputData.size(1), 1, inputData.size(-2), inputData.size(-1))
        inputData = torch.cat([inputData, u0], dim=2)

        # If normalizing parameters are not present, calculate them
        if self.input_mean is None or self.input_std is None:
            self.calcNormalizingParams(inputData, targetData)
        # Normalize the data by a unit Gaussian
        inputData = self.normalizeInputData(inputData)
        targetData = self.normalizeTargetData(targetData)

        # NOTE(review): the testing loader honours self.shuffle just like the
        # training one — confirm shuffled test batches are intentional.
        dataset = TensorDataset(inputData, targetData, u0Data)
        testing_loader = DataLoader(dataset, batch_size=batch_size, shuffle=self.shuffle, drop_last=False)
        return testing_loader
# /=================================================================================/
class CylinderArrayLoader(TMGLowDataLoader):
'''
Class used for creating data loaders for the cylinder array numerical example
Args:
ntrain (int): number of training data
ntest (int): number of testing data
data_dir (string): path of numpy data files
shuffle (boolean): shuffle the training data | |
cleaning and estimate action center and spread.
Parameters
----------
data_threshold: dict
parameters for threshold detector
data_dbscan: dict
parameters for dbscan detector
data_local_outlier_factor: dict
parameters for local outlier factor detector
data_isolation_forest: dict
parameters for isolation forest detector
bx: torch.Tensor
bx values at monitor locations
by: torch.Tensor
by values at monitor locations
sigma_bx: torch.Tensor
bx errors at monitor locations
sigma_by: torch.Tensor
by errors at monitor locations
Returns
-------
None, update self.action dictionary
"""
self.action = {}
index = self.model.monitor_index
bx = bx if bx is not None else self.model.bx[index]
by = by if by is not None else self.model.by[index]
sigma_bx = sigma_bx if sigma_bx is not None else self.model.sigma_bx[index]
sigma_by = sigma_by if sigma_by is not None else self.model.sigma_by[index]
jx = self.table.ax**2/(2.0*bx)
jy = self.table.ay**2/(2.0*by)
sigma_jx = self.table.ax**2/bx**2*self.table.sigma_ax**2
sigma_jx += self.table.ax**4/bx**4/4*sigma_bx**2
sigma_jx.sqrt_()
sigma_jy = self.table.ay**2/by**2*self.table.sigma_ay**2
sigma_jy += self.table.ay**4/by**4/4*sigma_by**2
sigma_jy.sqrt_()
mask = torch.clone(self.flag[index])
mask = torch.stack([mask, mask]).to(torch.bool)
data = standardize(torch.stack([jx, jy]), center_estimator=median, spread_estimator=biweight_midvariance)
if data_threshold['use']:
factor = data_threshold['factor']
center = median(data)
spread = biweight_midvariance(data).sqrt()
min_value, max_value = center - factor*spread, center + factor*spread
mask *= threshold(data, min_value, max_value)
if data_dbscan['use']:
factor = data_dbscan['factor']
for case in range(1):
mask[case] *= dbscan(data[case].reshape(-1, 1), epsilon=factor)
if data_local_outlier_factor['use']:
for case in range(1):
mask[case] *= local_outlier_factor(data[case].reshape(-1, 1), contamination=data_local_outlier_factor['contamination'])
if data_isolation_forest['use']:
for case in range(1):
mask[case] *= isolation_forest(data[case].reshape(-1, 1), contamination=data_isolation_forest['contamination'])
mask_jx, mask_jy = mask
mask_jx, mask_jy = mask_jx/sigma_jx**2, mask_jy/sigma_jy**2
center_jx = weighted_mean(jx, weight=mask_jx)
spread_jx = weighted_variance(jx, weight=mask_jx, center=center_jx).sqrt()
center_jy = weighted_mean(jy, weight=mask_jy)
spread_jy = weighted_variance(jy, weight=mask_jy, center=center_jy).sqrt()
self.action['jx'], self.action['sigma_jx'] = jx, sigma_jx
self.action['center_jx'], self.action['spread_jx'] = center_jx, spread_jx
self.action['jy'], self.action['sigma_jy'] = jy, sigma_jy
self.action['center_jy'], self.action['spread_jy'] = center_jy, spread_jy
self.action['mask'] = mask
def get_twiss_from_amplitude(self) -> None:
    """
    Estimate twiss (beta functions) from amplitude and action.

    Note, action dictionary should be precomputed

    Parameters
    ----------
    None

    Returns
    -------
    None, update self.data_amplitude dictionary
    (the previous docstring named a non-existent self.twiss_from_amplitude)
    """
    if self.action == {}:
        raise Exception('error: action dictionary is empty')
    self.data_amplitude = {}
    ax, sigma_ax = self.table.ax, self.table.sigma_ax
    ay, sigma_ay = self.table.ay, self.table.sigma_ay
    jx, sigma_jx = self.action['center_jx'], self.action['spread_jx']
    jy, sigma_jy = self.action['center_jy'], self.action['spread_jy']
    # beta = a**2 / (2 J) in each plane
    bx, by = ax**2/(2.0*jx), ay**2/(2.0*jy)
    # First-order error propagation over (a, J)
    sigma_bx = torch.sqrt(ax**2/jx**2*sigma_ax**2 + 0.25*ax**4/jx**4*sigma_jx**2)
    sigma_by = torch.sqrt(ay**2/jy**2*sigma_ay**2 + 0.25*ay**4/jy**4*sigma_jy**2)
    # (removed unused dead code: monitor_index / model bx, by lookups)
    self.data_amplitude['bx'], self.data_amplitude['sigma_bx'] = bx, sigma_bx
    self.data_amplitude['by'], self.data_amplitude['sigma_by'] = by, sigma_by
def phase_virtual(self, limit:int=None, exclude:list=None, **kwargs) -> None:
    """
    Estimate x & y phase at virtual locations.

    Parameters
    ----------
    limit: int
        range limit to use (defaults to max(self.limit))
    exclude: list
        list of virtual locations to exclude
    **kwargs:
        passed to Decomposition.phase_virtual

    Returns
    -------
    None, update self.virtual_x and self.virtual_y dictionaries
    """
    self.virtual_x = {}
    self.virtual_y = {}
    if limit is None:
        limit = max(self.limit)
    skip = set() if exclude is None else set(exclude)
    probes = [probe for probe in self.model.virtual_index if probe not in skip]

    # Per-plane argument bundles: (frequency, model frequency, phase, model phase,
    # and their corresponding errors)
    plane_x = (self.table.nux, self.model.nux, self.fx, self.model.fx,
               self.table.sigma_nux, self.model.sigma_nux, self.sigma_fx, self.model.sigma_fx)
    plane_y = (self.table.nuy, self.model.nuy, self.fy, self.model.fy,
               self.table.sigma_nuy, self.model.sigma_nuy, self.sigma_fy, self.model.sigma_fy)

    def estimate(probe, plane):
        frequency, frequency_model, phase, phase_model, s_f, s_fm, s_p, s_pm = plane
        return Decomposition.phase_virtual(probe, limit, self.flag, frequency, frequency_model, phase, phase_model,
                                           sigma_frequency=s_f, sigma_frequency_model=s_fm,
                                           sigma_phase=s_p, sigma_phase_model=s_pm,
                                           **kwargs)

    # Compute all estimates first, then write back, so that updating
    # self.fx / self.fy does not feed into the later estimates
    data_x = [estimate(probe, plane_x) for probe in probes]
    data_y = [estimate(probe, plane_y) for probe in probes]
    for probe, result_x, result_y in zip(probes, data_x, data_y):
        self.virtual_x[probe] = result_x
        self.virtual_y[probe] = result_y
        self.fx[probe], self.sigma_fx[probe] = result_x.get('model')
        self.fy[probe], self.sigma_fy[probe] = result_y.get('model')
def phase_correct(self, *, limit:int=None, **kwargs) -> None:
    """
    Correct x & y phase at monitor locations.

    Note, this introduces a strong bias towards the model, do not use a large range limit
    Note, phase at the probed location itself is not used

    Parameters
    ----------
    limit: int
        range limit (defaults to max(self.limit))
    **kwargs:
        passed to phase_virtual Decomposition method

    Returns
    -------
    None, update self.correct_x and self.correct_y dictionaries
    """
    self.correct_x = {}
    self.correct_y = {}
    if limit is None:
        limit = max(self.limit)
    probes = self.model.monitor_index
    # Corrected phases start out as copies of the measured ones
    self.fx_correct = torch.clone(self.fx)
    self.sigma_fx_correct = torch.clone(self.sigma_fx)
    self.fy_correct = torch.clone(self.fy)
    self.sigma_fy_correct = torch.clone(self.sigma_fy)

    # Per-plane argument bundles: (frequency, model frequency, phase, model phase,
    # and their corresponding errors)
    plane_x = (self.table.nux, self.model.nux, self.fx, self.model.fx,
               self.table.sigma_nux, self.model.sigma_nux, self.sigma_fx, self.model.sigma_fx)
    plane_y = (self.table.nuy, self.model.nuy, self.fy, self.model.fy,
               self.table.sigma_nuy, self.model.sigma_nuy, self.sigma_fy, self.model.sigma_fy)

    def estimate(probe, plane):
        frequency, frequency_model, phase, phase_model, s_f, s_fm, s_p, s_pm = plane
        return Decomposition.phase_virtual(probe, limit, self.flag, frequency, frequency_model, phase, phase_model,
                                           sigma_frequency=s_f, sigma_frequency_model=s_fm,
                                           sigma_phase=s_p, sigma_phase_model=s_pm,
                                           **kwargs)

    # Compute all estimates first, then write into the *_correct copies
    # (the measured self.fx / self.fy are left untouched)
    data_x = [estimate(probe, plane_x) for probe in probes]
    data_y = [estimate(probe, plane_y) for probe in probes]
    for probe, result_x, result_y in zip(probes, data_x, data_y):
        self.correct_x[probe] = result_x
        self.correct_y[probe] = result_y
        self.fx_correct[probe], self.sigma_fx_correct[probe] = result_x.get('model')
        self.fy_correct[probe], self.sigma_fy_correct[probe] = result_y.get('model')
@staticmethod
def phase_alfa(a_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_a_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss alfa at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
a_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_a_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(a, 0) or (a, sigma_a)
"""
a = a_m*(1.0/torch.tan(f_ij)-1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij)-1.0/torch.tan(f_m_ik))-1.0/torch.tan(f_ij)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ik)*torch.sin(f_m_ij) + 1.0/torch.tan(f_ik)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ij)*torch.sin(f_m_ik)
if not error:
return (a, torch.zeros_like(a))
sigma_a = sigma_f_ij**2*(1.0/torch.sin(f_ij))**4*(1.0/torch.tan(f_m_ik) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_a += sigma_f_ik**2*(1.0/torch.sin(f_ik))**4*(1.0/torch.tan(f_m_ij) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_a += sigma_a_m**2*((1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2)
sigma_a += sigma_f_m_ik**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ij)**2*(torch.cos(f_m_ij) + a_m*torch.sin(f_m_ij))**2
sigma_a += sigma_f_m_ij**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ik)**2*(torch.cos(f_m_ik) + a_m*torch.sin(f_m_ik))**2
sigma_a.sqrt_()
return (a, sigma_a)
@staticmethod
def phase_beta(b_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_b_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss beta at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
b_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_b_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(b, 0) or (b, sigma_b)
"""
b = b_m*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))
if not error:
return (b, torch.zeros_like(b))
sigma_b = sigma_f_ij**2*b_m**2*(1.0/torch.sin(f_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_ik**2*b_m**2*(1.0/torch.sin(f_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_b += sigma_b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_m_ij**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b += sigma_f_m_ik**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b.sqrt_()
return (b, sigma_b)
def get_twiss_from_phase(self, *, virtual:bool=True, error:bool=True, model:bool=False,
use_correct:bool=False, use_correct_sigma:bool=False, use_model:bool=False) -> None:
"""
Estimate twiss from phase data.
Note, raw data is saved, no cleaning is performed
Values (and errors) are computed for each triplet
Parameters
----------
error: bool
flag to compute twiss errors
model: bool
flag to include model error
use_correct: bool
flag to use | |
and also the entry
level for the user level operations.
"""
manager = None
""" The manager of the template file, this is considered to
be the owner and generator instance """
base_path = None
""" The base path to be used in the resolution of template
files, this value may or may not be defined and in case it's
not defined only the default (relative path) resolution approach
is used for the include and extends operations """
file_path = None
""" The path to the file from which the contents of the template
are loaded, this is the original reference """
encoding = None
""" The encoding used in the file, this is the main encoding
to be used both in the loading and storage of it """
variable_encoding = None
""" The encoding that is going to be used to encode the value
of the various variables to be expanded in the template """
strict_mode = False
""" The strict mode flag, that controls if the an error in a
variable resolution should raise an exception, usage of this
value should be done with care to avoid unwanted behavior """
root_node = None
""" The root node from which the visitor will start the visiting
using a recursive approach """
visitor = None
""" The visitor object that will be used for the visiting of the
various nodes that make part of the abstract syntax tree defined
from the provided root node """
locale_bundles = []
""" The list that contains the various bundles to be searched for
localization, the order set is going to be the priority for template
value resolution (from first to last list element) """
nodes = {}
""" The dictionary that associates the identifiable node id with
the node reference that it corresponds, this map may be used for
abstract syntax tree manipulations (eg: inheritance manipulation) """
def __init__(
    self,
    manager = None,
    base_path = None,
    file_path = None,
    encoding = None,
    root_node = None,
    eval = False
):
    """
    Constructor of the class.
    :type manager: TemplateEngine
    :param manager: The manager (owner/generator) to be used.
    :type base_path: String
    :param base_path: The base file system path used when resolving
    templates in the include and extends operations.
    :type file_path: String
    :param file_path: The path to the file to be used, may be unset
    depending on how the template is created.
    :type encoding: String
    :param encoding: The encoding used in the file, defaults to the
    default encoding when not provided.
    :type root_node: AstNode
    :param root_node: The root node of the abstract syntax tree.
    :type eval: bool
    :param eval: Whether the evaluation based visitor should be used
    instead of the normal (and safe) interpreter based visitor; use
    with care.
    """
    self.manager = manager
    self.base_path = base_path
    self.file_path = file_path
    self.encoding = encoding
    self.root_node = root_node
    # select the (unsafe) evaluation visitor only when explicitly requested
    visitor_class = visitor.EvalVisitor if eval else visitor.Visitor
    self.visitor = visitor_class(self)
    self.locale_bundles = []
    self.nodes = {}
    self.index_nodes()
@classmethod
def format(cls, template, *args):
    """
    C-style (printf like) formatting helper, interpolating the
    variable set of arguments into the template string.
    :type template: String
    :param template: The C-like format string to interpolate.
    :rtype: String
    :returns: The "final" formatted string value, or None when the
    formatting operation fails for any reason.
    """
    try:
        return template % args
    except Exception:
        return None
@classmethod
def convert(cls, value, mode):
    """
    Converts the provided value according to the requested
    "modification" operation, producing a "plain" string value.
    :type value: String
    :param value: The base value to be converted under the given mode.
    :type mode: String
    :param mode: Identifier of the conversion operation to apply.
    :rtype: String
    :return: The converted value, the original value when the mode is
    unknown, or None when the conversion itself fails.
    """
    handler = visitor.CONVERSION_MAP.get(mode, None)
    if not handler:
        return value
    try:
        return handler(value)
    except Exception:
        return None
def index_nodes(self):
    """
    Runs the indexing stage of the identifiable nodes, this is
    required for the inheritance of blocks to properly work.
    More than one execution of this method may be required if
    the abstract syntax tree changed in response to the processing
    of one or more file inclusion (sub tree inclusion).
    """
    # walks the tree from the root; _index_node (defined elsewhere in the
    # class) presumably populates self.nodes — verify in its implementation
    self._index_node(self.root_node)
def assign(self, name, value):
    """
    Assigns a variable to a value. This assignment
    allows the usage of the value internally in the template.
    The assigned variable value may assume any given data
    type that is accessible by the template language.
    :type name: String
    :param name: The name of the variable to assign a value.
    :type value: Object
    :param value: The value to be assigned to the variable.
    """
    # template variables live in the visitor's global namespace
    self.visitor.set_global(name, value)
def set_global_map(self, global_map):
    """
    Sets the global map to the current template file.
    The global map should be used as the support for the variable
    assignment.
    :type global_map: Dictionary
    :param global_map: The global map containing all the variable values.
    """
    # hands the whole map to the visitor (replacing rather than merging,
    # presumably — confirm against the Visitor implementation)
    self.visitor.set_global_map(global_map)
def set_string_buffer(self, string_buffer):
    """
    Updates the underlying string buffer that is going to be
    used by the visitor to the provided one.
    The string buffer should be an opened file like object that
    accepts the typical calls.
    In case a typical file object is used this may reduce the
    amount of memory used by the visitor by an order of magnitude
    so this method may be very useful for long output generation
    in the template engine.
    :type string_buffer: File
    :param string_buffer: The file like object that is going to
    be used by the underlying visitor object.
    """
    # the visitor writes all rendered output through this buffer
    self.visitor.string_buffer = string_buffer
def attach_process_methods(self, process_methods_list):
    """
    Attaches a series of process methods to the visitor
    currently being used, extending the set of process
    methods available beyond the default ones.
    :type process_methods_list: List
    :param process_methods_list: The list of (method name, function)
    tuples to be attached.
    """
    # expose every provided (name, function) pair on the current visitor
    for name, function in process_methods_list:
        self.visitor.attach_process_method(name, function)
def attach_locale_bundles(self, locale_bundles):
    """
    Attaches a series of locale bundles methods to the visitor
    currently being used.
    This method also attaches the locale bundles to the current
    instance (for context retrieval).
    :type locale_bundles: List
    :param locale_bundles: The list of locale bundles to be used
    for resolution in the current context.
    """
    # stored both on the template file (context retrieval) and on the
    # visitor (localization resolution)
    self.locale_bundles = locale_bundles
    self.visitor.locale_bundles = locale_bundles
def load_system_variable(self, name = "_system"):
    """
    Loads a system information variable to the template
    file. This variable would allow for access to the
    status of the current manager/system.
    :type name: String
    :param name: The name of the variable used
    to retain the system wide information.
    """
    # retrieves the template engine plugin
    # in order to obtain the plugin manager
    # NOTE(review): assumes self.manager is set; a template created without
    # a manager would raise AttributeError here
    template_engine_plugin = self.manager.plugin
    plugin_manager = template_engine_plugin.manager
    # retrieves the map containing the "global" system information
    system_information_map = plugin_manager.get_system_information_map()
    # assigns the system information map variable to
    # the template so that it may be used to retrieve
    # global information about the system
    self.assign(name, system_information_map)
def load_functions(self):
    """
    Loads the complete set of base functions that are going to be
    used at template runtime to perform common operations.
    These functions will be exposed on the global dictionary.
    """
    # retrieves the reference to the class associated with the
    # current instance to be able to access class variables
    cls = self.__class__
    # runs the assign operation for the complete set of functions
    # that are considered part of the global namespace
    # NOTE(review): `colony` is expected to be imported at file level
    # (not visible in this chunk) — verify
    self.assign("colony", colony)
    self.assign("format", cls.format)
    self.assign("convert", cls.convert)
def load_visitor(self):
"""
Runs the various loading/prepare operations in the currently
set visitor so | |
is None and data is not None:
raise EVMException("Invalid end transaction result")
if not isinstance(data, (type(None), Array, bytes)):
raise EVMException("Invalid end transaction data type")
self.result = result
self.data = data
def is_rollback(self):
    """Return True when this end state undoes the transaction's effects."""
    normal_results = {"STOP", "RETURN", "SELFDESTRUCT"}
    if self.result in normal_results:
        return False
    # anything else must be one of the failure results
    assert self.result in {"THROW", "TXERROR", "REVERT"}
    return True
def __str__(self):
    # render as e.g. "EndTX<STOP>"
    return "EndTX<{}>".format(self.result)
class InvalidOpcode(EndTx):
    """Trying to execute invalid opcode"""
    def __init__(self):
        # THROW is classified as a rollback result by is_rollback()
        super().__init__("THROW")
class StackOverflow(EndTx):
    """Attempted to push more than 1024 items"""
    def __init__(self):
        # ends the transaction with a rollback (THROW)
        super().__init__("THROW")
class StackUnderflow(EndTx):
    """Attempted to pop from an empty stack"""
    def __init__(self):
        # ends the transaction with a rollback (THROW)
        super().__init__("THROW")
class NotEnoughGas(EndTx):
    """Not enough gas for operation"""
    def __init__(self):
        # out-of-gas maps to the THROW rollback result
        super().__init__("THROW")
class Stop(EndTx):
    """Program reached a STOP instruction"""
    def __init__(self):
        # normal halt; is_rollback() returns False for STOP
        super().__init__("STOP")
class Return(EndTx):
    """Program reached a RETURN instruction"""
    def __init__(self, data=b""):
        # b"" replaces the previous `bytearray()` default: a mutable default
        # argument is shared across all calls, and EndTx.__init__ only accepts
        # None, Array or bytes for `data`, so a bytearray default failed its
        # isinstance check anyway.
        super().__init__("RETURN", data)
class Revert(EndTx):
    """Program reached a REVERT instruction"""
    def __init__(self, data):
        # REVERT rolls the transaction back (see is_rollback) but still
        # carries return data, validated by EndTx.__init__
        super().__init__("REVERT", data)
class SelfDestruct(EndTx):
    """Program reached a SELFDESTRUCT instruction"""
    def __init__(self):
        # SELFDESTRUCT ends execution normally (not a rollback per is_rollback)
        super().__init__("SELFDESTRUCT")
class TXError(EndTx):
    """A failed Transaction"""
    def __init__(self):
        # TXERROR is classified as a rollback result by is_rollback()
        super().__init__("TXERROR")
def concretized_args(**policies):
    """
    Make sure an EVM instruction has all of its arguments concretized according to
    provided policies.
    Example decoration:
        @concretized_args(size='ONE', address='')
        def LOG(self, address, size, \*topics):
            ...
    The above will make sure that the *size* parameter to LOG is Concretized when symbolic
    according to the 'ONE' policy and concretize *address* with the default policy.
    :param policies: A kwargs list of argument names and their respective policies.
                     Provide None or '' as policy to use default.
    :return: A function decorator
    """
    def concretizer(func):
        # capture the wrapped function's positional parameter names once
        spec = inspect.getfullargspec(func)
        @wraps(func)
        def wrapper(*args, **kwargs):
            for arg, policy in policies.items():
                assert arg in spec.args, "Concretizer argument not found in wrapped function."
                # index is 0-indexed, but ConcretizeArgument is 1-indexed. However, this is correct
                # since implementation method is always a bound method (self is param 0)
                index = spec.args.index(arg)
                # concrete values need no concretization
                if not issymbolic(args[index]):
                    continue
                # empty/None policy falls back to "SAMPLED"
                # NOTE(review): the docstring calls '' the "default" policy —
                # confirm "SAMPLED" is the intended default
                if not policy:
                    policy = "SAMPLED"
                if policy == "ACCOUNTS":
                    value = args[index]
                    world = args[0].world
                    # special handler for EVM only policy
                    cond = world._constraint_to_accounts(value, ty="both", include_zero=True)
                    world.constraints.add(cond)
                    policy = "ALL"
                if args[index].taint:
                    # TODO / FIXME: The taint should persist!
                    logger.warning(
                        f"Concretizing {func.__name__}'s {index} argument and dropping its taints: "
                        "the value might not be tracked properly (in case of using detectors)"
                    )
                logger.info(
                    f"Concretizing instruction {args[0].world.current_vm.instruction} argument {arg} by {policy}"
                )
                # raising aborts the call; the caller is presumably expected to
                # concretize the argument and retry the instruction
                raise ConcretizeArgument(index, policy=policy)
            return func(*args, **kwargs)
        # preserve the wrapped function's signature for introspection
        wrapper.__signature__ = inspect.signature(func)
        return wrapper
    return concretizer
class EVM(Eventful):
"""
Machine State. The machine state is defined as
the tuple (g, pc, m, i, s) which are the gas available, the
program counter pc , the memory contents, the active
number of words in memory (counting continuously
from position 0), and the stack contents. The memory
contents are a series of zeroes of bitsize 256
"""
_published_events = {
"evm_execute_instruction",
"evm_read_storage",
"evm_write_storage",
"evm_read_memory",
"evm_write_memory",
"evm_read_code",
"decode_instruction",
"concrete_sha3",
"symbolic_sha3",
}
class transact:
    """
    Descriptor wrapping a two-phase EVM instruction handler: `_pre` runs
    before the inner transaction and must raise StartTx; `_pos` runs when
    execution resumes afterwards. The phase is selected by the owning
    instance's `_on_transaction` flag.
    """
    def __init__(self, pre=None, pos=None, doc=None):
        self._pre = pre
        self._pos = pos
        # inherit the docstring from the pre handler when not given
        if doc is None and pre is not None:
            doc = pre.__doc__
        self.__doc__ = doc
        # NOTE(review): assumes `pre` is not None here (would raise otherwise)
        self.__name__ = pre.__name__
    def __get__(self, obj, objtype=None):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        if self._pre is None:
            raise AttributeError("unreadable attribute")
        from types import MethodType
        # return different version depending on obj._pending_transaction
        def _pre_func(my_obj, *args, **kwargs):
            if my_obj._on_transaction:
                # second phase: the transaction finished; run the post handler
                result = self._pos(my_obj, *args, **kwargs)
                my_obj._on_transaction = False
                return result
            else:
                try:
                    self._pre(my_obj, *args, **kwargs)
                    raise AssertionError(
                        "The pre-transaction handler must raise a StartTx transaction"
                    )
                except StartTx:
                    # flag the pending transaction and propagate StartTx upward
                    my_obj._on_transaction = True
                    raise
        return MethodType(_pre_func, obj)
    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")
    def __delete__(self, obj):
        raise AttributeError("can't delete attribute")
    def pos(self, pos):
        # return a copy of this descriptor with the post handler attached
        # (decorator-style usage)
        return type(self)(self._pre, pos)
def __init__(
    self, constraints, address, data, caller, value, bytecode, world=None, gas=210000, **kwargs
):
    """
    Builds a Ethereum Virtual Machine instance
    :param constraints: the constraint set used to build symbolic arrays
    :param address: the address of the account which owns the code that is executing.
    :param data: the byte array that is the input data to this execution
    :param caller: the address of the account which caused the code to be executing. A 160-bit code used for identifying Accounts
    :param value: the value, in Wei, passed to this account as part of the same procedure as execution. One Ether is defined as being 10**18 Wei
    :param bytecode: the byte array that is the machine code to be executed
    :param world: the EVMWorld object where the transaction is being executed
    :param gas: gas budget for this transaction
    """
    super().__init__(**kwargs)
    # Lift concrete calldata into a symbolic array so the rest of the VM can
    # treat concrete and symbolic calldata uniformly.
    if data is not None and not issymbolic(data):
        data_size = len(data)
        data_symbolic = constraints.new_array(
            index_bits=256,
            value_bits=8,
            index_max=data_size,
            name=f"DATA_{address:x}",
            avoid_collisions=True,
            default=0,
        )
        data_symbolic[0:data_size] = data
        data = data_symbolic
    # Same lifting for concrete bytecode.
    if bytecode is not None and not issymbolic(bytecode):
        bytecode_size = len(bytecode)
        bytecode_symbolic = constraints.new_array(
            index_bits=256,
            value_bits=8,
            index_max=bytecode_size,
            name=f"BYTECODE_{address:x}",
            avoid_collisions=True,
            default=0,
        )
        bytecode_symbolic[0:bytecode_size] = bytecode
        bytecode = bytecode_symbolic
    # TODO: Handle the case in which bytecode is symbolic (This happens at
    # CREATE instructions that has the arguments appended to the bytecode)
    # This is a very cornered corner case in which code is actually symbolic
    # We should simply not allow to jump to unconstrained(*) symbolic code.
    # (*) bytecode that could take more than a single value
    self._check_jumpdest = False
    self._valid_jumpdests = set()

    # Compile the list of valid jumpdests via linear dissassembly
    def extend_with_zeroes(b):
        # Yield each byte (non-constant bytes as 0), then pad the tail with 32
        # zero bytes so a trailing PUSH immediate can still be decoded;
        # swallow iteration errors and simply stop.
        try:
            for x in b:
                x = to_constant(x)
                if isinstance(x, int):
                    yield (x)
                else:
                    yield (0)
            for _ in range(32):
                yield (0)
        except Exception as e:
            return

    for i in EVMAsm.disassemble_all(extend_with_zeroes(bytecode)):
        if i.mnemonic == "JUMPDEST":
            self._valid_jumpdests.add(i.pc)
    # A no code VM is used to execute transactions to normal accounts.
    # I'll execute a STOP and close the transaction
    # if len(bytecode) == 0:
    #     raise EVMException("Need code")
    self._constraints = constraints
    # Uninitialized values in memory are 0 by spec
    self.memory = constraints.new_array(
        index_bits=256,
        value_bits=8,
        name=f"EMPTY_MEMORY_{address:x}",
        avoid_collisions=True,
        default=0,
    )
    self.address = address
    # address of the account that is directly responsible for this execution
    self.caller = caller
    self.data = data
    self.value = value
    self._bytecode = bytecode
    self.suicides = set()
    self.logs = []
    # FIXME parse decode and mark invalid instructions
    # self.invalid = set()

    # Machine state
    self.pc = 0
    self.stack = []
    # We maintain gas as a 512 bits internally to avoid overflows
    # it is shortened to 256 bits when it is used by the GAS instruction
    self._gas = Operators.ZEXTEND(gas, 512)
    self._world = world
    self._allocated = 0
    self._on_transaction = False  # for @transact
    self._checkpoint_data = None
    self._published_pre_instruction_events = False
    # Used calldata size (both sizes are persisted by __getstate__).
    # Removed: the unused locals min_size/max_size and the misspelled
    # duplicate attribute `_valid_jmpdests`, which was never read and never
    # serialized by __getstate__.
    self._used_calldata_size = 0
    self._calldata_size = len(self.data)
@property
def bytecode(self):
    # read-only view of the (possibly symbolic) executing code
    return self._bytecode
@property
def constraints(self):
    # the active constraint set for this VM's symbolic state
    return self._constraints
@constraints.setter
def constraints(self, constraints):
    # keep the memory array bound to the same constraint set as the VM
    self._constraints = constraints
    self.memory.constraints = constraints
@property
def gas(self):
    # remaining gas, kept internally as 512 bits (see __init__) to avoid
    # overflow; truncated to 256 bits where the GAS instruction needs it
    return self._gas
def __getstate__(self):
    """Extend the base serialized state with this VM's machine state."""
    state = super().__getstate__()
    state.update(
        memory=self.memory,
        world=self._world,
        constraints=self.constraints,
        address=self.address,
        caller=self.caller,
        data=self.data,
        value=self.value,
        bytecode=self._bytecode,
        pc=self.pc,
        stack=self.stack,
        gas=self._gas,
        allocated=self._allocated,
        suicides=self.suicides,
        logs=self.logs,
        _on_transaction=self._on_transaction,
        _checkpoint_data=self._checkpoint_data,
        _published_pre_instruction_events=self._published_pre_instruction_events,
        _used_calldata_size=self._used_calldata_size,
        _calldata_size=self._calldata_size,
        _valid_jumpdests=self._valid_jumpdests,
        _check_jumpdest=self._check_jumpdest,
    )
    return state
def __setstate__(self, state):
    """Restore the VM's machine state from a serialized mapping (see __getstate__)."""
    self._checkpoint_data = state["_checkpoint_data"]
    self._published_pre_instruction_events = state["_published_pre_instruction_events"]
    self._on_transaction = state["_on_transaction"]
    self._gas = state["gas"]
    # order matters: memory must be restored before constraints, because the
    # constraints setter writes through to self.memory.constraints
    self.memory = state["memory"]
    self.logs = state["logs"]
    self._world = state["world"]
    self.constraints = state["constraints"]
    self.address = state["address"]
    self.caller = state["caller"]
    self.data = state["data"]
    self.value = state["value"]
    self._bytecode = state["bytecode"]
    self.pc = state["pc"]
    self.stack = state["stack"]
    self._allocated = state["allocated"]
    self.suicides = state["suicides"]
    self._used_calldata_size = state["_used_calldata_size"]
    self._calldata_size = state["_calldata_size"]
    self._valid_jumpdests = state["_valid_jumpdests"]
    self._check_jumpdest = state["_check_jumpdest"]
    super().__setstate__(state)
def _get_memfee(self, address, size=1):
"""
This calculates the amount of extra gas needed for accessing to
previously unused memory.
:param address: base memory offset
:param size: size of the memory access
"""
if not issymbolic(size) and size == 0:
return 0
address = self.safe_add(address, size)
allocated = self.allocated
GMEMORY = 3
GQUADRATICMEMDENOM = 512 # 1 gas per 512 quadwords
old_size = Operators.ZEXTEND(Operators.UDIV(self.safe_add(allocated, 31), 32), 512)
new_size = Operators.ZEXTEND(Operators.UDIV(self.safe_add(address, 31), 32), 512)
old_totalfee = self.safe_mul(old_size, GMEMORY) + Operators.UDIV(
self.safe_mul(old_size, old_size), GQUADRATICMEMDENOM
)
new_totalfee = self.safe_mul(new_size, | |
3 * y(t) + 31),
Eq(diff(y(t), t, t), 9 * x(t) + 7 * y(t) + 12),
)
sol2 = [
Eq(
x(t),
3 * C1 * exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 0))
+ 3 * C2 * exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 1))
+ 3 * C3 * exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 2))
+ 3 * C4 * exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 3))
- Rational(181, 29),
),
Eq(
y(t),
C1
* (rootof(l ** 4 - 15 * l ** 2 + 29, 0) ** 2 - 8)
* exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 0))
+ C2
* (rootof(l ** 4 - 15 * l ** 2 + 29, 1) ** 2 - 8)
* exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 1))
+ C3
* (rootof(l ** 4 - 15 * l ** 2 + 29, 2) ** 2 - 8)
* exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 2))
+ C4
* (rootof(l ** 4 - 15 * l ** 2 + 29, 3) ** 2 - 8)
* exp(t * rootof(l ** 4 - 15 * l ** 2 + 29, 3))
+ Rational(183, 29),
),
]
assert dsolve(eq2) == sol2
# FIXME: assert checksysodesol(eq2, sol2) == (True, [0, 0]) # this one fails
eq3 = (
Eq(diff(x(t), t, t) - 9 * diff(y(t), t) + 7 * x(t), 0),
Eq(diff(y(t), t, t) + 9 * diff(x(t), t) + 7 * y(t), 0),
)
sol3 = [
Eq(
x(t),
C1 * cos(t * (Rational(9, 2) + sqrt(109) / 2))
+ C2 * sin(t * (Rational(9, 2) + sqrt(109) / 2))
+ C3 * cos(t * (-sqrt(109) / 2 + Rational(9, 2)))
+ C4 * sin(t * (-sqrt(109) / 2 + Rational(9, 2))),
),
Eq(
y(t),
-C1 * sin(t * (Rational(9, 2) + sqrt(109) / 2))
+ C2 * cos(t * (Rational(9, 2) + sqrt(109) / 2))
- C3 * sin(t * (-sqrt(109) / 2 + Rational(9, 2)))
+ C4 * cos(t * (-sqrt(109) / 2 + Rational(9, 2))),
),
]
assert dsolve(eq3) == sol3
assert checksysodesol(eq3, sol3) == (True, [0, 0])
eq4 = (
Eq(diff(x(t), t, t), 9 * t * diff(y(t), t) - 9 * y(t)),
Eq(diff(y(t), t, t), 7 * t * diff(x(t), t) - 7 * x(t)),
)
sol4 = [
Eq(
x(t),
C3 * t
+ t
* Integral(
(
9 * C1 * exp(3 * sqrt(7) * t ** 2 / 2)
+ 9 * C2 * exp(-3 * sqrt(7) * t ** 2 / 2)
)
/ t ** 2,
t,
),
),
Eq(
y(t),
C4 * t
+ t
* Integral(
(
3 * sqrt(7) * C1 * exp(3 * sqrt(7) * t ** 2 / 2)
- 3 * sqrt(7) * C2 * exp(-3 * sqrt(7) * t ** 2 / 2)
)
/ t ** 2,
t,
),
),
]
assert dsolve(eq4) == sol4
assert checksysodesol(eq4, sol4) == (True, [0, 0])
eq5 = (
Eq(
diff(x(t), t, t),
(log(t) + t ** 2) * diff(x(t), t) + (log(t) + t ** 2) * 3 * diff(y(t), t),
),
Eq(
diff(y(t), t, t),
(log(t) + t ** 2) * 2 * diff(x(t), t)
+ (log(t) + t ** 2) * 9 * diff(y(t), t),
),
)
sol5 = [
Eq(
x(t),
-sqrt(22)
* (
C1 * Integral(exp((-sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
+ C2
- C3 * Integral(exp((sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
- C4
- (sqrt(22) + 5)
* (
C1
* Integral(exp((-sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
+ C2
)
+ (-sqrt(22) + 5)
* (
C3 * Integral(exp((sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
+ C4
)
)
/ 88,
),
Eq(
y(t),
-sqrt(22)
* (
C1 * Integral(exp((-sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
+ C2
- C3 * Integral(exp((sqrt(22) + 5) * Integral(t ** 2 + log(t), t)), t)
- C4
)
/ 44,
),
]
assert dsolve(eq5) == sol5
assert checksysodesol(eq5, sol5) == (True, [0, 0])
eq6 = (
Eq(diff(x(t), t, t), log(t) * t * diff(y(t), t) - log(t) * y(t)),
Eq(diff(y(t), t, t), log(t) * t * diff(x(t), t) - log(t) * x(t)),
)
sol6 = [
Eq(
x(t),
C3 * t
+ t
* Integral(
(C1 * exp(Integral(t * log(t), t)) + C2 * exp(-Integral(t * log(t), t)))
/ t ** 2,
t,
),
),
Eq(
y(t),
C4 * t
+ t
* Integral(
(C1 * exp(Integral(t * log(t), t)) - C2 * exp(-Integral(t * log(t), t)))
/ t ** 2,
t,
),
),
]
assert dsolve(eq6) == sol6
assert checksysodesol(eq6, sol6) == (True, [0, 0])
eq7 = (
Eq(
diff(x(t), t, t),
log(t) * (t * diff(x(t), t) - x(t)) + exp(t) * (t * diff(y(t), t) - y(t)),
),
Eq(
diff(y(t), t, t),
(t ** 2) * (t * diff(x(t), t) - x(t)) + (t) * (t * diff(y(t), t) - y(t)),
),
)
sol7 = [
Eq(
x(t),
C3 * t
+ t
* Integral(
(
C1 * x0(t)
+ C2
* x0(t)
* Integral(
t
* exp(t)
* exp(Integral(t ** 2, t))
* exp(Integral(t * log(t), t))
/ x0(t) ** 2,
t,
)
)
/ t ** 2,
t,
),
),
Eq(
y(t),
C4 * t
+ t
* Integral(
(
C1 * y0(t)
+ C2
* (
y0(t)
* Integral(
t
* exp(t)
* exp(Integral(t ** 2, t))
* exp(Integral(t * log(t), t))
/ x0(t) ** 2,
t,
)
+ exp(Integral(t ** 2, t))
* exp(Integral(t * log(t), t))
/ x0(t)
)
)
/ t ** 2,
t,
),
),
]
assert dsolve(eq7) == sol7
# FIXME: assert checksysodesol(eq7, sol7) == (True, [0, 0])
eq8 = (
Eq(diff(x(t), t, t), t * (4 * x(t) + 9 * y(t))),
Eq(diff(y(t), t, t), t * (12 * x(t) - 6 * y(t))),
)
sol8 = [
Eq(
x(t),
-sqrt(133)
* (
-4 * C1 * airyai(t * (-1 + sqrt(133)) ** (S(1) / 3))
+ 4 * C1 * airyai(-t * (1 + sqrt(133)) ** (S(1) / 3))
- 4 * C2 * airybi(t * (-1 + sqrt(133)) ** (S(1) / 3))
+ 4 * C2 * airybi(-t * (1 + sqrt(133)) ** (S(1) / 3))
+ (-sqrt(133) - 1)
* (
C1 * airyai(t * (-1 + sqrt(133)) ** (S(1) / 3))
+ C2 * airybi(t * (-1 + sqrt(133)) ** (S(1) / 3))
)
- (-1 + sqrt(133))
* (
C1 * airyai(-t * (1 + sqrt(133)) ** (S(1) / 3))
+ C2 * airybi(-t * (1 + sqrt(133)) ** (S(1) / 3))
)
)
/ 3192,
),
Eq(
y(t),
-sqrt(133)
* (
-C1 * airyai(t * (-1 + sqrt(133)) ** (S(1) / 3))
+ C1 * airyai(-t * (1 + sqrt(133)) ** (S(1) / 3))
- C2 * airybi(t * (-1 + sqrt(133)) ** (S(1) / 3))
+ C2 * airybi(-t * (1 + sqrt(133)) ** (S(1) / 3))
)
/ 266,
),
]
assert dsolve(eq8) == sol8
assert checksysodesol(eq8, sol8) == (True, [0, 0])
assert filldedent(dsolve(eq8)) == filldedent(
"""
[Eq(x(t), -sqrt(133)*(-4*C1*airyai(t*(-1 + sqrt(133))**(1/3)) +
4*C1*airyai(-t*(1 + sqrt(133))**(1/3)) - 4*C2*airybi(t*(-1 +
sqrt(133))**(1/3)) + 4*C2*airybi(-t*(1 + sqrt(133))**(1/3)) +
(-sqrt(133) - 1)*(C1*airyai(t*(-1 + sqrt(133))**(1/3)) +
C2*airybi(t*(-1 + sqrt(133))**(1/3))) - (-1 +
sqrt(133))*(C1*airyai(-t*(1 + sqrt(133))**(1/3)) + C2*airybi(-t*(1 +
sqrt(133))**(1/3))))/3192), Eq(y(t), -sqrt(133)*(-C1*airyai(t*(-1 +
sqrt(133))**(1/3)) + C1*airyai(-t*(1 + sqrt(133))**(1/3)) -
C2*airybi(t*(-1 | |
from tnsnames.tnsnamesListener import TnsnamesListener
from tnsnames.tnsnamesParser import tnsnamesParser
from tnsnames.tnsnamesStack import TnsnamesStack
# Base class for all format classes
class TnsnamesFormatter(TnsnamesListener):
def __init__(self):
    """Set up an empty formatter with a fresh rule stack and cleared context flags."""
    self._lines = []
    self._tnsStack = TnsnamesStack()
    # context flags, toggled by the matching enter*/exit* callbacks
    for flag in ("_enteredAlias", "_enteredAddressList",
                 "_enteredDescriptionList", "_enteredDescription"):
        setattr(self, flag, False)
@property
def get_lines(self):
    # NOTE(review): a property with a getter-style name; callers access it
    # as `obj.get_lines` (no call). Renaming to `lines` would break callers.
    return self._lines
# Enter a parse tree produced by tnsnamesParser#tnsnames.
def enterTnsnames(self, ctx: tnsnamesParser.TnsnamesContext):
    # record this rule on the stack; the matching exit callback verifies it
    self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tnsnames.
def exitTnsnames(self, ctx: tnsnamesParser.TnsnamesContext):
    # sanity-check balanced enter/exit nesting
    # NOTE(review): bare `pop` (no parentheses) — assumes TnsnamesStack.pop
    # is a property; confirm in TnsnamesStack
    assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tns_entry.
def enterTns_entry(self, ctx: tnsnamesParser.Tns_entryContext):
    # track rule nesting for the matching exit callback
    self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tns_entry.
def exitTns_entry(self, ctx: tnsnamesParser.Tns_entryContext):
    # NOTE(review): bare `pop` — assumed to be a TnsnamesStack property
    assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ifile.
def enterIfile(self, ctx: tnsnamesParser.IfileContext):
    # track rule nesting for the matching exit callback
    self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ifile.
def exitIfile(self, ctx: tnsnamesParser.IfileContext):
    # NOTE(review): bare `pop` — assumed to be a TnsnamesStack property
    assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#lsnr_entry.
def enterLsnr_entry(self, ctx: tnsnamesParser.Lsnr_entryContext):
    # track rule nesting for the matching exit callback
    self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#lsnr_entry.
def exitLsnr_entry(self, ctx: tnsnamesParser.Lsnr_entryContext):
    # NOTE(review): bare `pop` — assumed to be a TnsnamesStack property
    assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#lsnr_description.
def enterLsnr_description(self, ctx: tnsnamesParser.Lsnr_descriptionContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#lsnr_description.
def exitLsnr_description(self, ctx: tnsnamesParser.Lsnr_descriptionContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#alias_list.
def enterAlias_list(self, ctx: tnsnamesParser.Alias_listContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#alias_list.
def exitAlias_list(self, ctx: tnsnamesParser.Alias_listContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#alias.
def enterAlias(self, ctx: tnsnamesParser.AliasContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
self._enteredAlias = True
# Exit a parse tree produced by tnsnamesParser#alias.
def exitAlias(self, ctx: tnsnamesParser.AliasContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
self._enteredAlias = False
# Enter a parse tree produced by tnsnamesParser#description_list.
def enterDescription_list(self, ctx: tnsnamesParser.Description_listContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
self._enteredDescriptionList = True
# Exit a parse tree produced by tnsnamesParser#description_list.
def exitDescription_list(self, ctx: tnsnamesParser.Description_listContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
self._enteredDescriptionList = False
# Enter a parse tree produced by tnsnamesParser#dl_params.
def enterDl_params(self, ctx: tnsnamesParser.Dl_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#dl_params.
def exitDl_params(self, ctx: tnsnamesParser.Dl_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#dl_parameter.
def enterDl_parameter(self, ctx: tnsnamesParser.Dl_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#dl_parameter.
def exitDl_parameter(self, ctx: tnsnamesParser.Dl_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#description.
def enterDescription(self, ctx: tnsnamesParser.DescriptionContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
self._enteredDescription = True
# Exit a parse tree produced by tnsnamesParser#description.
def exitDescription(self, ctx: tnsnamesParser.DescriptionContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
self._enteredDescription = False
# Enter a parse tree produced by tnsnamesParser#d_params.
def enterD_params(self, ctx: tnsnamesParser.D_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_params.
def exitD_params(self, ctx: tnsnamesParser.D_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_parameter.
def enterD_parameter(self, ctx: tnsnamesParser.D_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_parameter.
def exitD_parameter(self, ctx: tnsnamesParser.D_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_enable.
def enterD_enable(self, ctx: tnsnamesParser.D_enableContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_enable.
def exitD_enable(self, ctx: tnsnamesParser.D_enableContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_sdu.
def enterD_sdu(self, ctx: tnsnamesParser.D_sduContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_sdu.
def exitD_sdu(self, ctx: tnsnamesParser.D_sduContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_recv_buf.
def enterD_recv_buf(self, ctx: tnsnamesParser.D_recv_bufContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_recv_buf.
def exitD_recv_buf(self, ctx: tnsnamesParser.D_recv_bufContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_send_buf.
def enterD_send_buf(self, ctx: tnsnamesParser.D_send_bufContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_send_buf.
def exitD_send_buf(self, ctx: tnsnamesParser.D_send_bufContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_service_type.
def enterD_service_type(self, ctx: tnsnamesParser.D_service_typeContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_service_type.
def exitD_service_type(self, ctx: tnsnamesParser.D_service_typeContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_security.
def enterD_security(self, ctx: tnsnamesParser.D_securityContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_security.
def exitD_security(self, ctx: tnsnamesParser.D_securityContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_conn_timeout.
def enterD_conn_timeout(self, ctx: tnsnamesParser.D_conn_timeoutContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_conn_timeout.
def exitD_conn_timeout(self, ctx: tnsnamesParser.D_conn_timeoutContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_retry_count.
def enterD_retry_count(self, ctx: tnsnamesParser.D_retry_countContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_retry_count.
def exitD_retry_count(self, ctx: tnsnamesParser.D_retry_countContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#d_tct.
def enterD_tct(self, ctx: tnsnamesParser.D_tctContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#d_tct.
def exitD_tct(self, ctx: tnsnamesParser.D_tctContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ds_parameter.
def enterDs_parameter(self, ctx: tnsnamesParser.Ds_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ds_parameter.
def exitDs_parameter(self, ctx: tnsnamesParser.Ds_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#address_list.
def enterAddress_list(self, ctx: tnsnamesParser.Address_listContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
self._enteredAddressList = True
# Exit a parse tree produced by tnsnamesParser#address_list.
def exitAddress_list(self, ctx: tnsnamesParser.Address_listContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
self._enteredAddressList = False
# Enter a parse tree produced by tnsnamesParser#al_params.
def enterAl_params(self, ctx: tnsnamesParser.Al_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#al_params.
def exitAl_params(self, ctx: tnsnamesParser.Al_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#al_parameter.
def enterAl_parameter(self, ctx: tnsnamesParser.Al_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#al_parameter.
def exitAl_parameter(self, ctx: tnsnamesParser.Al_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#al_failover.
def enterAl_failover(self, ctx: tnsnamesParser.Al_failoverContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#al_failover.
def exitAl_failover(self, ctx: tnsnamesParser.Al_failoverContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#al_load_balance.
def enterAl_load_balance(self, ctx: tnsnamesParser.Al_load_balanceContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#al_load_balance.
def exitAl_load_balance(self, ctx: tnsnamesParser.Al_load_balanceContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#al_source_route.
def enterAl_source_route(self, ctx: tnsnamesParser.Al_source_routeContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#al_source_route.
def exitAl_source_route(self, ctx: tnsnamesParser.Al_source_routeContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#address.
def enterAddress(self, ctx: tnsnamesParser.AddressContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#address.
def exitAddress(self, ctx: tnsnamesParser.AddressContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#a_params.
def enterA_params(self, ctx: tnsnamesParser.A_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#a_params.
def exitA_params(self, ctx: tnsnamesParser.A_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#a_parameter.
def enterA_parameter(self, ctx: tnsnamesParser.A_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#a_parameter.
def exitA_parameter(self, ctx: tnsnamesParser.A_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#protocol_info.
def enterProtocol_info(self, ctx: tnsnamesParser.Protocol_infoContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#protocol_info.
def exitProtocol_info(self, ctx: tnsnamesParser.Protocol_infoContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_protocol.
def enterTcp_protocol(self, ctx: tnsnamesParser.Tcp_protocolContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_protocol.
def exitTcp_protocol(self, ctx: tnsnamesParser.Tcp_protocolContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_params.
def enterTcp_params(self, ctx: tnsnamesParser.Tcp_paramsContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_params.
def exitTcp_params(self, ctx: tnsnamesParser.Tcp_paramsContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_parameter.
def enterTcp_parameter(self, ctx: tnsnamesParser.Tcp_parameterContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_parameter.
def exitTcp_parameter(self, ctx: tnsnamesParser.Tcp_parameterContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_host.
def enterTcp_host(self, ctx: tnsnamesParser.Tcp_hostContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_host.
def exitTcp_host(self, ctx: tnsnamesParser.Tcp_hostContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_port.
def enterTcp_port(self, ctx: tnsnamesParser.Tcp_portContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_port.
def exitTcp_port(self, ctx: tnsnamesParser.Tcp_portContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#tcp_tcp.
def enterTcp_tcp(self, ctx: tnsnamesParser.Tcp_tcpContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#tcp_tcp.
def exitTcp_tcp(self, ctx: tnsnamesParser.Tcp_tcpContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#host.
def enterHost(self, ctx: tnsnamesParser.HostContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#host.
def exitHost(self, ctx: tnsnamesParser.HostContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#port.
def enterPort(self, ctx: tnsnamesParser.PortContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#port.
def exitPort(self, ctx: tnsnamesParser.PortContext):
assert tnsnamesParser.ruleNames[ctx.getRuleIndex()] == self._tnsStack.pop
# Enter a parse tree produced by tnsnamesParser#ipc_protocol.
def enterIpc_protocol(self, ctx: tnsnamesParser.Ipc_protocolContext):
self._tnsStack.push(tnsnamesParser.ruleNames[ctx.getRuleIndex()])
# Exit a parse tree produced by tnsnamesParser#ipc_protocol.
def exitIpc_protocol(self, ctx: tnsnamesParser.Ipc_protocolContext):
| |
or_coord_format = 'cart'
else:
or_coord_format = 'cart'
#
# update display section
#
if 'display' in di:
for k in di['display']:
try:
self.display[k] = eval(di['display'][k])
except:
self.display[k] = di['display'][k]
# self.ax = self.display['box']
#
# [points]
#
# update points section
for nn in di['points']:
nodeindex = eval(nn)
if or_coord_format == 'latlon':
x, y = coords.xy[nn]
else:
x, y = eval(di['points'][nn])
#
# limitation of point precision is important for avoiding
# topological problems in shapely.
# Layout precision is hard limited to millimeter precision.
#
self.Gs.add_node(nodeindex) # add point node
self.Gs.pos[nodeindex] = (
round(1000 * x) / 1000., round(1000 * y) / 1000.)
self.labels[nodeindex] = nn
#
# [segments]
#
# update segments section
self.name['AIR'] = []
self.name['_AIR'] = []
#
# get the maximum index
#
maxnum = max([eval(x) for x in di['segments'].keys()])
for key in di['segments']:
d = eval(di['segments'][key])
nta = d['connect'][0]
nhe = d['connect'][1]
#print(key,nta,nhe)
name = d['name']
z = d['z']
if not 'transition' in d:
transition = False
else:
transition = d['transition']
if not 'offset' in d:
offset = 0
else:
offset = d['offset']
# add new segment
#
# The segment number is the same as in the .lay file
#
# Very useful feature
#
num = self.add_segment(nta, nhe,
num = eval(key),
name = name,
transition = transition,
offset = offset,
z = z)
# exploit iso for segment completion (AIR type)
#
# Complement single segment which do not reach zceil or zfloor with
# an iso segment with AIR property
#
segdone = []
for key in di['segments']:
iseg = eval(key)
d = eval(di['segments'][key])
nta = d['connect'][0]
nhe = d['connect'][1]
# if not already done
if iseg not in segdone:
# get all the iso from the segment key
iso = copy.copy(self.Gs.node[iseg]['iso'])
# append key to iso
iso.append(iseg)
# stack all the intervals in increasing order
ziso = []
for ns in iso:
ziso.append(self.Gs.node[ns]['z'])
# get the complementary intervals
if self.typ == 'outdoor':
zmin = 1e6
zmax = -1e6
for iz in ziso:
zmin = np.minimum(zmin,min(iz))
zmax = np.maximum(zmax,max(iz))
ziso = [(zmin,zmax)]
zair = pyu.compint(ziso,self.zfloor,self.zceil)
# add AIR wall in the intervals
for za in zair:
num = self.add_segment(nta, nhe,
name='AIR',
offset=0,
z=(za[0], za[1]))
segdone = segdone + iso
#
# add _AIR wall around the layout
#
self.boundary()
# compliant with config file without material/slab information
#
# {latlon]
#
if config.has_section('latlon'):
llcrnrlon = eval(config.get('latlon', 'llcrnrlon'))
llcrnrlat = eval(config.get('latlon', 'llcrnrlat'))
urcrnrlon = eval(config.get('latlon', 'urcrnrlon'))
urcrnrlat = eval(config.get('latlon', 'urcrnrlat'))
projection = config.get('latlon','projection')
lon_0 = (llcrnrlon+urcrnrlon)/2.
lat_0 = (llcrnrlat+urcrnrlat)/2.
# Construction of Basemap for coordinates transformation
self.m = Basemap(llcrnrlon=llcrnrlon,
llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon,
urcrnrlat=urcrnrlat,
resolution='i',
projection=projection,
lon_0=lon_0,
lat_0=lat_0)
self.extent = (llcrnrlon,urcrnrlon,llcrnrlat,urcrnrlat)
self.pll = self.m(self.extent[0],self.extent[2])
self.pur = self.m(self.extent[1],self.extent[3])
self.extent_c = (self.pll[0],self.pur[0],self.pll[1],self.pur[1])
if config.has_section('files'):
# self.filematini=config.get('files','materials')
# self.fileslabini=config.get('files','slab')
self._filefur = config.get('files', 'furniture')
if config.has_section('slabs'):
#filemat = self._filename.replace('ini', 'mat')
#fileslab = self._filename.replace('ini', 'slab')
ds = di['slabs']
dm = di['materials']
for k in ds:
ds[k] = eval(ds[k])
for k in dm:
dm[k] = eval(dm[k])
self.sl = sb.SlabDB(ds=ds, dm=dm)
# In this section we handle the ini file format evolution
if 'fileoverlay' in self.display:
self.display['overlay_file'] = self.display.pop('fileoverlay')
self.display['overlay_axis'] = self.display['box']
self.save()
if 'inverse' in self.display:
self.display['overlay_flip'] = ""
self.display.pop('inverse')
self.save()
# convert graph Gs to numpy arrays for faster post processing
self.g2npy()
#
fd = open(filelay,'rb')
self._hash = hashlib.md5(fd.read()).hexdigest()
fd.close()
def loadfur(self, _filefur):
""" loadfur load a furniture file
Parameters
----------
_filefur : string
short name of the furniture ini file
Notes
-----
Furniture objects are stored in self.lfur list
Examples
--------
Load a Layout file and an associated furniture ini file
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.gis.layout import *
>>> L = Layout('WHERE1.lay')
>>> L.loadfur('Furw1.ini')
>>> fig = plt.figure()
>>> ax = fig.gca()
>>> fig,ax = L.showGs(fig=fig,ax=ax,furniture=True)
>>> ti = plt.title('loadfur')
>>> plt.show()
"""
filefur = pyu.getlong(_filefur, pro.pstruc['DIRFUR'])
config = ConfigParser.ConfigParser()
config.read(filefur)
furname = config.sections()
self.lfur = []
for name in furname:
F = fur.Furniture()
F.load(_filefur, name)
self.lfur.append(F)
self.filefur = _filefur
def load_modif(self, _filename, build=True, cartesian=False, dist_m=400):
""" load a Layout in different formats
Parameters
----------
_filename : string
Notes
-----
+ .lay : ini file format (natural one) DIRLAY
"""
newfile = False
filename = pyu.getlong(_filename, pro.pstruc['DIRLAY'])
if os.path.exists(filename): # which exists
self.loadini(arg)
else: # which do not exist
self._filename = _filename
newfile = True
print("new file", self._filename)
# construct geomfile (.off) for vizualisation with geomview
self.subseg()
if not newfile:
try:
self.geomfile()
except:
print("problem to construct geomfile")
# if check:
# self.check()
self.boundary(dx=10, dy=10)
# create shapely polygons L._shseg
def subseg(self):
""" establishes the association : name <-> edgelist
Returns
-------
dico : dict
sub segment name as key and segment number as value
"""
dico = {}
listtransition = []
for k in self.Gs.node.keys():
dk = self.Gs.node[k]
if 'transition' in dk:
transition = dk['transition']
if transition:
listtransition.append(k)
if 'ss_name' in dk:
lname = dk['ss_name']
for j, name in enumerate(lname):
if name in dico:
dico[name].append((k, j))
else:
dico[name] = [(k, j)]
self.dsseg = dico
self.listtransition = listtransition
return(dico)
def add_pnod(self, p, e1, e2):
""" Project point p on segment e1 along segment e2
Parameters
----------
p : ndarray
point
e1 : int
edge number 1
e2 : int
edge number 2
..todo
This function is void
"""
#p1 = p + alpha*ve2
#p1 = pa + beta * (pb-pa)
pass
def add_fnod(self, p=(0.0, 0.0)):
""" add free node p
Parameters
----------
p : (1x2) tuple
Examples
--------
>>> from pylayers.gis.layout import *
>>> L = Layout('defstr.lay')
>>> L.add_fnod((10.0,10.0))
-13
"""
# next free node
if len(self.Gs.node)>0:
num = -( -min(self.Gs.node) + 1 )
else:
num = -1
self.Gs.add_node(num)
self.Gs.pos[num] = p
self.Np = self.Np + 1
# update labels
self.labels[num] = str(num)
return(num)
def add_nfpe(self, np0, s1, s2):
""" Add node on s1 from projection of np0 along s2
Parameters
----------
np0 : point number
s1 : edge number 1
s2 : edge number 2
"""
np1 = list(self.Gs[s1].keys())
np2 = list(self.Gs[s2].keys())
xA = self.Gs.pos[np1[0]][0]
yA = self.Gs.pos[np1[0]][1]
xB = self.Gs.pos[np1[1]][0]
yB = self.Gs.pos[np1[1]][1]
xC = self.Gs.pos[np2[0]][0]
yC = self.Gs.pos[np2[0]][1]
xD = self.Gs.pos[np2[1]][0]
yD = self.Gs.pos[np2[1]][1]
xP = self.Gs.pos[np0][0]
yP = self.Gs.pos[np0][1]
A = np.array([[xB - xA, xD - xC], [yB - yA, yD - yC]])
b = np.array([xP - xA, yP - yA])
x = sp.linalg.solve(A, b)
if ((x[0] > 0.) & (x[0] < 1.0)):
self.add_pons(s1, 1 - x[0])
def add_pons(self, ns, alpha=0.5):
""" add point on segment
Parameters
----------
ns : int
segment number
alpha : parameterization of the point
alpha = 0 (tail) alpha = 1 (head)
Notes
-----
delete segment ns
create 2 segments with same properties
"""
# v1.1 nop = self.Gs.neighbors(ns)
nop = list(self.Gs[ns])
namens = self.Gs.node[ns]['name']
zminns = self.Gs.node[ns]['z'][0]
zmaxns = self.Gs.node[ns]['z'][1]
p1 = np.array([self.Gs.pos[nop[0]][0], self.Gs.pos[nop[0]][1]])
p2 = np.array([self.Gs.pos[nop[1]][0], self.Gs.pos[nop[1]][1]])
p = tuple(alpha * p1 + (1 - alpha) * p2)
num = self.add_fnod(p)
# delete old edge ns
self.del_segment(ns)
# add new edge np[0] num
self.add_segment(nop[0], num, name=namens, z=[
zminns, zmaxns], offset=0)
# add new edge num np[1]
self.add_segment(num, nop[1], name=namens, z=[
zminns, zmaxns], offset=0)
def add_segment(self,
n1,
n2,
num=-1,
maxnum=-1,
transition = False,
name='PARTITION',
z=(0.0, 40000000),
offset=0,
verbose=True):
""" add segment between node n1 and node n2
Parameters
----------
n1 : integer < 0
n2 : integer < 0
num : segment index (-1 default not given)
maxnum : maximum number (-1 default not given)
name : string
layer name 'PARTITION'
z : tuple of 2 floats
default = (0,40000000)
offset : float
[-1,1] default (0)
Returns
-------
num : segment number (>0)
Notes
-----
A segment dictionnary has the following mandatory attributes
name : slab name associated with segment
z : list (zmin,zmax) (meters)
norm : array (1x3) segment normal
transition : boolean
ncycles : list of involved cycles
connect : list of point number
iso : list of | |
_ref_Abel_v_12_t),
(12,71 ) : ((0,1,645,446,813,543,413,7,55,177,468,503,646), _ref_Abel_v_12_t),
(12,73 ) : ((0,1,607,719,837,496,240,645,184,829,451,830,770), _ref_Abel_v_12_t),
(12,83 ) : ((0,1,627,898,836,939,742,42,847,531,173,607,361), _ref_Abel_v_12_t),
(12,85 ) : ((0,1,778,1000,913,819,961,456,507,186,509,495,300), _ref_Abel_v_12_t),
(12,89 ) : ((0,1,602,894,827,661,350,647,304,47,430,533,550), _ref_Abel_v_12_t),
(12,91 ) : ((0,1,777,1054,855,892,792,134,224,740,240,898,631), _ref_Abel_v_12_t),
(12,93 ) : ((0,1,601,1004,872,557,599,819,381,248,270,1091,49), _ref_Abel_v_12_t),
(12,101) : ((0,1,787,1049,818,1064,288,346,464,958,1188,340,1192), _ref_Abel_v_12_t),
(12,103) : ((0,1,770,1027,806,1082,515,436,1096,1060,57,1135,1144), _ref_Abel_v_12_t),
(12,115) : ((0,1,747,1179,873,484,969,692,679,153,1237,1110,616), _ref_Abel_v_12_t),
(12,119) : ((0,1,701,1225,834,515,367,727,1349,407,891,1189,153), _ref_Abel_v_12_t),
(12,121) : ((0,1,713,1265,848,421,998,69,874,1126,693,467,1164), _ref_Abel_v_12_t),
(12,129) : ((0,1,623,1170,824,450,1099,418,948,177,207,797,59), _ref_Abel_v_12_t),
(12,133) : ((0,1,648,1157,822,371,407,180,1120,898,342,548,117), _ref_Abel_v_12_t),
(12,135) : ((0,1,712,1253,844,623,943,992,191,845,299,1381,611), _ref_Abel_v_12_t),
(12,139) : ((0,1,627,1216,711,489,642,904,733,1246,96,1617,12), _ref_Abel_v_12_t),
(12,141) : ((0,1,447,522,967,763,1035,344,93,561,1137,523,828), _ref_Abel_v_12_t),
(12,145) : ((0,1,426,582,937,534,1538,1606,1148,1436,191,1406,823), _ref_Abel_v_12_t),
(12,149) : ((0,1,420,509,957,593,835,1031,1502,319,1552,1047,993), _ref_Abel_v_12_t),
(12,155) : ((0,1,300,482,962,638,1207,1682,885,211,1838,1244,531), _ref_Abel_v_12_t),
(12,161) : ((0,1,455,318,952,400,470,584,1368,292,678,1138,383), _ref_Abel_v_12_t),
(12,169) : ((0,1,425,326,951,1211,1881,1063,1631,1363,1554,665,1600), _ref_Abel_v_12_t),
(12,171) : ((0,1,432,319,933,688,549,63,2002,1702,653,1081,1813), _ref_Abel_v_12_t),
(12,185) : ((0,1,404,324,935,605,366,360,178,221,533,1940,30), _ref_Abel_v_12_t),
(12,189) : ((0,1,303,329,957,866,2180,1899,597,2209,1186,994,1301), _ref_Abel_v_12_t),
(12,191) : ((0,1,491,527,939,377,1685,1735,1967,1176,391,2192,681), _ref_Abel_v_12_t),
(12,195) : ((0,1,331,313,934,384,2105,479,1546,86,184,1127,1822), _ref_Abel_v_12_t),
(12,199) : ((0,1,377,524,946,560,316,1591,2036,273,1841,2091,713), _ref_Abel_v_12_t),
(12,203) : ((0,1,324,312,933,341,547,68,39,1008,561,1372,1300), _ref_Abel_v_12_t),
(12,213) : ((0,1,343,312,933,378,229,60,1179,1781,1960,66,536), _ref_Abel_v_12_t),
(12,223) : ((0,1,463,316,933,413,970,1083,2322,491,1226,1809,560), _ref_Abel_v_12_t),
(12,229) : ((0,1,338,312,933,380,401,2398,612,1279,1514,268,528), _ref_Abel_v_12_t),
(12,233) : ((0,1,405,314,934,398,1053,310,2254,2250,2652,1300,1079), _ref_Abel_v_12_t),
(12,243) : ((0,1,486,314,933,375,697,151,1964,1623,1590,1756,1152), _ref_Abel_v_12_t),
(12,253) : ((0,1,322,312,933,395,1047,12,176,1859,881,1220,2465), _ref_Abel_v_12_t),
(12,255) : ((0,1,463,316,938,345,360,2537,2648,2270,789,2959,2796), _ref_Abel_v_12_t),
(12,259) : ((0,1,486,314,933,350,575,1962,2347,750,3054,2719,1841), _ref_Abel_v_12_t),
(12,265) : ((0,1,333,312,933,343,759,1754,2650,1633,2479,2718,1164), _ref_Abel_v_12_t),
(12,269) : ((0,1,432,312,938,345,567,2441,966,1935,470,2105,3043), _ref_Abel_v_12_t),
(12,271) : ((0,1,463,313,933,356,453,2869,793,748,2116,3126,2839), _ref_Abel_v_12_t),
(12,275) : ((0,1,477,313,943,358,474,2312,1258,52,1452,2370,260), _ref_Abel_v_12_t),
(12,281) : ((0,1,483,313,933,387,418,961,1586,766,2937,275,2569), _ref_Abel_v_12_t),
(12,289) : ((0,1,474,313,943,367,963,3147,2157,238,12,1610,2189), _ref_Abel_v_12_t),
(12,293) : ((0,1,423,335,945,397,235,2878,1793,2484,2440,503,1609), _ref_Abel_v_12_t),
(12,295) : ((0,1,428,337,931,406,360,1978,68,375,721,2390,2465), _ref_Abel_v_12_t),
(12,301) : ((0,1,436,351,924,367,1196,265,2527,720,664,105,250), _ref_Abel_v_12_t),
(12,303) : ((0,1,487,572,946,462,2646,2616,1249,3143,21,2537,2128), _ref_Abel_v_12_t),
(12,309) : ((0,1,417,327,944,341,1924,1975,2308,1234,1658,1829,1606), _ref_Abel_v_12_t),
(12,311) : ((0,1,435,557,937,371,267,428,1289,3355,2948,3030,861), _ref_Abel_v_12_t),
(12,321) : ((0,1,319,325,952,364,674,2128,643,393,1025,619,868), _ref_Abel_v_12_t),
(12,323) : ((0,1,445,344,920,365,567,3483,3364,1240,344,2683,3070), _ref_Abel_v_12_t),
(12,335) : ((0,1,478,557,969,462,1587,1457,2552,2575,2420,168,924), _ref_Abel_v_12_t),
(12,341) : ((0,1,498,362,954,440,584,421,3867,3964,404,664,2233), _ref_Abel_v_12_t),
(12,355) : ((0,1,415,329,927,512,615,2336,127,2245,2250,2272,1888), _ref_Abel_v_12_t),
(12,363) : ((0,1,541,368,971,370,297,555,148,4195,1197,1527,211), _ref_Abel_v_12_t),
(12,379) : ((0,1,424,545,948,415,378,1181,2984,3458,3288,3888,74), _ref_Abel_v_12_t),
(12,383) : ((0,1,477,534,964,441,246,972,2504,3957,3101,4366,2168), _ref_Abel_v_12_t),
(12,385) : ((0,1,543,334,943,531,793,1852,538,4231,4492,580,3816), _ref_Abel_v_12_t),
(12,399) : ((0,1,487,571,964,391,300,4515,2211,3063,2771,2586,1056), _ref_Abel_v_12_t),
(12,401) : ((0,1,442,543,964,514,567,763,3816,3621,2124,1092,1456), _ref_Abel_v_12_t),
(12,405) : ((0,1,433,552,963,385,684,63,4243,3494,3500,560,4611), _ref_Abel_v_12_t),
(12,409) : ((0,1,426,541,954,411,708,1875,2058,2443,1913,2924,3673), _ref_Abel_v_12_t),
(12,411) : ((0,1,430,558,963,397,372,492,2502,3948,18,1191,3761), _ref_Abel_v_12_t),
(12,413) : ((0,1,436,546,977,467,242,3695,682,483,3026,461,1334), _ref_Abel_v_12_t),
}
# Translate all V(m,t) into (mt+1,m+2;1,0;t)-QDM constructors
# Every V(m,t) vector yields a quasi-difference matrix with parameters
# n = m*t+1, k = m+2, lambda = 1, mu = 0, u = t; each is registered in the
# global QDM index under the key (n+u, lambda).
for (m,t),(vec,source) in iteritems(Vmt_vectors):
    n,k,lmbda,mu,u = (m*t+1,m+2,1,0,t)
    if not (n+u,lmbda) in QDM:
        QDM[n+u,lmbda] = {}
    # m, t and vec are bound as lambda defaults so each constructor closes
    # over its own triple (late binding would reuse the last loop values)
    QDM[n+u,lmbda][n,lmbda,mu,u] = (k,lambda m=m,t=t,vec=vec:QDM_from_Vmt(m,t,vec))
# Create the list of V(m,t) vectors for the doc
# One documentation line per value of m, listing every t for which a V(m,t)
# vector is known; the throwaway variable `_` doubles as the filter variable.
_all_m = sorted(set(m for m,_ in Vmt_vectors.keys()))
LIST_OF_VMT_VECTORS = "\n".join(" - `m={}` and `t=` ".format(m) +
                               ", ".join("`{}`".format(t) for _,t in sorted(Vmt_vectors.keys()) if _ == m)
                               for m in _all_m)
r"""
Tests for the Vmt vectors
EXAMPLES::
sage: from sage.combinat.designs.designs_pyx import is_quasi_difference_matrix
sage: from sage.combinat.designs.orthogonal_arrays import QDM_from_Vmt
sage: from sage.combinat.designs.database import Vmt_vectors
sage: for (m,t),(vec,source) in sorted(Vmt_vectors.items()):
....: G,M = QDM_from_Vmt(m,t,vec)
....: if m*t < 600:
....: assert is_quasi_difference_matrix(M,G,m+2,1,1,t,verbose=1),(m,t)
....: assert len(source)>10
"""
# Database of difference families.
#
# DF maps a triple (v, k, lambda) to a dictionary {group: base_blocks} where
# `group` is a tuple of cyclic factor orders (e.g. (15,) for Z_15, (5,5) for
# Z_5 x Z_5) and `base_blocks` is the list of base blocks of a
# (v, k, lambda)-difference family over that group.  Entries are grouped by
# lambda in the section banners below.
DF = {
##############
# lambda = 1 #
##############
( 15, 3, 1):
  {(15,): [[0,1,4],[0,2,9],[0,5,10]]},
( 21, 3, 1):
  {(21,): [[0,1,3],[0,4,12],[0,5,11],[0,7,14]]},
( 21, 5, 1):
  {(21,): [[0,1,4,14,16]]},
( 25, 3, 1):
  {(25,): [[0,1,3],[0,4,11],[0,5,13],[0,6,15]]},
( 25, 4, 1):
  {(5,5): [[(0,0),(0,1),(1,0),(2,2)],[(0,0),(0,2),(2,0),(4,4)]]},
( 27, 3, 1):
  {(27,): [[0,1,3],[0,4,11],[0,5,15],[0,6,14],[0,9,18]]},
( 33, 3, 1):
  {(33,): [[0,1,3],[0,4,10],[0,5,18],[0,7,19],[0,8,17],[0,11,22]]},
( 37, 4, 1):
  {(37,): [[0,1,3,24],[0,4,26,32],[0,10,18,30]]},
( 39, 3, 1):
  {(39,): [[0,1,3],[0,4,18],[0,5,27],[0,6,16],[0,7,15],[0,9,20],[0,13,26]]},
( 40, 4, 1):
  {(40,): [[0,1,4,13],[0,2,7,24],[0,6,14,25],[0,10,20,30]]},
( 45, 3, 1):
  {(45,): [[0,1,3],[0,4,10],[0,5,28],[0,7,34],[0,8,32],[0,9,29],[0,12,26],[0,15,30]]},
( 45, 5, 1):
  {(3,3,5): [[(0,1,0),(0,2,0),(1,0,2),(2,0,2),(0,0,1)],
             [(2,1,0),(1,2,0),(2,2,2),(1,1,2),(0,0,1)],
             [(0,0,0),(0,0,1),(0,0,2),(0,0,3),(0,0,4)]]},
( 49, 3, 1):
  {(49,): [[0,1,3],[0,4,9],[0,6,17],[0,7,23],[0,8,30],[0,10,31],[0,12,36],[0,14,34]]},
( 49, 4, 1):
  {(49,): [[0,1,3,8,],[0,4,18,29],[0,6,21,33],[0,9,19,32]]},
( 51, 3, 1):
  {(51,): [[0,1,3],[0,4,9],[0,6,25],[0,7,35],
           [0,8,22],[0,10,21],[0,12,27],[0,13,31],[0,17,34]]},
( 52, 4, 1):
  {(52,): [[0,1,3,7,],[0,5,19,35],[0,8,20,31],[0,9,24,34],[0,13,26,39]]},
( 55, 3, 1):
  {(55,): [[0,1,3],[0,4,9],[0,6,16],[0,7,32],[0,8,29],
           [0,11,42],[0,12,27],[0,14,36],[0,17,37]]},
( 57, 3, 1):
  {(57,): [[0,1,3],[0,4,9],[0,6,13],[0,8,26],[0,10,33],
           [0,11,32],[0,12,40],[0,14,41],[0,15,35],[0,19,38]]},
( 63, 3, 1):
  {(63,): [[0,1,3],[0,4,9],[0,6,13],[0,8,25],[0,10,41],[0,11,44],
           [0,12,36],[0,14,37],[0,15,43],[0,16,34],[0,21,42]]},
( 64, 4, 1):
  {(64,): [[0,1,3,7,],[0,5,18,47],[0,8,33,44],
           [0,9,19,43],[0,12,26,49],[0,16,32,48]]},
( 65, 5, 1):
  {(65,): [[0,1,3,31,45],[0,4,10,19,57],[0,5,16,41,48],[0,13,26,39,52]]},
( 69, 3, 1):
  {(69,): [[0,1,3],[0,4,9],[0,6,13],[0,8,24],[0,10,38],[0,11,47],[0,12,32],
           [0,14,40],[0,15,50],[0,17,42],[0,18,39],[0,23,46]]},
( 75, 3, 1):
  {(75,): [[0,1,67],[0,2,47],[0,3,41],[0,4,69],[0,5,68],[0,11,55],[0,13,61],
           [0,15,33],[0,16,52],[0,17,43],[0,19,40],[0,22,51],[0,25,50]]},
( 76, 4, 1):
  {(76,): [[0,1,7,22],[0,2,11,45],[0,3,59,71],[0,4,32,50],
           [0,10,37,51],[0,13,36,60],[0,19,38,57]]},
( 81, 3, 1):
  {(81,): [[0,1,39],[0,2,58],[0,3,34],[0,4,21],[0,5,67],[0,6,15],[0,7,36],
           [0,8,59],[0,10,63],[0,11,37],[0,12,61],[0,13,48],[0,16,40],[0,27,54]]},
( 81, 5, 1):
  {(81,): [[0,1,5,12,26],[0,2,10,40,64],[0,3,18,47,53],[0,9,32,48,68]]},
( 85, 4, 1):
  {(85,): [[0,2,41,42],[0,17,32,38],[0,18,27,37],[0,13,29,36],
           [0,11,31,35],[0,12,26,34,],[0,5,30,33]]},
( 91, 6, 1):
  {(91,): [[0,1,3,7,25,38], [0,16,21,36,48,62], [0,30,40,63,74,82]]},
( 91, 7, 1): # from the La Jolla covering repository, attributed to <NAME> and <NAME>
  {(91,): [[8, 9, 14, 25, 58, 81, 85], [5, 33, 35, 42, 45, 67, 88], [4, 17, 30, 43, 56, 69, 82]]},
(121, 5, 1):
  {(121,): [[0,14,26,51,60],[0,15,31,55,59],[0,10,23,52,58],
            [0,3,36,56,57],[0,7,18,45,50],[0,8,30,47,49]]},
(121, 6, 1):
  {(11,11): [[(0,0),(0,3),(0,4),(1,1),(1,7),(4,6)],
             [(0,0),(0,2),(2,5),(4,7),(6,4),(8,0)],
             [(0,0),(1,5),(2,0),(4,1),(6,0),(7,2)],
             [(0,0),(1,0),(3,9),(4,8),(6,1),(9,5)]]},
(141, 5, 1):
  {(141,): [[0,33,60,92,97],[0,3,45,88,110],[0,18,39,68,139],[0,12,67,75,113],
            [0,1,15,84,94],[0,7,11,24,30],[0,36,90,116,125]]},
(161, 5, 1):
  {(161,): [[0,19,34,73,80],[0,16,44,71,79],[0,12,33,74,78],[0,13,30,72,77],
            [0,11,36,67,76],[0,18,32,69,75],[0,10,48,68,70],[0,3,29,52,53]]},
(175, 7, 1):
  {(7,5,5): [[(0,0,0),(1,0,0),(2,0,0),(3,0,0),(4,0,0),(5,0,0),(6,0,0)],
             [(0,0,0),(1,1,3),(1,4,2),(2,2,2),(2,3,3),(4,2,0),(4,3,0)],
             [(0,0,0),(1,3,4),(1,2,1),(2,2,3),(2,3,2),(4,0,2),(4,0,3)],
             [(0,0,0),(1,1,2),(1,4,3),(2,1,1),(2,4,4),(4,0,1),(4,0,4)],
             [(0,0,0),(1,3,1),(1,2,4),(2,4,1),(2,1,4),(4,1,0),(4,4,0)]]},
(201, 5, 1):
  {(201,): [[0,1,45,98,100],[0,3,32,65,89],[0,4,54,70,75],[0,6,49,69,91],[0,7,58,81,95],
            [0,8,34,72,90],[0,9,36,77,96],[0,10,35,83,94],[0,12,40,79,92],[0,15,46,76,93]]},
(217, 7, 1):
  {(217,): [[0,1,37,67,88,92,149],[0,15,18,65,78,121,137],[0,8,53,79,85,102,107],
            [0,11,86,100,120,144,190],[0,29,64,165,198,205,207],[0,31,62,93,124,155,186]]},
(221, 5, 1):
  {(221,): [[0,1,24,61,116],[0,3,46,65,113],[0,4,73,89,130],[0,5,77,122,124],
            [0,6,39,50,118],[0,7,66,81,94],[0,8,38,64,139],[0,9,29,80,107],
            [0,10,35,93,135],[0,12,34,52,88],[0,14,31,63,84]]},
(259, 7, 1): # the following one is lemma 2.2 in Abel "Some new BIBDs with block size 7"
  {(7,37): [[(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),(6,0)],
            [(0,0),(0,1),(0,6),(1,4),(2,19),(3,25),(6,26)],
            [(0,0),(0,10),(0,23),(2,3),(4,5),(5,1),(6,28)],
            [(0,0),(0,8),(0,26),(1,13),(3,10),(4,30),(5,21)],
            [(0,0),(0,4),(1,25),(1,34),(2,33),(2,35),(4,10)],
            [(0,0),(0,3),(1,26),(2,7),(2,28),(4,17),(4,34)],
            [(0,0),(0,30),(1,7),(1,22),(2,1),(4,21),(4,33)]]},
##############
# lambda = 2 #
##############
( 16, 3, 2):
  {(16,): [[0,1,2],[0,2,8],[0,3,7],[0,4,7],[0,5,10]]},
( 28, 3, 2):
  {(28,): [[0,1,12],[0,2,11],[0,2,12],[0,3,7],[0,3,13],
           [0,4,9],[0,5,13],[0,6,7],[0,6,14]]},
( 40, 3, 2):
  {(40,): [[0,1,4],[0,1,16],[0,2,7],[0,2,9],[0,3,17],[0,4,17],[0,5,19],
           [0,6,16],[0,6,18],[0,8,18],[0,8,19],[0,9,20],[0,12,25]]},
( 19, 4, 2):
  {(19,): [[0,1,3,12],[0,1,5,13],[0,4,6,9]]},
# NOTE(review): the entry below has lambda = 3 although it sits in the
# "lambda = 2" section -- lookup is by key, so only the banner is misleading.
( 21, 4, 3):
  {(21,): [[0,2,3,7],[0,3,5,9],[0,1,7,11],[0,2,8,11],[0,1,9,14]]},
( 22, 4, 2):
  {(22,): [[0,4,16,17],[0,12,14,21],[0,14,16,19],[0,4,11,15]]},
( 31, 4, 2):
  {(31,): [[0,1,8,11],[0,1,13,17],[0,2,11,14],[0,5,7,13],[0,5,9,15]]},
( 34, 4, 2):
  {(34,): [[0,1,22,24],[0,1,19,25],[0,2,6,29],[0,4,7,20],[0,5,8,20],[0,8,17,25]]},
( 43, 4, 2):
  {(43,): [[0,1,6,36],[0,3,18,22],[0,9,11,23],[0,10,12,26],[0,26,27,33],
           [0,13,35,38],[0,19,28,39,]]},
( 46, 4, 2):
  {(46,): [[0,2,7,10],[0,4,19,32],[0,10,34,35],[0,5,8,24],[0,26,30,39],
           [0,17,26,32],[0,28,34,45],[0,2,23,25]]},
(31, 5, 2):
  {(31,): [[0,1,3,7,15],[0,3,9,14,21],[0,4,5,13,15,]]},
( 35, 5, 2):
  {(35,): [[0,2,8,12,13],[0,3,18,22,27],[0,17,23,32,33],
           [0,7,14,21,28],[0,7,14,21,28]]},
( 51, 5, 2):
  {(51,): [[0,1,14,31,35],[0,1,9,23,33],[0,11,16,18,42],
           [0,7,13,36,39],[0,4,10,12,15]]},
( 71, 5, 2):
  {(71,): [[1,5,25,54,57],[3,4,15,20,29],[9,12,16,45,60],[27,36,38,48,64],
           [2,10,37,43,50],[6,8,30,40,58],[18,19,24,32,49]]},
( 46, 6, 2):
  {(46,): [[0,1,3,11,31,35],[0,1,4,10,23,29],[0,2,7,15,32,41]]},
( 61, 6, 2):
  {(61,): [[12,15,28,34,35,59],[1,13,18,47,51,53],
           [8,10,11,21,29,43],[16,20,25,32,40,50]]},
( 43, 7, 2):
  {(43,): [[0,1,11,19,31,38,40],[0,2,10,16,25,38,42]]},
( 64, 7, 2):
  {(64,): [[0,1,2,4,7,28,52],[0,4,9,21,31,39,53],[0,6,15,23,34,41,54]]},
( 75, 5, 2):
  {(5,15): [[(0,0),(1,10),(1,8),(4,1),(4,2)],
            [(0,0),(2,5),(2,10),(3,7),(3,13)],
            [(0,0),(1,10),(1,2),(4,4),(4,8)],
            [(0,0),(2,5),(2,10),(3,14),(3,11)],
            [(0,0),(1,4),(1,5),(4,1),(4,8)],
            [(0,0),(1,1),(1,5),(4,4),(4,2)],
            [(0,0),(2,7),(2,13),(3,1),(3,4)],
            [(0,0),(1,0),(2,0),(3,0),(4,0)],
            [(0,0),(1,0),(2,0),(3,0),(4,0)]]},
( 85, 7, 2):
  {(85,): [[0,1,11,20,32,35,39],[0,2,6,16,29,50,65],
           [0,3,9,27,55,72,80],[0,5,7,30,47,48,59]]},
( 85, 8, 2):
  {(85,): [[24,31,39,50,67,68,70,82],[20,49,51,55,56,60,72,81],
           [9,19,29,37,43,56,59,81]]},
(153, 9, 2):
  {(3,3,17): [[(0,0,0),(0,1,0),(0,2,0),(1,0,0),(1,1,0),(1,2,0),(2,0,0),(2,1,0),(2,2,0)],
              [(0,0,0),(0,1,0),(0,2,0),(1,0,0),(1,1,0),(1,2,0),(2,0,0),(2,1,0),(2,2,0)],
              [(0,0,0),(0,1,1),(0,1,16),(0,2,4),(0,2,13),(1,0,3),(1,0,14),(2,0,5),(2,0,12)],
              [(0,0,0),(0,1,2),(0,1,15),(0,2,8),(0,2,9),(1,0,6),(1,0,11),(2,0,10),(2,0,7)],
              [(0,0,0),(0,1,3),(0,1,14),(0,2,12),(0,2,5),(1,0,9),(1,0,8),(2,0,15),(2,0,2)],
              [(0,0,0),(0,1,6),(0,1,11),(0,2,7),(0,2,10),(1,0,1),(1,0,16),(2,0,13),(2,0,4)]]},
(181,10, 2):
  {(181,): [[1,7,40,42,51,59,113,125,135,151],
            [19,22,31,35,36,64,74,133,154,156],
            [10,15,34,47,58,65,83,87,161,164],
            [12,18,32,52,77,78,142,157,165,172]]},
##############
# lambda = 3 #
##############
( 21, 6, 3):
  {(21,): [[0,2,10,15,19,20],[0,3,7,9,10,16]]},
( 41, 6, 3):
  {(41,): [[0,1,10,16,18,37],[0,6,14,17,19,26],
           [0,2,20,32,33,36],[0,11,12,28,34,38]]},
( 51, 6, 3):
  {(51,): [[15,17,18,27,34,48],[3,17,30,34,42,45],[9,17,24,33,34,39],
           [3,25,41,43,44,48],[3,5,25,29,43,48]]},
( 61, 6, 3):
  {(61,): [[0,1,9,20,58,34],[0,2,7,18,40,55],[0,4,14,19,36,49],
           [0,8,11,28,37,38],[0,13,15,16,22,56],[0,26,30,32,44,51]]},
( 29, 7, 3):
  {(29,): [[1,7,16,20,23,24,25],[2,3,11,14,17,19,21]]},
( 43, 7, 3):
  {(43,): [[1,4,11,16,21,35,41],[3,5,12,19,20,33,37],[9,13,14,15,17,25,36]]},
( 57, 7, 3):
  {(57,): [[0,1,11,12,15,35,53],[0,7,17,20,27,29,48],
           [0,5,18,26,32,49,51],[0,2,6,9,14,41,42]]},
( 61,10, 3):
  {(61,): [[1,4,18,20,32,35,36,41,42,54],[11,13,14,21,23,28,34,39,43,47]]},
( 71, 7, 3):
  {(71,): [[1,20,30,32,37,45,48],[2,3,19,25,40,60,64],[4,6,9,38,49,50,57],
           [5,8,12,18,27,29,43],[10,15,16,24,36,54,58]]},
( 85, 7, 3):
  {(85,): [[0,7,23,27,28,31,71],[0,12,22,41,61,74,79],
           [0,6,11,13,38,42,77],[0,1,7,16,19,27,49],
           [0,9,26,39,54,56,71],[0,2,3,12,37,53,63]]},
( 97, 9, 3):
  {(97,): [[1,2,25,35,46,58,61,70,90],[3,4,8,38,43,50,69,86,87],
           [6,12,16,32,53,55,57,75,82],[9,18,24,26,31,34,37,48,64]]},
( 49, 9, 3):
  {(49,): [[0,1,3,5,9,14,19,25,37],[0,2,12,13,16,19,34,41,42]]},
(121,10, 3):
  {(11,11): [[(0,1),(0,3),(0,4),(0,5),(0,9),(1,8),(3,2),(4,10),(5,7),(9,6)],
             [(1,2),(3,6),(4,8),(5,10),(9,7),(10,2),(8,6),(7,8),(6,10),(2,7)],
             [(1,7),(3,10),(4,6),(5,2),(9,8),(1,4),(3,1),(4,5),(5,9),(9,3)],
             [(10,10),(8,8),(7,7),(6,6),(2,2),(1,0),(3,0),(4,0),(5,0),(9,0)]]},
###############
#  lambda = 4 #
###############
( 22, 7, 4):
  {(22,): [[0,2,6,8,9,10,13],[0,3,5,6,12,13,17]]},
( 29, 8, 4):
  {(29,): [[0,1,7,16,20,23,24,25],[0,2,3,11,14,17,19,21]]},
( 71, 8, 4):
  {(71,): [[0,1,20,30,32,37,45,48],[0,2,3,19,25,40,60,64],
           [0,4,6,9,38,49,50,57],[0,5,8,12,18,27,29,43],
           [0,10,15,16,24,36,54,58]]},
( 43, 8, 4):
  {(43,): [[0,1,4,11,16,21,35,41],[0,3,5,12,19,20,33,37],
           [0,9,13,14,15,17,25,36]]},
( 46,10, 4):
  {(46,): [[3,7,13,16,23,24,25,28,30,42],[2,10,12,18,25,34,40,43,44,45]]},
( 55, 9, 4):
  {(55,): [[0,4,21,25,26,42,45,53,54],[0,6,8,25,37,39,45,48,52],
           [2,5,6,13,15,20,25,39,45]]},
( 67,12, 4):
  {(67,): [[1,8,23,25,28,29,31,37,47,54,55,64],
           [3,20,25,32,36,39,44,45,54,55,57,59]]},
##############
# lambda = 5 #
##############
( 13, 5, 5):
  {(13,): [[0,1,2,4,8],[0,1,3,6,12],[0,2,5,6,10]]},
( 17, 5, 5):
  {(17,): [[0,1,4,13,16],[0,3,5,12,14],[0,2,8,9,15],[0,6,7,10,11]]},
( 21, 6, 5):
  {(21,): [[0,2,6,12,15,16],[0,3,6,7,11,19],
           [0,7,15,16,17,18],[0,2,7,9,14,16]]},
( 22, 6, 5):
  {(22,): [[0,1,2,5,10,13],[0,1,5,6,8,15],
           [0,2,3,6,16,18],[0,2,6,11,13,17]]},
( 28, 6, 5):
  {(28,): [[0,4,7,8,16,21],[5,7,8,9,14,20],[7,12,14,16,17,25],
           [1,4,7,13,14,24],[2,4,8,16,18,22]]},
( 33, 5, 5):
  {(33,): [[0,2,3,7,25],[0,3,13,14,29],[0,4,5,12,13],[0,2,12,16,26],
           [0,3,12,20,31],[3,9,12,15,27],[0,8,13,14,31],[0,2,7,13,29]]},
( 33, 6, 5):
  {(33,): [[0,3,12,17,18,28],[0,2,3,16,28,29],[0,16,20,26,28,30],
           [0,2,3,12,16,27],[0,6,20,21,28,30],[0,4,11,15,22,26]]},
( 37,10, 5):
  {(37,): [[0,1,7,9,10,12,16,26,33,34],[0,2,14,15,18,20,24,29,31,32]]},
( 39, 6,5):
  {(39,): [[0,3,4,17,19,32],[0,1,5,12,30,36],[0,3,8,9,25,27],[0,7,10,12,17,21],
           [0,16,18,19,27,35],[0,2,18,27,28,33],[0,6,13,19,26,32]]},
( 45,11, 5):
  {(45,): [[1,3,7,10,22,25,30,35,37,38,44],[0,2,3,14,22,26,27,28,31,32,38]]},
( 46,10, 5):
  {(46,): [[0,4,6,11,12,15,24,25,28,42],[0,2,5,7,8,9,14,24,34,35],
           [0,2,12,32,40,23,25,35,9,17]]},
( 55,10, 5):
  {(55,): [[0,5,11,15,20,22,25,33,44,45],[3,7,8,10,31,37,39,45,46,49],
           [3,7,8,10,31,37,39,45,46,49]]},
( 67,11, 5):
  {(67,): [[1,9,14,15,22,24,25,40,59,62,64],[2,13,18,28,30,44,48,50,51,57,61],
           [4,21,26,29,33,35,36,47,55,56,60]]},
( 73,10, 5):
  {(73,): [[0,1,2,4,8,16,32,37,55,64],[0,5,7,10,14,20,28,39,40,56],
           [0,25,27,35,49,50,54,61,67,70],[0,11,15,21,22,30,42,44,47,60]]},
###############
# lambda >= 6 #
###############
( 11, 4,6):
  {(11,): [[0,1,8,9],[0,2,5,7],[0,1,4,5],[0,2,3,5],[0,4,5,9]]},
( 15, 4,6):
  {(15,): [[0,1,2,3],[0,2,4,6],[0,4,8,12],[0,8,1,9],
           [3,6,9,12],[0,1,5,10],[0,2,5,10]]},
( 15, 5,6):
  {(15,): [[0,1,2,3,6],[0,2,4,7,8],[0,2,4,9,10],
           [0,3,6,10,11],[0,3,6,9,12]]},
( 21, 8,14):
  {(21,): [[0,9,10,13,14,15,18,19],[0,1,4,7,9,15,16,18],[0,1,2,4,6,14,15,16],
           [0,1,3,4,8,14,16,18],[0,1,4,9,11,12,14,16]]},
( 21, 10, 9):
  {(21,): [[0,1,2,3,4,7,8,11,14,16],[0,6,7,9,11,12,15,16,17,19]]},
( 22, 8, 8):
  {(22,): [[0,1,5,7,13,17,20,21],[0,2,7,11,13,14,16,17],[0,3,4,12,14,15,17,21]]},
( 22, 8,12):
  {(22,): [[1,2,3,5,6,9,15,18], [1,2,3,5,8,9,10,15],
           [1,3,4,9,13,18,19,21], [2,4,6,12,13,15,17,1],
           [2,4,8,12,13,15,19,1], [2,4,8,16,13,15,19,5]]},
( 25, 7, 7):
  {(5,5): [[(0,0),(0,1),(0,4),(1,1),(1,2),(4,3),(4,4)],
           [(0,0),(1,0),(1,3),(2,3),(3,2),(4,0),(4,2)],
           [(0,0),(0,2),(0,3),(2,2),(2,4),(3,1),(3,3)],
           [(0,0),(1,4),(2,0),(2,1),(3,0),(3,4),(4,1)]]},
( 29, 8,6):
  {(29,): [[0,5,10,11,12,13,16,20],[0,8,10,12,17,22,23,26],
           [0,4,5,11,13,23,25,26]]},
( 34,12, 8):
  {(34,): [[0,5,9,14,15,17,20,25,26,27,28,30],
           [0,6,7,10,13,17,18,20,22,24,25,26]]},
( 34,12,10):
  {(34,): [[0,2,3,4,8,9,11,13,14,24,27,30],
           [0,2,6,7,8,11,13,14,22,25,26,32],
           [0,2,10,18,22,32,17,19,27,1,5,15]]},
( 43,15,10):
  {(43,): [[1,3,6,13,18,21,22,25,26,27,33,35,36,38,40],
           [9,10,11,13,16,17,19,23,26,27,28,33,35,38,39]]},
# NOTE(review): the entry below has lambda = 3 although it sits in the
# "lambda >= 6" section -- lookup is by key, so only the banner is misleading.
( 45,12, 3):
  {(3,3,5): [[(0,0,0),(0,0,1),(0,0,2),(0,2,1),(0,0,3),(0,1,1),
              (1,0,0),(1,1,2),(1,2,3),(2,0,0),(2,1,3),(2,2,2)]]},
( 46,10, 6):
  {(46,): [[0,2,11,13,21,22,30,33,34,40],[0,2,6,7,22,23,28,32,35,38],
           [0,2,4,7,8,9,12,23,26,41]]},
( 49,21,10):
  {(7,7): [[(0,1),(0,2),(0,4),(1,1),(1,2),(1,4),(2,1),(2,2),(2,4),(3,1),(3,2),
            (3,4),(4,1),(4,2),(4,4),(5,1),(5,2),(5,4),(6,1),(6,2),(6,4)],
           [(1,0),(1,1),(1,2),(1,4),(2,0),(2,1),(2,2),(2,4),(4,0),(4,1),(4,2),
            (4,4),(3,3),(3,5),(3,6),(5,3),(5,5),(5,6),(6,3),(6,5),(6,6)]]},
( 53,13, 6):
  {(53,): [[1,10,13,15,16,24,28,36,42,44,46,47,49],
           [2,3,19,20,26,30,31,32,35,39,41,45,48]]},
( 53,14, 7):
  {(53,): [[0,1,10,13,15,16,24,28,36,42,44,46,47,49],
           [0,2,3,19,20,26,30,31,32,35,39,41,45,48]]},
( 61,15, 7):
  {(61,): [[0,1,3,4,8,10,13,22,30,35,44,45,46,50,58],
           [0,1,3,5,13,18,29,34,35,37,41,43,44,51,55]]},
( 67,12, 6):
  {(67,): [[0,1,9,14,15,22,24,25,40,59,62,64],
           [0,2,13,18,28,30,44,48,50,51,57,61],
           [0,4,21,26,29,33,35,36,47,55,56,60]]},
# a 133-cyclic set from Ken Smith database
# see https://math.ccrwest.org/diffsets/diff_sets/DS_133_33_8_133.html
(133,33, 8):
  {(133,): [[0,4,7,8,15,17,19,22,24,25,29,30,38,
             47,49,50,55,58,61,62,71,73,76,77,78,
             82,95,111,113,114,121,123,127]]},
# a 901-cyclic set
# see https://math.ccrwest.org/diffsets/diff_sets/DS_901_225_56_901.html
(901,225,56):
  {(901,): [[ 0,  1,  5,  9, 12, 13, 14, 16, 22, 25, 41, 43,
             45, 47, 53, 59, 60, 65, 69, 70, 71, 79, 80, 81,
             89, 92, 93,106,108,109,110,114,117,124,125,126,
            133,139,144,147,152,156,159,167,168,169,173,174,
            182,183,192,194,196,198,202,203,205,208,209,212,
            214,215,219,222,223,224,225,226,229,231,232,233,
            235,244,254,256,259,264,265,274,277,286,292,293,
            295,296,300,307,308,313,318,319,325,326,345,350,
            352,355,363,369,371,379,382,387,394,395,397,400,
            401,402,405,407,419,422,423,424,433,445,447,460,
            461,465,467,469,477,484,492,498,502,503,516,523,
            526,529,530,531,533,536,540,543,545,550,559,564,
            570,571,574,577,579,581,583,585,587,596,599,602,
            611,617,618,620,621,622,625,630,634,636,639,641,
            656,658,661,664,665,688,689,691,694,695,706,708,
            711,713,720,721,724,729,735,737,742,746,752,760,
            766,767,772,778,780,786,795,801,813,824,826,827,
            828,835,837,840,843,845,848,849,852,853,859,862,
            863,865,870,874,878,881,886,897,898]]}
}
# Create the list of DF for the documentation
# One documentation line per lambda value, listing every (v,k,lambda) triple
# known to DF; as above, `_` doubles as the filter variable.
_all_l = sorted(set(l for v,k,l in DF.keys()))
# NOTE(review): the format string is a *raw* string, so its "\n" is a literal
# backslash-n rather than a newline -- the newlines between lambda groups come
# from the outer "\n".join. Confirm against the rendered documentation before
# changing.
LIST_OF_DF = "\n".join(r" - `\lambda={}`:\n ".format(l) +
                      ", ".join("`({},{},{})`".format(v, k, l) for v,k,_ in sorted(DF.keys()) if _ == l)
                      for l in _all_l)
def DM_12_6_1():
    r"""
    Return a `(12,6,1)`-difference matrix as built in [Hanani75]_.

    This design is Lemma 3.21 from [Hanani75]_.

    EXAMPLES::

        sage: from sage.combinat.designs.designs_pyx import is_difference_matrix
        sage: from sage.combinat.designs.database import DM_12_6_1
        sage: G,M = DM_12_6_1()
        sage: is_difference_matrix(M,G,6,1)
        True

    Can be obtained from the constructor::

        sage: _ = designs.difference_matrix(12,6)

    REFERENCES:

    .. [Hanani75] <NAME>,
      Balanced incomplete block designs and related designs,
      :doi:`10.1016/0012-365X(75)90040-0`,
      Discrete Mathematics, Volume 11, Issue 3, 1975, Pages 255-369.
    """
    # Fixed: dropped the unused `AdditiveAbelianGroup` import that the
    # original carried alongside the only import actually needed here.
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    # The matrix lives over Z_2 x Z_6, an abelian group of order 12.
    G = AdditiveCyclic(2).cartesian_product(AdditiveCyclic(6))
    # 12 rows x 6 columns of group elements (pairs over Z_2 x Z_6).
    M = [[(0,0),(0,0),(0,0),(0,0),(0,0),(0,0)],
         [(0,0),(0,1),(1,0),(0,3),(1,2),(0,4)],
         [(0,0),(0,2),(1,2),(1,0),(0,1),(1,5)],
         [(0,0),(0,3),(0,2),(0,1),(1,5),(1,4)],
         [(0,0),(0,4),(1,1),(1,3),(0,5),(0,2)],
         [(0,0),(0,5),(0,1),(1,5),(1,3),(1,1)],
         [(0,0),(1,0),(1,3),(0,2),(0,3),(1,2)],
         [(0,0),(1,1),(1,5),(1,2),(1,4),(1,0)],
         [(0,0),(1,2),(0,4),(0,5),(0,2),(1,3)],
         [(0,0),(1,3),(1,4),(0,4),(1,1),(0,1)],
         [(0,0),(1,4),(0,5),(1,1),(1,0),(0,3)],
         [(0,0),(1,5),(0,3),(1,4),(0,4),(0,5)]]
    return G,M
def DM_21_6_1():
r"""
Return a `(21,6,1)`-difference matrix.
As explained in | |
chromo, pos):
anno_file = dat.GzipFile(anno_file, 'r')
anno = pd.read_table(anno_file, header=None, usecols=[0, 1, 2],
dtype={0: 'str', 1: 'int32', 2: 'int32'})
anno_file.close()
anno.columns = ['chromo', 'start', 'end']
anno.chromo = anno.chromo.str.upper().str.replace('CHR', '')
anno = anno.loc[anno.chromo == chromo]
anno.sort_values('start', inplace=True)
start, end = an.join_overlapping(anno.start.values, anno.end.values)
anno = np.array(an.is_in(pos, start, end), dtype='int8')
return anno
class App(object):
def run(self, args):
name = os.path.basename(args[0])
parser = self.create_parser(name)
opts = parser.parse_args(args[1:])
return self.main(name, opts)
def create_parser(self, name):
p = argparse.ArgumentParser(
prog=name,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Creates DeepCpG data for training and testing.')
# I/O
p.add_argument(
'--pos_file',
help='File with positions of CpG sites that are to be predicted.'
' If missing, only CpG sites that are observed in at least one of'
' the given cells will be used.')
p.add_argument(
'--cpg_profiles',
help='Input single-cell methylation profiles in dcpg or bedGraph'
' format that are to be imputed',
nargs='+')
p.add_argument(
'--cpg_wlen',
help='If provided, extract `cpg_wlen`//2 neighboring CpG sites',
type=int)
p.add_argument(
'--cpg_cov',
help='Minimum CpG coverage. Only use CpG sites for which the true'
' methylation state is known in at least that many cells.',
type=int,
default=1)
p.add_argument(
'--dna_files',
help='Directory or FASTA files named "*.chromosome.`chromo`.fa*"'
' with the DNA sequences for chromosome `chromo`.',
nargs='+')
p.add_argument(
'--dna_wlen',
help='DNA window length',
type=int,
default=1001)
p.add_argument(
'--anno_files',
help='Files with genomic annotations that are used as input'
' features. Currently ignored by `dcpg_train.py`.',
nargs='+')
p.add_argument(
'-o', '--out_dir',
help='Output directory',
default='.')
g = p.add_argument_group('output statistics')
g.add_argument(
'--cpg_stats',
help='Per CpG statistics derived from single-cell profiles.'
' Required, e.g., for predicting mean methylation levels or'
' cell-to-cell variance.',
nargs='+',
choices=['mean', 'mode', 'var', 'cat_var', 'cat2_var', 'entropy',
'diff', 'cov'])
g.add_argument(
'--cpg_stats_cov',
help='Minimum coverage for computing per CpG statistics',
type=int,
default=3)
g.add_argument(
'--win_stats',
help='Window-based output statistics derived from single-cell'
' profiles. Required, e.g., for predicting mean methylation levels'
' or cell-to-cell variance.',
nargs='+',
choices=['mean', 'mode', 'var', 'cat_var', 'cat2_var', 'entropy',
'diff', 'cov'])
g.add_argument(
'--win_stats_wlen',
help='Window lengths for computing statistics',
type=int,
nargs='+',
default=[1001, 2001, 3001, 4001, 5001])
g = p.add_argument_group('advanced arguments')
g.add_argument(
'--chromos',
nargs='+',
help='Chromosomes that are used')
g.add_argument(
'--nb_sample',
type=int,
help='Maximum number of samples')
g.add_argument(
'--nb_sample_chromo',
type=int,
help='Number of random samples from each chromosome')
g.add_argument(
'--chunk_size',
type=int,
default=32768,
help='Maximum number of samples per output file. Should be'
' divisible by batch size.')
g.add_argument(
'--seed',
help='Seed of random number generator',
type=int,
default=0)
g.add_argument(
'--verbose',
help='More detailed log messages',
action='store_true')
g.add_argument(
'--log_file',
help='Write log messages to file')
return p
def main(self, name, opts):
if opts.seed is not None:
np.random.seed(opts.seed)
logging.basicConfig(filename=opts.log_file,
format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(name)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(opts)
# Check input arguments
if not opts.cpg_profiles:
if not (opts.pos_file or opts.dna_files):
raise ValueError('Position table and DNA database expected!')
if opts.dna_wlen and opts.dna_wlen % 2 == 0:
raise '--dna_wlen must be odd!'
if opts.cpg_wlen and opts.cpg_wlen % 2 != 0:
raise '--cpg_wlen must be even!'
# Parse functions for computing output statistics
cpg_stats_meta = None
win_stats_meta = None
if opts.cpg_stats:
cpg_stats_meta = get_stats_meta(opts.cpg_stats)
if opts.win_stats:
win_stats_meta = get_stats_meta(opts.win_stats)
make_dir(opts.out_dir)
outputs = OrderedDict()
# Read single-cell profiles if provided
if opts.cpg_profiles:
log.info('Reading CpG profiles ...')
outputs['cpg'] = read_cpg_profiles(
opts.cpg_profiles,
chromos=opts.chromos,
nb_sample=opts.nb_sample,
nb_sample_chromo=opts.nb_sample_chromo,
log=log.info)
# Create table with unique positions
if opts.pos_file: #the pos_file provide the CpG positions which need to be predicted
# Read positions from file
log.info('Reading position table ...')
pos_table = pd.read_table(opts.pos_file, usecols=[0, 1],
dtype={0: str, 1: np.int32},
header=None, comment='#')
pos_table.columns = ['chromo', 'pos']
pos_table['chromo'] = dat.format_chromo(pos_table['chromo'])
pos_table = prepro_pos_table(pos_table)
else:
# Extract positions from profiles, if not provided. Predict position which available in at least one cells.
pos_tables = []
for cpg_table in list(outputs['cpg'].values()):
pos_tables.append(cpg_table[['chromo', 'pos']])
pos_table = prepro_pos_table(pos_tables)
if opts.chromos:
pos_table = pos_table.loc[pos_table.chromo.isin(opts.chromos)]
if opts.nb_sample_chromo:
pos_table = dat.sample_from_chromo(pos_table, opts.nb_sample_chromo)
if opts.nb_sample:
pos_table = pos_table.iloc[:opts.nb_sample]
log.info('%d samples' % len(pos_table))
make_dir(opts.out_dir)
# Iterate over chromosomes
# ------------------------
for chromo in pos_table.chromo.unique():
log.info('-' * 80)
log.info('Chromosome %s ...' % (chromo))
idx = pos_table.chromo == chromo ##idx is T/F for whether the entries are equal to the chromo
chromo_pos = pos_table.loc[idx].pos.values #a numpy array with 1D data
chromo_outputs = OrderedDict()
if 'cpg' in outputs:
# Concatenate CpG tables into single nb_site x nb_output matrix
chromo_outputs['cpg'] = map_cpg_tables(outputs['cpg'],
chromo, chromo_pos)
#chromo_outputs, one array called 'cpg', 'cpg' has #sample array,
#each item is mapped table of target_pos with value filled
#OrderedDict([('BS27_1_SER', array([1, 1, 1, ..., 1, 1, 0], dtype=int8)),
#('BS27_3_SER', array([-1, 1, 1, ..., 1, -1, -1], dtype=int8))])
chromo_outputs['cpg_mat'] = np.vstack(
list(chromo_outputs['cpg'].values())).T
#add one more array to it. np.vstack, stack array sequence vertically
#chromo_outputs['cpg_mat'].shape=(402166, 2)
#402166 is the CHR1 target pos number, 2 is the input two samples, BS27_1_SER, BS27_3_SER
assert len(chromo_outputs['cpg_mat']) == len(chromo_pos)
if 'cpg_mat' in chromo_outputs and opts.cpg_cov:
cov = np.sum(chromo_outputs['cpg_mat'] != dat.CPG_NAN, axis=1)
assert np.all(cov >= 1)
idx = cov >= opts.cpg_cov
tmp = '%s sites matched minimum coverage filter'
tmp %= format_out_of(idx.sum(), len(idx))
log.info(tmp)
if idx.sum() == 0:
continue
chromo_pos = chromo_pos[idx]
chromo_outputs = select_dict(chromo_outputs, idx)
# Read DNA of chromosome
chromo_dna = None
if opts.dna_files: #this will only read the corresponding chromosome sequence
chromo_dna = fasta.read_chromo(opts.dna_files, chromo) #chromo_dna is string, len=195471971 for chr1
annos = None
if opts.anno_files:
log.info('Annotating CpG sites ...')
annos = dict()
for anno_file in opts.anno_files:
name = split_ext(anno_file)
annos[name] = annotate(anno_file, chromo, chromo_pos)
# Iterate over chunks
# -------------------
nb_chunk = int(np.ceil(len(chromo_pos) / opts.chunk_size))
for chunk in range(nb_chunk):
log.info('Chunk \t%d / %d' % (chunk + 1, nb_chunk))
chunk_start = chunk * opts.chunk_size
chunk_end = min(len(chromo_pos), chunk_start + opts.chunk_size)
chunk_idx = slice(chunk_start, chunk_end)
chunk_pos = chromo_pos[chunk_idx]
chunk_outputs = select_dict(chromo_outputs, chunk_idx) #OrderedDict()
#chunk_outputs is 1D array
filename = 'c%s_%06d-%06d.h5' % (chromo, chunk_start, chunk_end)
filename = os.path.join(opts.out_dir, filename)
chunk_file = h5.File(filename, 'w')
# Write positions
chunk_file.create_dataset('chromo', shape=(len(chunk_pos),),
dtype='S2') #create_dataset() in default for h5py
chunk_file['chromo'][:] = chromo.encode() #set the chunk_file['chromo'] = 1 for all.
#chunk_file['chromo'].shape = (32768,)
chunk_file.create_dataset('pos', data=chunk_pos, dtype=np.int32)
#chunk_file['pos'].shape = (32768,) # the size is default chunk_size
if len(chunk_outputs): #len(chunk_outputs)=2
out_group = chunk_file.create_group('outputs')
#for now, type(out_group) = <class 'h5py._hl.group.Group'>
#list(out_group) = []
# Write cpg profiles
if 'cpg' in chunk_outputs:
for name, value in six.iteritems(chunk_outputs['cpg']):
#name = ["BS27_1_SER", 'BS27_3_SER'] # the sample name
#value= 2 numpy array, both with shape=(32768,)
assert len(value) == len(chunk_pos)
# Round continuous values
out_group.create_dataset('cpg/%s' % name,
data=value.round(),
dtype=np.int8,
compression='gzip')
#type(out_group)= <class 'h5py._hl.group.Group'>
#list(out_group) = ['cpg']
#list(out_group['cpg']) = ['BS27_1_SER', 'BS27_3_SER']
# Compute and write statistics
if cpg_stats_meta is not None:
log.info('Computing per CpG statistics ...')
cpg_mat = np.ma.masked_values(chunk_outputs['cpg_mat'],
dat.CPG_NAN)
#cpg_mat.shape=(32768, 2)
mask = np.sum(~cpg_mat.mask, axis=1)
mask = mask < opts.cpg_stats_cov
for name, fun in six.iteritems(cpg_stats_meta):
stat = fun[0](cpg_mat).data.astype(fun[1])
stat[mask] = dat.CPG_NAN
assert len(stat) == len(chunk_pos)
out_group.create_dataset('cpg_stats/%s' % name,
data=stat,
dtype=fun[1],
compression='gzip')
#until here:
#>>> chunk_file.visit(printname)
#chromo
#outputs
#outputs/cpg
#outputs/cpg/BS27_1_SER
            #outputs/cpg/BS27_3_SER
#pos
# Write input features
in_group = chunk_file.create_group('inputs')
# DNA windows
if chromo_dna:
log.info('Extracting DNA sequence windows ...')
dna_wins = extract_seq_windows(chromo_dna, pos=chunk_pos,
wlen=opts.dna_wlen)
#give the fasta sequence of one chromosome ('chromo_dna'), and targeted position ('chunk_pos')
#, and wlen=1001, return a numpy array with shape as (32768, 1001). The array has been transfered as
#number rather than base pair
assert len(dna_wins) == len(chunk_pos)
in_group.create_dataset('dna', data=dna_wins, dtype=np.int8,
compression='gzip')
#>>> in_group.visit(printname) = dna
# CpG neighbors
if opts.cpg_wlen:
log.info('Extracting CpG neighbors ...')
cpg_ext = fext.KnnCpgFeatureExtractor(opts.cpg_wlen // 2)
context_group = in_group.create_group('cpg')
# outputs['cpg'], since neighboring CpG sites might lie
# outside chunk borders and un-mapped values are needed
for name, cpg_table in six.iteritems(outputs['cpg']):
#name="BS27_1_SER" and "BS27_3_SER"
#cpg_table = numpy array, with three columns information for each input sample.
cpg_table = cpg_table.loc[cpg_table.chromo == chromo]
state, dist = cpg_ext.extract(chunk_pos,
cpg_table.pos.values,
cpg_table.value.values) #extract the cpg distance and state with wlen
nan = np.isnan(state)
state[nan] = dat.CPG_NAN #set nan value as -1, which means unknown
dist[nan] = dat.CPG_NAN
# States can be binary (np.int8) or continuous
# (np.float32).
state = state.astype(cpg_table.value.dtype, copy=False) #set data type
dist = dist.astype(np.float32, copy=False)
assert len(state) == len(chunk_pos)
assert len(dist) == len(chunk_pos)
assert np.all((dist > 0) | (dist == dat.CPG_NAN))
group = context_group.create_group(name)
group.create_dataset('state', data=state,
compression='gzip')
group.create_dataset('dist', data=dist,
compression='gzip')
#list(group) = ['state','dist']
if win_stats_meta is not None and opts.cpg_wlen:
log.info('Computing window-based statistics | |
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
from xml.dom import minidom
import inspect
import requests
from . import compat, utils
from .compat import ElementTree, six
from .utils import to_text
def _route_xml_path(root, *keys, **kw):
    """Walk an XML tree along *keys* and return the reached element.

    ``root`` may be an Element or an XML string (parsed on the fly).
    A key of ``'.'`` short-circuits to the current node.  Missing nodes are
    created when ``create_if_not_exists=True``; otherwise ``None`` is
    returned as soon as a key is absent.
    """
    create_if_not_exists = kw.get('create_if_not_exists', False)
    if isinstance(root, six.string_types):
        root = ElementTree.fromstring(root)
    node = root
    for key in keys:
        if key == '.':
            return node
        parent, node = node, node.find(key)
        if node is None:
            if not create_if_not_exists:
                return None
            # Materialize the missing step and keep walking.
            node = ElementTree.Element(key)
            parent.append(node)
    return node
def _route_json_path(root, *keys, **kw):
    """Walk nested dicts along *keys* and return the reached sub-dict.

    ``root`` may be a dict or a JSON string (parsed on the fly).  Missing
    keys are filled with fresh ``OrderedDict``s when
    ``create_if_not_exists=True``; otherwise ``None`` is returned.
    """
    create_if_not_exists = kw.get('create_if_not_exists', False)
    if isinstance(root, six.string_types):
        root = json.loads(root)
    node = root
    for key in keys:
        parent, node = node, node.get(key)
        if node is None:
            if not create_if_not_exists:
                return None
            node = parent[key] = compat.OrderedDict()
    return node
def parse_ndarray(array):
    """Convert *array* to a numpy ndarray, or pass it through unchanged
    when numpy is not installed (graceful degradation for optional dep)."""
    try:
        import numpy as np
    except ImportError:
        return array
    return np.asarray(array)
def serialize_ndarray(array):
    """Turn an ndarray-like object into a plain (nested) list.

    Anything without a ``tolist`` method is returned unchanged.
    """
    try:
        return array.tolist()
    except AttributeError:
        # Not ndarray-like; pass through as-is.
        return array
# Registry mapping a field ``type`` keyword to its (parse, serialize)
# callable pair; looked up by SerializeField.__init__.
_serialize_types = {
    'bool': (utils.str_to_bool, utils.bool_to_str),
    'json': (
        lambda s: json.loads(s) if s is not None else None,
        lambda s: json.dumps(s) if s is not None else None,
    ),
    'rfc822': (utils.parse_rfc822, utils.gen_rfc822),
    'rfc822l': (utils.parse_rfc822,
                lambda s: utils.gen_rfc822(s, localtime=True)),
    'ndarray': (parse_ndarray, serialize_ndarray),
}
class SerializeField(object):
    """Declarative description of how one model attribute maps to a node or
    key of a serialized (XML/JSON) document.

    Positional ``keys`` form the path to the node.  Keyword options:

    * ``required`` -- emit the field when serializing even if null.
    * ``blank_if_null`` -- serialize null as an empty string
      (defaults to the value of ``required``).
    * ``default`` -- value used when parsing finds nothing.
    * ``type`` -- pick a (parse, serialize) pair from ``_serialize_types``;
      overrides the explicit ``parse_callback``/``serialize_callback``.
    * ``set_to_parent`` -- store the parsed value on the parent model.
    """

    def __init__(self, *keys, **kwargs):
        self._path_keys = keys
        self._required = kwargs.get('required', False)  # only used on serialize
        self._blank_if_null = kwargs.get('blank_if_null', bool(self._required))
        self._default = kwargs.get('default')
        if 'type' in kwargs:
            self._parse_callback, self._serialize_callback = \
                _serialize_types[kwargs.pop('type')]
        else:
            self._parse_callback = kwargs.get('parse_callback')
            self._serialize_callback = kwargs.get('serialize_callback')
        self.set_to_parent = kwargs.get('set_to_parent', False)

    def _to_str(self, val):
        # Normalize text to the native ``str`` type; leave non-strings alone.
        return utils.to_str(val) if isinstance(val, six.string_types) else val

    def _set_default_keys(self, *keys):
        # Only fill in a path when the user did not give one explicitly.
        if not self._path_keys:
            self._path_keys = keys

    def parse(self, root, **kwargs):
        raise NotImplementedError

    def serialize(self, root, value):
        raise NotImplementedError
class HasSubModelField(SerializeField):
    """Field whose value is another :class:`SerializableModel` (or a list
    of them).

    ``model`` may be the model class itself, or a dotted string such as
    ``'Model.Inner'`` that is resolved lazily (see :attr:`_model`) — the
    string form avoids circular imports between model modules.
    """
    def __init__(self, model, *args, **kwargs):
        if isinstance(model, six.string_types):
            # Defer resolution: only remember the dotted name for now.
            self._model_cls = None
            self._model_str = model
        else:
            self._model_cls = model
            self._model_str = None
        super(HasSubModelField, self).__init__(*args, **kwargs)

    @property
    def _model(self):
        """Resolve and cache the sub-model class.

        Walks the call stack for a frame whose globals bind the first
        component of the dotted name to a SerializableModel subclass, then
        follows the remaining components as attributes.

        Raises:
            ValueError: if no frame on the stack defines the root name.
        """
        if self._model_cls is not None:
            return self._model_cls
        models = self._model_str.split('.')
        model_name = models[0]
        module = None
        for stack in inspect.stack():
            globs = stack[0].f_globals
            if model_name in globs:
                possible_module = globs[model_name]
                if inspect.isclass(possible_module) and \
                        issubclass(possible_module, SerializableModel):
                    module = possible_module
                    break
        if module is None:
            raise ValueError('Unknown model name: %s' % self._model_str)
        # Bug fix: start from the class just found instead of None, so a
        # single-component name (no dots) resolves to that class itself;
        # previously it resolved to None and was cached as such.
        res = module
        for model in models[1:]:
            res = getattr(res, model)
        self._model_cls = res
        return res
_default_name_maker = dict(capitalized=utils.underline_to_capitalized, raw=lambda v: v, camel=utils.underline_to_camel)
class SerializableModelMetaClass(type):
    """Metaclass collecting ``SerializeField`` declarations from a class body
    (and its bases) into an ordered ``__fields`` mapping, and turning the
    field names into ``__slots__`` entries on the generated class.
    """
    def __new__(mcs, name, bases, kv):
        slots = []
        fields = dict()
        # Inherit slots and field definitions from all bases; '__weakref__'
        # must not be re-declared or class creation would fail.
        for base in bases:
            base_slots = list(getattr(base, '__slots__', []))
            if '__weakref__' in base_slots:
                base_slots.remove('__weakref__')
            slots.extend(base_slots)
            fields.update(getattr(base, '__fields', dict()))
        slots.extend(kv.get('__slots__', []))
        fields.update(kv.get('__fields', dict()))
        attrs = []
        parent_attrs = []
        # Per-class naming policy, declared as the name-mangled attribute
        # ``__default_name`` ('capitalized' when absent).
        def_name = kv.pop('_' + name + '__default_name', 'capitalized')
        for attr, field in (pair for pair in six.iteritems(kv) if not pair[0].startswith('__')):
            # A bare field class is instantiated with its defaults.
            if inspect.isclass(field) and issubclass(field, SerializeField):
                field = field()
            if isinstance(field, SerializeField):
                # Default document path derived from the attribute name.
                field._set_default_keys(_default_name_maker[def_name](attr))
                if not field.set_to_parent:
                    slots.append(attr)
                    attrs.append(attr)
                if field.set_to_parent:
                    # Parent-bound fields get no slot on this class.
                    parent_attrs.append(attr)
                fields[attr] = field
        kv['_parent_attrs'] = set(parent_attrs)
        # De-duplicate slots preserving declaration order, then order fields
        # by their slot position (parent-bound fields sort last).
        slots = tuple(compat.OrderedDict.fromkeys(slots))
        slots_pos = dict([(v, k) for k, v in enumerate(slots)])
        fields = compat.OrderedDict(
            sorted(six.iteritems(fields), key=lambda s: slots_pos.get(s[0], float('inf'))))
        # Remove the field objects from the class dict — the names become
        # slot descriptors instead.
        for attr in attrs:
            if attr in kv:
                del kv[attr]
        slots = tuple(slot for slot in slots if slot not in kv)
        if len(slots) > 0:
            kv['__slots__'] = slots
        if len(fields) > 0:
            kv['__fields'] = fields
        return type.__new__(mcs, name, bases, kv)
class SerializableModel(six.with_metaclass(SerializableModelMetaClass)):
    """Base class for models whose attributes are declared as
    ``SerializeField``s and persisted to/from XML or JSON documents.

    Uses ``object.__getattribute__`` throughout instead of ``getattr`` so
    that only genuinely missing slot values are detected (``hasattr`` would
    mask errors raised by descriptors).
    """
    __slots__ = '_parent', '__weakref__'
    def __init__(self, **kwargs):
        # Accept only keyword args naming declared slots; every remaining
        # slot is initialized to None so later reads never raise.
        slots = getattr(self, '__slots__', [])
        for k, v in six.iteritems(kwargs):
            if k in slots:
                setattr(self, k, v)
        for attr in slots:
            try:
                super(SerializableModel, self).__getattribute__(attr)
            except AttributeError:
                setattr(self, attr, None)
    @property
    def parent(self):
        # Owning model instance, or None for a root model.
        return self._parent
    @classmethod
    def _is_null(cls, v):
        """Treat None and empty list/dict as 'no value'."""
        if v is None:
            return True
        if isinstance(v, (list, dict)) and len(v) == 0:
            return True
        return False
    @classmethod
    def _setattr(cls, obj, k, v, skip_null=True):
        """Assign parsed value ``v`` to ``obj.k``.

        Null values do not overwrite existing data unless ``skip_null`` is
        False.  Sub-model values are merged field-by-field into an existing
        sub-object instead of replacing it, so untouched fields survive.
        """
        if cls._is_null(v) and object.__getattribute__(obj, k) is not None:
            if not skip_null:
                setattr(obj, k, v)
            return
        fields = getattr(type(obj), '__fields')
        if not isinstance(fields[k], HasSubModelField):
            setattr(obj, k, v)
        elif isinstance(v, list):
            # Lists of sub-models are replaced wholesale.
            setattr(obj, k, v)
        else:
            sub_obj = object.__getattribute__(obj, k)
            new_obj = v
            if sub_obj is None:
                setattr(obj, k, v)
                return
            # Merge: copy each non-parent field of the new sub-model onto
            # the existing one.  NOTE: ``k`` is deliberately rebound by this
            # inner loop; the outer ``k`` is no longer needed here.
            sub_fields = getattr(new_obj, '__fields', {})
            for k in six.iterkeys(sub_fields):
                if sub_fields[k].set_to_parent is True:
                    continue
                cls._setattr(sub_obj, k, object.__getattribute__(new_obj, k),
                             skip_null=skip_null)
    @classmethod
    def _init_obj(cls, content, obj=None, **kw):
        """Pre-create the target instance, parsing any 'name'/type-indicator
        fields first so they can be passed to the constructor."""
        fields = dict(getattr(cls, '__fields'))
        _type = getattr(cls, '_type_indicator', None)
        _name = 'name' if 'name' in fields else None
        if obj is None and (_type is not None or 'name' in fields):
            kwargs = dict(kw)
            for field in (_name, _type):
                if field is None:
                    continue
                typo = fields[field].parse(content, **kw)
                kwargs[field] = typo
            obj = cls(**kwargs)
        return obj or cls(**kw)
    @classmethod
    def deserial(cls, content, obj=None, **kw):
        """Parse ``content`` (str, Element or dict) into ``obj`` (created if
        needed).  Parent-bound fields are copied onto ``obj.parent``.
        """
        obj = cls._init_obj(content, obj=obj, **kw)
        obj_type = type(obj)
        fields = dict(getattr(obj_type, '__fields'))
        if isinstance(content, six.string_types):
            # Raw text: decode according to the model's serialization family.
            if issubclass(obj_type, XMLSerializableModel):
                content = ElementTree.fromstring(content)
            else:
                content = json.loads(content)
        parent_kw = dict()
        self_kw = dict()
        for attr, prop in six.iteritems(fields):
            if isinstance(prop, SerializeField):
                kwargs = dict(kw)
                if isinstance(prop, HasSubModelField):
                    # Sub-models need a back-reference to their parent.
                    kwargs['_parent'] = obj
                if not prop.set_to_parent:
                    self_kw[attr] = prop.parse(content, **kwargs)
                else:
                    parent_kw[attr] = prop.parse(content, **kwargs)
        for k, v in six.iteritems(self_kw):
            obj_type._setattr(obj, k, v, skip_null=getattr(obj_type, 'skip_null', True))
        if obj.parent is not None:
            for k, v in six.iteritems(parent_kw):
                # remember that do not use `hasattr` here: it would treat a
                # missing slot and a raising descriptor the same way.
                try:
                    old_v = object.__getattribute__(obj.parent, k)
                except AttributeError:
                    continue
                if v is not None and old_v != v:
                    setattr(obj.parent, k, v)
        return obj
    def serial(self):
        """Serialize this model into an ElementTree Element (XML models) or
        an OrderedDict (JSON models); fields that do not implement
        ``serialize`` are skipped."""
        if isinstance(self, XMLSerializableModel):
            assert self._root is not None
            root = ElementTree.Element(self._root)
        else:
            root = compat.OrderedDict()
        for attr, prop in six.iteritems(getattr(self, '__fields')):
            if isinstance(prop, SerializeField):
                try:
                    prop.serialize(root, object.__getattribute__(self, attr))
                except NotImplementedError:
                    continue
        return root
    def extract(self, **base_kw):
        """Return ``base_kw`` updated with every initialized slot value."""
        kwargs = base_kw.copy()
        for attr in self.__slots__:
            try:
                kwargs[attr] = object.__getattribute__(self, attr)
            except AttributeError:
                pass
        return kwargs
class XMLSerializableModel(SerializableModel):
    """Model persisted as XML; ``_root`` names the document's root tag."""
    __slots__ = '_root',

    @classmethod
    def parse(cls, response, obj=None, **kw):
        """Deserialize ``response`` (str, Element or requests.Response).

        ``parent`` may be passed as a convenience alias for ``_parent``.
        """
        if 'parent' in kw:
            kw['_parent'] = kw.pop('parent')
        if isinstance(response, requests.Response):
            # PY2 prefer bytes, while PY3 prefer str
            response = response.text if six.PY3 else response.content
        return cls.deserial(response, obj=obj, **kw)

    def serialize(self):
        """Serialize to a pretty-printed XML string.

        Returns:
            str: the document, with XML entities inside CDATA sections
            restored to literal characters.
        """
        root = self.serial()
        xml_content = ElementTree.tostring(root, 'utf-8')
        prettified_xml = minidom.parseString(xml_content).toprettyxml(indent=' '*2, encoding='utf-8')
        prettified_xml = to_text(prettified_xml, encoding='utf-8')
        # minidom escapes special characters even inside CDATA sections,
        # where they must stay literal — undo that here.
        # Bug fix: the entity strings below had been lost (each replace
        # mapped a character to itself), making the un-escape a no-op.
        cdata_re = re.compile(r'<!\[CDATA\[.*\]\]>', (re.M | re.S))
        for src_cdata in cdata_re.finditer(prettified_xml):
            src_cdata = src_cdata.group(0)
            dest_cdata = src_cdata.replace('&amp;', '&').replace('&lt;', '<'). \
                replace('&quot;', '"').replace('&gt;', '>')
            prettified_xml = prettified_xml.replace(src_cdata, dest_cdata)
        return prettified_xml.replace('&quot;', '"')
class JSONSerializableModel(SerializableModel):
    """Model persisted as JSON documents."""

    @classmethod
    def parse(cls, response, obj=None, **kw):
        """Deserialize ``response`` (str, dict or requests.Response) into ``cls``."""
        if 'parent' in kw:
            kw['_parent'] = kw.pop('parent')
        if isinstance(response, requests.Response):
            # Python 2 works best with bytes, Python 3 with text.
            response = response.text if six.PY3 else response.content
        return cls.deserial(response, obj=obj, **kw)

    def serialize(self, **kwargs):
        """Serialize to a JSON string; ``kwargs`` are passed to json.dumps."""
        return json.dumps(self.serial(), **kwargs)
class XMLTagField(SerializeField):
    """Field whose parsed value is the *tag name* of the routed XML node."""

    def parse(self, root, **kwargs):
        node = _route_xml_path(root, *self._path_keys)
        value = node.tag if node is not None else self._default
        if value is None:
            return None
        return self._parse_callback(value) if self._parse_callback else value

    def _set_default_keys(self, *keys):
        # Tag fields always target the current node, whatever *keys* says.
        super(XMLTagField, self)._set_default_keys('.')
class XMLNodeField(SerializeField):
    """Field bound to the text content of an XML node."""

    def parse(self, root, **kwargs):
        node = _route_xml_path(root, *self._path_keys)
        value = self._default
        if node is not None:
            # Empty text falls back to the default as well.
            value = node.text or self._default
        if value is None:
            return None
        return self._parse_callback(value) if self._parse_callback else value

    def serialize(self, root, value):
        if value is None:
            value = self._default
        if value is None and self._blank_if_null:
            value = ''
        if value is None and not self._required:
            # Optional and still null: emit nothing.
            return
        node = _route_xml_path(root, create_if_not_exists=True, *self._path_keys)
        text = self._serialize_callback(value) if self._serialize_callback else value
        node.text = utils.to_text(text)
class XMLNodeAttributeField(SerializeField):
    def __init__(self, *keys, **kwargs):
        # ``attr`` names the XML attribute (on the routed node) that this
        # field binds to; it is mandatory for parse/serialize to work.
        self._attr = kwargs.pop('attr', None)
        super(XMLNodeAttributeField, self).__init__(*keys, **kwargs)
def parse(self, root, **kwargs):
assert self._attr is not None
node = _route_xml_path(root, *self._path_keys)
val = self._default
if node is not None:
val = node.get(self._attr)
if val is None:
return
if self._parse_callback:
return self._parse_callback(val)
return node.get(self._attr)
def serialize(self, root, value):
assert self._attr is not None
value = value if value is not None else self._default
if value is None:
if self._default is not None:
value = self._default
elif | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 16:45:28 2019
@author: qde
"""
import numpy as np
from numpy.linalg import inv, norm
from scipy.linalg import solve_discrete_are,lstsq
from numpy.random import randn
from filterpy.common import pretty_str
from filterpy.kalman import KalmanFilter
class UnstableData(object):
    """Container for one unstable eigen-pair of a system matrix.

    Parameters
    ----------
    val: float
        Unstable eigenvalue.
    vect: numpy float array
        Eigenvector associated with the eigenvalue.
    position: int
        Index of the eigenvalue in the studied matrix.
    """
    def __init__(self, val, vect, position):
        self.value = val
        self.vector = vect
        self.position = position

    def __repr__(self):
        lines = ['UnstableData object',
                 pretty_str('value', self.value),
                 pretty_str('vector', self.vector),
                 pretty_str('position', self.position)]
        return '\n'.join(lines)
class MoAttacker(object):
    '''
    Implements an attack simulation based on the research article Mo et al.
    2010 in order to generate a sequence of false measurements for a given subset
    of sensors.
    Parameters
    ----------
    kf: KalmanFilter
        Estimator of the system under attack.
    fb: boolean, optional
        If True, steady-state covariance is computed by solving a discrete
        Riccati equation instead of using the filter's current P.
    Attributes
    ----------
    unst_data: UnstableData list
        List of UnstableData objects (eigenvalue, eigenvector and position of the
        eigenvalue).
    attack_sequence: float numpy array
        Column-stacked array as np.array([[ y0 | y1 | ... | y_attsize ]])
        corresponding to the values the attacker will have to inject in the
        system to compromise it.
    '''
    def __init__(self,kf,fb = False):
        super().__init__()
        self.unst_data = []
        # Starts empty (dim_z x 0); columns are appended per time step.
        self.attack_sequence = np.zeros((kf.dim_z,0))
        self.kf = kf
        self.fb = fb
    def add_false_measurement(self,yai):
        '''
        Adds the new computed false measurement to the sequence by adding the column
        to the right.
        Parameters
        ----------
        yai: float numpy array
            New vector taking part of the attack sequence for time i.
        '''
        self.attack_sequence = np.hstack([self.attack_sequence, yai])
    def compute_unstable_eig(self,A):
        '''
        Looks up the unstable eigenvectors and eigenvalues from the input matrix.
        Parameters
        ----------
        A: float matrix
            The matrix which eigenvectors and values will be tested.
        Returns
        -------
        unst_data: UnstableData iterable
            List of UnstableData (eigenvalue, eigenvector and position of the value in the matrix).
        Notes
        -----
        The list of unstable eigenvectors and eigenvalues is not only returned but
        also directly added to the class attribute.
        '''
        eig_vals, eig_vects = np.linalg.eig(A)
        unst_data = []
        # Unstable eigenvalues are eigenvalues having their real part greater than 0.
        # NOTE(review): for a discrete-time transition matrix the usual
        # instability criterion is |lambda| >= 1, not Re(lambda) >= 0 —
        # confirm this is the intended test.
        for counter,value in enumerate(eig_vals):
            if np.real(value) >= 0:
                unst_data.append(UnstableData(value, eig_vects[:,counter],counter))
        self.unst_data = unst_data
        return unst_data
    def compute_steady_state_P(self):
        '''
        Computes the process to get P (covariance matrix on state) in steady
        state as the solution of a discrete Ricatti equation.
        Returns
        -------
        ss_P: matrix
            Covariance matrix on state when the system is on steady state.
        Notes
        -----
        This function should be used if the system is equipped with some kind of
        feedback linearization.
        '''
        ss_P = solve_discrete_are(self.kf.F.T, self.kf.H.T, self.kf.Q, self.kf.R)
        return ss_P
    def compute_steady_state_K(self):
        '''
        Computes the process to get K (Kalman gain) in steady state as the
        solution of a discrete Ricatti equation.
        Returns
        -------
        ss_K: matrix
            Kalman gain matrix when the system is on steady state.
            K = P*H'*inv(H*P*H'+R)
        '''
        kf = self.kf
        # With feedback (fb), use the Riccati steady-state P; otherwise the
        # filter's current covariance.
        if self.fb: P = self.compute_steady_state_P()
        else: P = kf.P
        ss_K = P@(kf.H.T)@inv(kf.H@P@kf.H.T + kf.R)
        return ss_K
    def compute_attackers_input(self,ss_K,Gamma):
        '''
        Computes the initial space in which the attacker will have to find the
        initial steps of the attack sequence.
        Parameters
        ----------
        ss_K: float matrix
            Kalman gain of the estimator in steady state.
        Gamma: int matrix
            Attack matrix, saying which sensors have been compromised.
        Returns:
        --------
        attackers_input: float matrix
            Horizontal concatenation -[(F - K H F) K Gamma | K Gamma].
        '''
        kf = self.kf
        attackers_input = np.concatenate((-(kf.F - ss_K@kf.H@kf.F)@ss_K@Gamma, -ss_K@Gamma),axis=1)
        return attackers_input
    def initialize_attack_sequence(self,attackers_input,attack_vector):
        '''
        Computes the two first element of the attack sequence.
        Parameters
        ----------
        attackers_input: float matrix
            Matrix of the attacker's needed input to reach Cv.
        attack_vector: float numpy array
            Unstable eigenvector under which the attack is being made.
        Returns
        -------
        ya0, ya1: float numpy arrays
            First and second elements of the attack sequence.
        '''
        # Least-squares solution of attackers_input @ [ya0; ya1] = attack_vector.
        false_measurements,_,_,_ = lstsq(attackers_input, attack_vector)
        # Reshape correctly the result: first dim_z rows are ya0, the rest ya1.
        ya0 = false_measurements[0:self.kf.dim_z,0].reshape((self.kf.dim_z,1))
        ya1 = false_measurements[self.kf.dim_z:, 0].reshape((self.kf.dim_z,1))
        return ya0, ya1
    def compute_max_norm(self,Gamma,ya0,ya1):
        '''
        Computes the maximal norm after simulating the first two measurements.
        Parameters
        ----------
        Gamma: int matrix
            Attack matrix of the system.
        ya0, ya1: float numpy arrays
            Two first steps of the attack sequence.
        Returns
        -------
        M: float
            Maximal norm of the attack sequence.
        Notes
        -----
        - e corresponds to the error between the healthy and compromised system.
        - z corresponds to the simulation of a received measurement by the compromised
        system (real measure + compromission of a subset of sensors)
        '''
        # e: error between healthy and compromised system
        # NOTE(review): the state error is allocated with dim_z; for systems
        # where dim_x != dim_z this looks wrong — confirm against the paper.
        kf = self.kf
        e = np.zeros((kf.dim_z,2))
        e[:,0] = (-kf.K@Gamma@ya0).squeeze()
        e[:,1] = (kf.F-kf.K@kf.H@kf.F)@(e[:,0].reshape((kf.dim_z,1))-kf.K@Gamma@ya1).squeeze()
        # z: simulation of the received measure by the plant (real measure + compromission)
        z = np.zeros((kf.dim_z,2))
        # First state is null so the received measure = attacker's input
        z[:,0] = (Gamma@ya0).squeeze()
        # Second state needs to be estimated (H*F*e) then added to the attacker's input
        z[:,1] = (kf.H@kf.F@e[:,0].reshape((kf.dim_z,1)) + Gamma@ya1).squeeze()
        M = max(norm(z[:,0]),norm(z[:,1]))
        return M
    def attack_parameters(self,value_position):
        '''
        Given a position value, returns the corresponding eigenvalue, eigenvector
        and attack matrix Gamma.
        Parameters
        ----------
        value_position: int
            Position of the targeted eigenvalue in the state transition matrix.
        Returns
        -------
        attack_val: float
            Eigen value under which the attack will happen.
        attack_vect: float numpy array
            Eigen vector under which the attack will happen.
        Gamma: int matrix
            Attack matrix: diagonal matrix where yij = 1 if the ith sensor is
            compromised and 0 if not.
        '''
        chosen_attack = self.unst_data[value_position]
        attack_val = chosen_attack.value
        attack_vect = chosen_attack.vector.reshape((self.kf.dim_x,1))
        attack_pos = chosen_attack.position
        # Single-sensor attack: only the sensor matching the eigenvalue's
        # position is marked compromised.
        Gamma = np.zeros((self.kf.dim_z,self.kf.dim_z))
        Gamma[attack_pos][attack_pos] = 1
        return attack_val,attack_vect,Gamma
    def compute_attack_sequence(self, attack_size, pos_value = 0, logs=False):
        '''
        Creates the attack sequence (aka the falsified measurements passed to the filter).
        Parameters:
        -----------
        attack_size: int
            Duration of the attack (number of steps).
        pos_value: int
            Position of the unstable value along which the attacker should
            attack.
        logs: boolean
            Displays the logs of the different steps if True. Default value: False
        Returns
        -------
        attack_sequence: float numpy array
            Column-stacked array as np.array([[ y0 | y1 | ... | y_attsize ]])
            corresponding to the values the attacker will have to inject in the
            system to compromise it.
        Gamma: float numpy array
            Attack vector
        Notes
        -----
        When no unstable eigenvalue exists a plain string is returned instead
        of the (sequence, Gamma) tuple — callers must handle both shapes.
        '''
        kf = self.kf
        # Unstable eigenvalues of A and associated eigenvectors
        self.compute_unstable_eig(kf.F)
        if not self.unst_data:
            # If the unstable list is empty, no attacks are possible
            return "No unstable values available for attack"
        else:
            # Choice of an eigenvalue under which the attack will be created
            attack_val,attack_vect,Gamma = self.attack_parameters(pos_value)
            if logs: print("Eigen value: \n{0}\n".format(attack_val))
            if logs: print("Eigen vector: \n{0}\n".format(attack_vect))
            if logs: print("Attack matrix: \n{0}\n".format(Gamma))
            # Attacker's input to reach v
            ss_K = self.compute_steady_state_K()
            if logs: print("Steady State K: \n{0}\n".format(ss_K))
            # Attacker input: -[(A-KCA)KGamma KGamma]
            attackers_input = self.compute_attackers_input(ss_K, Gamma)
            if logs: print("Attackers input: \n{0}\n".format(attackers_input))
            # Attack sequence initialization
            #1. Initialization of the false measurements
            ya0, ya1 = self.initialize_attack_sequence(attackers_input,attack_vect)
            if logs: print("First false measurements: \nya0:\n{0}\nya1:\n{1}\n".format(ya0,ya1))
            #2. Initialization of the first "real" measurements -> to determine the max norm
            M = self.compute_max_norm(Gamma,ya0,ya1)
            # Normalize so the injected measurements stay bounded.
            ya0 = ya0/M
            ya1 = ya1/M
            self.add_false_measurement(ya0)
            self.add_false_measurement(ya1)
            # Subsequent steps follow the unstable mode geometrically:
            # y_i = y_{i-2} - lambda^(i-2)/M * (H v).
            ystar = kf.H@attack_vect
            for i in range(2,attack_size):
                yai = self.attack_sequence[:,i-2].reshape((kf.dim_z,1)) - attack_val**(i-2)/M * ystar
                self.add_false_measurement(yai)
            if logs: print("Attack Sequence: \n{0}\n".format(self.attack_sequence))
            return self.attack_sequence, Gamma
    def attack_measurement(self):
        '''
        Alters the measurements with the attack sequence
        '''
        # Intentionally unimplemented placeholder in the visible code.
        pass
class ExtendedMoAttacker(MoAttacker):
def compute_steady_state_K(self):
'''
Computes the process to get K (Kalman gain) in steady state as the
solution of a discrete Ricatti equation.
Returns
-------
ss_K: matrix
Kalman gain matrix when the system is on steady state.
K = P*H'*inv(H*P*H'+R)
'''
kf = self.kf
H = kf.HJacob(kf.x)
if self.fb: P = self.compute_steady_state_P()
else: P = kf.P
ss_K = P@(H.T)@inv(H@P@H.T + kf.R)
return ss_K
def compute_attackers_input(self,ss_K,Gamma):
'''
Computes the initial space in which the attacker will have to find the
initial steps of the attack sequence.
Parameters
----------
ss_K: float matrix
Kalman gain of the estimator in steady state.
Gamma: int matrix
Attack matrix, saying which sensors have been compromised.
Returns:
--------
attackers_input: float matrix
Matrix
'''
kf = self.kf
H = kf.HJacob(kf.x)
attackers_input = np.concatenate((-(kf.F - ss_K@H@kf.F)@ss_K@Gamma, -ss_K@Gamma),axis=1)
return attackers_input
def compute_max_norm(self,Gamma,ya0,ya1):
'''
Computes the maximal norm after simulating the first two measurements.
Parameters
| |
<filename>userbot/modules/multimemes.py
# Copyright (C) 2020 MoveAngel and MinaProject
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Multifunction memes
#
# Based code + improve from AdekMaulana and aidilaryanto
#
# Memify ported from Userge and Refactored by KenHV
import asyncio
import io
import os
import random
import re
import shlex
import textwrap
import time
from asyncio.exceptions import TimeoutError
from random import randint, uniform
from typing import Optional, Tuple
from glitch_this import ImageGlitcher
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image, ImageDraw, ImageEnhance, ImageFont, ImageOps
from telethon import events, functions, types
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.types import DocumentAttributeFilename
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
from userbot.utils import progress
# Default location for single-frame thumbnails extracted from videos.
THUMB_IMAGE_PATH = "./thumb_image.jpg"
# Output path of the animated GIF produced by the .glitch command.
Glitched = TEMP_DOWNLOAD_DIRECTORY + "glitch.gif"
# Matches runs of emoji / pictographic code points.
# NOTE(review): appears unused in the visible part of this module — confirm
# before removing.
EMOJI_PATTERN = re.compile(
    "["
    "\U0001F1E0-\U0001F1FF"  # flags (iOS)
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F700-\U0001F77F"  # alchemical symbols
    "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
    "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
    "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
    "\U0001FA00-\U0001FA6F"  # Chess Symbols
    "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
    "\U00002702-\U000027B0"  # Dingbats
    "]+"
)
@register(outgoing=True, pattern=r"^\.glitch(?: |$)(.*)")
async def glitch(event):
    """Handler for ``.glitch [level]``: turn the replied-to photo, sticker or
    video frame into a glitched GIF and send it back.

    The optional numeric argument (1-8) controls glitch intensity; anything
    missing, non-numeric or > 8 falls back to 2.
    """
    # Must be a reply so there is a media message to work on.
    if not event.reply_to_msg_id:
        await event.edit("`Saya tidak akan membuat kesalahan pada hantu!`")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("`Balas gambar/stiker!`")
        return
    await event.edit("`Mengunduh media..`")
    # Normalize every supported media kind to a single still image file.
    if reply_message.photo:
        glitch_file = await bot.download_media(
            reply_message,
            "glitch.png",
        )
    elif (
        DocumentAttributeFilename(file_name="AnimatedSticker.tgs")
        in reply_message.media.document.attributes
    ):
        # Animated sticker: convert .tgs to a PNG via lottie_convert.py.
        # NOTE(review): os.system blocks the event loop while converting.
        await bot.download_media(
            reply_message,
            "anim.tgs",
        )
        os.system("lottie_convert.py anim.tgs anim.png")
        glitch_file = "anim.png"
    elif reply_message.video:
        # Video: grab a single frame at t=1s with ffmpeg.
        video = await bot.download_media(
            reply_message,
            "glitch.mp4",
        )
        extractMetadata(createParser(video))
        os.system("ffmpeg -i glitch.mp4 -vframes 1 -an -s 480x360 -ss 1 glitch.png")
        glitch_file = "glitch.png"
    else:
        glitch_file = await bot.download_media(
            reply_message,
            "glitch.png",
        )
    # Parse the intensity argument; the deliberate raise funnels any value
    # above 8 into the same fallback as a malformed number.
    try:
        value = int(event.pattern_match.group(1))
        if value > 8:
            raise ValueError
    except ValueError:
        value = 2
    await event.edit("`Mengacaukan media ini...`")
    await asyncio.sleep(2)
    glitcher = ImageGlitcher()
    img = Image.open(glitch_file)
    # Produce a list of glitched frames and assemble them into a GIF.
    glitch_img = glitcher.glitch_image(img, value, color_offset=True, gif=True)
    DURATION = 200  # ms per frame
    LOOP = 0  # 0 = loop forever
    glitch_img[0].save(
        Glitched,
        format="GIF",
        append_images=glitch_img[1:],
        save_all=True,
        duration=DURATION,
        loop=LOOP,
    )
    await event.edit("`Mengunggah media yang dikacaukan...`")
    c_time = time.time()
    nosave = await event.client.send_file(
        event.chat_id,
        Glitched,
        force_document=False,
        reply_to=event.reply_to_msg_id,
        progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
            progress(d, t, event, c_time, "[UNGGAH]")
        ),
    )
    await event.delete()
    os.remove(Glitched)
    # Keep the sent GIF out of the user's saved-GIFs collection.
    await bot(
        functions.messages.SaveGifRequest(
            id=types.InputDocument(
                id=nosave.media.document.id,
                access_hash=nosave.media.document.access_hash,
                file_reference=nosave.media.document.file_reference,
            ),
            unsave=True,
        )
    )
    os.remove(glitch_file)
    os.system("rm *.tgs *.mp4")
@register(outgoing=True, pattern=r"^\.mmf (.*)")
async def memify(event):
    """Handler for ``.mmf <top>;<bottom>``: caption the replied-to media in
    meme style and send the result as a sticker-like webp.

    Animated stickers and videos are first reduced to a single frame.
    """
    reply_msg = await event.get_reply_message()
    input_str = event.pattern_match.group(1)
    await event.edit("`Sedang memproses...`")
    if not reply_msg:
        return await event.edit("`Balas pesan yang berisi media!`")
    if not reply_msg.media:
        return await event.edit("`Balas gambar/stiker/gif/video!`")
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
    input_file = await event.client.download_media(reply_msg, TEMP_DOWNLOAD_DIRECTORY)
    input_file = os.path.join(TEMP_DOWNLOAD_DIRECTORY, os.path.basename(input_file))
    if input_file.endswith(".tgs"):
        # Animated sticker: render frame 0 to a webp via lottie_convert.py.
        await event.edit("`Mengekstrak bingkai pertama...`")
        converted_file = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "meme.webp")
        cmd = f"lottie_convert.py --frame 0 {input_file} {converted_file}"
        await runcmd(cmd)
        os.remove(input_file)
        if not os.path.lexists(converted_file):
            return await event.edit("`Tidak dapat mengurai stiker beranimasi ini.`")
        input_file = converted_file
    elif input_file.endswith(".mp4"):
        # Video/GIF: grab the first frame with ffmpeg.
        await event.edit("`Mengekstrak bingkai pertama...`")
        converted_file = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "meme.png")
        await take_screen_shot(input_file, 0, converted_file)
        os.remove(input_file)
        if not os.path.lexists(converted_file):
            return await event.edit("`Tidak dapat mengurai video ini.`")
        input_file = converted_file
    await event.edit("`Menambahkan teks...`")
    try:
        final_image = await add_text_img(input_file, input_str)
    except Exception as e:
        return await event.edit(f"**Terjadi kesalahan :**\n`{e}`")
    await event.client.send_file(
        entity=event.chat_id, file=final_image, reply_to=reply_msg
    )
    await event.delete()
    os.remove(final_image)
    os.remove(input_file)
def _draw_caption_lines(draw, lines, font, start_y, image_width, stroke_width):
    """Draw wrapped caption lines centered horizontally, starting at
    ``start_y``: white fill with a black outline (classic meme styling)."""
    y = start_y
    for line in lines:
        # NOTE(review): font.getsize was removed in Pillow 10 — if the
        # runtime Pillow is >= 10 this needs font.getbbox/getlength instead.
        line_width, line_height = font.getsize(line)
        x = (image_width - line_width) / 2
        draw.text(
            (x, y),
            line,
            fill="white",
            font=font,
            stroke_width=stroke_width,
            stroke_fill="black",
        )
        y += line_height


async def add_text_img(image_path, text):
    """Overlay meme-style top/bottom captions on the image at ``image_path``.

    ``text`` is split on the first ``;`` into top and bottom captions; with
    no ``;`` all text goes on top.  Returns the path of the saved webp.
    """
    font_size = 12  # percent of the image height
    stroke_width = 2
    # Bug fix: split at most once — ``text.split(";")`` raised
    # "too many values to unpack" whenever the caption held 2+ semicolons.
    if ";" in text:
        upper_text, lower_text = text.split(";", 1)
    else:
        upper_text, lower_text = text, ""
    img = Image.open(image_path).convert("RGBA")
    img_info = img.info
    image_width, image_height = img.size
    font = ImageFont.truetype(
        font="resources/MutantAcademyStyle.ttf",
        size=int(image_height * font_size) // 100,
    )
    draw = ImageDraw.Draw(img)
    # Estimate characters per line from the width of a reference glyph.
    char_width, char_height = font.getsize("A")
    chars_per_line = image_width // char_width
    top_lines = textwrap.wrap(upper_text, width=chars_per_line)
    bottom_lines = textwrap.wrap(lower_text, width=chars_per_line)
    if top_lines:
        _draw_caption_lines(draw, top_lines, font, 10, image_width, stroke_width)
    if bottom_lines:
        # Anchor the bottom block so its last line sits near the lower edge.
        y = image_height - char_height * len(bottom_lines) - 15
        _draw_caption_lines(draw, bottom_lines, font, y, image_width, stroke_width)
    final_image = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "memify.webp")
    img.save(final_image, **img_info)
    return final_image
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
    """Execute *cmd* in a subprocess and capture its output.

    Returns:
        A 4-tuple ``(stdout, stderr, return_code, pid)`` where the two
        streams are UTF-8 decoded (undecodable bytes replaced) and
        stripped of surrounding whitespace.
    """
    proc = await asyncio.create_subprocess_exec(
        *shlex.split(cmd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await proc.communicate()
    decode = lambda raw: raw.decode("utf-8", "replace").strip()
    return decode(raw_out), decode(raw_err), proc.returncode, proc.pid
async def take_screen_shot(
    video_file: str, duration: int, path: str = ""
) -> Optional[str]:
    """Grab a single frame from *video_file* at half of *duration* seconds.

    Returns the screenshot path when ffmpeg produced a file, otherwise
    ffmpeg's stderr text so the caller can surface the failure.
    """
    seek_point = duration // 2
    out_path = path if path else os.path.join(
        TEMP_DOWNLOAD_DIRECTORY, f"{os.path.basename(video_file)}.png"
    )
    cmd = f'ffmpeg -ss {seek_point} -i "{video_file}" -vframes 1 "{out_path}"'
    _, stderr_text, _, _ = await runcmd(cmd)
    if os.path.exists(out_path):
        return out_path
    return stderr_text
@register(outgoing=True, pattern=r"^\.q(?: |$)(.*)")
async def quotess(qotli):
    """Turn a replied-to text message into a sticker via @QuotLyBot.

    Forwards the replied message to @QuotLyBot, waits for the rendered
    sticker, re-posts it in the current chat, and then removes the
    intermediate messages from the bot conversation.
    """
    if qotli.fwd_from:
        return
    if not qotli.reply_to_msg_id:
        await qotli.edit("`Balas pesan pengguna mana pun!`")
        return
    reply_message = await qotli.get_reply_message()
    if not reply_message.text:
        await qotli.edit("`Balas pesan teks!`")
        return
    chat = "@QuotLyBot"
    # (dropped a stray no-op `reply_message.sender` expression statement)
    if reply_message.sender.bot:
        await qotli.edit("`Balas pesan pengguna sebenarnya!`")
        return
    try:
        await qotli.edit("`Sedang memproses...`")
        async with bot.conversation(chat) as conv:
            try:
                # Register the listener *before* forwarding so the reply
                # cannot be missed; 1031952739 is @QuotLyBot's user id.
                response = conv.wait_event(
                    events.NewMessage(incoming=True, from_users=1031952739)
                )
                msg = await bot.forward_messages(chat, reply_message)
                response = await response
                await bot.send_read_acknowledge(conv.chat_id)
            except YouBlockedUserError:
                await qotli.reply("`Harap buka blokir` **@QuotLyBot** `dan coba lagi!`")
                return
            if response.text.startswith("Hi!"):
                await qotli.edit(
                    "`Bisakah Anda dengan ramah menonaktifkan pengaturan privasi penerusan untuk selamanya?`"
                )
            else:
                downloaded_file_name = await qotli.client.download_media(
                    response.media, TEMP_DOWNLOAD_DIRECTORY
                )
                await qotli.client.send_file(
                    qotli.chat_id, downloaded_file_name, reply_to=qotli.reply_to_msg_id
                )
                await qotli.delete()
                await bot.send_read_acknowledge(qotli.chat_id)
                await qotli.client.delete_messages(conv.chat_id, [msg.id, response.id])
                os.remove(downloaded_file_name)
    except TimeoutError:
        await qotli.edit("**@QuotlyBot** `tidak menanggapi!`")
        await qotli.client.delete_messages(conv.chat_id, [msg.id])
@register(outgoing=True, pattern=r"^\.hz(?: |$)(.*)")
async def hazz(hazmat):
    """Dress the subject of a replied-to photo/GIF in a hazmat suit.

    Sends the replied media to @hazmat_suit_bot (optionally with a
    ``/hazmat <level>`` command), re-posts the bot's result in the
    current chat, and cleans up the intermediate conversation messages.
    """
    await hazmat.edit("`Mengirim informasi...`")
    # Suit level is group(1): the separator group is now non-capturing
    # (the original pattern's `(:? |$)` was a typo for `(?: |$)`).
    level = hazmat.pattern_match.group(1)
    if hazmat.fwd_from:
        return
    if not hazmat.reply_to_msg_id:
        await hazmat.edit("`WoWoWo Kapten!, kita tidak akan cocok dengan hantu!`")
        return
    reply_message = await hazmat.get_reply_message()
    if not reply_message.media:
        await hazmat.edit("`Kata bisa menghancurkan apapun Kapten!`")
        return
    if reply_message.sender.bot:
        await hazmat.edit("`Balas ke pengguna sebenarnya!`")
        return
    chat = "@hazmat_suit_bot"
    await hazmat.edit("`Siapkan Kapten! kita akan membersihkan beberapa virus...`")
    message_id_to_reply = hazmat.message.reply_to_msg_id
    msg_reply = None
    r = None  # bot's acknowledgement of the /hazmat command, if one was sent
    async with hazmat.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            if level:
                msg_reply = await conv.send_message(f"/hazmat {level}", reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            elif reply_message.gif:
                msg_reply = await conv.send_message("/hazmat", reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            # don't spam notif
            await bot.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await hazmat.reply("`Harap buka blokir` **@hazmat_suit_bot**")
            return
        if response.text.startswith("I can't"):
            await hazmat.edit("`Tidak dapat menangani GIF ini...`")
            # Only delete messages that exist in this run: r/msg_reply are
            # absent when no level or GIF command was sent (the original
            # unconditionally referenced r.id/msg_reply.id and crashed).
            cleanup = [msg.id, response.id]
            if r is not None:
                cleanup.append(r.id)
            if msg_reply is not None:
                cleanup.append(msg_reply.id)
            await hazmat.client.delete_messages(conv.chat_id, cleanup)
            return
        downloaded_file_name = await hazmat.client.download_media(
            response.media, TEMP_DOWNLOAD_DIRECTORY
        )
        await hazmat.client.send_file(
            hazmat.chat_id,
            downloaded_file_name,
            force_document=False,
            reply_to=message_id_to_reply,
        )
        # cleanup chat after completed
        if msg_reply is not None:
            await hazmat.client.delete_messages(
                conv.chat_id, [msg.id, msg_reply.id, r.id, response.id]
            )
        else:
            await hazmat.client.delete_messages(conv.chat_id, [msg.id, response.id])
    await hazmat.delete()
    return os.remove(downloaded_file_name)
@register(outgoing=True, pattern=r"^\.df(?: |$)([1-8])?")
async def fryerrr(fry):
    """Deep-fry a replied-to image via @image_deepfrybot.

    Optionally accepts a fry level 1-8 which is forwarded to the bot as
    ``/deepfry <level>``; the processed image is re-posted in the current
    chat and the bot conversation is cleaned up afterwards.
    """
    await fry.edit("`Mengirim informasi...`")
    # Fry level is group(1): the separator group is now non-capturing
    # (the original pattern's `(:? |$)` was a typo for `(?: |$)`).
    level = fry.pattern_match.group(1)
    if fry.fwd_from:
        return
    if not fry.reply_to_msg_id:
        await fry.edit("`Balas pesan gambar pengguna manapun!`")
        return
    reply_message = await fry.get_reply_message()
    if not reply_message.media:
        await fry.edit("`Tidak ada gambar untuk digoreng`")
        return
    if reply_message.sender.bot:
        await fry.edit("`Balas ke pengguna sebenarnya!`")
        return
    chat = "@image_deepfrybot"
    message_id_to_reply = fry.message.reply_to_msg_id
    msg_level = None  # the /deepfry command message, if a level was given
    r = None          # the bot's acknowledgement of that command
    try:
        async with fry.client.conversation(chat) as conv:
            try:
                msg = await conv.send_message(reply_message)
                if level:
                    msg_level = await conv.send_message(
                        f"/deepfry {level}", reply_to=msg.id
                    )
                    r = await conv.get_response()
                # Always wait for the processed image: previously this
                # fetch only happened when a level was given, leaving
                # `response` unbound (NameError) for a plain `.df`.
                response = await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
            except YouBlockedUserError:
                await fry.reply("`Harap buka blokir` **@image_deepfrybot**")
                return
            if response.text.startswith("Forward"):
                await fry.edit("`Nonaktifkan setelan privasi penerusan Anda!`")
            else:
                downloaded_file_name = await fry.client.download_media(
                    response.media, TEMP_DOWNLOAD_DIRECTORY
                )
                await fry.client.send_file(
                    fry.chat_id,
                    downloaded_file_name,
                    force_document=False,
                    reply_to=message_id_to_reply,
                )
                # Sentinel check replaces the original try/except NameError
                # dance around msg_level.
                if msg_level is None:
                    await fry.client.delete_messages(
                        conv.chat_id, [msg.id, response.id]
                    )
                else:
                    await fry.client.delete_messages(
                        conv.chat_id, [msg.id, response.id, r.id, msg_level.id]
                    )
                await fry.delete()
                return os.remove(downloaded_file_name)
    except TimeoutError:
        await fry.edit("**@image_deepfrybot** `tidak menanggapi!`")
        await fry.client.delete_messages(conv.chat_id, [msg.id])
@register(pattern=r"^\.deepfry(?: |$)(.*)", outgoing=True)
async def deepfryer(event):
try:
frycount = int(event.pattern_match.group(1))
if frycount < 1:
raise ValueError
except ValueError:
frycount = 1
reply_message = await event.get_reply_message()
image = io.BytesIO()
await event.edit("`Mengunduh media...`")
if reply_message.photo:
image = await bot.download_media(
reply_message,
"df.png",
)
elif (
DocumentAttributeFilename(file_name="AnimatedSticker.tgs")
in reply_message.media.document.attributes
):
await bot.download_media(
reply_message,
"df.tgs",
)
os.system("lottie_convert.py df.tgs df.png")
image = "df.png"
elif reply_message.video:
video = await bot.download_media(
reply_message,
"df.mp4",
)
extractMetadata(createParser(video))
os.system("ffmpeg -i df.mp4 -vframes 1 -an -s 480x360 -ss 1 df.png")
image = "df.png"
else:
image = await bot.download_media(
reply_message,
"df.png",
)
image = Image.open(image)
# fry the image
await event.edit("`Menggoreng media...`")
for _ in range(frycount):
image = await deepfry(image)
fried_io = io.BytesIO()
fried_io.name = | |
please pass async_req=True
>>> thread = api.get_run_upstream_lineage_with_http_info(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity name under namesapce (required)
:param str uuid: SubEntity uuid (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListRunEdgesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'uuid',
'offset',
'limit',
'sort',
'query',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_run_upstream_lineage" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_run_upstream_lineage`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `get_run_upstream_lineage`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `get_run_upstream_lineage`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{entity}/runs/{uuid}/lineage/upstream', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListRunEdgesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_runs_artifacts_lineage(self, owner, name, **kwargs): # noqa: E501
"""Get runs artifacts lineage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_runs_artifacts_lineage(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListRunArtifactsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_runs_artifacts_lineage_with_http_info(owner, name, **kwargs) # noqa: E501
def get_runs_artifacts_lineage_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get runs artifacts lineage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_runs_artifacts_lineage_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListRunArtifactsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'offset',
'limit',
'sort',
'query',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_runs_artifacts_lineage" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_runs_artifacts_lineage`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_runs_artifacts_lineage`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{name}/runs/lineage/artifacts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListRunArtifactsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def impersonate_token(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Impersonate run token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.impersonate_token(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str uuid: Uuid identifier of the sub-entity (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Auth
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] | |
Ports if defined in senders or recipients
- MAC address ethSrc loaded from Ingress device
- Check intent state with retry
Required:
name - Type of point intent to add eg. IPV4 | VLAN | Dualstack
senders - List of host dictionaries i.e.
[ { "name":"h8", "device":"of:0000000000000005/8","mac":"00:00:00:00:00:08" } ]
recipients - List of host dictionaries i.e.
[ { "name":"h16", "device":"of:0000000000000006/8", "mac":"00:00:00:00:00:10" } ]
Optional:
onosNode - ONOS node to install the intents in main.CLIs[ ]
0 by default so that it will always use the first
ONOS node
ethType - Ethernet type eg. IPV4, IPV6
bandwidth - Bandwidth capacity
lambdaAlloc - Allocate lambda, defaults to False
ipProto - IP protocol
tcp - TCP ports in the same order as the hosts in hostNames
sw1 - First switch to bring down & up for rerouting purpose
sw2 - Second switch to bring down & up for rerouting purpose
expectedLink - Expected link when the switches are down, it should
be two links lower than the links before the two
switches are down
"""
assert main, "There is no main variable"
assert senders, "You must specify a sender"
assert recipients, "You must specify a recipient"
# Assert devices or main.hostsData, "You must specify devices"
global itemName # The name of this run. Used for logs.
itemName = name
onosNode = int( onosNode )
main.log.info( itemName + ": Adding single to single point intents" )
for sender in senders:
if not sender.get( "device" ):
main.log.warn( "Device not given for sender {0}. Loading from main.hostData".format( sender.get( "name" ) ) )
sender[ "device" ] = main.hostsData.get( sender.get( "name" ) ).get( "location" )
for recipient in recipients:
if not recipient.get( "device" ):
main.log.warn( "Device not given for recipient {0}. Loading from main.hostData".format( recipient.get( "name" ) ) )
recipient[ "device" ] = main.hostsData.get( recipient.get( "name" ) ).get( "location" )
ingressDevice = senders[ 0 ].get( "device" )
egressDevice = recipients[ 0 ].get( "device" )
portIngress = senders[ 0 ].get( "port", "" )
portEgress = recipients[ 0 ].get( "port", "" )
main.log.debug( ingressDevice )
main.log.debug( egressDevice )
srcMac = senders[ 0 ].get( "mac" )
dstMac = recipients[ 0 ].get( "mac" )
ipSrc = senders[ 0 ].get( "ip" )
ipDst = recipients[ 0 ].get( "ip" )
intent1 = main.CLIs[ onosNode ].addPointIntent(
ingressDevice=ingressDevice,
egressDevice=egressDevice,
ingressPort=portIngress,
egressPort=portEgress,
ethType=ethType,
ethSrc=srcMac,
ethDst=dstMac,
bandwidth=bandwidth,
lambdaAlloc=lambdaAlloc,
ipProto=ipProto,
ipSrc=ipSrc,
ipDst=ipDst,
tcpSrc=tcpSrc,
tcpDst=tcpDst )
time.sleep( main.addIntentSleep )
intentsId = main.CLIs[ 0 ].getIntentsId()
if utilities.retry ( f=checkIntentState, retValue=main.FALSE,
args = (main, intentsId ), sleep=main.checkIntentSleep ):
return intentsId
else:
main.log.error( "Single to Single point intent did not install correctly" )
return main.FALSE
# Check intents state
if utilities.retry( f=checkIntentState, retValue=main.FALSE, args=( main, intentsId ), sleep=main.checkIntentSleep ):
return intentsId
else:
main.log.error( "Point Intent did not install correctly" )
return main.FALSE
def testPointIntent( main,
name,
intentId,
senders,
recipients,
badSenders={},
badRecipients={},
onosNode=0,
ethType="",
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=0):
"""
Test a Point Intent
Description:
Test a point intent
Steps:
- Fetch host data if not given
- Check Intent State
- Check Flow State
- Check Connectivity
- Check Lack of Connectivity Between Hosts not in the Intent
- Reroute
- Take Expected Link Down
- Check Intent State
- Check Flow State
- Check Topology
- Check Connectivity
- Bring Expected Link Up
- Check Intent State
- Check Flow State
- Check Topology
- Check Connectivity
- Remove Topology
Required:
name - Type of point intent to add eg. IPV4 | VLAN | Dualstack
senders - List of host dictionaries i.e.
{ "name":"h8", "device":"of:0000000000000005/8","mac":"00:00:00:00:00:08" }
recipients - List of host dictionaries i.e.
{ "name":"h16", "device":"of:0000000000000006/8", "mac":"00:00:00:00:00:10" }
Optional:
onosNode - ONOS node to install the intents in main.CLIs[ ]
0 by default so that it will always use the first
ONOS node
ethType - Ethernet type eg. IPV4, IPV6
bandwidth - Bandwidth capacity
lambdaAlloc - Allocate lambda, defaults to False
ipProto - IP protocol
tcp - TCP ports in the same order as the hosts in hostNames
sw1 - First switch to bring down & up for rerouting purpose
sw2 - Second switch to bring down & up for rerouting purpose
expectedLink - Expected link when the switches are down, it should
be two links lower than the links before the two
switches are down
"""
# Parameter Validity Check
assert main, "There is no main variable"
assert senders, "You must specify a sender"
assert recipients, "You must specify a recipient"
global itemName
itemName = name
tempHostsData = {}
onosNode = int( onosNode )
main.log.info( itemName + ": Testing Point Intent" )
# Names for scapy
senderNames = [ x.get( "name" ) for x in senders ]
recipientNames = [ x.get( "name" ) for x in recipients ]
badSenderNames = [ x.get( "name" ) for x in badSenders ]
badRecipientNames = [ x.get( "name" ) for x in badRecipients ]
for sender in senders:
if not sender.get( "device" ):
main.log.warn( "Device not given for sender {0}. Loading from main.hostData".format( sender.get( "name" ) ) )
sender[ "device" ] = main.hostsData.get( sender.get( "name" ) ).get( "location" )
for recipient in recipients:
if not recipient.get( "device" ):
main.log.warn( "Device not given for recipient {0}. Loading from main.hostData".format( recipient.get( "name" ) ) )
recipient[ "device" ] = main.hostsData.get( recipient.get( "name" ) ).get( "location" )
testResult = main.TRUE
main.log.info( itemName + ": Testing point intents" )
# Check intent state
if utilities.retry( f=checkIntentState, retValue=main.FALSE, args=( main, intentId ), sleep=main.checkIntentSleep ):
main.assertReturnString += 'Initial Intent State Passed\n'
else:
main.assertReturnString += 'Initial Intent State Failed\n'
testResult = main.FALSE
# Check flows count in each node
if utilities.retry( f=checkFlowsCount, retValue=main.FALSE, args=[ main ], sleep=20, attempts=3 ) and utilities.retry( f=checkFlowsState, retValue=main.FALSE, args=[ main ], sleep=20, attempts=3 ):
main.assertReturnString += 'Initial Flow State Passed\n'
else:
main.assertReturnString += 'Intial Flow State Failed\n'
testResult = main.FALSE
# Check Connectivity
if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
main.assertReturnString += 'Initial Ping Passed\n'
else:
main.assertReturnString += 'Initial Ping Failed\n'
testResult = main.FALSE
# Check connections that shouldn't work
if badSenderNames:
main.log.info( "Checking that packets from incorrect sender do not go through" )
if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, badSenderNames, recipientNames ), kwargs={ "expectFailure":True } ):
main.assertReturnString += 'Bad Sender Ping Passed\n'
else:
main.assertReturnString += 'Bad Sender Ping Failed\n'
testResult = main.FALSE
if badRecipientNames:
main.log.info( "Checking that packets to incorrect recipients do not go through" )
if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, badRecipientNames ), kwargs={ "expectFailure":True } ):
main.assertReturnString += 'Bad Recipient Ping Passed\n'
else:
main.assertReturnString += 'Bad Recipient Ping Failed\n'
testResult = main.FALSE
# Test rerouting if these variables exist
if sw1 and sw2 and expectedLink:
# Take link down
if utilities.retry( f=link, retValue=main.FALSE, args=( main, sw1, sw2, "down" ) ):
main.assertReturnString += 'Link Down Passed\n'
else:
main.assertReturnString += 'Link Down Failed\n'
testResult = main.FALSE
# Check intent state
if utilities.retry( f=checkIntentState, retValue=main.FALSE, args=( main, intentId ), sleep=main.checkIntentSleep ):
main.assertReturnString += 'Link Down Intent State Passed\n'
else:
main.assertReturnString += 'Link Down Intent State Failed\n'
testResult = main.FALSE
# Check flows count in each node
if utilities.retry( f=checkFlowsCount, retValue=main.FALSE, args=[ main ], sleep=20, attempts=3 ) and utilities.retry( f=checkFlowsState, retValue=main.FALSE, args=[ main ], sleep=20, attempts=3 ):
main.assertReturnString += 'Link Down Flow State Passed\n'
else:
main.assertReturnString += 'Link Down Flow State Failed\n'
testResult = main.FALSE
# Check OnosTopology
if utilities.retry( f=checkTopology, retValue=main.FALSE, args=( main, expectedLink ), sleep=10 ):
main.assertReturnString += 'Link Down Topology State Passed\n'
else:
main.assertReturnString += 'Link Down Topology State Failed\n'
testResult = main.FALSE
# Check Connection
if utilities.retry( f=scapyCheckConnection, retValue=main.FALSE, args=( main, senderNames, recipientNames ) ):
main.assertReturnString += 'Link Down Pingall Passed\n'
else:
main.assertReturnString += 'Link Down Pingall Failed\n'
testResult = main.FALSE
# Bring link up
if utilities.retry( f=link, retValue=main.FALSE, args=( main, sw1, sw2, "up" ) ):
main.assertReturnString += 'Link Up Passed\n'
else:
main.assertReturnString += 'Link Up Failed\n'
testResult = main.FALSE
# Wait for reroute
time.sleep( main.rerouteSleep )
# | |
and (line_list[line]["line_type"]=="abs"): line_list[line]["voff"] = "ABS_VOFF"
elif ("line_type" not in line_list[line]) or (line_list[line]["line_type"]=="user"):
if verbose:
print("\n Warning: %s has no line_type keyword specified. Assuming narrow line." % (line))
line_list[line]["voff"] = "NA_VOFF"
line_list[line]["line_type"] = "na"
#
# Do a final check for valid keywords. If any keywords don't belong, raise an error.
init_hmoments = ["h"+str(m)+"_init" for m in range(3,3+(comp_options["n_moments"]-2),1)]
plim_hmoments = ["h"+str(m)+"_plim" for m in range(3,3+(comp_options["n_moments"]-2),1)]
hmoments = ["h"+str(m) for m in range(3,3+(comp_options["n_moments"]-2),1)]
#
for line in list(line_list):
for key in line_list[line]:
if key not in ["center","center_pix","fwhm_res_kms","fwhm_res_ang","amp","fwhm","voff","shape","line_type","line_profile",
"amp_init","amp_plim","fwhm_init","fwhm_plim","voff_init","voff_plim",
"shape_init","shape_plim","label"]+hmoments+init_hmoments+plim_hmoments:
raise ValueError("\n %s not a valid keyword for the line list! \n" % key)
#
return line_list
##################################################################################
#### Add FWHM Resolution #########################################################
def add_fwhm_res(line_list, lam_gal, fwhm_gal, velscale, verbose=True):
    """Attach the instrumental FWHM resolution to every line in *line_list*.

    For each emission line, interpolates the instrumental resolution
    (`fwhm_gal`, in Angstroms) at the line center and stores three new
    keys in its dictionary: ``center_pix`` (line center in pixel space),
    ``fwhm_res_ang`` (FWHM resolution in Angstroms) and ``fwhm_res_kms``
    (the same resolution converted to km/s).

    Args:
        line_list (dict): Mapping of line name -> dict with a "center"
            key in Angstroms; modified in place.
        lam_gal (array): Observed wavelength grid in Angstroms.
        fwhm_gal (array): Instrumental FWHM at each wavelength (Angstroms).
        velscale: Unused here; kept for interface compatibility.
        verbose (bool): Unused here; kept for interface compatibility.

    Returns:
        dict: The same *line_list*, updated in place.
    """
    # Linear interpolation of the instrumental FWHM (Angstroms) as a
    # function of wavelength; out-of-range centers get a tiny sentinel.
    fwhm_gal_ftn = interp1d(lam_gal, fwhm_gal, kind='linear',
                            bounds_error=False, fill_value=(1.e-10, 1.e-10))
    # Map wavelength (Angstroms) to fractional pixel index so each line's
    # exact location on the pixel grid is available to the fitter.
    x_pix = np.array(range(len(lam_gal)))
    pix_interp_ftn = interp1d(lam_gal, x_pix, kind='linear',
                              bounds_error=False, fill_value=(1.e-10, 1.e-10))
    c = 299792.458  # speed of light (km/s); hoisted out of the loop
    for line in list(line_list):
        center = line_list[line]["center"]  # line center in Angstroms
        line_list[line]["center_pix"] = float(pix_interp_ftn(center))
        fwhm_res_ang = float(fwhm_gal_ftn(center))
        line_list[line]["fwhm_res_ang"] = fwhm_res_ang
        # Convert the Angstrom resolution to a velocity width in km/s.
        line_list[line]["fwhm_res_kms"] = (fwhm_res_ang / center) * c
    return line_list
##################################################################################
#### Initialize Line Parameters ##################################################
def initialize_line_pars(lam_gal,galaxy,comp_options,line_list,verbose=True):
    """
    Build the dictionary of free line-fit parameters (initial values and
    parameter limits) for every line in line_list.

    Parameters
    ----------
    lam_gal : array
        Wavelength grid in Angstroms.
    galaxy : array
        Flux on lam_gal; used to seed line amplitudes.
    comp_options : dict
        Component options.  Keys read here: fit_narrow, fit_broad,
        fit_outflow, fit_absorp, tie_line_fwhm, tie_line_voff, n_moments,
        and the na/br/out/abs ``*_line_profile`` entries.
    line_list : dict
        Line definitions keyed by name; each entry carries "center",
        "line_type", "line_profile", per-parameter settings ("free", a
        number, or a tied expression) and optional ``*_init``/``*_plim``
        overrides.
    verbose : bool
        Unused here; kept for interface compatibility with callers.

    Returns
    -------
    dict
        Mapping "<LINE>_<PAR>" -> {"init": value, "plim": (low, high)}.

    Raises
    ------
    ValueError
        If a user-supplied ``*_init`` lies outside its ``*_plim`` limits.
    """
    max_amp = np.nanmax(galaxy)        # global data maximum; amplitude ceiling
    median_amp = np.nanmedian(galaxy)  # typical continuum level (absorption depth)
    line_par_input = {}

    def get_init_amp(line_center):
        # Peak of the data within +/- 10 A of the line center, floored at 0.
        line_center = float(line_center)
        try:
            return np.nanmax([np.nanmax(galaxy[(lam_gal>(line_center-10.)) & (lam_gal<(line_center+10.))]), 0.0])
        except ValueError:
            # Sampling window lies entirely outside the wavelength coverage.
            return 0.0

    def amp_hyperpars(line_type, line_center):
        # (initial amplitude, (low, high)) by line type.
        line_center = float(line_center)
        if line_type in ["na","user"]:
            return get_init_amp(line_center), (0.0, max_amp)
        elif line_type in ["br","out"]:
            # Broad/outflow components start at half the local peak.
            return get_init_amp(line_center)/2.0, (0.0, max_amp)
        elif line_type=="abs":
            return -median_amp, (-median_amp, 0.0)

    def fwhm_hyperpars(line_type, line_profile):
        # (initial FWHM, (low, high)) in km/s by line type and profile.
        if line_type in ["na","user"]:
            if line_profile=="GH":
                # Gauss-Hermite profiles get extra head-room so they can
                # accommodate excess width from an outflow component.
                return 250.0, (0.1, 3000.0)
            return 100.0, (0.1, 800.0)
        elif line_type=="br":
            return 2500.0, (500.0, 15000.0)
        elif line_type=="out":
            return 450.0, (0.1, 5000.0)
        elif line_type=="abs":
            if line_profile=="GH":
                # Same Gauss-Hermite exception as for narrow lines.
                return 250.0, (0.1, 5000.0)
            return 500.0, (0.1, 800.0)

    def voff_hyperpars():
        # (initial voff, (low, high)) in km/s; identical for all current
        # line types.  Non-zero init (0.001) avoids starting exactly at 0.
        return 0.001, (-1000, 1000)

    def h_moment_hyperpars():
        # Higher-order Gauss-Hermite moments all start at 0 in [-0.5, 0.5].
        return 0.0, (-0.5, 0.5)

    def shape_hyperpars():
        # Voigt shape parameter: 0 (pure Gaussian) .. 1 (pure Lorentzian).
        return 0.0, (0.0, 1.0)

    def _check_bounds(pname, message):
        # Guard: a user-supplied *_init must lie within its *_plim interval.
        par = line_par_input[pname]
        if (par["init"] < par["plim"][0]) or (par["init"] > par["plim"][1]):
            raise ValueError(message)

    # Standard per-line free parameters; user-supplied *_init/*_plim override
    # the defaults.  Parameters are added whenever they are set to "free",
    # regardless of component options.
    for line in list(line_list):
        entry = line_list[line]
        if ("amp" in entry) and (entry["amp"]=="free"):
            amp_default = amp_hyperpars(entry["line_type"], entry["center"])
            line_par_input[line+"_AMP"] = {"init": entry.get("amp_init", amp_default[0]),
                                           "plim": entry.get("amp_plim", amp_default[1])}
            _check_bounds(line+"_AMP", "\n Amplitude (amp) initial value (amp_init) for %s outside of parameter limits (amp_plim)!\n" % (line))
        if ("fwhm" in entry) and (entry["fwhm"]=="free"):
            fwhm_default = fwhm_hyperpars(entry["line_type"], entry["line_profile"])
            line_par_input[line+"_FWHM"] = {"init": entry.get("fwhm_init", fwhm_default[0]),
                                            "plim": entry.get("fwhm_plim", fwhm_default[1])}
            _check_bounds(line+"_FWHM", "\n FWHM (fwhm) initial value (fwhm_init) for %s outside of parameter limits (fwhm_plim)!\n" % (line))
        if ("voff" in entry) and (entry["voff"]=="free"):
            voff_default = voff_hyperpars()
            line_par_input[line+"_VOFF"] = {"init": entry.get("voff_init", voff_default[0]),
                                            "plim": entry.get("voff_plim", voff_default[1])}
            _check_bounds(line+"_VOFF", "\n Velocity offset (voff) initial value (voff_init) for %s outside of parameter limits (voff_plim)!\n" % (line))
        if (entry["line_profile"]=="GH") and (comp_options["n_moments"]>2):
            h_default = h_moment_hyperpars()
            # range(3, n_moments+1): moments h3 .. h<n_moments>.
            for m in range(3, comp_options["n_moments"]+1):
                key = "h"+str(m)
                if (key in entry) and (entry[key]=="free"):
                    line_par_input[line+"_H"+str(m)] = {"init": entry.get(key+"_init", h_default[0]),
                                                        "plim": entry.get(key+"_plim", h_default[1])}
                    _check_bounds(line+"_H"+str(m), "\n Gauss-Hermite moment h%d initial value (h%d_init) for %s outside of parameter limits (h%d_plim)!\n" % (m,m,line,m))
        if ("shape" in entry) and (entry["shape"]=="free"):
            shape_default = shape_hyperpars()
            line_par_input[line+"_SHAPE"] = {"init": entry.get("shape_init", shape_default[0]),
                                             "plim": entry.get("shape_plim", shape_default[1])}
            _check_bounds(line+"_SHAPE", "\n Voigt profile shape parameter (shape) initial value (shape_init) for %s outside of parameter limits (shape_plim)!\n" % (line))

    line_types = [line_list[line]["line_type"] for line in line_list]

    def _add_tied_profile_pars(prefix, profile_key):
        # Shared higher-moment / shape parameters for a tied line group.
        profile = comp_options.get(profile_key)
        if (profile=="GH") and (comp_options["n_moments"]>2):
            for m in range(3, comp_options["n_moments"]+1):
                line_par_input[prefix+"_H"+str(m)] = {"init": 0.0, "plim": (-0.5, 0.5)}
        if profile=="V":
            line_par_input[prefix+"_SHAPE"] = {"init": 0.0, "plim": (0.0, 1.0)}

    # If tie_line_fwhm, add one common width (plus any higher-order profile
    # parameters) per line group (NA, BR, OUT, ABS).
    if comp_options["tie_line_fwhm"]:
        if comp_options["fit_narrow"] or ("na" in line_types):
            line_par_input["NA_FWHM"] = {"init": 250.0, "plim": (0.0, 1200.0)}
            _add_tied_profile_pars("NA", "na_line_profile")
        if comp_options["fit_broad"] or ("br" in line_types):
            line_par_input["BR_FWHM"] = {"init": 2500.0, "plim": (500.0, 15000.0)}
            _add_tied_profile_pars("BR", "br_line_profile")
        if comp_options["fit_outflow"] or ("out" in line_types):
            line_par_input["OUT_FWHM"] = {"init": 450.0, "plim": (0.1, 2500.0)}
            # BUG FIX: this branch previously read "abs_line_profile" and wrote
            # ABS_H*/ABS_SHAPE keys (copy-paste from the absorption branch), so
            # tied outflow GH/Voigt lines never received their profile params.
            _add_tied_profile_pars("OUT", "out_line_profile")
        if comp_options["fit_absorp"] or ("abs" in line_types):
            line_par_input["ABS_FWHM"] = {"init": 100.0, "plim": (0.0, 800.0)}
            _add_tied_profile_pars("ABS", "abs_line_profile")

    # If tie_line_voff, add one common velocity offset per line group.
    if comp_options["tie_line_voff"]:
        if comp_options["fit_narrow"] or ("na" in line_types):
            line_par_input["NA_VOFF"] = {"init": 0.0, "plim": (-500.0, 500.0)}
        if comp_options["fit_broad"] or ("br" in line_types):
            line_par_input["BR_VOFF"] = {"init": 0.0, "plim": (-1000.0, 1000.0)}
        if comp_options["fit_outflow"] or ("out" in line_types):
            line_par_input["OUT_VOFF"] = {"init": 0.0, "plim": (-1000.0, 1000.0)}
        if comp_options["fit_absorp"] or ("abs" in line_types):
            line_par_input["ABS_VOFF"] = {"init": 0.0, "plim": (-500.0, 500.0)}
    return line_par_input
##################################################################################
#### Check Line Hard Constraints #################################################
def check_hard_cons(lam_gal,galaxy,comp_options,line_list,line_par_input,par_input,verbose=True):
    """
    Validate hard-constraint expressions attached to line hyperparameters.

    Each line hyperparameter ("amp", "fwhm", "voff", "h3".."h10", "shape")
    may be the string "free", a numeric constant, or a tied expression that
    references other fit parameters (evaluated with numexpr).  Numeric
    values are normalized to float; expressions are test-evaluated against
    a dummy parameter dictionary.  Any expression that cannot be resolved
    or parsed is demoted to a "free" parameter and its entries are added to
    line_par_input.

    Returns the (possibly modified) line_list and line_par_input.
    """
    # Dummy value (0) for every known parameter name; used only so numexpr
    # can resolve names while test-parsing the constraint expressions.
    param_dict = {par:0 for par in {**par_input,**line_par_input}}
    for line in list(line_list):
        for hpar in line_list[line]:
            if (line_list[line][hpar]!="free") and (hpar in ["amp","fwhm","voff","h3","h4","h5","h6","h7","h8","h9","h10","shape"]):
                if isinstance(line_list[line][hpar],(int,float)):
                    # Hard-coded numeric value: normalize to float.
                    line_list[line][hpar] = float(line_list[line][hpar])
                else:
                    try:
                        # Probe-evaluate; the result is discarded -- we only
                        # care whether the expression parses and resolves.
                        ne.evaluate(line_list[line][hpar], local_dict = param_dict).item()
                    # Narrowed from a bare except: a bare except also
                    # swallows KeyboardInterrupt/SystemExit.
                    except Exception:
                        if verbose:
                            print("Hard-constraint %s not found in parameter list or could not be parsed; converting to free parameter.\n" % line_list[line][hpar])
                        _line_list = {line:line_list[line]}
                        _line_list[line][hpar]="free"
                        _line_par_input = initialize_line_pars(lam_gal,galaxy,comp_options,_line_list)
                        # Pre-existing entries in line_par_input win over the
                        # newly generated defaults.
                        line_par_input = {**_line_par_input,**line_par_input}
    return line_list, line_par_input
##################################################################################
#### Check Line Soft Constraints | |
bold').grid(row=2, column=0, sticky="ne")
TK.Label(OtherFrame, text='Initial Flux (Jy/km/s) =', bg='black', fg='white', font='none 12 bold').grid(row=1, column=2, sticky="ne")
self.UserDiskThic = TK.Entry(OtherFrame, width=20, bg='white')
self.UserDiskThic.grid(row=0, column=1, sticky="nwse")
self.UserDiskAng = TK.Entry(OtherFrame, width=20, bg='white')
self.UserDiskAng.grid(row=0, column=3, sticky="nwse")
self.UserInc = TK.Entry(OtherFrame, width=20, bg='white')
self.UserInc.grid(row=1, column=1, sticky="nwse")
self.UserRatio = TK.Entry(OtherFrame, width=20, bg='white')
self.UserRatio.grid(row=2, column=3, sticky="nwse")
self.UserGasSig = TK.Entry(OtherFrame, width=20, bg='white')
self.UserGasSig.grid(row=2, column=1, sticky="nwse")
self.UserFlux = TK.Entry(OtherFrame, width=20, bg='white')
self.UserFlux.grid(row=1, column=3, sticky="nwse")
#Lower Buttons
Lower = TK.Frame(master, bg='black')
Lower.grid(row=11, columnspan=4)
TK.Button(Lower, text='Run', width=15, command=lambda : self.Run()).grid(row=11, column=2, columnspan=2, pady=10, sticky = 'nw')
TK.Button(Lower, text='Show Results', width=15, command=lambda : self.ShowPlot()).grid(row=11, column=0, columnspan=2, pady=10,sticky="ne")
#Status Message
self.StatusBox = TK.Entry(master, width=20, bg='white', justify='center')
self.StatusBox.grid(row=12, columnspan=4, sticky="we")
self.StatusBox.insert(0, 'Waiting for User Input')
#Progress Bar
self.prog_bar = ttk.Progressbar(master, orient="horizontal", length=650, mode="determinate")
self.prog_bar["value"] = 0
self.prog_bar["maximum"] = 1
self.prog_bar.grid(row = 13, columnspan=4)
def SimOptions(self):
"""
Open Window to edit simulation options
"""
def Accept(self):
"""
Save values and close window
"""
#Save new values
self.method = var.get()
self.MaxIt = int(self.UserMaxIt.get())
self.Samp = int(self.UserSamp.get())
#Update StatusBox
self.StatusBox.delete(0, TK.END)
self.StatusBox.insert(0, 'Simulation Options Updated')
#Close window
SimOptInputWin.destroy()
#Resets to default values
def Reset(self):
"""
Reset to default settings
"""
self.MaxIt, self.Samp = 500, 100000
Inputs = [self.UserMaxIt, self.UserSamp]
Data = [self.MaxIt, self.Samp]
var.set(self.Methods[0])
for i in range(len(Inputs)):
Inputs[i].delete(0, TK.END)
Inputs[i].insert(0, Data[i])
#Create Window
SimOptInputWin = TK.Tk()
SimOptInputWin.configure(background='black')
SimOptInputWin.title('KinMS GUI - Simulation Options')
SimOptInputWin.configure(background='black')
TK.Label(SimOptInputWin, text='Simulation Options', bg='black', fg='white', font='none 12 bold').grid(row=0, column=0, columnspan=2, sticky="n")
TK.Label(SimOptInputWin, text='Method Of Minimsation = ', bg='black', fg='white', font='none 12 bold').grid(row=1, column=0, sticky="e")
TK.Label(SimOptInputWin, text='Maximum Number or Iterations = ', bg='black', fg='white', font='none 12 bold').grid(row=2, column=0, sticky="e")
TK.Label(SimOptInputWin, text='Number of KinMS Cloudlet Samples =', bg='black', fg='white', font='none 12 bold').grid(row=3, column=0, sticky="e")
#Create dropdown box
self.Methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP']
var = TK.StringVar(SimOptInputWin)
self.UserMethod = TK.OptionMenu(SimOptInputWin, var, *self.Methods)
self.UserMethod.grid(row=1, column=1, sticky='nesw')
#User Inputs
self.UserMaxIt = TK.Entry(SimOptInputWin, width=20, bg='white')
self.UserMaxIt.grid(row=2, column=1, sticky="nwse")
self.UserSamp = TK.Entry(SimOptInputWin, width=20, bg='white')
self.UserSamp.grid(row=3, column=1, sticky="nwse")
#Input data
Inputs = [self.UserMaxIt, self.UserSamp]
Data = [self.MaxIt, self.Samp]
var.set(self.method)
for i in range(len(Inputs)):
Inputs[i].delete(0, TK.END)
Inputs[i].insert(0, Data[i])
#Lower Buttons
OptionBottomButtons = TK.Frame(SimOptInputWin, bg='black')
OptionBottomButtons.grid(row=5, columnspan=2, sticky="n")
TK.Button(OptionBottomButtons, text='Accept', width=10, command=lambda : Accept(self)).grid(row=11, column=1, pady=10, sticky = 'w')
TK.Button(OptionBottomButtons, text='Load Default', width=10, command=lambda : Reset(self)).grid(row=11, column=0, pady=10, sticky = 'e')
#Edit Fits Values
    def FitsInput(self):
        """
        Open Window to edit .fits Values

        Shows every header-derived quantity (axis sizes, reference cells,
        beam, velocity channels, ...) in entry boxes and writes the edited
        values back onto the instance when 'Accept' is pressed.
        """
        #Update Function
        def UpdateFits(Data, Inputs):
            """
            Save values and close window

            Reads every entry box back into Data (as float), then unpacks
            Data into the corresponding instance attributes.
            """
            #Get user inputs
            for i in range(len(Data)):
                Data[i] = float(Inputs[i].get())
            #Update values
            self.xsize = Data[0]
            self.xRef = int(Data[1])
            self.xVal = Data[2]
            self.ysize = Data[3]
            self.yRef = int(Data[4])
            self.yVal = Data[5]
            self.cellsize = Data[6]
            self.MaxRad = Data[7]
            self.vsize = Data[8]
            self.vRef = int(Data[9])
            self.vVal = Data[10]
            self.dv = Data[11]
            # Beam is [BMAJ, BMIN, BPA].
            self.beamsize = np.array([Data[12], Data[13], Data[14]])
            self.VelStart, self.VelStop = int(Data[15]), int(Data[16])
            self.XStart, self.XStop = int(Data[17]), int(Data[18])
            self.YStart, self.YStop = int(Data[19]), int(Data[20])
            #Update StatusBox
            self.StatusBox.delete(0, TK.END)
            self.StatusBox.insert(0, '.fits data Updated')
            #Close Window
            FitsInputWin.destroy()
        #Create data
        # NOTE(review): this 17-element placeholder is dead -- Data is always
        # rebound below (try/except), and 21 values are needed; confirm and
        # consider removing.
        Data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        #Create Window
        FitsInputWin = TK.Tk()
        FitsInputWin.configure(background='black')
        FitsInputWin.title('KinMS GUI - Fits data')
        FitsInputWin.configure(background='black')
        FitsInputCenter = TK.Frame(FitsInputWin, bg='black')
        FitsInputCenter.grid(row=1, sticky="n")
        #Section Labels
        TK.Label(FitsInputCenter, text = 'Data from .fits', bg='black', fg='white', font='none 12 bold').grid(row=0, column=0, columnspan=4, sticky='n')
        TK.Label(FitsInputCenter, text = 'Values Derived from .fits', bg='black', fg='white', font='none 12 bold').grid(row=13, column=0, columnspan=4, sticky='n')
        #Make Labels
        TK.Label(FitsInputCenter, text = 'X size (") = ', bg='black', fg='white', font='none 12 bold').grid(row=1, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'X Reference Cell = ', bg='black', fg='white', font='none 12 bold').grid(row=2, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'X Cell Value = ', bg='black', fg='white', font='none 12 bold').grid(row=3, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Y size (") = ', bg='black', fg='white', font='none 12 bold').grid(row=4, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Y Reference Cell = ', bg='black', fg='white', font='none 12 bold').grid(row=5, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Y Cell Value = ', bg='black', fg='white', font='none 12 bold').grid(row=6, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Cell Size ("/Pix) = ', bg='black', fg='white', font='none 12 bold').grid(row=7, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Number of Velocity Channels = ', bg='black', fg='white', font='none 12 bold').grid(row=8, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Velocity Reference Cell = ', bg='black', fg='white', font='none 12 bold').grid(row=9, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Velocity Cell Value (km/s) = ', bg='black', fg='white', font='none 12 bold').grid(row=10, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Channel Width (km/s/channel) = ', bg='black', fg='white', font='none 12 bold').grid(row=11, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Beam Size [BMAJ, BMIN, BPA] (deg) = ', bg='black', fg='white', font='none 12 bold').grid(row=12, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Radius (") = ', bg='black', fg='white', font='none 12 bold').grid(row=14, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Channel Start and Stop = ', bg='black', fg='white', font='none 12 bold').grid(row=15, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'X axis Spatial Start and Stop = ', bg='black', fg='white', font='none 12 bold').grid(row=16, column=0, sticky='e')
        TK.Label(FitsInputCenter, text = 'Y axis Spatial Start and Stop = ', bg='black', fg='white', font='none 12 bold').grid(row=17, column=0, sticky='e')
        #Creating Entry boxes
        InputXSize = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputXSize.grid(row=1, column=1, columnspan=3, sticky='w')
        InputXRef = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputXRef.grid(row=2, column=1, columnspan=3, sticky='w')
        InputXVal = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputXVal.grid(row=3, column=1, columnspan=3, sticky='w')
        InputYSize = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputYSize.grid(row=4, column=1, columnspan=3, sticky='w')
        InputYRef = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputYRef.grid(row=5, column=1, columnspan=3, sticky='w')
        InputYVal = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputYVal.grid(row=6, column=1, columnspan=3, sticky='w')
        InputCellSize = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputCellSize.grid(row=7, column=1, columnspan=3, sticky='w')
        InputMaxRad = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputMaxRad.grid(row=14, column=1, columnspan=3, sticky='w')
        InputVelChan = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputVelChan.grid(row=8, column=1, columnspan=3, sticky='w')
        InputVelRef = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputVelRef.grid(row=9, column=1, columnspan=3, sticky='w')
        InputVelCell = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputVelCell.grid(row=10, column=1, columnspan=3, sticky='w')
        InputVelWidth = TK.Entry(FitsInputCenter, width=32, bg='white')
        InputVelWidth.grid(row=11, column=1, columnspan=3, sticky='w')
        InputBMAJ = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputBMAJ.grid(row=12, column=1, sticky='w')
        InputBMIN = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputBMIN.grid(row=12, column=2, sticky='w')
        InputBPA = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputBPA.grid(row=12, column=3, sticky='w')
        InputVStart = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputVStart.grid(row=15, column=1, sticky='w')
        InputVStop = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputVStop.grid(row=15, column=2, sticky='w')
        InputXStart = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputXStart.grid(row=16, column=1, sticky='w')
        InputXStop = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputXStop.grid(row=16, column=2, sticky='w')
        InputYStart = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputYStart.grid(row=17, column=1, sticky='w')
        InputYStop = TK.Entry(FitsInputCenter, width=10, bg='white')
        InputYStop.grid(row=17, column=2, sticky='w')
        #Consolidating data
        # Use current attribute values when a cube has been loaded; fall back
        # to zeros when none of the attributes exist yet.
        try:
            Data = [self.xsize, self.xRef, self.xVal, self.ysize, self.yRef, self.yVal, \
                    self.cellsize, self.MaxRad, self.vsize, self.vRef, self.vVal, self.dv, \
                    self.beamsize[0], self.beamsize[1], self.beamsize[2], self.VelStart, self.VelStop, \
                    self.XStart, self.XStop, self.YStart, self.YStop]
        except AttributeError:
            Data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # Order must match the Data list above -- UpdateFits pairs them by index.
        Inputs = [InputXSize, InputXRef, InputXVal, InputYSize, InputYRef, InputYVal, InputCellSize, \
                  InputMaxRad, InputVelChan, InputVelRef, InputVelCell, InputVelWidth, InputBMAJ, \
                  InputBMIN, InputBPA, InputVStart , InputVStop, InputXStart, InputXStop, InputYStart, InputYStop]
        #Inputting Fits values to text boxes
        for i in range(len(Inputs)):
            Inputs[i].insert(0, Data[i])
        #Adding button to close window and update values
        TK.Button(FitsInputCenter, text='Accept', width=20, command=lambda : UpdateFits(Data, Inputs)).grid(row=18, column=2, columnspan=2, sticky="n")
#Load Fits Data
def Loadfits(self):
"""
Load .fits files
"""
#Select file
filename = askopenfilename()
self.ShowFile.delete(0, TK.END)
self.ShowFile.insert(0, filename.split('/')[-1])
#Try to load .fits
try:
hdulist = fits.open(filename)
#Update StatusBox
self.StatusBox.delete(0, TK.END)
self.StatusBox.insert(0, '.fits Loaded')
#Produce error code if .fits is not compatible
except OSError:
#Update StatusBox
self.StatusBox.delete(0, TK.END)
self.StatusBox.insert(0, 'File is not compatible')
#Reading data for header
self.scidata = hdulist[0].data.T
self.cellsize = abs(hdulist[0].header['CDELT2']) * 3600 #arcseconds/pixel
self.xsize = hdulist[0].header['NAXIS1'] * self.cellsize
self.xRef = hdulist[0].header['CRPIX1']
self.xVal = hdulist[0].header['CRVAL1']
self.ysize = hdulist[0].header['NAXIS2'] * self.cellsize
self.yRef = hdulist[0].header['CRPIX2']
self.yVal = hdulist[0].header['CRVAL2']
self.vsize = hdulist[0].header['NAXIS3']
self.vRef = hdulist[0].header['CRPIX3']
self.vVal = hdulist[0].header['CRVAL3'] / 1000
self.dv = hdulist[0].header['CDELT3'] / 1000 #km/s
self.MaxRad = np.sqrt(self.xsize**2 + self.ysize**2)
try:
self.beamsize = np.array([hdulist[0].header['BMAJ'] * 3600, hdulist[0].header['BMIN'] * 3600, hdulist[0].header['BPA']])
except KeyError:
BeamData = hdulist[1].data
self.beamsize = np.array([np.mean(BeamData['BMAJ']) * 3600, np.mean(BeamData['BMIN']) * 3600, np.mean(BeamData['BPA'])])
#Removing Stokes axis if applicable
if self.scidata.ndim > 3:
self.scidata = self.scidata[:,:,:,0]
#Determin the relevant velocity channels
try:
self.VelStart, self.VelStop = AOI(self.scidata.copy())
except NameError:
self.VelStart, self.VelStop = 0, 0
| |
# All Rights Reserved.
# Copyright 2013 SolidFire Inc
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import json
import math
import re
import socket
import string
import time
import warnings

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests.packages.urllib3 import exceptions
import six

from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
# Driver-specific configuration options; registered on the shared backend
# config group below so they can be set per-backend in cinder.conf.
sf_opts = [
    cfg.BoolOpt('sf_emulate_512',
                default=True,
                help='Set 512 byte emulation on volume creation; '),
    cfg.BoolOpt('sf_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('sf_account_prefix',
               help='Create SolidFire accounts with this prefix. Any string '
                    'can be used here, but the string \"hostname\" is special '
                    'and will create a prefix using the cinder node hostname '
                    '(previous default behavior). The default is NO prefix.'),
    cfg.StrOpt('sf_volume_prefix',
               default='UUID-',
               help='Create SolidFire volumes with this prefix. Volume names '
                    'are of the form <sf_volume_prefix><cinder-volume-id>. '
                    'The default is to use a prefix of \'UUID-\'.'),
    cfg.StrOpt('sf_svip',
               help='Overrides default cluster SVIP with the one specified. '
                    'This is required or deployments that have implemented '
                    'the use of VLANs for iSCSI networks in their cloud.'),
    cfg.PortOpt('sf_api_port',
                default=443,
                help='SolidFire API port. Useful if the device api is behind '
                     'a proxy on a different port.'),
    cfg.BoolOpt('sf_enable_vag',
                default=False,
                help='Utilize volume access groups on a per-tenant basis.'),
    cfg.StrOpt('sf_provisioning_calc',
               default='maxProvisionedSpace',
               choices=['maxProvisionedSpace', 'usedSpace'],
               help='Change how SolidFire reports used space and '
                    'provisioning calculations. If this parameter is set to '
                    '\'usedSpace\', the driver will report correct '
                    'values as expected by Cinder '
                    'thin provisioning.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
# SolidFire API Error Constants
# (fault names returned in API error responses; compared against ex.msg)
xExceededLimit = 'xExceededLimit'
xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
class DuplicateSfVolumeNames(exception.Duplicate):
    """A volume-name lookup on the cluster returned more than one match."""
    message = _("Detected more than one volume with name %(vol_name)s")
class SolidFireAPIException(exception.VolumeBackendAPIException):
    """The SolidFire API returned an error response."""
    message = _("Bad response from SolidFire API")
class SolidFireDriverException(exception.VolumeDriverException):
    """Generic driver-level failure not tied to a specific API response."""
    message = _("SolidFire Cinder Driver exception")
class SolidFireAPIDataException(SolidFireAPIException):
    """The API responded, but the payload was malformed or unexpected."""
    message = _("Error in SolidFire API response: data=%(data)s")
class SolidFireAccountNotFound(SolidFireDriverException):
    """The requested account does not exist on the cluster."""
    message = _("Unable to locate account %(account_name)s on "
                "Solidfire device")
class SolidFireRetryableException(exception.VolumeBackendAPIException):
    """Transient API failure; safe to retry (see the retry decorator)."""
    message = _("Retryable SolidFire Exception encountered")
class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
    """Failure while establishing or using a replication pairing key."""
    message = _("Error on SF Keys")
def retry(exc_tuple, tries=5, delay=1, backoff=2):
    """Decorator factory: retry a call on the given exception types.

    Sleeps ``delay`` seconds after each failure, multiplying the sleep by
    ``backoff`` each time.  NOTE: with tries=N at most N-1 attempts are
    made before giving up.  When attempts are exhausted, raises
    SolidFireAPIException naming the failed command.
    """
    def retry_dec(f):
        # six.wraps was the py2/py3 shim; functools.wraps is its py3
        # equivalent and drops the six dependency here.
        @functools.wraps(f)
        def func_retry(*args, **kwargs):
            _tries, _delay = tries, delay
            while _tries > 1:
                try:
                    return f(*args, **kwargs)
                except exc_tuple:
                    time.sleep(_delay)
                    _tries -= 1
                    _delay *= backoff
                    LOG.debug('Retrying %(args)s, %(tries)s attempts '
                              'remaining...',
                              {'args': args, 'tries': _tries})
            # NOTE(jdg): Don't log the params passed here
            # some cmds like createAccount will have sensitive
            # info in the params, grab only the second tuple
            # which should be the Method
            msg = (_('Retry count exceeded for command: %s') %
                   (args[1],))
            LOG.error(msg)
            raise SolidFireAPIException(message=msg)
        return func_retry
    return retry_dec
def locked_image_id_operation(f, external=False):
    """Serialize the decorated method per image id.

    The decorated method must take an ``image_meta`` argument; its 'id' is
    combined with the instance's driver_prefix to form the lock name.
    """
    def wrapper(inst, *args, **kwargs):
        call_args = inspect.getcallargs(f, inst, *args, **kwargs)
        if not call_args.get('image_meta'):
            err_msg = _('The decorated method must accept image_meta.')
            raise exception.VolumeBackendAPIException(data=err_msg)
        image_id = call_args['image_meta']['id']

        @utils.synchronized('%s-%s' % (inst.driver_prefix, image_id),
                            external=external)
        def _locked_call():
            return f(inst, *args, **kwargs)
        return _locked_call()
    return wrapper
def locked_source_id_operation(f, external=False):
    # Decorator factory: serialize the decorated method per source-volume id.
    def lvo_inner1(inst, *args, **kwargs):
        lock_tag = inst.driver_prefix
        call_args = inspect.getcallargs(f, inst, *args, **kwargs)
        src_arg = call_args.get('source', None)
        if src_arg and src_arg.get('id', None):
            source_id = call_args['source']['id']
        else:
            # NOTE(review): the message says 'src_uuid' but the checked kwarg
            # is 'source' -- presumably a stale message; confirm with callers.
            err_msg = _('The decorated method must accept src_uuid.')
            raise exception.VolumeBackendAPIException(message=err_msg)
        # Lock name combines driver prefix and source id so operations on
        # unrelated volumes do not serialize against each other.
        @utils.synchronized('%s-%s' % (lock_tag, source_id),
                            external=external)
        def lvo_inner2():
            return f(inst, *args, **kwargs)
        return lvo_inner2()
    return lvo_inner1
@interface.volumedriver
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
.. code-block:: default
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
2.0.3 - Implement cluster pairing
2.0.4 - Implement volume replication
2.0.5 - Try and deal with the stupid retry/clear issues from objects
and tflow
2.0.6 - Add a lock decorator around the clone_image method
2.0.7 - Add scaled IOPS
2.0.8 - Add active status filter to get volume ops
2.0.9 - Always purge on delete volume
2.0.10 - Add response to debug on retryable errors
2.0.11 - Add ability to failback replicating volumes
2.0.12 - Fix bug #1744005
2.0.14 - Fix bug #1782588 qos settings on extend
2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors
2.0.16 - Add options for replication mode (Async, Sync and
SnapshotsOnly)
"""
VERSION = '2.0.16'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_SolidFire_CI"
driver_prefix = 'solidfire'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst']
sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100}
sf_iops_lim_max = {'minIOPS': 15000,
'maxIOPS': 200000,
'burstIOPS': 200000}
cluster_stats = {}
retry_exc_tuple = (SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xSliceNotRegistered',
'xNotReadyForIO']
    def __init__(self, *args, **kwargs):
        """Initialize config, the iSCSI target driver and replication state.

        When 'active_backend_id' is passed we are starting in failed-over
        mode and the remote replication cluster becomes the active one.
        """
        super(SolidFireDriver, self).__init__(*args, **kwargs)
        self.failed_over_id = kwargs.get('active_backend_id', None)
        self.replication_status = kwargs.get('replication_status', "na")
        self.configuration.append_config_values(sf_opts)
        self.template_account_id = None
        self.max_volumes_per_account = 1990
        self.volume_map = {}
        self.cluster_pairs = []
        self.replication_enabled = False
        self.failed_over = False
        self.verify_ssl = self.configuration.driver_ssl_cert_verify
        self.target_driver = SolidFireISCSI(solidfire_driver=self,
                                            configuration=self.configuration)
        self._check_replication_configs()
        # If we're failed over, we need to parse things out and set the active
        # cluster appropriately
        if self.failed_over_id:
            LOG.info("Running on failed-over mode. "
                     "Active backend-id: %s", self.failed_over_id)
            repl_target = self.configuration.get('replication_device', [])
            # Failed over with no replication_device configured is fatal:
            # we have no way to reach the cluster we failed over to.
            if not repl_target:
                LOG.error('Failed to initialize SolidFire driver to '
                          'a remote cluster specified at id: %s',
                          self.failed_over_id)
                raise SolidFireDriverException
            remote_endpoint = self._build_repl_endpoint_info(
                **repl_target[0])
            self.active_cluster = self._create_cluster_reference(
                remote_endpoint)
            # When in failed-over state, we have only endpoint info from the
            # primary cluster.
            self.primary_cluster = {"endpoint": self._build_endpoint_info()}
            self.failed_over = True
        else:
            self.primary_cluster = self._create_cluster_reference()
            self.active_cluster = self.primary_cluster
            if self.configuration.replication_device:
                self._set_cluster_pairs()
        LOG.debug("Active cluster: %s", self.active_cluster)
        # NOTE(jdg): This works even in a failed over state, because what we
        # do is use self.active_cluster in issue_api_request so by default we
        # always use the currently active cluster, override that by providing
        # an endpoint to issue_api_request if needed
        try:
            self._update_cluster_status()
        except SolidFireAPIException:
            # Best-effort stats refresh: an API hiccup here must not abort
            # driver initialization.
            pass
@classmethod
def get_driver_options(cls):
additional_opts = cls._get_oslo_driver_opts(
'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify',
'replication_device', 'reserved_percentage',
'max_over_subscription_ratio')
return sf_opts + additional_opts
    def _init_vendor_properties(self):
        """Build the vendor-unique volume-type properties for SolidFire.

        :returns: tuple of (properties dict, vendor prefix ``'solidfire'``);
            the prefix namespaces keys such as ``solidfire:replication_mode``.
        """
        properties = {}
        # Per-volume-type replication mode selector.
        self._set_property(
            properties,
            "solidfire:replication_mode",
            "Replication mode",
            _("Specifies replication mode."),
            "string",
            enum=["Async", "Sync", "SnapshotsOnly"])
        return properties, 'solidfire'
def __getattr__(self, attr):
if hasattr(self.target_driver, attr):
return getattr(self.target_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _get_remote_info_by_id(self, backend_id):
remote_info = None
for rd in self.configuration.get('replication_device', []):
if rd.get('backend_id', None) == backend_id:
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = (
remote_info['svip'] + ':3260')
return remote_info
def _create_remote_pairing(self, remote_device):
try:
pairing_info = self._issue_api_request('StartClusterPairing',
{}, version='8.0')['result']
pair_id = self._issue_api_request(
'CompleteClusterPairing',
{'clusterPairingKey': pairing_info['clusterPairingKey']},
version='8.0',
endpoint=remote_device['endpoint'])['result']['clusterPairID']
except SolidFireAPIException as ex:
if 'xPairingAlreadyExists' in ex.msg:
LOG.debug('Pairing already exists during init.')
else:
with excutils.save_and_reraise_exception():
LOG.error('Cluster pairing failed: %s', ex.msg)
LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
remote_device['clusterPairID'] = pair_id
return pair_id
def _get_cluster_info(self, remote_endpoint):
try:
return self._issue_api_request(
'GetClusterInfo', {},
endpoint=remote_endpoint)['result']['clusterInfo']
except SolidFireAPIException:
msg = _("Replication device is unreachable!")
LOG.exception(msg)
raise
def _check_replication_configs(self):
repl_configs = self.configuration.replication_device
if not repl_configs:
return
# We only support one replication target. Checking if the user is
# trying to add more than one;
if len(repl_configs) > 1:
msg = _("SolidFire driver only supports one replication target "
"device.")
LOG.error(msg)
raise SolidFireDriverException(msg)
repl_configs = repl_configs[0]
# Check if the user is not using the same MVIP as source
# and replication target.
if repl_configs['mvip'] == | |
"""
Segmentaion Part
Modified from DETR (https://github.com/facebookresearch/detr)
"""
from collections import defaultdict
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from PIL import Image
from einops import rearrange, repeat
try:
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
pass
import fvcore.nn.weight_init as weight_init
from .position_encoding import PositionEmbeddingSine1D
BN_MOMENTUM = 0.1
def get_norm(norm, out_channels):  # only support GN or LN
    """
    Build a normalization layer over ``out_channels`` channels.

    Args:
        norm (str or callable or None): "GN" (GroupNorm with 8 groups) or
            "LN" (LayerNorm); or a callable that takes a channel number and
            returns the normalization layer as a nn.Module.  ``None`` or an
            empty string means no normalization.
            (The docstring previously claimed BN/SyncBN/FrozenBN support,
            which this implementation never had.)

    Returns:
        nn.Module or None: the normalization layer

    Raises:
        KeyError: if ``norm`` is a non-empty string other than "GN"/"LN".
    """
    if norm is None:
        return None
    if isinstance(norm, str):
        if not norm:
            return None
        norm = {
            "GN": lambda channels: nn.GroupNorm(8, channels),
            "LN": lambda channels: nn.LayerNorm(channels),
        }[norm]
    return norm(out_channels)
class Conv2d(torch.nn.Conv2d):
    """
    A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
        Args:
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function
        It assumes that norm layer is used before activation.
        """
        # Pull out our extra kwargs before the base class sees them.
        norm_layer = kwargs.pop("norm", None)
        act_fn = kwargs.pop("activation", None)
        super().__init__(*args, **kwargs)
        self.norm = norm_layer
        self.activation = act_fn

    def forward(self, x):
        # The empty-input guard is skipped under torchscript because
        # (1) scripting is only used in evaluation mode and (2) Conv2d in
        # PyTorch >= 1.6 already handles empty inputs natively
        # (see pytorch#12013 and pytorch#40507).
        if not torch.jit.is_scripting():
            if self.training and x.numel() == 0:
                assert not isinstance(
                    self.norm, torch.nn.SyncBatchNorm
                ), "SyncBatchNorm does not support empty inputs!"

        out = F.conv2d(
            x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
        # conv -> norm -> activation, each stage optional
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
# FPN structure
class CrossModalFPNDecoder(nn.Module):
    # FPN-style decoder that fuses language features into every pyramid level
    # (via VisionLanguageBlock cross-attention) before the usual top-down
    # merge, producing mask features at 4x stride.
    def __init__(self, feature_channels: List, conv_dim: int, mask_dim: int, dim_feedforward: int = 2048, norm=None):
        """
        Args:
            feature_channels: list of fpn feature channel numbers.
            conv_dim: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            dim_feedforward: number of vision-language fusion module ffn channel numbers.
            norm (str or callable): normalization for all conv layers
        """
        super().__init__()
        self.feature_channels = feature_channels
        lateral_convs = []
        output_convs = []
        # conv bias is only useful when no normalization layer follows
        use_bias = norm == ""
        for idx, in_channels in enumerate(feature_channels):
            # in_channels: 4x -> 32x
            lateral_norm = get_norm(norm, conv_dim)
            output_norm = get_norm(norm, conv_dim)
            # 1x1 lateral conv: project backbone channels down to conv_dim
            lateral_conv = Conv2d(
                in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
            )
            # 3x3 output conv: smooth the merged top-down feature
            output_conv = Conv2d(
                conv_dim,
                conv_dim,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
                activation=F.relu,
            )
            weight_init.c2_xavier_fill(lateral_conv)
            weight_init.c2_xavier_fill(output_conv)
            stage = idx+1
            self.add_module("adapter_{}".format(stage), lateral_conv)
            self.add_module("layer_{}".format(stage), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.mask_dim = mask_dim
        # final projection to the mask embedding dimension (no norm/act)
        self.mask_features = Conv2d(
            conv_dim,
            mask_dim,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        weight_init.c2_xavier_fill(self.mask_features)
        # vision-language cross-modal fusion
        self.text_pos = PositionEmbeddingSine1D(conv_dim, normalize=True)
        # per-level spatial-reduction ratios (res2 -> res5) passed to the
        # fusion blocks to downsample self-attention queries/keys/values
        sr_ratios = [8, 4, 2, 1]
        cross_attns = []
        for idx in range(len(feature_channels)): # res2 -> res5
            cross_attn = VisionLanguageBlock(conv_dim, dim_feedforward=dim_feedforward,
                                             nhead=8, sr_ratio=sr_ratios[idx])
            for p in cross_attn.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)
            stage = int(idx + 1)
            self.add_module("cross_attn_{}".format(stage), cross_attn)
            cross_attns.append(cross_attn)
        # place cross-attn in top-down order (from low to high resolution)
        self.cross_attns = cross_attns[::-1]

    def forward_features(self, features, text_features, poses, memory, nf):
        """Top-down pass: fuse text features into each pyramid level, then
        merge levels FPN-style (lateral + upsampled coarser level).

        Arguments mirror :meth:`forward`; ``nf`` is the number of frames.
        Returns the fused 4x-stride feature map [b*t, c, h, w].
        """
        # nf: num_frames
        text_pos = self.text_pos(text_features).permute(2, 0, 1)   # [length, batch_size, c]
        text_features, text_masks = text_features.decompose()
        text_features = text_features.permute(1, 0, 2)
        # Coarse levels first (32x -> 8x); the 4x level is handled below.
        for idx, (mem, f, pos) in enumerate(zip(memory[::-1], features[1:][::-1], poses[1:][::-1])): # 32x -> 8x
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            cross_attn = self.cross_attns[idx]
            _, x_mask = f.decompose()
            n, c, h, w = pos.shape
            b = n // nf
            t = nf
            # NOTE: here the (h, w) is the size for current fpn layer
            vision_features = lateral_conv(mem) # [b*t, c, h, w]
            # flatten space-time into a token sequence for attention
            vision_features = rearrange(vision_features, '(b t) c h w -> (t h w) b c', b=b, t=t)
            vision_pos = rearrange(pos, '(b t) c h w -> (t h w) b c', b=b, t=t)
            vision_masks = rearrange(x_mask, '(b t) h w -> b (t h w)', b=b, t=t)
            cur_fpn = cross_attn(tgt=vision_features,
                                 memory=text_features,
                                 t=t, h=h, w=w,
                                 tgt_key_padding_mask=vision_masks,
                                 memory_key_padding_mask=text_masks,
                                 pos=text_pos,
                                 query_pos=vision_pos
            ) # [t*h*w, b, c]
            cur_fpn = rearrange(cur_fpn, '(t h w) b c -> (b t) c h w', t=t, h=h, w=w)
            # upsample
            if idx == 0: # top layer
                y = output_conv(cur_fpn)
            else:
                # Following FPN implementation, we use nearest upsampling here
                y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
                y = output_conv(y)
        # 4x level
        lateral_conv = self.lateral_convs[-1]
        output_conv = self.output_convs[-1]
        cross_attn = self.cross_attns[-1]
        x, x_mask = features[0].decompose()
        pos = poses[0]
        n, c, h, w = pos.shape
        b = n // nf
        t = nf
        vision_features = lateral_conv(x) # [b*t, c, h, w]
        vision_features = rearrange(vision_features, '(b t) c h w -> (t h w) b c', b=b, t=t)
        vision_pos = rearrange(pos, '(b t) c h w -> (t h w) b c', b=b, t=t)
        vision_masks = rearrange(x_mask, '(b t) h w -> b (t h w)', b=b, t=t)
        cur_fpn = cross_attn(tgt=vision_features,
                             memory=text_features,
                             t=t, h=h, w=w,
                             tgt_key_padding_mask=vision_masks,
                             memory_key_padding_mask=text_masks,
                             pos=text_pos,
                             query_pos=vision_pos
        ) # [t*h*w, b, c]
        cur_fpn = rearrange(cur_fpn, '(t h w) b c -> (b t) c h w', t=t, h=h, w=w)
        # Following FPN implementation, we use nearest upsampling here
        y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
        y = output_conv(y)
        return y  # [b*t, c, h, w], the spatial stride is 4x

    def forward(self, features, text_features, pos, memory, nf):
        """The forward function receives the vision and language features,
        and outputs the mask features with the spatial stride of 4x.

        Args:
            features (list[NestedTensor]): backbone features (vision), length is number of FPN layers
                tensors: [b*t, ci, hi, wi], mask: [b*t, hi, wi]
            text_features (NestedTensor): text features (language)
                tensors: [b, length, c], mask: [b, length]
            pos (list[Tensor]): position encoding of vision features, length is number of FPN layers
                tensors: [b*t, c, hi, wi]
            memory (list[Tensor]): features from encoder output. from 8x -> 32x
            NOTE: the layer orders of both features and pos are res2 -> res5
        Returns:
            mask_features (Tensor): [b*t, mask_dim, h, w], with the spatial stride of 4x.
        """
        y = self.forward_features(features, text_features, pos, memory, nf)
        return self.mask_features(y)
class VisionLanguageBlock(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False, sr_ratio=1):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
# for downsample
self.sr_ratio = sr_ratio
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory, t, h, w,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
b = tgt.size(1)
# self attn
q = k = self.with_pos_embed(tgt, query_pos)
if self.sr_ratio > 1: # downsample
q = rearrange(q, '(t h w) b c -> (b t) c h w', t=t, h=h, w=w)
k = rearrange(k, '(t h w) b c -> (b t) c h w', t=t, h=h, w=w)
v = rearrange(tgt, '(t h w) b c -> (b t) c h w', t=t, h=h, w=w)
# downsample
new_h = int(h * 1./self.sr_ratio)
new_w = int(w * 1./self.sr_ratio)
size = (new_h, new_w)
q = F.interpolate(q, size=size, mode='nearest')
k = F.interpolate(k, size=size, mode='nearest')
v = F.interpolate(v, size=size, mode='nearest')
# shape for transformer
q = rearrange(q, '(b t) c h w -> (t h w) b c', t=t)
k = rearrange(k, '(b t) c h w -> (t h w) b c', t=t)
v = rearrange(v, '(b t) c h w -> (t h w) b c', t=t)
# downsample mask
tgt_key_padding_mask = tgt_key_padding_mask.reshape(b*t, h, w)
tgt_key_padding_mask = F.interpolate(tgt_key_padding_mask[None].float(), size=(new_h, | |
fermé."""
return self[0] if len(self)==1 else centreGravite(self.dpoints)
centregravite=gravitycenter
    @property
    def barycentre(self):
        u"""The barycentre (centre of mass) of the cpoints control polygon,
        as computed by baryCentre()."""
        return baryCentre(self.cpoints)
@property
def degre(self):
m0,m1 = self.methode
if m0 in ('cubic',) : return 3
elif m0 in ('us','ius') : return m1
else : raise NotImplementedError,u"TODO:degre de la spline (methode=%s)"%self.methode
    def courbure(self, T) :
        """
        Compute and return the signed curvature at the parameters T,
        i.e. (x'y" - y'x") / (x'^2 + y'^2)^(3/2) from the spline component
        derivatives sx, sy.

        :param T: ndarray(n,).
        :return sc: ndarray(n,) the curvature at the points self(T).
        :TODO :
            refine by allowing the computation for degree-1 splines:
            the first derivative does not exist,
            -> return inf at corner points and 0 elsewhere.
            Same for degree 2, where the second derivative does not exist,
            -> return the appropriate value.
        """
        try :
            dx, dy = self.sx(T, 1), self.sy(T, 1)
        except ValueError as msg :
            # derivative unavailable (e.g. degree too low): return all-NaN
            rdebug(str(msg))
            sc = empty_like(T)
            sc[:] = nan
            return sc
        try :
            d2x, d2y = self.sx(T, 2), self.sy(T, 2)
        except ValueError as msg :
            rdebug(str(msg))
            sc = empty_like(T)
            sc[:] = nan
            return sc
        norm3_d2 = sqrt(dx**2+dy**2)**3
        v = dx*d2y-dy*d2x
        sc = v/norm3_d2
        # Where the speed vanishes, force +/-inf with the numerator's sign.
        # NOTE(review): when v == 0 there too, sign(v) is 0 and 0*inf gives
        # nan -- confirm this is the intended behaviour (see dead code below).
        iz = where(norm3_d2 < 1.0e-12)
        sc[iz] = sign(v)[iz]*inf
        # debug(sc=sc)
        return sc
        # if norm_d2=0 then x"(t)=y"(t)=0: a straight line, zero curvature
        # sc[where(norm3_d2 < 1.0e-12)] = 0.0
    def absCurv(self, T, witherr=False):
        u"""
        Compute and return the true curvilinear abscissa of the points
        self(T) along the spline self.
        The curvilinear abscissa of a point with parameter t in [0,1]
        differs little from t when there are many control points.
        A time 't' of T is a curvilinear abscissa along the polygon cpoints;
        absCurv(t) is the true curvilinear abscissa of the point self(t)
        along self.
        It is the integral from 0 to t of phi(s) = sqrt(self.sx(s)**2+self.sy(s)**2)
        The integration is done by scipy.integrate.quad()
        If the spline self has too many control points, this is slow and the
        error grows.
        :param self: a NSplineAbstract
        :param T: the n parameters t of the points whose curvilinear
            abscissa is wanted. These n parameters must lie in [0,1]
        :type T: one of
            - a ndarray of shape (n,1) with real values in [0,1],
            - a list of n real values in [0,1],
            - a tuple of n real values in [0,1],
            - a single real t in [0,1]
        :return ac or (ac, err):
            - if T is a real number, ac and err are real, err being the
              estimated error
            - if T is a ndarray((n,1)) or ndarray((n,)) of n reals
              then ac and err are of the same type
            (see scipy.integrate)
        """
        if isinstance(T, Number) :
            # integrate over the sub-intervals of self delimited by the knots
            phi = lambda s : sqrt(self.sx(s,1)**2+self.sy(s,1)**2) # the integrand
            bornes = [tk for tk in self.knots if tk<T]+[T]# sub-interval bounds
            intervalles = zip(bornes[:-1], bornes[1:])# the intervals
            ac, err = 0, 0
            for (t1, t2) in intervalles :
                int_t1_t2, err12 = quad(phi, t1, t2)# integrate over [t1, t2]
                ac += int_t1_t2
                err = max(err,err12)
            return (ac, err) if witherr else ac
            # return ac1+ac2, max(err1, err2)
        else :
            # vector case: delegates element-wise to the MODULE-LEVEL absCurv
            # function (not this method).  NOTE(review): the res[:,0] indexing
            # assumes that function returns (ac, err) pairs -- confirm.
            res = asarray([absCurv(self, t) for t in T])
            # res = asarray([quad(lambda s : sqrt(self.sx(s,1)**2+self.sy(s,1)**2), 0.0, t) for t in T])
            return (res[:,0], res[:,1]) if witherr else res[:,0]
def integraleCourbure(self, a=0.0, b=1.0, n=100):
u"""Integrale de la valeur absolue de la courbure, caractérise la
régularité de la courbe"""
# n DOIT être pair"""
h = float(b-a)/n
T = linspace(a, b, n+1)
C = abs(self.courbure(T))
A1 = C[0] + C[-1]
A2 = 2*sum(C[i] for i in range(2,n) if i%2==0)
A4 = 4*sum(C[i] for i in range(1,n) if i%2==1)
# debug (h, A1, A2, A4, (h/3)*(A1 + A2 + A4))
return (h/3)*(A1 + A2 + A4)
    def symetriser(self, axe, centre=None):
        u'''
        Mirror of the spline about axis ``axe``; self is modified in place.
        That is
        >>> self[i,axe] = 2*centre -self[i,axe]
        if centre == None the isobarycentre[axe] is used
        '''
        if len(self)==0 : return
        if centre is None :
            centre = self.barycentre[0][axe]
        if self.methode[0] == 'cubic' and isinstance(self.methode[1],(list, tuple)) :
            # Mirror the prescribed derivatives at point p0, then at the
            # last point (first or second derivatives depending on methode).
            newderivees = []
            for k in range(2) :
                derivees = self.methode[1][k]# point 0 or n
                # the derivative vector must be mirrored too (about 0, since
                # it is a direction, not a position)
                d = derivees[0]# derivation order
                u = asarray(derivees[1:], dtype=float).reshape((1,2))
                # debug('Avant sym:',u=u, axe=axe, centre=centre)
                u = symetrieAxe(u,axe,0.0)[0]
                # debug('Apres sym:',u=u, axe=axe, centre=centre)
                newderivees.append((d, u[0], u[1]))
            self._methode = (self.methode[0], tuple(newderivees))
        self.cpoints = symetrieAxe(self.cpoints,axe,centre)
        # self._update() is done by the cpoints setter
        # self.qcpolygon=points#qpolygonFrom(points)
        # self.update() done in the qcpolygon() setter
    def hardScale(self, scale=(1.0,1.0), centre=None):
        u'''
        Scales the spline itself; self is modified in place.
        Scaling by scale=(fx,fy), centred on centre (i.e. a homothety).
        That is
        - self[i][0] = centre[0] + scale[0]*(self[i]-centre)[0]
        - self[i][1] = centre[1] + scale[1]*(self[i]-centre)[1]
        If applicable, the prescribed derivative values at the spline ends
        are scaled as well, so that the transformed self really is
        homothetic to self.
        Assigning self.cpoints triggers _update, refreshing all attributes
        of self (_epoints, _absCurv, _dpoints, etc...).
        :param centre: (float, float) the coordinates (x0, y0) of the
            homothety centre. If centre==None the isobarycentre of the
            control points is used.
        :param scale: (float, float)
        :return: None
        '''
        if len(self) == 0 : return
        if centre is None :
            centre = self.barycentre
        if self.methode[0] == 'cubic' and isinstance(self.methode[1],(list, tuple)) :
            # Scale the derivatives at point p0, then at the last point
            # (first or second derivatives depending on methode).
            newderivees = []
            for k in range(2) :
                derivees = self.methode[1][k]# point 0 or n
                # the derivative must be scaled too
                d = derivees[0]# derivation order
                #u = (x'(t), y'(t)) or (x"(t),y"(t)) at t=0 or 1
                u = asarray(derivees[1:], dtype=float)
                u[0] *= scale[0]
                u[1] *= scale[1]
                newderivees.append((d, u[0], u[1]))
            self._methode = (self.methode[0], tuple(newderivees))
        # debug(Avant=self.cpoints)
        # NOTE: this calls the MODULE-LEVEL hardScale() function (the method
        # shadows its name); the cpoints setter performs the _update.
        self.cpoints = hardScale(self.cpoints, scale, centre)# does the _update
        # debug(APRES=self.cpoints)
        return
def translate(self,vecteur):
self.cpoints = self.cpoints+vecteur
# self._update()#C'est fait dans la property cpoints
hardMove = translate
def appendPoint(self,pos):
i = self.cpoints.shape[0]#l'indice du dernier point
if len(self) and dist2(pos, self.cpoints[-1]) == 0 :
raise ValueError('Point double, Impossible d\'ajouter ce point:%s'%(pos))
# return #eviter points doubles, les abs curv douvent être strictement croissantes.
else :
self.cpoints = vstack((self.cpoints, pos))
self._update()
return i
def index(self, t):
"""Localise t dans self.abscurv:retourne les deux entiers
k1,k2 tels que k1 < t <= k2"""
if t<0 or t>1 : return nan, nan
T = self.knots
k = 0
while T[k]<t : k+=1
if T[k] == t : return k,k
else : return k-1, k
    def insertPoint(self, pos, i=None):
        u"""
        Insert the control point pos into the control polygon.
        segment_i = [self[i-1], self[i]]
        If i = None :
            compute in which segment it is reasonable to insert pos
        if i != None,
            insert pos between i-1 and i (so that it becomes point i)
        :return: the index of the inserted point.
        :raises ValueError: when pos duplicates one of the segment ends.
        """
        # if isinstance(pos, QPointF) :
        #     pos = pos.x(), pos.y()
        cpoints = self.cpoints
        if i is None :
            im1, _ = segmentPlusProche(cpoints, pos)
            i = im1 + 1
            # Segments are numbered from 0 to len(cpoints)-2;
            # segment i is delimited by points i-1 and i
            # debug('Calcul segment le plus proche de %s'%pos)
            # if im1 is None :# never happens, cf segmentPlusProche()
            #     debug('Je ne sais pas ou (dans quel segment) inserer ce point %s'%str(pos))
            #     return
        elif i == 0 :
            # i is the first point: insert into the first segment
            im1, i = 0, 1
        else :
            im1 = i-1
        if dist2(pos, cpoints[i]) == 0 or dist2(pos, cpoints[im1]) == 0 :
            # debug('segment le plus proche',i=i,H=H,pos=pos)
            raise ValueError('Point double, impossible d\'inserer ce point:%s en position %d'%(str(pos),i))
            # return # avoid duplicate points: curvilinear abscissas must be strictly increasing.
        else:
            cpoints = insert(cpoints, i, (pos,), axis=0)
        # debug(cpoints=cpoints)
        self.cpoints = cpoints
        # self._update() already done by the cpoints setter
        return i# the position of the inserted point
def removePoint(self, pnum, update=True):
u"""
- Suppression du point pnum de self.cpoints
"""
point = self[pnum]
if update : #le setter de cpoints fait un update
self.cpoints = | |
import sys, os
import numpy as np
from lflib.lightfield import LightField
from lflib.imageio import save_image
from scipy.ndimage import filters
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
try:
import pyopencl as cl
import pyopencl.array as cl_array
LFLIB_HAVE_OPENCL = True
except ImportError:
LFLIB_HAVE_OPENCL = False
# Utility function
def extract(x, y):
    """Return {k: y.get(k) for k in x}: the sub-dict of mapping ``y``
    restricted to keys ``x`` (missing keys map to None).

    Replaces the previous assigned lambda (PEP 8 discourages named lambdas)
    and its dict(list(zip(...))) construction with a dict comprehension.
    """
    return {k: y.get(k) for k in x}
# ------------------------------------------------------------------------------
# OPENCL KERNELS
# ------------------------------------------------------------------------------
src = '''
// Enable 64-bit double support
#if defined(cl_khr_fp64) // Khronos extension available?
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#elif defined(cl_amd_fp64) // AMD extension available?
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#endif
__kernel void cl_backproject_rayspread(__read_only image2d_t lf,
__global float* volume,
int lf_rows, int lf_cols,
__global float* psf,
__constant int* filterRows,
__constant int* filterCols,
__constant int* filterRowOffset,
__constant int* filterColOffset,
int supersample_factor,
sampler_t sampler) {
// Store each work-item unique row and column
int x = get_global_id(0); int y = get_global_id(1);
int s = x / supersample_factor; int t = y / supersample_factor;
// Bail out if the kernel is out of bounds
if (x >= K_NX || y >= K_NY)
return;
// This offset comes into play when supersample_factor > 1
int x_bump = (x - s * supersample_factor);
int y_bump = (y - t * supersample_factor);
// Iterator for the filter
int base_address = 0;
for (int z_idx = 0; z_idx < K_NZ; ++z_idx) {
float sum = 0.0f;
int2 coords;
// Iterate the filter rows
for(int j = y_bump; j < filterRows[z_idx]; j+=supersample_factor) {
coords.y = (y - j - filterRowOffset[z_idx])/supersample_factor;
// Iterate over the filter columns
int filterIdx = base_address + filterCols[z_idx]*j + x_bump;
for(int i = x_bump; i < filterCols[z_idx]; i+=supersample_factor) {
coords.x = (x - i - filterColOffset[z_idx])/supersample_factor;
// Read a pixel from the image. A single channel image
// stores the pixel in the 'x' coordinate of the returned vector.
float4 pixel = read_imagef(lf, sampler, coords);
sum += pixel.x * psf[filterIdx];
filterIdx += supersample_factor;
}
}
// Copy the data to the output image if the work-item is in bounds
int vol_idx = y*K_NX*K_NZ + x*K_NZ + z_idx;
volume[vol_idx] += sum;
base_address += (filterCols[z_idx] * filterRows[z_idx]);
}
}
__kernel void cl_project_rayspread(__read_only image2d_t vol_slice,
__global float* subaperture,
__global float* psf,
__constant int* filterRows,
__constant int* filterCols,
__constant int* filterRowOffset,
__constant int* filterColOffset,
__constant int* u_coords,
__constant int* v_coords,
int num_rays,
int supersample_factor,
sampler_t sampler) {
// Store each work-item unique row and column
int s = get_global_id(0); int t = get_global_id(1);
// Bail out if the kernel is out of bounds
if (s >= K_NS || t >= K_NT)
return;
// Iterator for the filter
int base_address = 0;
for (int r = 0; r < num_rays; ++r) {
float sum = 0.0f;
int2 coords;
// Iterate the filter rows
for(int j = 0; j < filterRows[r]; ++j) {
coords.y = t*supersample_factor + j + filterRowOffset[r];
// Iterate over the filter columns
int filterIdx = base_address + filterCols[r]*j;
for(int i = 0; i < filterCols[r]; ++i) {
coords.x = s*supersample_factor + i + filterColOffset[r];
// Read a pixel from the image. A single channel image
// stores the pixel in the 'x' coordinate of the returned vector.
float4 pixel = read_imagef(vol_slice, sampler, coords);
sum += pixel.x * psf[filterIdx++];
}
}
// Copy the data to the output light field
int u = u_coords[r];
int v = v_coords[r];
int subap_idx = v*K_NT*K_NU*K_NS + t*K_NU*K_NS + u*K_NS + s;
subaperture[subap_idx] += sum;
base_address += (filterCols[r] * filterRows[r]);
}
}
__kernel void cl_project_wavespread(__read_only image2d_t vol_slice,
__global double* subaperture_im,
__global int* u_coords,
__global int* v_coords,
__global int* s_coords,
__global int* t_coords,
__global float* coefficients,
int num_coefficients,
int supersample_factor,
int dx, int dy,
sampler_t sampler) {
// Create local storage to speed up running summations.
__private double sum_buf[K_NU * K_NV];
// __private float sum_total[K_NU * K_NV];
// __private float num_good[K_NU * K_NV];
// __private float num_total[K_NU * K_NV];
for (int v = 0; v < K_NV; ++v) {
for (int u = 0; u < K_NU; ++u) {
sum_buf[v*K_NU+u] = 0.0;
// sum_total[v*K_NU+u] = 0.0;
// num_good[v*K_NU+u] = 0.0;
// num_total[v*K_NU+u] = 0.0;
}
}
// Store each work-item unique row and column
int s = get_global_id(0); int t = get_global_id(1);
// Bail out if the kernel is out of bounds
if (s >= K_NS || t >= K_NT)
return;
// Iterate over the psf coordinates and coefficients
for (int i = 0; i < num_coefficients; ++i) {
// Grab the appropriate pixel from the volume. Here we assume
// that a value of s_coords = 0 and t_coords = 0 refer to the
// lenslet at the center of the volume.
//
int2 coords;
coords.x = (s - s_coords[i]) * supersample_factor + dx;
coords.y = (t - t_coords[i]) * supersample_factor + dy;
float4 pixel = read_imagef(vol_slice, sampler, coords);
// Copy the data to the output light field
int u = u_coords[i];
int v = v_coords[i];
//num_total[v*K_NU+u] += 1;
//if (coords.x >= 0 && coords.x < K_NS && coords.y >= 0 && coords.y < K_NT) {
//num_good[v*K_NU+u] += 1;
//}
sum_buf[v*K_NU+u] += pixel.x * coefficients[i];
}
for (int v = 0; v < K_NV; ++v) {
for (int u = 0; u < K_NU; ++u) {
int subap_idx = v*K_NT*K_NU*K_NS + t*K_NU*K_NS + u*K_NS + s;
subaperture_im[subap_idx] += sum_buf[v*K_NU+u];
}
}
}
__kernel void cl_backproject_wavespread(__read_only image2d_t lf,
__global double* volume,
__global int* u_coords,
__global int* v_coords,
__global int* s_coords,
__global int* t_coords,
__global float* coefficients,
int num_coefficients,
int z, int dx, int dy,
int supersample_factor,
sampler_t sampler) {
// Store each work-item unique row and column
int s = get_global_id(0);
int t = get_global_id(1);
int x = s * supersample_factor + dx;
int y = t * supersample_factor + dy;
// Bail out if the kernel is out of bounds
if (x >= K_NX || y >= K_NY || x < 0 || y < 0)
return;
double sum = 0.0;
// Iterate over the psf coordinates and coefficients
for (int i = 0; i < num_coefficients; ++i) {
int2 coords;
//float r = sqrt( (float) ((s - K_NS / 2) + (t - K_NT / 2)) );
//float theta = atan2((float)(s - K_NS / 2), (float)(t - K_NT / 2));
//float du = r * cos(theta);
//float dv = r * sin(theta);
// Check to make sure the s index is in bounds.
int s_idx = s + s_coords[i];
int t_idx = t + t_coords[i];
if (s_idx < 0 || s_idx >= K_NS || t_idx < 0 || t_idx >= K_NT)
continue;
// Grab the appropriate pixel from the light field. Here we assume
// that a value of s_coords = 0 and t_coords = 0 refer to the
// lenslet at the center of the volume.
//
coords.y = t_idx * K_NS + s_idx;
coords.x = v_coords[i] * K_NU + u_coords[i];
float4 pixel = read_imagef(lf, sampler, coords);
// Copy the data to the output light field
sum += pixel.x * coefficients[i];
}
int vol_idx = z*K_NX*K_NY + y*K_NX + x;
volume[vol_idx] += sum;
}
'''
# ------------------------------------------------------------------------------
# UTILITY CLASSES & FUNCTIONS
# ------------------------------------------------------------------------------
def roundUp(value, multiple):
    '''Round ``value`` up to the nearest multiple of ``multiple``.

    This is useful for padding image dimensions to nice powers of 16,
    which helps promote efficient memory access on the GPU.'''
    # (-value) % multiple is the distance to the next multiple (0 when
    # value is already a multiple), matching the explicit remainder logic.
    return value + (-value % multiple)
# ------------------------------------------------------------------------------
# LIGHT FIELD PROJECTION CLASS
# ------------------------------------------------------------------------------
class LightFieldProjection(object):
def __init__(self, rayspread_db = None, psf_db = None, disable_gpu = False, gpu_id = None, platform_id = 0, use_sing_prec=False):
self.rayspread_db = rayspread_db
self.psf_db = psf_db
if self.psf_db is not None:
db = self.psf_db
else:
db = self.rayspread_db
# Premultiplier is optionaly applied after forward
# projection, and before back projection.
#
# Postmultiplier is optionaly applied before forward
# projection, and after back projection.
#
self.premultiplier = None
self.postmultiplier = None
if LFLIB_HAVE_OPENCL and not disable_gpu:
# Set up OpenCL
platform = cl.get_platforms()[platform_id]
print(75*"=")
print('platform id: ',platform_id, ' | platform: ',platform)
try:
for device in platform.get_devices():
print('device: | |
<reponame>blackdaemon/enso-launcher-continued
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.quasimode
#
# ----------------------------------------------------------------------------
"""
Implements the Quasimode.
This module implements a singleton class that represents the
quasimode. It handles all quasimodal key events, and the logic for
transitioning in and out of the quasimode. When the quasimode
terminates, it initiates the execution of the command, if any,
that the user indicated while in the quasimode. It also handles
the various kinds of user "error", which primarily consist of "no command
matches the text the user typed".
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
import operator
import time
import traceback
import weakref
from enso import config, graphics, input, messages
from enso.messages.windows import computeWidth
from enso.quasimode import layout
from enso.quasimode.charmaps import STANDARD_ALLOWED_KEYCODES as ALLOWED_KEYCODES
from enso.quasimode.parametersuggestionlist import ParameterSuggestionList
from enso.quasimode.suggestionlist import SuggestionList
from enso.quasimode.window import QuasimodeWindow
from enso.utils import suppress
from enso.utils.memoize import memoized
from enso.utils.strings import string_ratio_best_match
from enso.utils.xml_tools import escape_xml
# ----------------------------------------------------------------------------
# TheQuasimode
# ----------------------------------------------------------------------------
class Quasimode(object):
"""
Encapsulates the command quasimode state and event-handling.
Future note: In code review, we realized that implementing the
quasimode is an ideal case for the State pattern; the Quasimode
singleton would have a private member for quasimode state, which
would be an instance of one of two classes, InQuasimode or
OutOfQuasimode, both descended from a QuasimodeState interface
class. Consequences of this include much cleaner transition code
and separation of event handling into the two states.
"""
__instance = None
@classmethod
def get(cls):
return cls.__instance
    @classmethod
    def install(cls, eventManager):
        """Create the singleton, wired to the given event manager.

        The CommandManager import is local — presumably to avoid a
        circular import with enso.commands; confirm before hoisting it.
        """
        from enso.commands import CommandManager
        cls.__instance = cls(eventManager, CommandManager.get())
    def __init__(self, eventManager, commandManager):
        """
        Initialize the quasimode.

        Args:
            eventManager: dispatcher used to register the key responder,
                create event types, set quasimode keycodes and modality.
            commandManager: registry used to resolve and run commands.
        """
        self.__cmdManager = commandManager
        # Boolean variable that records whether the quasimode key is
        # currently down, i.e., whether the user is "in the quasimode".
        self._inQuasimode = False
        # The QuasimodeWindow object that is responsible for
        # drawing the quasimode; set to None initially.
        # A QuasimodeWindow object is created at the beginning of
        # the quasimode, and destroyed at the completion of the
        # quasimode.
        self.__quasimodeWindow = None
        # The suggestion list object, which is responsible for
        # maintaining all the information about the auto-completed
        # command and suggested command names, and the text typed
        # by the user.
        self.__suggestionList = SuggestionList(self.__cmdManager)
        # The parameter-suggestion list object, which is responsible for
        # maintaining all the information about the parameter suggestions.
        # Used for suggesting history entries or web-query suggestions.
        self.__parameterSuggestionList = ParameterSuggestionList(
            self.__cmdManager)
        # Boolean variable that should be set to True whenever an event
        # occurs that requires the quasimode to be redrawn, and which
        # should be set to False when the quasimode is drawn.
        self.__needsRedraw = False
        # Whether the next redraw should redraw the entire quasimodal
        # display, or only the description and user text.
        self.__nextRedrawIsFull = False
        self.__eventMgr = eventManager
        # Register a key event responder, so that the quasimode can
        # actually respond to quasimode events.
        self.__eventMgr.registerResponder(self.onKeyEvent, "key")
        # Creates new event types that code can subscribe to, to find out
        # when the quasimode (or mode) is started and completed.
        self.__eventMgr.createEventType("startQuasimode")
        self.__eventMgr.createEventType("endQuasimode")
        # Creates new event type that code can subscribe to, to find out
        # when the quasimode text has been modified.
        self.__eventMgr.createEventType("textModified")
        # Read settings from config file: are we modal?
        # What key activates the quasimode?
        # What keys exit and cancel the quasimode?
        self.setQuasimodeKeyByName(input.KEYCODE_QUASIMODE_START,  # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
                                   config.QUASIMODE_START_KEY)
        self.setQuasimodeKeyByName(input.KEYCODE_QUASIMODE_END,  # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
                                   config.QUASIMODE_END_KEY)
        self.setQuasimodeKeyByName(input.KEYCODE_QUASIMODE_CANCEL,  # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
                                   config.QUASIMODE_CANCEL_KEY)
        self.setQuasimodeKeyByName(input.KEYCODE_QUASIMODE_CANCEL2,  # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
                                   config.QUASIMODE_CANCEL_KEY2)
        self.__isModal = config.IS_QUASIMODE_MODAL
        self.__eventMgr.setModality(self.__isModal)
        # Bookkeeping for the last session and last executed command.
        self.__lastQuasimodeStarted = None
        self._lastRunCommand = None
        self.__lastParameterSuggestionsCheck = 0.0
        self.__lastParameterSuggestions = None
        # Unique numeric ID of the Quasimode "session"
        self.__quasimodeID = 0
def setQuasimodeKeyByName(self, function_name, key_name):
# Sets the quasimode to use the given key (key_name must be a
# string corresponding to a constant defined in the os-specific
# input module) for the given function ( which should be one of
# the KEYCODE_QUASIMODE_START/END/CANCEL constants also defined
# in input.)
key_code = getattr(input, key_name)
assert key_code, "Undefined quasimode key in config file: %s." % key_name
self.__eventMgr.setQuasimodeKeycode(function_name, key_code)
def getQuasimodeKeyByName(self, function_name):
return self.__eventMgr.getQuasimodeKeycode(function_name)
def isModal(self):
return self.__isModal
def setModal(self, isModal):
assert type(isModal) == bool
config.IS_QUASIMODE_MODAL = isModal
self.__isModal = isModal
self.__eventMgr.setModality(isModal)
def getSuggestionList(self):
return self.__suggestionList
def getLastRunCommand(self):
return self._lastRunCommand
def setDidyoumeanHint(self, hint):
is_dirty = (self.__suggestionList.getDidyoumeanHint() != hint)
if hint == "":
hint = None
self.__suggestionList.setDidyoumeanHint(hint)
if is_dirty:
self.__needsRedraw = True
def getDidyoumeanHint(self):
return self.__suggestionList.getDidyoumeanHint()
def setParameterSuggestions(self, suggestions):
self.__parameterSuggestionList.setSuggestions(suggestions)
def onKeyEventQuasimodeStart(self, eventType, keyCode):
#assert not self._inQuasimode
# self.__quasimodeBegin()
if not self._inQuasimode:
self.__quasimodeBegin()
def onKeyEventQuasimodeEnd(self, eventType, keyCode):
#assert self._inQuasimode
# self.__quasimodeEnd()
if self._inQuasimode:
self.__quasimodeEnd()
else:
self.__quasimodeEnd()
def onKeyEventQuasimodeCancel(self, eventType, keyCode):
self.__suggestionList.clearState()
self.__quasimodeEnd()
    def onKeyEventTabComplete(self, eventType, keyCode):
        """Handle Tab (or Right arrow).

        When the parameter-suggestion list is showing, advance to the next
        parameter suggestion and copy it into the user text; otherwise
        notify listeners and auto-complete the command name.
        """
        oldText = self.__suggestionList.getUserText()
        if self.__parameterSuggestionList.isActive():
            self.__parameterSuggestionList.cycleActiveSuggestion(1)
            suggestion = self.__parameterSuggestionList.getActiveSuggestion()
            activeCmd = self.__suggestionList.getActiveCommand()
            # Best-effort: swallow any error (e.g. the active command may
            # lack a PREFIX attribute, or there may be no active command).
            with suppress(Exception):
                userText = "%s%s" % (
                    activeCmd.PREFIX,
                    suggestion)
                self.__suggestionList.setUserText(userText)
        else:
            # Allow handlers to act upon the Tab key even if the text
            # has not been modified (old and new text are the same here).
            self.__eventMgr.triggerEvent(
                "textModified", keyCode, oldText, oldText, quasimodeId=self.__quasimodeID)
            self.__suggestionList.autoType()
def onKeyEventReturn(self, eventType, keyCode):
self.__suggestionList.autoType()
def onKeyEventEscape(self, eventType, keyCode):
self.__suggestionList.clearState()
self.__parameterSuggestionList.setSuggestions([])
def onKeyEventDelete(self, eventType, keyCode):
oldText = self.__suggestionList.getUserText()
self.__onDelete()
self.__onParameterModified(
keyCode, oldText, self.__suggestionList.getUserText())
def onKeyEventBack(self, eventType, keyCode):
oldText = self.__suggestionList.getUserText()
# Backspace has been pressed.
self.__onBackspace()
self.__onParameterModified(
keyCode, oldText, self.__suggestionList.getUserText())
def onKeyEventDown(self, eventType, keyCode):
# The user has pressed the down arrow; change which of the
# suggestions is "active" (i.e., will be executed upon
# termination of the quasimode)
self.__suggestionList.cycleActiveSuggestion(1)
if self.__parameterSuggestionList.isActive() and self.__suggestionList.getActiveIndex() > 0:
self.__parameterSuggestionList.setSuggestions([])
#self.__parameterSuggestionList.cycleActiveSuggestion( 1 )
self.__nextRedrawIsFull = True
def onKeyEventUp(self, eventType, keyCode):
# Up arrow; change which suggestion is active.
self.__suggestionList.cycleActiveSuggestion(-1)
if self.__parameterSuggestionList.isActive() and self.__suggestionList.getActiveIndex() > 0:
self.__parameterSuggestionList.setSuggestions([])
#self.__parameterSuggestionList.cycleActiveSuggestion( -1 )
self.__nextRedrawIsFull = True
def onKeyEventHome(self, eventType, keyCode):
# The user has pressed the down arrow; change which of the
# suggestions is "active" (i.e., will be executed upon
# termination of the quasimode)
if self.__parameterSuggestionList.isActive():
self.__parameterSuggestionList.setActiveSuggestion(0)
def onKeyEventEnd(self, eventType, keyCode):
# Up arrow; change which suggestion is active.
if self.__parameterSuggestionList.isActive():
self.__parameterSuggestionList.setActiveSuggestion(-1)
def onKeyEvent(self, eventType, keyCode):
"""Handles a key event of particular type"""
FUNC_CALL_MAP = {
input.EVENT_KEY_QUASIMODE: { # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_QUASIMODE_START: self.onKeyEventQuasimodeStart, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_QUASIMODE_END: self.onKeyEventQuasimodeEnd, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_QUASIMODE_CANCEL: self.onKeyEventQuasimodeCancel, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_QUASIMODE_CANCEL2: self.onKeyEventQuasimodeCancel, # IGNORE:E1101 @UndefinedVariable
},
input.EVENT_KEY_DOWN: { # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
input.KEYCODE_TAB: self.onKeyEventTabComplete, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_RIGHT: self.onKeyEventTabComplete, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_RETURN: self.onKeyEventReturn, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_ESCAPE: self.onKeyEventEscape, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_DELETE: self.onKeyEventDelete, # IGNORE:E1101 @UndefinedVariable
input.KEYCODE_BACK: self.onKeyEventBack, # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
input.KEYCODE_DOWN: self.onKeyEventDown, # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
input.KEYCODE_UP: self.onKeyEventUp, # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
input.KEYCODE_HOME: self.onKeyEventHome, # IGNORE:E1101 @UndefinedVariable | |
import logging
from pprint import pprint
import numpy as np
from copy import deepcopy
from pycqed.measurement.waveform_control import pulsar as ps
from pycqed.measurement.waveform_control import sequence as sequence
from pycqed.measurement.waveform_control import segment as segment
from pycqed.measurement.randomized_benchmarking import \
randomized_benchmarking as rb
import logging
log = logging.getLogger(__name__)
def one_qubit_reset(qb_name, operation_dict, prep_params=None, upload=True,
                    states=('g', 'e',)):
    """Generate an active-reset characterization sequence for one qubit.

    For each requested state, a state-preparation pulse is prepended to the
    reset/readout pattern so the reset can be characterized from that state.

    :param qb_name: name of the qubit.
    :param operation_dict: mapping from operation names (e.g. "RO <qb>") to
        pulse dictionaries.
    :param prep_params: preparation parameters forwarded to
        add_preparation_pulses; defaults to an empty dict.
    :param upload: whether to program the AWGs with the sequence.
    :param states (tuple): ('g','e',) for active reset e, ('g','f',) for active
        reset f and ('g', 'e', 'f') for both.
    :return: (sequence object, array of acquisition-element indices).
    """
    # prep_params=None avoids the mutable-default-argument pitfall
    # (was prep_params=dict(), one dict shared across all calls).
    if prep_params is None:
        prep_params = {}
    seq_name = '{}_reset_x{}_sequence'.format(qb_name,
                                              prep_params.get('reset_reps',
                                                              '_default_n_reps'))
    pulses = [deepcopy(operation_dict['RO ' + qb_name])]
    reset_and_last_ro_pulses = \
        add_preparation_pulses(pulses, operation_dict, [qb_name], **prep_params)
    swept_pulses = []
    state_ops = dict(g=['I '], e=['X180 '], f=['X180 ', 'X180_ef '])
    for s in states:
        pulses = deepcopy(reset_and_last_ro_pulses)
        state_pulses = [deepcopy(operation_dict[op + qb_name]) for op in
                        state_ops[s]]
        # reference end of state pulse to start of first reset pulse,
        # to effectively prepend the state pulse
        segment_pulses = prepend_pulses(pulses, state_pulses)
        swept_pulses.append(segment_pulses)
    seq = pulse_list_list_seq(swept_pulses, seq_name, upload=False)
    # reuse sequencer memory by repeating readout pattern
    seq.repeat_ro(f"RO {qb_name}", operation_dict)
    log.debug(seq)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)
    return seq, np.arange(seq.n_acq_elements())
def rabi_seq_active_reset(amps, qb_name, operation_dict, cal_points,
                          upload=True, n=1, for_ef=False,
                          last_ge_pulse=False, prep_params=None):
    '''
    Rabi sequence for a single qubit using the tektronix.

    Args:
        amps: array of pulse amplitudes (V)
        qb_name: name of the qubit
        operation_dict: mapping from operation names to pulse dictionaries
        cal_points: calibration-points object; its segments are appended
            to the sequence
        upload: whether to upload sequence to instrument or not
        n: number of pulses (1 is conventional Rabi)
        for_ef: drive the e-f transition (a ge pulse is prepended)
        last_ge_pulse: append a ge pulse after the ef Rabi pulses
        prep_params: preparation parameters; defaults to an empty dict

    Returns:
        sequence (Sequence): sequence object
        segment_indices (list): array of range of n_segments including
            calibration_segments. To be used as sweep_points for the MC.
    '''
    # prep_params=None avoids the mutable-default-argument pitfall
    # (was prep_params=dict()).
    if prep_params is None:
        prep_params = {}
    seq_name = 'Rabi_sequence'
    # add Rabi amplitudes segments
    rabi_ops = ["X180_ef " + qb_name if for_ef else "X180 " + qb_name] * n
    if for_ef:
        rabi_ops = ["X180 " + qb_name] + rabi_ops  # prepend ge pulse
        if last_ge_pulse:
            rabi_ops += ["X180 " + qb_name]  # append ge pulse
    rabi_ops += ["RO " + qb_name]
    rabi_pulses = [deepcopy(operation_dict[op]) for op in rabi_ops]
    # Name the n swept pulses "Rabi_0" .. "Rabi_{n-1}", skipping the
    # prepended ge pulse when driving the ef transition.
    for i in np.arange(1 if for_ef else 0, n + 1 if for_ef else n):
        rabi_pulses[i]["name"] = "Rabi_" + str(i - 1 if for_ef else i)
    swept_pulses = sweep_pulse_params(rabi_pulses,
                                      {f'Rabi_{i}.amplitude':
                                           amps for i in range(n)})
    swept_pulses_with_prep = \
        [add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
         for p in swept_pulses]
    seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
    # add calibration segments
    seq.extend(cal_points.create_segments(operation_dict, **prep_params))
    # reuse sequencer memory by repeating readout pattern
    seq.repeat_ro(f"RO {qb_name}", operation_dict)
    log.debug(seq)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)
    return seq, np.arange(seq.n_acq_elements())
def t1_active_reset(times, qb_name, operation_dict, cal_points,
                    upload=True, for_ef=False, last_ge_pulse=False,
                    prep_params=None):
    '''
    T1 sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO.

    Input pars:
        times: array of times to wait after the initial pi-pulse (s)
        qb_name: name of the qubit
        operation_dict: mapping from operation names to pulse dictionaries
        cal_points: calibration-points object; its segments are appended
        upload: whether to program the AWGs with the sequence
        for_ef: measure T1 of the e-f transition
        last_ge_pulse: append a ge pulse before readout (ef variant only)
        prep_params: preparation parameters; defaults to an empty dict
    Returns:
        (sequence object, array of acquisition-element indices)
    '''
    # prep_params=None avoids the mutable-default-argument pitfall
    # (was prep_params=dict()).
    if prep_params is None:
        prep_params = {}
    if np.any(times > 1e-3):
        # Module-level logger for consistency with the rest of this file
        # (was logging.warning on the root logger).
        log.warning('The values in the times array might be too large.'
                    'The units should be seconds.')
    seq_name = 'T1_sequence'
    # Operations
    if for_ef:
        ops = ["X180", "X180_ef"]
        if last_ge_pulse:
            ops += ["X180"]
    else:
        ops = ["X180"]
    ops += ["RO"]
    ops = add_suffix(ops, " " + qb_name)
    pulses = [deepcopy(operation_dict[op]) for op in ops]
    # name delayed pulse: last ge pulse if for_ef and last_ge_pulse
    # otherwise readout pulse
    if for_ef and last_ge_pulse:
        delayed_pulse = -2  # last_ge_pulse
        delays = np.array(times)
    else:
        delayed_pulse = -1  # readout pulse
        delays = np.array(times) + pulses[-1]["pulse_delay"]
    pulses[delayed_pulse]['name'] = "Delayed_pulse"
    # vary delay of readout pulse or last ge pulse
    swept_pulses = sweep_pulse_params(pulses, {'Delayed_pulse.pulse_delay': delays})
    # add preparation pulses
    swept_pulses_with_prep = \
        [add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
         for p in swept_pulses]
    seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
    # add calibration segments
    seq.extend(cal_points.create_segments(operation_dict, **prep_params))
    # reuse sequencer memory by repeating readout pattern
    seq.repeat_ro(f"RO {qb_name}", operation_dict)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)
    return seq, np.arange(seq.n_acq_elements())
def ramsey_seq_Echo(times, pulse_pars, RO_pars, nr_echo_pulses=4,
                    artificial_detuning=None,
                    cal_points=True, cpmg_scheme=True,
                    upload=True, return_seq=False):
    '''
    Ramsey sequence with interleaved X180 echo pulses for a single qubit
    using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO.
    Input pars:
        times: array of times between (start of) pulses (s)
        pulse_pars: dict containing the pulse parameters
        RO_pars: dict containing the RO parameters
        nr_echo_pulses: number of X180 pulses between the two X90 pulses
        artificial_detuning: artificial_detuning (Hz) implemented using phase
        cal_points: whether to use calibration points or not
        cpmg_scheme: True for equidistant (CPMG-style) echo spacing, False
            for Uhrig (UDD) spacing
        upload: whether to program the AWGs
        return_seq: if True return (sequence, segment list), else the
            sequence name
    '''
    if np.any(times > 1e-3):
        logging.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
    seq_name = 'Ramsey_sequence'
    seq = sequence.Sequence(seq_name)
    seg_list = []
    # First extract values from input, later overwrite when generating
    # waveforms
    pulses = get_pulse_dict_from_pars(pulse_pars)
    pulse_pars_x2 = deepcopy(pulses['X90'])
    pulse_pars_x2['ref_point'] = 'start'
    X180_pulse = deepcopy(pulses['X180'])
    Echo_pulses = nr_echo_pulses*[X180_pulse]
    DRAG_length = pulse_pars['nr_sigma']*pulse_pars['sigma']
    for i, tau in enumerate(times):
        if artificial_detuning is not None:
            # Artificial detuning: advance the phase of the second X90
            # linearly with the elapsed time.
            Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
            pulse_pars_x2['phase'] = Dphase
        # The last four segments are calibration points (2x |g>, 2x |e>).
        if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
            seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
        elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
            seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
        else:
            X90_separation = tau - DRAG_length
            if cpmg_scheme:
                if i == 0:
                    print('cpmg')
                # Equidistant spacing of the echo pulses between the X90s.
                echo_pulse_delay = (X90_separation -
                                    nr_echo_pulses*DRAG_length) / \
                                    nr_echo_pulses
                if echo_pulse_delay < 0:
                    # Echo pulses do not fit; fall back to a plain Ramsey
                    # segment for this tau.
                    pulse_pars_x2['pulse_delay'] = tau
                    pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
                else:
                    pulse_dict_list = [pulses['X90']]
                    # Half a gap before the first and after the last echo.
                    start_end_delay = echo_pulse_delay/2
                    for p_nr, pulse_dict in enumerate(Echo_pulses):
                        pd = deepcopy(pulse_dict)
                        pd['ref_point'] = 'end'
                        pd['pulse_delay'] = \
                            (start_end_delay if p_nr == 0 else echo_pulse_delay)
                        pulse_dict_list.append(pd)
                    pulse_pars_x2['ref_point'] = 'end'
                    pulse_pars_x2['pulse_delay'] = start_end_delay
                    pulse_dict_list += [pulse_pars_x2, RO_pars]
            else:
                if i == 0:
                    print('UDD')
                # Uhrig (UDD) spacing: pulse k sits at the fractional
                # position sin^2(pi*k / (2N+2)) of the free evolution.
                pulse_positions_func = \
                    lambda idx, N: np.sin(np.pi*idx/(2*N+2))**2
                pulse_delays_func = (lambda idx, N: X90_separation*(
                    pulse_positions_func(idx, N) -
                    pulse_positions_func(idx-1, N)) -
                    ((0.5 if idx == 1 else 1)*DRAG_length))
                if nr_echo_pulses*DRAG_length > X90_separation:
                    # Echo pulses do not fit; plain Ramsey segment.
                    pulse_pars_x2['pulse_delay'] = tau
                    pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
                else:
                    pulse_dict_list = [pulses['X90']]
                    for p_nr, pulse_dict in enumerate(Echo_pulses):
                        pd = deepcopy(pulse_dict)
                        pd['ref_point'] = 'end'
                        pd['pulse_delay'] = pulse_delays_func(
                            p_nr+1, nr_echo_pulses)
                        pulse_dict_list.append(pd)
                    pulse_pars_x2['ref_point'] = 'end'
                    pulse_pars_x2['pulse_delay'] = pulse_delays_func(
                        1, nr_echo_pulses)
                    pulse_dict_list += [pulse_pars_x2, RO_pars]
            seg = segment.Segment('segment_{}'.format(i), pulse_dict_list)
        seg_list.append(seg)
        seq.add(seg)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)
    if return_seq:
        return seq, seg_list
    else:
        return seq_name
def ramsey_seq_cont_drive(times, pulse_pars, RO_pars,
                          artificial_detuning=None, cal_points=True,
                          upload=True, return_seq=False, **kw):
    '''
    Ramsey sequence with a weak continuous drive between the two X90 pulses
    for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO.
    Input pars:
        times: array of times between (start of) pulses (s)
        pulse_pars: dict containing the pulse parameters
        RO_pars: dict containing the RO parameters
        artificial_detuning: artificial_detuning (Hz) implemented using phase
        cal_points: whether to use calibration points or not
        upload: whether to program the AWGs
        return_seq: if True return (sequence, segment list), else the
            sequence name
    '''
    if np.any(times > 1e-3):
        logging.warning('The values in the times array might be too large.'
                        'The units should be seconds.')
    seq_name = 'Ramsey_sequence'
    seq = sequence.Sequence(seq_name)
    seg_list = []
    # First extract values from input, later overwrite when generating
    # waveforms
    pulses = get_pulse_dict_from_pars(pulse_pars)
    pulse_pars_x2 = deepcopy(pulses['X90'])
    DRAG_length = pulse_pars['nr_sigma']*pulse_pars['sigma']
    # Continuous drive at 10% of the pi-pulse amplitude.
    cont_drive_ampl = 0.1 * pulse_pars['amplitude']
    X180_pulse = deepcopy(pulses['X180'])
    # I-channel component of the continuous drive (gaussian rise).
    cos_pulse = {'pulse_type': 'CosPulse_gauss_rise',
                 'channel': X180_pulse['I_channel'],
                 'frequency': X180_pulse['mod_frequency'],
                 'length': 0,
                 'phase': X180_pulse['phi_skew'],
                 'amplitude': cont_drive_ampl * X180_pulse['alpha'],
                 'pulse_delay': 0,
                 'ref_point': 'end'}
    # Q-channel component, played simultaneously, 90 degrees out of phase.
    sin_pulse = {'pulse_type': 'CosPulse_gauss_rise',
                 'channel': X180_pulse['Q_channel'],
                 'frequency': X180_pulse['mod_frequency'],
                 'length': 0,
                 'phase': 90,
                 'amplitude': cont_drive_ampl * X180_pulse['alpha'],
                 'pulse_delay': 0,
                 'ref_point': 'simultaneous'}
    for i, tau in enumerate(times):
        if artificial_detuning is not None:
            # Artificial detuning: advance the phase of the second X90
            # linearly with the elapsed time.
            Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
            pulse_pars_x2['phase'] = Dphase
        # The last four segments are calibration points (2x |g>, 2x |e>).
        if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
            seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
        elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
            seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
        else:
            X90_separation = tau - DRAG_length
            if X90_separation > 0:
                pulse_pars_x2['ref_point'] = 'end'
                # Fill the gap with two back-to-back drive halves:
                # gaussian rise followed by an inverted gaussian fall.
                cos_pls1 = deepcopy(cos_pulse)
                sin_pls1 = deepcopy(sin_pulse)
                cos_pls1['length'] = X90_separation/2
                sin_pls1['length'] = X90_separation/2
                cos_pls2 = deepcopy(cos_pls1)
                sin_pls2 = deepcopy(sin_pls1)
                cos_pls2['amplitude'] = -cos_pls1['amplitude']
                cos_pls2['pulse_type'] = 'CosPulse_gauss_fall'
                sin_pls2['amplitude'] = -sin_pls1['amplitude']
                sin_pls2['pulse_type'] = 'CosPulse_gauss_fall'
                pulse_dict_list = [pulses['X90'], cos_pls1, sin_pls1,
                                   cos_pls2, sin_pls2, pulse_pars_x2, RO_pars]
            else:
                # No room for the continuous drive; plain Ramsey segment.
                pulse_pars_x2['ref_point'] = 'start'
                pulse_pars_x2['pulse_delay'] = tau
                pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
            seg = segment.Segment('segment_{}'.format(i), pulse_dict_list)
        seg_list.append(seg)
        seq.add(seg)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)
    if return_seq:
        return seq, seg_list
    else:
        return seq_name
def ramsey_seq(times, pulse_pars, RO_pars,
artificial_detuning=None,
cal_points=True, upload=True, return_seq=False):
'''
Ramsey sequence for a single qubit using the tektronix.
SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
times: array of times between (start of) | |
"faulty": -1.3,
"undesirable": -1.9,
"flustered": -1.0,
"beauteousness": 2.7,
"loyalty": 2.5,
"ambivalent": 0.5,
"vulnerableness": -1.1,
"loyalists": 1.1,
"longingly": 0.7,
"faithlessly": -0.9,
"determinate": 0.8,
"intimidatingly": -1.1,
"responsive": 1.5,
"lovingness": 2.7,
"shitheads": -2.6,
"pleasurably": 2.6,
"frisky": 1.0,
"harmonica": 0.6,
"desperateness": -1.5,
"better": 1.9,
"laughings": 1.9,
"lmso": 2.7,
"feudalizing": -0.7,
"respectfulness": 1.9,
"confidently": 2.1,
"pleasurable": 2.4,
"pressureless": 1.0,
"chucklesome": 1.1,
"sentimentalising": 0.4,
"weakness": -1.8,
"startlements": 0.2,
"intimidation": -1.8,
"warmups": 0.8,
"weaponless": 0.1,
"antagonisms": -1.2,
"doubtingly": -1.4,
"brightness": 1.6,
"arrested": -2.1,
"stinkwood": -0.1,
"weakfish": -0.2,
"trivializations": -0.7,
"loneliness": -1.8,
"luck": 2.0,
"interruption": -1.5,
"calmly": 1.3,
"successively": 0.9,
"enthusiasts": 1.4,
"borescopes": -0.1,
"stealing": -2.7,
"optimistically": 2.1,
"comedies": 1.7,
"contend": 0.2,
"brilliantines": 2.0,
"sluts": -2.7,
"freelancers": 0.4,
"nigger": -3.3,
"lurks": -0.9,
"sparkle": 1.8,
"easiness": 1.6,
"surprise": 1.1,
"ranter": -1.2,
"sluggish": -1.7,
"assaults": -2.5,
"cutest": 2.8,
"cutesy": 2.1,
"fave": 1.9,
"revenge": -2.4,
"smugness": -1.4,
"aggressing": -0.6,
"heartlessness": -2.8,
"struggle": -1.3,
"inadequately": -1.0,
"warmhearted": 1.8,
"grouches": -0.9,
"brooding": 0.1,
"grouched": -0.8,
"enraged": -1.7,
"wronged": -1.9,
"intelligencers": 1.6,
"reassure": 1.4,
"comforted": 1.8,
"peculiarity": 0.6,
"downside": -1.0,
"enrages": -1.8,
"comforter": 1.9,
"snubbing": -0.9,
"uneasily": -1.4,
"nastiness": -1.1,
"smileless": -1.4,
"dumbfounded": -1.6,
"enthral": 0.4,
"dwell": 0.5,
"dumbfounder": -1.0,
"solemnly": 0.8,
"horrendously": -1.9,
"peculiar": 0.6,
"anxiety": -0.7,
"cruelly": -2.8,
"solemnity": -1.1,
"bitterness": -1.7,
"matter": 0.1,
"fabulous": 2.4,
"silly": 0.1,
"vitriolic": -2.1,
"blamelessness": 0.6,
"delectables": 1.4,
"jealously": -2.0,
"impotent": -1.1,
"(^;0": 2.0,
"funning": 1.8,
"easygoingness": 1.5,
"arguably": -1.0,
"O_o": -0.5,
"worsening": -2.0,
"starved": -2.6,
"lowball": -0.8,
"shysters": -0.9,
"grins": 0.9,
"appreciatory": 1.7,
"appreciators": 1.5,
"exonerate": 1.8,
"bittering": -1.2,
"treasonous": -2.7,
"alarm": -1.4,
"doa": -2.3,
"(^;o": 1.9,
"worsen": -2.3,
"definitely": 1.7,
"romanticize": 1.8,
"annoyed": -1.6,
"hunger": -1.0,
"rapture": 0.6,
"brutalities": -2.6,
"brilliance": 2.9,
"brilliancy": 2.6,
"uneasiness": -1.6,
"poison": -2.5,
"acquit": 0.8,
"divinity": 2.7,
"superiorly": 2.2,
"dismayingly": -1.9,
"jailed": -2.2,
"mournfulness": -1.8,
"rejoicing": 2.8,
"supremest": 2.2,
"stop": -1.2,
"smug": 0.8,
"rejective": -1.8,
"pardoning": 1.7,
"feudalist": -0.9,
"fraudsters": -2.4,
"lowlier": -1.7,
"bad": -2.5,
"feudalism": -0.9,
"scrumptious": 2.1,
"ban": -2.6,
"ethical": 2.3,
"unworthy": -2.0,
"fascinating": 2.5,
"blameless": 0.7,
"winnable": 1.8,
"survived": 2.3,
"interruptions": -1.7,
"mournfully": -1.7,
"heavenlinesses": 2.3,
"antagonistically": -2.2,
"safecrackings": -0.7,
"tolerance": 1.2,
"lethargic": -1.2,
"lazy": -1.5,
"embarrassedly": -1.1,
"cleverer": 2.0,
"dumber": -1.5,
"ignorance": -1.5,
"weary": -1.1,
"virtuosity": 2.1,
"beautician": 1.2,
"dumbed": -1.4,
"confronter": -0.3,
"positivisms": 1.8,
"stealthiness": 0.2,
"proactive": 1.8,
"agonise": -2.1,
"monopolize": -0.8,
"radiants": 1.2,
"overstatements": -0.7,
"positivest": 2.9,
"amorettos": 0.3,
"glamourless": -1.6,
"persecuting": -1.5,
"generousness": 2.4,
"glamorized": 2.1,
"--<--<@": 2.2,
"okays": 2.1,
"glamorizer": 2.4,
"glamorizes": 2.4,
"questioned": -0.4,
"prosperous": 2.1,
"suppress": -1.3,
"sunshine": 2.2,
"regretter": -1.6,
"sunshiny": 1.9,
"regretted": -1.6,
"moronically": -1.4,
"censored": -0.6,
"distorts": -1.4,
"illiteracy": -1.9,
"freakier": -1.3,
"ugly": -2.3,
"---'-;-{@": 2.3,
"neat": 2.0,
"degraders": -2.0,
"murders": -3.0,
"chilling": -0.1,
"pessimistic": -1.5,
"ridicule": -2.0,
"defeated": -2.1,
"fakes": -1.8,
"shame": -2.1,
"lugubrious": -2.1,
"splendour": 2.2,
"defeater": -1.4,
"disappear": -0.9,
"heroisms": 2.2,
"hahas": 1.8,
"unfortunate": -2.0,
"phobias": -2.0,
"confronting": -0.6,
"entitled": 1.1,
"amusers": 1.3,
"nurtures": 1.9,
"nurturer": 1.9,
"delight": 2.9,
"gracioso": 1.0,
"jealous": -2.0,
"kia": -3.2,
"nurtured": 1.9,
"opportunity": 1.8,
"weakly": -1.8,
"failing": -2.3,
"unconcerned": -0.9,
"flops": -1.4,
"slutty": -2.3,
"givers": 1.7,
"mockeries": -1.6,
"sentence": 0.3,
"agog": 1.9,
"rebellions": -1.1,
"unfair": -2.1,
"fighters": -0.2,
"mournfuller": -1.9,
"yep": 1.2,
"yes": 1.7,
"successionally": 1.1,
"devilwoods": -1.0,
"calmodulin": 0.2,
"satisfactions": 2.1,
"assassination": -2.9,
"weakside": -1.1,
"save": 2.2,
"|=": -0.4,
"|:": -0.5,
"wellaway": -0.8,
"defenselessly": -1.1,
"shocker": -0.6,
"paranoid": -1.0,
"dreams": 1.7,
"vanity": -0.9,
"dignity": 1.7,
"foolery": -1.8,
"amusias": -0.4,
"perfectibility": 1.8,
"harmonically": 2.1,
"disillusioned": -1.9,
"perfectiveness": 0.9,
"notorious": -1.9,
"determinator": 1.1,
"disgusts": -2.1,
"dead": -3.3,
"coziness": 1.5,
"dear": 1.6,
"gloomier": -1.5,
"joylessness": -2.7,
"horridnesses": -3.0,
"interestingness": 1.8,
"rigidification": -1.1,
"provoking": -0.8,
"bamboozle": -1.5,
"alarmists": -1.1,
"bold": 1.6,
"confrontation": -1.3,
"confronters": -1.3,
"super": 2.9,
"freeloading": -1.3,
"wept": -2.0,
"puking": -1.8,
"blithe": 1.2,
"bolds": 1.3,
"irritative": -2.0,
"commit": 1.2,
"sympathetic": 2.3,
"fatalists": -1.2,
"inspirit": 1.9,
"fumed": -1.8,
"energises": 2.2,
"intellectual": 2.3,
"fumer": 0.7,
"fumes": -0.1,
"fumet": 0.4,
"frightened": -1.9,
"energised": 2.1,
"murderers": -3.3,
"annoyance": -1.3,
"riskier": -1.4,
"jealousy": -1.3,
"wimpishness": -0.2,
"freeloaders": -0.1,
"lowe": 0.5,
"idealizer": 1.3,
"immorality": -0.6,
"lown": 0.9,
"severer": -1.6,
"idealized": 1.8,
"failure": -2.3,
"lows": -0.8,
"positivistic": 1.9,
"weirdnesses": -0.7,
"joyrider": 0.7,
"joyrides": 0.8,
"surprising": 1.1,
"dynamometric": 0.3,
"prevents": 0.3,
"wellies": 0.4,
"horrifying": -2.7,
"propaganda": -1.0,
"strengthening": 2.2,
"difficultly": -1.7,
"pileup": -1.1,
"adversely": -0.8,
"dominants": 0.2,
"oversimplification": 0.2,
"agreeing": 1.4,
"molester": -2.3,
"excel": 2.0,
"pitifullest": -1.1,
"misinterpreted": -1.3,
"molested": -1.9,
"axe": -0.4,
"smugly": 0.2,
"brightens": 1.5,
"magnificence": 2.4,
"idealogy": 0.8,
"died": -2.6,
"derail": -1.2,
"apology": 0.2,
"pessimists": -1.0,
"libertinage": 0.2,
"trembled": -1.1,
"luckier": 1.9,
"sobering": -0.8,
"visioning": 1.1,
"profits": 1.9,
"relievo": 1.3,
"gossipmonger": -1.0,
"relieve": 1.5,
"foemen": -0.3,
"distractable": -1.3,
"misread": -1.1,
"anticipation": 0.4,
"praises": 2.4,
"praiser": 2.0,
"deceitful": -1.9,
"sufferer": -2.0,
"disgusting": -2.4,
"amaze": 2.5,
"praised": 2.2,
"suffered": -2.2,
":-###..": -2.5,
"apeshit": -0.9,
"beneficently": 2.2,
"intelligibly": 1.2,
"sappy": -1.0,
"rebelling": -1.1,
"excellency": 2.5,
"die": -2.9,
"accidentally": -1.4,
"jho": 0.8,
"hooray": 2.3,
"excellence": 3.1,
"grimace": -1.0,
"attracting": 2.1,
"fearing": -2.7,
"refusing": -1.7,
"pmbi": 0.8,
"heroical": 2.9,
"sentimentalism": 1.0,
"sentimentalise": 1.2,
"favour": 1.9,
"intelligencer": 1.5,
"intelligences": 1.6,
"suspect": -1.2,
"divinize": 2.3,
"sentimentalist": 0.8,
"promissory": 0.9,
"dweller": 0.3,
"victimized": -1.8,
"dwelled": 0.4,
"deceit": -2.0,
"victimizer": -1.8,
"oversimplify": -0.6,
"battling": -1.1,
"dominantly": 0.2,
"grinned": 1.1,
"depressible": -1.7,
"tolerantly": 0.4,
"grinner": 1.1,
"defenceman": 0.4,
"treasure": 1.2,
"(-:O": 1.5,
"pitiableness": -1.1,
"sorrowfulness": -2.5,
"hardier": -0.6,
"flexibility": 1.4,
"contradictions": -1.3,
"honester": 1.9,
"dolorous": -2.2,
"(-:o": 1.5,
"ferociousness": -1.0,
"shakeable": -0.3,
"smothered": -0.9,
"ayc": 0.2,
"(-:{": -0.1,
"3:(": -2.2,
"3:)": 0.5,
"adoringly": 2.4,
"unequaled": 0.5,
"acceptable": 1.3,
"treasury": 0.8,
"insultingly": -2.3,
"vigorish": -0.4,
"rigid": -0.5,
"acceptably": 1.5,
"weepier": -1.8,
"weepies": -1.6,
"romanticizes": 1.8,
"avoiding": -1.4,
"flu": -1.6,
"glorifying": 2.4,
"flunks": -1.8,
"drags": -0.7,
"snobbily": -1.6,
"(-:<": -0.4,
"growing": 0.7,
"flunky": -1.8,
"repressing": -1.8,
"(-:0": 2.8,
"invigorations": 1.2,
"crazy": -1.4,
"solemnifies": -0.5,
"grouchy": -1.9,
"stammerers": -0.8,
"lobbying": -0.3,
"confused": -1.3,
"disbelieve": -1.2,
"solemnified": -0.5,
"glamourous": 2.0,
"confuses": -1.3,
"craze": -0.6,
"complainant": -0.7,
"benignly": 0.2,
"interrupters": -1.3,
"supremer": 2.3,
"rejectingly": -1.7,
"looses": -0.6,
"143": 3.2,
"resentenced": -0.8,
"critics": -1.2,
"praiseworthy": 2.6,
"resentences": -0.6,
"mad": -2.2,
"mocks": -2.0,
"destructive": -3.0,
"severed": -1.5,
"pressuring": -1.4,
"fraudulence": -2.3,
"farce": -1.7,
"thorny": -1.1,
"hailed": 0.9,
"youthful": 1.3,
"deceive": -1.7,
"bastardly": -2.7,
"cute": 2.0,
"shaky": -0.9,
"wishing": 0.9,
"arguer": -1.6,
"argues": -1.6,
"vigorously": 0.5,
"fatalities": -2.9,
"cuts": -1.2,
"argued": -1.5,
"perfectively": 2.1,
"irresolute": -1.4,
"phobics": -1.3,
"defensives": -0.3,
"thank": 1.5,
"interesting": 1.7,
"undermined": -1.5,
"undermines": -1.4,
"truest": 1.9,
"aching": -2.2,
"captivated": 1.6,
"numbfish": -0.4,
"killer": -3.3,
"sarcastic": -1.0,
"cutely": 1.3,
"stubbornness": -1.1,
"beneficialness": 1.7,
"bullshit": -2.8,
"battlefields": -0.9,
"truthful": 2.0,
"bitterbrushes": -0.6,
"careless": -1.5,
"helplessnesses": -1.7,
"sentimentalities": 0.9,
"richly": 1.9,
"abandoners": -1.9,
"grievants": -1.1,
"condemns": -2.3,
"dominate": -0.5,
"assurance": 1.4,
"inaction": -1.0,
"repressively": -1.7,
"peacefulness": 2.1,
"felonies": -2.5,
"irrationalities": -1.5,
"freemasonry": 0.3,
"forbids": -1.3,
"worshipper": 0.6,
"eviller": -2.9,
"contented": 1.4,
"jolliness": 2.5,
"funky": -0.4,
"apologising": 0.2,
"assuredly": 1.6,
"remorseful": -0.9,
"dwelling": 0.1,
"thwarts": -0.4,
"trickily": -0.8,
"lone": -1.1,
"lamella": -0.1,
"diviners": 1.2,
"suspended": -2.1,
"thoughtless": -2.0,
"wowing": 2.5,
"fiery": -1.4,
"stinkpot": -2.5,
"loneliest": -2.4,
"pukka": 2.8,
"14aa41": 2.4,
"fuking": -3.2,
"easement": 1.6,
"victimising": -2.5,
"sickeners": -2.2,
"contemptibilities": -2.0,
"acceptant": 1.6,
"protected": 1.9,
"defecting": -1.8,
"conspiracy": -2.4,
"comedian": 1.6,
"teasingly": -0.4,
"ferocities": -1.0,
"warsaw": -0.1,
"efficient": 1.8,
"isolate": -0.8,
"shocked": -1.3,
"burdeners": -1.7,
"creatively": 1.5,
"uncompelling": -0.9,
"flattered": 1.6,
"pain": -2.3,
"arguing": -2.0,
"stealthiest": 0.4,
"insanity": -2.7,
"assault": -2.8,
"satisfying": 2.0,
"amortise": 0.5,
"flatterer": -0.3,
"complaints": -1.7,
"supremely": 2.7,
"glamour": 2.4,
"damns": -2.2,
"creativities": 1.7,
"gracefully": 2.4,
"creativeness": 1.8,
"merriments": 2.0,
"enthusing": 1.9,
"humoresques": 0.9,
"show": 1.1,
"freakouts": -1.5,
"pityriasis": -0.8,
"enthusiast": 1.5,
"safekeeping": 1.4,
"despairingly": -2.2,
"dick": -2.3,
"collapses": -1.2,
"travesty": -2.7,
"defenders": 0.3,
"enthusiasm": 1.9,
"bittern": -0.2,
"uninvolved": -2.2,
"satisfied": 1.8,
"unimportant": -1.3,
"killers": -3.3,
"0:)": 1.9,
"bitters": -0.4,
"unethical": -2.3,
"gallantry": 2.6,
"xqzt": 1.6,
"wrathful": -2.7,
"moodily": -1.3,
"stupid": -2.4,
"worrying": -1.4,
"lethargy": -1.4,
"honesty": 2.2,
"shitake": -0.3,
"satisfies": 1.8,
"rigidly": -0.7,
"steadfast": 1.0,
"feudists": -0.7,
"moaned": -0.4,
"promiscuity": -1.8,
"jt": 0.9,
"jw": 1.6,
"jp": 0.8,
"pained": -1.8,
"killing": -3.4,
"jj": 1.0,
"jk": 0.9,
"surprisers": 0.3,
"stinker": -1.5,
| |
<gh_stars>1-10
"""
Keywords used across different PENELOPE main programs.
"""
# Standard library modules.
from operator import attrgetter
# Third party modules.
# Local modules.
from pypenelopetools.penelope.keyword import (
TypeKeyword,
KeywordSequence,
KeywordGroupBase,
)
from pypenelopetools.penelope.enums import KPAR, ICOL
# Globals and constants variables.
class TITLE(TypeKeyword):
    """Title of the job.

    The TITLE string is used to mark dump files. To prevent the improper
    use of wrong resuming files, change the title each time basic
    parameters of the problem are modified; the code can then detect the
    inconsistency and stop with an error message.
    """

    def __init__(self):
        super().__init__("TITLE", (str,))

    def _parse_line(self, line):
        # The title may contain spaces, so the tokenized values are
        # re-joined into a single string value.
        name, values, comment = super()._parse_line(line)
        joined = " ".join(values)
        return name, (joined,), comment

    def set(self, title):
        """
        Sets value.

        Args:
            title (str): Title of the job (up to 65 characters).
        """
        super().set(title)

    def validate(self, title):
        # The PENELOPE input format limits the width of the title field.
        if len(title) > 65:
            raise ValueError("Title is too long. Maximum 65 characters")
        return super().validate(title)
class SKPAR(TypeKeyword):
    """Type of primary particle KPARP (1=electrons, 2=photons or 3=positrons).

    If KPARP=0, the initial states of primary particles are set by
    subroutine SOURCE, to be provided by the user. An example of that
    subroutine, corresponding to a 60-Co source (two gamma rays in each
    nuclear deexcitation), is included in the PENMAIN package (file
    'source.f').
    """

    def __init__(self):
        super().__init__(
            "SKPAR",
            (KPAR,),
            comment="Primary particles: 1=electron, 2=photon, 3=positron",
        )

    def set(self, kparp):
        """
        Sets value.

        Args:
            kparp (:class:`KPAR`): Type of primary particles
        """
        super().set(kparp)

    def validate(self, kparp):
        # Only members of the KPAR enumeration are acceptable.
        if kparp in KPAR:
            return super().validate(kparp)
        raise ValueError("Invalid particle")
class SENERG(TypeKeyword):
    """For a monoenergetic source, initial energy SE0 of primary particles."""

    def __init__(self):
        super().__init__(
            "SENERG", (float,), comment="Initial energy (monoenergetic sources only)"
        )

    def set(self, se0):
        """
        Sets value.

        Args:
            se0 (float): Initial energy of primary particles in eV
        """
        super().set(se0)

    def validate(self, se0):
        # The source energy must be strictly positive.
        if se0 <= 0.0:
            raise ValueError("SE0 must be greater than 0")
        return super().validate(se0)
class SPECTR(KeywordSequence):
    """Define a source with continuous (stepwise constant) spectrum.

    For a source with continuous (stepwise constant) spectrum,
    each 'SPECTR' line gives the lower end-point of an energy
    bin of the source spectrum (Ei) and the associated relative
    probability (Pi), integrated over the bin. Up to NSEM=1000
    lines, in arbitrary order. The upper end of the spectrum is
    defined by entering a line with Ei equal to the upper energy
    end point and with a negative Pi value.
    """

    def __init__(self, maxlength=1000):
        # Default maxlength matches PENELOPE's NSEM=1000 line limit.
        keyword = TypeKeyword(
            "SPECTR", (float, float), comment="E bin: lower-end and total probability"
        )
        super().__init__(keyword, maxlength)

    def add(self, ei, pi):
        """
        Adds a step in the spectrum.

        Args:
            ei (float): Lower end-point of an energy bin of the source spectrum in eV
            pi (float): Associated relative probability
        """
        return super().add(ei, pi)
class SGPOL(TypeKeyword):
    """Activates the simulation of polarisation effects in the scattering of photons.

    This line activates the simulation of polarisation effects
    in the scattering of photons (electrons and positrons are
    assumed to be unpolarised). SP1, SP2, SP3 are the Stokes
    parameters of primary photons, which define the degrees of
    linear polarisation at 45 deg azimuth, of circular
    polarisation, and of linear polarisation at zero azimuth,
    respectively. It is assumed that secondary photons are
    emitted with null polarisation (SP1=SP2=SP3=0).
    """

    def __init__(self):
        super().__init__(
            "SGPOL",
            (float, float, float),
            comment="Stokes parameters for polarized photons",
        )

    def set(self, sp1, sp2, sp3):
        """
        Sets Stokes polarisation parameters.

        Args:
            sp1 (float): Degrees of linear polarisation at 45 deg azimuth
            sp2 (float): Degrees of circular polarisation
            sp3 (float): Degrees of linear polarisation at 0 deg azimuth
        """
        super().set(sp1, sp2, sp3)
class SPOSIT(TypeKeyword):
    """Coordinates of the source centre."""

    def __init__(self):
        super().__init__(
            "SPOSIT", (float, float, float), comment="Coordinates of the source"
        )

    def set(self, sx0, sy0, sz0):
        """
        Sets coordinates.

        Args:
            sx0 (float): x-coordinate in cm.
            sy0 (float): y-coordinate in cm.
            sz0 (float): z-coordinate in cm.
        """
        super().set(sx0, sy0, sz0)
class SCONE(TypeKeyword):
    """Initial direction of primary particles is sampled uniformly within a conical beam.

    Conical source beam. Polar and azimuthal angles of the
    beam axis direction, THETA and PHI, and angular aperture,
    ALPHA, in deg.
    The case ALPHA=0 defines a monodirectional source, and ALPHA
    =180 deg corresponds to an isotropic source.
    """

    def __init__(self):
        super().__init__(
            "SCONE", (float, float, float), comment="Conical beam; angles in deg"
        )

    def set(self, theta, phi, alpha):
        """
        Sets angles.

        Args:
            theta (float): Polar angle of the beam axis direction in deg.
            phi (float): Azimuthal angle of the beam axis direction in deg.
            alpha (float): Angular aperture in deg.
                0 = monodirectional, 180 = isotropic.
        """
        super().set(theta, phi, alpha)
class SRECTA(TypeKeyword):
    """Initial direction of primary particles is sampled uniformly within a rectangular beam.

    Rectangular source beam. Limiting polar and azimuthal angles
    of the source beam window, (THETAL,THETAU)x(PHIL,PHIU), in deg.
    The case THETAL=THETAU, PHIL=PHIU defines a monodirectional
    source. To define an isotropic source, set THETAL=0, THETAU=
    180, PHIL=0 and PHIU=360.
    """

    def __init__(self):
        super().__init__(
            "SRECTA",
            (float, float, float, float),
            comment="Rectangular beam; angles in deg",
        )

    def set(self, thetal, thetau, phil, phiu):
        """
        Sets angles.

        Args:
            thetal (float): Lower limit polar angle in deg.
            thetau (float): Upper limit polar angle in deg.
            phil (float): Lower limit azimuthal angle in deg.
            phiu (float): Upper limit azimuthal angle in deg.
        """
        super().set(thetal, thetau, phil, phiu)
class MFNAME(TypeKeyword):
    """Name of a PENELOPE input material data file.

    This file must be generated in advance by running the program MATERIAL.
    """

    def __init__(self):
        super().__init__("MFNAME", (str,), comment="Material file, up to 20 chars")

    def set(self, filename):
        """
        Sets filename.

        Args:
            filename (str): File name of material file (up to 20 characters).

        Raises:
            ValueError: If the file name exceeds 20 characters.
        """
        super().set(filename)

    def validate(self, filename):
        # Enforce the 20-character limit stated in the keyword comment,
        # mirroring the length check performed by TITLE for its field.
        if len(filename) > 20:
            raise ValueError("Filename is too long. Maximum 20 characters")
        return super().validate(filename)
class MSIMPA(TypeKeyword):
    """Set of simulation parameters for this material

    * absorption energies, EABS(1:3,M),
    * elastic scattering parameters, C1(M) and C2(M), and
    * cutoff energy losses for inelastic collisions and Bremsstrahlung emission,
      WCC(M) and WCR(M).
    """

    def __init__(self):
        # Seven floats, in the exact order described by the comment below.
        super().__init__(
            "MSIMPA",
            (float, float, float, float, float, float, float),
            comment="EABS(1:3),C1,C2,WCC,WCR",
        )

    def set(self, eabs1, eabs2, eabs3, c1, c2, wcc, wcr):
        """
        Sets parameters.

        Args:
            eabs1 (float): Absorption energy of electrons in eV.
            eabs2 (float): Absorption energy of photons in eV.
            eabs3 (float): Absorption energy of positrons in eV.
            c1 (float): Elastic scattering coefficient.
            c2 (float): Elastic scattering coefficient.
            wcc (float): Cutoff energy losses for inelastic collisions in eV.
            wcr (float): Cutoff energy losses for Bremsstrahlung emission in eV.
        """
        super().set(eabs1, eabs2, eabs3, c1, c2, wcc, wcr)
class MaterialGroup(KeywordGroupBase):
    """Group to define both material file name and its simulation parameters."""

    def __init__(self):
        super().__init__()
        self.MFNAME = MFNAME()
        self.MSIMPA = MSIMPA()
        # Position of this material in the geometry. When left as None,
        # the containing Materials sequence assigns it on insertion.
        self.index = None

    def set(self, filename, eabs1, eabs2, eabs3, c1, c2, wcc, wcr, index=None):
        """
        Sets material file name and simulation parameters.

        Args:
            filename (str): File name of material file (up to 20 characters).
            eabs1 (float): Absorption energy of electrons in eV.
            eabs2 (float): Absorption energy of photons in eV.
            eabs3 (float): Absorption energy of positrons in eV.
            c1 (float): Elastic scattering coefficient.
            c2 (float): Elastic scattering coefficient.
            wcc (float): Cutoff energy losses for inelastic collisions in eV.
            wcr (float): Cutoff energy losses for Bremsstrahlung emission in eV.
            index (int, optional): Index of this material in the geometry
        """
        self.MFNAME.set(filename)
        self.MSIMPA.set(eabs1, eabs2, eabs3, c1, c2, wcc, wcr)
        self.index = index

    def get_keywords(self):
        # MFNAME must precede MSIMPA in the PENELOPE input file.
        return (self.MFNAME, self.MSIMPA)
class Materials(KeywordSequence):
    """Definition of materials."""

    def __init__(self, maxlength=10):
        super().__init__(MaterialGroup(), maxlength)

    def add(self, index, filename, eabs1, eabs2, eabs3, c1, c2, wcc, wcr):
        """
        Adds a new material.

        Args:
            index (int): Index of this material in the geometry
            filename (str): File name of material file (up to 20 characters).
            eabs1 (float): Absorption energy of electrons in eV.
            eabs2 (float): Absorption energy of photons in eV.
            eabs3 (float): Absorption energy of positrons in eV.
            c1 (float): Elastic scattering coefficient.
            c2 (float): Elastic scattering coefficient.
            wcc (float): Cutoff energy losses for inelastic collisions in eV.
            wcr (float): Cutoff energy losses for Bremsstrahlung emission in eV.
        """
        return super().add(filename, eabs1, eabs2, eabs3, c1, c2, wcc, wcr,
                           index)

    def _add_keyword(self, keyword):
        super()._add_keyword(keyword)
        if keyword.index is None:
            # Default to a 1-based position matching insertion order.
            keyword.index = len(self._keywords)

    def get(self):
        # Materials are reported sorted by their geometry index.
        ordered = sorted(self._keywords, key=attrgetter("index"))
        return (tuple(group.get() for group in ordered),)

    def write(self, fileobj):
        # Materials must also be written sorted by their geometry index.
        for group in sorted(self._keywords, key=attrgetter("index")):
            group.write(fileobj)
class GEOMFN(TypeKeyword):
"""Name of geometry definition file.
The bodies in | |
str
:param Tid: 设备TID
:type Tid: str
:param OrderCount: 订单数量,可一次性创建多个订单
:type OrderCount: int
:param StorageRegion: 云存服务所在的区域,如ap-guangzhou,ap-singapore
:type StorageRegion: str
:param ChnNum: 视频流通道号。(对于存在多路视频流的设备,如NVR设备,与设备实际视频流通道号对应)
:type ChnNum: int
:param AccessId: 设备主人用户在IoT Video平台的注册ID。该参数用于验证Paas/Saas平台的设备/用户关系链是否一致
:type AccessId: str
:param EnableTime: 服务生效时间,若不指定此参数,服务立即生效
:type EnableTime: int
"""
self.PkgId = None
self.Tid = None
self.OrderCount = None
self.StorageRegion = None
self.ChnNum = None
self.AccessId = None
self.EnableTime = None
def _deserialize(self, params):
self.PkgId = params.get("PkgId")
self.Tid = params.get("Tid")
self.OrderCount = params.get("OrderCount")
self.StorageRegion = params.get("StorageRegion")
self.ChnNum = params.get("ChnNum")
self.AccessId = params.get("AccessId")
self.EnableTime = params.get("EnableTime")
class CreateStorageServiceResponse(AbstractModel):
    """Response payload of the CreateStorageService API.

    :param IsRenew: Whether this order is a renewal.
    :type IsRenew: bool
    :param ServiceId: Cloud storage service ID.
    :type ServiceId: str
    :param StorageRegion: Region hosting the cloud storage service.
    :type StorageRegion: str
    :param Tid: Device TID.
    :type Tid: str
    :param ChnNum: Video stream channel number (matches the device's actual
        channel for multi-stream devices such as NVRs).
    :type ChnNum: int
    :param AccessId: End user's registration ID on the IoT Video platform.
    :type AccessId: str
    :param StartTime: Service start time.
    :type StartTime: int
    :param EndTime: Service expiry time.
    :type EndTime: int
    :param Status: Service status.
    :type Status: int
    :param Data: List of newly created cloud storage orders.
    :type Data: list of StorageOrder
    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start unset; _deserialize fills them from the API reply.
        for field in ("IsRenew", "ServiceId", "StorageRegion", "Tid",
                      "ChnNum", "AccessId", "StartTime", "EndTime",
                      "Status", "Data", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Plain scalar fields are copied verbatim from the raw mapping.
        for field in ("IsRenew", "ServiceId", "StorageRegion", "Tid",
                      "ChnNum", "AccessId", "StartTime", "EndTime",
                      "Status"):
            setattr(self, field, params.get(field))
        # Each raw order entry is expanded into a StorageOrder model.
        raw_orders = params.get("Data")
        if raw_orders is not None:
            self.Data = []
            for entry in raw_orders:
                order = StorageOrder()
                order._deserialize(entry)
                self.Data.append(order)
        self.RequestId = params.get("RequestId")
class CreateTraceIdsRequest(AbstractModel):
    """Request payload of the CreateTraceIds API.

    :param Tids: List of device TIDs.
    :type Tids: list of str
    """

    def __init__(self):
        self.Tids = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("Tids",):
            setattr(self, field, params.get(field))
class CreateTraceIdsResponse(AbstractModel):
    """Response payload of the CreateTraceIds API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class CreateUploadPathRequest(AbstractModel):
    """Request payload of the CreateUploadPath API.

    :param ProductId: Product ID.
    :type ProductId: str
    :param FileName: Firmware file name.
    :type FileName: str
    """

    def __init__(self):
        self.ProductId = None
        self.FileName = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("ProductId", "FileName"):
            setattr(self, field, params.get(field))
class CreateUploadPathResponse(AbstractModel):
    """Response payload of the CreateUploadPath API.

    :param Data: Firmware upload URL; the local firmware file can be
        uploaded to it with an HTTP PUT request.
        Note: this field may return null, meaning no valid value.
    :type Data: str
    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.Data = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("Data", "RequestId"):
            setattr(self, field, params.get(field))
class CreateUsrTokenRequest(AbstractModel):
    """Request payload of the CreateUsrToken API.

    :param AccessId: End user's unique ID on the IoT Video platform.
    :type AccessId: str
    :param UniqueId: Unique terminal ID, distinguishing multiple terminals
        of the same user.
    :type UniqueId: str
    :param TtlMinutes: Token TTL (time to live) in minutes.
    :type TtlMinutes: int
    """

    def __init__(self):
        self.AccessId = None
        self.UniqueId = None
        self.TtlMinutes = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("AccessId", "UniqueId", "TtlMinutes"):
            setattr(self, field, params.get(field))
class CreateUsrTokenResponse(AbstractModel):
    """Response payload of the CreateUsrToken API.

    :param AccessId: End user's unique ID on the IoT Video platform.
    :type AccessId: str
    :param AccessToken: AccessToken issued by the IoT Video platform.
    :type AccessToken: str
    :param ExpireTime: Token expiry time in seconds (UTC).
    :type ExpireTime: int
    :param TerminalId: Terminal ID.
    :type TerminalId: str
    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        for field in ("AccessId", "AccessToken", "ExpireTime",
                      "TerminalId", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("AccessId", "AccessToken", "ExpireTime",
                      "TerminalId", "RequestId"):
            setattr(self, field, params.get(field))
class DeleteAppUsrRequest(AbstractModel):
    """Request payload of the DeleteAppUsr API.

    :param AccessId: Customer's end user unique ID on the IoT Video platform.
    :type AccessId: str
    """

    def __init__(self):
        self.AccessId = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("AccessId",):
            setattr(self, field, params.get(field))
class DeleteAppUsrResponse(AbstractModel):
    """Response payload of the DeleteAppUsr API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteBindingRequest(AbstractModel):
    """Request payload of the DeleteBinding API.

    :param AccessId: End user's unique ID on the IoT Video platform.
    :type AccessId: str
    :param Tid: Device TID.
    :type Tid: str
    :param Role: User role; "owner" for the device owner, "guest" for a visitor.
    :type Role: str
    """

    def __init__(self):
        self.AccessId = None
        self.Tid = None
        self.Role = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("AccessId", "Tid", "Role"):
            setattr(self, field, params.get(field))
class DeleteBindingResponse(AbstractModel):
    """Response payload of the DeleteBinding API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteDeviceRequest(AbstractModel):
    """Request payload of the DeleteDevice API.

    :param Tids: List of device TIDs.
    :type Tids: list of str
    """

    def __init__(self):
        self.Tids = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("Tids",):
            setattr(self, field, params.get(field))
class DeleteDeviceResponse(AbstractModel):
    """Response payload of the DeleteDevice API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteIotDataTypeRequest(AbstractModel):
    """Request payload of the DeleteIotDataType API.

    :param TypeId: Identifier of the custom data type.
    :type TypeId: str
    """

    def __init__(self):
        self.TypeId = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("TypeId",):
            setattr(self, field, params.get(field))
class DeleteIotDataTypeResponse(AbstractModel):
    """Response payload of the DeleteIotDataType API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteMessageQueueRequest(AbstractModel):
    """Request payload of the DeleteMessageQueue API.

    :param ProductId: Product ID.
    :type ProductId: str
    """

    def __init__(self):
        self.ProductId = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("ProductId",):
            setattr(self, field, params.get(field))
class DeleteMessageQueueResponse(AbstractModel):
    """Response payload of the DeleteMessageQueue API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteOtaVersionRequest(AbstractModel):
    """Request payload of the DeleteOtaVersion API.

    :param ProductId: Product ID.
    :type ProductId: str
    :param OtaVersion: Firmware version number, formatted as x.y.z where
        x and y range over 0-63 and z over 1-524288.
    :type OtaVersion: str
    :param Operator: Operator performing the deletion.
    :type Operator: str
    """

    def __init__(self):
        self.ProductId = None
        self.OtaVersion = None
        self.Operator = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("ProductId", "OtaVersion", "Operator"):
            setattr(self, field, params.get(field))
class DeleteOtaVersionResponse(AbstractModel):
    """Response payload of the DeleteOtaVersion API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteProductRequest(AbstractModel):
    """Request payload of the DeleteProduct API.

    :param ProductId: Product ID.
    :type ProductId: str
    """

    def __init__(self):
        self.ProductId = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("ProductId",):
            setattr(self, field, params.get(field))
class DeleteProductResponse(AbstractModel):
    """Response payload of the DeleteProduct API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeleteTraceIdsRequest(AbstractModel):
    """Request payload of the DeleteTraceIds API.

    :param Tids: List of device TIDs.
    :type Tids: list of str
    """

    def __init__(self):
        self.Tids = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("Tids",):
            setattr(self, field, params.get(field))
class DeleteTraceIdsResponse(AbstractModel):
    """Response payload of the DeleteTraceIds API.

    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        self.RequestId = None

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("RequestId",):
            setattr(self, field, params.get(field))
class DeliverStorageServiceRequest(AbstractModel):
    """Request payload of the DeliverStorageService API.

    :param SrcServiceId: ID of the source cloud storage service to transfer.
    :type SrcServiceId: str
    :param Tid: Device TID.
    :type Tid: str
    :param ChnNum: Video stream channel number (matches the device's actual
        channel for multi-stream devices such as NVRs).
    :type ChnNum: int
    :param AccessId: Device owner's registration ID on the IoT Video
        platform, used to verify that the PaaS/SaaS device/user relation
        chains are consistent.
    :type AccessId: str
    """

    def __init__(self):
        for field in ("SrcServiceId", "Tid", "ChnNum", "AccessId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("SrcServiceId", "Tid", "ChnNum", "AccessId"):
            setattr(self, field, params.get(field))
class DeliverStorageServiceResponse(AbstractModel):
    """Response payload of the DeliverStorageService API.

    :param SrcServiceId: ID of the cloud storage service transferred out.
    :type SrcServiceId: str
    :param ServiceId: ID of the cloud storage service transferred in.
    :type ServiceId: str
    :param StorageRegion: Region hosting the cloud storage service.
    :type StorageRegion: str
    :param Tid: Device TID.
    :type Tid: str
    :param ChnNum: Video stream channel number (matches the device's actual
        channel for multi-stream devices such as NVRs).
    :type ChnNum: int
    :param AccessId: End user's registration ID on the IoT Video platform.
    :type AccessId: str
    :param StartTime: Service start time.
    :type StartTime: int
    :param EndTime: Service expiry time.
    :type EndTime: int
    :param Status: Service status.
    :type Status: int
    :param Data: List of newly created cloud storage orders.
    :type Data: list of StorageOrder
    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        # All fields start unset; _deserialize fills them from the API reply.
        for field in ("SrcServiceId", "ServiceId", "StorageRegion", "Tid",
                      "ChnNum", "AccessId", "StartTime", "EndTime",
                      "Status", "Data", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Plain scalar fields are copied verbatim from the raw mapping.
        for field in ("SrcServiceId", "ServiceId", "StorageRegion", "Tid",
                      "ChnNum", "AccessId", "StartTime", "EndTime",
                      "Status"):
            setattr(self, field, params.get(field))
        # Each raw order entry is expanded into a StorageOrder model.
        raw_orders = params.get("Data")
        if raw_orders is not None:
            self.Data = []
            for entry in raw_orders:
                order = StorageOrder()
                order._deserialize(entry)
                self.Data.append(order)
        self.RequestId = params.get("RequestId")
class DescribeAccountBalanceRequest(AbstractModel):
    """Request payload of the DescribeAccountBalance API.

    :param AccountType: Account type; 1 = device access, 2 = cloud storage.
    :type AccountType: int
    """

    def __init__(self):
        self.AccountType = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("AccountType",):
            setattr(self, field, params.get(field))
class DescribeAccountBalanceResponse(AbstractModel):
    """Response payload of the DescribeAccountBalance API.

    :param AccountType: Account type; 1 = device access, 2 = cloud storage.
        Note: this field may return null, meaning no valid value.
    :type AccountType: int
    :param Balance: Balance in cents (CNY).
        Note: this field may return null, meaning no valid value.
    :type Balance: int
    :param State: Account state; 1 = normal, 8 = frozen, 9 = closed.
        Note: this field may return null, meaning no valid value.
    :type State: int
    :param LastUpdateTime: Last modification time, UTC value.
        Note: this field may return null, meaning no valid value.
    :type LastUpdateTime: int
    :param RequestId: Unique request ID, returned with every response;
        needed when reporting an issue with this request.
    :type RequestId: str
    """

    def __init__(self):
        for field in ("AccountType", "Balance", "State",
                      "LastUpdateTime", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Copy every known field from the raw response mapping.
        for field in ("AccountType", "Balance", "State",
                      "LastUpdateTime", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeBindDevRequest(AbstractModel):
    """Request payload of the DescribeBindDev API.

    :param AccessId: End user's unique ID on the IoT Video platform.
    :type AccessId: str
    """

    def __init__(self):
        self.AccessId = None

    def _deserialize(self, params):
        # Copy every known field from the raw request mapping.
        for field in ("AccessId",):
            setattr(self, field, params.get(field))
class DescribeBindDevResponse(AbstractModel):
"""DescribeBindDev返回参数结构体
"""
def __init__(self):
"""
:param Data: 绑定的设备列表信息
注意:此字段可能返回 null,表示取不到有效值。
:type Data: list of BindDevInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: | |
1
## Have to move 1 in the Y direction which is the same as z_num
back_plane_top_left_idx = z_proj + z_num
back_plane_bot_left_idx = back_plane_top_left_idx + 1
## Have to move 1 in the X direction which is the same as z_num*y_num
front_plane_top_right_idx = z_proj + y_num*z_num
front_plane_bot_right_idx = front_plane_top_right_idx + 1
## Have to move 1 in the y direction which is the same as z_num
back_plane_top_right_idx = front_plane_top_right_idx + z_num
back_plane_bot_right_idx = back_plane_top_right_idx + 1
#### Now project over the Y direction
y_proj = np.arange(0,y_num-1)[:,None]*(z_num)
front_plane_top_left_idx = front_plane_top_left_idx + y_proj
front_plane_bot_left_idx = front_plane_bot_left_idx+ y_proj
back_plane_top_left_idx = back_plane_top_left_idx+ y_proj
back_plane_bot_left_idx = back_plane_bot_left_idx+ y_proj
front_plane_top_right_idx = front_plane_top_right_idx+ y_proj
front_plane_bot_right_idx = front_plane_bot_right_idx+ y_proj
back_plane_top_right_idx = back_plane_top_right_idx+ y_proj
back_plane_bot_right_idx = back_plane_bot_right_idx+ y_proj
#### Lastly project in X direction
x_proj = np.arange(0,x_num-1)[:,None,None]*(y_num*z_num)
front_plane_top_left_idx = front_plane_top_left_idx + x_proj
front_plane_bot_left_idx = front_plane_bot_left_idx + x_proj
back_plane_top_left_idx = back_plane_top_left_idx + x_proj
back_plane_bot_left_idx = back_plane_bot_left_idx + x_proj
front_plane_top_right_idx = front_plane_top_right_idx + x_proj
front_plane_bot_right_idx = front_plane_bot_right_idx + x_proj
back_plane_top_right_idx = back_plane_top_right_idx + x_proj
back_plane_bot_right_idx = back_plane_bot_right_idx + x_proj
#
voxel_idx = np.c_[front_plane_top_left_idx.ravel(),
front_plane_bot_left_idx.ravel(),
back_plane_bot_left_idx.ravel(),
back_plane_top_left_idx.ravel(),
front_plane_top_right_idx.ravel(),
front_plane_bot_right_idx.ravel(),
back_plane_bot_right_idx.ravel(),
back_plane_top_right_idx.ravel(),
]
voxel_mask = np.take(volume, voxel_idx)
voxel_sum = np.sum(voxel_mask, axis=-1)
voxel_surface_vertex_idx = np.where(np.logical_and(voxel_sum != 0,
voxel_sum != 8))[0]
self.full_voxels = np.where(voxel_sum == 8)[0]
## Get only the non-zero points on the surface for visualization
surface_vertex_idx = voxel_idx[voxel_surface_vertex_idx][
voxel_mask[voxel_surface_vertex_idx].astype(bool)]
surface_vertex = grid_point_reference[surface_vertex_idx]
## Get the voxels that correspond to the surface of the molecule
surface_voxel = voxel_mask[voxel_surface_vertex_idx].astype(int)
## Get corresponding grid_point_reference idx for each of the surface voxel
## verticies
surface_voxel_vert = voxel_idx[voxel_surface_vertex_idx]
voxel_coords = []
cube_coords = []
coords = []
triangles = []
total_volume = self.full_voxels.shape[0]*self.spacing*self.spacing*self.spacing
# print("BEFORE LOOP: {}".format(time.time() - start))
proj_total_time = 0
inner_loop_time = 0
radius_loop_time = 0
for idx,entry in enumerate(surface_voxel):
### Get Cartesian Coordinates index
temp_ref_idx = surface_voxel_vert[idx]
### Get populated coordinates
voxel_coords.append(grid_point_reference[
temp_ref_idx[entry.astype(bool)]])
### Get Cart Cube vertex and edges
temp_vertices = grid_point_reference[temp_ref_idx]
temp_edges = compute_edge_sites(temp_vertices)
inner_loop_start = time.time()
### Performing projections onto sphere surfaces for each edge point
for edge_idx,edge in enumerate(temp_edges):
rad_loop_start = time.time()
### Project onto surface of each sphere present
temp_projected_edge_list = []
temp_projected_centers = []
### First choose relevant spheres
edge_to_center = np.linalg.norm(edge - self.centers, axis=-1)
edge_to_center_inside = edge_to_center - self.radii
proj_sphere_idx = np.where(np.abs(edge_to_center_inside) <=
(self.spacing*2))[0]
for r_idx in proj_sphere_idx:
## Also, need center of the atom for proper projection
temp_center = self.centers[r_idx]
temp_projected_centers.append(temp_center)
radius = self.radii[r_idx]
proj_edge_start = time.time()
## Get the projected edge for this sphere
# temp_proj_edge = self.proj_edge(edge,
# edge_idx,
# temp_vertices,
# radius,
# temp_center)
temp_proj_edge = numba_proj_edge(edge,
edge_idx,
temp_vertices,
radius,
temp_center)
proj_total_time += time.time() - proj_edge_start
## If there was no change, do not append
if np.linalg.norm(temp_proj_edge - edge) < 1e-6:
continue
## Append
temp_projected_edge_list.append(temp_proj_edge)
## Let's see if this problem can be solved in a different way
if len(temp_projected_edge_list) == 0:
continue
elif len(temp_projected_edge_list) == 1:
choice_idx = 0
else:
cdist_distances = cdist(temp_projected_edge_list,
temp_projected_centers)
## Choose the one that maximizes distances
cdist_sum = np.sum(cdist_distances,axis=-1)
choice_idx = np.argmax(cdist_sum)
### Hard code for now because only interested in testing for one sphere
temp_edges[edge_idx] = temp_projected_edge_list[choice_idx]
inner_loop_time += time.time() - inner_loop_start
### Get the tri_idx for this surface voxel
triangles_bool = tri_connectivity[tostring(entry)].astype(bool)
array_to_mask = np.repeat(np.arange(0,12)[None,:],
triangles_bool.shape[0],
axis=0)
tri_idx = array_to_mask[triangles_bool].reshape(-1,3)
### Build triangles for grid point reference
tri_idx = tri_idx + len(coords)*12
### Save results for plotting
cube_coords.append(temp_vertices)
coords.append(temp_edges)
triangles.append(tri_idx)
## Compute volume with the projected edges
total_volume += get_volume(entry, temp_vertices, temp_edges)
### For debugging purposes
self.o_voxel_coords = voxel_coords.copy()
self.o_cube_coords = cube_coords.copy()
self.o_coords = coords.copy()
self.o_triangles = triangles.copy()
self.surface_voxel = surface_voxel
self.surface_voxel_vert = surface_voxel_vert
voxel_coords = np.vstack(voxel_coords)
cube_coords = np.vstack(cube_coords)
coords = np.vstack(coords)
triangles = np.vstack(triangles)
# print("AFTER LOOP: {}".format(time.time() - start))
# print("PROJ TOTAL TIME: {}".format(proj_total_time))
# print("INNER LOOP TIME: {}".format(inner_loop_time))
# print("RADIUS LOOP TIME: {}".format(radius_loop_time))
return total_volume,voxel_coords,cube_coords,coords,triangles
def proj_edge(self, edge, edge_idx, vertices, radius, center):
x = edge[0]
y = edge[1]
z = edge[2]
a = center[0]
b = center[1]
c = center[2]
## Each edge idx only has one degree of freedom to project onto surface
if edge_idx == 0:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 1:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 2:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 3:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 4:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 5:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 6:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 7:
## X
proj2 = radius*radius - np.square(z-c) - np.square(y-b)
proj_dir_value = x
proj_dir_center = a
original = x
elif edge_idx == 8:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 9:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
elif edge_idx == 10:
## Z
proj2 = radius*radius - np.square(x-a) - np.square(y-b)
proj_dir_value = z
proj_dir_center = c
original = z
elif edge_idx == 11:
## Y
proj2 = radius*radius - np.square(x-a) - np.square(z-c)
proj_dir_value = y
proj_dir_center = b
original = y
if proj2 < 0:
proj2 = proj2*-1
proj = np.sqrt(proj2)
### 20200429 Fix decision function
temp_pos_dir = np.linalg.norm((proj + proj_dir_center) - proj_dir_value)
temp_neg_dir = np.linalg.norm((-proj + proj_dir_center) - proj_dir_value)
if temp_neg_dir < temp_pos_dir:
proj = proj*-1 + proj_dir_center
else:
proj = proj + proj_dir_center
## Check if projection is within the spacing of the grid.
## If it's outside, then this cannot be a valid projection.
## And the value is set back to original edge position.
if edge_idx == 0:
## Z, 0,1
if proj < vertices[0][2]:
proj = z
elif proj > vertices[1][2]:
proj = z
elif edge_idx == 1:
if proj < vertices[0][1]:
proj = y
elif proj > vertices[3][1]:
proj = y
elif edge_idx == 2:
## Z 2,3
if proj < vertices[3][2]:
proj = z
elif proj > vertices[2][2]:
proj = z
elif edge_idx == 3:
if proj < vertices[1][1]:
proj = y
elif proj > vertices[2][1]:
proj = y
elif edge_idx == 4:
## X 0,4
if proj < vertices[0][0]:
proj = x
elif proj > vertices[4][0]:
proj = x
elif edge_idx == 5:
## X 3,7
if proj < vertices[3][0]:
proj = x
elif proj > vertices[7][0]:
proj = x
elif edge_idx == 6:
## X 2,6
if proj < vertices[2][0]:
proj = x
elif proj > vertices[6][0]:
proj = x
elif edge_idx == 7:
## X, 1,5
if proj < vertices[1][0]:
proj = x
elif proj > vertices[5][0]:
proj = x
elif edge_idx == 8:
## Z, 4.5
if proj < vertices[4][2]:
proj = z
elif proj > vertices[5][2]:
proj = z
elif edge_idx == 9:
## Y 4,7
if proj < vertices[4][1]:
proj = y
elif proj > vertices[7][1]:
proj = y
elif edge_idx == 10:
## Z, 6,7
if proj < vertices[7][2]:
proj = z
elif proj > vertices[6][2]:
proj = z
elif edge_idx == 11:
## Y, 5,6
if proj | |
<reponame>tioui/Vim_Eiffel_IDE
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import environment_vim as environment
import eiffel_ide
import string
def get_class_from_buffer(a_project):
    """Return the name of the class edited in the current buffer.

    In the tools buffer, the class name is read from the buffer-local
    `eiffel_tools_buffer_class` variable; in any other buffer it is
    extracted from the buffer text via the project.

    a_project: the currently opened Eiffel project.
    Return: the class name, or "" when it cannot be determined.
    """
    if environment.evaluate("bufname('%')") ==\
            environment.get_global_variable("eiffel_tools_buffer_name"):
        try:
            l_class =\
                environment.get_buffer_variable("eiffel_tools_buffer_class")
        except Exception:
            # The buffer variable may not be set yet; fall back to "".
            # (Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.)
            l_class = ""
    else:
        l_buffer_text = environment.buffer_to_text()
        l_class = a_project.class_name_from_text(l_buffer_text)
    return l_class
def set_class_and_info(a_info_name, a_class_name):
    """Record the tools buffer information type and class name.

    a_info_name: label of the information shown in the tools buffer.
    a_class_name: name of the class the tools buffer refers to.
    """
    for l_variable, l_value in (
            ("eiffel_tools_buffer_info_type", a_info_name),
            ("eiffel_tools_buffer_class", a_class_name)):
        environment.set_buffer_variable(l_variable, l_value)
def unset_class_and_info():
    """Clear the tools buffer information type and class name."""
    for l_variable in ("eiffel_tools_buffer_info_type",
                       "eiffel_tools_buffer_class"):
        environment.set_buffer_variable(l_variable, None)
def class_execute(a_project, a_name, a_routine, a_class_name=None):
    """Shared driver for every class-information view.

    Resolves the target class name, then launches the process that fills
    the tools buffer with the requested information.

    a_project: the currently opened Eiffel project.
    a_name: name of the functionality (text printed in the status bar).
    a_routine: callable(class_name, window) that gets/prints the info.
    a_class_name: optional explicit class name; "%" means "the class of
        the current buffer".
    Return: None
    """
    if a_class_name:
        l_class = a_class_name
        # "%" mirrors Vim's convention for "the current buffer".
        if l_class == "%":
            l_class = get_class_from_buffer(a_project)
    else:
        # No explicit name: try the word under the cursor first, then
        # fall back to the class of the current buffer.
        l_class = environment.word_under_the_cursor()
        if not l_class:
            l_class = get_class_from_buffer(a_project)
    if l_class:
        eiffel_ide.launch_process(a_project,
                                  lambda window: a_routine(l_class, window),
                                  "Getting " + a_name.lower() + " of class " +
                                  l_class, a_name + " of class " + l_class,
                                  False, True,
                                  lambda: set_class_and_info(a_name, l_class))
        # The tools buffer content is Eiffel code: enable its filetype.
        environment.execute("setlocal filetype=eiffel")
def flat(a_project, *arguments):
    """Show the flat view: every feature of the class, both written-in
    and inherited.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/class-formatters-flat-view
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Flat view",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_flat(a_class, a_buffer),
                  l_target)
    environment.eiffel_fold()
def ancestors(a_project, *arguments):
    """Show every class the current class inherits from, directly or
    not, as a tree-like indented layout.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/ancestors
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Ancestors",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_ancestors(a_class, a_buffer),
                  l_target)
def attributes(a_project, *arguments):
    """Show every attribute of the class, inherited ones included.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/attributes
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Attributes",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_attributes(a_class, a_buffer),
                  l_target)
def clients(a_project, *arguments):
    """Show every class that uses features of the current class and so
    relies on its interface.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/clients
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Clients",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_clients(a_class, a_buffer),
                  l_target)
def deferred(a_project, *arguments):
    """Show every deferred feature of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/deferred-features
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Deferred features",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_deferred(a_class, a_buffer),
                  l_target)
def descendants(a_project, *arguments):
    """Show every class that inherits from the current class, directly
    or not, as a tree-like indented layout.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/descendants
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Descendants",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_descendants(a_class, a_buffer),
                  l_target)
def exported(a_project, *arguments):
    """Show every feature of the class that any other class may call.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/exported-features
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Exported features",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_exported(a_class, a_buffer),
                  l_target)
def externals(a_project, *arguments):
    """Show every external feature of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/class-formatters-external-features
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "External features",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_externals(a_class, a_buffer),
                  l_target)
def flatshort(a_project, *arguments):
    """Show the flat contract view: the contracts of every written-in
    and inherited feature of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/flat-contract-view
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Flat contract view",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_flatshort(a_class, a_buffer),
                  l_target)
    environment.eiffel_fold()
def once(a_project, *arguments):
    """Show every once routine and constant attribute of the class (or
    of its ancestors).

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/once-routines-and-constants
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Once features",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_once(a_class, a_buffer),
                  l_target)
def invariants(a_project, *arguments):
    """Show every invariant of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/invariants
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Invariants",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_invariants(a_class, a_buffer),
                  l_target)
def routines(a_project, *arguments):
    """Show every routine signature of the class, inherited routines
    included.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/routines
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Routines",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_routines(a_class, a_buffer),
                  l_target)
def creators(a_project, *arguments):
    """Show every creation procedure signature of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/creators
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Creators",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_creators(a_class, a_buffer),
                  l_target)
def short(a_project, *arguments):
    """Show the contract view: the contracts of every written-in feature
    of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/contract-view
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Contract View",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_short(a_class, a_buffer),
                  l_target)
    environment.eiffel_fold()
def suppliers(a_project, *arguments):
    """Show every class whose features the current class calls.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/suppliers
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Suppliers",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_suppliers(a_class, a_buffer),
                  l_target)
def text(a_project, *arguments):
    """Show the original source text of the class.

    a_project: the currently opened Eiffel project.
    arguments: optionally holds the class name as its first element.
    See: https://docs.eiffel.com/book/eiffelstudio/class-formatters-basic-text-view
    """
    l_target = arguments[0] if arguments else None
    class_execute(a_project, "Text View",
                  lambda a_class, a_buffer:
                      a_project.fetch_class_text(a_class, a_buffer),
                  l_target)
    environment.eiffel_fold()
def _edit_command_and_flag(is_split, is_vertical, is_tab, force_edit):
"""
Return the command and flags to | |
<reponame>olivermfacon/premier-league-app
import os
import json
import http.client
import requests
from mailjet_rest import Client
from dotenv import load_dotenv
from oddscalculator import *
from fractions import Fraction
def club_colors(selected_team_id):
    """Return the two display colors of the team with id `selected_team_id`.

    Queries the football-data API for the club's colors string (e.g.
    "Red / White"), splits it into [primary, secondary], maps each part
    down to a basic color name when one is contained in it, then applies
    hand-tuned overrides for a few clubs.

    Param: selected_team_id: API id of the team.
    Returns: list of two color strings (names or hex codes).
    """
    basic_colors = ["Red", "Blue", "Green", "Yellow"]
    connection.request('GET', f'/v2/teams/{selected_team_id}', None, headers)
    response = json.loads(connection.getresponse().read().decode())
    color = response["clubColors"]
    # Split "Primary / Secondary" at the first " / " separator.  The
    # original hand-rolled index scan crashed with IndexError whenever
    # the API returned a single color; reuse the only color instead.
    primary, sep, secondary = color.partition(' / ')
    if not sep:
        secondary = primary
    colors = [primary, secondary]
    # Reduce composite names (e.g. "Sky Blue") to the basic color they
    # contain, when any.
    for basic in basic_colors:
        if basic in colors[0]:
            colors[0] = basic
        if basic in colors[1]:
            colors[1] = basic
    # Club-specific overrides where the generic mapping looks wrong.
    if response["name"] == "Manchester City FC":
        colors[0] = "#1CC6E8"
    elif response["name"] == "Wolverhampton Wanderers FC":
        colors[1] = "#FDB913"
    elif response["name"] == "West Ham United FC":
        colors[1] = "#7A263A"
        colors[0] = "#1BB1E7"
    elif response["name"] == "Southampton FC":
        colors[0] = "Red"
        colors[1] = "White"
    elif response["name"] == "Burnley FC":
        colors[0] = "#6C1D45"
        colors[1] = "#99D6EA"
    return colors
def format_date(match_date):
    """Trim an ISO timestamp such as "2019-08-11T13:00:00Z" down to its
    date part.

    Param: match_date (str) like "2019-08-11T13:00:00Z"
    Returns: the first ten characters, e.g. "2019-08-11"
    """
    return match_date[:10]
def get_menu_option():
    """Print the main menu and return the user's raw choice string."""
    options = (
        "View their next 5 fixtures...",
        "View their last 5 fixtures...",
        "View their entire current season...",
        "View their position in the table...",
        "View the club roster...",
        "View season statistics...",
        "View team information...",
        "Sign up to your club's weekly newsletter...",
        "Calculate odds on next game...",
    )
    for number, option in enumerate(options, start=1):
        print(f"{number}. {option}")
    print()
    return input("CHOOSE AN OPTION BELOW BY ENTERING THE MENU NUMBER OR ENTER 'DONE' ONCE YOU ARE FINISHED: ")
def match_info(match, requested_team):
    """Summarize one match as [home_name, away_name, outcome].

    The winning side's name is upper-cased; the outcome is "WIN"/"LOSS"
    from the point of view of `requested_team` (expected upper-cased),
    or "DRAW" when there is no winner.

    Param: match (dict): one match entry from the API.
    Param: requested_team (str): upper-cased team name.
    Returns: list of three strings.
    """
    home = match["homeTeam"]["name"]
    away = match["awayTeam"]["name"]
    winner = match["score"]["winner"]
    if winner == "HOME_TEAM":
        summary = [home.upper(), away]
        summary.append("WIN" if summary[0] == requested_team else "LOSS")
    elif winner == "AWAY_TEAM":
        summary = [home, away.upper()]
        summary.append("WIN" if summary[1] == requested_team else "LOSS")
    else:
        summary = [home, away, "DRAW"]
    return summary
def next_five(matches, status, purpose):
    """Display (console) or collect (email) the next five scheduled games.

    If fewer than five games remain, only the remaining games are used.

    Params:
        matches (list): upcoming match dicts for the requested team
        status (str): match status value such as "POSTPONED"
        purpose (str): "console" to print, "email" to return the lines
    Returns: list of formatted lines (populated only for "email").
    """
    next_content = []
    if purpose == "console":
        print()
        print("------------------------------")
        print("THE NEXT FIVE SCHEDULED GAMES:")
        print("------------------------------")
        print()
    x = 0
    # Bound by the number of matches actually available: the original
    # unconditionally indexed matches[0..4] and raised IndexError when
    # fewer than five games remained.
    while x < 5 and x < len(matches):
        match_date = format_date(matches[x]["utcDate"])
        # Local renamed from `match_info`, which shadowed the
        # module-level match_info() helper.
        matchday_line = ("(" + match_date + ") Matchday " +
                        str(matches[x]["matchday"]))
        teams_line = ("\t" + matches[x]["homeTeam"]["name"] + " vs " +
                      matches[x]["awayTeam"]["name"])
        if purpose == "console":
            print(matchday_line)
            print(teams_line)
            if status == "POSTPONED" or status == "CANCELLED":
                print(status)
            print()
            if matches[x]["matchday"] == 38:
                print("---> END OF SEASON <---")
                break
        elif purpose == "email":
            next_content.append(matchday_line)
            next_content.append(teams_line)
        x += 1
    return next_content
def last_five(matches, requested_team, purpose):
    """Display (console) or collect (email) the last five completed games.

    Params:
        matches (list): finished match dicts for the requested team
        requested_team (str): upper-cased name of the requested team
        purpose (str): "console" to print, "email" to return the lines
    Returns: list of formatted lines (populated only for "email").
    """
    last_content = []
    if purpose == "console":
        print()
        print("-----------------------------")
        print("THE LAST FIVE COMPLETED GAMES")
        print("-----------------------------")
        print()
    finished_games = len(matches)
    # Never look back further than the number of finished games: the
    # original fixed `x = 5` produced negative indices (wrapping to the
    # wrong games) when fewer than five games had been played.
    x = min(5, finished_games)
    while x > 0:
        match = matches[finished_games - x]
        match_date = format_date(match["utcDate"])
        match_information = match_info(match, requested_team)
        matchday_info = ("(" + match_date + ") Matchday " +
                        str(match["matchday"]) + " - " + match_information[2])
        matchday_score = ("\t" + match_information[0] + " " +
                          str(match["score"]["fullTime"]["homeTeam"]) +
                          " vs " + match_information[1] + " " +
                          str(match["score"]["fullTime"]["awayTeam"]))
        if purpose == "console":
            print(matchday_info)
            print(matchday_score)
            print()
        elif purpose == "email":
            last_content.append(matchday_info)
            last_content.append(matchday_score)
        x -= 1
    return last_content
def whole_season(matches, requested_team):
    """Print the fixtures for the entire season: finished games first,
    then the scheduled/postponed ones under an "UPCOMING FIXTURES" banner.

    Params:
        matches (list): all match dicts for the requested team, in order
        requested_team (str): upper-cased name of the requested team
    """
    x = 0
    # Guard against an empty list before peeking at the first entry.
    if matches and matches[0]["status"] == "FINISHED":
        print()
        print("-------------")
        print("PAST FIXTURES")
        print("-------------")
        print()
    for match in matches:
        match_date = format_date(match["utcDate"])
        if match["status"] == "FINISHED":
            match_information = match_info(match, requested_team)
            print("(" + match_date + ") Matchday " + str(match["matchday"]) +
                  " - " + match_information[2])
            print("\t" + match_information[0] + " " +
                  str(match["score"]["fullTime"]["homeTeam"]) + " vs " +
                  match_information[1] + " " +
                  str(match["score"]["fullTime"]["awayTeam"]))
        else:
            print("(" + match_date + ") Matchday " + str(match["matchday"]))
            print("\t" + match["homeTeam"]["name"] + " vs " +
                  match["awayTeam"]["name"])
            if match["status"] == "POSTPONED":
                print("\t---> THIS FIXTURE IS POSTPONED <---")
            if match["status"] == "CANCELLED":
                print("\t---> THIS FIXTURE IS CANCELLED <---")
        print()
        # Banner at the finished -> upcoming boundary.  Bound x+1 by the
        # list length instead of the original `x != 37`, which assumed a
        # fixed 38-game season and indexed past the end of shorter lists.
        if (match["status"] == "FINISHED" and x + 1 < len(matches) and
                (matches[x + 1]["status"] == "SCHEDULED" or
                 matches[x + 1]["status"] == "POSTPONED")):
            print()
            print("-----------------")
            print("UPCOMING FIXTURES")
            print("-----------------")
            print()
        x += 1
    print()
def prem_table(selected_team_id, purpose):
    """Print (console) or build (email) the live Premier League table.

    The requested team's name is upper-cased so it stands out.

    Params:
        selected_team_id: API id of the requested team
        purpose (str): "console" to print; anything else returns HTML
    Returns: the table as an HTML string (empty in console mode).
    """
    table = ""
    connection.request('GET', '/v2/competitions/PL/standings', None, headers)
    response = json.loads(connection.getresponse().read().decode())
    standings = response["standings"][0]["table"]
    if purpose == "console":
        print()
        print("-----------------------------")
        print("LIVE PREMIER LEAGUE STANDINGS")
        print("-----------------------------")
        print()
    for team in standings:
        club = team["team"]["name"]
        if team["team"]["id"] == selected_team_id:
            club = club.upper()
        pos = f'{team["position"]}.\t{club} ({team["points"]}pts)'
        if purpose == "console":
            print(pos)
        else:
            table += pos + "<br/>"
    print()
    return table
def display_squad(squad):
    """Print the roster of the requested team, grouped by position.

    Params:
        squad (list): person dicts holding "name" plus either a player
            "position" or a staff "role" of "COACH".
    """
    print()
    roster = {"Goalkeeper": [], "Defender": [],
              "Midfielder": [], "Attacker": []}
    for person in squad:
        position = person["position"]
        if position in roster:
            roster[position].append(person["name"])
        elif person["role"] == "COACH":
            print("COACH: " + person["name"] + "\n")
    print("GOALKEEPERS\n")
    for name in roster["Goalkeeper"]:
        print(f"\t{name}")
    print("\nDEFENDERS\n")
    for name in roster["Defender"]:
        print(f"\t{name}")
    print("\nMIDFIELDERS\n")
    for name in roster["Midfielder"]:
        print(f"\t{name}")
    print("\nATTACKERS\n")
    for name in roster["Attacker"]:
        print(f"\t{name}")
    print()
def season_statistics(team_stats):
    """Print a season statistics summary for the requested team.

    Params:
        team_stats (dict): the team's standings entry, with keys such as
            "position", "points", "playedGames", "won", "draw", "lost",
            "goalsFor", "goalsAgainst", "goalDifference" and "team".
    """
    # (Dropped the unused `season_stats` list from the original.)
    win_percentage = (team_stats["won"] / team_stats["playedGames"]) * 100
    # Ordinal suffix for the league position (1st / 2nd / 3rd / nth).
    if team_stats["position"] == 1:
        place = "st"
    elif team_stats["position"] == 2:
        place = "nd"
    elif team_stats["position"] == 3:
        place = "rd"
    else:
        place = "th"
    print("\n" + team_stats["team"]["name"] + " Season Statistics\n")
    print("\tLeague Standing: " + str(team_stats["position"]) + place)
    print("\tPoints: " + str(team_stats["points"]) + "\n")
    print("\tGames Played: " + str(team_stats["playedGames"]))
    print("\tWins: " + str(team_stats["won"]))
    print("\tDraws: " + str(team_stats["draw"]))
    print("\tLosses: " + str(team_stats["lost"]))
    # str(...)[:5] keeps at most two decimals, e.g. "64.28%".
    print("\tWin Percentage: " + str(win_percentage)[:5] + "%\n")
    print("\tGoals Scored: " + str(team_stats["goalsFor"]))
    print("\tGoals Conceded: " + str(team_stats["goalsAgainst"]))
    print("\tGoal Difference: " + str(team_stats["goalDifference"]) + "\n")
def team_info(team, purpose):
"""
Function that displays and returns the information about the tean.
Params:
team(dict) contains all information about requested team
purpose(string) like "console"
Example of team stats displayed:
ARSENAL FC
FOUNDED: 1886
VENUE: Emirates Stadium
CLUB COLORS: | |
== 1)
# Generated Pyomo model fragment (machine-written; do not hand-edit).
# Pairing constraints: each pair of variables must sum to exactly one.
m.c1527 = Constraint(expr= m.x924 + m.x940 == 1)
m.c1528 = Constraint(expr= m.x925 + m.x941 == 1)
m.c1529 = Constraint(expr= m.x926 + m.x942 == 1)
m.c1530 = Constraint(expr= m.x927 + m.x943 == 1)
m.c1531 = Constraint(expr= m.x928 + m.x944 == 1)
m.c1532 = Constraint(expr= m.x929 + m.x945 == 1)
m.c1533 = Constraint(expr= m.x930 + m.x946 == 1)
m.c1534 = Constraint(expr= m.x931 + m.x947 == 1)
m.c1535 = Constraint(expr= m.x932 + m.x948 == 1)
m.c1536 = Constraint(expr= m.x933 + m.x949 == 1)
m.c1537 = Constraint(expr= m.x934 + m.x950 == 1)
m.c1538 = Constraint(expr= m.x951 + m.x967 == 1)
m.c1539 = Constraint(expr= m.x952 + m.x968 == 1)
m.c1540 = Constraint(expr= m.x953 + m.x969 == 1)
m.c1541 = Constraint(expr= m.x954 + m.x970 == 1)
m.c1542 = Constraint(expr= m.x955 + m.x971 == 1)
m.c1543 = Constraint(expr= m.x956 + m.x972 == 1)
m.c1544 = Constraint(expr= m.x957 + m.x973 == 1)
m.c1545 = Constraint(expr= m.x958 + m.x974 == 1)
m.c1546 = Constraint(expr= m.x959 + m.x975 == 1)
m.c1547 = Constraint(expr= m.x960 + m.x976 == 1)
m.c1548 = Constraint(expr= m.x961 + m.x977 == 1)
m.c1549 = Constraint(expr= m.x962 + m.x978 == 1)
m.c1550 = Constraint(expr= m.x963 + m.x979 == 1)
m.c1551 = Constraint(expr= m.x964 + m.x980 == 1)
m.c1552 = Constraint(expr= m.x965 + m.x981 == 1)
m.c1553 = Constraint(expr= m.x966 + m.x982 == 1)
# Upper-bound constraints written in >= form: -x_i - x_j >= -K is
# equivalent to x_i + x_j <= K, for per-group capacities K of 255, 546,
# 1331 and 1400 respectively.
m.c1554 = Constraint(expr= - m.x311 - m.x327 >= -255)
m.c1555 = Constraint(expr= - m.x312 - m.x328 >= -255)
m.c1556 = Constraint(expr= - m.x313 - m.x329 >= -255)
m.c1557 = Constraint(expr= - m.x314 - m.x330 >= -255)
m.c1558 = Constraint(expr= - m.x315 - m.x331 >= -255)
m.c1559 = Constraint(expr= - m.x316 - m.x332 >= -255)
m.c1560 = Constraint(expr= - m.x317 - m.x333 >= -255)
m.c1561 = Constraint(expr= - m.x318 - m.x334 >= -255)
m.c1562 = Constraint(expr= - m.x319 - m.x335 >= -255)
m.c1563 = Constraint(expr= - m.x320 - m.x336 >= -255)
m.c1564 = Constraint(expr= - m.x321 - m.x337 >= -255)
m.c1565 = Constraint(expr= - m.x322 - m.x338 >= -255)
m.c1566 = Constraint(expr= - m.x323 - m.x339 >= -255)
m.c1567 = Constraint(expr= - m.x324 - m.x340 >= -255)
m.c1568 = Constraint(expr= - m.x325 - m.x341 >= -255)
m.c1569 = Constraint(expr= - m.x326 - m.x342 >= -255)
m.c1570 = Constraint(expr= - m.x343 - m.x359 >= -546)
m.c1571 = Constraint(expr= - m.x344 - m.x360 >= -546)
m.c1572 = Constraint(expr= - m.x345 - m.x361 >= -546)
m.c1573 = Constraint(expr= - m.x346 - m.x362 >= -546)
m.c1574 = Constraint(expr= - m.x347 - m.x363 >= -546)
m.c1575 = Constraint(expr= - m.x348 - m.x364 >= -546)
m.c1576 = Constraint(expr= - m.x349 - m.x365 >= -546)
m.c1577 = Constraint(expr= - m.x350 - m.x366 >= -546)
m.c1578 = Constraint(expr= - m.x351 - m.x367 >= -546)
m.c1579 = Constraint(expr= - m.x352 - m.x368 >= -546)
m.c1580 = Constraint(expr= - m.x353 - m.x369 >= -546)
m.c1581 = Constraint(expr= - m.x354 - m.x370 >= -546)
m.c1582 = Constraint(expr= - m.x355 - m.x371 >= -546)
m.c1583 = Constraint(expr= - m.x356 - m.x372 >= -546)
m.c1584 = Constraint(expr= - m.x357 - m.x373 >= -546)
m.c1585 = Constraint(expr= - m.x358 - m.x374 >= -546)
m.c1586 = Constraint(expr= - m.x375 - m.x391 >= -1331)
m.c1587 = Constraint(expr= - m.x376 - m.x392 >= -1331)
m.c1588 = Constraint(expr= - m.x377 - m.x393 >= -1331)
m.c1589 = Constraint(expr= - m.x378 - m.x394 >= -1331)
m.c1590 = Constraint(expr= - m.x379 - m.x395 >= -1331)
m.c1591 = Constraint(expr= - m.x380 - m.x396 >= -1331)
m.c1592 = Constraint(expr= - m.x381 - m.x397 >= -1331)
m.c1593 = Constraint(expr= - m.x382 - m.x398 >= -1331)
m.c1594 = Constraint(expr= - m.x383 - m.x399 >= -1331)
m.c1595 = Constraint(expr= - m.x384 - m.x400 >= -1331)
m.c1596 = Constraint(expr= - m.x385 - m.x401 >= -1331)
m.c1597 = Constraint(expr= - m.x386 - m.x402 >= -1331)
m.c1598 = Constraint(expr= - m.x387 - m.x403 >= -1331)
m.c1599 = Constraint(expr= - m.x388 - m.x404 >= -1331)
m.c1600 = Constraint(expr= - m.x389 - m.x405 >= -1331)
m.c1601 = Constraint(expr= - m.x390 - m.x406 >= -1331)
m.c1602 = Constraint(expr= - m.x599 - m.x615 >= -1400)
m.c1603 = Constraint(expr= - m.x600 - m.x616 >= -1400)
m.c1604 = Constraint(expr= - m.x601 - m.x617 >= -1400)
m.c1605 = Constraint(expr= - m.x602 - m.x618 >= -1400)
m.c1606 = Constraint(expr= - m.x603 - m.x619 >= -1400)
m.c1607 = Constraint(expr= - m.x604 - m.x620 >= -1400)
m.c1608 = Constraint(expr= - m.x605 - m.x621 >= -1400)
m.c1609 = Constraint(expr= - m.x606 - m.x622 >= -1400)
m.c1610 = Constraint(expr= - m.x607 - m.x623 >= -1400)
m.c1611 = Constraint(expr= - m.x608 - m.x624 >= -1400)
m.c1612 = Constraint(expr= - m.x609 - m.x625 >= -1400)
m.c1613 = Constraint(expr= - m.x610 - m.x626 >= -1400)
m.c1614 = Constraint(expr= - m.x611 - m.x627 >= -1400)
m.c1615 = Constraint(expr= - m.x612 - m.x628 >= -1400)
m.c1616 = Constraint(expr= - m.x613 - m.x629 >= -1400)
m.c1617 = Constraint(expr= - m.x614 - m.x630 >= -1400)
# Indicator-linked (big-M-shaped) bounds: K*b - x_i - x_j >= 0 caps each
# pair sum at K when b == 1 and at 0 when b == 0.  (If the x's are
# non-negative — their domains are declared elsewhere in this file —
# b == 0 forces the pair to zero.)  Coefficients per b-variable:
# b25 -> 600, b26 -> 1509, b27 -> 2200, b28 -> 900.
m.c1618 = Constraint(expr= 600*m.b25 - m.x407 - m.x423 >= 0)
m.c1619 = Constraint(expr= 600*m.b25 - m.x408 - m.x424 >= 0)
m.c1620 = Constraint(expr= 600*m.b25 - m.x409 - m.x425 >= 0)
m.c1621 = Constraint(expr= 600*m.b25 - m.x410 - m.x426 >= 0)
m.c1622 = Constraint(expr= 600*m.b25 - m.x411 - m.x427 >= 0)
m.c1623 = Constraint(expr= 600*m.b25 - m.x412 - m.x428 >= 0)
m.c1624 = Constraint(expr= 600*m.b25 - m.x413 - m.x429 >= 0)
m.c1625 = Constraint(expr= 600*m.b25 - m.x414 - m.x430 >= 0)
m.c1626 = Constraint(expr= 600*m.b25 - m.x415 - m.x431 >= 0)
m.c1627 = Constraint(expr= 600*m.b25 - m.x416 - m.x432 >= 0)
m.c1628 = Constraint(expr= 600*m.b25 - m.x417 - m.x433 >= 0)
m.c1629 = Constraint(expr= 600*m.b25 - m.x418 - m.x434 >= 0)
m.c1630 = Constraint(expr= 600*m.b25 - m.x419 - m.x435 >= 0)
m.c1631 = Constraint(expr= 600*m.b25 - m.x420 - m.x436 >= 0)
m.c1632 = Constraint(expr= 600*m.b25 - m.x421 - m.x437 >= 0)
m.c1633 = Constraint(expr= 600*m.b25 - m.x422 - m.x438 >= 0)
m.c1634 = Constraint(expr= 1509*m.b26 - m.x439 - m.x455 >= 0)
m.c1635 = Constraint(expr= 1509*m.b26 - m.x440 - m.x456 >= 0)
m.c1636 = Constraint(expr= 1509*m.b26 - m.x441 - m.x457 >= 0)
m.c1637 = Constraint(expr= 1509*m.b26 - m.x442 - m.x458 >= 0)
m.c1638 = Constraint(expr= 1509*m.b26 - m.x443 - m.x459 >= 0)
m.c1639 = Constraint(expr= 1509*m.b26 - m.x444 - m.x460 >= 0)
m.c1640 = Constraint(expr= 1509*m.b26 - m.x445 - m.x461 >= 0)
m.c1641 = Constraint(expr= 1509*m.b26 - m.x446 - m.x462 >= 0)
m.c1642 = Constraint(expr= 1509*m.b26 - m.x447 - m.x463 >= 0)
m.c1643 = Constraint(expr= 1509*m.b26 - m.x448 - m.x464 >= 0)
m.c1644 = Constraint(expr= 1509*m.b26 - m.x449 - m.x465 >= 0)
m.c1645 = Constraint(expr= 1509*m.b26 - m.x450 - m.x466 >= 0)
m.c1646 = Constraint(expr= 1509*m.b26 - m.x451 - m.x467 >= 0)
m.c1647 = Constraint(expr= 1509*m.b26 - m.x452 - m.x468 >= 0)
m.c1648 = Constraint(expr= 1509*m.b26 - m.x453 - m.x469 >= 0)
m.c1649 = Constraint(expr= 1509*m.b26 - m.x454 - m.x470 >= 0)
m.c1650 = Constraint(expr= 2200*m.b27 - m.x471 - m.x487 >= 0)
m.c1651 = Constraint(expr= 2200*m.b27 - m.x472 - m.x488 >= 0)
m.c1652 = Constraint(expr= 2200*m.b27 - m.x473 - m.x489 >= 0)
m.c1653 = Constraint(expr= 2200*m.b27 - m.x474 - m.x490 >= 0)
m.c1654 = Constraint(expr= 2200*m.b27 - m.x475 - m.x491 >= 0)
m.c1655 = Constraint(expr= 2200*m.b27 - m.x476 - m.x492 >= 0)
m.c1656 = Constraint(expr= 2200*m.b27 - m.x477 - m.x493 >= 0)
m.c1657 = Constraint(expr= 2200*m.b27 - m.x478 - m.x494 >= 0)
m.c1658 = Constraint(expr= 2200*m.b27 - m.x479 - m.x495 >= 0)
m.c1659 = Constraint(expr= 2200*m.b27 - m.x480 - m.x496 >= 0)
m.c1660 = Constraint(expr= 2200*m.b27 - m.x481 - m.x497 >= 0)
m.c1661 = Constraint(expr= 2200*m.b27 - m.x482 - m.x498 >= 0)
m.c1662 = Constraint(expr= 2200*m.b27 - m.x483 - m.x499 >= 0)
m.c1663 = Constraint(expr= 2200*m.b27 - m.x484 - m.x500 >= 0)
m.c1664 = Constraint(expr= 2200*m.b27 - m.x485 - m.x501 >= 0)
m.c1665 = Constraint(expr= 2200*m.b27 - m.x486 - m.x502 >= 0)
m.c1666 = Constraint(expr= 900*m.b28 - m.x503 - m.x519 >= 0)
m.c1667 = Constraint(expr= 900*m.b28 - m.x504 - m.x520 >= 0)
m.c1668 = Constraint(expr= 900*m.b28 - m.x505 - m.x521 >= 0)
m.c1669 = Constraint(expr= 900*m.b28 - m.x506 - m.x522 >= 0)
m.c1670 = Constraint(expr= 900*m.b28 - m.x507 - m.x523 >= 0)
m.c1671 = Constraint(expr= 900*m.b28 - m.x508 - m.x524 >= 0)
m.c1672 = Constraint(expr= 900*m.b28 - m.x509 - m.x525 >= 0)
m.c1673 = Constraint(expr= 900*m.b28 - m.x510 - m.x526 >= 0)
m.c1674 = Constraint(expr= 900*m.b28 - m.x511 - m.x527 >= 0)
m.c1675 = Constraint(expr= 900*m.b28 - m.x512 - m.x528 >= 0)
m.c1676 = Constraint(expr= 900*m.b28 - m.x513 - m.x529 >= 0)
m.c1677 = Constraint(expr= 900*m.b28 - m.x514 - m.x530 >= 0)
m.c1678 = Constraint(expr= 900*m.b28 - m.x515 - m.x531 >= 0)
m.c1679 = Constraint(expr= 900*m.b28 - m.x516 - m.x532 >= 0)
m.c1680 = Constraint(expr= 900*m.b28 - m.x517 - m.x533 >= 0)
m.c1681 = Constraint(expr= 900*m.b28 - m.x518 - m.x534 >= 0)
m.c1682 = | |
# -*- coding: utf-8 -*-
"""Contains various utility classes for creating loopy arrays
and indexing / mapping
"""
import logging
import six
import copy
from string import Template
import loopy as lp
import numpy as np
from loopy.kernel.data import AddressSpace as scopes
from pyjac.core.enum_types import JacobianFormat, JacobianType
from pyjac.utils import listify, partition
class array_splitter(object):
"""
A convenience object that handles splitting arrays to improve vectorized
data-access patterns, etc.
Can handle reshaping of both loopy and numpy arrays to the desired shape
Properties
----------
depth: int [None]
If is not None, the vector-width to use for deep-vectorization
wide: bool [False]
If is not None, the vector-width to use for wide-vectorization
data_order: ['C', 'F']
The data ordering of the kernel
"""
def __init__(self, loopy_opts):
self.depth = loopy_opts.depth
self.width = loopy_opts.width
self.vector_width = loopy_opts.vector_width
self.data_order = loopy_opts.order
self.is_simd = loopy_opts.is_simd
self.pre_split = None
if loopy_opts.pre_split:
if self.width:
self.pre_split = global_ind
else:
self.pre_split = var_name
@property
def _is_simd_split(self):
"""
Returns True IFF this array splitter has a split, and this split is a result
of use of explicit SIMD data types
"""
return self._have_split() and not self._have_split(with_simd_as=False)
    @staticmethod
    def __determine_split(data_order, vector_width, width, depth, is_simd):
        """Shared implementation behind the :func:`_have_split` methods.

        Not intended to be called directly.

        Note: returns a truthy/falsy value rather than a strict bool —
        the non-SIMD branch below returns the `width`/`depth` value
        itself when a split applies.
        """
        if vector_width:
            # Explicit SIMD always forces a split.
            if is_simd:
                return True
            # Otherwise split C-ordered data on wide-vectorization and
            # F-ordered data on deep-vectorization.
            return ((data_order == 'C' and width) or (data_order == 'F' and depth))
        return False
def _have_split(self, with_simd_as=None):
"""
Returns True if there is anything for this :class:`array_splitter` to do
Parameters
----------
with_simd_as: bool [None]
If specified, calculate whether we have a split if :attr:`is_simd` was
set to the given value
Returns
-------
have_split: bool
True IFF this vectorization pattern will result in a split for any
array
"""
is_simd = self.is_simd if with_simd_as is None else with_simd_as
return self.__determine_split(
self.data_order, self.vector_width, self.width, self.depth, is_simd)
@staticmethod
def _have_split_static(loopy_opts, with_simd_as=None):
"""
Like :func:`_have_split`, but a static method for easy calling
Parameters
----------
loopy_opts: :class:`loopy_options`
The options object that would be used to construct this splitter.
with_simd_as: bool [None]
If specified, calculate whether we have a split if :attr:`is_simd` was
set to the given value
Returns
-------
have_split: bool
True IFF this vectorization pattern will result in a split for any
array
"""
is_simd = loopy_opts.is_simd if with_simd_as is None else with_simd_as
return array_splitter.__determine_split(
loopy_opts.order, loopy_opts.vector_width, loopy_opts.width,
loopy_opts.depth, is_simd)
def _should_split(self, array):
"""
Returns True IFF this array should result in a split, the current criteria
are:
1) the array's at least 2-D
2) we have a vector width, and we're using explicit SIMD
Note: this assumes that :func:`_have_split` has been called previously (
and passed)
"""
return len(array.shape) >= 1 or self.is_simd
def split_and_vec_axes(self, array):
"""
Determine the axis that should be split, and the destination of the vector
axis for the given array and :attr:`data_order`, :attr:`width` and
:attr:`depth`
Parameters
----------
array: :class:`numpy.ndarray` or :class:`loopy.ArrayBase`
The array to split
Notes
-----
Does not take into account whether the array should be split, see:
:func:`have_split`, :func:`should_split`
Returns
-------
split_axis:
the axis index to be split
vec_axis:
the destination index of the vector axis
"""
if self.data_order == 'C' and self.width:
split_axis = 0
vec_axis = len(array.shape)
elif self.data_order == 'C':
split_axis = len(array.shape) - 1
vec_axis = len(array.shape)
elif self.data_order == 'F' and self.depth:
split_axis = len(array.shape) - 1
vec_axis = 0
else:
split_axis = 0
vec_axis = 0
return split_axis, vec_axis
def grow_axis(self, array):
"""
Returns
-------
grow_axis: int
The integer index of the axis corresponding to the initial conditions,
see :ref:`vector_split`
"""
if self._should_split(array):
# the grow axis is one IFF it's a F-ordered deep-split
return 1 if self.data_order == 'F' else 0
return 0
def split_shape(self, array):
"""
Returns the array shape that would result from splitting the supplied array
Parameters
----------
array: :class:`numpy.ndarray` (or object w/ attribute shape)
Returns
-------
shape: tuple of int
The resulting split array shape
grow_axis: int
The integer index of the axis corresponding to the initial conditions,
see :ref:`vector_split`
vec_axis: int
The integer index of the vector axis, if present.
If there is no split, this will be None, see :ref:`vector_split`
split_axis: int
The integer index of the axis that will be split (note: this is
calculated _before_ the split is applied). If no split, this is None
"""
grow_axis = 0
vec_axis = None
shape = tuple(x for x in array.shape)
split_axis = None
if not self._have_split():
return shape, grow_axis, vec_axis, split_axis
vector_width = None
if self._should_split(array):
# the grow axis is one IFF it's a F-ordered deep-split
grow_axis = self.grow_axis(array)
split_axis, vec_axis = self.split_and_vec_axes(array)
vector_width = self.depth if self.depth else self.width
assert vector_width
new_shape = [-1] * (len(shape) + 1)
# the vector axis is of size vector_width
new_shape[vec_axis] = vector_width
def __index(i):
if i < vec_axis:
return i
else:
return i + 1
for i in range(len(shape)):
if i == split_axis:
# the axis that is split is divided by the vector width
new_shape[__index(i)] = int(
np.ceil(shape[i] / float(vector_width)))
else:
# copy old shape
new_shape[__index(i)] = shape[i]
shape = tuple(new_shape)
return shape, grow_axis, vec_axis, split_axis
def _split_array_axis_inner(self, kernel, array_name, split_axis, dest_axis,
count, order='C', vec=False, **kwargs):
if count == 1:
return kernel
# {{{ adjust arrays
from loopy.kernel.tools import ArrayChanger
from loopy.symbolic import SubstitutionRuleMappingContext
from loopy.transform.padding import ArrayAxisSplitHelper
achng = ArrayChanger(kernel, array_name)
ary = achng.get()
from pytools import div_ceil
# {{{ adjust shape
new_shape = ary.shape
assert new_shape is not None, 'Cannot split auto-sized arrays'
new_shape = list(new_shape)
axis_len = new_shape[split_axis]
# still need to reduce the global problem size in unit testing
if self.pre_split and not isinstance(axis_len, int):
outer_len = new_shape[split_axis]
else:
# todo: automatic detection of when axis_len % count == 0, and we can
# replace with axis_len >> log2(count)
outer_len = div_ceil(axis_len, count)
new_shape[split_axis] = outer_len
new_shape.insert(dest_axis, count)
new_shape = tuple(new_shape)
# }}}
# {{{ adjust dim tags
if ary.dim_tags is None:
raise RuntimeError("dim_tags of '%s' are not known" % array_name)
new_dim_tags = list(ary.dim_tags)
old_dim_tag = ary.dim_tags[split_axis]
from loopy.kernel.array import FixedStrideArrayDimTag
if not isinstance(old_dim_tag, FixedStrideArrayDimTag):
raise RuntimeError("axis %d of '%s' is not tagged fixed-stride".format(
split_axis, array_name))
tag = FixedStrideArrayDimTag(1)
new_dim_tags.insert(dest_axis, tag)
# fix strides
toiter = reversed(list(enumerate(new_shape))) if order == 'C' \
else enumerate(new_shape)
stride = 1
for i, shape in toiter:
new_dim_tags[i] = new_dim_tags[i].copy(stride=stride)
stride *= shape
new_dim_tags = tuple(new_dim_tags)
# }}}
# {{{ adjust dim_names
new_dim_names = ary.dim_names
if new_dim_names is not None:
new_dim_names = list(new_dim_names)
existing_name = new_dim_names[split_axis]
outer_name = existing_name + "_outer"
new_dim_names[split_axis] = outer_name
new_dim_names.insert(dest_axis, existing_name + "_inner")
new_dim_names = tuple(new_dim_names)
# }}}
kernel = achng.with_changed_array(ary.copy(
shape=new_shape, dim_tags=new_dim_tags, dim_names=new_dim_names))
# }}}
var_name_gen = kernel.get_var_name_generator()
def split_access_axis(expr):
idx = expr.index
if not isinstance(idx, tuple):
idx = (idx,)
idx = list(idx)
axis_idx = idx[split_axis]
from loopy.symbolic import simplify_using_aff
from pymbolic.primitives import Variable
if self.pre_split:
# no split, just add an axis
outer_index = axis_idx
inner_index = Variable(self.pre_split + '_inner')
else:
inner_index = simplify_using_aff(kernel, axis_idx % count)
outer_index = simplify_using_aff(kernel, axis_idx // count)
idx[split_axis] = outer_index
idx.insert(dest_axis, inner_index)
return expr.aggregate.index(tuple(idx))
rule_mapping_context = SubstitutionRuleMappingContext(
kernel.substitutions, var_name_gen)
aash = ArrayAxisSplitHelper(rule_mapping_context,
set([array_name]), split_access_axis)
kernel = rule_mapping_context.finish_kernel(aash.map_kernel(kernel))
if vec:
achng = ArrayChanger(kernel, array_name)
new_strides = [t.layout_nesting_level for t in achng.get().dim_tags]
tag = ['N{}'.format(s) if i != dest_axis else 'vec'
for i, s in enumerate(new_strides)]
kernel = lp.tag_array_axes(kernel, [array_name], tag)
return kernel
def __split_iname_access(self, knl, arrays):
"""
Warning -- should be called _only_ on non-split arrays when :attr:`pre_split`
is True
This is a helper array to ensure that indexing of non-split arrays
stay correct if we're using a pre-split. Essentially, if we don't split
the arrays, the pre-split index will never make it into the array, and hence
the assumed iname splitting won't work.
Parameters
----------
knl: :class:`loopy.LoopKernel`
The kernel to split iname access for
arrays: :list of class:`loopy.ArrayArg`
The array(s) to split iname access for
Returns
-------
split_knl: :class:`loopy.LoopKernel`
The kernel with iname access to the array's split
Raises
------
AssertionError
"""
assert self.pre_split
owner = self
new_var = self.pre_split + | |
#!/usr/bin/env python3
import abc
from functools import partial
from typing import Generator, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, Normalizer, StandardScaler
from datafold.pcfold import TSCDataFrame
from datafold.utils.general import is_df_same_index, is_integer, series_if_applicable
class TSCMetric(object):
"""Compute metrics for time series collection data.
Parameters
----------
metrics
* "rmse" - root mean squared error,
* "rrmse" - relative root mean squared error
* "mse" - mean squared error,
* "mae" - mean absolute error,
* "max" maximum error,
* "l2" - Eucledian norm
mode
compute metric per "timeseries", "timestep" or "feature"
scaling
Prior scaling (useful for heterogeneous time series features).
* "id" - no scaling,
* "min-max" - each feature is scaled into (0, 1) range,
* "standard" - remove mean and scale to unit variance for each feature,
* "l2_normalize" - divide each feature by Euclidean norm
References
----------
"rrmse" is taken from :cite:`le_clainche_higher_2017`
"""
_cls_valid_modes = ["timeseries", "timestep", "feature"]
_cls_valid_metrics = ["rmse", "rrmse", "mse", "mape", "mae", "medae", "max", "l2"]
_cls_valid_scaling = ["id", "min-max", "standard", "l2_normalize"]
def __init__(self, metric: str, mode: str, scaling: str = "id"):
mode = mode.lower()
metric = metric.lower()
if metric in self._cls_valid_metrics:
self.metric = self._metric_from_str_input(metric)
else:
raise ValueError(
f"Invalid metric={mode}. Choose from {self._cls_valid_metrics}"
)
if mode in self._cls_valid_modes:
self.mode = mode
else:
raise ValueError(
f"Invalid mode='{mode}'. Choose from {self._cls_valid_modes}"
)
self.scaling = self._select_scaling(name=scaling)
def _select_scaling(self, name):
if name == "id":
return None
elif name == "min-max":
return MinMaxScaler()
elif name == "standard":
return StandardScaler()
elif name == "l2_normalize":
return Normalizer(norm="l2")
else:
raise ValueError(
f"scaling={name} is not known. Choose from {self._cls_valid_scaling}"
)
    def _scaling(self, y_true: TSCDataFrame, y_pred: TSCDataFrame):
        """Apply the configured feature scaling to both frames.

        The scaler is fit on ``y_true`` only; ``y_pred`` is transformed with
        the factors learned from ``y_true``. With identity scaling both
        frames are returned unchanged.
        """
        # it is checked before that y_true and y_pred indices/columns are identical
        index, columns = y_true.index, y_true.columns
        # first normalize y_true, afterwards (with the same factors from y_true!) y_pred
        if self.scaling is not None:  # is None if scaling is identity
            y_true = self.scaling.fit_transform(y_true)
            y_pred = self.scaling.transform(y_pred.to_numpy())
            # re-attach the TSC index/columns lost in the numpy round-trip
            y_true = TSCDataFrame(y_true, index=index, columns=columns)
            y_pred = TSCDataFrame(y_pred, index=index, columns=columns)
        return y_true, y_pred
def _l2_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
diff = y_true - y_pred
if sample_weight is not None:
diff = sample_weight[:, np.newaxis] * diff
l2_norm = np.linalg.norm(diff, axis=0)
if multioutput == "uniform_average":
l2_norm = np.mean(l2_norm)
return l2_norm
def _medae_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
"""Median absolute error."""
if sample_weight is not None:
raise ValueError("Median absolute error does not support sample_weight.")
return metrics.median_absolute_error(
y_true=y_true, y_pred=y_pred, multioutput=multioutput
)
# def _mer_metric(
# self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
# ):
# r"""Mean error relative to mean observation
# Each time series must have the same length (corresponding to a prediction
# horizon).
#
# The error is taken from https://www.ijcai.org/Proceedings/2017/0277.pdf
#
# The MER is computed with
# .. math::
# \text{MER} = 100 \cdot \frac{1}{N} \sum_{i=1}^N
# \frac{\vert y - \hat{y} \vert}{\bar{y}}
# """
# # TODO: this metric shows a problem in the current setting
# # -- it does not fir in the metric_per_[timeseries|feature|timestep]
#
# if self.mode == "timestep":
# raise ValueError("Metric 'mean error relative to mean observation' does not "
# "support mode 'timestep'.")
#
# if sample_weight is not None:
# raise NotImplementedError("Sample weight is not implemented ")
#
# N = y_true.shape[0]
# error = (100 * 1 / N * ((y_true - y_pred).abs() / y_true.mean()).sum())
#
# if multioutput == "uniform_average":
# error = np.mean(error)
# return error
def _rrmse_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
"""Metric from :cite:`le_clainche_higher_2017`"""
if multioutput == "uniform_average":
norm_ = np.sum(np.square(np.linalg.norm(y_true, axis=1)))
else: # multioutput == "raw_values":
norm_ = np.sum(np.square(y_true), axis=0)
if (np.asarray(norm_) <= 1e-14).any():
raise RuntimeError(
f"norm factor(s) are too small for rrmse \n norm_factor = {norm_}"
)
mse_error = metrics.mean_squared_error(
y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput
)
mse_error_relative = np.divide(mse_error, norm_)
return np.sqrt(mse_error_relative)
    def _max_error(
        self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
    ):
        """Wrapper for :class:`sklearn.metrics.max_error` to allow `sample_weight`
        and `multioutput` arguments (both have no effect).
        """
        # fails if y is multioutput
        return metrics.max_error(y_true=y_true, y_pred=y_pred)
def _metric_from_str_input(self, error_metric: str):
error_metric = error_metric.lower()
from typing import Callable
error_metric_handle: Callable
if error_metric == "rmse": # root mean squared error
error_metric_handle = partial(metrics.mean_squared_error, squared=False)
elif error_metric == "rrmse": # relative root mean squared error
error_metric_handle = self._rrmse_metric # type: ignore
elif error_metric == "mape": # mean absolute percentage error
error_metric_handle = metrics.mean_absolute_percentage_error # type: ignore
elif error_metric == "mse":
error_metric_handle = metrics.mean_squared_error
elif error_metric == "mae":
error_metric_handle = metrics.mean_absolute_error
elif error_metric == "medae": # median absolute error
error_metric_handle = self._medae_metric
elif error_metric == "max":
error_metric_handle = self._max_error
elif error_metric == "l2":
error_metric_handle = self._l2_metric
else:
raise ValueError(f"Metric {error_metric} not known. Please report bug.")
return error_metric_handle
def _is_scalar_multioutput(self, multioutput) -> bool:
# Return True if there is only one column (because features are averaged)
if (
isinstance(multioutput, str) and multioutput == "uniform_average"
) or isinstance(multioutput, np.ndarray):
# array -> average with weights
scalar_score = True
elif multioutput == "raw_values":
scalar_score = False
else:
raise ValueError(f"Illegal argument multioutput='{multioutput}'")
return scalar_score
def _single_column_name(self, multioutput) -> list:
assert self._is_scalar_multioutput(multioutput)
if isinstance(multioutput, str) and multioutput == "uniform_average":
column = ["metric_uniform_average"]
elif isinstance(multioutput, np.ndarray):
column = ["metric_user_weights"]
else:
raise ValueError(f"Illegal argument of multioutput={multioutput}")
return column
    def _metric_per_timeseries(
        self,
        y_true: TSCDataFrame,
        y_pred: TSCDataFrame,
        sample_weight=None,
        multioutput="uniform_average",
    ) -> Union[pd.Series, pd.DataFrame]:
        """Evaluate the metric once per time series ID.

        ``sample_weight`` assigns one weight per time step and therefore
        requires all time series to have the same length. Returns a Series
        (scalar multioutput) or DataFrame (raw values) indexed by ID.
        """
        if sample_weight is not None:
            # same length of time series to have mapping
            # sample_weight -> time step of time series (can be a different time value)
            y_true.tsc.check_timeseries_same_length()
            if sample_weight.shape[0] != y_true.n_timesteps:
                raise ValueError(
                    f"'sample_weight' length (={len(sample_weight)}) "
                    f"does not match the number of time steps (={y_true.n_timesteps})"
                )
        if self._is_scalar_multioutput(multioutput=multioutput):
            column = self._single_column_name(multioutput=multioutput)
            # Make in both cases a DataFrame and later convert to Series in the scalar
            # case this allows to use .loc[i, :] in the loop
            error_per_timeseries = pd.DataFrame(
                np.nan, index=y_true.ids, columns=column
            )
        else:
            error_per_timeseries = pd.DataFrame(
                np.nan,
                index=y_true.ids,
                columns=y_true.columns.to_list(),
            )
        # evaluate the metric on each (y_true, y_pred) time series pair
        for i, y_true_single in y_true.itertimeseries():
            y_pred_single = y_pred.loc[i, :]
            error_per_timeseries.loc[i, :] = self.metric(
                y_true_single,
                y_pred_single,
                sample_weight=sample_weight,
                multioutput=multioutput,
            )
        return series_if_applicable(error_per_timeseries)
    def _metric_per_feature(
        self,
        y_true: TSCDataFrame,
        y_pred: TSCDataFrame,
        sample_weight=None,
        multioutput="raw_values",
    ):
        """Evaluate the metric once per feature column.

        ``sample_weight`` assigns one weight per row (observation). Returns a
        Series indexed by feature name.
        """
        # Note: score per feature is never a multioutput-average, because a feature is
        # seen as a scalar quantity
        if sample_weight is not None:
            if sample_weight.shape[0] != y_true.shape[0]:
                raise ValueError(
                    f"'sample_weight' length (={sample_weight.shape[0]}) "
                    f"does not match the number of feature values "
                    f"(y.shape[0]={y_true.shape[0]})"
                )
        metric_per_feature = self.metric(
            y_true.to_numpy(),
            y_pred.to_numpy(),
            sample_weight=sample_weight,
            multioutput="raw_values",  # raw_values to treat every feature separately
        )
        metric_per_feature = pd.Series(
            metric_per_feature,
            index=y_true.columns,
        )
        return metric_per_feature
    def _metric_per_timestep(
        self,
        y_true: TSCDataFrame,
        y_pred: TSCDataFrame,
        sample_weight=None,
        multioutput="uniform_average",
    ):
        """Evaluate the metric once per time step across all time series.

        ``sample_weight`` assigns one weight per time series; this requires
        all time series to share the same time values. Returns a Series
        (scalar multioutput) or DataFrame (raw values) indexed by time.
        """
        if sample_weight is not None:
            # sample weights -> each time series has a different weight
            # Currently, all time series must have the same time values to have the same
            # length for each time step
            y_true.tsc.check_timeseries_same_length()
            # the weight, must be as long as the time series
            if sample_weight.shape[0] != y_true.n_timeseries:
                raise ValueError(
                    f"'sample_weight' shape (={sample_weight.shape[0]}) "
                    f"does not match the number of time series (={y_true.n_timeseries})."
                )
        time_indices = pd.Index(y_true.time_values(), name="time")
        if self._is_scalar_multioutput(multioutput=multioutput):
            column = self._single_column_name(multioutput=multioutput)
            # Make in both cases a DataFrame and later convert to Series in the scalar
            # case this allows to use .loc[i, :] in the loop
            metric_per_time = pd.DataFrame(np.nan, index=time_indices, columns=column)
        else:
            metric_per_time = pd.DataFrame(
                np.nan, index=time_indices, columns=y_true.columns.to_list()
            )
        metric_per_time.index = metric_per_time.index.set_names(
            TSCDataFrame.tsc_time_idx_name
        )
        idx_slice = pd.IndexSlice
        # evaluate the metric on the cross-section of every time series at time t
        for t in time_indices:
            y_true_t = pd.DataFrame(y_true.loc[idx_slice[:, t], :])
            y_pred_t = pd.DataFrame(y_pred.loc[idx_slice[:, t], :])
            metric_per_time.loc[t, :] = self.metric(
                y_true_t,
                y_pred_t,
                sample_weight=sample_weight,
                multioutput=multioutput,
            )
        return series_if_applicable(metric_per_time)
def __call__(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight: Optional[np.ndarray] = None,
multioutput: Union[str, np.ndarray] = "raw_values",
) -> Union[pd.Series, pd.DataFrame]:
"""Compute metric between two time series collections.
Parameters
----------
y_true
Ground truth time series collection (basis for scaling), of shape
`(n_samples, n_features)`.
y_pred
Predicted time series (the same scaling as for `y_true` will be applied),
with exact same index (`ID` and `time` and columns as `y_true`).
sample_weight
Gives samples individual weights depending on the `mode`.
* `mode=timeseries` array of shape `(n_timesteps,)`. Each | |
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import numpy as np
from yellowbrick.regressor import PredictionError, ResidualsPlot
from yellowbrick.regressor.alphas import AlphaSelection
import yellowbrick as yb
from typing import List, Dict
import warnings
import sklearn
import matplotlib
import seaborn as sns
# .- EDA Functions ---------------------------------------------------
def analyse_continous(df, var, cluster_labels):
    """Plot stacked histograms of a continuous variable, one panel per cluster.

    Parameters
    ----------
    df: A pandas DataFrame containing the variable
    var: A string naming the continuous column to plot
    cluster_labels: Cluster assignments (one per row) used to group the panels
    """
    data = df.copy()
    # use one bin per distinct value for low-cardinality variables
    n_unique = len(data[var].unique())
    bins = n_unique if n_unique < 30 else 30
    plt.figure()
    data[var].hist(by=cluster_labels,
                   layout=(1, len(np.unique(cluster_labels))),
                   bins=bins,
                   figsize=(20, 10), stacked=True)
    plt.ylabel('Number of Customers')
    plt.xlabel(var)
    plt.title(var)
def find_frequency(series):
    """Provide summary on frequency counts and proportions.

    Parameters
    ----------
    series: A pandas Series containing discrete values

    Returns
    -------
    A pandas DataFrame with one row per category and columns
    'p_frequency' (proportion) and 'n_frequency' (count).
    """
    proportions = series.value_counts(normalize=True)
    counts = series.value_counts()
    return pd.concat([proportions, counts],
                     keys=['p_frequency', 'n_frequency'], axis=1)
def summarise(df):
    """Provide summary on missing values, unique values and data type.

    Parameters
    ----------
    df: A pandas DataFrame to summarise

    Returns
    -------
    A pandas DataFrame (one row per column of `df`, most-missing first) with
    count and proportion of missing values, unique-value count and dtype.
    """
    parts = [df.isnull().sum(), df.isnull().mean(), df.nunique(), df.dtypes]
    labels = ['n_missing', 'p_missing', 'n_unique', 'dtype']
    summary = pd.concat(parts, keys=labels, axis=1)
    return summary.sort_values(by='n_missing', ascending=False)
def find_outlier(series, k=1.5):
    """Find outliers using Tukey's IQR fences.

    Parameters
    ----------
    series: A pandas Series to find outliers in
    k: (optional) Fence multiplier in IQR units from Q1/Q3 (default 1.5)

    Returns
    -------
    A pandas Series of booleans where True marks an outlier.
    """
    q1, q3 = series.quantile(.25), series.quantile(.75)
    spread = k * (q3 - q1)
    return (series < q1 - spread) | (series > q3 + spread)
def describe_more(df, features, k=1.5):
    """Provide descriptive statistics and outlier summary for numerical features.

    Parameters
    ----------
    df: A pandas DataFrame to describe
    features: A list of numerical feature column names to use
    k: (optional) Fence multiplier in IQR units from Q1/Q3 (default 1.5)

    Returns
    -------
    A pandas DataFrame containing descriptive statistics plus 'n_outliers'
    and 'p_outliers' rows.
    """
    descriptives = df[features].describe()
    # fix: forward `k` to find_outlier -- previously the argument was
    # accepted but silently ignored (the default 1.5 was always used)
    outliers = df[features].apply(find_outlier, k=k)
    descriptives.loc['n_outliers'] = outliers.sum()
    descriptives.loc['p_outliers'] = outliers.mean()
    return descriptives
def plot_discrete(df, feature, target, orientation='v', figsize=(14, 4)):
    """Plot target mean and counts for unique values in feature.
    Parameters
    ----------
    df: A pandas DataFrame to use
    feature: A string specifying the name of the feature column
    target: A string specifying the name of the target column
    orientation: (optional) 'h' for horizontal and 'v' for orientation of bars
    figsize: (optional) A tuple specifying the shape of the plot
    Returns
    -------
    A plot containing 2 subplots. Left subplot shows counts of categories. Right
    subplot shows target mean value for each category.
    """
    fig, ax = plt.subplots(1, 2, figsize=figsize)
    if orientation=='v':
        sns.countplot(data=df, x=feature, ax=ax[0])
        sns.barplot(data=df, x=feature, y=target, ax=ax[1])
        # NOTE(review): axis fixed to [0, 1] -- assumes a 0/1 target so the
        # mean is a rate; confirm with callers
        ax[1].set_ylim([0,1])
    elif orientation=='h':
        sns.countplot(data=df, y=feature, ax=ax[0])
        sns.barplot(data=df, x=target, y=feature, orient='h', ax=ax[1])
        ax[1].set_xlim([0,1])
    ax[0].set_title(f"Category counts in {feature}")
    ax[1].set_title(f"Mean target by category in {feature}")
    plt.tight_layout() # To ensure subplots don't overlay
def plot_continuous(df, feature, target, bins=30, figsize=(14, 5)):
    """Plot histogram, density plot, box plot and swarm plot for feature colour
    coded by target.
    Parameters
    ----------
    df: A pandas DataFrame to use
    feature: A string specifying the name of the feature column
    target: A string specifying the name of the target column
    bins: (optional) An integer for number of bins in histogram
    figsize: (optional) A tuple specifying the shape of the plot
    Returns
    -------
    A plot containing 4 subplots. Top left subplot shows number of histogram.
    Top right subplot shows density plot. Bottom left subplot shows box plot.
    Bottom right subplot shows swarm plot. Each contains overlaying graphs for
    each class in target.
    """
    # NOTE(review): `figsize` is accepted but the hard-coded (14, 8) is used
    # below -- confirm which is intended
    fig, ax = plt.subplots(2, 2, figsize=(14,8))
    sns.histplot(data=df, x=feature, hue=target, bins=bins, ax=ax[0,0])
    ax[0,0].set_title(f'Histogram of {feature} by {target}')
    # NOTE(review): kdeplot(shade=...) is deprecated in newer seaborn (fill=)
    sns.kdeplot(data=df, x=feature, hue=target, shade=True, common_norm=False,
                ax=ax[0,1])
    ax[0,1].set_title(f'Density plot of {feature} by {target}')
    sns.boxplot(data=df, y=feature, x=target, ax=ax[1,0])
    ax[1,0].set_title(f'Box plot of {feature} by {target}')
    # swarm plot cannot handle NaNs, hence dropna()
    sns.swarmplot(data=df.dropna(), y=feature, x=target, ax=ax[1,1])
    ax[1,1].set_title(f'Swarm plot of {feature} by {target}')
    plt.tight_layout() # To ensure subplots don't overlay
# .- regression functions ---------------------------------------------------
def plot_scatter_plots(dataframe, target, path_to_save):
    """For each numeric feature, write a 5-panel diagnostic page to a PDF.

    Panels per feature: regression scatter vs `target` (with correlation),
    histogram, box plot, then histogram and box plot recomputed after
    dropping points more than 2 standard deviations from the mean.

    Parameters
    ----------
    dataframe: A pandas DataFrame containing `target` and numeric features
    target: A string naming the dependent variable column
    path_to_save: File path of the PDF report to write
    """
    print("-"*50)
    print("Target variable: **{}** .Analyzing variable for dataset with {} rows. ".format(target, len(dataframe)))
    print("-"*50)
    print("\n"*5)
    # fix: `col not in target` was a substring test on the target *name*,
    # wrongly excluding any column whose name is contained in it
    independent_variables = [col for col in list(
        dataframe.select_dtypes([int, float])) if col != target]
    with PdfPages(path_to_save) as pdf:
        for variable in independent_variables:
            fig = plt.figure(figsize=(22, 5))
            title = fig.suptitle(
                "Analyzing variable: {}".format(variable), fontsize=14)
            fig.subplots_adjust(top=0.85, wspace=0.3)
            # panel 1: scatter + fitted regression line, titled by correlation
            ax1 = fig.add_subplot(1, 5, 1)
            correlation = round(np.corrcoef(
                dataframe[variable], dataframe[target])[0, 1], 2)
            ax1.set_title('Correlation: {}'.format(correlation))
            sns.regplot(x=dataframe[variable], y=dataframe[target],
                        fit_reg=True, ax=ax1, scatter_kws={'alpha': 0.1})
            # panel 2: raw distribution
            ax2 = fig.add_subplot(1, 5, 2)
            ax2.set_title("Distribution of variable")
            ax2.set_xlabel(variable)
            ax2.set_ylabel("Frequency")
            w_freq, w_bins, w_patches = ax2.hist(x=dataframe[variable], color='red', bins=15,
                                                 edgecolor='black', linewidth=1)
            # panel 3: raw box plot
            ax3 = fig.add_subplot(1, 5, 3)
            ax3.set_title("Boxplot of variable")
            sns.boxplot(x=dataframe[variable], ax=ax3)
            # panels 4-5: same views within 2 standard deviations of the mean
            data = dataframe[variable]
            data_without_outliers = data[abs(
                data - np.mean(data)) < 2 * np.std(data)]
            ax4 = fig.add_subplot(1, 5, 4)
            ax4.set_title("Distribution of variable dealing with outlers")
            ax4.set_xlabel(variable)
            ax4.set_ylabel("Frequency")
            w_freq, w_bins, w_patches = ax4.hist(x=data_without_outliers, color='red', bins=15,
                                                 edgecolor='black', linewidth=1)
            ax5 = fig.add_subplot(1, 5, 5)
            ax5.set_title("Boxplot of variable dealing with outliers")
            sns.boxplot(x=data_without_outliers, ax=ax5)
            pdf.savefig()
            plt.show()
            plt.close()
def rmse(y_true, y_pred):
    """Root-mean-squared error between two equally-shaped arrays.

    Parameters
    ----------
    y_true: Array-like of ground-truth values
    y_pred: Array-like of predicted values

    Returns
    -------
    The RMSE as a float.
    """
    # fix: the previous body called `mean_squared_error`, which is never
    # imported in this module (only `import sklearn`) and raised NameError;
    # the NumPy form below is equivalent to sklearn's uniform average
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))
def evaluate_results_time_series(df, time_period_col, model, target, path_to_save_report, max_features=None, plot_since_period=0):
    """Walk-forward evaluation of a pre-fit model over successive time periods.

    For each period p (from min+1 to max of `time_period_col`), rows with an
    earlier period form the train split and rows at p the test split; the
    model predicts the test split, RMSE is recorded, and (from
    `plot_since_period` on) a prediction-error / residuals page is written to
    the PDF report.

    Parameters
    ----------
    df: A pandas DataFrame containing features, `target` and `time_period_col`
    time_period_col: A string naming the integer period column
    model: A fitted scikit-learn-style regressor (``.predict``)
    target: A string naming the dependent variable column
    path_to_save_report: File path of the PDF report to write
    max_features: Unused; kept for backward compatibility
    plot_since_period: First period for which diagnostic pages are plotted

    Returns
    -------
    Tuple of (model, X_train, y_train, X_test, y_test, mean_error) where the
    X/y splits are from the last evaluated period.
    """
    import logging
    # fix: the module never defined `_logger`, so every log call raised
    # NameError; use a locally created logger instead
    logger = logging.getLogger(__name__)
    mean_error = []
    with PdfPages(path_to_save_report) as pdf:
        for period in range(df[time_period_col].min()+1, df[time_period_col].max() + 1):
            # fix: honour the `time_period_col` argument instead of the
            # hard-coded `time_period` attribute
            train = df[df[time_period_col] < period]
            test = df[df[time_period_col] == period]
            # fix: positional `axis` for DataFrame.drop was removed in pandas 2.0
            X_train, X_test = train.drop(columns=target), test.drop(columns=target)
            y_train, y_test = train[target], test[target]
            # model.fit(X_train, y_train)
            # NOTE(review): fitting is commented out -- the model is assumed
            # pre-fit; confirm with callers
            y_pred = model.predict(X_test)
            error = rmse(y_test, y_pred)
            mean_error.append(error)
            if period >= plot_since_period:
                fig = plt.figure(figsize=(22, 5))
                fig.suptitle('Period {} - Error {} - Train size: {} / Test size: {}'.format(
                    period, round(error, 5), len(y_train), len(y_test)), fontsize=14)
                fig.subplots_adjust(top=0.85, wspace=0.1)
                ax1 = fig.add_subplot(1, 2, 1)
                visualizer = PredictionError(model, ax=ax1, line_color="red")
                visualizer.score(X_test, y_test)
                visualizer.finalize()
                ax2 = fig.add_subplot(1, 2, 2)
                visualizer = ResidualsPlot(model, ax=ax2)
                visualizer.fit(X_train, y_train)
                visualizer.score(X_test, y_test)
                visualizer.finalize()
                pdf.savefig(fig)
                plt.close()
            logger.info('Period %d - Error %.5f', period, error)
    logger.info('Mean Error = %.5f', np.mean(mean_error))
    return model, X_train, y_train, X_test, y_test, mean_error
def plot_coefficients(model, X_train, max_features=None):
    """Plot the model's coefficient values sorted by absolute magnitude.

    Parameters
    ----------
    model: A fitted linear model exposing ``coef_``
    X_train: A pandas DataFrame whose columns name the coefficients
    max_features: (optional) Plot only the top-N coefficients
    """
    coefs = pd.DataFrame(model.coef_, X_train.columns)
    coefs.columns = ["coef"]
    # order by |coef|, largest first
    ranking = coefs["coef"].apply(np.abs).sort_values(ascending=False).index
    coefs = coefs.loc[ranking]
    if max_features:
        coefs = coefs[:max_features]
    plt.figure(figsize=(15, 7))
    plt.title("Coefficients importance")
    coefs["coef"].plot(kind='bar')
    plt.grid(True, axis='y')
    plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed')
# .- Classification functions ---------------------------------------------------
def success_plot(x, y, title="", y_mean=None, pdf_path=None):
    """Plot category volumes (bars) against the positive-class ratio (line).

    Parameters
    ----------
    x: Array-like of discrete category values (one per observation)
    y: Array-like of binary outcomes aligned with `x`
    title: (optional) Plot title
    y_mean: (optional) Overall positive-class rate to draw as a reference line
    pdf_path: (optional) An open PdfPages object to save the figure into
    """
    try:
        x_names = list(np.unique(x))
    # fix: narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit
    except Exception:
        # fall back for inputs np.unique cannot sort (e.g. mixed types)
        x_names = list(x.unique())
    x_ids = list(range(len(x_names)))
    # map each observation to its category's integer id
    x_base = np.array([x_ids[x_names.index(v)] for v in x])
    x_vals = x_ids
    x_counts_per_group = np.bincount(x_base)
    x_freq = x_counts_per_group/len(x_base)
    # positive-class rate within each category
    y_freq = [np.mean(y[x_base == i]) for i in x_vals]
    fig, ax1 = plt.subplots()
    color = 'tab:grey'
    ax1.set_xlabel('value')
    ax1.set_ylabel('volume', color=color)
    bar_plot = ax1.bar(x_vals, x_freq, color=color)
    # annotate each bar with its raw count
    for idx, rect in enumerate(bar_plot):
        height = rect.get_height()
        ax1.text(rect.get_x() + rect.get_width()/2.,
                 height/2, # vertical
                 x_counts_per_group[idx],
                 ha='center', va='bottom', rotation=0)
    # fix: compare against None so a legitimate 0.0 mean still draws the line
    if y_mean is not None:
        ax1.axhline(y=y_mean, xmin=0, xmax=1)
        ax1.annotate('Total volume of positive class', xy=(0.5, y_mean), xytext=(0.5, y_mean + 0.2),
                     arrowprops=dict(arrowstyle="wedge,tail_width=0.5", alpha=0.1, fc="0.6", ec="none"))
    #ax1.tick_params(axis='y', labelcolor=color)
    ax1.set_ylim([0, 1])
    plt.xticks(x_vals, x_names, rotation=-25, ha='left')
    # instantiate a second axes that shares the same x-axis
    ax2 = ax1.twinx()
    color = 'tab:red'
    ax2.set_ylabel('Churn ratio', color=color)
    ax2.plot(x_vals, y_freq, color=color, marker='o')
    ax2.set_ylim([0, 1])
    #ax2.tick_params(axis='y', labelcolor=color)
    plt.title(title)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    if pdf_path:
        pdf_path.savefig()
    plt.show()
def success_plot_cont_var(x, y, title="", n_bins=10, y_mean=None, pdf_path=None):
    """Bin a continuous variable and delegate to :func:`success_plot`.

    NaNs go into a dedicated "desc" segment; the remaining values are cut
    into `n_bins` equal-width bins between the Tukey fences (widened to cover
    the observed min/max). The title is augmented with the correlation
    between `x` and `y`.

    Parameters
    ----------
    x: A pandas Series of continuous values (may contain NaNs)
    y: Array-like of binary outcomes aligned with `x`
    title: (optional) Base plot title
    n_bins: (optional) Number of equal-width bins
    y_mean: (optional) Overall positive-class rate, forwarded to success_plot
    pdf_path: (optional) An open PdfPages object, forwarded to success_plot
    """
    # Filter nans
    x_values = np.array(list(filter(lambda v: not np.isnan(v), x)))
    # calculate corr (on the non-NaN subset only)
    correlation = round(np.corrcoef(x_values, y[~x.isnull()])[0, 1], 2)
    # Calculate var bounds: clip the binning range to the 1.5*IQR fences
    qs = np.percentile(x_values, [0, 25, 50, 75, 100])
    iqr = qs[3] - qs[1]
    x_min = max(min(x_values), qs[1]-1.5*iqr)
    x_max = min(max(x_values), qs[3]+1.5*iqr)
    # Calculate bins
    bin_size = (x_max - x_min)/n_bins
    bin_bounds = [x_min+i*bin_size for i in range(n_bins)]
    # widen the outermost bounds so every observed value falls in some bin
    if (bin_bounds[0] > min(x_values)):
        bin_bounds = [min(x_values)] + bin_bounds
    if (bin_bounds[-1] < max(x_values)):
        bin_bounds = bin_bounds + [max(x_values)]
    # Calculate x segments: label each value with its bin's range string
    x_segment = []
    for x_val in x:
        if np.isnan(x_val):
            x_segment.append("desc")
        else:
            s = None
            bin_pos = 1
            while (s == None and bin_pos < len(bin_bounds)):
                if (x_val >= bin_bounds[bin_pos-1]) and (x_val <= bin_bounds[bin_pos]):
                    s = "C{0} [{1}, {2}]".format(
                        str(bin_pos).zfill(2),
                        round(bin_bounds[bin_pos-1], 2),
                        round(bin_bounds[bin_pos], 2))
                    #s = "C{}".format(bin_pos)
                bin_pos += 1
            x_segment.append(s)
    # fix: np.str was removed in NumPy 1.24 -- use the builtin str dtype
    x_segment = np.array(x_segment, dtype=str)
    title = "{} (correlation with target = {})".format(title, correlation)
    success_plot(x_segment, y, title, y_mean, pdf_path)
def report_univariate_churn(df, path, target, columns_to_plot, | |
import binascii
import cgi
import html
import json
import os
import subprocess
import urllib.parse
import webbrowser

import requests
import sublime
import sublime_plugin

from Default.paragraph import expand_to_paragraph
Settings = None
class ValeFixCommand(sublime_plugin.TextCommand):
    """Replaces (or removes) the buffer span flagged by a Vale alert."""
    def run(self, edit, **args):
        alert = args["alert"]
        suggestion = args["suggestion"]
        # Vale reports 1-based line/column positions; convert them to a
        # 0-based buffer region.
        line_start = self.view.text_point(alert["Line"] - 1, 0)
        region = sublime.Region(
            line_start + alert["Span"][0] - 1,
            line_start + alert["Span"][1],
        )
        if alert["Action"]["Name"] == "remove":
            # Swallow one extra character so the trailing separator goes too.
            region.b += 1
            self.view.erase(edit, region)
        else:
            self.view.replace(edit, region, suggestion)
        self.view.window().status_message(
            "[Vale Server] Successfully applied fix!")
def debug(message, prefix="Vale", level="debug"):
    """Write a formatted entry to the Sublime Text console.

    Output is suppressed unless the `vale_debug` setting is enabled.

    Args:
        message (str): A message to print to the console
        prefix (str): An optional prefix
        level (str): One of debug, info, warning, error [Default: debug]
    """
    # Guard clause: stay silent unless debugging was switched on.
    if not Settings.get("vale_debug"):
        return
    print("{prefix}: [{level}] {message}".format(
        message=message, prefix=prefix, level=level))
def show_suggestions(suggestions, payload):
    """Present a Quick Panel of possible solutions for the given alert.

    ``payload`` is the JSON-serialized alert; the chosen entry is handed to
    ``apply_suggestion``.
    """
    alert = json.loads(payload)
    # The alert's action is fixed, so decide the label format once.
    if alert["Action"]["Name"] == "remove":
        options = ["Remove '" + alert["Match"] + "'" for _ in suggestions]
    else:
        options = ["Replace with '" + s + "'" for s in suggestions]
    sublime.active_window().show_quick_panel(
        options,
        lambda idx: apply_suggestion(alert, suggestions, idx),
        sublime.MONOSPACE_FONT
    )
def apply_suggestion(alert, suggestions, idx):
    """Apply the user-picked suggestion to the active buffer.

    ``idx`` is -1 when the quick panel was cancelled; out-of-range
    indices are ignored.
    """
    if not 0 <= idx < len(suggestions):
        return
    view = sublime.active_window().active_view()
    view.run_command("vale_fix", {
        "alert": alert,
        "suggestion": suggestions[idx],
    })
def handle_navigation(path):
    """Dispatch a click on one of our popup links.

    ``path`` is one of: a local file to open, an URL to browse, or a
    hex-encoded alert whose suggestions should be fetched.
    """
    if os.path.exists(path):
        # A real file on disk: open it in a new tab.
        sublime.active_window().open_file(path)
        return
    if path.startswith("http"):
        # Not on disk; assume it's an URL.
        webbrowser.open(path)
        return
    # Otherwise it's a hex-encoded alert to process.
    endpoint = urllib.parse.urljoin(Settings.get("vale_server"), "suggest")
    decoded = binascii.unhexlify(path.encode()).decode()
    response = requests.post(endpoint, data={
        "alert": decoded
    })
    show_suggestions(response.json().get("suggestions", []), decoded)
def query(endpoint, payload=None):
    """Query the Vale Server API with the given `endpoint` and `payload`.

    Args:
        endpoint (str): path joined onto the configured ``vale_server``.
        payload (dict): optional query-string parameters.

    Returns:
        dict: the decoded JSON response, or ``{}`` on a non-200 status or
        any connection error.
    """
    # `payload={}` was a mutable default argument (shared across calls);
    # use the None-sentinel idiom instead.
    if payload is None:
        payload = {}
    try:
        server = urllib.parse.urljoin(Settings.get("vale_server"), endpoint)
        r = requests.get(server, params=payload)
        return r.json() if r.status_code == 200 else {}
    except requests.exceptions.RequestException as e:
        debug(str(e), level="error")
        return {}
def make_link(url, linkText="{url}"):
    """Return an HTML anchor pointing at *url*.

    ``linkText`` may contain the ``{url}`` placeholder, which is replaced
    by the link target; the default label is therefore the bare URL.
    """
    anchor = '<a href="{url}">' + linkText + "</a>"
    return anchor.format(url=url)
def post_file(path):
    """Lint the file at *path* via the Vale Server ``file`` endpoint.

    The server writes its JSON results to a file and returns that file's
    location; the results are then read back and decoded here.

    Args:
        path (str): absolute path of the file to lint.

    Returns:
        dict: parsed alerts, or ``{}`` on any HTTP/connection failure.
    """
    try:
        server = urllib.parse.urljoin(Settings.get("vale_server"), "file")
        debug("running vale ({0}) on {1}".format(server, path))
        r = requests.post(server, data={
            "file": path,
            "path": os.path.dirname(path)
        })
        if r.status_code != 200:
            return {}
        body = r.json()["path"]
        # Read-only access is sufficient; "r+" needlessly opened the
        # results file for writing as well.
        with open(body, "r", encoding="utf-8") as f:
            return json.load(f)
    except requests.exceptions.RequestException as e:
        # Stringify the exception and log at error level, matching query().
        debug(str(e), level="error")
        return {}
def post_str(buf, ext):
    """Lint the string *buf* via the Vale Server ``vale`` endpoint.

    Args:
        buf (str): the text to lint.
        ext (str): the file extension used as a format hint.

    Returns:
        dict: the server's alerts, or ``{}`` on any HTTP/connection failure.
    """
    try:
        server = urllib.parse.urljoin(Settings.get("vale_server"), "vale")
        debug("running vale ({0}) on {1}".format(server, buf))
        r = requests.post(server, data={
            "text": buf,
            "format": ext
        })
        if r.status_code != 200:
            return {}
        return r.json()
    except requests.exceptions.RequestException as e:
        # Stringify the exception and log at error level, matching query().
        debug(str(e), level="error")
        return {}
class ValeSettings(object):
    """Provide global access to and management of Vale's settings.
    """
    # Name of the sublime-settings file this class wraps.
    settings_file = "Vale.sublime-settings"
    # Loaded once at class-definition time; refreshed by load().
    settings = sublime.load_settings(settings_file)
    def __init__(self):
        # Alert metadata for the regions currently drawn in views; each
        # entry holds region, HTML, view_id, level and msg keys.
        self.on_hover = []
        # Popup templates and CSS, populated by __load_resources().
        self.error_template = None
        self.warning_template = None
        self.info_template = None
        self.css = None
        # Re-load whenever the user edits the settings file.
        self.settings.add_on_change("reload", lambda: self.load())
        self.load()
    def load(self):
        """Load Vale's settings and static resources from disk.
        """
        self.settings = sublime.load_settings(self.settings_file)
        self.__load_resources()
    def is_supported(self, syntax):
        """Determine if `syntax` has been specified in the settings.

        NOTE(review): every syntax is currently accepted unconditionally;
        the `syntax` argument is ignored.
        """
        return True
    def get_styles(self):
        """Get Vale's base styles from the server configuration.
        """
        config = self.get_config()
        return config["GBaseStyles"]
    def get_draw_style(self):
        """Map the `vale_alert_style` setting to Sublime draw flags.
        """
        underlined = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
        style = self.get("vale_alert_style")
        if style == "solid_underline":
            return sublime.DRAW_SOLID_UNDERLINE | underlined
        elif style == "stippled_underline":
            return sublime.DRAW_STIPPLED_UNDERLINE | underlined
        elif style == "squiggly_underline":
            return sublime.DRAW_SQUIGGLY_UNDERLINE | underlined
        # Unknown/unset style: fall back to an outline.
        return sublime.DRAW_OUTLINED
    def get_config(self):
        """Fetch the active configuration from the Vale server.
        """
        return query("config")
    def put(self, setting, value):
        """Store and save `setting` as `value`.

        Args:
            setting (str): The name of the setting to be accessed.
            value (str, int, bool): The value to be stored.
        """
        self.settings.set(setting, value)
        sublime.save_settings(self.settings_file)
    def get(self, setting):
        """Return the value associated with `setting`.

        Args:
            setting (str): The name of the setting to be accessed.

        Returns:
            (str, int, bool): The value associated with `setting`. The default
            value is ''.
        """
        return self.settings.get(setting, "")
    def clear_on_hover(self):
        """Erase all alert regions and reset the hover metadata.
        """
        for alert in self.on_hover:
            for level in ["error", "warning", "suggestion"]:
                # Look the view up by its stored id; it may not be the
                # currently active view.
                sublime.View(alert["view_id"]).erase_regions(
                    "vale-server-" + level
                )
        del self.on_hover[:]
    def __load_resources(self):
        """Load Vale's static resources (popup templates and CSS).
        """
        self.error_template = sublime.load_resource(
            self.settings.get("vale_error_template")
        )
        self.warning_template = sublime.load_resource(
            self.settings.get("vale_warning_template")
        )
        self.info_template = sublime.load_resource(
            self.settings.get("vale_info_template")
        )
        self.css = sublime.load_resource(self.settings.get("vale_css"))
class ValeDashboardCommand(sublime_plugin.WindowCommand):
    """Opens the Vale Server dashboard in the default browser."""
    def run(self):
        webbrowser.open(Settings.get("vale_server"))
class ValeReportCommand(sublime_plugin.WindowCommand):
    """Generates a report for the active folder."""
    def run(self):
        # Report on the directory containing the file in the active view.
        folder = os.path.dirname(
            sublime.active_window().active_view().file_name())
        report_url = urllib.parse.urljoin(
            Settings.get("vale_server"),
            "/summary.html?path={0}".format(folder)
        )
        webbrowser.open(report_url)
class ValeVocabCommand(sublime_plugin.WindowCommand):
    """Opens the user-specified vocab file."""
    def run(self, name):
        # Vocab files live under <StylesPath>/Vocab/<Project>/<name>.txt.
        config = Settings.get_config()
        vocab_file = os.path.join(
            config["StylesPath"],
            "Vocab",
            config["Project"],
            name + ".txt")
        sublime.active_window().open_file(vocab_file)
class ValeVocabEditCommand(sublime_plugin.WindowCommand):
    """Adds the user-selected term to the given vocab file."""
    def run(self, name):
        view = self.window.active_view()
        selection = view.sel()
        region = sublime.Region(selection[0].a, selection[0].b)
        # An empty selection expands to the word under the caret.
        if region.size() == 0:
            region = view.word(region)
        term = view.substr(region)
        config = Settings.get_config()
        project = config["Project"]
        # Merge the term into the existing vocab, de-duplicated and
        # sorted case-insensitively.
        words = query("vocab", {
            "name": project, "file": name
        })
        words.append(term)
        sorted_list = sorted(set(words), key=str.casefold)
        server = urllib.parse.urljoin(Settings.get("vale_server"), "update")
        r = requests.post(server, data={
            "path": project + "." + name,
            "text": "\n".join(sorted_list)
        })
        if r.status_code == 200:
            self.window.status_message(
                "Successfully added '{0}' to '{1}' vocab.".format(term, project)
            )
class ValeEditStylesCommand(sublime_plugin.WindowCommand):
    """Provides quick access to styles on a view-specific basis.
    """
    # Full paths of the styles shown in the last quick panel, indexed in
    # panel order (see choose_rule).
    styles = []
    def run(self):
        """Show a list of all styles applied to the active view.
        """
        config = Settings.get_config()
        path = config["StylesPath"]
        if not path or not os.path.exists(path):
            debug("invalid path!")
            return
        # Rebuild the lookup list on every run. The old code appended to
        # the shared class-level list without clearing it, so repeated runs
        # accumulated stale entries and the quick-panel indices no longer
        # matched self.styles.
        self.styles = []
        styles = []
        for s in os.listdir(path):
            style = os.path.join(path, s)
            # Skip the vocab directory and stray files.
            if s == "Vocab" or not os.path.isdir(style):
                continue
            self.styles.append(style)
            styles.append(s)
        self.window.show_quick_panel(styles, self.choose_rule)
    def choose_rule(self, idx):
        """Show a list of all rules in the user-selected style.
        """
        if idx == -1:
            return  # The panel was cancelled.
        d = self.styles[idx]
        rules = [x for x in os.listdir(d) if x.endswith(".yml")]
        open_rule = (
            lambda i: None
            if i == -1
            else self.window.open_file(os.path.join(d, rules[i]))
        )
        self.window.show_quick_panel(rules, open_rule)
class ValeCommand(sublime_plugin.TextCommand):
    """Manages Vale's linting functionality.
    """
    def is_enabled(self):
        syntax = self.view.settings().get("syntax")
        return Settings.is_supported(syntax)
    def run(self, edit, from_load):
        """Run vale on the user-indicated buffer.

        Buffers at or above the ``vale_threshold`` line count are linted one
        paragraph at a time; smaller buffers are posted whole as a file.

        Args:
            from_load (bool): True when triggered by a file-load event, in
                which case large buffers are skipped (no meaningful cursor).
        """
        path = self.view.file_name()
        if not path or self.view.is_scratch():
            debug("invalid path: {0}!".format(path))
            return
        limit = Settings.get("vale_threshold")
        count = self.view.rowcol(self.view.size())[0] + 1
        if limit < 0 or (limit > 0 and count >= limit):
            if from_load:
                return
            # Lint only the paragraph containing the cursor.
            _, ext = os.path.splitext(path)
            reg = expand_to_paragraph(self.view, self.view.sel()[0].b)
            buf = self.view.substr(reg)
            row, _ = self.view.rowcol(reg.a)
            response = post_str(buf, ext)
            self.show_alerts(response, row)
        else:
            response = post_file(path)
            self.show_alerts(response, 0)
    def show_alerts(self, data, offset):
        """Add alert regions to the view.

        Args:
            data (dict): alerts keyed by file name, or a server error
                payload containing ``Code``/``Text`` keys.
            offset (int): row offset applied to every alert; non-zero when
                only a paragraph of the buffer was linted.
        """
        Settings.clear_on_hover()
        regions = {"suggestion": [], "warning": [], "error": []}
        level_to_scope = {
            "error": "region.redish",
            "warning": "region.orangish",
            "suggestion": "region.bluish"
        }
        if "Code" in data and "Text" in data:
            # The server reported a runtime error instead of alerts.
            sublime.status_message(
                "Vale: runtime error (skipping lint)")
            debug(data["Text"])
            debug(data.get("Path", ""))
            return
        for f, alerts in data.items():
            for a in alerts:
                # Vale's Line/Span values are 1-based.
                start = self.view.text_point((a["Line"] - 1) + offset, 0)
                loc = (start + a["Span"][0] - 1, start + a["Span"][1])
                region = sublime.Region(*loc)
                regions[a["Severity"]].append(region)
                Settings.on_hover.append(
                    {
                        "region": region,
                        "HTML": self._make_content(a),
                        "view_id": self.view.id(),
                        "level": a["Severity"],
                        "msg": a["Message"],
                    }
                )
        for level in ["error", "warning", "suggestion"]:
            self.view.add_regions(
                "vale-server-" + level,
                regions[level],
                level_to_scope[level],
                "circle",
                Settings.get_draw_style(),
            )
    def _make_content(self, alert):
        """Convert an alert into HTML suitable for a popup.
        """
        actions = []
        style, rule = alert["Check"].split(".")
        path = query("path")["path"]
        loc = os.path.join(path, style, rule) + ".yml"
        if os.path.exists(loc):
            actions.append(make_link(loc, "Edit rule"))
        if "Action" in alert and alert["Action"]["Name"] != "":
            # Serialize the alert so it can round-trip through a link href.
            stringify = json.dumps(alert, separators=(",", ":")).strip()
            stringify = binascii.hexlify(stringify.encode()).decode()
            actions.append(make_link(stringify, "Fix Alert"))
        level = alert["Severity"].capitalize()
        if level == "Error":
            template = Settings.error_template
        elif level == "Warning":
            template = Settings.warning_template
        else:
            template = Settings.info_template
        source = alert["Link"]
        if source != "":
            actions.append(make_link(source, "Read more"))
        # cgi.escape was removed in Python 3.8 (the Sublime Text 4 plugin
        # host); html.escape with quote=False is the drop-in replacement.
        message = html.escape(alert["Message"], quote=False)
        if alert["Description"] == "":
            title = "{} - {}".format(level, alert["Check"])
            body = message
        else:
            title = "{}: {}".format(level, message)
            body = alert["Description"]
        return template.format(
            CSS=Settings.css,
            header=title,
            body=body,
            actions=" | ".join(actions))
class ValeEventListener(sublime_plugin.EventListener):
"""Monitors events related to Vale.
"""
def is_enabled(self):
syntax = self.view.settings().get("syntax")
return Settings.is_supported(syntax)
def on_modified_async(self, view):
Settings.clear_on_hover()
if Settings.get("vale_mode") == "background":
debug("running vale on modified")
view.run_command("vale", {"from_load": False})
def on_load_async(self, view):
if | |
<gh_stars>1-10
""" Module contains filter used in tidal time series analysis.
"""
## Python libary import.
from numpy import abs
import pandas as pd
import numpy as np
from vtools.data.vtime import seconds, minutes, hours
from scipy import array as sciarray
from scipy.signal import lfilter,firwin,filtfilt
from scipy.signal.filter_design import butter
from scipy.ndimage import gaussian_filter1d
#__all__=["boxcar","butterworth","daily_average","godin","cosine_lanczos",\
# "lowpass_cosine_lanczos_filter_coef","ts_gaussian_filter"]
_cached_filt_info = {}
###########################################################################
## Public interface.
###########################################################################
def process_cutoff(cutoff_frequency, cutoff_period, freq):
    """Resolve the low-pass cutoff as a fraction of the Nyquist frequency.

    Parameters
    ----------
    cutoff_frequency : float or None
        Cutoff expressed as a ratio of the Nyquist frequency, in [0, 1].
    cutoff_period : str, offset or None
        Cutoff period; converted with pandas ``to_offset`` and compared
        against the sampling frequency. Used only when
        ``cutoff_frequency`` is None.
    freq : pandas offset
        Sampling frequency of the series.

    Returns
    -------
    float
        Cutoff as a fraction of the Nyquist frequency.

    Raises
    ------
    ValueError
        If neither argument is given, or the frequency is out of range.
    """
    if cutoff_frequency is None:
        if cutoff_period is None:
            # Was `raise("...")`, which raises
            # "TypeError: exceptions must derive from BaseException"
            # instead of delivering the intended message.
            raise ValueError(
                "One of cutoff_frequency or cutoff_period must be given")
        cp = pd.tseries.frequencies.to_offset(cutoff_period)
        # Nyquist is half the sampling rate, hence the factor of 2.
        return 2.*freq/cp
    if cutoff_frequency < 0 or cutoff_frequency > 1.:
        raise ValueError("cutoff frequency must be 0 < cf < 1)")
    return cutoff_frequency
def cosine_lanczos(ts, cutoff_period=None, cutoff_frequency=None, filter_len=None,
                   padtype=None, padlen=None, fill_edge_nan=True):
    """Squared low-pass cosine-Lanczos filter on a regular time series.

    Parameters
    ----------
    ts : :class:`DataFrame <pandas:pandas.DataFrame>`
        Regular time series (``ts.index.freq`` must be set).
    cutoff_period : string or :ref:`time_interval<time_intervals>`
        Period of the cutoff frequency. If given as a string it must be
        convertible to a pandas offset. Mutually exclusive with
        ``cutoff_frequency``.
    cutoff_frequency : float, optional
        Cutoff frequency expressed as a ratio of the Nyquist frequency,
        within (0, 1). E.g. hourly sampling (Nyquist 1/2h) with a 36h
        cutoff period gives (1/36)/0.5 = 0.056.
    filter_len : int or time_interval, optional
        Half-size of the Lanczos window; defaults to the number of samples
        within 1.25 cutoff periods.
    padtype : str or None, optional
        'odd', 'even', 'constant', or None. Type of extension used for the
        padded signal to which the filter is applied. None (default) means
        no padding.
    padlen : int or None, optional
        Number of elements by which to extend the data at both ends before
        filtering; must be less than the data length. Defaults to ``6*m``
        when ``padtype`` is given.
    fill_edge_nan : bool, optional
        If no padding is used and this is True (default), the first and
        last ``2*m`` values are set to NaN to mask edge effects.

    Returns
    -------
    result : same type as ``ts``
        A new series with the same interval as ``ts``.

    Raises
    ------
    ValueError
        If the series is irregular, the cutoff specification is invalid,
        ``padtype`` is unknown, or ``padlen`` exceeds the data size.
    TypeError
        If ``filter_len`` is neither an int nor divisible by the series
        frequency.
    """
    freq = ts.index.freq
    if freq is None:
        raise ValueError("Time series has no frequency attribute")
    cf = process_cutoff(cutoff_frequency, cutoff_period, freq)
    m = filter_len
    if m is None:
        # Default window: 1.25 cutoff periods worth of samples.
        m = int(1.25 * 2. / cf)
    elif type(m) != int:
        try:
            # Interpret a time-interval filter_len in units of samples.
            m = int(m / freq)
        except Exception as e:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained for context.
            raise TypeError("filter_len was not an int or divisible by filter_len (probably a type incompatiblity)") from e
    if m < 1:
        raise ValueError("bad input cutoff period or frequency")
    # NOTE: NaNs in the input are passed straight to filtfilt; a disabled
    # (`if False:`) zero-fill/re-mask scheme was removed as dead code.
    data = np.array(ts.values).copy()
    if padtype is not None:
        if not padtype in ["odd", "even", "constant"]:
            raise ValueError("unkown padtype :" + padtype)
    if (padlen is None) and (padtype is not None):
        padlen = 6 * m
    if padlen is not None:
        if padlen > len(data):
            raise ValueError("Padding length is more than data size")
    # Filter coefficients; len(coefs) == 2*m + 1.
    coefs = lowpass_cosine_lanczos_filter_coef(cf, int(m))
    d2 = filtfilt(coefs, [1.0], data, axis=0, padtype=padtype, padlen=padlen)
    # Without padding the first/last 2*m points are contaminated by edge
    # effects; optionally blank them. (The original indexed with a spurious
    # np.newaxis, which assigns the same elements.)
    if (padtype is None) and (fill_edge_nan == True):
        d2[0:2*m] = np.nan
        d2[len(d2)-2*m:len(d2)] = np.nan
    out = ts.copy(deep=True)
    out[:] = d2
    return out
def butterworth(ts, cutoff_period=None, cutoff_frequency=None, order=4):
    """Low-pass butterworth-squared filter on a regular time series.

    Parameters
    ----------
    ts : :class:`DataFrame <pandas:pandas.DataFrame>`
        Must be one or two dimensional, and regular.
    order : int, optional
        Overall filter order; must be even because the forward-backward
        pass (filtfilt) doubles the half-order. The default is 4.
    cutoff_frequency : float, optional
        Cutoff frequency expressed as a ratio of the Nyquist frequency,
        within (0, 1). E.g. hourly sampling (Nyquist 1/2h) with a 36h
        cutoff period gives (1/36)/0.5 = 0.056.
    cutoff_period : string or :ref:`time_interval<time_intervals>`
        Period corresponding to the cutoff frequency; if a string, it must
        be convertible to a pandas offset. Mutually exclusive with
        ``cutoff_frequency``.

    Returns
    -------
    result :
        A new regular time series with the same interval as ts.

    Raises
    ------
    ValueError
        If the order is odd, or both/neither of cutoff_period and
        cutoff_frequency are given.
    """
    if (order % 2):
        raise ValueError("only even order is accepted")
    freq = ts.index.freq
    if (cutoff_frequency is not None) and (cutoff_period is not None):
        raise ValueError("cutoff_frequency and cutoff_period can't be specified simultaneously")
    if (cutoff_frequency is None) and (cutoff_period is None):
        # This early check makes the old fallback branch (which referenced
        # the undefined names `butterworth_cutoff_frequencies`/`interval`
        # and could never be reached) unnecessary; it has been removed.
        raise ValueError("Either cutoff_frequency or cutoff_period must be given")
    cf = cutoff_frequency
    if cf is None:
        cutoff_period = pd.tseries.frequencies.to_offset(cutoff_period)
        # Nyquist is half the sampling rate, hence the factor of 2.
        cf = 2.*freq/cutoff_period
    # butter() requires an integer order; under Python 3 `order/2` is a
    # float, so use floor division.
    [b, a] = butter(order // 2, cf)
    # Forward-backward filtering squares the response and keeps zero phase.
    d2 = filtfilt(b, a, ts.values, axis=0, padlen=90)
    out = ts.copy(deep=True)
    out[:] = d2
    return out
def lowpass_cosine_lanczos_filter_coef(cf, m, normalize=True):
    """Return convolution coefficients for the low-pass Lanczos filter.

    Parameters
    ----------
    cf : float
        Cutoff frequency expressed as a ratio of a Nyquist frequency.
    m : int
        Half-size of the filtering window; the result has ``2*m + 1`` taps.
    normalize : bool, optional
        When True (default), scale the taps to sum to one.

    Returns
    -------
    numpy.ndarray
        Symmetric array of window coefficients (cached per ``(cf, m)``).
    """
    cache_key = (cf, m)
    if cache_key in _cached_filt_info:
        return _cached_filt_info[cache_key]
    # One-sided taps: sinc response times the Lanczos sigma taper.
    coscoef = [cf*np.sin(np.pi*k*cf)/(np.pi*k*cf)
               for k in np.arange(1, m+1, 1, dtype='d')]
    sigma = [np.sin(np.pi*k/m)/(np.pi*k/m)
             for k in np.arange(1, m+1, 1, dtype='float')]
    tapered = [c*s for c, s in zip(coscoef, sigma)]
    # Mirror around the center tap (whose value is cf).
    window = np.array(tapered[-1::-1] + [cf] + tapered)
    if normalize:
        window = window/window.sum()
    _cached_filt_info[cache_key] = window
    return window
def generate_godin_fir(freq):
    '''Generate the Godin filter impulse response for the given freq.

    freq is a pandas frequency/offset. The response is the convolution of
    two 24-hour boxcars with one 24h50m (lunar-day) boxcar, sampled at
    freq, and is cached per frequency string.
    '''
    freqstr = str(freq)
    if freqstr in _cached_filt_info:
        return _cached_filt_info[freqstr]
    dt_sec = int(freq/seconds(1))
    # 24-hour boxcar: 24 taps for hourly data, 96 for 15min.
    nsample24 = int(86400//dt_sec)
    wts24 = np.zeros(nsample24, dtype='d')
    wts24[:] = 1./nsample24
    # 24h50m (1490 min) boxcar, forced to an odd number of taps so it is
    # symmetric about a central sample.
    nsample25 = (1490*60)//dt_sec
    if nsample25 % 2 == 0:
        nsample25 += 1
    wts25 = np.zeros(nsample25, dtype='d')
    wts25[:] = 1.0/nsample25
    # (A redundant second construction of wts24 was removed here.)
    v = np.convolve(wts25, np.convolve(wts24, wts24))
    _cached_filt_info[freqstr] = v
    return v
def godin(ts):
""" Low-pass Godin filter a regular time series.
Applies the :math:`\mathcal{A_{24}^{2}A_{25}}` Godin filter [1]_
The filter is generalized to be the equivalent of one
boxcar of the length of the lunar diurnal (~25 hours)
constituent and two of the solar diurnal (~24 hours), though the
implementation combines these steps.
Parameters
-----------
ts : :class:`DataFrame <pandas:pandas.DataFrame>`
Returns
-------
result : :class:`DataFrame <pandas:pandas.DataFrame>`
A new regular | |
process closes before the
# command is finished. If you would like your application to print a warning message, then set the
# broken_pipe_warning attribute to the message you want printed.`
if self.broken_pipe_warning:
sys.stderr.write(self.broken_pipe_warning)
# ----- Methods related to tab completion -----
def _reset_completion_defaults(self) -> None:
    """
    Restore tab-completion state to its defaults

    Needs to be called each time readline runs tab completion
    """
    # Fresh container objects are created on every call (dict literal),
    # so instances never share the same list.
    defaults = {
        'allow_appended_space': True,
        'allow_closing_quote': True,
        'completion_hint': '',
        'completion_header': '',
        'completion_matches': [],
        'display_matches': [],
        'matches_delimited': False,
        'matches_sorted': False,
    }
    for attr, value in defaults.items():
        setattr(self, attr, value)
    # Hook our custom match display into whichever readline variant is active
    if rl_type == RlType.GNU:
        readline.set_completion_display_matches_hook(self._display_matches_gnu_readline)
    elif rl_type == RlType.PYREADLINE:
        # noinspection PyUnresolvedReferences
        readline.rl.mode._display_completions = self._display_matches_pyreadline
def tokens_for_completion(self, line: str, begidx: int, endidx: int) -> Tuple[List[str], List[str]]:
    """Used by tab completion functions to get all tokens through the one being completed.

    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :return: A 2 item tuple where the items are

             **On Success**
             - tokens: list of unquoted tokens - this is generally the list needed for tab completion functions
             - raw_tokens: list of tokens with any quotes preserved = this can be used to know if a token was quoted
               or is missing a closing quote

             Both lists are guaranteed to have at least 1 item. The last item in both lists is the token being tab
             completed

             **On Failure**
             - Two empty lists
    """
    import copy
    unclosed_quote = ''
    quotes_to_try = copy.copy(constants.QUOTES)
    # Only the text up to the cursor matters for completion.
    tmp_line = line[:endidx]
    tmp_endidx = endidx
    # Parse the line into tokens
    while True:
        try:
            initial_tokens = shlex_split(tmp_line[:tmp_endidx])
            # If the cursor is at an empty token outside of a quoted string,
            # then that is the token being completed. Add it to the list.
            if not unclosed_quote and begidx == tmp_endidx:
                initial_tokens.append('')
            break
        except ValueError as ex:
            # Make sure the exception was due to an unclosed quote and
            # we haven't exhausted the closing quotes to try
            # NOTE(review): matching the exception by message text is
            # brittle across shlex versions -- confirm before upgrading.
            if str(ex) == "No closing quotation" and quotes_to_try:
                # Add a closing quote and try to parse again
                unclosed_quote = quotes_to_try[0]
                quotes_to_try = quotes_to_try[1:]
                tmp_line = line[:endidx]
                tmp_line += unclosed_quote
                tmp_endidx = endidx + 1
            else:
                # The parsing error is not caused by unclosed quotes.
                # Return empty lists since this means the line is malformed.
                return [], []
    # Further split tokens on punctuation characters
    raw_tokens = self.statement_parser.split_on_punctuation(initial_tokens)
    # Save the unquoted tokens
    tokens = [utils.strip_quotes(cur_token) for cur_token in raw_tokens]
    # If the token being completed had an unclosed quote, we need
    # to remove the closing quote that was added in order for it
    # to match what was on the command line.
    if unclosed_quote:
        raw_tokens[-1] = raw_tokens[-1][:-1]
    return tokens, raw_tokens
def delimiter_complete(self, text: str, line: str, begidx: int, endidx: int,
                       match_against: Iterable, delimiter: str) -> List[str]:
    """
    Performs tab completion against a list but each match is split on a delimiter and only
    the portion of the match being tab completed is shown as the completion suggestions.
    This is useful if you match against strings that are hierarchical in nature and have a
    common delimiter.

    A classic example is path completion: paths are directories/files delimited by a slash.
    Instead of listing every full path, only the final path component of each match is shown,
    which is visually cleaner and easier to scan. The same applies to strings like
    company::department::name, where the user only needs to see the segment currently being
    narrowed down.

    :param text: the string prefix we are attempting to match (all matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param match_against: the list being matched against
    :param delimiter: what delimits each portion of the matches (ex: paths are delimited by a slash)
    :return: a list of possible tab completions
    """
    matches = utils.basic_complete(text, line, begidx, endidx, match_against)
    if not matches:
        return matches
    # Flag for proper quoting of matches containing spaces
    self.matches_delimited = True
    # The common prefix of all matches determines which delimited segment
    # is currently being completed.
    shared_prefix = os.path.commonprefix(matches)
    shared_segments = shared_prefix.split(delimiter)
    segment_index = len(shared_segments) - 1 if shared_segments else 0
    # Show only that segment of each match in the suggestion list
    for full_match in matches:
        segment = full_match.split(delimiter)[segment_index]
        self.display_matches.append(segment if segment else delimiter)
    return matches
def flag_based_complete(self, text: str, line: str, begidx: int, endidx: int,
                        flag_dict: Dict[str, Union[Iterable, Callable]], *,
                        all_else: Union[None, Iterable, Callable] = None) -> List[str]:
    """Tab completes based on a particular flag preceding the token being completed.

    :param text: the string prefix we are attempting to match (all matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param flag_dict: dictionary whose structure is the following:
                      `keys` - flags (ex: -c, --create) that result in tab completion for the next argument in the
                      command line
                      `values` - there are two types of values:
                      1. iterable list of strings to match against (dictionaries, lists, etc.)
                      2. function that performs tab completion (ex: path_complete)
    :param all_else: an optional parameter for tab completing any token that isn't preceded by a flag in flag_dict
    :return: a list of possible tab completions
    """
    tokens, _ = self.tokens_for_completion(line, begidx, endidx)
    if not tokens:
        return []
    # The token immediately before the one being completed selects the
    # matcher; fall back to all_else when it isn't a known flag.
    # (A flag can only precede the completed token when there are >= 2 tokens.)
    matcher = all_else
    if len(tokens) > 1 and tokens[-2] in flag_dict:
        matcher = flag_dict[tokens[-2]]
    # An iterable is matched directly; a callable performs its own completion.
    if isinstance(matcher, Iterable):
        return utils.basic_complete(text, line, begidx, endidx, matcher)
    if callable(matcher):
        return matcher(text, line, begidx, endidx)
    return []
def index_based_complete(self, text: str, line: str, begidx: int, endidx: int,
index_dict: Mapping[int, Union[Iterable, Callable]], *,
all_else: Union[None, Iterable, Callable] = None) -> List[str]:
"""Tab completes based on a fixed position in the input string.
:param text: the string prefix we are attempting to match (all matches must begin with it)
:param line: the current input line with leading whitespace removed
:param begidx: the beginning index of the prefix text
:param endidx: the ending index of the prefix text
:param index_dict: dictionary whose structure is the following:
`keys` - 0-based token indexes into command line that determine which tokens perform tab
completion
`values` - there are two types of values:
1. iterable list of strings to match against (dictionaries, lists, etc.)
2. function that performs tab completion (ex: path_complete)
:param all_else: an optional parameter for tab completing any token that isn't at an index in index_dict
:return: a list of possible tab completions
"""
# Get all tokens through the one being completed
tokens, _ = self.tokens_for_completion(line, begidx, endidx)
if not tokens:
return []
matches = []
# Get the index of the token being completed
index = len(tokens) - 1
# Check if token is at an index in the dictionary
if index in index_dict:
match_against = index_dict[index]
else:
match_against = all_else
# Perform tab completion using a Iterable
if isinstance(match_against, Iterable):
matches = utils.basic_complete(text, line, begidx, endidx, match_against)
# Perform | |
when sending a request.
:ivar type: The type of the target. Possible values include: "TargetGroup", "SqlDatabase",
"SqlElasticPool", "SqlShardMap", "SqlServer".
:vartype type: str or ~azure.mgmt.sql.models.JobTargetType
:ivar server_name: The server name.
:vartype server_name: str
:ivar database_name: The database name.
:vartype database_name: str
"""
_validation = {
'type': {'readonly': True},
'server_name': {'readonly': True},
'database_name': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'server_name': {'key': 'serverName', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
}
def __init__(
    self,
    **kwargs
):
    """All three attributes are server-populated (marked readonly in
    `_validation`), so they are initialized to None and any values sent
    by the client are ignored."""
    super(JobExecutionTarget, self).__init__(**kwargs)
    self.type = None
    self.server_name = None
    self.database_name = None
class JobListResult(msrest.serialization.Model):
    """A list of jobs (one page of results plus a continuation link).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.Job]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """
    # Both fields are readonly: the service fills them in and any
    # client-supplied values are dropped during serialization.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Job]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(JobListResult, self).__init__(**kwargs)
        # Server-populated; always None on the client side until deserialized.
        self.value = None
        self.next_link = None
class JobSchedule(msrest.serialization.Model):
    """Scheduling properties of a job.

    :param start_time: Schedule start time.
    :type start_time: ~datetime.datetime
    :param end_time: Schedule end time.
    :type end_time: ~datetime.datetime
    :param type: Schedule interval type. Possible values include: "Once", "Recurring". Default
     value: "Once".
    :type type: str or ~azure.mgmt.sql.models.JobScheduleType
    :param enabled: Whether or not the schedule is enabled.
    :type enabled: bool
    :param interval: Value of the schedule's recurring interval, if the ScheduleType is recurring.
     ISO8601 duration format.
    :type interval: str
    """

    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'type': {'key': 'type', 'type': 'str'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'interval': {'key': 'interval', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize a schedule; defaults describe a one-shot ("Once") schedule
        spanning the full representable datetime range."""
        super(JobSchedule, self).__init__(**kwargs)
        self.start_time = kwargs.get('start_time', "0001-01-01T00:00:00+00:00")
        self.end_time = kwargs.get('end_time', "9999-12-31T11:59:59+00:00")
        self.type = kwargs.get('type', "Once")
        self.enabled = kwargs.get('enabled', None)
        self.interval = kwargs.get('interval', None)
class JobStep(ProxyResource):
    """A job step.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param step_id: The job step's index within the job. If not specified when creating the job
     step, it will be created as the last step. If not specified when updating the job step, the
     step id is not modified.
    :type step_id: int
    :param target_group: The resource ID of the target group that the job step will be executed on.
    :type target_group: str
    :param credential: The resource ID of the job credential that will be used to connect to the
     targets.
    :type credential: str
    :param action: The action payload of the job step.
    :type action: ~azure.mgmt.sql.models.JobStepAction
    :param output: Output destination properties of the job step.
    :type output: ~azure.mgmt.sql.models.JobStepOutput
    :param execution_options: Execution options for the job step.
    :type execution_options: ~azure.mgmt.sql.models.JobStepExecutionOptions
    """

    # id/name/type come from the base resource and are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'step_id': {'key': 'properties.stepId', 'type': 'int'},
        'target_group': {'key': 'properties.targetGroup', 'type': 'str'},
        'credential': {'key': 'properties.credential', 'type': 'str'},
        'action': {'key': 'properties.action', 'type': 'JobStepAction'},
        'output': {'key': 'properties.output', 'type': 'JobStepOutput'},
        'execution_options': {'key': 'properties.executionOptions', 'type': 'JobStepExecutionOptions'},
    }

    def __init__(self, **kwargs):
        """Initialize a job step from keyword arguments; unset fields default to ``None``."""
        super(JobStep, self).__init__(**kwargs)
        self.step_id = kwargs.get('step_id', None)
        self.target_group = kwargs.get('target_group', None)
        self.credential = kwargs.get('credential', None)
        self.action = kwargs.get('action', None)
        self.output = kwargs.get('output', None)
        self.execution_options = kwargs.get('execution_options', None)
class JobStepAction(msrest.serialization.Model):
    """The action to be executed by a job step.

    All required parameters must be populated in order to send to Azure.

    :param type: Type of action being executed by the job step. Possible values include: "TSql".
     Default value: "TSql".
    :type type: str or ~azure.mgmt.sql.models.JobStepActionType
    :param source: The source of the action to execute. Possible values include: "Inline". Default
     value: "Inline".
    :type source: str or ~azure.mgmt.sql.models.JobStepActionSource
    :param value: Required. The action value, for example the text of the T-SQL script to execute.
    :type value: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the action.

        Raises ``KeyError`` if the required ``value`` keyword is missing.
        """
        super(JobStepAction, self).__init__(**kwargs)
        self.type = kwargs.get('type', "TSql")
        self.source = kwargs.get('source', "Inline")
        # Required field: indexing (not .get) enforces its presence.
        self.value = kwargs['value']
class JobStepExecutionOptions(msrest.serialization.Model):
    """The execution options of a job step.

    :param timeout_seconds: Execution timeout for the job step.
    :type timeout_seconds: int
    :param retry_attempts: Maximum number of times the job step will be reattempted if the first
     attempt fails.
    :type retry_attempts: int
    :param initial_retry_interval_seconds: Initial delay between retries for job step execution.
    :type initial_retry_interval_seconds: int
    :param maximum_retry_interval_seconds: The maximum amount of time to wait between retries for
     job step execution.
    :type maximum_retry_interval_seconds: int
    :param retry_interval_backoff_multiplier: The backoff multiplier for the time between retries.
    :type retry_interval_backoff_multiplier: float
    """

    _attribute_map = {
        'timeout_seconds': {'key': 'timeoutSeconds', 'type': 'int'},
        'retry_attempts': {'key': 'retryAttempts', 'type': 'int'},
        'initial_retry_interval_seconds': {'key': 'initialRetryIntervalSeconds', 'type': 'int'},
        'maximum_retry_interval_seconds': {'key': 'maximumRetryIntervalSeconds', 'type': 'int'},
        'retry_interval_backoff_multiplier': {'key': 'retryIntervalBackoffMultiplier', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        """Initialize execution options; defaults are 12h timeout, 10 retries,
        and exponential backoff from 1s up to 120s with multiplier 2."""
        super(JobStepExecutionOptions, self).__init__(**kwargs)
        self.timeout_seconds = kwargs.get('timeout_seconds', 43200)
        self.retry_attempts = kwargs.get('retry_attempts', 10)
        self.initial_retry_interval_seconds = kwargs.get('initial_retry_interval_seconds', 1)
        self.maximum_retry_interval_seconds = kwargs.get('maximum_retry_interval_seconds', 120)
        self.retry_interval_backoff_multiplier = kwargs.get('retry_interval_backoff_multiplier', 2)
class JobStepListResult(msrest.serialization.Model):
    """A paged list of job steps.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.JobStep]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    # Both fields are server-populated, hence read-only.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[JobStep]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the model; read-only fields start as ``None``."""
        super(JobStepListResult, self).__init__(**kwargs)
        self.value = None
        self.next_link = None
class JobStepOutput(msrest.serialization.Model):
    """The output configuration of a job step.

    All required parameters must be populated in order to send to Azure.

    :param type: The output destination type. Possible values include: "SqlDatabase". Default
     value: "SqlDatabase".
    :type type: str or ~azure.mgmt.sql.models.JobStepOutputType
    :param subscription_id: The output destination subscription id.
    :type subscription_id: str
    :param resource_group_name: The output destination resource group.
    :type resource_group_name: str
    :param server_name: Required. The output destination server name.
    :type server_name: str
    :param database_name: Required. The output destination database.
    :type database_name: str
    :param schema_name: The output destination schema.
    :type schema_name: str
    :param table_name: Required. The output destination table.
    :type table_name: str
    :param credential: Required. The resource ID of the credential to use to connect to the output
     destination.
    :type credential: str
    """

    _validation = {
        'server_name': {'required': True},
        'database_name': {'required': True},
        'table_name': {'required': True},
        'credential': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
        'server_name': {'key': 'serverName', 'type': 'str'},
        'database_name': {'key': 'databaseName', 'type': 'str'},
        'schema_name': {'key': 'schemaName', 'type': 'str'},
        'table_name': {'key': 'tableName', 'type': 'str'},
        'credential': {'key': 'credential', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize the output configuration.

        Raises ``KeyError`` if any of the required keywords (``server_name``,
        ``database_name``, ``table_name``, ``credential``) is missing.
        """
        super(JobStepOutput, self).__init__(**kwargs)
        self.type = kwargs.get('type', "SqlDatabase")
        self.subscription_id = kwargs.get('subscription_id', None)
        self.resource_group_name = kwargs.get('resource_group_name', None)
        # Required fields: indexing (not .get) enforces their presence.
        self.server_name = kwargs['server_name']
        self.database_name = kwargs['database_name']
        self.schema_name = kwargs.get('schema_name', "dbo")
        self.table_name = kwargs['table_name']
        self.credential = kwargs['credential']
class JobTarget(msrest.serialization.Model):
"""A job target, for example a specific database or a container of databases that is evaluated during job execution.
All required parameters must be populated in order to send to Azure.
:param membership_type: Whether the target is included or excluded from the group. Possible
values include: "Include", "Exclude". Default value: "Include".
:type membership_type: str or ~azure.mgmt.sql.models.JobTargetGroupMembershipType
:param type: Required. The target type. Possible values include: "TargetGroup", "SqlDatabase",
"SqlElasticPool", "SqlShardMap", "SqlServer".
:type type: str or ~azure.mgmt.sql.models.JobTargetType
:param server_name: The target server name.
:type server_name: str
:param database_name: The target database name.
:type database_name: str
:param elastic_pool_name: The target elastic pool name.
:type elastic_pool_name: str
:param shard_map_name: The target shard map.
:type shard_map_name: str
:param refresh_credential: The resource ID of the credential that is used during job execution
to connect to the target and determine the list of databases inside the target.
:type refresh_credential: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'membership_type': {'key': 'membershipType', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'server_name': {'key': 'serverName', 'type': 'str'},
| |
if
c.concepticon_id}
else: # All observed concepts
self.concepts_subset = set(concepts.values())
self.lang_to_concept = defaultdict(set)
self.data = defaultdict(lambda:defaultdict(list))
for row in progressbar(db.iter_table('FormTable'),
desc="Loading data..."):
if row.get("Loan", ""): continue # Ignore loan words
concept = concepts[row["Parameter_ID"]]
if concept not in self.concepts_subset or \
row["Language_ID"] not in langs or \
(not self.asjp and row["Segments"] is None) or \
(self.asjp and row["Graphemes"] is None):
continue
# TODO: if it has COGIDS, split on morphemes
# TODO: add a Word for each morpheme + morpheme cogid
try:
token = list(self._iter_phonemes(row))
except ValueError: continue # unknown sounds
if all([s in IGNORE for s in token]): continue
syllables = len(lingpy.sequence.sound_classes.syllabify(token,
output="nested"))
lang = langs[row["Language_ID"]]
# TODO: also add COGID
word = Word(lang=lang, syllables=syllables,
token=token, concept=concept, id=row["ID"],
original_token=" ".join(row["Segments"]), dataset=row["dataset"])
self.data[(lang, concept)][" ".join(token)].append(word)
self.lang_to_concept[lang].add(concept)
    def _iter_phonemes(self, row):
        """ Iterate over pre-processed phonemes from a row's token.

        The phonemes are usually from the "Segments" column, except for ASJP data
        where we retrieve them from "Graphemes".

        We pre-process by:
        - re-tokenizing on spaces, ignoring empty segments
        - selecting the second element when there is a slash
        - using the sound_class attribute function to obtain sound classes

        Any segment rejected by ``self.sound_class`` is recorded in
        ``self.errors`` and the ``ValueError`` is re-raised to the caller.

        Args:
            row (dict): dict of column name to value

        Yields:
            successive sound classes in the row's word.

        Raises:
            ValueError: if a segment cannot be resolved to a sound class.
        """
        # In some dataset, the separator defined in the metadata is " + ",
        # which means that tokens are not phonemes (ex:bodtkhobwa)
        # This is solved by re-tokenizing on the space...
        if self.asjp:
            segments = row["Graphemes"][1:-1]  # Ignore end and start symbols
        else:
            segments = row["Segments"]
        # Join then re-split so multi-phoneme "tokens" break apart; empty and
        # None segments are dropped first.
        tokens = " ".join([s for s in segments if (s is not None and s != "")]).split(" ")
        for segment in tokens:
            try:
                # "a/b" notation: the part after the slash is the intended value.
                if "/" in segment:
                    segment = segment.split("/")[1]
                yield self.sound_class(segment)
            except ValueError as e:
                # Log enough context to trace the offending segment, then
                # propagate so the caller can skip the whole row.
                self.errors.append((row["dataset"], row["Language_ID"], segment,
                                    " ".join(str(x) for x in segments), row["ID"]))
                raise e
def iter_candidates(self):
""" Iterate over word pair candidates.
Across all datasets, inside each genus, we consider all token
pairs for the same concept in all language pairs.
Yields:
tuples of `genus, (langA, tA, sA), (langB, tB, sB)`
genus (str): genus name
langA (str) and langB (str): glottocodes for the two languages
tA (list of str) and tB (list of str): the two tokens
sA (int) and sB (int): the syllable count for each token
"""
for genus in progressbar(self.genera_to_lang, desc="Genera"):
langs = self.genera_to_lang[genus]
lang_pairs = combinations(langs, r=2)
n_lang = len(langs)
tot_pairs = (n_lang * (n_lang - 1)) / 2
for langA, langB in progressbar(lang_pairs, total=tot_pairs,
desc="Language pairs"):
concepts_A = self.lang_to_concept[langA]
concepts_B = self.lang_to_concept[langB]
common_concepts = (concepts_A & concepts_B)
self.concepts_intersection[(langA, langB)] += len(common_concepts)
for concept in common_concepts:
for tokA, tokB in product(self.data[(langA, concept)],
self.data[(langB, concept)]):
# Here we grab the first word, but there may be other words,
# if this token is documented in other datasets.
# So far we don't really need the information.
wordA = self.data[(langA, concept)][tokA][0]
wordB = self.data[(langB, concept)][tokB][0]
yield wordA, wordB
def __iter__(self):
"""Iterate over the tokens.
Yields:
for all known tokens, its genus, language glottocode, concept, and the token itself.
"""
for lang, concept in self.data:
for token in self.data[(lang, concept)]:
yield self.data[(lang, concept)][token][0] # also picking a single word
def register(parser):
    """Register catalogs, output format and CLI options for this command."""
    # Standard catalogs can be "requested" as follows:
    add_catalog_spec(parser, "clts")
    add_catalog_spec(parser, "glottolog")
    add_format(parser, default='pipe')
    parser.description = run.__doc__
    # Table-driven registration keeps flag/option pairs in one place.
    option_specs = [
        ('--dataset', dict(
            action='store',
            default='lexicore',
            help='select a specific lexibank dataset (otherwise entire lexicore)')),
        ('--display', dict(
            action='store',
            default=None,
            help='select a display')),
        ('--threshold', dict(
            action='store',
            default=1,
            type=float,
            help='Max differences per syllable in the SCA string.')),
        ('--cutoff', dict(
            action='store',
            default=0.05,
            type=float,
            help='Cutoff for attested correspondences in a language pair, '
                 'in proportion of the list of cognates.')),
        ('--model', dict(
            choices=["BIPA", "ASJPcode", "Coarse"],
            default='BIPA',
            type=str,
            help='select a sound class model: BIPA, ASJPcode, or Coarse.')),
        ('--concepts', dict(
            action='store',
            default=None,
            type=str,
            help='select a concept list to filter on')),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
class Correspondences(object):
    """Extract sound correspondences.

    Attributes:
        args: the full args passed to the correspondences command.
        data (SoundCorrespsByGenera): the lexicore dataset
        clts (pyclts.CLTS): a clts instance
        sca_cache (dict): maps bipa sounds to SCA class (used for the cognate threshold).
        bipa_cache (dict): maps strings to bipa sounds.
        counts (Counter): occurrences of pairs of Sounds (the keys are frozensets).
        examples (defaultdict): example source words for pairs of sounds (the keys are frozensets).
        total_cognates (Counter): counts the number of cognates found for each pair of languages.
        tones (set): characters which denote tones. Allow for a fast identification.
            (using calls to bipa is too slow)
    """
    def __init__(self, args, data, clts):
        """ Initialization only records the arguments and defines the attributes.

        Args:
            args: the full args passed to the correspondences command.
            data (SoundCorrespsByGenera): the data
            clts (pyclts.CLTS): a clts instance
        """
        self.args = args
        self.data = data
        self.clts = clts
        # Memoization caches for sound lookups (resolve_sound is too slow to
        # call repeatedly; see bipa() and sca()).
        self.sca_cache = {}
        self.bipa_cache = {}
        # Accumulators filled while scanning word pairs.
        self.counts = Counter()
        self.examples = defaultdict(list)
        self.total_cognates = Counter()
        # Tone marks kept in a set for O(1) membership tests.
        self.tones = set("⁰¹²³⁴⁵˥˦˧˨↓↑↗↘")
def bipa(self, item):
""" Caches calls to the bipa transcription system, as resolve_sound is too slow.
Args:
item: a string representing a sound
Returns:
bipa (Sound): the corresponding BIPA sound
"""
try:
return self.bipa_cache[item]
except KeyError:
self.bipa_cache[item] = self.clts.bipa[item]
return self.bipa_cache[item]
def sca(self, item):
""" Caches calls to the SCA sound class system, as resolve_sound is too slow.
Args:
item: a string representing a sound
Returns:
sca (str): the corresponding SCA class
"""
try:
return self.sca_cache[item]
except KeyError:
self.sca_cache[item] = self.clts.soundclasses_dict["sca"][item]
return self.sca_cache[item]
    def find_available(self):
        """ Find which pairs of sounds from our data are available in each genera.

        - A pair of two distinct sounds x,y are available in a genus if the genus has at
          least two distinct languages A,B such that A has at least two occurrences of x
          and B has at least two occurrences of y.
        - A pair of a sound and a gap (x,-) is available in a genus if that genus has a
          language with at least two occurrences of x.
        - A pair of a sound and itself (x,x) is available in a genus if that genus has a
          language with at least two occurrences of x in two distinct languages.

        Returns:
            available (list of lists): Inner lists are rows with [family, genus, soundA, soundB]
        """
        self.args.log.info('Counting available corresp...')
        # (family, genus) -> sound -> language -> occurrence count
        sounds_by_genera = defaultdict(lambda: defaultdict(Counter))
        for word in self.data:
            for sound in word.token:
                if sound not in IGNORE: # spaces and segmentation symbols ignored
                    sounds_by_genera[(word.lang.family, word.lang.genus)][sound][word.lang] += 1
        available = list()
        for family, genus in list(sounds_by_genera):
            freq = sounds_by_genera[(family, genus)]
            n_sounds = len(freq)
            tot_sound_pairs = (n_sounds * (n_sounds - 1)) / 2
            sound_pairs = combinations(freq, r=2)
            # First pass: identity pairs (x, x) and gap pairs (x, -).
            for sound_A in progressbar(freq):
                if sound_A != "-": # No point in counting corresp between blank and itself
                    # Languages where sound_A occurs more than once.
                    occ = {lg for lg in freq[sound_A] if freq[sound_A][lg] > 1}
                    if len(occ) > 1:
                        available.append([family, genus, sound_A, sound_A])
                    if len(occ) > 0:
                        available.append([family, genus, sound_A, "-"])
            # Second pass: pairs of distinct sounds; each must recur in some
            # language, and the pair must span at least two languages.
            for sound_A, sound_B in progressbar(sound_pairs, total=tot_sound_pairs):
                occ_A = {lg for lg in freq[sound_A] if freq[sound_A][lg] > 1}
                occ_B = {lg for lg in freq[sound_B] if freq[sound_B][lg] > 1}
                if occ_A and occ_B and len(occ_A | occ_B) > 1:
                    # Canonical order so (x, y) and (y, x) collapse to one row.
                    sound_A, sound_B = tuple(sorted((sound_A, sound_B)))
                    available.append([family, genus, sound_A, sound_B])
        return available
def allowed_differences(self, sa, sb):
""" Compute the number of allowed differences for two syllable length.
Args:
sa (int): number of syllables in the first token
sb (int): number of syllables in the second token
Returns:
diff (int): a threshold above which two words of these lengths
can be considered cognates.
"""
return max(sa, sb) * self.args.threshold
def sounds_and_contexts(self, almA, almB):
""" Iterator of sounds and contexts for a pair of aligned tokens.
Args:
almA (list of str): aligned elements of the first token.
almB (list of str): aligned elements of the second token.
Yields: pair of aligned sounds and their contexts: `(sA, cA), (sB, cB)`
sA (str) and sB (str): aligned sounds from resp. almA and almB
cA (str) and cB (str): contexts for resp. sA and sB
"""
def to_categories(sequence):
"""Turn a sequence of sounds into a sequence of categories used in | |
from django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect, reverse
from django.views.generic import ListView, View, CreateView, UpdateView, DeleteView, RedirectView, DetailView
from .models import JobOpening, ShareJob, RequestJob
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.messages.views import SuccessMessageMixin
from .forms import CreateJobForm, CreateShareForm, ShareJobEditForm, QuoteJobForm
from django.contrib import messages
from notifications.signals import notify
from notifications.models import Notification
# REST FRAMEWORK
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
# Create your views here.
class AllJobs(ListView):
    """Paginated public listing of every JobOpening, newest first."""

    ordering = ['-date_posted']
    context_object_name = 'posts'
    paginate_by = 30
    template_name = 'job/job_home.html'

    def get_context_data(self, *args, **kwargs):
        """Expose the view's model class to the template."""
        context = super().get_context_data(*args, **kwargs)
        context['model'] = self.model
        return context

    def get_queryset(self):
        """Return all job openings ordered by most recent posting date."""
        return JobOpening.objects.all().order_by('-date_posted')
class CreateJobOpening(LoginRequiredMixin, CreateView):
    """Create a JobOpening owned by the logged-in user."""
    model = JobOpening

    def get(self, request):
        """Render an empty job-creation form."""
        return render(request, 'job/job_create_form.html',
                      {'form': CreateJobForm()})

    def post(self, request):
        """Validate the submitted form and create the JobOpening.

        On success, redirects to the job list. On failure, re-renders the
        bound form so validation errors are displayed (the previous code only
        printed the uncalled ``form.non_field_errors`` method, which showed a
        method repr instead of the actual errors, and discarded them).
        """
        # No need to re-check request.method inside post(): Django's
        # dispatch already routed here because the method is POST.
        form = CreateJobForm(request.POST or None)
        if form.is_valid():
            data = form.cleaned_data
            # objects.create() saves the row; no extra .save() call needed.
            JobOpening.objects.create(
                user=request.user,
                job_title=data['job_title'],
                job_description=data['job_description'],
                company_description=data['company_description'],
                company_name=data['company_name'],
                industry=data['industry'],
                field=data['field'],
                education=data['education'],
                experience=data['experience'],
                send_cv_directly=data['send_cv_directly'],
                job_type=data['job_type'],
                method_of_application=data['method_of_application'],
                state=data['state'],
            )
            return redirect('all_jobs')
        # Invalid submission: re-render with the bound (error-carrying) form.
        return render(request, 'job/job_create_form.html', {'form': form})
class JobDetail(DetailView):
    """Display a single JobOpening, with the current user's saved state."""

    def get(self, request, *args, **kwargs):
        """Render the job detail page.

        ``is_thread`` and ``is_liked`` are always False here: like/thread
        support for job posts is not implemented yet, but the template
        expects the flags. The unused ``UserId`` local from the old code
        has been removed.
        """
        post = get_object_or_404(JobOpening, pk=kwargs['pk'])
        is_saved = request.user in post.saved.all()
        context = {
            'object': post,
            'is_thread': False,
            'is_liked': False,
            'is_saved': is_saved,
        }
        return render(request, 'job/job_detail.html', context)
class JobUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Allow the author of a JobOpening to edit it."""
    model = JobOpening
    fields = [
        'job_title',
        'job_description',
        'company_description',
        'company_name',
        'job_type',
        'education',
        'industry',
        'field',
        'state',
        'company_email',
        'experience',
        'method_of_application',
        'send_cv_directly',
    ]
    template_name = 'blog/postupdate.html'

    def form_valid(self, form):
        """Stamp the current user as the job's owner before saving."""
        form.instance.user = self.request.user
        return super().form_valid(form)

    def test_func(self):
        """Only the job's author may edit it."""
        return self.request.user == self.get_object().user
class DeleteJobView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Allow the author of a JobOpening to delete it and its notifications."""
    model = JobOpening
    # success_url = reverse('all_jobs')

    def test_func(self):
        """Only the job's author may delete it."""
        return self.request.user == self.get_object().user

    def delete(self, request, *args, **kwargs):
        """Delete the job and every notification that points at it.

        The queryset filter already restricts rows to this job's pk
        (``action_object_object_id=post.pk``), so the old per-row loop that
        re-compared ``int(action_object_object_id) == post.id`` was
        redundant; deleting the whole queryset in one call is equivalent
        and issues a single DELETE.
        """
        post = self.get_object()
        Notification.objects.filter(
            action_object_content_type__model='jobopening',
            action_object_object_id=post.pk,
        ).delete()
        post.delete()
        return redirect('all_jobs')
class SaveJobToggle(LoginRequiredMixin, RedirectView):
    """Toggle whether the current user has saved a job, then redirect back."""

    def get_redirect_url(self, *args, **kwargs):
        """Flip the user's membership in the job's saved set and return the
        job's absolute URL."""
        job = get_object_or_404(JobOpening, pk=self.kwargs.get('pk'))
        destination = job.get_absolute_url()
        user = self.request.user
        if user.is_authenticated:
            if user in job.saved.all():
                job.saved.remove(user)
            else:
                job.saved.add(user)
        return destination
class SaveJobAPIToggle(APIView):
    """JSON endpoint mirroring SaveJobToggle for AJAX callers."""
    authentication_classes = [authentication.SessionAuthentication]
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request, pk=None, format=None):
        """Toggle the saved state for the current user and report the result."""
        job = get_object_or_404(JobOpening, pk=pk)
        user = self.request.user
        is_saved = False
        updated = False
        if user.is_authenticated:
            if user in job.saved.all():
                job.saved.remove(user)
            else:
                job.saved.add(user)
                is_saved = True
            updated = True
        return Response({
            'updated': updated,
            'is_saved': is_saved,
        })
class ShareJobView(LoginRequiredMixin, CreateView):
    """Share (re-post) a JobOpening with an optional comment and image."""
    model = ShareJob
    template_name = 'job/job_share.html'
    success_url = '/'
    context_object_name = 'post'

    def get(self, request, *args, **kwargs):
        """Render an empty share form for the requested job."""
        post = get_object_or_404(JobOpening, pk=kwargs['pk'])
        return render(request, 'job/job_share.html',
                      {'form': CreateShareForm(), 'post': post})

    def post(self, request, *args, **kwargs):
        """Create a ShareJob from the submitted form.

        On success redirects to the job's thread. On failure re-renders the
        bound form so errors are shown.

        BUG FIX: the old invalid-form branch rebuilt ``form`` as
        ``ShareJob()`` — a *model* instance, not a form — so the template
        received an unusable object and validation errors were lost.
        """
        pk = kwargs['pk']
        post = get_object_or_404(JobOpening, pk=pk)
        share_form = CreateShareForm(request.POST or None, request.FILES or None)
        if share_form.is_valid():
            # objects.create() saves the row; no extra .save() call needed.
            ShareJob.objects.create(
                job=post,
                user=request.user,
                content=share_form.cleaned_data['content'],
                image=share_form.cleaned_data['image'],
            )
            messages.success(request, 'Job Shared')
            return redirect('job-thread', pk=pk)
        # Invalid submission: re-render with the bound (error-carrying) form.
        return render(request, 'job/job_share.html',
                      {'post': post, 'form': share_form})
class ShareJobUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView, RedirectView):
    """Let the author of a ShareJob edit its content.

    ``post()`` also diffs @mentions and #hashtags between the old and new
    content; the notification/hashtag handling that would consume those
    diffs is currently commented out.
    """
    model = ShareJob
    # fields = ['content', 'image']
    # template_name = 'blog/share_update.html'

    def form_valid(self, form):
        # Keep the share owned by the editing user.
        form.instance.user = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the share's author may edit it.
        share = self.get_object()
        if self.request.user == share.user:
            return True
        return False

    def get(self, request, *args, **kwargs):
        """Render the edit form, pre-filling content with the author's @handle."""
        pk = kwargs['pk']
        share = ShareJob.objects.get(pk=pk)
        share_username = '@'+share.user.username
        form = ShareJobEditForm(instance=share, initial={'content': share_username})
        context = {
            'form': form,
            'post': share,
        }
        return render(request, 'blog/share_update.html', context)

    def post(self, request, *args, **kwargs):
        """Save the edited share and compute mention/hashtag diffs.

        Collects @mentions and #hashtags from the content before and after
        the edit; ``diff_mentions``/``diff_hastags`` hold only the newly
        added ones. The code that would act on them (notifications, tag
        rows) is commented out below.
        """
        pk = kwargs['pk']
        share = ShareJob.objects.get(pk=pk)
        form = ShareJobEditForm(instance=share)
        prev_mentions = []
        prev_hashtags = []
        new_mentions = []
        new_hashtags = []
        # Snapshot of the content as it was before this edit.
        prev_post = ShareJobEditForm(instance=share)
        prev_content = prev_post['content'].value()
        for word in prev_content.split():
            if word.startswith("@"):
                prev_mentions.append(word)
        for word in prev_content.split():
            if word.startswith("#"):
                prev_hashtags.append(word)
        if request.method == 'POST':
            form = ShareJobEditForm(request.POST or None, request.FILES or None, instance=share)
            if form.is_valid():
                content = form.cleaned_data['content']
                new_content = form['content'].value()
                for word in new_content.split():
                    if word.startswith("@"):
                        new_mentions.append(word)
                for word in new_content.split():
                    if word.startswith("#"):
                        new_hashtags.append(word)
                form.save()
                # Only mentions/hashtags that did not exist before the edit.
                diff_hastags = list(set(new_hashtags) - set(prev_hashtags))
                diff_mentions = list(set(new_mentions) - set(prev_mentions))
                # if len(diff_mentions) >= 1:
                #     for word in diff_mentions:
                #         if word.startswith("@"):
                #             w = word.replace('@', '')
                #             try:
                #                 if User.objects.get(username__iexact=w):
                #                     if request.user != User.objects.get(username__iexact=w):
                #                         notify.send(request.user, recipient=User.objects.get(username__iexact=w), verb='mentioned you in a post thread', action_object=share.post, description=content)
                #             except User.DoesNotExist:
                #                 pass
                # if len(diff_hastags) >= 1:
                #     del_hash = ShareTag.objects.filter(share__exact=pk)
                #     del_hash.delete()
                #     for word in new_hashtags:
                #         if word.startswith("#"):
                #             wo = word.replace('#', '')
                #             hash_tag = ShareTag(share=share, tag=wo, user= request.user)
                #             hash_tag.save()
                return HttpResponseRedirect(reverse('job-thread', kwargs={'pk': share.job.id}))
        else:
            form = ShareJobEditForm(instance=share)
            context = {
                'form': form,
                'share': share,
            }
        # Fallback (e.g. invalid form): bounce back to the share's own URL.
        return HttpResponseRedirect(share.get_absolute_url())

    def get_success_url(self):
        # NOTE(review): ShareJob is accessed as ``share.job`` everywhere else
        # in this file; ``self.object.post`` looks like it would raise
        # AttributeError if this method were ever reached — confirm against
        # the ShareJob model (post() above returns redirects itself, so this
        # may simply be dead code).
        return reverse('share-thread', kwargs={'pk': self.object.post.id})
class DeleteShareJob(LoginRequiredMixin, UserPassesTestMixin, DeleteView, RedirectView):
    """Allow the author of a ShareJob to delete it."""
    model = ShareJob
    template_name = 'job/share_confirm_delete.html'
    context_object_name = "share"

    def test_func(self):
        """Only the share's author may delete it."""
        return self.request.user == self.get_object().user

    def get_success_url(self):
        """Return to the thread of the job this share belonged to."""
        return reverse('job-thread', kwargs={'pk': self.object.job.pk})
class ShareJobThread(ListView):
    """List every share of one JobOpening, oldest first."""
    model = ShareJob
    paginate_by = 30
    context_object_name = 'shares'
    template_name = 'job/job_thread.html'

    def get_context_data(self, *args, **kwargs):
        """Add the original job post (and its id) to the template context."""
        context = super().get_context_data(*args, **kwargs)
        # NOTE(review): 'pk' is read from the query string here but from URL
        # kwargs in get_queryset — confirm which source templates rely on.
        context['post_id'] = self.request.GET.get('pk')
        context['ori_post'] = JobOpening.objects.get(pk=self.kwargs.get('pk'))
        return context

    def get_queryset(self):
        """Shares of the job identified by the URL pk, in posting order."""
        job_pk = self.kwargs.get('pk')
        return ShareJob.objects.filter(job__exact=job_pk).order_by('date_posted')
class LikeShareJobToggle(LoginRequiredMixin, RedirectView):
    """Toggle the current user's like on a ShareJob, then redirect back.

    The leftover debug ``print(pk)`` from the old code has been removed.
    """

    def get_redirect_url(self, *args, **kwargs):
        """Flip the user's membership in the share's likes and return its URL."""
        share = get_object_or_404(ShareJob, pk=self.kwargs.get('pk'))
        destination = share.get_absolute_url()
        user = self.request.user
        if user.is_authenticated:
            if user in share.likes.all():
                share.likes.remove(user)
            else:
                share.likes.add(user)
        return destination
class LikeShareApiToggle(APIView):
    """JSON endpoint that toggles a like on a ShareJob.

    * Requires session authentication.
    * Only authenticated users are able to access this view.
    """
    authentication_classes = [authentication.SessionAuthentication]
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request, pk=None, format=None):
        """Toggle the like and report the new state and like count."""
        share = get_object_or_404(ShareJob, pk=pk)
        user = self.request.user
        updated = False
        liked = False
        if user.is_authenticated:
            if user in share.likes.all():
                share.likes.remove(user)
            else:
                share.likes.add(user)
                liked = True
            updated = True
        return Response({
            "updated": updated,
            "liked": liked,
            "like_count": share.likes.count(),
        })
class QuoteJobShare(LoginRequiredMixin, CreateView):
def get(self, request, *args, **kwargs):
pk = kwargs['pk']
form = QuoteJobForm()
share_post = get_object_or_404(ShareJob, pk=pk)
context = {
'form': form,
'share_post': share_post,
}
return render(request, 'blog/quote.html', context)
def post(self, request, *args, **kwargs):
pk = kwargs['pk']
share_post = get_object_or_404(ShareJob, pk=pk)
if request.method == 'POST':
quote_form = QuoteJobForm(request.POST or None, request.FILES or None)
if quote_form.is_valid():
image = quote_form.cleaned_data['image']
content = quote_form.cleaned_data['content']
share_pk = pk
quote_create = ShareJob.objects.create(job=share_post.job, share_post=share_post, is_quote=True, user=request.user, content = content, image=image)
quote_create.save()
# for word in content.split():
# if len(word) > 1:
# if word.startswith("#"):
# wo = word.replace('#', '')
# hash_tag = ShareTag(share=quote_create, tag=wo, user=request.user)
# hash_tag.save()
# for word in content.split():
# if word.startswith("@"):
# w = word.replace('@', '')
# if User.objects.get(username__iexact=w):
# notify.send(request.user, recipient=User.objects.get(username__iexact=w),
# verb='mentioned you in a post thread', action_object=share_post.post, description=content)
# else:
# pass
messages.success(request, | |
flowing wells so that there is
not an abrupt change in flowing well rates.
rate : [double]
* rate (double) is the volumetric pumping rate for the multi-
aquifer well. A positive value indicates recharge and a
negative value indicates discharge (pumping). RATE only
applies to active (IBOUND > 0) multi-aquifer wells. If the
Options block includes a TIMESERIESFILE entry (see the "Time-
Variable Input" section), values can be obtained from a time
series by entering the time-series name in place of a numeric
value. By default, the RATE for each multi-aquifer well is
zero.
well_head : [double]
* well_head (double) is the head in the multi-aquifer well.
WELL_HEAD is only applied to constant head (STATUS is
CONSTANT) and inactive (STATUS is INACTIVE) multi-aquifer
wells. If the Options block includes a TIMESERIESFILE entry
(see the "Time-Variable Input" section), values can be
obtained from a time series by entering the time-series name
in place of a numeric value. The program will terminate with
an error if WELL_HEAD is less than the bottom of the well.
head_limit : [string]
* head_limit (string) is the limiting water level (head) in the
well, which is the minimum of the well RATE or the well
inflow rate from the aquifer. HEAD_LIMIT can be applied to
extraction wells (RATE < 0) or injection wells (RATE > 0).
HEAD_LIMIT can be deactivated by specifying the text string
'OFF'. The HEAD_LIMIT option is based on the HEAD_LIMIT
functionality available in the MNW2 (Konikow et al., 2009)
package for MODFLOW-2005. The HEAD_LIMIT option has been
included to facilitate backward compatibility with previous
versions of MODFLOW but use of the RATE_SCALING option
instead of the HEAD_LIMIT option is recommended. By default,
HEAD_LIMIT is 'OFF'.
shutoffrecord : [minrate, maxrate]
* minrate (double) is the minimum rate that a well must exceed
to shutoff a well during a stress period. The well will shut
down during a time step if the flow rate to the well from the
aquifer is less than MINRATE. If a well is shut down during a
time step, reactivation of the well cannot occur until the
next time step to reduce oscillations. MINRATE must be less
than MAXRATE.
* maxrate (double) is the maximum rate that a well must exceed
to reactivate a well during a stress period. The well will
reactivate during a time step if the well was shut down during
the previous time step and the flow rate to the well from the
aquifer exceeds MAXRATE. Reactivation of the well cannot
occur until the next time step if a well is shut down, to
reduce oscillations. MAXRATE must be greater than MINRATE.
rate_scalingrecord : [pump_elevation, scaling_length]
* pump_elevation (double) is the elevation of the multi-aquifer
well pump (PUMP_ELEVATION). PUMP_ELEVATION should not be less
than the bottom elevation (BOTTOM) of the multi-aquifer well.
* scaling_length (double) height above the pump elevation
(SCALING_LENGTH). If the simulated well head is below this
elevation (pump elevation plus the scaling length), then the
pumping rate is reduced.
auxiliaryrecord : [auxname, auxval]
* auxname (string) name for the auxiliary variable to be
assigned AUXVAL. AUXNAME must match one of the auxiliary
variable names defined in the OPTIONS block. If AUXNAME does
not match one of the auxiliary variable names defined in the
OPTIONS block the data are ignored.
* auxval (double) value for the auxiliary variable. If the
Options block includes a TIMESERIESFILE entry (see the "Time-
Variable Input" section), values can be obtained from a time
series by entering the time-series name in place of a numeric
value.
filename : String
File name for this package.
pname : String
Package name for this package.
parent_file : MFPackage
Parent package file that references this package. Only needed for
utility packages (mfutl*). For example, mfutllaktab package must have
a mfgwflak package parent_file.
"""
auxiliary = ListTemplateGenerator(("gwf6", "maw", "options", "auxiliary"))
head_filerecord = ListTemplateGenerator(
("gwf6", "maw", "options", "head_filerecord")
)
budget_filerecord = ListTemplateGenerator(
("gwf6", "maw", "options", "budget_filerecord")
)
ts_filerecord = ListTemplateGenerator(
("gwf6", "maw", "options", "ts_filerecord")
)
obs_filerecord = ListTemplateGenerator(
("gwf6", "maw", "options", "obs_filerecord")
)
packagedata = ListTemplateGenerator(
("gwf6", "maw", "packagedata", "packagedata")
)
connectiondata = ListTemplateGenerator(
("gwf6", "maw", "connectiondata", "connectiondata")
)
perioddata = ListTemplateGenerator(("gwf6", "maw", "period", "perioddata"))
package_abbr = "gwfmaw"
_package_type = "maw"
dfn_file_name = "gwf-maw.dfn"
dfn = [
[
"block options",
"name auxiliary",
"type string",
"shape (naux)",
"reader urword",
"optional true",
],
[
"block options",
"name boundnames",
"type keyword",
"shape",
"reader urword",
"optional true",
],
[
"block options",
"name print_input",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_head",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name print_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name save_flows",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name head_filerecord",
"type record head fileout headfile",
"shape",
"reader urword",
"tagged true",
"optional true",
],
[
"block options",
"name head",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name headfile",
"type string",
"preserve_case true",
"shape",
"in_record true",
"reader urword",
"tagged false",
"optional false",
],
[
"block options",
"name budget_filerecord",
"type record budget fileout budgetfile",
"shape",
"reader urword",
"tagged true",
"optional true",
],
[
"block options",
"name budget",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name fileout",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name budgetfile",
"type string",
"preserve_case true",
"shape",
"in_record true",
"reader urword",
"tagged false",
"optional false",
],
[
"block options",
"name no_well_storage",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name flow_correction",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name flowing_wells",
"type keyword",
"reader urword",
"optional true",
],
[
"block options",
"name shutdown_theta",
"type double precision",
"reader urword",
"optional true",
],
[
"block options",
"name shutdown_kappa",
"type double precision",
"reader urword",
"optional true",
],
[
"block options",
"name ts_filerecord",
"type record ts6 filein ts6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package ts",
"construct_data timeseries",
"parameter_name timeseries",
],
[
"block options",
"name ts6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name filein",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name ts6_filename",
"type string",
"preserve_case true",
"in_record true",
"reader urword",
"optional false",
"tagged false",
],
[
"block options",
"name obs_filerecord",
"type record obs6 filein obs6_filename",
"shape",
"reader urword",
"tagged true",
"optional true",
"construct_package obs",
"construct_data continuous",
"parameter_name observations",
],
[
"block options",
"name obs6",
"type keyword",
"shape",
"in_record true",
"reader urword",
"tagged true",
"optional false",
],
[
"block options",
"name obs6_filename",
"type string",
"preserve_case true",
"in_record true",
"tagged false",
"reader urword",
"optional false",
],
[
"block options",
"name mover",
"type keyword",
"tagged true",
"reader urword",
"optional true",
],
[
"block dimensions",
"name nmawwells",
"type integer",
"reader urword",
"optional false",
],
[
"block packagedata",
"name packagedata",
"type recarray wellno radius bottom strt condeqn ngwfnodes aux "
"boundname",
"shape (nmawwells)",
"reader urword",
],
[
"block packagedata",
"name wellno",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
"numeric_index true",
],
[
"block packagedata",
"name radius",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name bottom",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name strt",
"type double precision",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name condeqn",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name ngwfnodes",
"type integer",
"shape",
"tagged false",
"in_record true",
"reader urword",
],
[
"block packagedata",
"name aux",
"type double precision",
"in_record true",
"tagged false",
"shape (naux)",
"reader urword",
"time_series true",
"optional true",
],
[
"block packagedata",
"name boundname",
"type string",
"shape",
"tagged false",
"in_record true",
"reader urword",
"optional true",
],
[
"block | |
# coding=utf-8
"""Multifuture inferencing."""
import argparse
import json
import os
import pickle

# TF_CPP_MIN_LOG_LEVEL must be exported *before* tensorflow is imported;
# setting it afterwards (as the original code did) has no effect because
# the C++ logging side is initialised at import time.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import numpy as np
import tensorflow as tf

# the following will still have colocation debug info
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

from glob import glob
from tqdm import tqdm

from pred_models import Model as PredictionModel
from pred_utils import relative_to_abs
# Command-line interface -------------------------------------------------
# Positional arguments locate the data, the multifuture ground truth and
# the model checkpoint. The model hyper-parameter flags below must match
# the values used during training for the checkpoint to restore cleanly.
parser = argparse.ArgumentParser()
parser.add_argument("traj_path")
parser.add_argument("multifuture_path")
parser.add_argument("model_path")
parser.add_argument("output_file", help="a pickle, traj_id -> all output")
parser.add_argument("--num_out", default=20, type=int,
                    help="number of output per sample")
parser.add_argument("--save_prob_file", default=None,
                    help="save beam prob to a file")
parser.add_argument("--greedy", action="store_true")
parser.add_argument("--center_only", action="store_true")
parser.add_argument("--cap_reg", action="store_true")
parser.add_argument("--gpuid", type=int, default=0)
parser.add_argument("--obs_length", type=int, default=8)
# ------------------- basic model parameters
parser.add_argument("--emb_size", type=int, default=128)
parser.add_argument("--enc_hidden_size", type=int,
                    default=256, help="hidden size for rnn")
parser.add_argument("--dec_hidden_size", type=int,
                    default=256, help="hidden size for rnn")
# comma-separated per-scale settings, parsed by add_grid()
parser.add_argument("--grid_strides", default="2,4")
parser.add_argument("--use_grids", default="1,0")
parser.add_argument("--use_gn", action="store_true")
parser.add_argument("--use_gnn", action="store_true")
parser.add_argument("--use_scene_enc", action="store_true")
parser.add_argument("--use_single_decoder", action="store_true")
parser.add_argument("--use_soft_grid_class", action="store_true")
# beam-search diversity controls
parser.add_argument("--diverse_beam", action="store_true")
parser.add_argument("--diverse_gamma", type=float, default=1.0)
parser.add_argument("--fix_num_timestep", type=int, default=0)
# scene-segmentation feature inputs
parser.add_argument("--scene_feat_path", default=None)
parser.add_argument("--scene_id2name", default=None)
parser.add_argument("--scene_h", type=int, default=36)
parser.add_argument("--scene_w", type=int, default=64)
parser.add_argument("--scene_class", type=int, default=11)
parser.add_argument("--convlstm_kernel", default=3, type=int)
parser.add_argument("--scene_conv_dim", default=64, type=int)
parser.add_argument("--scene_conv_kernel", default=3, type=int)
# source video resolution; used to size the grid cells in add_grid()
parser.add_argument("--video_h", type=int, default=1080)
parser.add_argument("--video_w", type=int, default=1920)
def load_traj(traj_file):
    """Parse a tab-separated trajectory file into a float32 array.

    Every line must contain exactly four fields:
    frame_idx, person_id, x, y (a malformed line raises ValueError).

    Returns:
        np.ndarray of shape [num_rows, 4], dtype float32.
    """
    rows = []
    with open(traj_file, "r") as fh:
        for record in fh:
            frame_idx, person_id, x, y = record.strip().split("\t")
            rows.append([frame_idx, person_id, x, y])
    return np.array(rows, dtype="float32")
def add_grid(args):
    """Derive the multi-scale grid geometry from the parsed arguments.

    Populates (in place on *args*): scene_grid_strides, num_scene_grid,
    use_grids (as bools), scene_grids [(h, w) per scale],
    grid_box_sizes [(h_gap, w_gap) per scale] and scene_grid_centers
    (per scale, an [h, w, 2] array of (x, y) cell centers in pixels).
    """
    args.scene_grid_strides = [int(s) for s in args.grid_strides.split(",")]
    assert args.scene_grid_strides
    args.num_scene_grid = len(args.scene_grid_strides)
    args.use_grids = [bool(int(s)) for s in args.use_grids.split(",")]
    # Per-scale grid sizes; rounding matches tensorflow conv2d given odd input.
    args.scene_grids = [
        (int(round(args.scene_h * 1.0 / s)), int(round(args.scene_w * 1.0 / s)))
        for s in args.scene_grid_strides
    ]
    # Center point of each grid cell, per scale, in video-pixel coordinates.
    args.scene_grid_centers = []
    args.grid_box_sizes = []
    for grid_h, grid_w in args.scene_grids:
        gap_y = args.video_h * 1.0 / grid_h
        gap_x = args.video_w * 1.0 / grid_w
        args.grid_box_sizes.append((gap_y, gap_x))
        centers_x = np.cumsum([gap_x for _ in range(grid_w)]) - gap_x / 2.0
        centers_y = np.cumsum([gap_y for _ in range(grid_h)]) - gap_y / 2.0
        xx = np.tile(centers_x[None, :], [grid_h, 1])
        yy = np.tile(centers_y[:, None], [1, grid_w])
        # [h, w, 2] with the last axis ordered (x, y)
        args.scene_grid_centers.append(np.stack((xx, yy), axis=-1))
def get_grid_input(args, traj):
    """Build per-scale grid classification and regression targets for one
    observed trajectory.

    Args:
        args: parsed namespace; uses scene_grids, scene_grid_centers,
            video_h, video_w and obs_length (see add_grid()).
        traj: [obs_length, 2] array of (x, y) pixel coordinates.

    Returns:
        grid_class: [num_scales, obs_length] int32 — flattened grid-cell
            index (row-major) of the trajectory point at each timestep.
        grid_target_all: list (one per scale) of [obs_length, h, w, 2]
            float32 offsets from every cell center to the trajectory
            point (so grid_center + target recovers the actual xy).
    """
    # traj is [obs_length, 2]
    grid_class = np.zeros([len(args.scene_grids), args.obs_length], dtype="int32")
    grid_target_all = []
    # get the grid classification label based on (x,y)
    # grid centers: [H,W,2]
    for i, (center, (h, w)) in enumerate(zip(
            args.scene_grid_centers, args.scene_grids)):
        grid_target = np.zeros((args.obs_length, h, w, 2), dtype="float32")
        # grid classification: 1-based cell index via ceil division
        h_gap, w_gap = args.video_h*1.0/h, args.video_w*1.0/w
        x_indexes = np.ceil(traj[:, 0] / w_gap)  # [obs_length]
        y_indexes = np.ceil(traj[:, 1] / h_gap)  # [obs_length]
        x_indexes = np.asarray(x_indexes, dtype="int")
        y_indexes = np.asarray(y_indexes, dtype="int")
        # ceil(0.0) == 0, so a point exactly at x==0 (or y==0) would become
        # -1 after the shift below; clamp such points into the first cell.
        x_indexes[x_indexes == 0] = 1
        y_indexes[y_indexes == 0] = 1
        # convert the 1-based ceil results to 0-based cell indices
        x_indexes = x_indexes - 1
        y_indexes = y_indexes - 1
        # NOTE(review): coordinates beyond the video bounds would index past
        # h/w here — assumes 0 <= x <= video_w and 0 <= y <= video_h; confirm
        # against the data pipeline.
        one_hot = np.zeros((args.obs_length, h, w), dtype="uint8")
        one_hot[range(args.obs_length), y_indexes, x_indexes] = 1
        one_hot_flat = one_hot.reshape((args.obs_length, -1))  # [obs_length, h*w]
        classes = np.argmax(one_hot_flat, axis=1)  # [obs_length]
        grid_class[i, :] = classes
        # grid regression
        # tile current person seq xy to [obs_length, h, w, 2]
        traj_tile = np.tile(np.expand_dims(np.expand_dims(
            traj, axis=1), axis=1), [1, h, w, 1])
        # tile center to [obs_length, h, w, 2]
        center_tile = np.tile(np.expand_dims(
            center, axis=0), [args.obs_length, 1, 1, 1])
        # grid_center + target -> actual xy
        all_target = traj_tile - center_tile  # [obs_length, h, w, 2]
        # keep offsets for every cell (not only the active one)
        grid_target[:, :, :, :] = all_target
        grid_target_all.append(grid_target)
    return grid_class, grid_target_all
def get_inputs(args, traj_files, gt_trajs):
    """Load and pack the observation features for every test trajectory.

    Args:
        args: parsed namespace (uses scene_id2name, scene_feat_path,
            scene_h, scene_w, obs_length, plus the grids from add_grid()).
        traj_files: list of trajectory .txt files (parsed by load_traj()).
        gt_trajs: dict traj_id -> {future_id: {"x_agent_traj": [...]}};
            used only to size the maximum prediction length per sample.

    Returns:
        dict with per-sample lists (obs_traj, obs_traj_rel,
        obs_grid_class, obs_grid_target, obs_scene, max_pred_lengths)
        plus scene_feats, a [num_frames, scene_h, scene_w, num_classes]
        uint8 one-hot scene-segmentation tensor.

    Improvement over the original: the (h, w) index grids used to build
    the one-hot masks were rebuilt on every loop iteration although they
    are loop-invariant; they are now built once.
    """
    traj_list = []  # [N] [obs_length, 2]
    traj_list_rel = []  # [N] [obs_length, 2]
    scene_feats = []  # raw per-frame segmentation maps, indexed below
    scene_featidx_list = []  # [N, obs_length, 1]
    grid_class_list = []  # [N, strides, obs_length]
    grid_target_list = []  # [N, strides, obs_length, 2]
    pred_length_list = []  # [N]
    # Class-id remapping file: {"oldid2new": ..., "id2name": ...}
    with open(args.scene_id2name, "r") as f:
        scene_id2name = json.load(f)
    scene_oldid2new = scene_id2name["oldid2new"]
    scene_oldid2new = {
        int(oldi): scene_oldid2new[oldi] for oldi in scene_oldid2new}
    # Reserve id 0 for background and every class we ignored.
    assert 0 not in scene_oldid2new
    scene_oldid2new[0] = 0
    total_scene_class = len(scene_oldid2new)
    scene_id2name = scene_id2name["id2name"]
    scene_id2name[0] = "BG"
    assert len(scene_oldid2new) == len(scene_id2name)
    for traj_file in traj_files:
        traj_id = os.path.splitext(os.path.basename(traj_file))[0]
        # traj_id encodes the scene, moment index, agent pid and camera
        scene, moment_idx, x_agent_pid, camera = traj_id.split("_")
        x_agent_pid = int(x_agent_pid)
        traj_data = load_traj(traj_file)
        # assuming the frame indices are sorted ascending
        frame_idxs = np.unique(traj_data[:, 0]).tolist()
        # we only need the x_agent's observed trajectory: [obs_length, 2]
        x_agent_obs_traj = traj_data[x_agent_pid == traj_data[:, 1], 2:]
        assert len(x_agent_obs_traj) == args.obs_length, (
            traj_id, x_agent_obs_traj.shape)
        # relative displacements; first step is zero by construction
        x_agent_obs_traj_rel = np.zeros_like(x_agent_obs_traj)
        x_agent_obs_traj_rel[1:, :] = x_agent_obs_traj[1:, :] - \
            x_agent_obs_traj[:-1, :]
        # 1. grid features: [scale, obs_length] and per-scale offset maps
        grid_class, grid_target = get_grid_input(args, x_agent_obs_traj)
        # 2. scene segmentation feature index, one per observed frame
        scene_featidx = np.zeros([args.obs_length, 1], dtype="int32")
        for i, frame_idx in enumerate(frame_idxs):
            scene_feat_file = os.path.join(
                args.scene_feat_path, traj_id,
                "%s_F_%08d.npy" % (traj_id, frame_idx))
            feati = len(scene_feats)
            scene_feats.append(np.load(scene_feat_file))
            scene_featidx[i, :] = feati
        # pack up all the features
        traj_list.append(x_agent_obs_traj)
        traj_list_rel.append(x_agent_obs_traj_rel)
        scene_featidx_list.append(scene_featidx)
        grid_class_list.append(grid_class)
        grid_target_list.append(grid_target)
        # maximum prediction length over this sample's possible futures
        pred_timesteps = [len(gt_trajs[traj_id][future_id]["x_agent_traj"])
                          for future_id in gt_trajs[traj_id]]
        pred_length_list.append(max(pred_timesteps))
    # Convert the raw class-id maps into one-hot masks.
    scene_feat_shape = [len(scene_feats), args.scene_h, args.scene_w,
                        total_scene_class]
    scene_feat_all = np.zeros(scene_feat_shape, dtype="uint8")
    print("making scene feature of shape %s..." % (scene_feat_shape))
    # The (h, w) index grids are loop-invariant; build them once.
    h_indexes = np.repeat(np.arange(args.scene_h), args.scene_w).reshape(
        (args.scene_h, args.scene_w))
    w_indexes = np.tile(np.arange(args.scene_w), args.scene_h).reshape(
        (args.scene_h, args.scene_w))
    for k, scene_feat in tqdm(enumerate(scene_feats), total=len(scene_feats)):
        # remap class ids first; unknown ids fall into background (0)
        new_scene_feat = np.zeros_like(scene_feat)
        for i in range(args.scene_h):
            for j in range(args.scene_w):
                if scene_feat[i, j] in scene_oldid2new:
                    new_scene_feat[i, j] = scene_oldid2new[scene_feat[i, j]]
        # scatter into a one-hot [scene_h, scene_w, num_classes] mask
        this_scene_feat = np.zeros(
            (args.scene_h, args.scene_w, total_scene_class), dtype="uint8")
        this_scene_feat[h_indexes, w_indexes, new_scene_feat] = 1
        scene_feat_all[k, :, :, :] = this_scene_feat
        del this_scene_feat
        del new_scene_feat
    print("Done.")
    return {
        "obs_traj": traj_list,
        "obs_traj_rel": traj_list_rel,
        "obs_grid_class": grid_class_list,
        "obs_grid_target": grid_target_list,
        "obs_scene": scene_featidx_list,
        "scene_feats": scene_feat_all,
        "max_pred_lengths": pred_length_list
    }
def load_model_weights(model_path, sess, top_scope=None):
    """Initialize variables and restore model weights into the tf graph.

    Args:
        model_path: checkpoint directory; tf.train.get_checkpoint_state
            is used to locate the latest checkpoint inside it.
        sess: the tf.compat.v1.Session to restore into.
        top_scope: if given, restore only variables whose top-level
            variable scope equals this name.

    Raises:
        Exception: if no checkpoint exists under model_path.

    Fixes: the init op was run through the *default* session
    (``.run()``) while restoring used the ``sess`` argument — now both
    use ``sess``. Also switched to the tf.compat.v1 namespace the rest
    of this file already uses (tf.global_variables / tf.train.Saver no
    longer exist at the top level in TF2).
    """
    sess.run(tf.compat.v1.global_variables_initializer())
    allvars = tf.compat.v1.global_variables()
    allvars = [var for var in allvars if "global_step" not in var.name]
    # Optimizer slot variables are not needed (or present) for inference.
    opts = ["Adam", "beta1_power", "beta2_power", "Adam_1", "Adadelta_1",
            "Adadelta", "Momentum"]
    restore_vars = [var for var in allvars
                    if var.name.split(":")[0].split("/")[-1] not in opts]
    if top_scope is not None:
        restore_vars = [var for var in restore_vars
                        if var.name.split(":")[0].split("/")[0] == top_scope]
    saver = tf.compat.v1.train.Saver(restore_vars, max_to_keep=5)
    ckpt = tf.train.get_checkpoint_state(model_path)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        raise Exception("Model not exists")
class PredictionModelInference(PredictionModel):
    """Rewrite of the future-prediction model for inferencing.

    Only feed-dict construction differs from training: the batch size is
    fixed at 1 and all prediction-side targets are zero placeholders.
    """

    def get_feed_dict(self, inputs, args, idx):
        """Given a batch of data, construct the feed dict.

        Args:
            inputs: dict produced by get_inputs().
            args: parsed namespace (obs_length, scene_grids, use_grids,
                scene_h/scene_w/scene_class, use_soft_grid_class).
            idx: index of the sample within *inputs*.

        Returns:
            dict mapping the model's placeholders to numpy arrays.
        """
        # Tensor dimensions, so pylint: disable=g-bad-name
        N = 1  # single-sample batch at inference time
        T_in = args.obs_length
        T_pred = inputs["max_pred_lengths"][idx]  # longest possible future
        feed_dict = {}
        obs_length = np.zeros((N), dtype="int32")
        pred_length = np.zeros((N), dtype="int32")
        feed_dict[self.obs_length] = obs_length
        feed_dict[self.pred_length] = pred_length
        obs_length[0] = T_in
        pred_length[0] = T_pred
        feed_dict[self.is_train] = False
        # Grid observation labels/targets, one set per enabled scale.
        for j, (h, w) in enumerate(args.scene_grids):
            if not args.use_grids[j]:
                continue
            grid_obs_labels = np.zeros([1, T_in], dtype="int")
            grid_obs_reg_targets = np.zeros([1, T_in, h, w, 2], dtype="float")
            grid_obs_labels[0, :] = inputs["obs_grid_class"][idx][j, :]
            grid_obs_reg_targets[0, :, :, :, :] = \
                inputs["obs_grid_target"][idx][j][:, :, :, :]
            feed_dict[self.grid_obs_labels[j]] = grid_obs_labels
            feed_dict[self.grid_obs_regress[j]] = grid_obs_reg_targets
            # Prediction-side targets are unknown at inference: zero-fill.
            feed_dict[self.grid_pred_regress[j]] = np.zeros(
                [1, T_pred, h, w, 2], dtype="float")
            if args.use_soft_grid_class:
                feed_dict[self.grid_pred_labels_T[j]] = np.zeros(
                    [1, T_pred, h, w, 1], dtype="int")
            else:
                feed_dict[self.grid_pred_labels_T[j]] = np.zeros(
                    [1, T_pred], dtype="int")
        # Deduplicate the scene features used by this sample: remap the
        # global per-frame feature indices to a compact 0..K-1 range.
        oldid2newid = {}
        new_scene_idxs = np.zeros([1, T_in, 1], dtype="int32")
        for j in range(T_in):
            oldid = inputs["obs_scene"][idx][j][0]
            if oldid not in oldid2newid:
                oldid2newid[oldid] = len(oldid2newid)
            newid = oldid2newid[oldid]
            new_scene_idxs[0, j, 0] = newid
        # Gather only the features actually referenced by this mini-batch.
        scene_feat = np.zeros((len(oldid2newid), args.scene_h,
                               args.scene_w, args.scene_class),
                              dtype="float32")
        for oldid in oldid2newid:
            newid = oldid2newid[oldid]
            scene_feat[newid, :, :, :] = \
                inputs["scene_feats"][oldid, :, :, :]
        # Initialize the scene placeholders.
        obs_scene = np.zeros((N, T_in), dtype="int32")
        obs_scene_mask = np.zeros((N, T_in), dtype="bool")
        feed_dict[self.obs_scene] = obs_scene
        feed_dict[self.obs_scene_mask] = obs_scene_mask
        feed_dict[self.scene_feat] = scene_feat
        # each batch
        for j in range(T_in):
            # it was (1) shaped
            obs_scene[0, j] = new_scene_idxs[0, j, 0]
            obs_scene_mask[0, j] = True
        # [N, num_scale, T]; each entry is an int grid-class index.
        # NOTE(review): this loop re-feeds grid_obs_labels for *every*
        # scale — including scales disabled in args.use_grids — and
        # overwrites the entries set above with the same values. Looks
        # redundant; confirm against the model before removing.
        for j, _ in enumerate(args.scene_grids):
            this_grid_label = np.zeros([N, T_in], dtype="int32")
            this_grid_label[0, :] = inputs["obs_grid_class"][idx][j, :]
            feed_dict[self.grid_obs_labels[j]] = this_grid_label
        return feed_dict
if __name__ == "__main__":
args = parser.parse_args()
add_grid(args)
args.use_beam_search = True
if args.greedy:
args.use_beam_search = False
assert sum(args.use_grids) == 1
# get all the test data
traj_files = glob(os.path.join(args.traj_path, "*.txt"))
traj_ids = [os.path.splitext(os.path.basename(one))[0] for one in traj_files]
gt_trajs = {}
for traj_id | |
"parameters": {},
"form_key": None,
},
"y": {
"class": "NumpyArray",
"primitive": "bool",
"inner_shape": [],
"has_identifier": False,
"parameters": {},
"form_key": None,
},
},
"has_identifier": False,
"parameters": {},
"form_key": None,
}
assert ak._v2.forms.recordform.RecordForm(
contents=[
ak._v2.forms.emptyform.EmptyForm(),
ak._v2.forms.numpyform.NumpyForm("bool"),
],
keys=None,
has_identifier=True,
parameters={"x": 123},
form_key="hello",
).tolist(verbose=False) == {
"class": "RecordArray",
"contents": [
{"class": "EmptyArray"},
"bool",
],
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
assert ak._v2.forms.recordform.RecordForm(
contents=[
ak._v2.forms.emptyform.EmptyForm(),
ak._v2.forms.numpyform.NumpyForm("bool"),
],
keys=["x", "y"],
has_identifier=True,
parameters={"x": 123},
form_key="hello",
).tolist(verbose=False) == {
"class": "RecordArray",
"contents": {
"x": {"class": "EmptyArray"},
"y": "bool",
},
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
assert ak._v2.forms.from_iter(
{
"class": "RecordArray",
"contents": [
{"class": "EmptyArray"},
"bool",
],
}
).tolist() == {
"class": "RecordArray",
"contents": [
{
"class": "EmptyArray",
"has_identifier": False,
"parameters": {},
"form_key": None,
},
{
"class": "NumpyArray",
"primitive": "bool",
"inner_shape": [],
"has_identifier": False,
"parameters": {},
"form_key": None,
},
],
"has_identifier": False,
"parameters": {},
"form_key": None,
}
assert ak._v2.forms.from_iter(
{
"class": "RecordArray",
"contents": {
"x": {"class": "EmptyArray"},
"y": "bool",
},
}
).tolist() == {
"class": "RecordArray",
"contents": {
"x": {
"class": "EmptyArray",
"has_identifier": False,
"parameters": {},
"form_key": None,
},
"y": {
"class": "NumpyArray",
"primitive": "bool",
"inner_shape": [],
"has_identifier": False,
"parameters": {},
"form_key": None,
},
},
"has_identifier": False,
"parameters": {},
"form_key": None,
}
assert ak._v2.forms.from_iter(
{
"class": "RecordArray",
"contents": [
{"class": "EmptyArray"},
"bool",
],
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
).tolist(verbose=False) == {
"class": "RecordArray",
"contents": [
{"class": "EmptyArray"},
"bool",
],
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
assert ak._v2.forms.from_iter(
{
"class": "RecordArray",
"contents": {
"x": {"class": "EmptyArray"},
"y": "bool",
},
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
).tolist(verbose=False) == {
"class": "RecordArray",
"contents": {
"x": {"class": "EmptyArray"},
"y": "bool",
},
"has_identifier": True,
"parameters": {"x": 123},
"form_key": "hello",
}
@pytest.mark.skipif(
    ak._util.py27 or ak._util.py35, reason="Python 2.7, 3.5 have unstable dict order."
)
def test_IndexedForm():
    """IndexedForm: str/repr formatting, tolist(), and from_iter round-trips."""
    # str() pretty-prints the form as indented JSON, per index dtype.
    assert (
        str(
            ak._v2.forms.indexedform.IndexedForm(
                "i32", ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == """{
    "class": "IndexedArray",
    "index": "i32",
    "content": {
        "class": "EmptyArray"
    }
}"""
    )
    assert (
        str(
            ak._v2.forms.indexedform.IndexedForm(
                "u32", ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == """{
    "class": "IndexedArray",
    "index": "u32",
    "content": {
        "class": "EmptyArray"
    }
}"""
    )
    assert (
        str(
            ak._v2.forms.indexedform.IndexedForm(
                "i64", ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == """{
    "class": "IndexedArray",
    "index": "i64",
    "content": {
        "class": "EmptyArray"
    }
}"""
    )
    # Optional fields (has_identifier, parameters, form_key) appear in str().
    assert (
        str(
            ak._v2.forms.indexedform.IndexedForm(
                "i32",
                ak._v2.forms.emptyform.EmptyForm(),
                has_identifier=True,
                parameters={"x": 123},
                form_key="hello",
            )
        )
        == """{
    "class": "IndexedArray",
    "index": "i32",
    "content": {
        "class": "EmptyArray"
    },
    "has_identifier": true,
    "parameters": {
        "x": 123
    },
    "form_key": "hello"
}"""
    )
    # repr() round-trips the constructor arguments.
    assert (
        repr(
            ak._v2.forms.indexedform.IndexedForm(
                index="i32", content=ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == "IndexedForm('i32', EmptyForm())"
    )
    assert (
        repr(
            ak._v2.forms.indexedform.IndexedForm(
                index="i32",
                content=ak._v2.forms.emptyform.EmptyForm(),
                has_identifier=True,
                parameters={"x": 123},
                form_key="hello",
            )
        )
        == "IndexedForm('i32', EmptyForm(), has_identifier=True, parameters={'x': 123}, form_key='hello')"
    )
    # tolist(verbose=False) omits default-valued fields ...
    assert ak._v2.forms.indexedform.IndexedForm(
        "i32", ak._v2.forms.emptyform.EmptyForm()
    ).tolist(verbose=False) == {
        "class": "IndexedArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
    }
    # ... while the default tolist() spells them out explicitly.
    assert ak._v2.forms.indexedform.IndexedForm(
        "i32", ak._v2.forms.emptyform.EmptyForm()
    ).tolist() == {
        "class": "IndexedArray",
        "index": "i32",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ak._v2.forms.indexedform.IndexedForm(
        index="i32",
        content=ak._v2.forms.emptyform.EmptyForm(),
        has_identifier=True,
        parameters={"x": 123},
        form_key="hello",
    ).tolist(verbose=False) == {
        "class": "IndexedArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }
    # from_iter() reconstructs the form from its dict representation.
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedArray",
            "index": "i32",
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "IndexedArray",
        "index": "i32",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedArray",
            "index": "u32",
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "IndexedArray",
        "index": "u32",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedArray",
            "index": "i64",
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "IndexedArray",
        "index": "i64",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    # Optional fields survive a from_iter round-trip as well.
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedArray",
            "index": "i32",
            "content": {"class": "EmptyArray"},
            "has_identifier": True,
            "parameters": {"x": 123},
            "form_key": "hello",
        }
    ).tolist(verbose=False) == {
        "class": "IndexedArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }
@pytest.mark.skipif(
    ak._util.py27 or ak._util.py35, reason="Python 2.7, 3.5 have unstable dict order."
)
def test_IndexedOptionForm():
    """IndexedOptionForm: str/repr formatting, tolist(), from_iter round-trips."""
    # str() pretty-prints the form as indented JSON, per index dtype.
    assert (
        str(
            ak._v2.forms.indexedoptionform.IndexedOptionForm(
                "i32", ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == """{
    "class": "IndexedOptionArray",
    "index": "i32",
    "content": {
        "class": "EmptyArray"
    }
}"""
    )
    assert (
        str(
            ak._v2.forms.indexedoptionform.IndexedOptionForm(
                "i64", ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == """{
    "class": "IndexedOptionArray",
    "index": "i64",
    "content": {
        "class": "EmptyArray"
    }
}"""
    )
    # Optional fields (has_identifier, parameters, form_key) appear in str().
    assert (
        str(
            ak._v2.forms.indexedoptionform.IndexedOptionForm(
                "i32",
                ak._v2.forms.emptyform.EmptyForm(),
                has_identifier=True,
                parameters={"x": 123},
                form_key="hello",
            )
        )
        == """{
    "class": "IndexedOptionArray",
    "index": "i32",
    "content": {
        "class": "EmptyArray"
    },
    "has_identifier": true,
    "parameters": {
        "x": 123
    },
    "form_key": "hello"
}"""
    )
    # repr() round-trips the constructor arguments.
    assert (
        repr(
            ak._v2.forms.indexedoptionform.IndexedOptionForm(
                index="i32", content=ak._v2.forms.emptyform.EmptyForm()
            )
        )
        == "IndexedOptionForm('i32', EmptyForm())"
    )
    assert (
        repr(
            ak._v2.forms.indexedoptionform.IndexedOptionForm(
                index="i32",
                content=ak._v2.forms.emptyform.EmptyForm(),
                has_identifier=True,
                parameters={"x": 123},
                form_key="hello",
            )
        )
        == "IndexedOptionForm('i32', EmptyForm(), has_identifier=True, parameters={'x': 123}, form_key='hello')"
    )
    # tolist(verbose=False) omits default-valued fields ...
    assert ak._v2.forms.indexedoptionform.IndexedOptionForm(
        "i32", ak._v2.forms.emptyform.EmptyForm()
    ).tolist(verbose=False) == {
        "class": "IndexedOptionArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
    }
    # ... while the default tolist() spells them out explicitly.
    assert ak._v2.forms.indexedoptionform.IndexedOptionForm(
        "i32", ak._v2.forms.emptyform.EmptyForm()
    ).tolist() == {
        "class": "IndexedOptionArray",
        "index": "i32",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ak._v2.forms.indexedoptionform.IndexedOptionForm(
        index="i32",
        content=ak._v2.forms.emptyform.EmptyForm(),
        has_identifier=True,
        parameters={"x": 123},
        form_key="hello",
    ).tolist(verbose=False) == {
        "class": "IndexedOptionArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }
    # from_iter() reconstructs the form from its dict representation.
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedOptionArray",
            "index": "i32",
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "IndexedOptionArray",
        "index": "i32",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedOptionArray",
            "index": "i64",
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "IndexedOptionArray",
        "index": "i64",
        "content": {
            "class": "EmptyArray",
            "has_identifier": False,
            "parameters": {},
            "form_key": None,
        },
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    # Optional fields survive a from_iter round-trip as well.
    assert ak._v2.forms.from_iter(
        {
            "class": "IndexedOptionArray",
            "index": "i32",
            "content": {"class": "EmptyArray"},
            "has_identifier": True,
            "parameters": {"x": 123},
            "form_key": "hello",
        }
    ).tolist(verbose=False) == {
        "class": "IndexedOptionArray",
        "index": "i32",
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }
@pytest.mark.skipif(
    ak._util.py27 or ak._util.py35, reason="Python 2.7, 3.5 have unstable dict order."
)
def test_ByteMaskedForm():
    """Check ByteMaskedForm's str/repr rendering and tolist round-trips."""
    ByteMaskedForm = ak._v2.forms.bytemaskedform.ByteMaskedForm
    EmptyForm = ak._v2.forms.emptyform.EmptyForm
    from_iter = ak._v2.forms.from_iter

    # str(): verbose JSON with default-valued fields omitted.
    assert str(ByteMaskedForm("i8", EmptyForm(), True)) == """{
    "class": "ByteMaskedArray",
    "mask": "i8",
    "valid_when": true,
    "content": {
        "class": "EmptyArray"
    }
}"""
    assert str(ByteMaskedForm("i8", EmptyForm(), False)) == """{
    "class": "ByteMaskedArray",
    "mask": "i8",
    "valid_when": false,
    "content": {
        "class": "EmptyArray"
    }
}"""
    assert str(
        ByteMaskedForm(
            "i8",
            EmptyForm(),
            True,
            has_identifier=True,
            parameters={"x": 123},
            form_key="hello",
        )
    ) == """{
    "class": "ByteMaskedArray",
    "mask": "i8",
    "valid_when": true,
    "content": {
        "class": "EmptyArray"
    },
    "has_identifier": true,
    "parameters": {
        "x": 123
    },
    "form_key": "hello"
}"""

    # repr(): constructor-style rendering, defaults omitted.
    assert (
        repr(ByteMaskedForm(mask="i8", content=EmptyForm(), valid_when=True))
        == "ByteMaskedForm('i8', EmptyForm(), True)"
    )
    assert (
        repr(
            ByteMaskedForm(
                mask="i8",
                content=EmptyForm(),
                valid_when=True,
                has_identifier=True,
                parameters={"x": 123},
                form_key="hello",
            )
        )
        == "ByteMaskedForm('i8', EmptyForm(), True, has_identifier=True, parameters={'x': 123}, form_key='hello')"
    )

    # tolist(): compact (verbose=False) vs fully-populated (default) output.
    verbose_empty = {
        "class": "EmptyArray",
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ByteMaskedForm("i8", EmptyForm(), True).tolist(verbose=False) == {
        "class": "ByteMaskedArray",
        "mask": "i8",
        "valid_when": True,
        "content": {"class": "EmptyArray"},
    }
    assert ByteMaskedForm("i8", EmptyForm(), True).tolist() == {
        "class": "ByteMaskedArray",
        "mask": "i8",
        "valid_when": True,
        "content": verbose_empty,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert ByteMaskedForm(
        mask="i8",
        content=EmptyForm(),
        valid_when=True,
        has_identifier=True,
        parameters={"x": 123},
        form_key="hello",
    ).tolist(verbose=False) == {
        "class": "ByteMaskedArray",
        "mask": "i8",
        "valid_when": True,
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }

    # from_iter(): rebuild a form from dicts, then round-trip through tolist().
    assert from_iter(
        {
            "class": "ByteMaskedArray",
            "mask": "i8",
            "valid_when": True,
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "ByteMaskedArray",
        "mask": "i8",
        "valid_when": True,
        "content": verbose_empty,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert from_iter(
        {
            "class": "ByteMaskedArray",
            "mask": "i64",
            "valid_when": True,
            "content": {"class": "EmptyArray"},
        }
    ).tolist() == {
        "class": "ByteMaskedArray",
        "mask": "i64",
        "valid_when": True,
        "content": verbose_empty,
        "has_identifier": False,
        "parameters": {},
        "form_key": None,
    }
    assert from_iter(
        {
            "class": "ByteMaskedArray",
            "mask": "i8",
            "valid_when": True,
            "content": {"class": "EmptyArray"},
            "has_identifier": True,
            "parameters": {"x": 123},
            "form_key": "hello",
        }
    ).tolist(verbose=False) == {
        "class": "ByteMaskedArray",
        "mask": "i8",
        "valid_when": True,
        "content": {"class": "EmptyArray"},
        "has_identifier": True,
        "parameters": {"x": 123},
        "form_key": "hello",
    }
@pytest.mark.skipif(
ak._util.py27 or ak._util.py35, reason="Python 2.7, 3.5 have unstable dict order."
)
def test_BitMaskedForm():
assert (
str(
ak._v2.forms.bitmaskedform.BitMaskedForm(
"u8", ak._v2.forms.emptyform.EmptyForm(), True, True
)
)
== """{
"class": "BitMaskedArray",
"mask": "u8",
"valid_when": true,
"lsb_order": true,
| |
"困":
List.append("9EAE")
elif i == "択":
List.append("9EAF")
elif i == "夕":
List.append("9EB0")
elif i == "扱":
List.append("9EB1")
elif i == "啓":
List.append("9EB2")
elif i == "汚":
List.append("9EB3")
elif i == "罰":
List.append("9EB4")
elif i == "胆":
List.append("9EB5")
elif i == "猫":
List.append("9EB6")
elif i == "棒":
List.append("9EB7")
elif i == "細":
List.append("9EB8")
elif i == "点":
List.append("9EB9")
elif i == "製":
List.append("9EBA")
elif i == "矢":
List.append("9EBB")
elif i == "居":
List.append("9EBC")
elif i == "肝":
List.append("9EBD")
elif i == "答":
List.append("9EBE")
elif i == "義":
List.append("9EBF")
elif i == "務":
List.append("9EC0")
elif i == "留":
List.append("9EC1")
elif i == "障":
List.append("9EC2")
elif i == "訓":
List.append("9EC3")
elif i == "弟":
List.append("9EC4")
elif i == "妹":
List.append("9EC5")
elif i == "首":
List.append("9EC6")
elif i == "迎":
List.append("9EC7")
elif i == "姉":
List.append("9EC8")
elif i == "姿":
List.append("9EC9")
elif i == "将":
List.append("9ECA")
elif i == "散":
List.append("9ECB")
elif i == "練":
List.append("9ECC")
elif i == "猟":
List.append("9ECD")
elif i == "働":
List.append("9ECE")
elif i == "例":
List.append("9ECF")
elif i == "志":
List.append("9ED0")
elif i == "判":
List.append("9ED1")
elif i == "冗":
List.append("9ED2")
elif i == "官":
List.append("9ED3")
elif i == "臨":
List.append("9ED4")
elif i == "刻":
List.append("9ED5")
elif i == "潜":
List.append("9ED6")
elif i == "服":
List.append("9ED7")
elif i == "余":
List.append("9ED8")
elif i == "計":
List.append("9ED9")
elif i == "争":
List.append("9EDA")
elif i == "悲":
List.append("9EDB")
elif i == "閉":
List.append("9EDC")
elif i == "永":
List.append("9EDD")
elif i == "耐":
List.append("9EDE")
elif i == "移":
List.append("9EDF")
elif i == "候":
List.append("9EE0")
elif i == "習":
List.append("9EE1")
elif i == "罪":
List.append("9EE2")
elif i == "独":
List.append("9EE3")
elif i == "昼":
List.append("9EE4")
elif i == "反":
List.append("9EE5")
elif i == "氷":
List.append("9EE6")
elif i == "妻":
List.append("9EE7")
elif i == "注":
List.append("9EE8")
elif i == "表":
List.append("9EE9")
elif i == "究":
List.append("9EEA")
elif i == "徳":
List.append("9EEB")
elif i == "刃":
List.append("9EEC")
elif i == "倍":
List.append("9EED")
elif i == "洗":
List.append("9EEE")
elif i == "缶":
List.append("9EEF")
elif i == "押":
List.append("9EF0")
elif i == "登":
List.append("9EF1")
elif i == "提":
List.append("9EF2")
elif i == "常":
List.append("9EF3")
elif i == "刀":
List.append("9EF4")
elif i == "怒":
List.append("9EF5")
elif i == "才":
List.append("9EF6")
elif i == "木":
List.append("9EF7")
elif i == "彫":
List.append("9EF8")
elif i == "夫":
List.append("9EF9")
elif i == "婦":
List.append("9EFA")
elif i == "花":
List.append("9EFB")
elif i == "鳴":
List.append("9EFC")
elif i == "祭":
List.append("9EFD")
elif i == "騒":
List.append("9EFE")
elif i == "況":
List.append("9F42")
elif i == "嫉":
List.append("9F43")
elif i == "妬":
List.append("9F44")
elif i == "借":
List.append("9F45")
elif i == "寄":
List.append("9F46")
elif i == "茶":
List.append("9F47")
elif i == "酒":
List.append("9F48")
elif i == "燃":
List.append("9F49")
elif i == "投":
List.append("9F4A")
elif i == "希":
List.append("9F4B")
elif i == "費":
List.append("9F4C")
elif i == "並":
List.append("9F4D")
elif i == "喜":
List.append("9F4E")
elif i == "辺":
List.append("9F4F")
elif i == "仔":
List.append("9F50")
elif i == "徴":
List.append("9F51")
elif i == "坊":
List.append("9F52")
elif i == "飼":
List.append("9F53")
elif i == "肖":
List.append("9F54")
elif i == "博":
List.append("9F55")
elif i == "天":
List.append("9F56")
elif i == "軍":
List.append("9F57")
elif i == "祖":
List.append("9F58")
elif i == "棚":
List.append("9F59")
elif i == "講":
List.append("9F5B")
elif i == "呪":
List.append("9F5C")
elif i == "医":
List.append("9F5D")
elif i == "勉":
List.append("9F5E")
elif i == "布":
List.append("9F5F")
elif i == "訪":
List.append("9F60")
elif i == "混":
List.append("9F61")
elif i == "侵":
List.append("9F62")
elif i == "疑":
List.append("9F63")
elif i == "儀":
List.append("9F64")
elif i == "努":
List.append("9F65")
elif i == "益":
List.append("9F66")
elif i == "門":
List.append("9F67")
elif i == "試":
List.append("9F68")
elif i == "厳":
List.append("9F69")
elif i == "裏":
List.append("9F6A")
elif i == "腰":
List.append("9F6B")
elif i == "帥":
List.append("9F6C")
elif i == "封":
List.append("9F6D")
elif i == "柱":
List.append("9F6E")
elif i == "繋":
List.append("9F6F")
elif i == "丘":
List.append("9F70")
elif i == "畑":
List.append("9F71")
elif i == "忍":
List.append("9F72")
elif i == "厄":
List.append("9F73")
elif i == "嫁":
List.append("9F74")
elif i == "展":
List.append("9F75")
elif i == "汗":
List.append("9F76")
elif i == "車":
List.append("9F77")
elif i == "接":
List.append("9F78")
elif i == "絹":
List.append("9F79")
elif i == "肌":
List.append("9F7A")
elif i == "魂":
List.append("9F7B")
elif i == "票":
List.append("9F7C")
elif i == "橋":
List.append("9F7D")
elif i == "娘":
List.append("9F7E")
elif i == "根":
List.append("9F80")
elif i == "怖":
List.append("9F81")
elif i == "幅":
List.append("9F82")
elif i == "衝":
List.append("9F83")
elif i == "射":
List.append("9F84")
elif i == "罠":
List.append("9F85")
elif i == "床":
List.append("9F86")
elif i == "丈":
List.append("9F87")
elif i == "区":
List.append("9F88")
elif i == "随":
List.append("9F89")
elif i == "枝":
List.append("9F8A")
elif i == "古":
List.append("9F8B")
elif i == "頂":
List.append("9F8C")
elif i == "横":
List.append("9F8D")
elif i == "拾":
List.append("9F8E")
elif i == "良":
List.append("9F8F")
elif i == "穫":
List.append("9F90")
elif i == "承":
List.append("9F91")
elif i == "森":
List.append("9F92")
elif i == "雑":
List.append("9F93")
elif i == "貨":
List.append("9F94")
elif i == "族":
List.append("9F95")
elif i == "省":
List.append("9F96")
elif i == "掃":
List.append("9F97")
elif i == "除":
List.append("9F98")
elif i == "粧":
List.append("9F99")
elif i == "恥":
List.append("9F9A")
elif i == "濯":
List.append("9F9B")
elif i == "帯":
List.append("9F9C")
elif i == "策":
List.append("9F9D")
elif i == "裕":
List.append("9F9E")
elif i == "施":
List.append("9F9F")
elif i == "営":
List.append("9FA0")
elif i == "優":
List.append("9FA1")
elif i == "骨":
List.append("9FA2")
elif i == "埋":
List.append("9FA3")
elif i == "躍":
List.append("9FA4")
elif i == "冬":
List.append("9FA5")
elif i == "遙":
List.append("9FA6")
elif i == "圧":
List.append("9FA7")
elif i == "迫":
List.append("9FA8")
elif i == "獄":
List.append("9FA9")
elif i == "再":
List.append("9FAA")
elif i == "亡":
List.append("9FAB")
elif i == "雨":
List.append("9FAC")
elif i == "枯":
List.append("9FAD")
elif i == "噴":
List.append("9FAE")
elif i == "久":
List.append("9FAF")
elif i == "衰":
List.append("9FB0")
elif i == "鈍":
List.append("9FB1")
elif i == "凍":
List.append("9FB2")
elif i == "昇":
List.append("9FB3")
elif i == "低":
List.append("9FB4")
elif i == "砂":
List.append("9FB5")
elif i == "含":
List.append("9FB6")
elif i == "晶":
List.append("9FB7")
elif i == "溜":
List.append("9FB8")
elif i == "珠":
List.append("9FB9")
elif i == "黄":
List.append("9FBA")
elif i == "霊":
List.append("9FBB")
elif i == "召":
List.append("9FBC")
elif i == "喚":
List.append("9FBD")
elif i == "蒼":
List.append("9FBE")
elif i == "紅":
List.append("9FBF")
elif i == "翠":
List.append("9FC0")
elif i == "草":
List.append("9FC1")
elif i == "種":
List.append("9FC2")
elif i == "瓶":
List.append("9FC3")
elif i == "鱗":
List.append("9FC4")
elif i == "珍":
List.append("9FC5")
elif i == "獣":
List.append("9FC6")
elif i == "蓋":
List.append("9FC7")
elif i == "鑑":
List.append("9FC8")
elif i == "欠":
List.append("9FC9")
elif i == "各":
List.append("9FCA")
elif i == "煙":
List.append("9FCB")
elif i == "筒":
List.append("9FCC")
elif i == "造":
List.append("9FCD")
elif i == "炭":
List.append("9FCE")
elif i == "炉":
List.append("9FCF")
elif i == "槽":
List.append("9FD0")
elif i == "苗":
List.append("9FD1")
elif i == "植":
List.append("9FD2")
elif i == "周":
List.append("9FD3")
elif i == "囲":
List.append("9FD4")
elif i == "鍵":
List.append("9FD5")
elif i == "級":
List.append("9FD6")
elif i == "干":
List.append("9FD7")
elif i == "匂":
List.append("9FD8")
elif i == "涙":
List.append("9FD9")
elif i == "球":
List.append("9FDA")
elif i == "漬":
List.append("9FDB")
elif i == "瓜":
List.append("9FDC")
elif i == "臭":
List.append("9FDD")
elif i == "毛":
List.append("9FDE")
elif i == "芳":
List.append("9FDF")
elif i == "皮":
List.append("9FE0")
elif i == "丸":
List.append("9FE1")
elif i == "牛":
List.append("9FE2")
elif i == "乳":
List.append("9FE3")
elif i == "酵":
List.append("9FE4")
elif i == "胃":
List.append("9FE5")
elif i == "腸":
List.append("9FE6")
elif i == "炊":
List.append("9FE7")
elif i == "穀":
List.append("9FE8")
elif i == "農":
List.append("9FE9")
elif i == "八":
List.append("9FEA")
elif i == "孫":
List.append("9FEB")
elif i == "柔":
List.append("9FEC")
elif i == "麦":
List.append("9FED")
elif i == "粉":
List.append("9FEE")
elif i == "般":
List.append("9FEF")
elif i == "脂":
List.append("9FF0")
elif i == "肪":
List.append("9FF1")
elif i == "塊":
List.append("9FF2")
elif i == | |
<filename>textstat/textstat.py
import warnings
import string
import re
import math
from collections import Counter
import pkg_resources
from functools import lru_cache
from pyphen import Pyphen
# Per-language constants for the Flesch Reading Ease family of formulas.
# Keys absent for a language presumably fall back to the "en" defaults
# (resolution happens in the class's language-config lookup, not shown here
# -- TODO confirm).  "syllable_threshold" is the minimum syllable count for
# a word to be considered "difficult".
langs = {
    "en": {  # Default config (original Flesch Reading Ease constants)
        "fre_base": 206.835,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 84.6,
        "syllable_threshold": 3,
    },
    "de": {
        # Amstad's German adaptation of Flesch -- TODO confirm attribution
        "fre_base": 180,
        "fre_sentence_length": 1,
        "fre_syll_per_word": 58.5,
    },
    "es": {
        # Fernandez Huerta readability formula (Spanish) -- TODO confirm attribution
        "fre_base": 206.84,
        "fre_sentence_length": 1.02,
        "fre_syll_per_word": 0.6,
    },
    "fr": {
        # French Flesch adaptation (presumably Kandel-Moles) -- TODO confirm
        "fre_base": 207,
        "fre_sentence_length": 1.015,
        "fre_syll_per_word": 73.6,
    },
    "it": {
        # Flesch-Vacca
        "fre_base": 217,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 0.6,
    },
    "nl": {
        # Flesch-Douma
        "fre_base": 206.835,
        "fre_sentence_length": 0.93,
        "fre_syll_per_word": 77,
    },
    "pl": {
        # No Flesch variant configured for Polish; only the difficult-word
        # syllable threshold differs from the defaults.
        "syllable_threshold": 4,
    },
    "ru": {
        # Russian Flesch adaptation -- TODO confirm attribution
        "fre_base": 206.835,
        "fre_sentence_length": 1.3,
        "fre_syll_per_word": 60.1,
    },
}
def legacy_round(number, points=0):
    """Round *number* to *points* decimals, halves away from zero.

    Reproduces Python 2's rounding behaviour (3's round() uses
    banker's rounding, which would change historical scores).
    """
    factor = 10 ** points
    shifted = (number * factor) + math.copysign(0.5, number)
    return float(math.floor(shifted)) / factor
def get_grade_suffix(grade):
    """Return the English ordinal suffix for *grade* (1 -> 'st', 11 -> 'th')."""
    # 11/12/13 are special-cased: "11th", not "11st".
    if grade % 100 in (11, 12, 13):
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(grade % 10, 'th')
class textstatistics:
__lang = "en_US"
text_encoding = "utf-8"
def _cache_clear(self):
caching_methods = [
method for method in dir(self)
if callable(getattr(self, method))
and hasattr(getattr(self, method), "cache_info")
]
for method in caching_methods:
getattr(self, method).cache_clear()
    def set_lang(self, lang):
        """Set the active language code (e.g. "en_US", "de") and flush all
        cached metric results, since they depend on the language config."""
        self.__lang = lang
        self._cache_clear()
@lru_cache(maxsize=128)
def char_count(self, text, ignore_spaces=True):
"""
Function to return total character counts in a text,
pass the following parameter `ignore_spaces = False`
to ignore whitespaces
"""
if ignore_spaces:
text = text.replace(" ", "")
return len(text)
@lru_cache(maxsize=128)
def letter_count(self, text, ignore_spaces=True):
"""
Function to return total letter amount in a text,
pass the following parameter `ignore_spaces = False`
to ignore whitespaces
"""
if ignore_spaces:
text = text.replace(" ", "")
return len(self.remove_punctuation(text))
@staticmethod
def remove_punctuation(text):
return ''.join(ch for ch in text if ch not in string.punctuation)
@lru_cache(maxsize=128)
def lexicon_count(self, text, removepunct=True):
"""
Function to return total lexicon (words in lay terms) counts in a text
"""
if removepunct:
text = self.remove_punctuation(text)
count = len(text.split())
return count
@lru_cache(maxsize=128)
def syllable_count(self, text, lang=None):
"""
Function to calculate syllable words in a text.
I/P - a text
O/P - number of syllable words
"""
if lang:
warnings.warn(
"The 'lang' argument has been moved to "
"'textstat.set_lang(<lang>)'. This argument will be removed "
"in the future.",
DeprecationWarning
)
if isinstance(text, bytes):
text = text.decode(self.text_encoding)
text = text.lower()
text = self.remove_punctuation(text)
if not text:
return 0
dic = Pyphen(lang=self.__lang)
count = 0
for word in text.split(' '):
word_hyphenated = dic.inserted(word)
count += max(1, word_hyphenated.count("-") + 1)
return count
@lru_cache(maxsize=128)
def sentence_count(self, text):
"""
Sentence count of a text
"""
ignore_count = 0
sentences = re.split(r' *[\.\?!][\'"\)\]]*[ |\n](?=[A-Z])', text)
for sentence in sentences:
if self.lexicon_count(sentence) <= 2:
ignore_count += 1
return max(1, len(sentences) - ignore_count)
@lru_cache(maxsize=128)
def avg_sentence_length(self, text):
try:
asl = float(self.lexicon_count(text) / self.sentence_count(text))
return legacy_round(asl, 1)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_syllables_per_word(self, text, interval=None):
syllable = self.syllable_count(text)
words = self.lexicon_count(text)
try:
if interval:
syllables_per_word = float(syllable) * interval / float(words)
else:
syllables_per_word = float(syllable) / float(words)
return legacy_round(syllables_per_word, 1)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_character_per_word(self, text):
try:
letters_per_word = float(
self.char_count(text) / self.lexicon_count(text))
return legacy_round(letters_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_letter_per_word(self, text):
try:
letters_per_word = float(
self.letter_count(text) / self.lexicon_count(text))
return legacy_round(letters_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def avg_sentence_per_word(self, text):
try:
sentence_per_word = float(
self.sentence_count(text) / self.lexicon_count(text))
return legacy_round(sentence_per_word, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def flesch_reading_ease(self, text):
sentence_length = self.avg_sentence_length(text)
s_interval = 100 if self.__get_lang_root() in ['es', 'it'] else None
syllables_per_word = self.avg_syllables_per_word(text, s_interval)
flesch = (
self.__get_lang_cfg("fre_base")
- float(
self.__get_lang_cfg("fre_sentence_length") * sentence_length
)
- float(
self.__get_lang_cfg("fre_syll_per_word") * syllables_per_word
)
)
return legacy_round(flesch, 2)
@lru_cache(maxsize=128)
def flesch_kincaid_grade(self, text):
sentence_lenth = self.avg_sentence_length(text)
syllables_per_word = self.avg_syllables_per_word(text)
flesch = (
float(0.39 * sentence_lenth)
+ float(11.8 * syllables_per_word)
- 15.59)
return legacy_round(flesch, 1)
@lru_cache(maxsize=128)
def polysyllabcount(self, text):
count = 0
for word in text.split():
wrds = self.syllable_count(word)
if wrds >= 3:
count += 1
return count
@lru_cache(maxsize=128)
def smog_index(self, text):
sentences = self.sentence_count(text)
if sentences >= 3:
try:
poly_syllab = self.polysyllabcount(text)
smog = (
(1.043 * (30 * (poly_syllab / sentences)) ** .5)
+ 3.1291)
return legacy_round(smog, 1)
except ZeroDivisionError:
return 0.0
else:
return 0.0
@lru_cache(maxsize=128)
def coleman_liau_index(self, text):
letters = legacy_round(self.avg_letter_per_word(text) * 100, 2)
sentences = legacy_round(self.avg_sentence_per_word(text) * 100, 2)
coleman = float((0.058 * letters) - (0.296 * sentences) - 15.8)
return legacy_round(coleman, 2)
@lru_cache(maxsize=128)
def automated_readability_index(self, text):
chrs = self.char_count(text)
words = self.lexicon_count(text)
sentences = self.sentence_count(text)
try:
a = float(chrs) / float(words)
b = float(words) / float(sentences)
readability = (
(4.71 * legacy_round(a, 2))
+ (0.5 * legacy_round(b, 2))
- 21.43)
return legacy_round(readability, 1)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def linsear_write_formula(self, text):
easy_word = 0
difficult_word = 0
text_list = text.split()[:100]
for word in text_list:
if self.syllable_count(word) < 3:
easy_word += 1
else:
difficult_word += 1
text = ' '.join(text_list)
number = float(
(easy_word * 1 + difficult_word * 3)
/ self.sentence_count(text))
if number <= 20:
number -= 2
return number / 2
    @lru_cache(maxsize=128)
    def difficult_words(self, text, syllable_threshold=2):
        """Return the number of distinct difficult words in *text*.

        A word is difficult when it is not in the language's easy-word
        list and has at least *syllable_threshold* syllables (see
        is_difficult_word).
        """
        return len(self.difficult_words_list(text, syllable_threshold))
@lru_cache(maxsize=128)
def difficult_words_list(self, text, syllable_threshold=2):
text_list = re.findall(r"[\w\='‘’]+", text.lower())
diff_words_set = set()
for value in text_list:
if self.is_difficult_word(value, syllable_threshold):
diff_words_set.add(value)
return list(diff_words_set)
@lru_cache(maxsize=128)
def is_difficult_word(self, word, syllable_threshold=2):
easy_word_set = self.__get_lang_easy_words()
syllables = self.syllable_count(word)
if word in easy_word_set or syllables < syllable_threshold:
return False
return True
    @lru_cache(maxsize=128)
    def is_easy_word(self, word, syllable_threshold=2):
        """Inverse of is_difficult_word()."""
        return not self.is_difficult_word(word, syllable_threshold)
@lru_cache(maxsize=128)
def dale_chall_readability_score(self, text):
word_count = self.lexicon_count(text)
count = word_count - self.difficult_words(text)
try:
per = float(count) / float(word_count) * 100
except ZeroDivisionError:
return 0.0
difficult_words = 100 - per
score = (
(0.1579 * difficult_words)
+ (0.0496 * self.avg_sentence_length(text)))
if difficult_words > 5:
score += 3.6365
return legacy_round(score, 2)
@lru_cache(maxsize=128)
def gunning_fog(self, text):
try:
syllable_threshold = self.__get_lang_cfg("syllable_threshold")
per_diff_words = (
self.difficult_words(
text,
syllable_threshold=syllable_threshold)
/ self.lexicon_count(text) * 100)
grade = 0.4 * (self.avg_sentence_length(text) + per_diff_words)
return legacy_round(grade, 2)
except ZeroDivisionError:
return 0.0
@lru_cache(maxsize=128)
def lix(self, text):
words = text.split()
words_len = len(words)
long_words = len([wrd for wrd in words if len(wrd) > 6])
per_long_words = (float(long_words) * 100) / words_len
asl = self.avg_sentence_length(text)
lix = asl + per_long_words
return legacy_round(lix, 2)
@lru_cache(maxsize=128)
def rix(self, text):
"""
A Rix ratio is simply the number of long words divided by
the number of assessed sentences.
rix = LW/S
"""
words = text.split()
long_words_count = len([wrd for wrd in words if len(wrd) > 6])
sentences_count = self.sentence_count(text)
try:
rix = long_words_count / sentences_count
except ZeroDivisionError:
rix = 0.00
return legacy_round(rix, 2)
@lru_cache(maxsize=128)
def spache_readability(self, text, float_output=True):
"""
Function to calculate SPACHE readability formula for young readers.
I/P - a text
O/P - an int Spache Readability Index/Grade Level
"""
total_no_of_words = self.lexicon_count(text)
count_of_sentences = self.sentence_count(text)
asl = total_no_of_words / count_of_sentences
pdw = (self.difficult_words(text) / total_no_of_words) * 100
spache = (0.141 * asl) + (0.086 * pdw) + 0.839
if not float_output:
return int(spache)
else:
return spache
@lru_cache(maxsize=128)
def dale_chall_readability_score_v2(self, text):
"""
Function to calculate New Dale Chall Readability formula.
I/P - a text
O/P - an int Dale Chall Readability Index/Grade Level
"""
total_no_of_words = self.lexicon_count(text)
count_of_sentences = self.sentence_count(text)
asl = total_no_of_words / count_of_sentences
pdw = (self.difficult_words(text) / total_no_of_words) * 100
raw_score = 0.1579 * (pdw) + 0.0496 * asl
adjusted_score = raw_score
if raw_score > 0.05:
adjusted_score = raw_score + 3.6365
return legacy_round(adjusted_score, 2)
@lru_cache(maxsize=128)
def text_standard(self, text, float_output=None):
grade = []
# Appending Flesch Kincaid Grade
lower = legacy_round(self.flesch_kincaid_grade(text))
upper = math.ceil(self.flesch_kincaid_grade(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Flesch Reading Easy
score = self.flesch_reading_ease(text)
if score < 100 and score >= 90:
grade.append(5)
elif score < 90 and score >= 80:
grade.append(6)
elif score < 80 and score >= 70:
grade.append(7)
elif score < 70 and score >= 60:
grade.append(8)
grade.append(9)
elif score < 60 and score >= 50:
grade.append(10)
elif score < 50 and score >= 40:
grade.append(11)
elif score < 40 and score >= 30:
grade.append(12)
else:
grade.append(13)
# Appending SMOG Index
lower = legacy_round(self.smog_index(text))
upper = math.ceil(self.smog_index(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Coleman_Liau_Index
lower = legacy_round(self.coleman_liau_index(text))
upper = math.ceil(self.coleman_liau_index(text))
grade.append(int(lower))
grade.append(int(upper))
# Appending Automated_Readability_Index
lower | |
if not present.
if item["loc"]["chr"] not in chr_blocks:
chr_blocks[item["loc"]["chr"]] = {}
block_id = "bid:%s" % (math.floor(item["loc"]["left"] / resolution) * 1000, )
if block_id not in chr_blocks[item["loc"]["chr"]]:
chr_blocks[item["loc"]["chr"]][block_id] = [0 for x in range(len(self.linearData))] # one for each gl
total_rows += 1
if score_key:
chr_blocks[item["loc"]["chr"]][block_id][index] = item[score_key]
else:
chr_blocks[item["loc"]["chr"]][block_id][index] = 1
config.log.info("overlap_heatmap(): Found %s unique genomic regions" % total_rows)
# Build the table for the heatmap
tab = numpy.zeros([len(self.linearData), total_rows])
crow = 0
for c, value in chr_blocks.items():
for bid in chr_blocks[c]:
for i in range(len(value[bid])): # or len(self.linearData)
tab[i, crow] = chr_blocks[c][bid][i]
crow += 1
tab = tab.T
# dendrogram dies here, so need other ways to cluster
# DBSCAN consumes too much unnecessary memory.
"""
alg = DBSCAN(eps=0.2)
print alg.fit(tab)
print alg.labels_
clusters = numpy.unique(alg.labels_)
print clusters
# reorder the list based on cluster membership
newd = {}
for index, c in enumerate(alg.labels_):
if c not in newd:
newd[c] = []
newd[c].append(tab[index])
# load it back into a numpy array
tab = None
for c in clusters:
new = numpy.vstack(newd[c])
if tab is None:
tab = new
else:
tab = numpy.vstack([tab, new])
"""
# Yay, roll my own clustering!
# I already know how many possible clusters there will be.
#num_clusters = math.factorial(len(self.linearData))
# build a cluster table, containing all possible variants for this len(self.linearData)
clusters = {}
for row in tab:
# Make an identifier for the cluster:
id = tuple(bool(i) for i in row)
if id not in clusters:
clusters[id] = []
clusters[id].append(row)
# I want to sort the clusters first:
sorted_clusters = [{"id": c, "score": sum(c)} for c in clusters]
sorted_clusters = sorted(sorted_clusters, key=itemgetter("score"))
# Flattent the arrays and load it back into a numpy array
tab = None
for c in sorted_clusters:
new = numpy.vstack(clusters[c["id"]])
tab = new if tab is None else numpy.vstack([tab, new])
ret = self.draw.heatmap(data=tab, filename=filename, col_names=[gl.name for gl in self.linearData], row_names=None,
row_cluster=False, col_cluster=True, colour_map=cm.Reds, heat_wid=0.7, heat_hei=0.7, bracket=[0,tab.max()])
config.log.info("overlap_heatmap: Saved overlap heatmap to '%s'" % ret["real_filename"])
return(tab)
    def __peak_cluster(self, list_of_peaks, merge_peaks_distance):
        """Collapse peaks from all lists into unique fixed-width genomic blocks.

        Each peak is reduced to its centre point, expanded by
        merge_peaks_distance on each side, and merged with any existing
        overlapping block (centres averaged, list-membership vectors
        combined).

        Args:
            list_of_peaks: genelist-like objects, each iterable via a "loc"
                key (presumably genomic-location objects -- confirm).
            merge_peaks_distance: half-width added around each peak centre.

        Returns:
            (total_rows, chr_blocks) where chr_blocks maps
            chromosome -> {(left, right): {"binary": [...], "pil": [...]}}.
            "binary" flags which input lists contributed a peak to the
            block; "pil" is zero-filled placeholder data (presumably
            replaced with pileup scores by the caller -- confirm).
        """
        # Merge overlapping peaks
        chr_blocks = {}
        total_rows = 0
        #merged_peaks = {}
        p = progressbar(len(list_of_peaks))
        for idx, gl in enumerate(list_of_peaks):
            for p1 in gl["loc"]:
                #p1 = p1.pointify().expand(merge_peaks_distance) # about 10% of the time is in __getitem__ from the loc, so unpack it;
                cpt = (p1.loc["left"] + p1.loc['right']) // 2
                p1_chr = 'chr{0}'.format(p1['chr'])
                p1_left = cpt - merge_peaks_distance
                p1_right = cpt + merge_peaks_distance
                if not p1_chr in chr_blocks:
                    chr_blocks[p1_chr] = {}
                binary = [0 for x in range(len(list_of_peaks))] # set-up here in case I need to modify it.
                for p2 in chr_blocks[p1_chr]: # p2 is now a block_id tuple
                    #if p1.qcollide(p2):
                    if p1_right >= p2[0] and p1_left <= p2[1]: # unfolded for speed.
                        binary = chr_blocks[p1_chr][p2]["binary"] # preserve the old membership
                        # remove the original entry
                        del chr_blocks[p1_chr][p2]
                        total_rows -= 1
                        # Add in a new merged peak:
                        cpt = (((p1_left+p2[0])//2) + ((p1_right+p2[1])//2)) // 2 # pointify()
                        p1_left=cpt-merge_peaks_distance
                        p1_right=cpt+merge_peaks_distance
                        # Don't get confused here, p1 is added onto the block heap below:
                        # NOTE(review): deleting p2 during iteration is safe only
                        # because of this immediate break; a peak that bridges two
                        # existing blocks merges with just the first one found.
                        break
                # modify binary to signify membership for this peaklist
                binary[idx] = 1
                # Add p1 onto the blocklist
                block_id = (p1_left, p1_right)
                if block_id not in chr_blocks[p1_chr]:
                    chr_blocks[p1_chr][block_id] = {"binary": binary,
                        "pil": [0 for x in range(len(list_of_peaks))]} # one for each gl, load pil with dummy data.
                    total_rows += 1 # because the result is a dict of dicts {"<chrname>": {"bid": {data}}, so hard to keep track of the total size.
            p.update(idx)
        return total_rows, chr_blocks
def chip_seq_cluster(self, list_of_peaks, merge_peaks_distance=400, sort_clusters=True,
_get_chr_blocks=False, **kargs):
"""
**Purpose**
Combine and merge all peaks, extract the read pileups then categorize the peaks into
similar groupings. Return a new list of genelists, one genelist for each grouping
that contains the list of genomic locations in each group.
Return a glbase expression object with each row a merged (unique) peak, each
column is a peak
Be careful, the resulting objects can get very huge!
The order of the genomic locations and order of the groups must be maintained
between the heatmap and the returned data.
NOTE: I sort of named this function incorectly with the whole 'cluster' business.
Although it's not wrong to label the returned groups as clusters it is certainly
confusing and may imply that some sort of k-means or hierarchical clustering
is performed. No clustering is performed, instead groups are made based on a binary
determination from the list_of_peaks. So below, where I refer to 'cluster'
I really mean group. Later I may add k-means clustering, which may make things even more
confusing.
Here is a detailed explanation of this function:
1. Join all of the peaks into a redundant set of coordinates
2. Merge all of the genomic regions to produce a single list of unique genomic regions
(this is what it means by "chip_seq_cluster_heatmap(): Found <number> unique
genomic regions")
3. Build a table of all possible peak combinations:
e.g. for two chip-seq lists, A and B:
listA only: [True, False]
listB only: [False, True]
listA and listB: [True, True]
It is these that are the 'clusters' (or groups). In this case there would
be just 3 groups. The more lists the more possible groups.
Note that groups with no members are culled.
**Arguments**
list_of_peaks (Required)
A list of genelists of peaks from your ChIP-seq data to interrogate. The order of the libraries
Genomic location data should be stored in a 'loc' key in the genelist.
merge_peaks_distance (Optional, default=400)
Maximum distance that the centers of any two peaks can be apart before the two peaks are merged into
a single peak. (taking the mean of the peak centers)
sort_clusters (Optional, default=True)
sort the clusters from most complex to least complex.
Note that chip_seq_cluster_heatmap cannot preserve the order of the peaks
(it's impossible), so setting this to false will just randomise the order of the clusters
which may not be particularly helpful.
**Returns**
Returns a glbase expression object, with rows as unique genomic peaks and
columns as each peak list.
The values will be filled with 0 or 1, if it was a peak or not a peak.
"""
assert list_of_peaks, 'list_of_peaks is empty'
assert len(list_of_peaks[0]) > 0, 'list_of_peaks lists appear to be empty'
# get a non-redundant list of genomic regions based on resolution.
chr_blocks = {} # stores a binary identifier
pil_blocks = {}
total_rows = 0
peak_lengths = sum([len(p) for p in list_of_peaks])
config.log.info("chip_seq_cluster_heatmap: Started with {0:,} redundant peaks".format(peak_lengths))
total_rows, chr_blocks = self.__peak_cluster(list_of_peaks, merge_peaks_distance)
config.log.info("chip_seq_cluster: Found {0:,} unique genomic regions".format(total_rows))
if _get_chr_blocks:
return chr_blocks
# Convert the chr_blocks into a expression object
tab = []
for chrom in chr_blocks:
for loc in chr_blocks[chrom]:
l = location(chr=chrom, left=loc[0], right=loc[1])
cid = int("".join([str(i) for i in chr_blocks[chrom][loc]["binary"]]), 2)
#print cid
tab.append({'loc': l, 'conditions': chr_blocks[chrom][loc]['binary'], 'cid': cid})
e = expression(loadable_list=tab, cond_names=[p.name for p in list_of_peaks])
if sort_clusters:
e.sort('cid')
return e
def chip_seq_cluster_heatmap(self, list_of_peaks, list_of_trks, filename=None, norm_by_library_size=False, bins=20,
pileup_distance=1000, merge_peaks_distance=400, sort_clusters=True, cache_data=False, bracket=None,
range_bracket=None, frames=False, titles=None, read_extend=200, imshow=True, cmap=cm.plasma,
log_pad=None, log=2,
size=None, **kargs):
"""
**Purpose**
Combine and merge all peaks, extract the read pileups then categorize the peaks into
similar groupings. Return a new list of genelists, one genelist for each grouping
that contains the list of genomic locations in each group. Finally, draw a nice
heatmap to <filename>.
The order of the genomic locations and order of the groups must be maintained
between the heatmap and the returned data.
NOTE: I sort of named this function incorrectly with the whole 'cluster' business.
Although it's not wrong to label the returned groups as clusters it is certainly
confusing and may imply that some sort of k-means or hierarchical clustering
is performed. No clustering is performed, instead groups are made based on a binary
determination from the list_of_peaks. So below, where I refer to 'cluster'
I really mean group. Later I may add k-means | |
-> structure: parameter "path" of String, parameter
"shock_id" of String, parameter "ftp_url" of String, parameter
"gff_file" of type "File" -> structure: parameter "path" of
String, parameter "shock_id" of String, parameter "ftp_url" of
String, parameter "genome_name" of String, parameter
"workspace_name" of String, parameter "source" of String,
parameter "taxon_wsname" of String, parameter "taxon_id" of
String, parameter "release" of String, parameter "genetic_code" of
Long, parameter "scientific_name" of String, parameter "metadata"
of type "usermeta" -> mapping from String to String, parameter
"generate_missing_genes" of type "boolean" (A boolean - 0 for
false, 1 for true. @range (0, 1))
:returns: instance of type "GenomeSaveResult" -> structure: parameter
"genome_ref" of String
"""
return self._client.run_job('GenomeFileUtil.fasta_gff_to_genome',
[params], self._service_ver, context)
def fasta_gff_to_genome_json(self, params, context=None):
    """
    Import a FASTA + GFF file pair and return the resulting genome data
    itself (rather than just a reference to the saved object).

    :param params: instance of type "FastaGFFToGenomeParams" -- keys include
        "fasta_file" and "gff_file" (each a "File" structure with "path",
        "shock_id" or "ftp_url"), "genome_name", "workspace_name", "source",
        "taxon_wsname", "taxon_id", "release", "genetic_code",
        "scientific_name", "metadata" (a "usermeta" mapping from String to
        String) and "generate_missing_genes" (boolean: 0 for false, 1 for
        true; when set, spoofed genes are generated for CDS/mRNA features
        that lack a corresponding gene).
    :param context: optional job context forwarded to the service call.
    :returns: instance of unspecified object (the genome data).
    """
    # Delegate to the generic job runner on the service client, using the
    # service version this client instance was configured with.
    job_input = [params]
    return self._client.run_job('GenomeFileUtil.fasta_gff_to_genome_json',
                                job_input, self._service_ver, context)
def fasta_gff_to_metagenome(self, params, context=None):
    """
    Import a FASTA + GFF file pair as a metagenome and save it to the
    workspace.

    :param params: instance of type "FastaGFFToMetagenomeParams" -- keys
        include "fasta_file" and "gff_file" (each a "File" structure with
        "path", "shock_id" or "ftp_url"), "genome_name", "workspace_name",
        "source", "scientific_name", "metadata" (a "usermeta" mapping from
        String to String) and "generate_missing_genes" (boolean: 0 for
        false, 1 for true; when set, spoofed genes are generated for
        CDS/mRNA features that lack a corresponding gene).
    :param context: optional job context forwarded to the service call.
    :returns: instance of type "MetagenomeSaveResult" -> structure:
        parameter "metagenome_ref" of String
    """
    # Delegate to the generic job runner on the service client.
    job_input = [params]
    return self._client.run_job('GenomeFileUtil.fasta_gff_to_metagenome',
                                job_input, self._service_ver, context)
def save_one_genome(self, params, context=None):
"""
:param params: instance of type "SaveOneGenomeParams" -> structure:
parameter "workspace" of String, parameter "name" of String,
parameter "data" of type "Genome" (Genome object holds much of the
data relevant for a genome in KBase Genome publications should be
papers about the genome Should the Genome object contain a list of
contig_ids too? Source: allowed entries RefSeq, Ensembl,
Phytozome, RAST, Prokka, User_upload #allowed entries RefSeq,
Ensembl, Phytozome, RAST, Prokka, User_upload controlled
vocabulary managed by API Domain is a controlled vocabulary
Warnings : mostly controlled vocab but also allow for unstructured
Genome_tiers : controlled vocabulary (based on ap input and API
checked) Allowed values: #Representative, Reference, ExternalDB,
User Examples Tiers: All phytozome - Representative and ExternalDB
Phytozome flagship genomes - Reference, Representative and
ExternalDB Ensembl - Representative and ExternalDB RefSeq
Reference - Reference, Representative and ExternalDB RefSeq
Representative - Representative and ExternalDB RefSeq Latest or
All Assemblies folder - ExternalDB User Data - User tagged Example
Sources: RefSeq, Ensembl, Phytozome, Microcosm, User, RAST,
Prokka, (other annotators) @optional warnings contig_lengths
contig_ids source_id taxonomy publications @optional
ontology_events ontologies_present non_coding_features mrnas
genome_type @optional genbank_handle_ref gff_handle_ref
external_source_origination_date @optional release
original_source_file_name notes quality_scores suspect
assembly_ref @metadata ws gc_content as GC content @metadata ws
taxonomy as Taxonomy @metadata ws md5 as MD5 @metadata ws dna_size
as Size @metadata ws genetic_code as Genetic code @metadata ws
domain as Domain @metadata ws source_id as Source ID @metadata ws
source as Source @metadata ws scientific_name as Name @metadata ws
genome_type as Genome Type @metadata ws length(features) as Number
of Protein Encoding Genes @metadata ws length(cdss) as Number of
CDS @metadata ws assembly_ref as Assembly Object @metadata ws
num_contigs as Number contigs @metadata ws length(warnings) as
Number of Genome Level Warnings @metadata ws suspect as Suspect
Genome) -> structure: parameter "id" of type "Genome_id" (KBase
genome ID @id kb), parameter "scientific_name" of String,
parameter "domain" of String, parameter "warnings" of list of
String, parameter "genome_tiers" of list of String, parameter
"feature_counts" of mapping from String to Long, parameter
"genetic_code" of Long, parameter "dna_size" of Long, parameter
"num_contigs" of Long, parameter "molecule_type" of String,
parameter "contig_lengths" of list of Long, parameter "contig_ids"
of list of String, parameter "source" of String, parameter
"source_id" of type "source_id" (Reference to a source_id @id
external), parameter "md5" of String, parameter "taxonomy" of
String, parameter "gc_content" of Double, parameter "publications"
of list of type "publication" (Structure for a publication (float
pubmedid string source (ex. Pubmed) string title string web
address string publication year string authors string journal))
-> tuple of size 7: parameter "pubmedid" of Double, parameter
"source" of String, parameter "title" of String, parameter "url"
of String, parameter "year" of String, parameter "authors" of
String, parameter "journal" of String, parameter "ontology_events"
of list of type "Ontology_event" (@optional ontology_ref
method_version eco description) -> structure: parameter "id" of
String, parameter "ontology_ref" of type "Ontology_ref" (Reference
to a ontology object @id ws KBaseOntology.OntologyDictionary),
parameter "method" of String, parameter "method_version" of
String, parameter "timestamp" of String, parameter "eco" of
String, parameter "description" of String, parameter
"ontologies_present" of mapping from String to mapping from String
to String, parameter "features" of list of type "Feature"
(Structure for a single CDS encoding ?gene? of a genome ONLY PUT
GENES THAT HAVE A CORRESPONDING CDS IN THIS ARRAY NOTE: Sequence
is optional. Ideally we can keep it in here, but Recognize due to
space constraints another solution may be needed. We may want to
add additional fields for other CDM functions (e.g., atomic
regulons, coexpressed fids, co_occurring fids,...)
protein_translation_length and protein_translation are for longest
coded protein (representative protein for splice variants) NOTE:
New Aliases field definitely breaks compatibility. As Does
Function. flags are flag fields in GenBank format. This will be a
controlled vocabulary. Initially Acceptable values are pseudo,
ribosomal_slippage, and trans_splicing Md5 is the md5 of
dna_sequence. @optional functions ontology_terms note
protein_translation mrnas flags warnings @optional inference_data
dna_sequence aliases db_xrefs children functional_descriptions) ->
structure: parameter "id" of type "Feature_id" (KBase Feature ID
@id external), parameter "location" of list of tuple | |
"""
Implements the ArraysInterface object and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.tools import sharedmemtools as _smt
class ArraysInterface(object):
    """
    An interface between pyGSTi's optimization methods and data storage arrays.

    Algorithms (particularly the Levenberg-Marquardt nonlinear least-squares
    solver) create and manipulate several kinds of potentially distributed
    arrays: "jtj" (Jacobian^T * Jacobian), "jtf" (Jacobian^T *
    objectivefn_vector) and "x" (the model parameter vector).  Subclasses
    encapsulate every operation on those arrays, so the algorithm never needs
    to know how they are actually stored in memory (e.g. whether shared
    memory is used or not).
    """
    # Purely a base class -- it could become an abc.ABC in the FUTURE.
class UndistributedArraysInterface(ArraysInterface):
"""
An arrays interface for the case when the arrays are not actually distributed.
Parameters
----------
num_global_elements : int
The total number of objective function "elements", i.e. the size of the
objective function array `f`.
num_global_params : int
The total number of (model) parameters, i.e. the size of the `x` array.
"""
def __init__(self, num_global_elements, num_global_params):
self.num_global_elements = num_global_elements
self.num_global_params = num_global_params
def allocate_jtf(self):
    """
    Allocate an array for holding a `'jtf'`-type value.

    Returns
    -------
    numpy.ndarray or LocalNumpyArray
        An uninitialized double-precision vector of length
        `num_global_params`.
    """
    nparams = self.num_global_params
    return _np.empty(nparams, 'd')
def allocate_jtj(self):
    """
    Allocate an array for holding an approximated Hessian (type `'jtj'`).

    Returns
    -------
    numpy.ndarray or LocalNumpyArray
        An uninitialized double-precision square matrix with
        `num_global_params` rows and columns.
    """
    shape = (self.num_global_params, self.num_global_params)
    return _np.empty(shape, 'd')
def allocate_jac(self):
    """
    Allocate an array for holding a Jacobian matrix (type `'ep'`).

    Returns
    -------
    numpy.ndarray or LocalNumpyArray
        An uninitialized double-precision matrix of shape
        `(num_global_elements, num_global_params)`.
    """
    nelems = self.num_global_elements
    nparams = self.num_global_params
    return _np.empty((nelems, nparams), 'd')
def deallocate_jtf(self, jtf):
    """
    Free an array for holding an objective function value (type `'jtf'`).

    Undistributed arrays are plain numpy arrays, so there is nothing to
    release explicitly -- garbage collection handles it.

    Returns
    -------
    None
    """
    # Intentionally a no-op.
def deallocate_jtj(self, jtj):
    """
    Free an array for holding an approximated Hessian (type `'jtj'`).

    Undistributed arrays are plain numpy arrays, so there is nothing to
    release explicitly -- garbage collection handles it.

    Returns
    -------
    None
    """
    # Intentionally a no-op.
def deallocate_jac(self, jac):
    """
    Free an array for holding a Jacobian matrix (type `'ep'`).

    Undistributed arrays are plain numpy arrays, so there is nothing to
    release explicitly -- garbage collection handles it.

    Returns
    -------
    None
    """
    # Intentionally a no-op.
def global_num_elements(self):
    """
    The total number of objective function "elements".

    This is the size/length of the objective function `f` vector.

    Returns
    -------
    int
    """
    # Nothing is distributed, so the global count is just the stored size.
    return self.num_global_elements
def jac_param_slice(self, only_if_leader=False):
    """
    The slice into a Jacobian's columns that belong to this processor.

    Parameters
    ----------
    only_if_leader : bool, optional
        If `True`, the current processor's parameter slice is only returned
        if the processor is the "leader" (i.e. the first) of the processors
        that calculate the same parameter slice.  All non-leader processors
        return the zero-slice `slice(0, 0)`.

    Returns
    -------
    slice
    """
    # With a single (undistributed) processor, this processor owns every
    # column and is trivially the leader, so `only_if_leader` is irrelevant.
    return slice(0, self.num_global_params)
def jtf_param_slice(self):
    """
    The slice into a `'jtf'` vector giving the rows owned by this processor.

    Returns
    -------
    slice
    """
    # The single processor owns the whole parameter range.
    return slice(0, self.num_global_params)
def param_fine_info(self):
    """
    Returns information regarding how model parameters are distributed among hosts and processors.

    This information relates to the "fine" distribution used in distributed
    layouts, and is needed by some algorithms which utilize shared-memory
    communication between processors on the same host.

    Returns
    -------
    param_fine_slices_by_host : list
        A list with one entry per host.  Each entry is itself a list of
        `(rank, (global_param_slice, host_param_slice))` elements where
        `rank` is the top-level overall rank of a processor,
        `global_param_slice` is the parameter slice that processor owns and
        `host_param_slice` is the same slice relative to the parameters
        owned by the host.
    owner_host_and_rank_of_global_fine_param_index : dict
        A mapping between parameter indices (keys) and the owning processor
        rank and host index.  Values are `(host_index, processor_rank)`
        tuples.
    """
    nparams = self.num_global_params
    full_slice = slice(0, nparams)
    # Host 0 / rank 0 owns the entire parameter range in the
    # undistributed case.
    host0_entries = [(0, (full_slice, full_slice))]
    owners = {idx: (0, 0) for idx in range(nparams)}
    return [host0_entries], owners
def allgather_x(self, x, global_x):
    """
    Gather a parameter (`x`) vector onto all the processors.

    Parameters
    ----------
    x : numpy.array or LocalNumpyArray
        The input vector.
    global_x : numpy.array or LocalNumpyArray
        The output (gathered) vector, filled in place.

    Returns
    -------
    None
    """
    # No MPI communication needed -- the local vector is already global.
    global_x[:] = x
def allscatter_x(self, global_x, x):
    """
    Pare down an already-scattered global parameter (`x`) vector to be just a local `x` vector.

    Parameters
    ----------
    global_x : numpy.array or LocalNumpyArray
        The input vector.  This global vector is already present on all the
        processors, so there's no need to do any MPI communication.
    x : numpy.array or LocalNumpyArray
        The output vector, typically a slice of `global_x`; filled in place.

    Returns
    -------
    None
    """
    # The "local" portion is the whole vector in the undistributed case.
    x[:] = global_x
def scatter_x(self, global_x, x):
    """
    Scatter a global parameter (`x`) vector onto all the processors.

    Parameters
    ----------
    global_x : numpy.array or LocalNumpyArray
        The input vector.
    x : numpy.array or LocalNumpyArray
        The output (scattered) vector, filled in place.

    Returns
    -------
    None
    """
    # A single processor receives the entire vector.
    x[:] = global_x
def allgather_f(self, f, global_f):
    """
    Gather an objective function (`f`) vector onto all the processors.

    Parameters
    ----------
    f : numpy.array or LocalNumpyArray
        The input vector.
    global_f : numpy.array or LocalNumpyArray
        The output (gathered) vector, filled in place.

    Returns
    -------
    None
    """
    # No MPI communication needed -- the local vector is already global.
    global_f[:] = f
def gather_jtj(self, jtj, return_shared=False):
    """
    Gather a Hessian (`jtj`) matrix onto the root processor.

    Parameters
    ----------
    jtj : numpy.array or LocalNumpyArray
        The (local) input matrix to gather.
    return_shared : bool, optional
        Whether the returned array is allowed to be a shared-memory array,
        which results in a small performance gain because the array used
        internally to gather the results can be returned directly.  When
        `True` a shared memory handle is also returned, and the caller
        assumes responsibility for freeing the memory via
        :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.

    Returns
    -------
    gathered_array : numpy.ndarray or None
        The full (global) output array on the root (rank=0) processor and
        `None` on all other processors.
    shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
        Returned only when `return_shared == True`.  The shared memory
        handle associated with `gathered_array`, which is needed to free
        the memory.
    """
    # Nothing is distributed, so the local array already *is* the global
    # (root-processor) array -- no gathering is required.
    if return_shared:
        return jtj, None  # no shared-memory handle to clean up
    return jtj
def scatter_jtj(self, global_jtj, jtj):
    """
    Scatter a Hessian (`jtj`) matrix onto all the processors.

    Parameters
    ----------
    global_jtj : numpy.ndarray
        The global Hessian matrix to scatter.
    jtj : numpy.ndarray or LocalNumpyArray
        The local destination array, filled in place.

    Returns
    -------
    None
    """
    # The single processor receives the whole matrix (2-D slice assignment).
    jtj[:, :] = global_jtj
def gather_jtf(self, jtf, return_shared=False):
    """
    Gather a `jtf` vector onto the root processor.

    Parameters
    ----------
    jtf : numpy.array or LocalNumpyArray
        The local input vector to gather.
    return_shared : bool, optional
        Whether the returned array is allowed to be a shared-memory array,
        which results in a small performance gain because the array used
        internally to gather the results can be returned directly.  When
        `True` a shared memory handle is also returned, and the caller
        assumes responsibility for freeing the memory via
        :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.

    Returns
    -------
    gathered_array : numpy.ndarray or None
        The full (global) output array on the root (rank=0) processor and
        `None` on all other processors.
    shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
        Returned only when `return_shared == True`.  The shared memory
        handle associated with `gathered_array`, which is needed to free
        the memory.
    """
    # Nothing is distributed -- the local vector already is the global one.
    if return_shared:
        return jtf, None  # no shared-memory handle to clean up
    return jtf
def scatter_jtf(self, global_jtf, jtf):
    """
    Scatter a `jtf` vector onto all the processors.

    Parameters
    ----------
    global_jtf : numpy.ndarray
        The global vector to scatter.
    jtf : numpy.ndarray or LocalNumpyArray
        The local destination array, filled in place.

    Returns
    -------
    None
    """
    # The single processor receives the entire vector.
    jtf[:] = global_jtf
def global_svd_dot(self, jac_v, minus_jtf):
    """
    Gathers the dot product between a `jtj`-type matrix and a `jtf`-type vector into a global result array.

    This is typically used within SVD-defined basis calculations, where
    `jac_v` is the "V" matrix of the SVD of a jacobian, and `minus_jtf` is
    the negative dot product between the Jacobian matrix and objective
    function vector.

    Parameters
    ----------
    jac_v : numpy.ndarray or LocalNumpyArray
        An array of `jtj`-type.
    minus_jtf : numpy.ndarray or LocalNumpyArray
        An array of `jtf`-type.

    Returns
    -------
    numpy.ndarray
        The global (gathered) parameter vector `dot(jac_v.T, minus_jtf)`.
    """
    # No gathering needed in the undistributed case -- compute directly.
    v_transpose = jac_v.T
    return _np.dot(v_transpose, minus_jtf)
def fill_dx_svd(self, jac_v, global_vec, | |
<gh_stars>0
#!/usr/local/bin/python3
"""
Copyright contributors to the Application Gateway project
"""
import logging as sys_logging
import sys
import os
import traceback
import yaml
from ibm_application_gateway.config import *
from ibm_application_gateway.system import *
#############################################################################
# Main line.
#
# Usage: <script> <out-file> [pem-file]
#   out-file : file to which the generated configuration is written
#   pem-file : optional PEM certificate file
# Check the command line options.
if len(sys.argv) != 2 and len(sys.argv) != 3:
    print("Usage: {0} [out-file] {{pem-file}}".format(__file__))
    sys.exit(1)
# First positional argument: the output file name.
outFile = sys.argv[1]
# Second (optional) positional argument: a PEM file, or None when omitted.
pemFile = sys.argv[2] if len(sys.argv) == 3 else None
# Logger named after this script's file name.
logger = sys_logging.getLogger(__file__)
try:
#
# Load some of our files.
#
snippet_file = ConfiguratorFile(name = "snippet.html")
http_xform_file = ConfiguratorFile(name = "httptrans_req.xsl")
rate_limit_file = ConfiguratorFile(name = "ratelimit.yaml")
error_pages_zip = ConfiguratorFile(name = "error_pages.zip")
management_pages_zip = ConfiguratorFile(name = "management_pages.zip")
local_pages_zip = ConfiguratorFile(name = "local_pages.zip")
#
# Set up the logging configuration.
#
logging = Logging(
components = [ "audit.azn", "audit.authn" ],
request_log = LoggingRequestLog(
format = "%h %l %u %t \"%r\" %s %b",
file = LoggingRequestLogFile (
file_name = "/var/tmp/request.log")
),
statistics = LoggingStatistics(
server = "127.0.0.1",
port = 8125,
frequency = 20,
components = [ "iag.authn" ]),
tracing = [ LoggingTracing(
component = "pdweb.snoop",
file_name = "/var/tmp/tracing.log",
level = 9) ],
transaction = LoggingTransaction(
file_name = "/var/tmp/transaction.log",
compress = False,
max_file_size = 1024000,
max_files = 1),
json_logging = False
)
#
# Set up the OIDC-CI identity section.
#
# identity = Identity(ci_oidc = CiOidc(
# hostname = "my-tenant.ice.ibmcloud.com",
# client_id = "dummy_client_id",
# client_secret = "dummy_client_secret",
# mapped_identity = "identity",
# redirect_uri_host = "a.ibm.com",
# response_type = "id_token token",
# response_mode = "form_post",
# proxy = "https://proxy:3128",
# scopes = [ "profile", "email" ],
# allowed_query_args = [ "arg1", "arg2" ],
# bearer_token_attrs = [ "-attr_1", "+*" ],
# id_token_attrs = [ "-id_attr_1", "+*" ]
# ))
identity = Identity(oidc = Oidc(
discovery_endpoint = "https://www.test.com/mga/sps/oauth/oauth20/metadata/oidc_def",
client_id = "dummy_client_id",
client_secret = "dummy_client_secret",
ssl = OidcSsl(
certificate=[
"PEMFormatCertificateString1",
"PEMFormatCertificateString2"
]
),
mapped_identity = "identity",
redirect_uri_host = "a.ibm.com",
response_type = "id_token token",
response_mode = "form_post",
proxy = "https://proxy:3128",
scopes = [ "profile", "email" ],
allowed_query_args = [ "arg1", "arg2" ],
bearer_token_attrs = [ "-attr_1", "+*" ],
id_token_attrs = [ "-id_attr_1", "+*" ]),
oauth = [ Oauth(
name = "Verify",
restricted = False,
introspection_endpoint = "https://www.test.com/mga/sps/oauth/oauth20/metadata/oidc_def",
client_id = "dummy_client_id",
client_secret = "dummy_client_secret",
client_id_hdr = "dummy_client_id_header",
auth_method = "client_secret_basic",
token_type_hint = "access_token",
ssl = OauthSsl(
certificate=[
"PEMFormatCertificateString1",
"PEMFormatCertificateString2"
]
),
mapped_identity = "identity",
proxy = "https://proxy:3128",
attributes = [ "-attr_1", "+*" ],
multi_valued_scope = True
)],
eai = Eai(triggers = [
"/foo/bar",
"/foo/baz"
]),
auth_challenge_redirect = IdentityAuthChallengeRedirect(
url="/foo/login",
parameters=[
IdentityAuthChallengeRedirectParameters(
source="macro",
value="METHOD",
name="methodName"
),
IdentityAuthChallengeRedirectParameters(
source="header",
value="X-IBM-Proxy",
name="proxyName"
),
IdentityAuthChallengeRedirectParameters(
source="credential",
value="AZN_CRED_NETWORK_ADDRESS_STR",
name="sourceIp"
),
]
)
)
#
# Set up the list of authorization rules.
#
authorization = Authorization(rules=[
AuthorizationRules(
name = "administrators",
rule = "(any groupIds = \"administrator\")"
),
AuthorizationRules(
name = "users",
rule = "(all authenticationLevels > \"0\")"
)
])
#
# Advanced configuration.
#
advanced = Advanced(configuration = [
AdvancedConfiguration(
stanza = "test_stanza",
entry = "test_entry",
operation = "set",
value = [ "value_1", "value_2" ]),
AdvancedConfiguration(
stanza = "test_delete_stanza",
entry = "test_delete_entry",
operation = "delete"
)
])
#
# Server configuration.
#
web_socket = ServerWebsocket(
worker_threads=ServerWebsocketWorkerThreads(
max=300,
idle=150
),
timeouts=ServerWebsocketTimeouts(
front_end=ServerWebsocketTimeoutsFrontEnd(
read=10,
write=10
),
applications=ServerWebsocketTimeoutsApplications(
read=10,
write=10
)
)
)
redis = ServerSessionRedis(
enabled = True,
client_list_cache_lifetime = 20,
concurrent_sessions = ServerSessionRedisConcurrentSessions(
enabled = True,
prompt_for_displacement = True,
max_user_sessions = 10,
user_identity_attribute_name = "user-id"
)
)
session = ServerSession(
permit_user_switching = True,
redis = redis,
)
ssl = ServerSsl(
front_end = ServerSslFrontEnd(
certificate = [
"certificate",
"key"
],
sni = [ ServerSslFrontEndSni(
certificate = [
"certificate",
"key"
],
hostname = "testhost.ibm.com")
]
),
applications = ServerSslApplications()
)
apps = ServerLocalApplications(
cred_viewer=ServerLocalApplicationsCredViewer(
path_segment="cred-viewer-app",
attributes= [ "-a1", "-a2"]),
azn_decision=ServerLocalApplicationsAznDecision(
path_segment="azn-decision-app",
max_cache_lifetime=300,
max_cache_size=3600
),
jwks=ServerLocalApplicationsJwks(
path_segment="jwks")
)
localPages = ServerLocalPages(
content = local_pages_zip.content(),
type = "zip"
)
mgmtPages = ServerManagementPages(
content = management_pages_zip.content(),
type = "zip"
)
errorPages = ServerErrorPages(
content = error_pages_zip.content(),
type = "zip"
)
rateLimiting = ServerRateLimiting(
cache_size=1337,
redis=ServerRateLimitingRedis(
collection_name="test-collection-2",
sync_window=137
)
)
server = Server(
ssl = ssl,
failover = ServerFailover(key = "simple key"),
session = session,
worker_threads = 200,
http2 = False,
websocket = web_socket,
local_pages = localPages,
management_pages = [ mgmtPages ],
error_pages = [ errorPages ],
local_applications = apps,
rate_limiting = rateLimiting
)
#
# Resource Servers configuration.
#
resource_servers = [
ResourceServer(
path = "/static",
connection_type = "tcp",
transparent_path = False,
stateful = True,
http2 = None,
identity_headers = ResourceServerIdentityHeaders(
encoding = "utf8_bin",
basic_auth = "supply",
ip_address = True,
session_cookie = True,
attributes = [
ResourceServerIdentityHeadersAttributes(
attribute = "attribute_name",
header = "header_name"
),
ResourceServerIdentityHeadersAttributes(
attribute = "attribute_name_2"
)
],
jwt = ResourceServerIdentityHeadersJwt(
certificate = [
"certificate",
"key"
],
hdr_name = "jwt",
claims = [
ResourceServerIdentityHeadersJwtClaims(
name = "iss",
text = "www.ibm.com"
),
ResourceServerIdentityHeadersJwtClaims(
name = "sub",
attr = "AZN_CRED_PRINCIPAL_NAME"
),
ResourceServerIdentityHeadersJwtClaims(
attr = "AZN_*"
)
]
),
ltpa = ResourceServerIdentityHeadersLtpa (
key = "key",
key_password = "<PASSWORD>",
version = 2
)
),
cookies = ResourceServerCookies(
#junction_cookies = ResourceServerCookiesJunctionCookie(
# position = "inhead",
# version = "xhtml10",
# ensure_unique = True,
# preserve_name = True
#),
),
mutual_auth = ResourceServerMutualAuth(
basic_auth = ResourceServerMutualAuthBasicAuth(
username = "test",
password = "<PASSWORD>"
),
certificate_auth=[
"certificate",
"key"
]
),
servers = [
ResourceServerServers(
host = "10.10.10.200",
port = 1337,
ssl = ResourceServerSsl(
server_dn = "cn=ibm,dc=com",
certificate=[
"certificate",
"key"
]
),
url_style = ResourceServerUrlStyle(
case_insensitive = False,
windows = False
),
virtual_host = "test.ibm.com:9443"
),
],
health = None,
worker_threads = None,
identity = ResourceServerIdentity(
oauth = "Verify"
)
),
ResourceServer(
virtual_host = "iag-test:443",
connection_type = "tcp",
transparent_path = False,
stateful = True,
http2 = None,
identity_headers = ResourceServerIdentityHeaders(
ip_address = True,
encoding = "utf8_bin",
basic_auth = "supply",
session_cookie = True
),
cookies = ResourceServerCookies(
#junction_cookies = ResourceServerCookiesJunctionCookie(
# position = "inhead",
# version = "xhtml10",
# ensure_unique = True,
# preserve_name = True
#),
),
mutual_auth = ResourceServerMutualAuth(
basic_auth = ResourceServerMutualAuthBasicAuth(
username = "test",
password = "<PASSWORD>"
)
),
servers = [
ResourceServerServers(
host = "10.10.10.200",
port = 1337,
ssl = ResourceServerSsl(
server_dn = "cn=ibm,dc=com",
),
url_style = ResourceServerUrlStyle(
case_insensitive = False,
windows = False
)
)
],
sni="test.ibm.com",
health = None,
worker_threads = None
)
]
#
# Policies configuration.
#
policies = Policies(
http_transformations = PoliciesHttpTransformations(
request = [
PoliciesHttpTransformationRule(
name = "RequestHeaderInjector1",
method = "*",
paths = ["*"],
attributes = ["AZN_CRED_PRINCIPAL_NAME"],
rule = http_xform_file.content()
)
]
),
cors = [PoliciesCors(
name = "apiPolicy",
method = "*",
paths = ["*"],
host = "ibm_app_gw.ibm.com",
policy = PoliciesCorsPolicy(
allow_origins = ["*"],
handle_pre_flight = True,
allow_headers = ["X-IBM"],
max_age = 3600,
allow_methods = ["IBMGET"],
allow_credentials = True,
expose_headers = ["IBMHDR"]
)
)],
rate_limiting = [
PoliciesRateLimiting(
name = "rl1",
methods = ["*"],
paths = ["rl1"],
rule = rate_limit_file.content()
),
PoliciesRateLimiting(
name = "rl2",
methods = ["*"],
paths = ["rl2"],
rule = rate_limit_file.content()
)
],
content_injection = [
PoliciesContentInjection(
name = "test",
paths = ["/inject"],
location = "<h3>*",
content = snippet_file.content()
)
],
authorization = [
PoliciesAuthorization(
name = "test",
host = "test.ibm.com",
paths = ["*"],
methods = ["GET","PUT"],
rule = "(any groupIds = \"application owners\")",
action = "deny"
),
PoliciesAuthorization(
name = "administrators",
paths = ["*"],
methods = ["GET", "PUT"],
action = "permit"
),
PoliciesAuthorization(
name="policy1",
rule="acr = \"urn:ibm:security:policy:id:1\"",
paths=["/policy1"],
action="permit"
),
PoliciesAuthorization(
name="policy1_obligate",
rule="acr != \"urn:ibm:security:policy:id:1\"",
paths=["/policy1"],
action="obligate",
obligation=PoliciesObligation(
oidc=PoliciesObligationOidc(
acr_values="urn:ibm:security:policy:id:1 urn:ibm:security:policy:id:2",
prompt="login"
)
)
)
]
)
secrets = Secrets(
obf_key="myObfuscationKey",
enc_key="-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----"
)
services = Services(
redis=ServicesRedis(
key_prefix="key-",
default_collection="test_collection",
collections = [
ServicesRedisCollections(
name="test-collection",
matching_host="www.webseal.ibm.com",
max_pooled_connections=20,
idle_timeout=10,
health_check_interval=20,
cross_domain_support=ServicesRedisCrossDomainSupport(
master_authn_server_url="https://mas.ibm.com",
master_session_code_lifetime=15
),
servers=[
ServicesRedisServers(
name="redis-a",
host="redis-a.ibm.com",
port=6380,
username="testuser",
password="<PASSWORD>",
ssl=ServicesRedisSsl(
trust_certificates=[
"@redis-ca.crt"
],
client_certificate=[
"@cert.crt", "@cert.key"
],
sni="redis-a.ibm.com"
)
)
]
),
ServicesRedisCollections(
name = "test-collection-2",
matching_host = "www.webseal.ibm.com",
max_pooled_connections = 20,
idle_timeout = 10,
health_check_interval = 20,
cross_domain_support = ServicesRedisCrossDomainSupport(
master_authn_server_url = "https://mas.ibm.com",
master_session_code_lifetime = 15
),
servers = [
ServicesRedisServers(
name = "redis-a",
host = "redis-a.ibm.com",
port = 6380,
username = "testuser",
password = "<PASSWORD>",
ssl = ServicesRedisSsl(
trust_certificates = [
"@redis-ca.crt"
],
client_certificate = [
"@cert.crt", "@cert.key"
],
sni = "redis-a.ibm.com"
)
)
]
| |
to include the changes sent by the user."""
subtract, add = value
value = set(subject.get_value(attribute.key().name()) or [])
value -= set(subtract)
value |= set(add)
return list(value)
# Default subject type for each supported subdomain.
# NOTE(review): presumably used when a subject is created without an
# explicit type -- confirm against callers.
DEFAULT_SUBJECT_TYPES = {
    'haiti': 'hospital',
    'pakistan': 'hospital'
}
def match_email(text):
    """Return the first email-like token found in ``text``, or None.

    The pattern tolerates a leading display name and optional angle
    brackets, e.g. "Jane Doe <jane@example.org>".
    """
    pattern = (r'(.+\s+)*(<)*\s*(?P<email>\w+(?:.+\w+)*@\w+(?:.+\w+)'
               r'(?:\.\w+)+)(>)*')
    found = re.match(pattern, text)
    if found:
        return found.group('email')
    return None
def get_min_subjects_by_lowercase_title(subdomain, title_lower, max=3):
    """Return up to ``max`` minimal subjects whose title equals
    ``title_lower``, compared case-insensitively.

    NOTE: the parameter name ``max`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    found = []
    subjects = cache.MINIMAL_SUBJECTS[subdomain]
    for key in subjects:
        candidate = subjects[key]
        if candidate.get_value('title', '').lower() != title_lower:
            continue
        found.append(candidate)
        if len(found) >= max:
            break
    return found
class MailEditor(InboundMailHandler):
"""Primary handler for inbound emails targeting
<subdomain>-<EMAIL>.
Args:
account: the account of the user the email is from
Methods:
init: handles various initialization tasks for the class
validate_subdomain: confirms that the given subdomain is valid
have_profile_info: checks to see if the current account exists and has a
valid nickname and affiliation
check_and_store_profile_info: checks to see if the email includes
profile information for the user and adds to the datastore if found
receive: override function- triggered when an email is received
match_nickname_affiliation: locates the nickname and affiliation in a
body of text, if present
extract_subject_from_update_line: given an update header, locates the
subject title (and key name if present)
extract_update_lines: given a body of text and an update header, returns
a list of the non-empty lines up through the next update
process_email: searches the text of an email for updates and errors
get_attribute_matches: gets a list of potential attribute name matches
from a given update line
check_and_return_attr_names: searches the MailUpdateMessage table in
the datastore for an attribute mapped to the given string
update_subjects: updates the datastore with all valid updates
send_email: sends a response/confirmation email to the user
send_template_email: sends an email to the user containing a blank
template on how to update subjects
"""
def init(self, message):
    """Initialize per-message state: sender email, account, subdomain,
    and the regexes used to find update lines in the message body."""
    self.domain = 'http://%s' % self.request.headers['Host']
    # Extract the bare email address from the sender string.
    self.email = match_email(message.sender)
    self.account = model.Account.all().filter('email =', self.email).get()
    # The "To" address is expected to start with "<subdomain>-"; the part
    # before the first dash names the subdomain.
    self.subdomain = message.to.split('-')[0]
    subject_pattern = r'update\s+(?P<subject>.*)'
    self.update_line_flags = re.UNICODE | re.MULTILINE | re.I
    self.update_line_regexes = {
        'unquoted': '^%s' % subject_pattern,
        'quoted': '^(?P<quotes>[^a-zA-Z0-9_\n]+)%s' % subject_pattern,
        'key': '.*\((?P<subject_name>.+/.+)\)\s*$'
    }
def validate_subdomain(self):
    """Return the Subdomain entity for the user-supplied subdomain, or
    None when no such subdomain exists (falsy => invalid)."""
    subdomain_entity = model.Subdomain.get_by_key_name(self.subdomain)
    return subdomain_entity
def have_profile_info(self):
    """Truthy when the sender has an account with both a nickname and an
    affiliation; falsy otherwise (the raw operand is returned, not a
    bool, matching how callers use it in boolean context)."""
    account = self.account
    return account and account.nickname and account.affiliation
def check_and_store_profile_info(self, message):
    """Look for a nickname and affiliation in the message body.

    When both are present in the text before the first 'update' line,
    create the sender's account if needed, store the profile fields, and
    return the account. Returns None when nothing was found."""
    # TODO(pfritzsche): Add HTML support.
    for content_type, body in message.bodies('text/plain'):
        decoded = body.decode()
        nickname, affiliation = self.match_nickname_affiliation(
            decoded.split('update')[0])
        if not (nickname and affiliation):
            continue
        if not self.account:
            self.account = model.Account(
                email=self.email, description=message.sender,
                locale='en', default_frequency='instant',
                email_format='plain')
        self.account.nickname = nickname
        self.account.affiliation = affiliation
        db.put(self.account)
        return self.account
def receive(self, message):
    """Overrides InboundMailHandler. Runs when a new message is received.

    Authenticates the email, then locates any updates and/or errors in the
    body of the email. If any updates are found, they are inserted into the
    datastore. If any updates or errors are found, or the email is not yet
    authorized to submit, then a response email is sent detailing any
    new information and/or problems. Only the first text/plain body is
    processed (see the break at the end of the loop).
    """
    self.init(message)
    # Profile info may arrive in this very message, so check the stored
    # account first and fall back to scanning the message body.
    self.need_profile_info = not (self.have_profile_info() or
        self.check_and_store_profile_info(message))
    if not self.validate_subdomain():
        # TODO(pfritzsche): Add better handling of invalid subdomain
        self.send_email(message, {}, no_subdomain=True)
        return
    # Use the account's locale when available for translated responses.
    locale = self.account and self.account.locale or 'en'
    django.utils.translation.activate(locale)
    # TODO(pfritzsche): Add HTML support.
    for content_type, body in message.bodies('text/plain'):
        data = self.process_email(body.decode())
        if (data.unrecognized_subject_stanzas or data.ambiguous_stanzas or
            data.update_stanzas or data.notice_stanzas):
            # Email date arrives in the same form as the following example:
            # Thu, 19 Aug 2010 17:29:23 -0400.
            date_format = '%a, %d %b %Y %H:%M:%S'
            # Chop off the last 6 characters because %z UTC offset parsing
            # is not supported on all systems. Then manually parse it
            # because Python is silly [@http://bugs.python.org/issue6641]
            observed = (datetime.strptime(message.date[:-6], date_format) -
                parse_utc_offset(message.date[-5:]))
            # Only authorized senders may write to the datastore.
            if data.update_stanzas and not self.need_profile_info:
                self.update_subjects(data.update_stanzas, observed)
                logging.info('mail_editor.py: update received from %s' %
                    self.email)
            self.send_email(message, data)
        else:
            self.send_template_email(message)
        break # to only pay attention to the first body found
def match_nickname_affiliation(self, text):
    """Return a (nickname, affiliation) pair extracted from ``text``.

    Each element is the first value found after its label that is not the
    template's example value; missing values come back as None."""
    def _first_non_example(label, example):
        # e.g. "nickname: Jane" -> group 'nickname' == 'Jane'
        pattern = r'%s(:)*\s+(?P<%s>.+)' % (label, label)
        for found in re.finditer(pattern, text, flags=self.update_line_flags):
            if found.group(label) != example:
                return found.group(label).strip()
    return (_first_non_example('nickname', '<NAME>'),
            _first_non_example('affiliation', 'Smith Inc.'))
def extract_subject_from_update_line(self, match):
    """Resolve the subject named by an update-line match.

    Returns a single subject when the line carries an explicit key or the
    title matches exactly one subject; otherwise returns a (possibly
    empty) list of candidate subjects."""
    subject_line = match.group('subject')
    key_match = re.match(self.update_line_regexes['key'], subject_line,
                         flags=self.update_line_flags)
    if key_match:
        # An explicit "(subdomain/key)" suffix identifies the subject.
        return model.Subject.get(self.subdomain,
                                 key_match.group('subject_name'))
    candidates = get_min_subjects_by_lowercase_title(
        self.subdomain, subject_line.strip().lower())
    if len(candidates) == 1:
        return candidates[0].parent()
    return [ms.parent() for ms in candidates]
def extract_update_lines(self, match, body):
    """Return the lines of ``body`` belonging to ``match``'s update block.

    The block runs from the end of the match to the next 'update' marker
    (with the same quote prefix) or to the end of the body. The quote
    prefix, when present, is stripped from each returned line."""
    start = match.end()
    quotes = match.groupdict().get('quotes') or ''
    end = body.lower().find('%supdate'.lower() % quotes, start + 1)
    update_block = body[start:end] if end != -1 else body[start:]
    lines = []
    for line in update_block.split('\n'):
        if line.startswith(quotes):
            lines.append(line.replace(quotes, '', 1))
    return lines
def process_email(self, body):
    """Locate subject updates in the body of an email.

    Unquoted regions are searched first; quoted regions are only examined
    when the unquoted pass produced nothing. Returns a Struct with:
      update_stanzas: list of tuples (subject, updates for the subject)
      notice_stanzas: list of tuples (subject, error'd lines)
      ambiguous_stanzas: list of tuples (potential subjects, updates)
      unrecognized_subject_stanzas: list of tuples (subject title, updates)
    """
    data = Struct(
        update_stanzas=[],
        notice_stanzas=[],
        ambiguous_stanzas=[],
        unrecognized_subject_stanzas=[]
    )

    # Handles the work of the function. For each potential subject match
    # found, locates any updates or errors, and picks out unrecognized data.
    def process(matches):
        """Record updates/notices for each subject match on `data`.
        Returns True when the stop delimiter was encountered."""
        for subject_match in matches:
            notices = []
            updates = []
            stop = False
            subject = None
            subject_s = self.extract_subject_from_update_line(subject_match)
            update_lines = self.extract_update_lines(subject_match, body)
            if subject_s:
                if isinstance(subject_s, list):
                    # several subjects share this title; flag as ambiguous
                    data.ambiguous_stanzas.append((subject_s, update_lines))
                else:
                    subject = subject_s
            else:
                data.unrecognized_subject_stanzas.append(
                    (subject_match.group('subject'), update_lines))
            if not subject:
                continue
            subject_type = cache.SUBJECT_TYPES[self.subdomain][subject.type]
            for update in update_lines:
                if STOP_DELIMITER in update:
                    stop = True
                    break
                match_es = self.get_attribute_matches(subject_type, update)
                if match_es and isinstance(match_es, list):
                    # multiple attributes matched this line; report it
                    notice = AmbiguousUpdateNotice(match_es)
                    notices.append({
                        'error_message': notice.format(),
                        'original_line': update
                    })
                elif match_es:
                    name, update_text = match_es
                    attribute = cache.ATTRIBUTES[name]
                    value, notice = parse(attribute, update_text)
                    if value is not NO_CHANGE:
                        if (value and attribute.type == 'multi' and
                            isinstance(value, tuple)):
                            value = get_list_update(
                                subject, attribute, value)
                        updates.append((name, value))
                    if notice:
                        formatted_name = get_message(
                            'attribute_name', attribute.key().name(), 'en')
                        orig_line = '%s: %s' % (formatted_name,
                                                notice.update_text)
                        notices.append({
                            'error_message': notice.format(),
                            'original_line': orig_line
                        })
            if updates:
                data.update_stanzas.append((subject, updates))
            if notices:
                data.notice_stanzas.append((subject, notices))
            if stop:
                return True
        return False

    # BUG FIX: the original code checked an outer `stop` flag that the
    # nested function could never set (its assignment created a local),
    # so the stop delimiter never ended processing early. `process` now
    # reports the stop condition through its return value instead.
    stopped = False
    for key in ['unquoted', 'quoted']:
        matches = re.finditer(self.update_line_regexes[key],
                              body.split(STOP_DELIMITER)[0],
                              flags=self.update_line_flags)
        stopped = process(matches)
        if (data.ambiguous_stanzas or data.unrecognized_subject_stanzas or
            data.update_stanzas or data.notice_stanzas or stopped):
            break
    return data
def get_attribute_matches(self, st, update):
"""Given an update line and subject type, locates any attribute name
matches that exist in the line. Returns a list of tuples
(attribute name, unparsed value) for all located matches or if only one
match is found, simply returns that match."""
matches = []
# try to match on colon first
update_split = update.split(':', 1)
if len(update_split) > 1:
attribute, update_text = update_split
attribute_lower = attribute.lower()
# use _'s as the subject type attribute_names lists do, too
attribute_formatted = attribute_lower.replace(' ', '_')
# check for actual | |
action='store_true', help='if set save test results to XML file')),
(['--with-coverage'], dict(
default=False, action='store_true', help='if set assess code coverage')),
(['--coverage-dirname'], dict(
type=str, default='tests/reports', help="Directory to store coverage data; default='tests/reports'")),
(['--coverage-type'], dict(
type=str, default='branch',
help="Type of coverage analysis to run {statement, branch, or multiple-decision}; default='branch'")),
]
@cement.ex(hide=True)
def _default(self):
    """Run the tests in a Docker container, resolving options from the
    command line with environment-variable fallbacks."""
    cli_args = self.app.pargs
    # `test_path` can come from the command line or, failing that, from the
    # `test_path` environment variable (settable in CircleCI via build
    # parameters); 'tests' is the last-resort default.
    if cli_args.test_path is None:
        test_path = os.environ.get('test_path', 'tests')
    else:
        test_path = cli_args.test_path
    verbose = cli_args.verbose or bool(int(os.getenv('verbose', '0')))
    # translate e.g. 'multiple-decision' into the CoverageType enum member
    coverage_type = karr_lab_build_utils.core.CoverageType[
        cli_args.coverage_type.lower().replace('-', '_')]
    build_helper = BuildHelper()
    build_helper.run_tests_in_docker_container(
        cli_args.container, test_path=test_path,
        n_workers=cli_args.n_workers, i_worker=cli_args.i_worker,
        verbose=verbose, with_xunit=cli_args.with_xunit,
        with_coverage=cli_args.with_coverage,
        coverage_dirname=cli_args.coverage_dirname,
        coverage_type=coverage_type)
class DockerRemoveContainerController(cement.Controller):
    """CLI controller that removes a Docker container by id."""

    class Meta:
        label = 'remove-container'
        description = 'Remove a Docker container'
        help = 'Remove a Docker container'
        stacked_on = 'docker'
        stacked_type = 'nested'
        arguments = [
            (['container'], {'type': str, 'help': "Container id"}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        build_helper.remove_docker_container(cli_args.container)
class FollowCircleciBuildController(cement.Controller):
    """CLI controller that follows a CircleCI build for a repository."""

    class Meta:
        label = 'follow-circleci-build'
        description = 'Follow a CircleCI build for a repository'
        help = 'Follow a CircleCI build for a repository'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--repo-type'], {
                'type': str, 'default': None, 'help': 'Repository type (e.g., github)'}),
            (['--repo-owner'], {
                'type': str, 'default': None, 'help': 'Repository owner'}),
            (['--repo-name'], {
                'type': str, 'default': None,
                'help': 'Name of the repository to build. This defaults to the name of the current repository.'}),
            (['--has-private-dependencies'], {
                'default': False, 'action': 'store_true',
                'help': ('Set if the build requires an SSH key for the Karr Lab machine user because the repository depends on '
                         'another private repository')}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        build_helper.follow_circleci_build(
            repo_type=cli_args.repo_type, repo_owner=cli_args.repo_owner,
            repo_name=cli_args.repo_name,
            has_private_dependencies=cli_args.has_private_dependencies)
class GetCircleciEnvironmentVariablesController(cement.Controller):
    """ Get the CircleCI environment variables for a repository and their partial values"""

    class Meta:
        label = 'get-circleci-environment-variables'
        description = 'Get the CircleCI environment variables for a repository and their partial values'
        help = 'Get the CircleCI environment variables for a repository and their partial values'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--repo-type'], dict(
                type=str, default=None, help='Repository type (e.g., github)')),
            (['--repo-owner'], dict(
                type=str, default=None, help='Repository owner')),
            (['--repo-name'], dict(
                type=str, default=None, help='Name of the repository to build. This defaults to the name of the current repository.')),
        ]

    @cement.ex(hide=True)
    def _default(self):
        """Print each environment variable as 'name=partial value'."""
        args = self.app.pargs
        buildHelper = BuildHelper()
        # Renamed from `vars`, which shadowed the builtin of the same name.
        env_vars = buildHelper.get_circleci_environment_variables(
            repo_type=args.repo_type, repo_owner=args.repo_owner,
            repo_name=args.repo_name)
        for key, val in env_vars.items():
            print('{}={}'.format(key, val))
class SetCircleciEnvironmentVariableController(cement.Controller):
    """CLI controller that sets one CircleCI environment variable on a
    repository."""

    class Meta:
        label = 'set-circleci-environment-variable'
        description = 'Set a CircleCI environment variable for a repository'
        help = 'Set a CircleCI environment variable for a repository'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['name'], {
                'type': str, 'help': 'Name of the environment variable.'}),
            (['value'], {
                'type': str, 'help': 'Value of the environment variable.'}),
            (['--repo-type'], {
                'type': str, 'default': None, 'help': 'Repository type (e.g., github)'}),
            (['--repo-owner'], {
                'type': str, 'default': None, 'help': 'Repository owner'}),
            (['--repo-name'], {
                'type': str, 'default': None,
                'help': 'Name of the repository to build. This defaults to the name of the current repository.'}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        # the helper accepts a mapping, so wrap the single name/value pair
        build_helper.set_circleci_environment_variables(
            {cli_args.name: cli_args.value},
            repo_type=cli_args.repo_type, repo_owner=cli_args.repo_owner,
            repo_name=cli_args.repo_name)
class DeleteCircleciEnvironmentVariableController(cement.Controller):
    """CLI controller that deletes one CircleCI environment variable from a
    repository."""

    class Meta:
        label = 'delete-circleci-environment-variable'
        description = 'Delete a CircleCI environment variable for a repository'
        help = 'Delete a CircleCI environment variable for a repository'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['name'], {
                'type': str, 'help': 'Name of the environment variable.'}),
            (['--repo-type'], {
                'type': str, 'default': None, 'help': 'Repository type (e.g., github)'}),
            (['--repo-owner'], {
                'type': str, 'default': None, 'help': 'Repository owner'}),
            (['--repo-name'], {
                'type': str, 'default': None,
                'help': 'Name of the repository to build. This defaults to the name of the current repository.'}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        build_helper.delete_circleci_environment_variable(
            cli_args.name,
            repo_type=cli_args.repo_type, repo_owner=cli_args.repo_owner,
            repo_name=cli_args.repo_name)
class CreateCodeClimateGithubWebhookController(cement.Controller):
    """CLI controller that creates a Code Climate GitHub webhook for the
    current repository."""

    class Meta:
        label = 'create-code-climate-github-webhook'
        description = 'Create Code Climate GitHub webhook for the current repository'
        help = 'Create Code Climate GitHub webhook for the current repository'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--repo-type'], {
                'type': str, 'default': None, 'help': 'Repository type (e.g., github)'}),
            (['--repo-owner'], {
                'type': str, 'default': None, 'help': 'Repository owner'}),
            (['--repo-name'], {
                'type': str, 'default': None,
                'help': 'Name of the repository to build. This defaults to the name of the current repository.'}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        build_helper.create_code_climate_github_webhook(
            repo_type=cli_args.repo_type, repo_owner=cli_args.repo_owner,
            repo_name=cli_args.repo_name)
class DoPostTestTasksController(cement.Controller):
    """ Do all post-test tasks for CircleCI """

    class Meta:
        label = 'do-post-test-tasks'
        description = 'Do all post-test tasks for CircleCI'
        help = 'Do all post-test tasks for CircleCI'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['installation_exit_code'], dict(
                type=int, help='Exit code of the package installation tasks')),
            (['tests_exit_code'], dict(
                type=int, help='Exit code of the tests')),
            (['--dry-run'], dict(
                default=False, dest='dry_run', action='store_true', help='If set, do not send results to Coveralls and Code Climate')),
        ]

    @cement.ex(hide=True)
    def _default(self):
        """ Do all post-test tasks for CircleCI """
        # Bug fix: this docstring previously sat *after* the first two
        # statements, where it was a no-op string expression.
        args = self.app.pargs
        dry_run = args.dry_run or bool(int(os.getenv('dry_run', '0')))
        buildHelper = BuildHelper()
        triggered_packages, not_triggered_packages, status, other_exception = buildHelper.do_post_test_tasks(
            args.installation_exit_code != 0, args.tests_exit_code != 0, dry_run=dry_run)

        # report downstream builds that were (or were not) triggered
        if triggered_packages:
            print('{} downstream builds were triggered'.format(len(triggered_packages)))
            for triggered_package in triggered_packages:
                print(' {}'.format(triggered_package))
        else:
            print("No downstream builds were triggered")
        if not_triggered_packages:
            for key, msg in not_triggered_packages.items():
                print(' {}: {}'.format(key, msg.replace('\n', '\n ')))

        # report email notifications
        num_notifications = sum(status.values())
        if num_notifications > 0:
            print('{} notifications were sent'.format(num_notifications))
            if status['is_fixed']:
                print(' Build fixed')
            if status['is_old_error']:
                print(' Recurring error')
            if status['is_new_error']:
                print(' New error')
            if status['is_other_error']:
                print(' Other error')
            if status['is_new_downstream_error']:
                print(' Downstream error')
        else:
            print('No notifications were sent.')

        # an unexpected exception fails the build with a non-zero exit
        if status['is_other_error']:
            if other_exception:
                traceback.print_tb(other_exception['traceback'])
                raise SystemExit('Post-test tasks were not successful: {}'.format(
                    other_exception['exception']))
            else:
                raise SystemExit('Post-test tasks were not successful')
class MakeAndArchiveReportsController(cement.Controller):
    """CLI controller that makes and archives reports:

    * Generate HTML test history reports
    * Generate HTML API documentation
    * Archive coverage report to Coveralls and Code Climate
    """

    class Meta:
        label = 'make-and-archive-reports'
        description = 'Make and archive reports'
        help = 'Make and archive reports'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--coverage-dirname'], {
                'type': str, 'default': 'tests/reports',
                'help': "Directory to store coverage data; default='tests/reports'"}),
            (['--dry-run'], {
                'default': False, 'dest': 'dry_run', 'action': 'store_true',
                'help': 'If set, do not send results to Coveralls and Code Climate'}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        # the `dry_run` environment variable lets CI force a dry run
        dry_run = cli_args.dry_run or bool(int(os.getenv('dry_run', '0')))
        build_helper = BuildHelper()
        build_helper.make_and_archive_reports(
            coverage_dirname=cli_args.coverage_dirname, dry_run=dry_run)
class CombineCoverageReportsController(cement.Controller):
    """CLI controller that merges per-worker coverage files (.coverage.*)
    into a single .coverage file."""

    class Meta:
        label = 'combine-coverage-reports'
        description = 'Combine coverage reports (.coverage.*) into a single file (.coverage)'
        help = 'Combine coverage reports (.coverage.*) into a single file (.coverage)'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--coverage-dirname'], {
                'type': str, 'default': 'tests/reports',
                'help': "Directory to store coverage data; default='tests/reports'"}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        cli_args = self.app.pargs
        build_helper = BuildHelper()
        build_helper.combine_coverage_reports(
            coverage_dirname=cli_args.coverage_dirname)
class ArchiveCoverageReportController(cement.Controller):
    """CLI controller that archives a coverage report:

    * Upload report to Coveralls and Code Climate
    """

    class Meta:
        label = 'archive-coverage-report'
        description = 'Archive coverage report'
        help = 'Archive coverage report'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--coverage-dirname'], {
                'type': str, 'default': 'tests/reports',
                'help': "Directory to store coverage data; default='tests/reports'"}),
            (['--dry-run'], {
                'default': False, 'dest': 'dry_run', 'action': 'store_true',
                'help': 'If set, do not send results to Coveralls and Code Climate'}),
        ]

    @cement.ex(hide=True)
    def _default(self):
        """Upload the coverage report to Coveralls and Code Climate."""
        cli_args = self.app.pargs
        # the `dry_run` environment variable lets CI force a dry run
        dry_run = cli_args.dry_run or bool(int(os.getenv('dry_run', '0')))
        build_helper = BuildHelper()
        build_helper.archive_coverage_report(
            coverage_dirname=cli_args.coverage_dirname, dry_run=dry_run)
class UploadCoverageReportToCoverallsController(cement.Controller):
    # NOTE(review): the docstring previously said "Code Climate" — a
    # copy/paste slip from the sibling controller; this one targets Coveralls.
    """ Upload coverage report to Coveralls """

    class Meta:
        label = 'upload-coverage-report-to-coveralls'
        description = 'Upload coverage report to Coveralls'
        help = 'Upload coverage report to Coveralls'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['--coverage-dirname'], dict(
                type=str, default='tests/reports', help="Directory to store coverage data; default='tests/reports'")),
            (['--dry-run'], dict(
                default=False, dest='dry_run', action='store_true', help='If set, do not send results to Coveralls')),
        ]

    @cement.ex(hide=True)
    def _default(self):
        """ Upload coverage report to Coveralls """
        args = self.app.pargs
        # the `dry_run` environment variable lets CI force a dry run
        dry_run = args.dry_run or bool(int(os.getenv('dry_run', '0')))
        buildHelper = BuildHelper()
        buildHelper.upload_coverage_report_to_coveralls(coverage_dirname=args.coverage_dirname, dry_run=dry_run)
class UploadCoverageReportToCodeClimateController(cement.Controller):
""" Upload coverage report to Code Climate """
class Meta:
label = 'upload-coverage-report-to-code-climate'
description = 'Upload coverage | |
Returns:
Settings are recoded in `self.control_df` rows: `'port_0_terms+-'` & `'port_1_terms+-'`
"""
assert port_0_pos_term in self.circ_netlist_obj.node_names, f'`{port_0_pos_term}` is not a node in the circuit under test'
self.port_0_pos_term=port_0_pos_term
assert port_0_neg_term in self.circ_netlist_obj.node_names, f'`{port_0_neg_term}` is not a node in the circuit under test'
self.port_0_neg_term=port_0_neg_term
assert port_1_pos_term in self.circ_netlist_obj.node_names, f'`{port_1_pos_term}` is not a node in the circuit under test'
self.port_1_pos_term=port_1_pos_term
assert port_1_neg_term in self.circ_netlist_obj.node_names, f'`{port_1_neg_term}` is not a node in the circuit under test'
self.port_1_neg_term=port_1_neg_term
#record the results in table
self._build_control_table(display_table)
def pz_mode_set(self, tf_type='voltage', pz_acu='pole-zero', display_table=False):
    """Configure the pole-zero analysis controls.

    Args:
        tf_type (str; 'voltage'): transfer-function form the poles/zeros
            fit to; 'voltage' means V_o/V_i, 'current' means V_o/I_i
        pz_acu (str; 'pole-zero'): 'pole-zero' attempts to acquire all
            poles and zeros of the specified transfer function; 'poles'
            or 'zeros' acquires only the respective kind
        display_table (bool; False): when True, display the generated
            `self.control_df` below this call in a notebook-like
            environment

    Returns:
        records the settings in `self.control_df` rows `'tf_type'` and
        `'acqui_mode'`
    """
    allowed = self.allowed_control_statments
    assert tf_type in allowed, f'`{tf_type}` is not `voltage` or `current`'
    self.tf_type = tf_type
    assert pz_acu in allowed, f'`{pz_acu}` is not `pole-zero` or `poles` or `zeros`'
    self.pz_acu = pz_acu
    # persist the selections into the control table
    self._build_control_table(display_table)
def _build_control_table(self, display_table=True):
    """Build `self.control_df`, the table of pole-zero simulation settings.

    Args:
        display_table (bool; True): when True, display the generated
            `self.control_df` below this call in a notebook-like
            environment

    Returns:
        creates `self.control_df` recording the pz simulation controls;
        when `display_table` is True it is shown under the notebook cell
    """
    self.control_df = pd.DataFrame(columns=['value'],
                                   index=['tf_type',
                                          'acqui_mode',
                                          'port_0_terms+-',
                                          'port_1_terms+-'
                                          ])
    if hasattr(self, 'tf_type'):
        # Bug fix: the 'value' column label was missing on this row and
        # the next; DataFrame.at scalar assignment needs both a row and a
        # column label (and the port rows below already supply it).
        self.control_df.at['tf_type', 'value'] = self.tf_type
    if hasattr(self, 'pz_acu'):
        self.control_df.at['acqui_mode', 'value'] = self.pz_acu
    if hasattr(self, 'port_0_pos_term') and hasattr(self, 'port_0_neg_term'):
        self.control_df.at['port_0_terms+-', 'value'] = [self.port_0_pos_term, self.port_0_neg_term]
    if hasattr(self, 'port_1_pos_term') and hasattr(self, 'port_1_neg_term'):
        self.control_df.at['port_1_terms+-', 'value'] = [self.port_1_pos_term, self.port_1_neg_term]
    self.control_df.index.name = 'pz_sim_control'
    if display_table:
        display(self.control_df)
def do_pz_sim(self, display_table=False):
    """Run the pole-zero simulation using the controls in `self.control_df`.

    If the simulation does not converge, a warning with a basic debug hint
    is issued and `self.pz_values` is left as an empty dict.

    Args:
        display_table (bool; False): forwarded to `_record_pz_results`

    TODO:
        - add simulation kwargs
        - flush out exception handling
    """
    attriputs_to_check = ['port_0_pos_term', 'port_0_neg_term',
                          'port_1_pos_term', 'port_1_neg_term',
                          'tf_type', 'pz_acu']
    # Bug fix: `pz_is_go` used to be overwritten on every loop iteration,
    # so only the *last* attribute checked actually gated the simulation;
    # now every required attribute must be present.
    pz_is_go = True
    for i in attriputs_to_check:
        if not hasattr(self, i):
            pz_is_go = False
            warnings.warn(f'{i} has not been set; pole-zero simulation will not procdede till set')
    if pz_is_go:
        self.sim = self.circ_netlist_obj.simulator()
        # The warning emitted when ngspice hangs can't be caught, so
        # pre-clear the results before attempting the run.
        self.pz_values = {}
        try:
            self.pz_values = self.sim.polezero(
                node1=self.port_0_pos_term,
                node2=self.port_0_neg_term,
                node3=self.port_1_pos_term,
                node4=self.port_1_neg_term,
                tf_type=self.allowed_control_statments[self.tf_type],
                pz_type=self.allowed_control_statments[self.pz_acu]
            )
            self._record_pz_results(display_table)
        except pspice.Spice.NgSpice.Shared.NgSpiceCommandError:
            self.pz_values = {}
            warnings.warn("""PZ analysis did not converge with the current setting:
            start by changing the tf type (self.tf_type) and pz acusisiton type (self.pz_acu) """)
def _record_pz_results(self, display_table=True):
    """Record the latest pole-zero results in `self.pz_results_DF`.

    Args:
        display_table (bool; True): when True, display the generated
            table below this call in a notebook-like environment

    Returns:
        creates `self.pz_results_DF` (columns 'Type' and 'Values') from
        `self.pz_values.nodes`; shown under the notebook cell when
        `display_table` is True
    """
    self.pz_results_DF = pd.DataFrame(columns=['Type', 'Values'])
    if hasattr(self.pz_values, 'nodes'):
        for node_name, waveform in self.pz_values.nodes.items():
            # first element of the waveform is the pole/zero value
            self.pz_results_DF.at[len(self.pz_results_DF)] = node_name, waveform.as_ndarray()[0]
    if display_table:
        display(self.pz_results_DF)
def get_pz_sym_tf(self, dec_round=None, overload_K=None):
    """Get the symbolic transfer function via lcapy.

    Args:
        dec_round (int; None): forwarded to `np.around`'s `decimals`
            argument; when None no rounding is applied
        overload_K (float/int; None): when not None, overloads the DC
            gain constant stored in `self.K`

    Returns:
        when `self.pz_results_DF` exists, stores the symbolic s-domain
        transfer function in `self.sym_tf` (and the collected poles and
        zeros in `self.poles_A` / `self.zeros_B`)
    """
    if overload_K is not None:
        # exact type check (not isinstance) kept so bools are rejected
        assert type(overload_K) in (float, int), 'K must be a float or int'
        self.K = overload_K
    if not hasattr(self, 'pz_results_DF'):
        warnings.warn('no poles/zero recorded run `self.do_pz_sim`')
        return
    zeros_B = np.empty(0)
    poles_A = np.empty(0)
    for index, row in self.pz_results_DF.iterrows():
        if 'zero' in row['Type']:
            zeros_B = np.hstack((zeros_B, row['Values']))
        elif 'pole' in row['Type']:
            poles_A = np.hstack((poles_A, row['Values']))
    if dec_round is not None:
        zeros_B = np.around(zeros_B, dec_round)
        poles_A = np.around(poles_A, dec_round)
    self.zeros_B = zeros_B
    self.poles_A = poles_A
    # lcapy wants plain lists, not ndarrays
    zeros_B = zeros_B.tolist()
    poles_A = poles_A.tolist()
    # use lcapy to get the symbolic tf; simplify because the pzk form does
    # weird things with j that lambdify has issues with
    self.sym_tf = kiwi.zp2tf(zeros_B, poles_A, K=self.K)
    self.sym_tf = self.sym_tf.simplify()
def plot_pz_loc(self, ax=None, title='', unitcircle=False):
    """Plot the pole and zero locations on the complex plane.

    Uses lcapy's `plot_pole_zero`
    (https://github.com/mph-/lcapy/blob/6e42983d6b77954e694057d61045bd73d17b4616/lcapy/plot.py#L12)
    via `self.sym_tf.plot`, computing the symbolic transfer function
    first when it is missing.

    Args:
        ax (matplotlib axis; None): axis to draw into; when None the
            current axis (plt.gca()) is used
        title (str; ''): subplot title string
        unitcircle (bool; False): when True, draw the unit circle on the
            resulting plot

    Returns:
        a real/imag pole-zero map on the selected axis; a passed-in `ax`
        is modified in place
    """
    axs = ax or plt.gca()
    # idiom fix: `== False` replaced with `not`
    if not hasattr(self, 'sym_tf'):
        warnings.warn("""Trying to get symbolic transfer function from `self.get_pz_sym_tf`
        thus you will get what you get""")
        self.get_pz_sym_tf()
    self.sym_tf.plot(axes=axs, unitcircle=unitcircle,
                     # wish there be a better way to do this
                     label="'X'=pole; 'O'=zero")
    # emphasize the real and imaginary axes
    axs.axhline(0, linestyle='--', linewidth=2.0, color='black')
    axs.axvline(0, linestyle='--', linewidth=2.0, color='black')
    axs.set_xlabel('Real')
    axs.set_ylabel('Imag')
    axs.legend()
    if title != '':
        title = ' of ' + title
    axs.set_title(f'Pole-Zero locations plot{title}')
def plot_3d_laplce(self, title=''):
    """Create 3D plots of the Laplace space of the transfer function: one
    for the magnitude and one for the phase in degrees, unwrapped.

    (NOTE(review): the method name's "laplce" spelling is kept for
    backward compatibility with existing callers.)

    Args:
        title (str; ''): Subplot title string

    Returns:
        a 3d figure with the magnitude surface on the left subplot and
        the phase surface on the right

    TODO:
        - get the freaking color bar into a clean location when working with 3d plots
        - merge phase as color into mag see the physics video by eugene on Laplace
    """
    if hasattr(self, 'sym_tf')==False:
        warnings.warn("""Trying to get symbolic transfer function from `self.get_pz_sym_tf`
        thus you will get what you get""")
        self.get_pz_sym_tf()
    #import the additnal matplotlib featuers for 3d
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    #stole this off lcapy's plot_pole_zero
    #https://github.com/mph-/lcapy/blob/7c4225f2159aa33398dac481041ed538169b7058/lcapy/plot.py
    #check self.sys_tf is good to be used
    sys_tf_syms=self.sym_tf.symbols
    assert len(sys_tf_syms)==1 and ('s' in sys_tf_syms.keys()), 'trasfer function must be laplce form and only have `s` as a free symbol'
    #lambdfy the tf
    sys_tf_lam=sym.lambdify(kiwi.s, self.sym_tf.canonical(), 'numpy', dummify=False)
    #get the plot bounds
    #stole this off lcapy's plot_pole_zero
    #https://github.com/mph-/lcapy/blob/7c4225f2159aa33398dac481041ed538169b7058/lcapy/plot.py
    poles = self.sym_tf.poles()
    zeros = self.sym_tf.zeros()
    try:
        p = np.array([p.cval for p in poles.keys()])
        z = np.array([z.cval for z in zeros.keys()])
    except ValueError:
        raise TypeError('Cannot get poles and zeros of `self.sym_tf')
    # derive the plotting window from the pole/zero extents, with padding
    a = np.hstack((p, z))
    x_min = a.real.min()
    x_max = a.real.max()
    y_min = a.imag.min()
    y_max = a.imag.max()
    x_extra, y_extra = 3.0, 3.0
    # This needs tweaking for better bounds.
    if len(a) >= 2:
        x_extra, y_extra = 0.1 * (x_max - x_min), 0.1 * (y_max - y_min)
    if x_extra == 0:
        x_extra += 1.0
    if y_extra == 0:
        y_extra += 1.0
    x_min -= 0.5 * x_extra
    x_max += 0.5 * x_extra
    y_min -= 0.5 * y_extra
    y_max += 0.5 * y_extra
    #the input domain
    RealRange=np.linspace(x_min, x_max, 100); ImagRange=np.linspace(y_min, y_max, 100)
    sr, si=np.meshgrid(RealRange, ImagRange)
    # s = sigma + j*omega sampled over the plotting window
    s_num=sr+1j*si
    #plot this
    fig = plt.figure()
    #mag 3d plot
    ax3d_mag = fig.add_subplot(121, projection='3d')
    XmagPlot=ax3d_mag.plot_surface(sr, si, np.abs(sys_tf_lam(s_num)), alpha=0.5,
        cmap=cm.coolwarm, antialiased=False)
    ax3d_mag.set_xlabel(r'$\sigma$'); ax3d_mag.set_ylabel(r'$j\omega$'), ax3d_mag.set_zlabel(r'$|X|$')
    fig.colorbar(XmagPlot, shrink=0.5, aspect=5)
    #phase 3d plot
    ax3d_phase = fig.add_subplot(122, projection='3d')
    XphasePlot=ax3d_phase.plot_surface(sr, si, angle_phase_unwrap(sys_tf_lam(s_num)), alpha=0.5,
        cmap=cm.coolwarm, antialiased=False)
    ax3d_phase.set_xlabel(r'$\sigma$'); ax3d_phase.set_ylabel(r'$j\omega$'), ax3d_phase.set_zlabel(r'$ang(X)$')
    fig.colorbar(XphasePlot, shrink=0.5, aspect=5)
    plt.tight_layout()
    if title!='':
        title=' of '+title
    ax3d_mag.set_title(f'3D Mag Laplace plot{title}');
    ax3d_phase.set_title(f'3D Phase_deg Laplace plot{title}');
def scipy_pzk(self, dec_round=None, overload_K=None):
"""
Method to create to generate the Numerator (a) and | |
be increasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly increasing--equal values are treated as failures.
expect_column_values_to_be_increasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly greater than previous values
parse_strings_as_datetimes (boolean or None) : \
If True, all non-null column values to datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_decreasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_decreasing>`
"""
raise NotImplementedError
    def expect_column_values_to_be_decreasing(self,
                                              column,
                                              strictly=None,
                                              parse_strings_as_datetimes=False,
                                              mostly=None,
                                              result_format=None, include_config=True, catch_exceptions=None, meta=None
                                              ):
        """Expect column values to be decreasing.

        By default, this expectation only works for numeric or datetime data.
        When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.

        If `strictly=True`, then this expectation is only satisfied if each consecutive value
        is strictly decreasing--equal values are treated as failures.

        expect_column_values_to_be_decreasing is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

        Args:
            column (str): \
                The column name.

        Keyword Args:
            strictly (Boolean or None): \
                If True, values must be strictly less than previous values
            parse_strings_as_datetimes (boolean or None) : \
                If True, all non-null column values are parsed to datetimes before making comparisons
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            A JSON-serializable expectation result object.

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        See Also:
            :func:`expect_column_values_to_be_increasing \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_increasing>`

        """
        # Abstract on the base class; concrete backend Datasets implement it.
        raise NotImplementedError
###
#
# String matching
#
###
    def expect_column_value_lengths_to_be_between(
            self,
            column,
            min_value=None,
            max_value=None,
            mostly=None,
            result_format=None, include_config=True, catch_exceptions=None, meta=None
    ):
        """Expect column entries to be strings with length between a minimum value and a maximum value (inclusive).

        This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.

        expect_column_value_lengths_to_be_between is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

        Args:
            column (str): \
                The column name.

        Keyword Args:
            min_value (int or None): \
                The minimum value for a column entry length.
            max_value (int or None): \
                The maximum value for a column entry length.
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            A JSON-serializable expectation result object.

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        Notes:
            * min_value and max_value are both inclusive.
            * If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
              no minimum.
            * If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
              no maximum.

        See Also:
            :func:`expect_column_value_lengths_to_equal \
            <great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_equal>`

        """
        # Abstract on the base class; concrete backend Datasets implement it.
        raise NotImplementedError
    def expect_column_value_lengths_to_equal(self,
                                             column,
                                             value,
                                             mostly=None,
                                             result_format=None, include_config=True, catch_exceptions=None, meta=None
                                             ):
        """Expect column entries to be strings with length equal to the provided value.

        This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.

        expect_column_value_lengths_to_equal is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

        Args:
            column (str): \
                The column name.
            value (int or None): \
                The expected value for a column entry length.

        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            A JSON-serializable expectation result object.

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        See Also:
            :func:`expect_column_value_lengths_to_be_between \
            <great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`

        """
        # Abstract on the base class; concrete backend Datasets implement it.
        raise NotImplementedError
    def expect_column_values_to_match_regex(self,
                                            column,
                                            regex,
                                            mostly=None,
                                            result_format=None, include_config=True, catch_exceptions=None, meta=None
                                            ):
        """Expect column entries to be strings that match a given regular expression. Valid matches can be found \
        anywhere in the string, for example "[at]+" will identify the following strings as expected: "cat", "hat", \
        "aa", "a", and "t", and the following strings as unexpected: "fish", "dog".

        expect_column_values_to_match_regex is a \
        :func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.

        Args:
            column (str): \
                The column name.
            regex (str): \
                The regular expression the column entries should match.

        Keyword Args:
            mostly (None or a float between 0 and 1): \
                Return `"success": True` if at least mostly fraction of values match the expectation. \
                For more detail, see :ref:`mostly`.

        Other Parameters:
            result_format (str or None): \
                Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
                For more detail, see :ref:`result_format <result_format>`.
            include_config (boolean): \
                If True, then include the expectation config as part of the result object. \
                For more detail, see :ref:`include_config`.
            catch_exceptions (boolean or None): \
                If True, then catch exceptions and include them as part of the result object. \
                For more detail, see :ref:`catch_exceptions`.
            meta (dict or None): \
                A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
                modification. For more detail, see :ref:`meta`.

        Returns:
            A JSON-serializable expectation result object.

            Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
            :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

        See Also:
            :func:`expect_column_values_to_not_match_regex \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`

            :func:`expect_column_values_to_match_regex_list \
            <great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`

        """
        # Abstract on the base class; concrete backend Datasets implement it.
        raise NotImplementedError
def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None, include_config=True, catch_exceptions=None, meta=None
):
"""Expect column entries to be | |
<filename>seocrawler/__init__.py
# -*- coding: utf-8 -*-
import os
import sys
import time
import uuid
import requests
import re
import hashlib
import json
from urlparse import urlparse, urljoin
import atexit
import gzip
from bs4 import BeautifulSoup
import seolinter
html_parser = "lxml"
# html_parser = "html.parser"
TIMEOUT = 16
JOBS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jobs')
def crawl(urls, db, internal=False, delay=0, user_agent=None,
        url_associations={}, run_id=None, processed_urls={}, limit=0):
    """Crawl *urls* in queue order, storing lint results and page details in *db*.

    Args:
        urls: work queue of absolute urls; mutated in place (pop from front,
            optionally append newly discovered internal links).
        db: DB-API connection passed through to store_results/associate_link.
        internal: when True, internal links found on a page are enqueued.
        delay: milliseconds to sleep between pages.
        user_agent: optional User-Agent header value for requests.
        url_associations: page url -> {link_url: link dict}, built during the
            crawl and resolved into link rows at the end.
        run_id: resume identifier; a fresh uuid4 is generated when omitted.
        processed_urls: url -> crawl_urls row id for everything already stored.
        limit: once the queue reaches this size, stop enqueueing (0 = no cap).

    Returns:
        The run id used for this crawl.

    NOTE(review): url_associations and processed_urls are mutable default
    arguments -- state leaks across calls that do not pass them explicitly.
    NOTE(review): Python 2 code (print statements, dict.iteritems).
    """
    run_id = run_id or uuid.uuid4()
    print "Starting crawl with run_id: %s" % run_id

    def _save_state(run_id, u, ua):
        # Persist the remaining queue + associations so an interrupted run can
        # be resumed later from the gzipped job file.
        if not os.path.exists(JOBS_DIR):
            os.makedirs(JOBS_DIR)
        print len(u), len(ua)
        if len(u) == 0 and len(ua) == 0:
            return
        # open the job file
        with gzip.open("%s/%s.gz" % (JOBS_DIR, run_id), 'w+') as f:
            data = {
                'urls': u,
                'associations': ua,
            }
            f.write(json.dumps(data))

    # Save state on any interpreter exit (normal or Ctrl-C via SystemExit).
    atexit.register(_save_state, run_id, urls, url_associations)
    run_count = 0
    limit_reached = False
    while len(urls) > 0:
        run_count += 1
        url = urls[0]
        print "\nProcessing (%d / %d): %s" % (run_count, len(urls), url)
        if not is_full_url(url):
            # Relative urls are dropped from the queue (kept in processed_urls).
            processed_urls[url] = urls.pop(0)
            continue
            # raise ValueError('A relative url as provided: %s. Please ensure that all urls are absolute.' % url)
        # Mark as in-progress before fetching.
        processed_urls[url] = None
        results = retrieve_url(url, user_agent)
        # retrieve_url returns the final response first, then any redirect hops.
        for res in results:
            lint_errors = {}
            page_details = {}
            if res['code'] == 200 and res['content_type'] == 'text/html':
                lint_errors, page_details, links, sources = process_html(res['content'], res['url'])
                record = store_results(db, run_id, res, lint_errors, page_details)
                processed_urls[url] = record
                url_associations[url] = {}
                # Process links from the page
                if links and len(links) > 0:
                    for link in links:
                        link_url = link['url']
                        if not link['valid']:
                            # Process any malformed links
                            bad_link = store_results(db, run_id, {
                                'url': link_url,
                                'code': 0,
                            }, {}, {}, None)
                            processed_urls[link_url] = bad_link
                            associate_link(db, record, bad_link, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
                        elif not is_internal_url(link_url, url):
                            # External links are fetched shallowly (HEAD only) and stored now.
                            if link_url not in processed_urls:
                                link_results = retrieve_url(link_url, user_agent, False)
                                for link_result in link_results:
                                    link_store = store_results(db, run_id, link_result, {}, {}, True)
                                    processed_urls[link_result['url']] = link_store
                                    # Associate links
                                    associate_link(db, record, link_store, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
                            else:
                                associate_link(db, record, processed_urls[link_url], run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
                        elif internal and is_internal_url(link_url, url) and link_url not in processed_urls and link_url not in urls:
                            # Internal link discovery: enqueue until the limit is hit.
                            if not limit_reached:
                                urls.append(link_url)
                                if limit and len(urls) >= limit:
                                    limit_reached = True
                            url_associations[url][link_url] = link
                # Process sources from the page (img/script/css assets)
                if sources and len(sources) > 0:
                    for source in sources:
                        source_url = source['url']
                        if source_url not in processed_urls:
                            source_results = retrieve_url(source_url, user_agent, False)
                            for source_result in source_results:
                                source_internal = is_internal_url(source_result['url'], url)
                                source_store = store_results(db, run_id, source_result, {}, {}, not source_internal)
                                processed_urls[source_url] = source_store
                                associate_link(db, record, source_store, run_id, 'asset', None, source.get('alt'), None)
                        else:
                            associate_link(db, record, processed_urls[source_url], run_id, 'asset', None, source.get('alt'), None)
            else:
                # Non-200 or non-HTML responses are stored without parsing.
                record = store_results(db, run_id, res, lint_errors, page_details, False)
                processed_urls[url] = record
        time.sleep( delay / 1000.0 )
        urls.pop(0)
    # Process associations now that every url has a stored row id.
    for url, associations in url_associations.iteritems():
        for association, link in associations.iteritems():
            to_id = processed_urls.get(url)
            from_id = processed_urls.get(association)
            if to_id and from_id and from_id != to_id:
                associate_link(db, to_id, from_id, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
    # Clean up any save files that might exist (crawl finished cleanly).
    if os.path.exists('%s/%s.gz' % (JOBS_DIR, run_id)):
        print "Deleting job file (%s/%s.gz)" % (JOBS_DIR, run_id)
        os.remove('%s/%s.gz' % (JOBS_DIR, run_id))
    return run_id
def retrieve_url(url, user_agent=None, full=True):
    """Fetch *url* and return a list of response payload dicts.

    A HEAD request is issued first; when *full* is True and the content type
    is text/html, a GET follows so the body can be parsed. The final response
    payload comes first in the returned list, followed by one payload per
    redirect hop. Timeouts and connection errors yield a single code-0 payload.

    NOTE(review): Python 2 exception syntax (`except X, e`).
    """
    def _build_payload(response, request_time):
        # Flatten a requests.Response into the dict shape store_results expects.
        size = response.headers.get('content-length') or len(response.text)
        content_type = response.headers.get('content-type')
        return {
            'url': response.url,
            'url_length': len(response.url),
            'content': response.text,
            'content_type': content_type.split(';')[0] if content_type else None,
            'code': int(response.status_code),
            'reason': response.reason,
            'size': size,
            'encoding': response.encoding,
            'response_time': request_time,
        }

    headers = {}
    redirects = []
    if user_agent:
        headers['User-Agent'] = user_agent
        if 'Googlebot' in user_agent:
            # TODO: append ?__escaped_fragment__= to the url
            pass
    try:
        # Single-line progress output (clear line, print url, carriage return).
        sys.stdout.write("\033[K")
        sys.stdout.write(" -> %s\r" % url)
        sys.stdout.flush()
        start = time.time()
        res = requests.head(url, headers=headers, timeout=TIMEOUT)
        if full and res.headers.get('content-type', '').split(';')[0] == 'text/html':
            res = requests.get(url, headers=headers, timeout=TIMEOUT)
        if len(res.history) > 0:
            # Redirect hops get a zero response time; only the final hop is timed.
            request_time = 0
            redirects = [_build_payload(redirect, request_time) for redirect in res.history]
    except requests.exceptions.Timeout, e:
        return [{
            'url': url,
            'url_length': len(url),
            'code': 0,
            'reason': 'Timeout %s' % TIMEOUT
        }]
    except requests.exceptions.ConnectionError, e:
        return [{
            'url': url,
            'url_length': len(url),
            'code': 0,
            'reason': 'Connection Error %s' % e
        }]
    except Exception, e:
        print e
        raise
    finally:
        # Runs even on the early returns above. Assumes `start` is bound, i.e.
        # that the stdout writes before it never raise -- TODO confirm.
        request_time = time.time() - start
    # TODO: Properly handle the failure. reraise?
    return [_build_payload(res, request_time),] + redirects
def process_html(html, url):
    """Run the full per-page pipeline: lint, page details, links, asset sources.

    Returns a 4-tuple (lint_errors, page_details, links, sources).
    """
    # Tuple elements evaluate left-to-right, preserving the original call order.
    return (
        seolinter.lint_html(html),
        extract_page_details(html, url),
        extract_links(html, url),
        extract_sources(html, url),
    )
def extract_links(html, url):
    """Collect every anchor on the page as an absolute-url link record.

    Each record carries the resolved url, a validity flag (False when the
    href could not be resolved), the anchor text, and alt/rel attributes.
    Hrefless anchors and mailto links are skipped.
    """
    soup = BeautifulSoup(html, html_parser)
    collected = []
    for anchor in soup.find_all('a'):
        well_formed = True
        try:
            resolved = make_full_url(anchor.get('href'), url)
        except Exception:
            # Keep the raw href so the malformed link can still be recorded.
            resolved = anchor.get('href')
            well_formed = False
        if not resolved or 'mailto:' in resolved:
            continue
        collected.append({
            'url': resolved,
            'valid': well_formed,
            'text': anchor.string or anchor.get_text(),
            'alt': anchor.get('alt'),
            'rel': anchor.get('rel'),
        })
    return collected
def extract_sources(html, url):
    """Collect asset references (img/link/script/style/meta) as absolute urls.

    Tags without a src or href attribute are ignored; relative urls are
    resolved against *url*.
    """
    soup = BeautifulSoup(html, html_parser)
    found = []
    for tag in soup.find_all(['img', 'link', 'script', 'style', 'meta']):
        raw = tag.get('src') or tag.get('href')
        if not raw:
            continue
        raw = raw.strip()
        resolved = raw if is_full_url(raw) else make_full_url(raw, url)
        found.append({
            'url': resolved,
            'alt': tag.get('alt'),
        })
    return found
def extract_page_details(html, url):
    """Parse SEO-relevant metadata out of an HTML document.

    Args:
        html: raw HTML string of the page.
        url: the page's url (unused here; kept for interface parity with
            extract_links/extract_sources).

    Returns:
        A dict of page details (title, meta description, first two h1s,
        robots/canonical/rel-next/rel-prev), or {} when there is no <head>.
    """
    soup = BeautifulSoup(html, html_parser)
    head = soup.find('head')
    if not head:
        return {}
    # Look everything up off the single <head> node instead of re-finding it.
    robots = head.find('meta', attrs={"name": "robots"})
    rel_next = head.find('link', attrs={'rel': 'next'})
    rel_prev = head.find('link', attrs={'rel': 'prev'})
    title = soup.title.get_text() if soup.title else unicode(soup.find('title'))
    meta_description = head.find('meta', attrs={"name": "description"})
    # Bug fix: measure the description *content* string. len(tag) counts child
    # nodes, so the old len(meta_description) was not the text length.
    description = meta_description.get("content") if meta_description else None
    canonical = head.find('link', attrs={"rel": "canonical"})
    h1_tags = soup.find_all('h1')
    h1_1 = h1_tags[0] if h1_tags else None
    h1_2 = h1_tags[1] if len(h1_tags) > 1 else None
    return {
        'size': len(html),
        'canonical': canonical.get("href") if canonical else None,
        'title_1': title,
        'title_length_1': len(title),
        'meta_description_1': description,
        'meta_description_length_1': len(description) if description else 0,
        'h1_1': h1_1.get_text() if h1_1 else None,
        'h1_length_1': len(h1_1.get_text()) if h1_1 else 0,
        'h1_2': h1_2.get_text() if h1_2 else None,
        'h1_length_2': len(h1_2.get_text()) if h1_2 else 0,
        'h1_count': len(h1_tags),
        'meta_robots': robots.get("content") if robots else None,
        'rel_next': rel_next.get("href") if rel_next else None,
        'rel_prev': rel_prev.get('href') if rel_prev else None,
    }
def store_results(db, run_id, stats, lint_errors, page_details, external=False, valid=True):
    """Insert one crawled-url row into `crawl_urls` and return its row id.

    Args:
        db: DB-API connection (MySQL-style %s placeholders).
        run_id: identifier of the current crawl run.
        stats: response payload dict from retrieve_url().
        lint_errors: seolinter results keyed by issue code.
        page_details: dict produced by extract_page_details().
        external: truthy when the url is off-site.
        valid: False for malformed urls whose domain/path cannot be parsed.

    Returns:
        The AUTO_INCREMENT id of the inserted row.
    """
    cur = db.cursor()
    insert = '''
    INSERT INTO `crawl_urls` (
        `run_id`, `level`, `content_hash`,
        `address`, `domain`, `path`, `external`, `status_code`, `status`, `body`, `size`, `address_length`, `encoding`, `content_type`, `response_time`, `redirect_uri`, `canonical`,
        `title_1`, `title_length_1`, `title_occurences_1`, `meta_description_1`, `meta_description_length_1`, `meta_description_occurrences_1`, `h1_1`, `h1_length_1`, `h1_2`, `h1_length_2`, `h1_count`, `meta_robots`, `rel_next`, `rel_prev`,
        `lint_critical`, `lint_error`, `lint_warn`, `lint_info`, `lint_results`, `timestamp`) VALUES (
        %s, 0, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s, NOW())
    '''
    try:
        url = stats.get('url')
        content = stats.get('content', '')
        # Hash the ascii-squashed body so duplicate content can be detected.
        content_hash = hashlib.sha256(content.encode('ascii', 'ignore')).hexdigest()
        # Lint issue codes start with C/E/W/I for critical/error/warn/info.
        lint_keys = [k.upper() for k in lint_errors.keys()]
        try:
            lint_res = json.dumps(lint_errors)
        except:
            # NOTE(review): lint_res is never used below -- json.dumps is
            # called again (unguarded) in the execute tuple.
            lint_res = '[]'
        s = int(stats.get('size', 0))
        cur.execute(insert, (
            run_id,
            content_hash if content else None,  # content_hash
            # request data
            stats.get('url'),  # address
            _get_base_url(url) if valid else None,  # domain
            _get_path(url) if valid else None,  # path
            1 if external else 0,  # external
            stats.get('code'),  # status_code
            stats.get('reason'),  # status
            stats.get('content', ''),  # body
            s if s >= 0 else 0,  # size
            len(url),  # address_length
            stats.get('encoding'),  # encoding
            stats.get('content_type'),  # content_type
            stats.get('response_time'),  # response_time
            None,  # redirect_uri
            page_details.get('canonical'),  # canonical
            # parse data
            page_details.get('title_1'),  # title_1
            page_details.get('title_length_1'),  # title_length_1
            page_details.get('title_occurences_1'),  # title_occurences_1
            page_details.get('meta_description_1'),  # meta_description
            page_details.get('meta_description_length_1'),  # meta_description_length_1
            page_details.get('meta_description_occurrences_1'),  # meta_description_occurrences_1
            page_details.get('h1_1'),  # h1_1
            page_details.get('h1_length_1'),  # h1_length_1
            page_details.get('h1_2'),  # h1_2
            page_details.get('h1_length_2'),  # h1_length_2
            page_details.get('h1_count'),  # h1_count
            page_details.get('meta_robots'),  # meta_robots
            page_details.get('rel_next'),  # rel_next
            page_details.get('rel_prev'),  # rel_prev
            # lint data
            len([l for l in lint_keys if l[0] == 'C']),  # lint_critical
            len([l for l in lint_keys if l[0] == 'E']),  # lint_error
            len([l for l in lint_keys if l[0] == 'W']),  # lint_warn
            len([l for l in lint_keys if l[0] == 'I']),  # lint_info
            json.dumps(lint_errors)  # lint_results
        ))
        db.commit()
    except:
        # Roll back the transaction before propagating any failure.
        db.rollback()
        raise
    return cur.lastrowid
def is_internal_url(url, source_url):
    """Decide whether *url* belongs to the same site as *source_url*.

    Relative urls are always treated as internal. Absolute urls are internal
    when the two base urls are equal, or the shorter base is a suffix of the
    longer one (covers subdomain vs. bare-domain pairs).
    """
    if not is_full_url(url):
        # Relative url -- by definition it lives on the source site.
        return True
    base = _get_base_url(url)
    source_base = _get_base_url(source_url)
    if base == source_base:
        return True
    if len(base) > len(source_base):
        return source_base == base[-len(source_base):]
    if len(source_base) > len(base):
        return base == source_base[-len(base):]
    return False
def is_full_url(url):
    """Return True when *url* looks like an absolute http(s) url, else False."""
    pattern = r'^(http(s)?:\/\/[a-zA-Z0-9\-_]+\.[a-zA-Z]+(.)+)+'
    return bool(re.match(pattern, url))
def make_full_url(url, source_url):
    """Resolve *url* against *source_url* and strip any #fragment."""
    resolved = urljoin(source_url, url)
    fragment_free, _, _ = resolved.partition('#')
    return fragment_free
def associate_link(db, from_url_id, to_url_id, run_id, link_type, text, alt, rel):
if not from_url_id or not to_url_id or not run_id:
print "Failed to save association (From:", from_url_id, | |
# utils codes of reader. The codes are based on huggingface pre-trained-bert libraries.
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
import numpy as np
from pytorch_pretrained_bert.tokenization import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
class SquadExample(object):
    """
    A single training/test example for the Squad dataset.
    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=None,
                 switch=None,
                 path_prob=None,
                 para_titles=None):
        self.qas_id = qas_id                      # unique question id
        self.question_text = question_text        # raw question string
        self.doc_tokens = doc_tokens              # whitespace-split context tokens
        self.orig_answer_text = orig_answer_text  # answer text as it appears in the data
        self.start_position = start_position      # answer start token index (-1 when impossible)
        self.end_position = end_position          # answer end token index (-1 when impossible)
        self.is_impossible = is_impossible        # SQuAD v2 unanswerable flag
        self.switch = switch                      # 0 = answerable, 1 = impossible (set by read_squad_examples)
        self.path_prob = path_prob                # retrieval path probability -- set by callers elsewhere
        self.para_titles = para_titles            # paragraph titles -- set by callers elsewhere

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qas_id: %s" % (self.qas_id)
        s += ", question_text: %s" % (
            self.question_text)
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        # Bug fix: compare against None explicitly. A start/end position of 0
        # is a valid token index but was dropped by the old truthiness checks.
        if self.start_position is not None:
            s += ", start_position: %d" % (self.start_position)
        if self.end_position is not None:
            s += ", end_position: %d" % (self.end_position)
        if self.is_impossible:
            s += ", is_impossible: %r" % (self.is_impossible)
        return s
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids,
                 cls_index,
                 p_mask,
                 paragraph_len,
                 start_position=None,
                 end_position=None,
                 is_impossible=None,
                 switch=None):
        # Identity of this feature window.
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
        # Tokenized inputs and their mapping back to the original document.
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.token_is_max_context = token_is_max_context
        # Model-ready encodings.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.paragraph_len = paragraph_len
        # Supervision targets (left as None at inference time).
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
        self.switch = switch
def read_squad_examples(input_file, is_training, version_2_with_negative, max_answer_len=100000, skip_negatives=False):
    """Read a SQuAD json file into a list of SquadExample.

    Args:
        input_file: path to a SQuAD-format json file.
        is_training: when True, answer spans and the yes/no `switch` label
            are extracted; when False they are left as None.
        version_2_with_negative: honor the per-question `is_impossible` flag
            (SQuAD v2-style data).
        max_answer_len: answers with more whitespace tokens than this are skipped.
        skip_negatives: drop questions whose id contains "_NEGATIVE_".

    Returns:
        list of SquadExample.
    """
    with open(input_file, "r", encoding='utf-8') as reader:
        input_data = json.load(reader)["data"]

    def is_whitespace(c):
        # 0x202F is a narrow no-break space that str.split would not catch.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False

    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            # Whitespace-tokenize the context while recording, for every
            # character, the index of the token it belongs to.
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[-1] += c
                    prev_is_whitespace = False
                char_to_word_offset.append(len(doc_tokens) - 1)
            for qa in paragraph["qas"]:
                qas_id = str(qa["id"])
                # this is temporary added to see whether reducing the negatives
                # improves the performance.
                if skip_negatives is True and "_NEGATIVE_" in qas_id:
                    continue
                if "FAKE2" in qas_id:
                    continue
                question_text = qa["question"]
                start_position = None
                end_position = None
                orig_answer_text = None
                is_impossible = False
                if is_training:
                    if version_2_with_negative:
                        is_impossible = qa["is_impossible"]
                    # switch encodes answerability: 0 = has answer, 1 = impossible.
                    if is_impossible is False:
                        switch = 0
                    else:
                        switch = 1
                    # if (len(qa["answers"]) != 1) and (not is_impossible):
                    #     raise ValueError(
                    #         "For training, each question should have exactly 1 answer.")
                    if not is_impossible:
                        answers = qa['answers']
                        # Some data stores a single answer dict instead of a list.
                        if type(answers) == list:
                            answer = qa["answers"][0]
                        else:
                            answer = answers
                        orig_answer_text = answer["text"]
                        answer_offset = answer["answer_start"]
                        answer_length = len(orig_answer_text)
                        # Map character offsets onto whitespace-token indices.
                        start_position = char_to_word_offset[answer_offset]
                        end_position = char_to_word_offset[answer_offset +
                                                           answer_length - 1]
                        # Only add answers where the text can be exactly recovered from the
                        # document. If this CAN'T happen it's likely due to weird Unicode
                        # stuff so we will just skip the example.
                        #
                        # Note that this means for training mode, every example is NOT
                        # guaranteed to be preserved.
                        actual_text = " ".join(
                            doc_tokens[start_position:(end_position + 1)])
                        cleaned_answer_text = " ".join(
                            whitespace_tokenize(orig_answer_text))
                        if actual_text.find(cleaned_answer_text) == -1:
                            logger.warning("Could not find answer: '%s' vs. '%s'",
                                           actual_text, cleaned_answer_text)
                            continue
                        if len(orig_answer_text.split()) > max_answer_len:
                            logger.info(
                                "Omitting a long answer: '%s'", orig_answer_text)
                            continue
                    else:
                        # Impossible question: sentinel span, but keep a literal
                        # "yes"/"no" answer text so yes/no supervision survives.
                        start_position = -1
                        end_position = -1
                        orig_answer_text = ""
                        if len(qa["answers"]) > 0:
                            answer = qa["answers"][0]
                            # Make sure that answer text will be preserved for
                            # yes/no.
                            if answer["text"] in ["yes", "no"]:
                                orig_answer_text = answer["text"]
                if not is_training:
                    switch = None
                example = SquadExample(
                    qas_id=qas_id,
                    question_text=question_text,
                    doc_tokens=doc_tokens,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    end_position=end_position,
                    switch=switch,
                    is_impossible=is_impossible)
                examples.append(example)
    return examples
# TODO: check if we could remove this functions; it seems we don't need this?
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length, is_training,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
                                 cls_token_segment_id=0, pad_token_segment_id=0,
                                 mask_padding_with_zero=True,
                                 quiet=False):
    """Loads a data file into a list of `InputBatch`s.

    Each example's document is word-piece tokenized and split into
    overlapping spans (sliding window with stride ``doc_stride``); each
    span becomes one ``InputFeatures`` laid out as
    ``[CLS] query [SEP] span [SEP]`` (or with [CLS] at the end when
    ``cls_token_at_end`` is True), padded to ``max_seq_length``.

    Args:
        examples: iterable of SquadExample-like objects (``question_text``,
            ``doc_tokens``, ``start_position``, ``end_position``,
            ``orig_answer_text``, ``is_impossible``).
        tokenizer: provides ``tokenize()`` and ``convert_tokens_to_ids()``.
        max_seq_length: padded total length of every feature.
        doc_stride: step between consecutive document spans.
        max_query_length: question is truncated to this many tokens.
        is_training: when True, fill answer start/end positions and the
            ``switch`` label (1 for impossible spans, 0 otherwise).
        cls_token_at_end: BERT style (False, [CLS] first) vs. XLNet style
            (True, [CLS] last).
        mask_padding_with_zero: real tokens get mask 1 / padding 0 when
            True; inverted otherwise.
        quiet: suppress debug logging of the first 20 examples.

    Returns:
        list of ``InputFeatures``, one per (example, document span) pair.
    """
    unique_id = 1000000000
    features = []
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        # Build bidirectional maps between whitespace tokens and word pieces.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        # Remap answer positions from whitespace tokens to word pieces.
        tok_start_position = None
        tok_end_position = None
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
                example.orig_answer_text)
        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            p_mask = []
            # CLS token at the beginning
            if not cls_token_at_end:
                tokens.append(cls_token)
                segment_ids.append(cls_token_segment_id)
                p_mask.append(0)
                cls_index = 0
            # Query
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(sequence_a_segment_id)
                p_mask.append(1)
            # SEP token
            tokens.append(sep_token)
            segment_ids.append(sequence_a_segment_id)
            p_mask.append(1)
            # Paragraph: copy the span's word pieces, remembering for each
            # position its original token and whether this span is the one
            # where that token has maximum context.
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(
                    tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(sequence_b_segment_id)
                p_mask.append(0)
            paragraph_len = doc_span.length
            # SEP token
            tokens.append(sep_token)
            segment_ids.append(sequence_b_segment_id)
            p_mask.append(1)
            # CLS token at the end
            if cls_token_at_end:
                tokens.append(cls_token)
                segment_ids.append(cls_token_segment_id)
                p_mask.append(0)
                cls_index = len(tokens) - 1  # Index of classification token
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(pad_token)
                input_mask.append(0 if mask_padding_with_zero else 1)
                segment_ids.append(pad_token_segment_id)
                p_mask.append(1)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            span_is_impossible = example.is_impossible
            start_position = None
            end_position = None
            switch = None
            if is_training and not span_is_impossible:
                # Answers falling entirely outside this span make the span
                # itself impossible.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                    span_is_impossible = True
                else:
                    # +2 skips [CLS] and the first [SEP] before the paragraph.
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if is_training and span_is_impossible:
                start_position = cls_index
                end_position = cls_index
                switch = 1
            elif is_training and not span_is_impossible:
                switch = 0
            # The questions whose ``is_impossible'' are originally True should
            # be 1.
            if example.is_impossible is True:
                switch = 1
            if example_index < 20 and not quiet:
                logger.info("*** Example ***")
                logger.info("unique_id: %s" % (unique_id))
                logger.info("example_index: %s" % (example_index))
                logger.info("doc_span_index: %s" % (doc_span_index))
                logger.info("tokens: %s" % " ".join(tokens))
                logger.info("token_to_orig_map: %s" % " ".join([
                    "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
                logger.info("token_is_max_context: %s" % " ".join([
                    "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
                ]))
                logger.info("input_ids: %s" %
                            " ".join([str(x) for x in input_ids]))
                logger.info(
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training and span_is_impossible:
                    logger.info("impossible example")
                if is_training and not span_is_impossible:
                    answer_text = " ".join(
                        tokens[start_position:(end_position + 1)])
                    logger.info("start_position: %d" % (start_position))
                    logger.info("end_position: %d" % (end_position))
                    logger.info(
                        "answer: %s" % (answer_text))
            features.append(
                InputFeatures(
                    unique_id=unique_id,
                    example_index=example_index,
                    doc_span_index=doc_span_index,
                    tokens=tokens,
                    token_to_orig_map=token_to_orig_map,
                    token_is_max_context=token_is_max_context,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    cls_index=cls_index,
                    p_mask=p_mask,
                    paragraph_len=paragraph_len,
                    start_position=start_position,
                    end_position=end_position,
                    switch=switch,
                    is_impossible=span_is_impossible))
            unique_id += 1
    return features
# Convert example method for span + yes/no datasets (e.g., HotpotQA, NaturalQuestions)
def convert_examples_to_features_yes_no(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
| |
"""
Notice : 神兽保佑 ,测试一次通过
//
// ┏┛ ┻━━━━━┛ ┻┓
// ┃ ┃
// ┃ ━ ┃
// ┃ ┳┛ ┗┳ ┃
// ┃ ┃
// ┃ ┻ ┃
// ┃ ┃
// ┗━┓ ┏━━━┛
// ┃ ┃ Author: somewheve
// ┃ ┃ Datetime: 2019/7/3 下午8:46 ---> 无知即是罪恶
// ┃ ┗━━━━━━━━━┓
// ┃ ┣┓
// ┃ ┏┛
// ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛
// ┃ ┫ ┫ ┃ ┫ ┫
// ┗━┻━┛ ┗━┻━┛
//
"""
from typing import Text
""" 本地持仓对象 """
from copy import copy
from ctpbee.constant import PositionData, Offset, Direction, OrderRequest, OrderData, \
Exchange, TickData, EXCHANGE_MAPPING, BarData
class LocalVariable:
    """Tiny container exposing ``long``/``short`` floats parsed from a mapping.

    A missing or ``None`` entry yields ``0``; any other value is coerced
    with ``float()``.
    """

    def __init__(self, data):
        long_raw = data.get("long")
        short_raw = data.get("short")
        self.long = float(long_raw) if long_raw is not None else 0
        self.short = float(short_raw) if short_raw is not None else 0
class PositionHolding:
    """Local position state for a single contract (one ``local_symbol``).

    Tracks today's/yesterday's long and short volumes, volumes frozen by
    pending close orders, average prices and floating / mark-to-market pnl,
    and converts order requests according to close-today rules.
    """
    def __init__(self, local_symbol, app):
        """Resolve the contract multiplier (``size``) and zero all state.

        Args:
            local_symbol: ``"<symbol>.<exchange>"`` contract identifier.
            app: either a looper ``Account`` or an app object exposing
                ``recorder`` / ``trader``.

        Raises:
            ValueError: when *local_symbol* has no exchange part or the
                contract multiplier cannot be resolved.
        """
        self.local_symbol = local_symbol
        try:
            self.exchange = local_symbol.split(".")[1]
            self.symbol = local_symbol.split(".")[0]
        except Exception:
            raise ValueError("invalid local_symbol")
        self.active_orders = {}
        self.size = 1
        # Local import: avoids importing the looper account at module load.
        from ctpbee.looper.account import Account
        if isinstance(app, Account):  # looper Account exposes the size map directly
            self.size = app.get_size_from_map(local_symbol)
        else:
            if app.recorder.get_contract(self.local_symbol) is not None:
                self.size = app.recorder.get_contract(self.local_symbol).size
            elif getattr(app.trader, "account", None) is not None:
                self.size = app.trader.account.get_size_from_map(local_symbol=local_symbol)
            else:
                raise ValueError("获取合约信息失败, 持仓盈亏计算失败")
        if self.size is None:
            raise ValueError(f"当前仓位: {self.local_symbol} 合约乘数设置出现问题")
        # Long side: total / yesterday / today volumes, pnl and prices.
        self.long_pos = 0
        self.long_yd = 0
        self.long_td = 0
        self.long_pnl = 0
        self.long_stare_pnl = 0
        self.long_price = 0
        self.long_open_price = 0
        # Short side mirrors the long side.
        self.short_pos = 0
        self.short_yd = 0
        self.short_td = 0
        self.short_pnl = 0
        self.short_stare_pnl = 0
        self.short_price = 0
        self.short_open_price = 0
        # Volumes locked by pending close orders (see calculate_frozen).
        self.long_pos_frozen = 0
        self.long_yd_frozen = 0
        self.long_td_frozen = 0
        self.short_pos_frozen = 0
        self.short_yd_frozen = 0
        self.short_td_frozen = 0
        self.pre_settlement_price = 0
        self.last_price = 0
    @property
    def long_available(self):
        # Long volume not locked by pending close orders.
        return self.long_pos - self.long_pos_frozen
    @property
    def short_available(self):
        # Short volume not locked by pending close orders.
        return self.short_pos - self.short_pos_frozen
    def update_trade(self, trade):
        """Apply a trade fill to today's/yesterday's volumes, then refresh
        totals, average prices and pnl.

        NOTE(review): ``self.exchange`` is a plain str from the symbol split,
        while ``Exchange.SHFE`` is an enum member — confirm they compare
        equal (str-valued enum?) or the SHFE branches below never fire.
        """
        # Long-direction trade closes/opens against the short side.
        if trade.direction == Direction.LONG:
            # Open
            if trade.offset == Offset.OPEN:
                self.long_td += trade.volume
            # Close today
            elif trade.offset == Offset.CLOSETODAY:
                self.short_td -= trade.volume
            # Close yesterday
            elif trade.offset == Offset.CLOSEYESTERDAY:
                self.short_yd -= trade.volume
            # Generic close
            elif trade.offset == Offset.CLOSE:
                # On SHFE, plain CLOSE is equivalent to close-yesterday
                if self.exchange == Exchange.SHFE:
                    self.short_yd -= trade.volume
                # Elsewhere, close today's volume first
                else:
                    self.short_td -= trade.volume
                    if self.short_td < 0:
                        # Spill the excess into yesterday's volume.
                        self.short_yd += self.short_td
                        self.short_td = 0
        elif trade.direction == Direction.SHORT:
            # Open
            if trade.offset == Offset.OPEN:
                self.short_td += trade.volume
            # Close today
            elif trade.offset == Offset.CLOSETODAY:
                self.long_td -= trade.volume
            # Close yesterday
            elif trade.offset == Offset.CLOSEYESTERDAY:
                self.long_yd -= trade.volume
            # Generic close
            elif trade.offset == Offset.CLOSE:
                # On SHFE, plain CLOSE is equivalent to close-yesterday
                if self.exchange == Exchange.SHFE:
                    self.long_yd -= trade.volume
                # Elsewhere, close today's volume first
                else:
                    self.long_td -= trade.volume
                    if self.long_td < 0:
                        # Spill the excess into yesterday's volume.
                        self.long_yd += self.long_td
                        self.long_td = 0
        # self.long_pos = self.long_td + self.long_yd
        # self.short_pos = self.short_yd + self.short_td
        # Recompute aggregates after the volume change.
        self.calculate_price(trade)
        self.calculate_position()
        self.calculate_pnl()
        self.calculate_stare_pnl()
    def calculate_position(self):
        """Recompute total long/short volumes from today + yesterday."""
        self.long_pos = self.long_td + self.long_yd
        self.short_pos = self.short_td + self.short_yd
    def update_position(self, position: PositionData):
        """Overwrite one side of the holding from a position snapshot."""
        if position.direction == Direction.LONG:
            self.long_pos = position.volume
            self.long_yd = position.yd_volume
            # Today's volume is whatever is not yesterday's.
            self.long_td = self.long_pos - self.long_yd
            self.long_pnl = position.pnl
            self.long_price = position.price
            self.long_open_price = position.open_price
        elif position.direction == Direction.SHORT:
            self.short_pos = position.volume
            self.short_yd = position.yd_volume
            self.short_td = self.short_pos - self.short_yd
            self.short_pnl = position.pnl
            self.short_price = position.price
            self.short_open_price = position.open_price
    def update_order(self, order: OrderData):
        """Track *order* while active, then refresh frozen volumes."""
        if order._is_active():
            self.active_orders[order.local_order_id] = order
        else:
            if order.local_order_id in self.active_orders:
                self.active_orders.pop(order.local_order_id)
        self.calculate_frozen()
    def update_order_request(self, req: OrderRequest, local_order_id: str):
        """Register an outgoing request identified by ``"<gateway>.<orderid>"``."""
        gateway_name, orderid = local_order_id.split(".")
        order = req._create_order_data(orderid, gateway_name)
        self.update_order(order)
    def update_tick(self, tick, pre_settlement_price):
        """Refresh pnl from the latest tick price."""
        self.pre_settlement_price = pre_settlement_price
        self.last_price = tick.last_price
        self.calculate_pnl()
        self.calculate_stare_pnl()
    def update_bar(self, bar, pre_close):
        """Refresh pnl from a bar's close price."""
        self.pre_settlement_price = pre_close
        self.last_price = bar.close_price
        self.calculate_pnl()
        self.calculate_stare_pnl()
    def calculate_frozen(self):
        """Recompute all frozen volumes from the active close orders."""
        self.long_pos_frozen = 0
        self.long_yd_frozen = 0
        self.long_td_frozen = 0
        self.short_pos_frozen = 0
        self.short_yd_frozen = 0
        self.short_td_frozen = 0
        for order in self.active_orders.values():
            # Ignore position open orders
            if order.offset == Offset.OPEN:
                continue
            # The untraded remainder is what stays frozen.
            frozen = order.volume - order.traded
            if order.direction == Direction.LONG:
                if order.offset == Offset.CLOSETODAY:
                    self.short_td_frozen += frozen
                elif order.offset == Offset.CLOSEYESTERDAY:
                    self.short_yd_frozen += frozen
                elif order.offset == Offset.CLOSE:
                    self.short_td_frozen += frozen
                    # Spill excess over today's volume into yesterday's.
                    if self.short_td_frozen > self.short_td:
                        self.short_yd_frozen += (self.short_td_frozen
                                                 - self.short_td)
                        self.short_td_frozen = self.short_td
            elif order.direction == Direction.SHORT:
                if order.offset == Offset.CLOSETODAY:
                    self.long_td_frozen += frozen
                elif order.offset == Offset.CLOSEYESTERDAY:
                    self.long_yd_frozen += frozen
                elif order.offset == Offset.CLOSE:
                    self.long_td_frozen += frozen
                    # Spill excess over today's volume into yesterday's.
                    if self.long_td_frozen > self.long_td:
                        self.long_yd_frozen += (self.long_td_frozen
                                                - self.long_td)
                        self.long_td_frozen = self.long_td
        self.long_pos_frozen = self.long_td_frozen + self.long_yd_frozen
        self.short_pos_frozen = self.short_td_frozen + self.short_yd_frozen
    def convert_order_request_shfe(self, req: OrderRequest):
        """Split a CLOSE request into CLOSETODAY/CLOSEYESTERDAY legs (SHFE).

        Returns an empty list when available volume is insufficient.
        """
        if req.offset == Offset.OPEN:
            return [req]
        # Closing a LONG request hits the short side, and vice versa.
        if req.direction == Direction.LONG:
            pos_available = self.short_pos - self.short_pos_frozen
            td_available = self.short_td - self.short_td_frozen
        else:
            pos_available = self.long_pos - self.long_pos_frozen
            td_available = self.long_td - self.long_td_frozen
        if req.volume > pos_available:
            return []
        elif req.volume <= td_available:
            req_td = copy(req)
            req_td.offset = Offset.CLOSETODAY
            return [req_td]
        else:
            # Close all of today's volume, then the remainder from yesterday's.
            req_list = []
            if td_available > 0:
                req_td = copy(req)
                req_td.offset = Offset.CLOSETODAY
                req_td.volume = td_available
                req_list.append(req_td)
            req_yd = copy(req)
            req_yd.offset = Offset.CLOSEYESTERDAY
            req_yd.volume = req.volume - td_available
            req_list.append(req_yd)
            return req_list
    def convert_order_request_lock(self, req: OrderRequest):
        """Convert *req* under lock mode: lock (open the opposite side) when
        today's volume exists; otherwise close yesterday's first, then open.
        """
        if req.direction == Direction.LONG:
            td_volume = self.short_td
            yd_available = self.short_yd - self.short_yd_frozen
        else:
            td_volume = self.long_td
            yd_available = self.long_yd - self.long_yd_frozen
        # If there is td_volume, we can only lock position
        if td_volume:
            req_open = copy(req)
            req_open.offset = Offset.OPEN
            return [req_open]
        # If no td_volume, we close opposite yd position first
        # then open new position
        else:
            open_volume = max(0, req.volume - yd_available)
            req_list = []
            if yd_available:
                # NOTE(review): req_yd keeps the full req.volume even when
                # yd_available < req.volume, so together with the OPEN leg
                # the total can exceed req.volume — verify intended.
                req_yd = copy(req)
                if self.exchange == Exchange.SHFE:
                    req_yd.offset = Offset.CLOSEYESTERDAY
                else:
                    req_yd.offset = Offset.CLOSE
                req_list.append(req_yd)
            if open_volume:
                req_open = copy(req)
                req_open.offset = Offset.OPEN
                req_open.volume = open_volume
                req_list.append(req_open)
            return req_list
    def calculate_pnl(self):
        """Recompute floating pnl from the last price.

        ZeroDivisionError/AttributeError degrade the side's pnl to 0
        (the zero-division guard is kept although no division occurs here).
        """
        try:
            self.long_pnl = round(self.long_pos * (self.last_price - self.long_price) * self.size)
        except ZeroDivisionError:
            self.long_pnl = 0
        except AttributeError:
            self.long_pnl = 0
        try:
            self.short_pnl = round(self.short_pos * (self.short_price - self.last_price) * self.size)
        except ZeroDivisionError:
            self.short_pnl = 0
        except AttributeError:
            self.short_pnl = 0
    def calculate_stare_pnl(self):
        """Recompute mark-to-market pnl against the open (settlement) price;
        any arithmetic/attribute error degrades the side's value to 0."""
        try:
            self.long_stare_pnl = self.long_pos * (self.last_price - self.long_open_price) * self.size
        except ZeroDivisionError:
            self.long_stare_pnl = 0
        except AttributeError:
            self.long_stare_pnl = 0
        except OverflowError:
            self.long_stare_pnl = 0
        try:
            self.short_stare_pnl = self.short_pos * (self.short_open_price - self.last_price) * self.size
        except ZeroDivisionError:
            self.short_stare_pnl = 0
        except AttributeError:
            self.short_stare_pnl = 0
        except OverflowError:
            self.short_stare_pnl = 0
    def calculate_price(self, trade):
        """Recompute volume-weighted average holding price from a trade."""
        # Only opening trades move the average price; closes leave it as-is.
        if trade.offset == Offset.OPEN:
            if trade.direction == Direction.LONG:
                cost = self.long_price * self.long_pos + trade.volume * trade.price
                open_cost = self.long_open_price * self.long_pos + trade.volume * trade.price
                new_pos = self.long_pos + trade.volume
                if new_pos:
                    self.long_price = cost / new_pos
                    self.long_open_price = open_cost / new_pos
                else:
                    self.long_price = 0
                    self.long_open_price = 0
            else:
                cost = self.short_price * self.short_pos + trade.volume * trade.price
                open_cost = self.short_open_price * self.short_pos + trade.volume * trade.price
                new_pos = self.short_pos + trade.volume
                if new_pos:
                    self.short_price = cost / new_pos
                    self.short_open_price = open_cost / new_pos
                else:
                    self.short_price = 0
                    self.short_open_price = 0
    def get_position_by_direction(self, direction):
        """Return a PositionData snapshot for *direction*; None if the
        direction is neither LONG nor SHORT."""
        if direction == Direction.LONG:
            return PositionData(
                symbol=self.symbol,
                volume=self.long_pos,
                exchange=EXCHANGE_MAPPING[self.exchange],
                direction=direction,
                pnl=self.long_pnl,
                price=self.long_price,
                frozen=self.long_pos_frozen,
                open_price=self.long_open_price,
                yd_volume=self.long_yd,
                float_pnl=self.long_stare_pnl,
            )
        elif direction == Direction.SHORT:
            return PositionData(
                symbol=self.symbol,
                volume=self.short_pos,
                exchange=EXCHANGE_MAPPING[self.exchange],
                direction=direction,
                pnl=self.short_pnl,
                price=self.short_price,
                frozen=self.short_pos_frozen,
                yd_volume=self.short_yd,
                float_pnl=self.short_stare_pnl,
                open_price=self.short_open_price,
            )
        return None
    def __repr__(self):
        return f"Pos<local_symbol:{self.local_symbol} long_direction: {self.long_pos}---{self.long_price} pnl: {self.long_pnl} short_direction: {self.short_pos}---{self.short_price} pnl:{self.short_pnl}>"
class LocalPositionManager(dict):
""" 用于管理持仓信息 只提供向外的接口 """
def __init__(self, app):
super().__init__({})
self.app = app
self.size_map = {}
def update_tick(self, tick: TickData, pre_close):
""" 更新tick """
if tick.local_symbol not in self:
return
self.get(tick.local_symbol).update_tick(tick, pre_close)
def update_bar(self, bar: BarData, pre_close):
if bar.local_symbol not in self:
return
self.get(bar.local_symbol).update_bar(bar, pre_close)
def is_convert_required(self, local_symbol: str):
"""
Check if the contract needs offset convert.
"""
contract = self.app.recorder.get_contract(local_symbol)
# Only contracts with long-short position mode requires convert
if not contract:
return False
elif contract.net_position:
return False
else:
return True
def update_order_request(self, req: OrderRequest, local_orderid: str):
""""""
if not self.is_convert_required(req.local_symbol):
return
holding = self.get(req.local_symbol, None)
if not holding:
self[req.local_symbol] = PositionHolding(req.local_symbol, self.app)
self[req.local_symbol].update_order_request(req, local_orderid)
def convert_order_request(self, req: OrderRequest, lock: bool):
""""""
if not self.is_convert_required(req.local_symbol):
return [req]
holding = self.get(req.local_symbol, None)
if not holding:
self[req.local_symbol] = PositionHolding(req.local_symbol, self.app)
if lock:
return self[req.local_symbol].convert_order_request_lock(req)
elif req.exchange == Exchange.SHFE:
return self[req.local_symbol].convert_order_request_shfe(req)
else:
return [req]
def update_order(self, order):
""" 更新order """
if order.local_symbol not in self:
self[order.local_symbol] = PositionHolding(order.local_symbol, self.app)
else:
self.get(order.local_symbol).update_order(order)
def update_trade(self, trade):
""" 更新成交 """
if trade.local_symbol not in self:
self[trade.local_symbol] = PositionHolding(trade.local_symbol, self.app)
self[trade.local_symbol].update_trade(trade)
else:
self.get(trade.local_symbol).update_trade(trade)
def update_position(self, position):
""" 更新持仓 """
if position.local_symbol not in self.keys():
self[position.local_symbol] = PositionHolding(position.local_symbol, self.app)
self[position.local_symbol].update_position(position)
else:
self.get(position.local_symbol).update_position(position)
def get_position(self, local_symbol):
""" 根据local_symbol | |
Field(
name='Extra_ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Flag2',
type='byte',
),
Field(
name='Key3',
type='ulong',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Key4',
type='ulong',
),
Field(
name='Key5',
type='ulong',
),
Field(
name='Key6',
type='ulong',
),
Field(
name='Flag3',
type='byte',
),
Field(
name='Key7',
type='ulong',
),
Field(
name='Flag4',
type='bool',
),
),
),
'Strongboxes.dat': File(
fields=(
Field(
name='ChestsKey',
type='ulong',
key='Chests.dat',
unique=True,
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='IsCartographerBox',
type='bool',
),
Field(
name='Flag0',
type='bool',
),
),
),
'SuicideExplosion.dat': File(
fields=(
Field(
name='Id',
type='int',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Flag2',
type='bool',
),
Field(
name='Flag3',
type='bool',
),
Field(
name='Unknown1',
type='int',
),
),
),
'SummonedSpecificBarrels.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='ChestsKey',
type='ulong',
key='Chests.dat',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Key2',
type='ulong',
),
),
),
'SummonedSpecificMonsters.dat': File(
fields=(
Field(
name='Id',
type='int',
unique=True,
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='Unknown0',
type='int',
),
# TODO unknownKey
Field(
name='Key0',
type='ulong',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Flag2',
type='byte',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Key2',
type='ulong',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Flag3',
type='bool',
),
Field(
name='Unknown4',
type='int',
),
),
),
'SummonedSpecificMonstersOnDeath.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown7',
type='byte',
),
),
),
'SupporterPackSets.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='FormatTitle',
type='ref|string',
),
Field(
name='Background',
type='ref|string',
),
Field(
name='Time0',
type='ref|string',
),
Field(
name='Time1',
type='ref|string',
),
Field(
name='ShopPackagePlatformKey',
type='ref|list|int',
enum='SHOP_PACKAGE_PLATFORM',
),
Field(
name='Unknown0',
type='ref|string',
),
),
),
'SurgeCategory.dat': File(
fields=(
),
),
'SurgeTypes.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Keys0',
type='ref|list|ulong',
),
),
),
'SynthesisAreaSize.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
),
),
'SynthesisAreas.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='MinLevel',
type='int',
),
Field(
name='MaxLevel',
type='int',
),
Field(
name='Weight',
type='int',
),
Field(
name='TopologiesKey',
type='ulong',
key='Topologies.dat',
),
Field(
name='MonsterPacksKeys',
type='ref|list|ulong',
key='MonsterPacks.dat',
),
Field(
name='ArtFile',
type='ref|string',
file_path=True,
),
Field(
name='Name',
type='ref|string',
),
Field(
name='SynthesisAreaSizeKey',
type='ulong',
key='SynthesisAreaSize.dat',
),
Field(
name='AchievementItemsKey',
type='ulong',
key='AchievementItems.dat',
),
),
),
'SynthesisBonuses.dat': File(
fields=(
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown7',
type='short',
),
),
),
'SynthesisBrackets.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='MaxLevel',
type='int',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
),
),
'SynthesisFragmentDialogue.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='NPCTextAudioKey1',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='NPCTextAudioKey2',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='NPCTextAudioKey3',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='NPCTextAudioKey4',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='NPCTextAudioKey5',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='NPCTextAudioKey6',
type='ulong',
key='NPCTextAudio.dat',
),
),
),
'SynthesisGlobalMods.dat': File(
fields=(
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Weight',
type='int',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='MaxLevel',
type='int',
),
),
),
'SynthesisMonsterExperiencePerLevel.dat': File(
fields=(
Field(
name='Level',
type='int',
),
Field(
name='ExperienceBonus',
type='int',
),
),
),
'SynthesisRewardCategories.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
),
),
'SynthesisRewardTypes.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Description',
type='ref|string',
),
Field(
name='ArtFile',
type='ref|string',
file_path=True,
),
Field(
name='AchievementItemsKey',
type='ulong',
key='AchievementItems.dat',
),
),
),
'TableMonsterSpawners.dat': File(
fields=(
Field(
name='Metadata',
type='ref|string',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='ref|list|ulong',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Unknown17',
type='int',
),
Field(
name='Flag3',
type='byte',
),
Field(
name='Flag4',
type='byte',
),
Field(
name='Unknown18',
type='int',
),
Field(
name='Script1',
type='ref|string',
),
Field(
name='Flag5',
type='byte',
),
Field(
name='Flag6',
type='byte',
),
Field(
name='Script2',
type='ref|string',
),
Field(
name='Data0',
type='ref|list|int',
),
Field(
name='Unknown19',
type='int',
),
Field(
name='Unknown20',
type='int',
),
),
),
'Tags.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown0',
type='uint',
),
Field(
name='DisplayString',
type='ref|string',
),
Field(
name='Name',
type='ref|string',
),
),
),
# display_type = "{0:#032b}"
'TalismanMonsterMods.dat': File(
fields=(
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Key0',
type='ulong',
),
),
),
'TalismanPacks.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
Field(
name='MonsterPacksKeys',
type='ref|list|ulong',
key='MonsterPacks.dat',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='MonsterPacksKey',
type='ulong',
key='MonsterPacks.dat',
),
),
),
'Talismans.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
unique=True,
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Tier',
type='int',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Key1',
type='ulong',
),
Field(
name='Unknown11',
type='int',
),
),
),
'TencentAutoLootPetCurrencies.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='Unknown2',
type='byte',
),
),
),
'TencentAutoLootPetCurrenciesExcludable.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
),
),
'TerrainPlugins.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='UnknownUnique',
type='int',
unique=True,
),
),
),
'Tips.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Text',
type='ref|string',
),
Field(
name='TextXBox',
type='ref|string',
),
),
),
'Topologies.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='DGRFile',
type='ref|string',
file_path=True,
file_ext='.dgr',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
),
),
'TormentSpirits.dat': File(
fields=(
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='Spirit_ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='Touched_ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='Possessed_ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='MinZoneLevel',
type='int',
),
Field(
name='MaxZoneLevel',
type='int',
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='SummonedMonster_MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='ModsKeys0',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='ModsKeys1',
type='ref|list|ulong',
key='Mods.dat',
),
),
),
'TreasureHunterMissions.dat': File(
fields=(
Field(
name='Unknown0',
type='ref|string',
),
Field(
name='Unknown1',
type='ulong',
),
Field(
name='Unknown3',
type='ulong',
),
Field(
name='Unknown5',
type='ref|list|ulong',
),
Field(
name='Unknown7',
type='ref|list|ulong',
),
Field(
name='Unknown9',
type='ref|list|ulong',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown19',
type='byte',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Unknown17',
type='int',
),
Field(
name='Unknown18',
type='int',
),
),
),
'TriggerBeam.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='ref|list|ulong',
),
Field(
name='Unknown2',
type='ref|list|ulong',
),
Field(
name='Unknown3',
type='ref|list|int',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Data0',
type='ref|list|int',
),
Field(
name='Flag2',
type='byte',
),
),
),
'TriggerSpawners.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Data0',
type='ref|list|int',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Data1',
type='ref|list|int',
),
Field(
name='Flag0',
type='bool',
),
),
),
'Tutorial.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='UIFile',
type='ref|string',
file_path=True,
file_ext='.ui',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='IsEnabled',
type='bool',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='ref|list|int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='ref|list|int',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
),
),
'UITalkCategories.dat': File(
fields=(
),
),
'UITalkText.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='UITalkCategoriesKey',
type='int',
key='UITalkCategories.dat',
key_offset=1,
),
Field(
name='OGGFile',
type='ref|string',
file_path=True,
file_ext='.ogg',
),
Field(
name='Text',
type='ref|string',
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Key0',
type='ulong',
),
),
),
'UniqueChests.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='WordsKey',
type='ulong',
key='Words.dat',
),
Field(
name='FlavourTextKey',
type='ulong',
key='FlavourText.dat',
),
Field(
name='MinLevel',
type='int',
),
Field(
name='ModsKeys',
type='ref|list|ulong',
key='Mods.dat',
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='Data1',
type='ref|list|int',
),
Field(
name='AOFile',
type='ref|string',
file_path=True,
file_ext='.ao',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Data2',
type='ref|list|uint',
),
Field(
name='AppearanceChestsKey',
description='Uses this chest for it"s visuals',
type='ulong',
key='Chests.dat',
),
Field(
name='ChestsKey',
type='ulong',
key='Chests.dat',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
),
),
'UniqueJewelLimits.dat': File(
fields=(
Field(
name='UniqueItemsKey',
type='ulong',
),
Field(
name='Limit',
type='int',
),
),
),
'UniqueMapInfo.dat': File(
fields=(
Field(
| |
'data': attribData_8570479821456996,
'lang': attribLang_267608473188383376,
'name': attribName_1939937075622105121,
'standby': attribStandby_1257958614369601764,
'align': attribAlign_242187980190960400,
'vspace': attribVspace_3197471081211222544,
'onkeyup': attribOnkeyup_4105996191008517796,
'ondblclick': attribOndblclick_923980074842425329,
'declare': attribDeclare_2910115601546336836,
'dir': attribDir_4297072167429554704,
'tabindex': attribTabindex_1133897031401996169,
}
_name = u'object'
#
# param is used to supply a named property value.
# In XML it would seem natural to follow RDF and support an
# abbreviated syntax where the param elements are replaced
# by attribute value pairs on the object start tag.
#
class Param(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``param``.

    Supplies a named property value to an enclosing ``object`` element
    (see the comment block above this class).  ATTRIBUTES maps XML
    attribute names to shared attribute-descriptor objects defined
    elsewhere in this generated module; KWATTRIBUTES maps the same
    descriptors under Python-identifier-safe keyword spellings.
    """
    ATTRIBUTES = {
        u'valuetype': attribValuetype_3058020386316608164,
        u'type': attribType_2839642281990897124,
        u'id': attribId_4002951160133423716,
        u'value': attribValue_227024667205755136,
        u'name': attribName_3839651748354608356,
    }
    # Content model built from None — presumably an EMPTY element;
    # confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel(None)
    KWATTRIBUTES = {
        'valuetype': attribValuetype_3058020386316608164,
        'type': attribType_2839642281990897124,
        'id': attribId_4002951160133423716,
        'value': attribValue_227024667205755136,
        'name': attribName_3839651748354608356,
    }
    # XML tag name emitted/matched for this element.
    _name = u'param'
# =================== Images ===========================================
#
# To avoid accessibility problems for people who aren't
# able to see the image, you should provide a text
# description using the alt and longdesc attributes.
# In addition, avoid the use of server-side image maps.
#
class Img(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``img``.

    Per the generator's comment above, authors should provide ``alt``
    and ``longdesc`` for accessibility.  ATTRIBUTES maps XML attribute
    names to shared descriptor objects; KWATTRIBUTES maps the
    Python-identifier spellings of the same attributes
    (``class_`` for ``class``, ``xml_lang`` for ``xml:lang``) to the
    same descriptors.
    """
    ATTRIBUTES = {
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'usemap': attribUsemap_209539994327519769,
        u'alt': attribAlt_3627988262564192641,
        u'border': attribBorder_4105672098752522596,
        u'id': attribId_4002951160133423716,
        u'style': attribStyle_733285237156411536,
        u'title': attribTitle_1178737426446382009,
        u'hspace': attribHspace_511202655064171876,
        u'ismap': attribIsmap_2782304858555480644,
        u'width': attribWidth_936277652245334569,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onclick': attribOnclick_1389815037327772224,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'height': attribHeight_3964235387625190441,
        u'class': attribClass_1166814720137472289,
        u'lang': attribLang_267608473188383376,
        u'src': attribSrc_3756099719576568676,
        u'name': attribName_1939937075622105121,
        u'align': attribAlign_242187980190960400,
        u'vspace': attribVspace_3197471081211222544,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'longdesc': attribLongdesc_35967826104941025,
        u'dir': attribDir_4297072167429554704,
    }
    # Content model built from None — presumably an EMPTY element;
    # confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel(None)
    KWATTRIBUTES = {
        'onmousedown': attribOnmousedown_312304592206311721,
        'usemap': attribUsemap_209539994327519769,
        'alt': attribAlt_3627988262564192641,
        'border': attribBorder_4105672098752522596,
        'id': attribId_4002951160133423716,
        'style': attribStyle_733285237156411536,
        'title': attribTitle_1178737426446382009,
        'hspace': attribHspace_511202655064171876,
        'ismap': attribIsmap_2782304858555480644,
        'width': attribWidth_936277652245334569,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseup': attribOnmouseup_162556595998286400,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onclick': attribOnclick_1389815037327772224,
        'onmouseout': attribOnmouseout_55467262469652544,
        'onkeypress': attribOnkeypress_532917457362969849,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'class_': attribClass_1166814720137472289,
        'onmouseover': attribOnmouseover_741809317326693841,
        'height': attribHeight_3964235387625190441,
        'lang': attribLang_267608473188383376,
        'src': attribSrc_3756099719576568676,
        'name': attribName_1939937075622105121,
        'align': attribAlign_242187980190960400,
        'vspace': attribVspace_3197471081211222544,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'longdesc': attribLongdesc_35967826104941025,
        'dir': attribDir_4297072167429554704,
    }
    # XML tag name emitted/matched for this element.
    _name = u'img'
# usemap points to a map element which may be in this document
# or an external document, although the latter is not widely supported
# ================== Client-side image maps ============================
# These can be placed in the same document or grouped in a
# separate document although this isn't yet widely supported
class Map(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``map``
    (container for a client-side image map; see the section comment
    above).

    ATTRIBUTES maps XML attribute names to shared descriptor objects;
    KWATTRIBUTES holds the keyword-argument spellings
    (``class_`` for ``class``, ``xml_lang`` for ``xml:lang``).
    """
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'class': attribClass_1166814720137472289,
        u'title': attribTitle_1178737426446382009,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4454725712327073225,
        u'dir': attribDir_4297072167429554704,
        u'name': attribName_1485147577566348721,
    }
    # Content model built from (True,) — presumably allows child
    # content; confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'onkeypress': attribOnkeypress_532917457362969849,
        'title': attribTitle_1178737426446382009,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'style': attribStyle_733285237156411536,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4454725712327073225,
        'dir': attribDir_4297072167429554704,
        'name': attribName_1485147577566348721,
    }
    # XML tag name emitted/matched for this element.
    _name = u'map'
class Area(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``area``
    (a region within a client-side image map).

    ATTRIBUTES maps XML attribute names to shared descriptor objects;
    KWATTRIBUTES holds the keyword-argument spellings
    (``class_`` for ``class``, ``xml_lang`` for ``xml:lang``).
    """
    ATTRIBUTES = {
        u'accesskey': attribAccesskey_1489765571156059024,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'shape': attribShape_2366611433715347289,
        u'href': attribHref_2430637454403731329,
        u'alt': attribAlt_3627988262564192641,
        u'id': attribId_4002951160133423716,
        u'style': attribStyle_733285237156411536,
        u'title': attribTitle_1178737426446382009,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onfocus': attribOnfocus_60779381971423504,
        u'onblur': attribOnblur_280018615590293904,
        u'onclick': attribOnclick_1389815037327772224,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'nohref': attribNohref_230804690830991424,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'class': attribClass_1166814720137472289,
        u'lang': attribLang_267608473188383376,
        u'target': attribTarget_2606114967532412449,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'coords': attribCoords_942228262644424900,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'dir': attribDir_4297072167429554704,
        u'tabindex': attribTabindex_1133897031401996169,
    }
    # Content model built from None — presumably an EMPTY element;
    # confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel(None)
    KWATTRIBUTES = {
        'accesskey': attribAccesskey_1489765571156059024,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'shape': attribShape_2366611433715347289,
        'href': attribHref_2430637454403731329,
        'alt': attribAlt_3627988262564192641,
        'id': attribId_4002951160133423716,
        'style': attribStyle_733285237156411536,
        'title': attribTitle_1178737426446382009,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onfocus': attribOnfocus_60779381971423504,
        'onblur': attribOnblur_280018615590293904,
        'onclick': attribOnclick_1389815037327772224,
        'onmouseout': attribOnmouseout_55467262469652544,
        'onkeypress': attribOnkeypress_532917457362969849,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'nohref': attribNohref_230804690830991424,
        'class_': attribClass_1166814720137472289,
        'onmouseover': attribOnmouseover_741809317326693841,
        'lang': attribLang_267608473188383376,
        'target': attribTarget_2606114967532412449,
        'coords': attribCoords_942228262644424900,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'dir': attribDir_4297072167429554704,
        'tabindex': attribTabindex_1133897031401996169,
    }
    # XML tag name emitted/matched for this element.
    _name = u'area'
# ================ Forms ===============================================
class Form(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``form``.

    ATTRIBUTES maps XML attribute names to shared descriptor objects;
    KWATTRIBUTES holds the keyword-argument spellings
    (``class_`` for ``class``, ``xml_lang`` for ``xml:lang``,
    ``accept_charset`` for ``accept-charset``).
    """
    ATTRIBUTES = {
        u'accept': attribAccept_2938868979681976576,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'accept-charset': attribAccept_charset_349609442685618025,
        u'id': attribId_4002951160133423716,
        u'style': attribStyle_733285237156411536,
        u'title': attribTitle_1178737426446382009,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'method': attribMethod_1594070884251040000,
        u'onsubmit': attribOnsubmit_279271915279261921,
        u'onclick': attribOnclick_1389815037327772224,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'class': attribClass_1166814720137472289,
        u'enctype': attribEnctype_436576891689519025,
        u'lang': attribLang_267608473188383376,
        u'name': attribName_1939937075622105121,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'target': attribTarget_2606114967532412449,
        u'action': attribAction_1870430243837841,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'dir': attribDir_4297072167429554704,
    }
    # Content model built from (True,) — presumably allows child
    # content; confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    KWATTRIBUTES = {
        'accept': attribAccept_2938868979681976576,
        'onmousedown': attribOnmousedown_312304592206311721,
        'id': attribId_4002951160133423716,
        'style': attribStyle_733285237156411536,
        'title': attribTitle_1178737426446382009,
        'accept_charset': attribAccept_charset_349609442685618025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseup': attribOnmouseup_162556595998286400,
        'method': attribMethod_1594070884251040000,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onsubmit': attribOnsubmit_279271915279261921,
        'onclick': attribOnclick_1389815037327772224,
        'onmouseout': attribOnmouseout_55467262469652544,
        'onkeypress': attribOnkeypress_532917457362969849,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'class_': attribClass_1166814720137472289,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'enctype': attribEnctype_436576891689519025,
        'lang': attribLang_267608473188383376,
        'name': attribName_1939937075622105121,
        'target': attribTarget_2606114967532412449,
        'action': attribAction_1870430243837841,
        'ondblclick': attribOndblclick_923980074842425329,
        'dir': attribDir_4297072167429554704,
    }
    # XML tag name emitted/matched for this element.
    _name = u'form'
# forms shouldn't be nested
#
# Each label must not contain more than ONE field
# Label elements shouldn't be nested.
#
class Label(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``label``.

    Per the generator's comment above, a label must contain at most one
    form field and labels must not be nested.  ATTRIBUTES maps XML
    attribute names to shared descriptor objects; KWATTRIBUTES holds the
    keyword-argument spellings (``class_`` for ``class``, ``for_`` for
    ``for``, ``xml_lang`` for ``xml:lang``).
    """
    ATTRIBUTES = {
        u'lang': attribLang_267608473188383376,
        u'style': attribStyle_733285237156411536,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'title': attribTitle_1178737426446382009,
        u'accesskey': attribAccesskey_1489765571156059024,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onblur': attribOnblur_280018615590293904,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'class': attribClass_1166814720137472289,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'onclick': attribOnclick_1389815037327772224,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'onfocus': attribOnfocus_60779381971423504,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'id': attribId_4002951160133423716,
        u'dir': attribDir_4297072167429554704,
        u'for': attribFor_3922368010519548100,
    }
    # Content model built from (True,) — presumably allows child
    # content; confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel((True,))
    KWATTRIBUTES = {
        'lang': attribLang_267608473188383376,
        'style': attribStyle_733285237156411536,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onmouseout': attribOnmouseout_55467262469652544,
        'title': attribTitle_1178737426446382009,
        'accesskey': attribAccesskey_1489765571156059024,
        'onkeypress': attribOnkeypress_532917457362969849,
        'onblur': attribOnblur_280018615590293904,
        'xml_lang': attribXml_lang_1645670971257252241,
        'onmousedown': attribOnmousedown_312304592206311721,
        'class_': attribClass_1166814720137472289,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseover': attribOnmouseover_741809317326693841,
        'onclick': attribOnclick_1389815037327772224,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'for_': attribFor_3922368010519548100,
        'onfocus': attribOnfocus_60779381971423504,
        'ondblclick': attribOndblclick_923980074842425329,
        'id': attribId_4002951160133423716,
        'dir': attribDir_4297072167429554704,
    }
    # XML tag name emitted/matched for this element.
    _name = u'label'
# the name attribute is required for all but submit & reset
class Input(pycopia.XML.POM.ElementNode):
    """Generated POM element class for XHTML ``input``.

    Per the generator's comment above, the ``name`` attribute is
    required for all control types except submit and reset.  ATTRIBUTES
    maps XML attribute names to shared descriptor objects; KWATTRIBUTES
    holds the keyword-argument spellings (``class_`` for ``class``,
    ``xml_lang`` for ``xml:lang``).
    """
    ATTRIBUTES = {
        u'accesskey': attribAccesskey_1489765571156059024,
        u'accept': attribAccept_2938868979681976576,
        u'onmousedown': attribOnmousedown_312304592206311721,
        u'disabled': attribDisabled_3572037554672910400,
        u'usemap': attribUsemap_209539994327519769,
        u'alt': attribAlt_76754958203109904,
        u'onchange': attribOnchange_4344895153777381681,
        u'id': attribId_4002951160133423716,
        u'size': attribSize_390481296968790729,
        u'style': attribStyle_733285237156411536,
        u'checked': attribChecked_791188830984961600,
        u'title': attribTitle_1178737426446382009,
        u'readonly': attribReadonly_4574848601210343184,
        u'onselect': attribOnselect_1657697452831032001,
        u'onmousemove': attribOnmousemove_1463303904047580100,
        u'onmouseup': attribOnmouseup_162556595998286400,
        u'onfocus': attribOnfocus_60779381971423504,
        u'type': attribType_73263565986139489,
        u'onblur': attribOnblur_280018615590293904,
        u'onclick': attribOnclick_1389815037327772224,
        u'onmouseout': attribOnmouseout_55467262469652544,
        u'onkeypress': attribOnkeypress_532917457362969849,
        u'onkeydown': attribOnkeydown_1257884844152169025,
        u'onmouseover': attribOnmouseover_741809317326693841,
        u'class': attribClass_1166814720137472289,
        u'lang': attribLang_267608473188383376,
        u'src': attribSrc_3443572122774772041,
        u'name': attribName_1485147577566348721,
        u'align': attribAlign_242187980190960400,
        u'value': attribValue_227024667205755136,
        u'xml:lang': attribXml_lang_1645670971257252241,
        u'maxlength': attribMaxlength_3065314439821131481,
        u'onkeyup': attribOnkeyup_4105996191008517796,
        u'ondblclick': attribOndblclick_923980074842425329,
        u'dir': attribDir_4297072167429554704,
        u'tabindex': attribTabindex_1133897031401996169,
    }
    # Content model built from None — presumably an EMPTY element;
    # confirm against pycopia.XML.POM.ContentModel.
    CONTENTMODEL = pycopia.XML.POM.ContentModel(None)
    KWATTRIBUTES = {
        'accesskey': attribAccesskey_1489765571156059024,
        'xml_lang': attribXml_lang_1645670971257252241,
        'accept': attribAccept_2938868979681976576,
        'onmousedown': attribOnmousedown_312304592206311721,
        'disabled': attribDisabled_3572037554672910400,
        'usemap': attribUsemap_209539994327519769,
        'alt': attribAlt_76754958203109904,
        'onchange': attribOnchange_4344895153777381681,
        'id': attribId_4002951160133423716,
        'size': attribSize_390481296968790729,
        'style': attribStyle_733285237156411536,
        'checked': attribChecked_791188830984961600,
        'title': attribTitle_1178737426446382009,
        'readonly': attribReadonly_4574848601210343184,
        'onselect': attribOnselect_1657697452831032001,
        'onmousemove': attribOnmousemove_1463303904047580100,
        'onmouseup': attribOnmouseup_162556595998286400,
        'onfocus': attribOnfocus_60779381971423504,
        'type': attribType_73263565986139489,
        'onblur': attribOnblur_280018615590293904,
        'onclick': attribOnclick_1389815037327772224,
        'onmouseout': attribOnmouseout_55467262469652544,
        'onkeypress': attribOnkeypress_532917457362969849,
        'onkeydown': attribOnkeydown_1257884844152169025,
        'class_': attribClass_1166814720137472289,
        'onmouseover': attribOnmouseover_741809317326693841,
        'lang': attribLang_267608473188383376,
        'src': attribSrc_3443572122774772041,
        'name': attribName_1485147577566348721,
        'align': attribAlign_242187980190960400,
        'value': attribValue_227024667205755136,
        'maxlength': attribMaxlength_3065314439821131481,
        'onkeyup': attribOnkeyup_4105996191008517796,
        'ondblclick': attribOndblclick_923980074842425329,
        'dir': attribDir_4297072167429554704,
        'tabindex': attribTabindex_1133897031401996169,
    }
    # XML tag name emitted/matched for this element.
    _name = u'input'
# form control
class Select(pycopia.XML.POM.ElementNode):
ATTRIBUTES = {
u'onmousedown': attribOnmousedown_312304592206311721,
u'disabled': attribDisabled_3572037554672910400,
u'onchange': attribOnchange_4344895153777381681,
u'id': attribId_4002951160133423716,
u'size': attribSize_390481296968790729,
u'style': attribStyle_733285237156411536,
u'title': attribTitle_1178737426446382009,
u'onmousemove': attribOnmousemove_1463303904047580100,
u'onmouseup': attribOnmouseup_162556595998286400,
u'onfocus': attribOnfocus_60779381971423504, | |
# :[diStorm3]: Python binding
# Based on diStorm64 Python binding by <NAME>
# Initial support for decompose API added by <NAME>
# Changed license to GPLv3.
#
# Compatible with Python2.6 and above.
#
# Human-readable banner describing this binding's provenance.
info = (
    "diStorm3 by <NAME>, http://code.google.com/p/distorm/\n"
    "Based on diStorm64 Python binding by <NAME>, http://breakingcode.wordpress.com/\n"
)
__revision__ = "$Id: distorm.py 186 2010-05-01 14:20:41Z gdabah $"
# Public API of this module; internal ctypes plumbing stays private.
__all__ = [
    'Decode',
    'DecodeGenerator',
    'Decompose',
    'DecomposeGenerator',
    'Decode16Bits',
    'Decode32Bits',
    'Decode64Bits',
    'Mnemonics',
    'Registers',
]
from ctypes import *
from os.path import split, join
#==============================================================================
# Load the diStorm DLL
# Guess the DLL filename and load the library.

# Candidate shared-library names are searched next to this module.
_distorm_path = split(__file__)[0]
potential_libs = ['distorm3.dll', 'libdistorm3.dll', 'libdistorm3.so', 'libdistorm3.dylib']
lib_was_found = False
for _lib_name in potential_libs:
    try:
        _distorm_file = join(_distorm_path, _lib_name)
        _distorm = cdll.LoadLibrary(_distorm_file)
        lib_was_found = True
        # Stop at the first library that loads; without the break every
        # later candidate was also tried and the last success silently
        # won, possibly loading several copies into the process.
        break
    except OSError:
        pass  # this candidate is absent/unloadable; try the next one
if not lib_was_found:
    raise ImportError("Error loading the diStorm dynamic library (or cannot load library into process).")
# Get the decode C function (try 64 bits version first, only then 32 bits).
# SUPPORT_64BIT_OFFSET records which flavor of the C entry points this
# library exports; it also selects the ctypes offset type below.
SUPPORT_64BIT_OFFSET = False
try:
    internal_decode = _distorm.distorm_decode64
    internal_decompose = _distorm.distorm_decompose64
    internal_format = _distorm.distorm_format64
    SUPPORT_64BIT_OFFSET = True
except AttributeError:
    # 64-bit entry points are missing; fall back to the 32-bit builds.
    try:
        internal_decode = _distorm.distorm_decode32
        internal_decompose = _distorm.distorm_decompose32
        internal_format = _distorm.distorm_format32
    except AttributeError:
        # Neither symbol set exists: the loaded library is unusable.
        raise ImportError("Error loading distorm")
#==============================================================================
# diStorm C interface

MAX_TEXT_SIZE = 48 # See distorm.h for this value.
MAX_INSTRUCTIONS = 1000
# Return codes of the C decode functions (mirror DECRES_* in distorm.h).
DECRES_NONE = 0
DECRES_SUCCESS = 1
DECRES_MEMORYERR = 2
DECRES_INPUTERR = 3
# Code offsets are 64-bit when the 64-bit entry points were resolved
# above, 32-bit otherwise; every structure below uses this alias.
if SUPPORT_64BIT_OFFSET:
    _OffsetType = c_ulonglong
else:
    _OffsetType = c_uint
class _WString (Structure):
    """Counted, fixed-capacity C string (mirrors diStorm's _WString)."""
    _fields_ = [
        ('length', c_uint),              # number of valid bytes in p
        ('p', c_char * MAX_TEXT_SIZE),   # inline character buffer
    ]
class _CodeInfo (Structure):
    """Input descriptor passed to the C decompose/format functions."""
    _fields_ = [
        ('codeOffset', _OffsetType),
        ('nextOffset', _OffsetType),
        ('code', c_char_p),   # pointer to the raw machine-code buffer
        ('codeLen', c_int),   # length of that buffer in bytes
        ('dt', c_byte),       # presumably a Decode*Bits mode — confirm against distorm.h
        ('features', c_uint),
    ]
class _DecodedInst (Structure):
    """One textual decoded instruction as returned by the decode API."""
    _fields_ = [
        ('mnemonic', _WString),
        ('operands', _WString),
        ('instructionHex', _WString),
        ('size', c_uint),          # instruction length in bytes
        ('offset', _OffsetType),   # address of the instruction
    ]
# _OperandType enum
_OperandType = c_ubyte
# Operand kinds stored in _Operand.type (mirror the C _OperandType enum).
O_NONE = 0
O_REG = 1
O_IMM = 2
O_IMM1 = 3
O_IMM2 = 4
O_DISP = 5
O_SMEM = 6
O_MEM = 7
O_PC = 8
O_PTR = 9
class _Operand (Structure):
_fields_ = [
('type', c_ubyte), # of type _OperandType
('index', c_ubyte),
('size', c_uint16),
]
class _ex (Structure):
_fields_ = [
('i1', c_uint32),
('i2', c_uint32),
]
class _ptr (Structure):
_fields_ = [
('seg', c_uint16),
('off', c_uint32),
]
class _Value (Union):
    """All interpretations of an instruction's immediate value.

    Mirrors diStorm's _Value union; which member is meaningful depends
    on the decoded operand's type and size.
    """
    _fields_ = [
        ('sbyte', c_byte),
        ('byte', c_ubyte),
        ('sword', c_int16),
        ('word', c_uint16),
        ('sdword', c_int32),
        ('dword', c_uint32),
        ('sqword', c_int64),
        ('qword', c_uint64),
        ('addr', _OffsetType),
        ('ptr', _ptr),   # far pointer (segment:offset)
        ('ex', _ex),     # pair of auxiliary 32-bit values
    ]
class _DInst (Structure):
    """One decomposed instruction (mirrors diStorm's _DInst struct).

    Field layout must match the C definition in distorm.h exactly;
    do not reorder or retype members.
    """
    _fields_ = [
        ('imm', _Value),
        ('disp', c_uint64), # displacement. size is according to dispSize
        ('addr', _OffsetType),
        ('flags', c_uint16), # -1 if invalid. See C headers for more info
        ('unusedPrefixesMask', c_uint16),
        ('usedRegistersMask', c_uint32), # used registers mask.
        ('opcode', c_uint16), # look up in opcode table
        ('ops', _Operand*4),  # up to four operands per instruction
        ('size', c_ubyte),    # instruction length in bytes
        ('segment', c_ubyte), # -1 if unused. See C headers for more info
        ('base', c_ubyte), # base register for indirections
        ('scale', c_ubyte), # ignore for values 0, 1 (other valid values - 2,4,8)
        ('dispSize', c_ubyte),
        ('meta', c_ubyte), # meta flags - instruction set class, etc. See C headers again...
        ('modifiedFlagsMask', c_uint16), # CPU modified (output) flags by instruction.
        ('testedFlagsMask', c_uint16), # CPU tested (input) flags by instruction.
        ('undefinedFlagsMask', c_uint16) # CPU undefined flags by instruction.
    ]
#==============================================================================
# diStorm Python interface

# Decoding modes, passed to the decode/decompose APIs.
Decode16Bits = 0 # 80286 decoding
Decode32Bits = 1 # IA-32 decoding
Decode64Bits = 2 # AMD64 decoding
OffsetTypeSize = sizeof(_OffsetType)  # size in bytes of a code offset (4 or 8)
Mnemonics = {0x669: "SLDT", 0x62: "POPA", 0x8ee: "UNPCKHPS", 0x115: "POPF", 0x11b9: "CMPLTSS",
0x85f: "VMOVSD", 0x789: "PFPNACC", 0xb14: "VMOVMSKPD", 0x737: "INVLPGA", 0x8f8: "UNPCKHPD",
0x270: "SYSEXIT", 0x7b2: "PFSUB", 0x1208: "CMPLTSD", 0x1a5d: "VPMULHUW", 0x1d35: "VPHSUBSW",
0x12b2: "VCMPNGEPS", 0x857: "VMOVSS", 0x6f: "ARPL", 0x52a: "FICOMP", 0x162: "RETF",
0x44d: "FCHS", 0x1124: "CMPLEPS", 0xef2: "PUNPCKHDQ", 0x2401: "VAESDEC", 0x5ee: "FUCOM",
0x129a: "VCMPORDPS", 0x19ab: "PSUBUSW", 0x1b45: "PXOR", 0x1e0f: "VPABSB", 0x24a: "WRMSR",
0x12a5: "VCMPEQ_UQPS", 0x22b6: "VFMADDSUB231PD", 0x7c9: "PFMAX", 0x16cd: "VCMPNEQ_OSSS",
0x2244: "VFNMADD213PD", 0x3b8: "MOVNTI", 0x7c0: "PFCMPGT", 0x236a: "VFNMADD231SS",
0x2450: "ROUNDPD", 0x12ed: "VCMPGTPS", 0xb9f: "VRCPSS", 0x213a: "VFNMADD132SS",
0x1444: "VCMPNGEPD", 0x220f: "VFMSUB213PD", 0x185f: "VCMPNEQ_OSSD", 0x267f: "VPSLLDQ",
0x792: "PFCMPGE", 0x147f: "VCMPGTPD", 0x1a83: "CVTDQ2PD", 0x1211: "CMPLESD",
0xae: "JNS", 0xdd8: "VDIVSD", 0xb7: "JNP", 0x2508: "EXTRACTPS", 0x1f43: "PMOVZXBQ",
0x9c: "JNZ", 0x5d8: "FCOMI", 0xee6: "VPUNPCKHWD", 0x1f2e: "PMOVZXBD", 0x1aca: "VMOVNTDQ",
0x1e74: "PMOVSXWD", 0x10f2: "POPCNT", 0x8a: "JNO", 0x1c8f: "FNSAVE", 0x1a5: "LOOP",
0xb09: "VMOVMSKPS", 0x46b: "FLDL2T", 0x12d: "CMPS", 0x408: "FSUB", 0xda4: "DIVPS",
0x1d1b: "PHSUBD", 0x11b0: "CMPEQSS", 0x1e7: "CMC", 0xcff: "CVTTPS2DQ", 0xdab: "DIVPD",
0xf5c: "VMOVD", 0x104: "CALL FAR", 0x1d72: "PMULHRSW", 0x1d7c: "VPMULHRSW",
0x1d0a: "PHSUBW", 0x11ff: "CMPEQSD", 0x3b2: "XADD", 0x2ae: "CMOVBE", 0x47: "CMP",
0x24: "SBB", 0x106e: "VHADDPS", 0x26ad: "FXRSTOR64", 0x2064: "INVVPID", 0x20f: "LSL",
0x165d: "VCMPNEQ_USSS", 0x1065: "VHADDPD", 0x38b: "LSS", 0x20f7: "VFMSUB132PD",
0x121: "LAHF", 0x7ec: "PFACC", 0x803: "PFRCPIT2", 0xe27: "VPUNPCKLBW", 0x7d0: "PFRCPIT1",
0x1f97: "PCMPGTQ", 0x49f: "FYL2X", 0x1819: "VCMPORD_SSD", 0x1933: "PSRLD",
0x10e1: "SFENCE", 0xcf5: "CVTPS2DQ", 0x24af: "PBLENDW", 0x21ae: "VFMSUBADD213PS",
0x2542: "VPINSRB", 0xe76: "PCMPGTB", 0xe9c: "PCMPGTD", 0x23d7: "VAESENC", 0x957: "VMOVSHDUP",
0x259c: "MPSADBW", 0x14e7: "VCMPNLE_UQPD", 0x70a: "VMMCALL", 0x102f: "INSERTQ",
0x2252: "VFNMADD213SS", 0x9bf: "CVTPI2PD", 0x16f: "INT", 0x1d87: "VPERMILPS",
0x1e2: "HLT", 0x2043: "PHMINPOSUW", 0x5b1: "FCMOVNU", 0x206d: "INVPCID", 0x7b: "INS",
0x647: "FCOMIP", 0x9b5: "CVTPI2PS", 0x2260: "VFNMADD213SD", 0xeaf: "PACKUSWB",
0xe4: "CBW", 0x71b: "VMSAVE", 0x10e: "PUSHF", 0x64f: "NOT", 0x595: "FCMOVNB",
0x245: "NOP", 0x4e8: "FSQRT", 0x1d92: "VPERMILPD", 0x51: "INC", 0x239: "UD2",
0xfe7: "VPCMPEQW", 0x2615: "PCMPISTRM", 0x1ecd: "VPCMPEQQ", 0x114d: "CMPNLEPS",
0x1826: "VCMPEQ_USSD", 0x13fe: "VCMPUNORDPD", 0x5fd: "FADDP", 0x145: "RET",
0xffa: "VPCMPEQD", 0x1fc3: "VPMINSD", 0xfd4: "VPCMPEQB", 0x18fa: "ADDSUBPD",
0x22a6: "VFMADDSUB231PS", 0x1694: "VCMPEQ_USSS", 0x1d50: "PSIGNW", 0x1ea8: "VPMOVSXDQ",
0x2007: "VPMAXSD", 0x35b: "SETG", 0x1ff6: "VPMAXSB", 0x327: "SETA", 0x306: "SETB",
0x26df: "STMXCSR", 0x347: "SETL", 0x20ea: "VFMSUB132PS", 0x2f9: "SETO", 0xbcd: "ANDNPD",
0x1106: "BSR", 0x8ba: "VMOVDDUP", 0x1b3c: "VPMAXSW", 0x1d61: "PSIGND", 0x33a: "SETP",
0x1d3f: "PSIGNB", 0x395: "LFS", 0x32d: "SETS", 0x1590: "VCMPUNORDSS", 0xbc5: "ANDNPS",
0x2716: "VMXON", 0xbb5: "VANDPS", 0x6f3: "XSETBV", 0x1c3: "OUT", 0x67a: "LTR",
0x2570: "VPINSRD", 0x10ff: "TZCNT", 0xa57: "VCVTTSS2SI", 0x266e: "VPSRLDQ",
0x4c6: "FDECSTP", 0x2666: "PSRLDQ", 0x186d: "VCMPGE_OQSD", 0x2677: "PSLLDQ",
0x50f: "FCOS", 0x4b5: "FXTRACT", 0x16db: "VCMPGE_OQSS", 0x1ee1: "VMOVNTDQA",
0x151d: "VCMPNGT_UQPD", 0x3f5: "FMUL", 0x13c4: "VCMPGT_OQPS", 0x60b: "FCOMPP",
0x77a: "PF2ID", 0xf5: "CWD", 0x132a: "VCMPUNORD_SPS", 0x2ea: "CMOVLE", 0xfb7: "VPSHUFHW",
0x1556: "VCMPGT_OQPD", 0x1ce0: "PHADDSW", 0x773: "PF2IW", 0xa21: "VMOVNTPD",
0x401: "FCOMP", 0x8c4: "UNPCKLPS", 0x1bcf: "MASKMOVDQU", 0x560: "FCMOVBE",
0x14a2: "VCMPLT_OQPD", 0xe14: "VMAXSD", 0x1416: "VCMPNLTPD", 0x987: "PREFETCHT2",
0x97b: "PREFETCHT1", 0x96f: "PREFETCHT0", 0x8ce: "UNPCKLPD", 0xa41: "CVTTSS2SI",
0x65e: "DIV", 0x1e9e: "PMOVSXDQ", 0x1607: "VCMPGESS", 0xef: "CDQE", 0x26f2: "VSTMXCSR",
0x539: "FISUBR", 0x1fb2: "VPMINSB", 0x2202: "VFMSUB213PS", 0x1310: "VCMPLT_OQPS",
0x11c2: "CMPLESS", 0x1afe: "VPMINSW", 0x1c5a: "FSTENV", 0x1799: "VCMPGESD",
0x1dd4: "VPTEST", 0x532: "FISUB", 0x205: "STD", 0xf13: "VPACKSSDW", 0x3d: "XOR",
0xc7f: "VMULPD", 0x1f1: "STC", 0x1fb: "STI", 0x26c2: "LDMXCSR", 0x116a: "CMPLTPD",
0xbe7: "ORPS", 0x1ef6: "VPACKUSDW", 0x61b: "FSUBP", 0x66f: "STR", 0x40e: "FSUBR",
0x111b: "CMPLTPS", 0x230d: "VFMADD231SD", 0x2725: "PAUSE", 0x1a8d: "CVTPD2DQ",
0x372: "RSM", 0xb5a: "VSQRTSD", 0xbf3: "VORPS", 0x218e: "VFMADDSUB213PS", 0x23cf: "AESENC",
0x1437: "VCMPEQ_UQPD", 0x902: "VUNPCKHPS", 0x1cf3: "PMADDUBSW", 0x1355: "VCMPNLE_UQPS",
0x1b68: "VPSLLW", 0x1bc5: "MASKMOVQ", 0x1c8: "CALL", 0xb51: "VSQRTSS", 0x19dc: "PADDUSB",
0x1020: "VMREAD", 0x10d5: "XSAVEOPT64", 0x90d: "VUNPCKHPD", 0xd48: "VSUBPS",
0xcd5: "VCVTSS2SD", 0x2416: "VAESDECLAST", 0x107f: "HSUBPS", 0xa97: "VCVTSS2SI",
0x25dc: "VPBLENDVB", 0x17a3: "VCMPGTSD", 0x57a: "FILD", 0xae3: "VCOMISS", 0x1077: "HSUBPD",
0x23a2: "VFNMSUB231SS", 0x1a3d: "VPSRAD", 0x128f: "VCMPNLEPS", 0x3e5: "SAL",
0x214: "SYSCALL", 0xb7f: "VRSQRTSS", 0x2579: "VPINSRQ", 0x26e8: "WRGSBASE",
0xfae: "VPSHUFD", 0x1e35: "PMOVSXBW", 0x1a2e: "VPSRAW", 0x1421: "VCMPNLEPD",
0x3ef: "FADD", 0x3ea: "SAR", 0x1ab9: "MOVNTQ", 0x2643: "AESKEYGENASSIST", 0xf09: "PACKSSDW",
0x21e8: "VFMADD213SS", 0xf7a: "VMOVDQA", 0x8af: "VMOVSLDUP", 0x4f8: "FRNDINT",
0x1960: "PMULLW", 0xdb9: "DIVSD", 0xaf5: "MOVMSKPS", 0x2018: "VPMAXUW", 0xdc8: "VDIVPD",
0x1e3f: "VPMOVSXBW", 0x1e89: "PMOVSXWQ", 0x2032: "PMULLD", 0xf83: "VMOVDQU",
0x2298: "VFNMSUB213SD", 0x297: "CMOVAE", 0x1495: "VCMPEQ_OSPD", 0xdc0: "VDIVPS",
0x93: "JAE", 0xaff: "MOVMSKPD", 0xdb2: "DIVSS", 0x1c97: "FSAVE", 0x1ec4: "PCMPEQQ",
0xfc1: "VPSHUFLW", 0xfde: "PCMPEQW", 0x26d5: "VLDMXCSR", 0x2104: "VFMSUB132SS",
0x11a6: "CMPORDPD", 0xb90: "RCPSS", 0x1b77: "VPSLLD", 0x663: "IDIV", 0x142c: "VCMPORDPD",
0xfcb: "PCMPEQB", 0xff1: "PCMPEQD", 0x1b86: "VPSLLQ", 0x1f4d: "VPMOVZXBQ",
0x21be: "VFMSUBADD213PD", 0x25d1: "VBLENDVPD", 0x1157: "CMPORDPS", 0xf1e: "PUNPCKLQDQ",
0x19d5: "VPAND", 0x1467: "VCMPNEQ_OQPD", 0x1055: "HADDPD", 0x1919: "VADDSUBPS",
0x18d1: "VSHUFPD", 0xd60: "VSUBSD", 0xb3f: "VSQRTPS", 0x931: "MOVSHDUP", 0x2378: "VFNMADD231SD",
0x6bf: "VMLAUNCH", 0x1f0d: "VMASKMOVPD", 0x105d: "HADDPS", 0x12d5: "VCMPNEQ_OQPS",
0xe33: "PUNPCKLWD", 0x16af: "VCMPNGT_UQSS", 0xb48: "VSQRTPD", 0xd58: "VSUBSS",
0x18c8: "VSHUFPS", 0x159d: "VCMPNEQSS", 0x1b59: "VLDDQU", 0x1634: "VCMPLT_OQSS",
0x26fc: "RDRAND", 0x1b23: "PADDSW", 0x1370: "VCMPEQ_USPS", 0xbed: "ORPD", 0x1a09: "PANDN",
0x4a6: "FPTAN", 0x541: "FIDIV", 0x17c6: "VCMPLT_OQSD", 0x2704: "VMPTRLD", 0x231a: "VFMSUB231PS",
0x172f: "VCMPNEQSD", 0x1ebb: "VPMULDQ", 0x196: "LOOPNZ", 0x126c: "VCMPUNORDPS",
0x3e0: "SHR", 0x37c: "SHRD", 0x6db: "MONITOR", 0x23e0: "AESENCLAST", 0x83e: "MOVSD",
0x189e: "VPINSRW", 0x713: "VMLOAD", 0x918: "MOVLHPS", 0x8a6: "VMOVLPD", 0x1971: "MOVQ2DQ",
0xb2f: "SQRTSS", 0x2588: "VDPPS", 0xd3a: "SUBSS", 0x3ab: "MOVSX", 0x93b: "VMOVLHPS",
0x89d: "VMOVLPS", 0xefd: "VPUNPCKHDQ", 0x1aae: "VCVTPD2DQ", 0x3db: "SHL", 0x837: "MOVSS",
0x2568: "PINSRQ", 0x781: "PFNACC", 0xf72: "MOVDQU", 0x80: "OUTS", 0x1be8: "PSUBB",
0x377: "BTS", 0x390: "BTR", 0x17ef: "VCMPNEQ_USSD", 0x68b: "SGDT", 0x2300: "VFMADD231SS",
0x501: "FSCALE", 0x1bf7: "PSUBW", 0x1192: "CMPNLTPD", 0x1eec: "PACKUSDW", 0x20a: "LAR",
0x3a6: "BTC", 0x2148: "VFNMADD132SD", 0x144f: "VCMPNGTPD", 0x1f23: "VPMOVZXBW",
0x2111: "VFMSUB132SD", 0x23be: "AESIMC", 0x3fb: "FCOM", 0x1f38: "VPMOVZXBD",
0x190e: "VADDSUBPD", 0x1c88: "FINIT", 0x11f5: "CMPORDSS", 0x231: "WBINVD",
0x19cf: "PAND", 0x24cb: "VPALIGNR", 0x1244: "CMPORDSD", 0x1b4b: "VPXOR", 0xa1: "JBE",
0x45f: "FXAM", 0x10cb: "XSAVEOPT", 0x659: "MUL", 0x19c6: "VPMINUB", 0x1b2b: "VPADDSW",
0x1b34: "PMAXSW", 0x2555: "VINSERTPS", 0x13e0: "VCMPEQPD", 0x5e7: "FFREE",
0x1f01: "VMASKMOVPS", 0x18da: "CMPXCHG8B", 0x1fff: "PMAXSD", 0x1b1a: "VPADDSB",
0x10: "PUSH", 0x25ba: "VPCLMULQDQ", 0x124e: "VCMPEQPS", 0x7da: "PFRSQIT1",
0x243d: "ROUNDPS", 0x2ff: "SETNO", 0x6eb: "XGETBV", 0x1fbb: "PMINSD", 0x1c24: "PADDB",
0x4be: "FPREM1", 0x200: "CLD", 0x51c: "FIMUL", 0xc08: "XORPD", 0x1ec: "CLC",
0x42c: "FSTP", 0x249c: "BLENDPD", 0x19ef: "PADDUSW", 0x1c80: "FNINIT", 0x319: "SETNZ",
0x1951: "PADDQ", 0xc01: "XORPS", 0x228a: | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.3
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 🔄 Online learning in non-stationary environments 🔄
#
# We reproduce the empirical results of [1].
#
# ## References
#
# [1] [<NAME>., <NAME>., <NAME>. and <NAME>., 2013, June. Online learning for time series prediction. In Conference on learning theory (pp. 172-184)](https://arxiv.org/pdf/1302.6927.pdf)
#
#
# %pylab inline
# %load_ext autoreload
# %autoreload 2
# +
from typing import Any, NamedTuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from tqdm.auto import tqdm
from wax.modules import ARMA, SNARIMAX, GymFeedback, OnlineOptimizer, UpdateParams, VMap
from wax.modules.lag import tree_lag
from wax.modules.vmap import add_batch
from wax.optim import newton
from wax.unroll import unroll_transform_with_state
# + tags=["parameters"]
# Notebook parameter cell (jupytext/papermill "parameters" tag).
T = 10000                # presumably the simulation length in time steps — confirm with usage
N_BATCH = 20             # presumably the number of batched runs — confirm with usage
N_STEP_SIZE = 30         # size of the step-size grid for first-order optimizers
N_STEP_SIZE_NEWTON = 10  # size of the step-size grid for the newton optimizer
N_EPS = 5                # presumably the size of the epsilon grid — confirm with usage
# -
# ## Agent

# First-order optimizers compared in the hyper-parameter scans below.
OPTIMIZERS = [optax.sgd, optax.adagrad, optax.rmsprop, optax.adam]
# +
from optax._src.base import OptState
def build_agent(time_series_model=None, opt=None):
    """Build a learn-and-forecast agent usable inside ``GymFeedback``.

    Args:
        time_series_model: callable ``(y, X) -> (y_pred, info)``;
            defaults to ``SNARIMAX(10)``.
        opt: optax optimizer; defaults to ``optax.sgd(1.0e-3)``.

    Returns:
        ``agent(obs) -> (y_pred, AgentInfo)`` where ``obs`` is either
        ``y`` alone or a ``(y, X)`` tuple.
    """
    if time_series_model is None:
        time_series_model = lambda y, X: SNARIMAX(10)(y, X)

    if opt is None:
        opt = optax.sgd(1.0e-3)

    # Named outputs of the agent: optimizer state/diagnostics plus the
    # forecast model's own info.
    class AgentInfo(NamedTuple):
        optim: Any
        forecast: Any

    class ModelWithLossInfo(NamedTuple):
        pred: Any
        loss: Any

    def agent(obs):
        # obs may be the target series alone or (target, exogenous).
        if isinstance(obs, tuple):
            y, X = obs
        else:
            y = obs
            X = None

        def evaluate(y_pred, y):
            # Squared error of the forecast.
            return jnp.linalg.norm(y_pred - y) ** 2, {}

        def model_with_loss(y, X=None):
            # predict with lagged data
            y_pred, pred_info = time_series_model(*tree_lag(1)(y, X))

            # evaluate loss with actual data
            loss, loss_info = evaluate(y_pred, y)

            return loss, ModelWithLossInfo(pred_info, loss_info)

        def project_params(params: Any, opt_state: OptState = None):
            # Keep weights in [-1, 1] after each optimizer step.
            # NOTE(review): jax.tree_map is deprecated in newer JAX
            # releases (use jax.tree_util.tree_map / jax.tree.map).
            del opt_state
            return jax.tree_map(lambda w: jnp.clip(w, -1, 1), params)

        def split_params(params):
            # Only the linear weights of the SNARIMAX module are trained.
            def filter_params(m, n, p):
                # print(m, n, p)
                return m.endswith("snarimax/~/linear") and n == "w"

            return hk.data_structures.partition(filter_params, params)

        def learn_and_forecast(y, X=None):
            # use lagged data for the optimizer
            opt_info = OnlineOptimizer(
                model_with_loss,
                opt,
                project_params=project_params,
                split_params=split_params,
                return_params=True,
            )(*tree_lag(1)(y, X))

            # use updated params to forecast with actual data
            predict_params = opt_info.params

            y_pred, forecast_info = UpdateParams(time_series_model)(
                predict_params, y, X
            )
            return y_pred, AgentInfo(opt_info, forecast_info)

        return learn_and_forecast(y, X)

    return agent
# -
# ## Non-stationary environments
#
# We will now wrap up the study of an environment + agent in a few analysis functions.
#
# We will then use them to perform the same analysis in the non-stationary setting proposed in [1], namely:
#
# * setting 1 : sanity check (stationary ARMA environment).
# * setting 2 : slowly varying parameters.
# * setting 3 : brutal variation of parameters.
# * setting 4 : non-stationary (random walk) noise.
# ## Analysis functions
# For each solver, we will select the best hyper parameters (step size $\eta$, $\epsilon$)
# by measuring the average loss between the 5000 and 10000 steps.
# ### First order solvers
#
#
#
#
#
def scan_hparams_first_order():
    """Scan the step-size grid for every first-order optimizer.

    For each optimizer in OPTIMIZERS, unrolls GymFeedback over a batch of
    noise trajectories for every step size in a log-spaced grid, then keeps
    the step size minimising the mean loss over LEARN_TIME_SLICE.

    Returns:
        (BEST_STEP_SIZE, BEST_GYM): best step size and the gym output of the
        corresponding run, both keyed by optimizer name.
    """
    STEP_SIZE_idx = pd.Index(onp.logspace(-4, 1, N_STEP_SIZE), name="step_size")
    STEP_SIZE = jax.device_put(STEP_SIZE_idx.values)
    rng = jax.random.PRNGKey(42)
    eps = sample_noise(rng)
    res = {}
    for optimizer in tqdm(OPTIMIZERS):
        def gym_loop_scan_hparams(eps):
            def scan_params(step_size):
                return GymFeedback(build_agent(opt=optimizer(step_size)), env)(eps)
            return VMap(scan_params)(STEP_SIZE)
        sim = unroll_transform_with_state(add_batch(gym_loop_scan_hparams))
        params, state = sim.init(rng, eps)
        _res, state = sim.apply(params, state, rng, eps)
        res[optimizer.__name__] = _res
    ax = None
    BEST_STEP_SIZE = {}
    BEST_GYM = {}
    for name, (gym, info) in res.items():
        # Label columns with the pandas index so idxmin/plots carry step sizes.
        loss = (
            pd.DataFrame(-gym.reward, columns=STEP_SIZE_idx)
            .iloc[LEARN_TIME_SLICE]
            .mean()
        )
        BEST_STEP_SIZE[name] = loss.idxmin()
        # Pick the best column consistently with idxmin (NaN-safe): argmax over
        # raw rewards is not robust when a diverged run produced NaN losses.
        best_idx = int(onp.nanargmin(loss.values))
        BEST_GYM[name] = jax.tree_map(lambda x: x[:, best_idx], gym)
        ax = loss.plot(
            logx=True, logy=False, ax=ax, label=name, ylim=(MIN_ERR, MAX_ERR)
        )
    plt.legend(bbox_to_anchor=(1.0, 1.0))
    return BEST_STEP_SIZE, BEST_GYM
# We will "cross-validate" the result by running the agent on new samples.
# Fresh PRNG key (different from the training key 42) for cross-validation runs.
CROSS_VAL_RNG = jax.random.PRNGKey(44)
# Shared palette so each optimizer keeps its color across all figures.
COLORS = sns.color_palette("hls")
def cross_validate_first_order(BEST_STEP_SIZE, BEST_GYM):
    """Re-run each optimizer with its selected step size on fresh noise.

    Plots the expanding mean loss of the training run (dashed) against the
    validation run (solid) for every optimizer.

    Args:
        BEST_STEP_SIZE: best step size per optimizer name.
        BEST_GYM: gym output of the best training run per optimizer name.

    Returns:
        CROSS_VAL_GYM: validation gym output keyed by optimizer name.
    """
    plt.figure()
    eps = sample_noise(CROSS_VAL_RNG)
    CROSS_VAL_GYM = {}
    ax = None

    def measure(reward):
        # Expanding mean of the loss (reward is the negative loss).
        return pd.Series(-reward).expanding().mean()

    # NOTE: raw f-strings below — "\e" in "$\eta$" is an invalid escape
    # sequence in a normal string literal (SyntaxWarning on recent Pythons).
    for i, (name, gym) in enumerate(BEST_GYM.items()):
        ax = measure(gym.reward).plot(
            ax=ax,
            color=COLORS[i],
            label=rf"(TRAIN) - {name} - $\eta$={BEST_STEP_SIZE[name]:.2e}",
            style="--",
        )
    for i, optimizer in enumerate(tqdm(OPTIMIZERS)):
        name = optimizer.__name__

        def gym_loop(eps):
            return GymFeedback(build_agent(opt=optimizer(BEST_STEP_SIZE[name])), env)(
                eps
            )

        sim = unroll_transform_with_state(add_batch(gym_loop))
        rng = jax.random.PRNGKey(42)
        params, state = sim.init(rng, eps)
        (gym, info), state = sim.apply(params, state, rng, eps)
        CROSS_VAL_GYM[name] = gym
        ax = measure(gym.reward).plot(
            ax=ax,
            color=COLORS[i],
            ylim=(MIN_ERR, MAX_ERR),
            label=rf"(VALIDATE) - {name} - $\eta$={BEST_STEP_SIZE[name]:.2e}",
        )
    plt.legend(bbox_to_anchor=(1.0, 1.0))
    return CROSS_VAL_GYM
# ### Newton solver
#
#
#
#
#
def scan_hparams_newton():
    """Scan the (step_size, eps) grid for the Newton optimizer.

    Unrolls GymFeedback for every (step_size, eps) pair of a log-spaced grid,
    shows a heatmap of the mean losses over LEARN_TIME_SLICE and selects the
    best pair.

    Returns:
        ((STEP_SIZE, NEWTON_EPS), BEST_NEWTON_GYM): the best hyper-parameters
        and the gym output of the corresponding run.
    """
    STEP_SIZE = pd.Index(onp.logspace(-2, 3, N_STEP_SIZE_NEWTON), name="step_size")
    EPS = pd.Index(onp.logspace(-4, 3, N_EPS), name="eps")
    HPARAMS_idx = pd.MultiIndex.from_product([STEP_SIZE, EPS])
    HPARAMS = jnp.stack(list(map(onp.array, HPARAMS_idx)))

    @add_batch
    def gym_loop_scan_hparams(eps):
        def scan_params(hparams):
            step_size, newton_eps = hparams
            agent = build_agent(opt=newton(step_size, eps=newton_eps))
            return GymFeedback(agent, env)(eps)
        return VMap(scan_params)(HPARAMS)

    sim = unroll_transform_with_state(gym_loop_scan_hparams)
    rng = jax.random.PRNGKey(42)
    eps = sample_noise(rng)
    params, state = sim.init(rng, eps)
    res_newton, state = sim.apply(params, state, rng, eps)
    gym_newton, info_newton = res_newton
    loss_newton = (
        pd.DataFrame(-gym_newton.reward, columns=HPARAMS_idx)
        .iloc[LEARN_TIME_SLICE]
        .mean()
        .unstack()
    )
    sns.heatmap(loss_newton[loss_newton < 0.4], annot=True, cmap="YlGnBu")
    STEP_SIZE, NEWTON_EPS = loss_newton.stack().idxmin()
    x = -gym_newton.reward[LEARN_TIME_SLICE].mean(axis=0)
    # jax.ops.index_update was removed from JAX; use jnp.where to mask NaN
    # losses so diverged runs can never win the argmin.
    x = jnp.where(jnp.isnan(x), jnp.inf, x)
    I_BEST_PARAM = jnp.argmin(x)
    BEST_NEWTON_GYM = jax.tree_map(lambda leaf: leaf[:, I_BEST_PARAM], gym_newton)
    print("Best newton parameters: ", STEP_SIZE, NEWTON_EPS)
    return (STEP_SIZE, NEWTON_EPS), BEST_NEWTON_GYM
def cross_validate_newton(BEST_HPARAMS, BEST_NEWTON_GYM):
    """Re-run the Newton agent with its best hyper-parameters on fresh noise.

    Plots the expanding mean loss of the training run (dashed) against the
    validation run (solid).

    Args:
        BEST_HPARAMS: (step_size, newton_eps) pair selected by the scan.
        BEST_NEWTON_GYM: gym output of the best training run.

    Returns:
        gym: validation gym output.
    """
    (STEP_SIZE, NEWTON_EPS) = BEST_HPARAMS
    plt.figure()

    def measure(reward):
        # Expanding mean of the loss (reward is the negative loss).
        return pd.Series(-reward).expanding().mean()

    @add_batch
    def gym_loop(eps):
        agent = build_agent(opt=newton(STEP_SIZE, eps=NEWTON_EPS))
        return GymFeedback(agent, env)(eps)

    sim = unroll_transform_with_state(gym_loop)
    rng = jax.random.PRNGKey(44)
    eps = sample_noise(rng)
    params, state = sim.init(rng, eps)
    (gym, info), state = sim.apply(params, state, rng, eps)
    ax = None
    i = 4  # color slot reserved for Newton in the shared palette
    # NOTE: raw f-strings below — "\e" in "$\eta$"/"$\epsilon$" is an invalid
    # escape sequence in a normal string literal.
    ax = measure(BEST_NEWTON_GYM.reward).plot(
        ax=ax,
        color=COLORS[i],
        label=rf"(TRAIN) - Newton - $\eta$={STEP_SIZE:.2e}, $\epsilon$={NEWTON_EPS:.2e}",
        ylim=(MIN_ERR, MAX_ERR),
        style="--",
    )
    ax = measure(gym.reward).plot(
        ax=ax,
        color=COLORS[i],
        ylim=(MIN_ERR, MAX_ERR),
        label=rf"(VALIDATE) - Newton - $\eta$={STEP_SIZE:.2e}, $\epsilon$={NEWTON_EPS:.2e}",
    )
    ax.legend(bbox_to_anchor=(1.0, 1.0))
    ax.plot()
    return gym
# ### Plot everything
#
#
#
#
#
def plot_everything(BEST_STEP_SIZE, BEST_GYM, BEST_HPARAMS, BEST_NEWTON_GYM):
    """Plot the training loss of every tuned optimizer, twice.

    One figure uses a rolling mean (window T/2), the other an expanding mean.

    Args:
        BEST_STEP_SIZE: best step size per first-order optimizer name.
        BEST_GYM: best training gym output per first-order optimizer name.
        BEST_HPARAMS: (step_size, newton_eps) pair for the Newton optimizer.
        BEST_NEWTON_GYM: best training gym output of the Newton optimizer.
    """
    MEASURES = []

    def rolling_measure(reward):
        return pd.Series(-reward).rolling(int(T / 2), min_periods=int(T / 2)).mean()

    MEASURES.append(("Rolling mean of loss (5000) time-steps", rolling_measure))

    def expanding_measure(reward):
        return pd.Series(-reward).expanding().mean()

    MEASURES.append(("Expanding means", expanding_measure))
    for measure_name, measure_func in MEASURES:
        plt.figure()
        # NOTE: raw f-strings below — "\e" in "$\eta$"/"$\epsilon$" is an
        # invalid escape sequence in a normal string literal.
        for i, (name, gym) in enumerate(BEST_GYM.items()):
            measure_func(gym.reward).plot(
                label=rf"{name} - $\eta$={BEST_STEP_SIZE[name]:.2e}",
                ylim=(MIN_ERR, MAX_ERR),
                color=COLORS[i],
            )
        i = 4  # color slot reserved for Newton in the shared palette
        (STEP_SIZE, NEWTON_EPS) = BEST_HPARAMS
        gym = BEST_NEWTON_GYM
        measure_func(gym.reward).plot(
            label=rf"Newton - $\eta$={STEP_SIZE:.2e}, $\epsilon$={NEWTON_EPS:.2e}",
            ylim=(MIN_ERR, MAX_ERR),
            color=COLORS[i],
        )
        plt.legend(bbox_to_anchor=(1.0, 1.0))
        plt.title(measure_name)
# ## Setting 1
# ### Environment
# let's wrapup the results for the "setting 1" in [1]
# +
from wax.modules import Counter
def build_env():
    """Return the stationary ARMA environment of setting 1 in [1].

    The returned ``env(action, obs)`` takes the agent's prediction and a
    fresh noise innovation, advances an ARMA(5, 2) recursion with fixed
    coefficients by one step, and pays the negative squared prediction
    error as reward. The new observation is the realised value of y.
    """
    def env(action, obs):
        prediction = action
        innovation = obs
        # Fixed ARMA(5, 2) coefficients: stationary regime.
        y = ARMA(
            jnp.array([0.6, -0.5, 0.4, -0.4, 0.3]),
            jnp.array([0.3, -0.2]),
        )(innovation)
        reward = -((y - prediction) ** 2)
        return reward, y, {"y": y, "y_pred": prediction}
    return env
def sample_noise(rng):
    """Draw the innovation sequence for setting 1: i.i.d. Gaussian, std 0.3.

    Args:
        rng: a ``jax.random.PRNGKey``.

    Returns:
        Array of shape ``(T, N_BATCH)`` of noise innovations.
    """
    # Use the N_BATCH parameter instead of the previously hard-coded 20
    # (identical behavior with the default N_BATCH = 20).
    return jax.random.normal(rng, (T, N_BATCH)) * 0.3
# Plot bounds for the loss curves; 0.09 = 0.3**2, the innovation variance
# (the irreducible error floor for this noise level).
MIN_ERR = 0.09
MAX_ERR = 0.15
# Hyper-parameters are selected on the mean loss over the second half of the run.
LEARN_TIME_SLICE = slice(int(T / 2), T)
env = build_env()
# -
# Full study for setting 1: scan hyper-parameters, cross-validate on fresh
# noise, then plot train/validation loss curves for every optimizer.
BEST_STEP_SIZE, BEST_GYM = scan_hparams_first_order()
CROSS_VAL_GYM = cross_validate_first_order(BEST_STEP_SIZE, BEST_GYM)
BEST_HPARAMS, BEST_NEWTON_GYM = scan_hparams_newton()
CROSS_VAL_GYM = cross_validate_newton(BEST_HPARAMS, BEST_NEWTON_GYM)
plot_everything(BEST_STEP_SIZE, BEST_GYM, BEST_HPARAMS, BEST_NEWTON_GYM)
# ### Conclusions
#
# - The NEWTON and ADAGRAD optimizers are the faster to converge.
# - The SGD and ADAM optimizers have the worst performance.
# ### Fixed setting
# +
@add_batch
def gym_loop_newton(eps):
    """Gym loop with fixed Newton hyper-parameters (step size 0.1, eps 0.3)."""
    agent = build_agent(opt=newton(0.1, eps=0.3))
    return GymFeedback(agent, env)(eps)
def run_fixed_setting():
    """Unroll the fixed Newton agent on one noise draw and plot its expanding mean loss."""
    rng = jax.random.PRNGKey(42)
    noise = sample_noise(rng)
    sim = unroll_transform_with_state(gym_loop_newton)
    params, state = sim.init(rng, noise)
    (gym, info), state = sim.apply(params, state, rng, noise)
    pd.Series(-gym.reward).expanding().mean().plot()  # ylim=(MIN_ERR, MAX_ERR))
# -
# %%time
run_fixed_setting()
# ## Setting 2
# ### Environment
# let's build an environment corresponding to "setting 2" in [1]
# +
from wax.modules import Counter
def build_env():
    """Return the slowly varying ARMA environment of setting 2 in [1].

    The AR coefficients are linearly interpolated over the horizon ``T``
    between two fixed coefficient vectors, so the process drifts slowly
    from one regime to the other while the MA part stays fixed.
    """
    def env(action, obs):
        prediction = action
        innovation = obs
        step = Counter()()
        # Linear interpolation between the two AR regimes as step goes 0 -> T.
        ar_end = jnp.array([-0.4, -0.5, 0.4, 0.4, 0.1])
        ar_start = jnp.array([0.6, -0.4, 0.4, -0.5, 0.5])
        ar_coefs = ar_end * step / T + ar_start * (1 - step / T)
        ma_coefs = jnp.array([0.32, -0.2])
        y = ARMA(ar_coefs, ma_coefs)(innovation)
        # The prediction is scored against a fresh observation of y.
        reward = -((y - prediction) ** 2)
        return reward, y, {"y": y, "y_pred": prediction}
    return env
def sample_noise(rng):
    """Draw the innovation sequence for setting 2: i.i.d. Uniform(-0.5, 0.5).

    Args:
        rng: a ``jax.random.PRNGKey``.

    Returns:
        Array of shape ``(T, N_BATCH)`` of noise innovations.
    """
    # Use the N_BATCH parameter instead of the previously hard-coded 20
    # (identical behavior with the default N_BATCH = 20).
    return jax.random.uniform(rng, (T, N_BATCH), minval=-0.5, maxval=0.5)
# Plot bounds; 0.0833 ≈ 1/12, the variance of Uniform(-0.5, 0.5) innovations
# (the irreducible error floor for this noise).
MIN_ERR = 0.0833
MAX_ERR = 0.15
# Hyper-parameters are selected on the mean loss over the second half of the run.
LEARN_TIME_SLICE = slice(int(T / 2), T)
env = build_env()
# -
# Full study for setting 2, re-using the analysis functions defined above
# (they pick up the redefined env / sample_noise / plot bounds).
BEST_STEP_SIZE, BEST_GYM = scan_hparams_first_order()
CROSS_VAL_GYM = cross_validate_first_order(BEST_STEP_SIZE, BEST_GYM)
BEST_HPARAMS, BEST_NEWTON_GYM = scan_hparams_newton()
CROSS_VAL_GYM = cross_validate_newton(BEST_HPARAMS, BEST_NEWTON_GYM)
plot_everything(BEST_STEP_SIZE, BEST_GYM, BEST_HPARAMS, BEST_NEWTON_GYM)
# ### Conclusions
#
# - The NEWTON and ADAGRAD optimizers are more efficient to adapt to slowly changing environments.
# - The SGD and ADAM optimizers seem to have the worst performance.
# ### Fixed setting
# %%time
run_fixed_setting()
# ## Setting 3
# ### Environment
# Let us build an environment corresponding to the "setting 3" of [1].
# We modify it slightly by adding 10000 steps. We intentionally
# use the 5000 to 10000 steps to optimize
<gh_stars>1-10
import six
import types
import itertools
import numpy as np
class Sampling(object):
"""Some useful functions for sampling are defined
Attributes:
input_configuration (dict): store sampling yielders
_backup_input_configuration (dict): copy of self.input_configuration
_dummy_type_tee (itertools.tee): type of itertools.tee
"""
    def __init__(self):
        """Create an empty sampler with no registered yielders."""
        # Active yielders, keyed by user-supplied names.
        self.input_configuration = {}
        # Tee'd backup copies used by reset()/reset_all() to restore yielders.
        self._backup_input_configuration = {}
        # Concrete type of itertools.tee iterators, cached for type checks.
        self._dummy_type_tee = type(self.dummy_type_tee())
@staticmethod
def dummy_type_tee():
"""Give itertools.tee(yielder)[0]
Edited date:
160704
Test:
160704
Returns:
itertools.tee: this is used self.type_generator_or_tee
"""
def dummy():
yield None
copy1, copy2 = itertools.tee(dummy())
return copy2
@staticmethod
def type_generator_or_tee(generator):
"""Check generator is generator or itertools.tee
Edited date:
160704
Test:
160704
Example:
::
p = a.yield_continuous_random_batch_samples(3, 3, 10)
>>> print(self.type_generator_or_tee(p))
True
>>> print(self.type_generator_or_tee(1))
False
Returns:
True if yielder is generator or itertools.tee, False otherwise
"""
if isinstance(generator, types.GeneratorType):
return True
if isinstance(generator, type(Sampling.dummy_type_tee())):
return True
return False
    def keys(self):
        """Return the names of all registered yielders.

        Returns:
            The keys of ``self.input_configuration``.
        """
        return self.input_configuration.keys()
    def set(self, func_yields, keys):
        """Register yielders under the given keys.

        Note:
            Normally you register yielders with :meth:`set` and then call
            :meth:`sample_from` for sampling. Yielders can be restored with
            :meth:`reset`, but be aware that a restored random sampling
            yielder replays exactly the same sequence as before.

        Example:
            ::
                p = a.yield_continuous_random_batch_samples(3, 3, 10)
                self.set(p, 'test')
                for ele in self.sample_from('test'):
                >>> print(ele)
                [1 0 6]
                [2 1 7]
                [3 2 8]
                p1 = a.yield_continuous_random_batch_samples(3, 3, 10)
                p2 = a.yield_continuous_random_batch_samples(3, 3, 10)
                self.set([p1, p2], ['test1', 'test2'])

        Args:
            func_yields Optional([list, generator, tee]): yielder(s) to register
            keys Optional([list, str]): key(s) under which to register them

        Returns:
            True if the yielders were registered, False otherwise
        """
        # if func_yields and keys are stored into list
        if isinstance(func_yields, list) and isinstance(keys, list):
            for func_yield, key in six.moves.zip(func_yields, keys):
                # generator or tee?
                if self.type_generator_or_tee(func_yield):
                    # copy generator by using itertools.tee
                    self.input_configuration[key], self._backup_input_configuration[key] = itertools.tee(func_yield)
            # NOTE(review): non-generator entries in the list are skipped
            # silently, and True is returned even if nothing was registered —
            # confirm this is intended.
            return True
        # if func_yields is just generator or tee
        if self.type_generator_or_tee(func_yields):
            # copy generator by using itertools.tee
            self.input_configuration[keys], self._backup_input_configuration[keys] = itertools.tee(func_yields)
            return True
        return False
def delete_all(self):
"""Delete all self.input_configuration
Edited date:
160704
Test:
160704
Example:
::
p = a.yield_continuous_random_batch_samples(3, 3, 10)
self.set(p, 'test')
self.delete_all()
Returns:
True if all yielders are deleted, False otherwise
"""
self.input_configuration = {}
self._backup_input_configuration = {}
return True
def delete(self, key):
"""Delete self.input_configuration[key]
Edited date:
160704
Test:
160704
Example:
::
p = a.yield_continuous_random_batch_samples(3, 3, 10)
self.set(p, 'test')
self.delete('test')
Returns:
True if the yielder is deleted, False otherwise
"""
if key in self.input_configuration:
self.input_configuration.pop(key)
self._backup_input_configuration.pop(key)
return True
return False
def reset_all(self):
"""Reset all self.input_configuration by self._backup_input_configuration
Edited date:
160704
Test:
160704
Note:
usage is written in Example. Normaly you just set yielders by self.set, then call self.sample_from for sampling. You can reset yielders by self.reset_all, but be aware that resetted random samplig yielders is exactyly same as before.
Example:
::
p = a.yield_continuous_random_batch_samples(3, 3, 10)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[1 0 6]
[2 1 7]
[3 2 8]
for ele in self.sample_from('test'):
>>> print(ele)
self.reset_all()
for ele in self.sample_from('test'):
>>> print(ele)
[1 0 6]
[2 1 7]
[3 2 8]
for ele in self.sample_from('test'):
>>> print(ele)
Returns:
True if successful
"""
for key in self._backup_input_configuration:
self.set(self._backup_input_configuration[key], key)
return True
def reset(self, key):
"""Reset self.input_configuration[key] by self._backup_input_configuration[key]
Edited date:
160704
Test:
160704
Note:
usage is written in Example. Normaly you just set yielders by self.set, then call self.sample_from for sampling. You can reset yielders by self.reset, but be aware that resetted random samplig yielders is exactyly same as before.
Example:
::
p = a.yield_continuous_random_batch_samples(3, 3, 10)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[1 0 6]
[2 1 7]
[3 2 8]
for ele in self.sample_from('test'):
>>> print(ele)
self.reset('test')
for ele in self.sample_from('test'):
>>> print(ele)
[1 0 6]
[2 1 7]
[3 2 8]
for ele in self.sample_from('test'):
>>> print(ele)
Args:
key: key for self.input_configuration
Returns:
True if self.input_configuration[key] is resetted, False otherwise
"""
if key in self._backup_input_configuration:
self.set(self._backup_input_configuration[key], key)
return True
return False
    def sample_from(self, key):
        """Return the yielder registered under *key*.

        Iterate the returned object to draw samples; once it is exhausted,
        use :meth:`reset` or :meth:`reset_all` to restore it from its backup.

        Args:
            key: name of the registered yielder.

        Returns:
            The generator / tee iterator stored under *key*.
        """
        return self.input_configuration[key]
@staticmethod
def yield_batch_samples(end, start=0, stride=1):
"""Yield range(start, end, stride)
Edited date:
160704
Test:
160704
Example:
::
p = a.yield_batch_samples(3)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
0
1
2
p = a.yield_batch_samples(10, start=1, stride=3)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
1
4
7
Args:
start (int): index starts from this
end (int): index ends this
stride (int): stride
Yields:
int: range(start, end, stride)
"""
for i in six.moves.range(start, end, stride):
yield i
@staticmethod
def yield_equal_interval_batch_samples(batch, sample_length):
"""Give batch samples with equal interval
Edited date:
160704
Test:
160704
Example:
::
p = a.yield_continuous_random_batch_samples(3,10)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[0 3 6]
[1 4 7]
[2 5 8]
Args:
batch (int): batch number
sample_length (int): lengtgh of sample list
Yields:
numpy.ndarray: sampled indices
"""
batch_length = int(sample_length // batch)
samples = Sampling.batch_sample_equal_interval(batch, sample_length)
for i in six.moves.range(batch_length):
yield samples + int(i)
@staticmethod
def yield_continuous_random_batch_samples(batch, epoch, sample_length):
"""Give random batch indices continuously
Edited date:
160704
Test:
160704
Note:
sampling starts from sample_length - batch_length
Example:
::
p = a.yield_continuous_random_batch_samples(3, 5, 100)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[74 67 43]
[75 68 44]
[76 69 45]
[77 70 46]
[78 71 47]
Args:
batch (int): batch number
epoch (int): how many times self.yield_random_batch_samples yield
sample_length (int): lengtgh of sample list
Yields:
numpy.ndarray: sampled indices
"""
samples = Sampling.pick_random_permutation(batch, sample_length - epoch)
for i in six.moves.range(epoch):
yield samples + int(i)
@staticmethod
def yield_random_batch_samples(epoch, batch, sample_length, sort=False):
"""Give batch indices randomly
Edited date:
160704
Test:
160704
Example:
::
p = self.yield_random_batch_samples(3, 5, 10, sort=False)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[7 0 3 2 6]
[8 1 9 7 5]
[0 6 9 5 2]
self.delete_all()
p = self.yield_random_batch_samples(3, 5, 10, sort=True)
self.set(p, 'test')
for ele in self.sample_from('test'):
>>> print(ele)
[1 3 5 8 9]
[0 1 7 8 9]
[2 3 4 5 6]
Args:
epoch (int): how many times self.yield_random_batch_samples yields
batch (int): batch number
sample_length (int): lengtgh of sample list
sort (bool): sorted list will be returned if True, otherwise False
Yields:
numpy.ndarray: sampled indices
"""
epoch = int(epoch)
batch = int(batch)
sample_length = int(sample_length)
sort = bool(sort)
for i in six.moves.range(epoch):
yield Sampling.pick_random_permutation(batch, sample_length, sort=sort)
@staticmethod
def yield_random_batch_from_category(epoch, number_of_picture_at_each_categories, pick_number, sequence=True, shuffle=True):
"""Yield batch that samples equally over imbalanced category randomly
Edited date:
160704
Test:
160704
Example:
::
>>> print(list(sample.yield_random_batch_from_category(3,[3,3,3],5,sequence=False)))
# [[0, 1], [0, 2], [2]] means sampled index from [category[0], category[1], category[2]]
[[[0, 1], [0, 2], [2]], [[2, 0], [2], [0, 1]], [[1, 0], [2], [2, 0]]]
>>> print(list(sample.yield_random_batch_from_category(3,[3,3,3],5,sequence=True, shuffle=False)))
# 0~2 is category[0]
# 3~5 is category[1]
# 6~8 is category[2]
# shuffle is False, thus the order is kept
[[1, 1, 4, 4, 8], [0, 3, 3, 5, 8], [1, 2, 4, 3, 7]]
>>> print(list(sample.yield_random_batch_from_category(3,[3,3,3],5,sequence=True, shuffle=True)))
[[6, 6, 8, 7, 1], [8, 7, 5, 4, 0], [6, 0, 2, 7, 5]]
Args:
epoch (int): how many time this function yield
number_of_picture_at_each_categories (list): it contains the number of sample at each category. It is expected to be imbalance.
pick_number (int): how many samples you need
sequence | |
#-------------------------------------------------------------------------------
#
# Handles the WPS requests to the VirES server
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import os
import tempfile
import shutil
import numpy
import pandas
import xarray
import cdflib
from ._wps import time_util
if os.name == "nt":
import atexit
from ._data import CONFIG_SWARM
# CDF epoch (milliseconds) of 1970-01-01T00:00:00Z; subtracted in
# FileReader._cdftime_to_datetime to obtain Unix-based seconds.
CDF_EPOCH_1970 = 62167219200000.0
# Allowed categories of the "Spacecraft" variable (name keeps its original
# spelling for backward compatibility).
ALLOWED_SPACECRFTS = ["A", "B", "C", "1", "2", "-"]
# Frame names to use as xarray dimension names
FRAME_NAMES = {
    "NEC": ["B_NEC", "B_OB", "B_CF", "B_SV", "sigma_OB", "sigma_CF", "sigma_SV"],
    "VFM": ["B_VFM", "dB_Sun", "dB_AOCS", "dB_other", "B_error"],
    "quaternion": ["q_NEC_CRF"],
    "WGS84": ["GPS_Position", "LEO_Position"],
    "EEJ_QDLat": ["EEJ"],
    "NE": ["J_NE", "J_CF_NE", "J_DF_NE", "B_NE"],
}
# Reverse mapping of the above: variable name -> frame name
DATANAMES_TO_FRAME_NAMES = {}
for framename, datanameset in FRAME_NAMES.items():
    for dataname in datanameset:
        DATANAMES_TO_FRAME_NAMES[dataname] = framename
# Labels to use for suffixes on expanded columns in pandas dataframe
# and on dimension coordinates in xarray
FRAME_LABELS = {
    "NEC": ["N", "E", "C"],
    "VFM": ["i", "j", "k"],
    "quaternion": ["1", "i", "j", "k"],
    "WGS84": ["X", "Y", "Z"],
    "EEJ_QDLat": numpy.linspace(-20, 20, 81),
    "NE": ["N", "E"],
}
# Human-readable frame descriptions attached as coordinate metadata.
FRAME_DESCRIPTIONS = {
    "NEC": "NEC frame - North, East, Centre (down)",
    "NE": "Horizontal NE frame - North, East",
    "VFM": "Vector Field Magnetometer instrument frame",
    "EEJ_QDLat": "Quasi-dipole latitude profile between -20 and 20 degrees from the EEF product",
}
class FileReader(object):
"""Provides access to file contents (wrapper around cdflib)
"""
    def __init__(self, file, filetype="cdf"):
        """
        Args:
            file (file-like or str)
            filetype (str): only "cdf" is currently supported

        Raises:
            NotImplementedError: for any filetype other than "cdf"
        """
        if filetype.lower() == "cdf":
            self._cdf = self._open_cdf(file)
            # Provenance metadata from the CDF global attributes.
            globalatts = self._cdf.globalattsget()
            self.sources = self._ensure_list(
                globalatts.get('ORIGINAL_PRODUCT_NAMES', []))
            self.magnetic_models = self._ensure_list(
                globalatts.get('MAGNETIC_MODELS', []))
            self.range_filters = self._ensure_list(
                globalatts.get('DATA_FILTERS', []))
            self.variables = self._cdf.cdf_info()['zVariables']
            # Cache per-variable attributes/metadata for the accessors below.
            self._varatts = {var: self._cdf.varattsget(var)
                             for var in self.variables}
            self._varinfo = {var: self._cdf.varinq(var)
                             for var in self.variables}
        else:
            raise NotImplementedError("{} not supported".format(filetype))
    def __enter__(self):
        # Support "with FileReader(f) as fr:" usage.
        return self
    def __exit__(self, *args):
        # Close the underlying CDF file on context exit.
        self._cdf.close()
    @staticmethod
    def _open_cdf(file):
        """Open *file* (path or file-like) with cdflib."""
        try:
            # Accept open file objects by using their backing file name.
            f = file.name
        except AttributeError:
            f = file
        try:
            return cdflib.cdfread.CDF(f, string_encoding="utf-8")
        except TypeError:
            # presumably an older cdflib without the string_encoding
            # parameter — retry without it.
            return cdflib.cdfread.CDF(f)
@staticmethod
def _ensure_list(attribute):
if isinstance(attribute, str):
return [attribute]
else:
return attribute
def get_variable(self, var):
try:
data = self._cdf.varget(var)
except ValueError:
data = None
if data is None:
shape = [0, *self.get_variable_dimsizes(var)]
data = numpy.empty(shape)
return data
    def get_variable_units(self, var):
        """Return the UNITS attribute of *var* ("" when absent)."""
        return self._varatts[var].get("UNITS", "")
    def get_variable_description(self, var):
        """Return the DESCRIPTION attribute of *var* ("" when absent)."""
        return self._varatts[var].get("DESCRIPTION", "")
    def get_variable_numdims(self, var):
        """Return the number of (non-record) dimensions of *var*."""
        return self._varinfo[var].get("Num_Dims")
    def get_variable_dimsizes(self, var):
        """Return the dimension sizes of *var*."""
        return self._varinfo[var].get("Dim_Sizes")
@staticmethod
def _cdftime_to_datetime(t):
try:
return pandas.to_datetime(
(t - CDF_EPOCH_1970)/1e3,
unit='s'
)
except TypeError:
return []
    def as_pandas_dataframe(self, expand=False):
        """Convert the file contents to a pandas DataFrame indexed by Timestamp.

        Args:
            expand (bool): when True, split known vector variables (e.g.
                B_NEC) into one scalar column per component, suffixed with
                the frame labels (e.g. B_NEC_N, B_NEC_E, B_NEC_C).

        Returns:
            pandas.DataFrame

        Raises:
            NotImplementedError: for vector variables with more than two
                dimensions or an unexpected component count.
        """
        # Use the variables in the file as columns to create in the dataframe.
        # Skip Timestamp as it will be used as the index.
        columns = set(self.variables)
        columns.remove("Timestamp")
        # Split columns according to those to be expanded into multiple columns
        if expand:
            columns_to_expand = set(c for c in columns
                                    if c in DATANAMES_TO_FRAME_NAMES.keys()
                                    or "B_NEC" in c)
            # Avoid conflict with 2D AOB_FAC Quality variable
            # when accessing AUX_OBS Quality
            if any(["AUX_OBS" in s for s in self.sources]):
                columns_to_expand.discard("Quality")
        else:
            columns_to_expand = set()
        columns_standard = columns.difference(columns_to_expand)
        # Initialise dataframe with Timestamp as index
        df = pandas.DataFrame(index=self.get_variable("Timestamp"))
        df.index.name = "Timestamp"
        # Return empty dataframe, including column names
        # when retrieval from server is empty
        if len(df.index) == 0:
            for column in columns_standard:
                df[column] = None
            for column in columns_to_expand:
                framename = DATANAMES_TO_FRAME_NAMES.get(column, "NEC")
                suffixes = FRAME_LABELS[framename]
                for suffix in suffixes:
                    df[column + "_" + str(suffix)] = None
            return df
        # Convert timestamps to datetime objects
        df.index = self._cdftime_to_datetime(df.index)
        # Separately add non-expanded and expanded columns
        for column in columns_standard:
            df[column] = list(self.get_variable(column))
        for column in columns_to_expand:
            vector_data = self.get_variable(column)
            framename = DATANAMES_TO_FRAME_NAMES.get(column, "NEC")
            suffixes = FRAME_LABELS[framename]
            # Only rank-2 (record, component) data with a component count
            # matching the frame labels can be expanded.
            if len(vector_data.shape) > 2:
                raise NotImplementedError("{}".format(column))
            if vector_data.shape[1] != len(suffixes):
                raise NotImplementedError("{}".format(column))
            for i, suffix in enumerate(suffixes):
                df[column + "_" + str(suffix)] = vector_data[:, i]
        return df
    def as_xarray_dataset(self, reshape=False):
        """Convert the file contents to an xarray Dataset.

        Variables become data_vars dimensioned (Timestamp[, frame dims...]),
        with named coordinates attached for known frames (NEC, VFM, ...).

        Args:
            reshape (bool): when True, reshape to a (Site, Timestamp, ...)
                structure via :meth:`reshape_dataset` (GVO/OBS data only).

        Returns:
            xarray.Dataset

        Raises:
            NotImplementedError: for variables with more than two
                non-record dimensions.
        """
        # NB currently does not set the global metadata (attrs)
        # (avoids issues with concatenating them)
        # (this is done in ReturnedData)
        # Initialise dataset with time coordinate
        ds = xarray.Dataset(
            coords={"Timestamp":
                    self._cdftime_to_datetime(self.get_variable("Timestamp"))})
        # Add Spacecraft variable as Categorical to save memory
        if "Spacecraft" in self.variables:
            ds["Spacecraft"] = (("Timestamp",), pandas.Categorical(
                self.get_variable("Spacecraft"), categories=ALLOWED_SPACECRFTS))
        datanames = set(self.variables) - {"Timestamp", "Spacecraft"}
        # Loop through each variable available and append them to the Dataset,
        # attaching the Timestamp coordinate to each.
        # Attach dimension names based on the name of the variable,
        # with coordinate labels if available.
        dims_used = set()
        for dataname in datanames:
            data = self.get_variable(dataname)
            numdims = self.get_variable_numdims(dataname)
            # 1D case (scalar series)
            if numdims == 0:
                ds[dataname] = (("Timestamp",), data)
            # 2D case (vector series)
            elif numdims == 1:
                if "B_NEC" in dataname:
                    dimname = "NEC"
                    dims_used.add(dimname)
                elif dataname in DATANAMES_TO_FRAME_NAMES.keys():
                    dimname = DATANAMES_TO_FRAME_NAMES[dataname]
                    dims_used.add(dimname)
                else:
                    dimname = "%s_dim1" % dataname
                ds[dataname] = (("Timestamp", dimname),
                                self.get_variable(dataname))
            # 3D case (matrix series), e.g. QDBasis
            elif numdims == 2:
                dimname1 = "%s_dim1" % dataname
                dimname2 = "%s_dim2" % dataname
                ds[dataname] = (("Timestamp", dimname1, dimname2),
                                self.get_variable(dataname))
            else:
                raise NotImplementedError("%s: array too complicated" %
                                          dataname)
        # Add named coordinates
        for dimname, dimlabels in FRAME_LABELS.items():
            if dimname in dims_used:
                ds[dimname] = numpy.array(dimlabels)
                ds = ds.set_coords(dimname)
                # ds[dimname].attrs["description"] = FRAME_DESCRIPTIONS.get(
                #     dimname, None)
                # ds = ds.set_coords(dimname)
        # Reshape to a sensible higher dimensional structure
        # Currently only for GVO data, and without magnetic model values or auxiliaries
        # Inefficient as it is duplicating the data (ds -> ds2)
        if reshape:
            ds = self.reshape_dataset(ds)
        # Add metadata of each variable
        for var in list(ds.data_vars) + list(ds.coords):
            try:
                ds[var].attrs["units"] = self.get_variable_units(var)
            except KeyError:
                ds[var].attrs["units"] = ""
            try:
                ds[var].attrs["description"] = self.get_variable_description(var)
            except KeyError:
                ds[var].attrs["description"] = FRAME_DESCRIPTIONS.get(var, "")
        # Remove unused Timestamp unit (-)
        # for xarray 0.17 compatibility when writing to netcdf
        ds["Timestamp"].attrs.pop("units", None)
        return ds
@staticmethod
def reshape_dataset(ds):
if "SiteCode" in ds.data_vars:
codevar = "SiteCode"
elif "IAGA_code" in ds.data_vars:
codevar = "IAGA_code"
else:
raise NotImplementedError(
"""
Only available for GVO dataset where the "SiteCode"
parameter has been requested, or OBS dataset with "IAGA_code"
"""
)
# Create integer "Site" identifier based on SiteCode / IAGA_code
sites = dict(enumerate(sorted(set(ds[codevar].values))))
sites_inv = {v: k for k, v in sites.items()}
if len(sites) == 0:
_ds_locs = ds
else:
# Identify (V)OBS locations and mapping from integer "Site" identifier
pos_vars = ["Longitude", "Latitude", "Radius", codevar]
_ds_locs = next(iter(ds[pos_vars].groupby("Timestamp")))[1]
if len(sites) > 1:
_ds_locs = _ds_locs.drop(("Timestamp")).rename({"Timestamp": "Site"})
else:
_ds_locs = _ds_locs.drop(("Timestamp")).expand_dims("Site")
_ds_locs["Site"] = [sites_inv.get(code) for code in _ds_locs[codevar].values]
_ds_locs = _ds_locs.sortby("Site")
# Create dataset initialised with the (V)OBS positional info as coords
# and datavars (empty) reshaped to (Site, Timestamp, ...)
t = numpy.unique(ds["Timestamp"])
ds2 = xarray.Dataset(
coords={
"Timestamp": t,
codevar: (("Site"), _ds_locs[codevar].data),
"Latitude": ("Site", _ds_locs["Latitude"].data),
"Longitude": ("Site", _ds_locs["Longitude"].data),
"Radius": ("Site", _ds_locs["Radius"].data),
"NEC": ["N", "E", "C"]
},
)
# (Dropping unused Spacecraft var)
data_vars = set(ds.data_vars) - {"Latitude", "Longitude", "Radius", codevar, "Spacecraft"}
N_sites = len(_ds_locs[codevar])
# Create empty data variables to be infilled
for var in data_vars:
shape = [N_sites, len(t), *ds[var].shape[1:]]
ds2[var] = ("Site", *ds[var].dims), numpy.empty(shape, dtype=ds[var].dtype)
ds2[var][...] = None
# Loop through each (V)OBS site to infill the data
if N_sites != 0:
for k, _ds in dict(ds.groupby(codevar)).items():
site = sites_inv.get(k)
for var in data_vars:
ds2[var][site, ...] = _ds[var].values
# Revert to using only | |
_dict:
args['limits'] = ResourceLimits.from_dict(_dict.get('limits'))
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ResourceObjectFabV1 object from a json dictionary (alias of from_dict)."""
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'requests') and self.requests is not None:
_dict['requests'] = self.requests.to_dict()
if hasattr(self, 'limits') and self.limits is not None:
_dict['limits'] = self.limits.to_dict()
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model (alias of to_dict)."""
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this ResourceObjectFabV1 object."""
        return json.dumps(self.to_dict(), indent=2)
    def __eq__(self, other: 'ResourceObjectFabV1') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        # Instances of different classes never compare equal.
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other: 'ResourceObjectFabV1') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ResourceObjectFabV2():
    """
    This field requires the use of Fabric v2.1.* and higher.

    :attr ResourceRequests requests:
    :attr ResourceLimits limits: (optional)
    """

    def __init__(self,
                 requests: 'ResourceRequests',
                 *,
                 limits: 'ResourceLimits' = None) -> None:
        """
        Initialize a ResourceObjectFabV2 object.

        :param ResourceRequests requests:
        :param ResourceLimits limits: (optional)
        """
        self.requests = requests
        self.limits = limits

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ResourceObjectFabV2':
        """Initialize a ResourceObjectFabV2 object from a json dictionary."""
        # 'requests' is the only mandatory property; fail fast if missing.
        if 'requests' not in _dict:
            raise ValueError('Required property \'requests\' not present in ResourceObjectFabV2 JSON')
        args = {'requests': ResourceRequests.from_dict(_dict.get('requests'))}
        if 'limits' in _dict:
            args['limits'] = ResourceLimits.from_dict(_dict.get('limits'))
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ResourceObjectFabV2 object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Serialize each nested model only when it is present.
        for attr in ('requests', 'limits'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value.to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ResourceObjectFabV2 object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ResourceObjectFabV2') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ResourceObjectFabV2') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ResourceRequests():
    """
    ResourceRequests.

    :attr str cpu: (optional) Desired CPU for subcomponent. [Resource
          details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
    :attr str memory: (optional) Desired memory for subcomponent. [Resource
          details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
    """

    def __init__(self,
                 *,
                 cpu: str = None,
                 memory: str = None) -> None:
        """
        Initialize a ResourceRequests object.

        :param str cpu: (optional) Desired CPU for subcomponent. [Resource
               details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
        :param str memory: (optional) Desired memory for subcomponent. [Resource
               details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
        """
        self.cpu = cpu
        self.memory = memory

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ResourceRequests':
        """Initialize a ResourceRequests object from a json dictionary."""
        args = {key: _dict[key] for key in ('cpu', 'memory') if key in _dict}
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ResourceRequests object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Only attributes that were actually set end up in the JSON.
        for attr in ('cpu', 'memory'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ResourceRequests object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ResourceRequests') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ResourceRequests') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RestartAthenaResponse():
    """
    RestartAthenaResponse.

    :attr str message: (optional) Text describing the outcome of the api.
    """

    def __init__(self,
                 *,
                 message: str = None) -> None:
        """
        Initialize a RestartAthenaResponse object.

        :param str message: (optional) Text describing the outcome of the api.
        """
        self.message = message

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RestartAthenaResponse':
        """Initialize a RestartAthenaResponse object from a json dictionary."""
        args = {key: _dict[key] for key in ('message',) if key in _dict}
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RestartAthenaResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        value = getattr(self, 'message', None)
        if value is not None:
            serialized['message'] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RestartAthenaResponse object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RestartAthenaResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'RestartAthenaResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class SettingsTimestampData():
    """
    SettingsTimestampData.

    :attr float now: (optional) UTC UNIX timestamp of the current time according to
          the server. In milliseconds.
    :attr float born: (optional) UTC UNIX timestamp of when the server started. In
          milliseconds.
    :attr str next_settings_update: (optional) Time remaining until the server
          performs a hard-refresh of its settings.
    :attr str up_time: (optional) Total time the IBP console server has been
          running.
    """

    # JSON keys handled by from_dict/to_dict, in serialization order.
    _KEYS = ('now', 'born', 'next_settings_update', 'up_time')

    def __init__(self,
                 *,
                 now: float = None,
                 born: float = None,
                 next_settings_update: str = None,
                 up_time: str = None) -> None:
        """
        Initialize a SettingsTimestampData object.

        :param float now: (optional) UTC UNIX timestamp of the current time
               according to the server. In milliseconds.
        :param float born: (optional) UTC UNIX timestamp of when the server
               started. In milliseconds.
        :param str next_settings_update: (optional) Time remaining until the server
               performs a hard-refresh of its settings.
        :param str up_time: (optional) Total time the IBP console server has been
               running.
        """
        self.now = now
        self.born = born
        self.next_settings_update = next_settings_update
        self.up_time = up_time

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'SettingsTimestampData':
        """Initialize a SettingsTimestampData object from a json dictionary."""
        args = {key: _dict[key] for key in cls._KEYS if key in _dict}
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a SettingsTimestampData object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Only attributes that were actually set end up in the JSON.
        for key in self._KEYS:
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this SettingsTimestampData object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'SettingsTimestampData') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'SettingsTimestampData') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class StorageObject():
"""
StorageObject.
:attr str size: (optional) Maximum disk space for subcomponent. [Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
:attr str class_: (optional) Kubernetes storage class for subcomponent's disk
space.
"""
def __init__(self,
*,
size: str = None,
class_: str = None) -> None:
"""
Initialize a StorageObject object.
:param str size: (optional) Maximum disk space for subcomponent. [Resource
details](/docs/blockchain?topic=blockchain-ibp-console-govern-components#ibp-console-govern-components-allocate-resources).
:param str class_: (optional) Kubernetes storage class for subcomponent's
disk space.
"""
self.size = size
self.class_ = class_
@classmethod
def from_dict(cls, _dict: Dict) -> 'StorageObject':
"""Initialize a StorageObject object from a json dictionary."""
args = {}
if 'size' in _dict:
args['size'] = _dict.get('size')
if 'class' in _dict:
args['class_'] = _dict.get('class')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a StorageObject object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'size') and self.size is not None:
_dict['size'] = self.size
if hasattr(self, 'class_') and self.class_ is not None:
_dict['class'] = | |
"""
Undirected graph data type and procedures for its manipulation.
"""
from collections import deque
import sys
class Graph:
    """
    Generic purpose data structure to represent a directed or undirected graph
    in a time and memory efficient way.

    Vertexes are named with consecutive numbers starting from 0
    (the last is V-1, being V the number of vertexes in the graph).

    **Implementation notes**:
    This implementation is based on an adjacency-list representation of the graph:
    for every vertex V we maintain a list of adjacent vertexes,
    i.e. vertexes reachable from V with a direct connection (an edge).
    """

    def __init__(self, numvertices, directed=False):
        """Creates a graph with the given number of vertices and no edges.
        :param numvertices: number of vertexes held by the graph.
        :param directed: True if the graph is a directed graph; default is False, i.e. an undirected graph.
        :return: an empty graph with a structure to hold edges for the given number of vertexes
        :rtype: Graph
        """
        self._numvertices = numvertices
        self._directed = directed
        self._numedges = 0
        # One independent (non-aliased) adjacency list per vertex.
        self._adjacents = [[] for _ in range(numvertices)]

    @classmethod
    def from_file(cls, filename: str, directed=False):
        """Loads a graph definition from a file.
        First line must contain the number of vertexes;
        second line must contain the number of edges;
        from third line onward there must be two integers representing the two vertexes to be connected by an edge.
        :param filename: the name of the file containing the graph definition.
        :param directed: True if the graph is a directed graph; default is False, i.e. an undirected graph.
        :return: a graph built from the information stored in the file
        :rtype: Graph
        """
        with open(filename) as fh:
            vertnum = int(fh.readline().strip())
            # The declared edge count is consumed but otherwise unused:
            # edges are simply read until the end of the file.
            int(fh.readline().strip())
            # `cls` (not a hard-coded Graph) so subclasses load correctly.
            graph = cls(vertnum, directed)
            for line in fh:
                numstr = line.split()
                graph.add_edge(int(numstr[0]), int(numstr[1]))
            return graph

    def is_directed(self):
        """:return: True if the graph is a directed graph, False if is an undirected graph."""
        return self._directed

    def num_vertices(self) -> int:
        """:return: the number of vertices of this Graph."""
        return self._numvertices

    def num_edges(self) -> int:
        """:return: the number of edges of this Graph."""
        return self._numedges

    def add_edge(self, vertex1, vertex2):
        """
        Add an edge connecting vertex1 to vertex2.

        NOTE: duplicates are NOT detected -- adding the same pair twice
        stores parallel edges and counts each of them (the previous
        docstring claimed otherwise, which the code never implemented).
        :param vertex1: the first vertex of the edge being added.
        :param vertex2: the second vertex of the edge being added.
        :return: None
        """
        self._numedges += 1
        self._adjacents[vertex1].append(vertex2)
        if not self._directed:
            # Undirected edges are stored in both endpoints' lists.
            self._adjacents[vertex2].append(vertex1)

    def adjacents(self, vertex):
        """:return: the list of vertexes directly reachable from `vertex`."""
        return self._adjacents[vertex]

    def __str__(self):
        """:return: one 'v => [adjacents]' line per vertex."""
        # str([]) already renders as "[]", so no empty-list special case.
        return "".join("{} => {}\n".format(v, adjacents)
                       for v, adjacents in enumerate(self._adjacents))
class DepthFirstSearch:
    """
    Finds the vertexes connected with the given source vertex and a path (not necessarily the shortest) to reach it.
    """

    def __init__(self, graph: Graph, source_vertex: int):
        """Navigate the given Graph from the given source vertex using Depth First Search.
        :param graph: the graph we want to navigate.
        :param source_vertex: the vertex where we start the navigation.
        :return: a DepthFirstSearch object to query the graph starting from the given source vertex.
        """
        size = graph.num_vertices()
        self._source = source_vertex
        self._visited = [False] * size
        self._predecessor = [-1] * size
        # The source is its own predecessor: it marks the DFS tree root.
        self._predecessor[source_vertex] = source_vertex
        self._count = self._depth_first_search(graph, source_vertex)

    def _depth_first_search(self, graph: Graph, vertex: int):
        # Recursive DFS; returns how many vertexes this call visited.
        # NOTE(review): recursion depth equals the longest simple path, so
        # very large graphs may hit Python's recursion limit.
        self._visited[vertex] = True
        visited_here = 1
        for neighbor in graph.adjacents(vertex):
            if not self._visited[neighbor]:
                self._predecessor[neighbor] = vertex
                visited_here += self._depth_first_search(graph, neighbor)
        return visited_here

    def connected(self, vertex: int):
        """
        :param vertex: the vertex we want to know if it is connected to the source for the current Graph.
        :return: True if the given vertex is connected to the source, False otherwise.
        """
        return self._visited[vertex]

    def count(self) -> int:
        """:return: How many vertexes are connected with the source, including the source in the count."""
        return self._count

    def path_to(self, vertex: int):
        """Find a path from the given vertex to the source.
        :param vertex: the vertex to find a path to the source
        :return: a list with the vertexes to navigate to get to the source if it is connected or None otherwise
        """
        if not self.connected(vertex):
            return None
        # Walk the predecessor chain from `vertex` back to the source.
        path = [vertex]
        while path[-1] != self._source:
            path.append(self._predecessor[path[-1]])
        return path
class BreadthFirstSearch:
    """
    Finds the vertexes connected with the given source vertex and the shortest path to reach each.
    """

    def __init__(self, graph: Graph, source_vertex: int):
        """Navigate the given Graph from the given source vertex using Breadth First Search.
        :param graph: the graph we want to navigate.
        :param source_vertex: the vertex where we start the navigation.
        :return: a BreadthFirstSearch object to query the graph starting from the given source vertex.
        """
        self._graph = graph
        self._source = source_vertex
        self._queue = deque()
        size = self._graph.num_vertices()
        self._visited = [False] * size
        self._predecessor = [-1] * size
        # Unreachable vertexes keep an "infinite" distance.
        self._distance = [sys.maxsize] * size
        self._count = 0
        self._breadth_first_search(self._source)

    def _breadth_first_search(self, source):
        # Standard FIFO-queue BFS: vertexes are visited in rings of
        # increasing distance from the source.
        self._visited[source] = True
        self._predecessor[source] = source
        self._distance[source] = 0
        queue = self._queue
        queue.append(source)
        while queue:
            current = queue.popleft()
            self._count += 1
            for neighbor in self._graph.adjacents(current):
                if self._visited[neighbor]:
                    continue
                self._visited[neighbor] = True
                self._predecessor[neighbor] = current
                self._distance[neighbor] = self._distance[current] + 1
                queue.append(neighbor)

    def connected(self, vertex: int):
        """
        :param vertex: the vertex we want to know if it is connected to the source for the current Graph.
        :return: True if the given vertex is connected to the source, False otherwise.
        """
        return self._visited[vertex]

    def count(self) -> int:
        """:return: How many vertexes are connected with the source, including the source in the count."""
        return self._count

    def path_to(self, vertex: int):
        """Find a path from the given vertex to the source.
        :param vertex: the vertex to find a path to the source
        :return: a list with the vertexes to navigate to get to the source if it is connected or None otherwise
        """
        if not self.connected(vertex):
            return None
        # Follow predecessors back to the source; BFS makes this shortest.
        path = [vertex]
        while path[-1] != self._source:
            path.append(self._predecessor[path[-1]])
        return path

    def distance(self, vertex: int):
        """
        :param vertex: the vertex whose distance from the source is requested.
        :return: the distance between the given vertex and the source.
        """
        return self._distance[vertex]
class ConnectedComponents:
    """Determines the connected components in an undirected graph."""

    def __init__(self, graph: Graph):
        """Analyzes the given graph and stores the results to be ready to answer queries on connected components.
        :param graph: The Graph to analyze
        """
        n = graph.num_vertices()
        self._visited = [False] * n
        # _group[v] is the component id of v: the smallest vertex in it.
        self._group = [-1] * n
        # _group_size[v] is the number of vertexes in v's component.
        self._group_size = [-1] * n
        self._count = 0
        for v in range(n):
            if not self._visited[v]:
                # v is the first vertex of a new component: a DFS from it
                # discovers every member of that component.
                # (Removed a leftover debug print that fired per component.)
                self._visited[v] = True
                self._group[v] = v
                self._count += 1
                dfs = DepthFirstSearch(graph, v)
                self._group_size[v] = dfs.count()
                for w in range(v + 1, n):
                    if dfs.connected(w):
                        self._visited[w] = True
                        self._group[w] = v
                        self._group_size[w] = dfs.count()

    def count(self):
        """:return: The number of different connected components. """
        return self._count

    def connected(self, v: int, w: int) -> bool:
        """ :return: True if the two vertexes are connected, False otherwise."""
        return self._group[v] == self._group[w]

    def group(self, vertex: int) -> int:
        """:return: The id of the connected component the given vertex is part of."""
        return self._group[vertex]

    def groupsize(self, vertex: int) -> int:
        """:return: The size of the connected component the given vertex is part of."""
        return self._group_size[vertex]
class CycleDetector:
    """A class to detect cycles in undirected graphs.

    Cycles can be self loops, parallel edges or multi-vertex circular paths.
    """
    # Placeholder: no detection logic has been implemented yet.
    pass
"""
More depth-first search applications.
Cycle detection: Is a given graph acyclic? Cycle.java uses depth-first search to determine whether a graph has a
cycle, and if so return one. It takes time proportional to V + E in the worst case.
Two-colorability:
Can the vertices of a given graph be assigned one of two colors in such a way that no edge
connects vertices of the same color?
Bipartite.java uses depth-first search to determine whether a graph has a bipartition;
if so, return one; if not, return an odd-length cycle. It takes time proportional to V + E in the worst case.
Bridge:
A bridge (or cut-edge) is an | |
import re
import pathlib
import tempfile
from collections import OrderedDict
from enum import Enum
from functools import lru_cache
from typing import List, Union, Dict
from hikari.resources import cif_core_dict
from hikari.utility import make_abspath
class CifBlock(OrderedDict):
    """
    CifBlock object handles all data inside an individual block of Cif file.
    It is a subclass of an `OrderedDict` and, as such, features a lot
    of similarities with python dictionary while preserving item order.
    Individual Cif items can be accessed or assigned using a dict-like syntax.
    """

    def __init__(self, *args):
        super().__init__(*args)

    def get_as_type(self, key, typ, default=None):
        """
        Get value of `self[key]` converted to `typ`. If value is a list,
        convert its contents element-wise.

        :param key: key associated with accessed element
        :type key: str
        :param typ: type/function applied to a value or its every element
        :type typ: Callable
        :param default: if given, return it on KeyError
        :type default: Any
        :return: converted value of `self[key]` or `default`
        :rtype: Union[List, str]
        """
        try:
            value = self[key]
        except KeyError as e:
            # NOTE: None is the "no default" sentinel, so an explicit
            # default of None still re-raises the KeyError.
            if default is not None:
                value = default
            else:
                raise e
        else:
            # Falsy values (e.g. '' or []) and a falsy `typ` skip conversion.
            if value and typ:
                if isinstance(value, str):
                    value = typ(value)
                elif isinstance(value, list):
                    value = list(map(typ, value))
                else:
                    # Fixed message: the adjacent f-strings previously
                    # concatenated into "...value typeof..." (missing space).
                    raise TypeError(f'Unknown value type '
                                    f'of {value}: {type(value)}')
        return value

    def read(self, path, block):
        """
        Read the contents of .cif file specified by the `path` parameter, but
        access and store only the `block` data block in self.

        :param path: Absolute or relative path to the .cif file.
        :type path: str
        :param block: Name of the cif data block to be accessed
        :type block: str
        """
        reader = CifReader(cif_file_path=path)
        self.update(reader.read()[block])
class CifFrame(OrderedDict):
    """
    A master object which manages cif files. It utilises other `Cif*` classes
    to manage multiple :class:`CifBlock`s with crystallographic information.
    It is a subclass of an `OrderedDict` and, as such, features a lot
    of similarities with python dictionary while preserving item order.
    Individual Cif blocks and items within them can be accessed or assigned
    using a single- or nested- dict-like syntax.
    Similarly to other `Frame`s, `CifFrame` is designed to work in-place,
    meaning it should be first created, and only then accessed using
    methods such as :func:`read` or :func:`write`, but not chain assignments.
    Unlike OrderedDict, CifFrame always initiates empty and does not accept
    any parameters at creation.
    """

    def read(self, path):
        """
        Read the contents of .cif file specified by the `path` parameter.
        Store each found block as a {block_name: CifBlock} pair.

        :param path: Absolute or relative path to the .cif file.
        :type path: str
        """
        parsed_blocks = CifReader(cif_file_path=path).read()
        self.update(parsed_blocks)
class CifValidator(OrderedDict):
    """
    This object reads an appropriate cif core dictionary and uses it in order to
    format or validate all entries passing through it.
    """

    def __init__(self):
        super().__init__()
        # Dump the bundled dictionary text into a temp file so CifReader
        # (which only reads from paths) can parse it. validate=False is
        # required: otherwise CifReader would build another CifValidator
        # and recurse forever.
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_dic_path = str(pathlib.Path(temp_dir) / 'cif_core.dic')
            with open(temp_dic_path, 'w+') as f:
                f.write(cif_core_dict)
            reader = CifReader(cif_file_path=temp_dic_path, validate=False)
            core_dict_raw = reader.read()
        core_dict_expanded = self._expand_names(core_dict_raw)
        self.update(core_dict_expanded)

    @staticmethod
    def _expand_names(dict_):
        # Re-key each data block under every alias listed in its '_name'
        # item (the aliases share one stripped block); blocks without a
        # '_name' item are dropped.
        expanded_items = CifBlock()
        for data_block_name, data_block in dict_.items():
            names = data_block.get('_name', None)
            if names:
                data_block_without_name_item = OrderedDict()
                for data_name, data_value in data_block.items():
                    # Fixed: was `data_name is not '_name'` -- an identity
                    # comparison with a str literal (SyntaxWarning; only
                    # worked via CPython interning). Use equality.
                    if data_name != '_name':
                        data_block_without_name_item[data_name] = data_value
                for name in names:
                    expanded_items[name] = data_block_without_name_item
        return expanded_items
class CifIO:
    """
    A base class for `CifRead` and `CifWrite`. This class and its inheritors
    base on the IUCr File Syntax version 1.1 Working specification available
    [here](`https://www.iucr.org/resources/cif/spec/version1.1/cifsyntax`)
    """

    # Matches a '#'-to-end-of-line comment preceded by whitespace or line start.
    COMMENT_REGEX = re.compile(
        r"""(?<=\s)(#.*)(?=$)|(?<=^)(#.*)(?=$)""")
    # Matches a quoted span anywhere in a line (three capture groups:
    # opening quote, quoted text, closing quote).
    MATCHING_QUOTES_REGEX = re.compile(
        r"""(?<!\b)(["'])((?:\\\1|(?!\1\s).)*.)(\1)(?!\b)""")
    # Same, but the quotes must span the whole string.
    MATCHING_OUTER_QUOTES_REGEX = re.compile(
        r"""(?<=^)(["'])((?:\\\1|(?!\1\s).)*.)(\1)(?=$)""")
    # Placeholder characters used to hide whitespace inside quoted values.
    WHITESPACE_SUBSTITUTES = {' ': '█', '\t': '▄'}

    def __init__(self, cif_file_path, validate=True):
        # Resolve the target path once, up front.
        self.file_path = make_abspath(cif_file_path)
        self.file_lines = []
        self.data = OrderedDict()
        # Optional: CifValidator itself reads a cif file with
        # validate=False, which avoids infinite recursion.
        self.validator = CifValidator() if validate else None
class CifReader(CifIO):
"""A helper class managing reading cif files into a `CifFrame`."""
    class DataBuffer:
        """This class buffers data in temporary dict until flush() is called."""
        def __init__(self, target):
            # `names`/`values` accumulate the current name/value group;
            # `target` is the dict that flush() updates; `multilines`
            # collects the lines of a semicolon-delimited text field.
            self.names = []
            self.values = []
            self.target = target
            self.multilines = []
        def add_word(self, word):
            """Append the word to names or values based on its first char"""
            if word.startswith('_'):
                # A data name arriving after values closes the previous
                # name/value group, so flush it before starting a new one.
                if self.values:
                    self.flush()
                self.names.append(word)
            else:
                self.values.append(CifReader.release_quote(word))
        def initiate_multiline(self):
            # Start a fresh semicolon-delimited text field.
            self.multilines = []
        def append_to_multiline(self, string):
            """Add the word to values if they're empty, concatenate otherwise"""
            self.multilines.append(string)
        def terminate_multiline(self):
            # Join the collected lines into one multiline value.
            self.values.append('\n'.join(self.multilines))
        def flush(self):
            """Update the target dict with names and values stored hitherto"""
            d = OrderedDict()
            lv = len(self.values)
            ln = len(self.names)
            if lv == ln == 0:
                pass
            elif ln == 0:
                raise IndexError(f'Orphan values found while '
                                 f'flushing buffer: {self.values}')
            elif lv % ln == 0:
                # Values are distributed round-robin over the names, which
                # handles both plain pairs (lv == ln) and loop_ tables.
                d.update({n: self.values[i::ln] for i, n in enumerate(self.names)})
            else:
                raise IndexError(f'len(values) == {lv} % len(names) == {ln} mus'
                                 f't be zero: {self.values} % {self.names}')
            self.target.update(d)
            # Re-run __init__ to reset the buffer in place, same target.
            self.__init__(target=self.target)
@property
def blocks(self):
"""A dictionary of all blocks names and their positions in cif file."""
return self._blocks(lines=tuple(self.file_lines))
@lru_cache(maxsize=1)
def _blocks(self, lines):
return OrderedDict({l[5:]: i for i, l in enumerate(lines)
if l.startswith('data_')})
    class ReadingState(Enum):
        """This class stores current cif reading state (eg. inside loop etc.)"""
        default = 0      # plain name/value pairs
        loop = 1         # inside a loop_ data table
        loop_header = 2  # the line on which 'loop_' itself appeared
        multiline = 3    # inside a semicolon-delimited text field
def format_dictionary(self, parsed_dict_: Dict[str, List[str]]) \
-> Dict[str, Union[str, List[str]]]:
"""
Reformat a dictionary of parsed data so that the format of every name
and value agrees with the cif core dictionary stored in `CifValidator`.
:param parsed_dict_: Dictionary with data pairs
:return: Data dictionary with correctly formatted data names and values
"""
def item_value_should_be_a_list(k_, v_):
data_entry = self.validator.get(k_, {}) if self.validator else {}
return data_entry.get('_list', '') == 'yes' or len(v_) > 1 \
or (not self.validator and k_ == '_name')
new_dict = OrderedDict()
for k, v in parsed_dict_.items():
if item_value_should_be_a_list(k, v):
new_dict[k] = v
else:
new_dict[k] = v[0]
return new_dict
def parse_lines(self, start, end):
"""
Read the data from :attr:`~.CifIO.lines` numbered `start` to `end`,
interpret it, and return it as an instance of an `OrderedDict`.
:param start: number of the first line which data should be read from
:type start: int
:param end: number of the first line which should not be read anymore
:type end: int
:return: ordered dictionary with name: value pairs for all parsed lines
:rtype: OrderedDict
"""
parsed_data = OrderedDict()
buffer = self.DataBuffer(target=parsed_data)
state = self.ReadingState.default
for line in self.file_lines[start:end]:
line = self.strip_comments(line)
if state is self.ReadingState.loop_header:
state = self.ReadingState.loop
if line.startswith(';') and state != self.ReadingState.multiline:
buffer.initiate_multiline()
state = self.ReadingState.multiline
line = line[1:]
elif line.startswith(';') and state is self.ReadingState.multiline:
buffer.terminate_multiline()
state = self.ReadingState.default
continue
if state is self.ReadingState.multiline:
buffer.append_to_multiline(line)
continue
elif line.lstrip().startswith('loop_'):
buffer.flush()
state = self.ReadingState.loop_header
line = line.lstrip()[5:]
words = self.split_line(line)
if not words and self.ReadingState.multiline:
buffer.append_to_multiline(line)
continue
if words[0].startswith('_') and state is self.ReadingState.default:
buffer.flush()
for word in words:
buffer.add_word(word)
if not words and state is self.ReadingState.loop:
pass
buffer.flush()
formatted_data = self.format_dictionary(parsed_data)
return formatted_data
def split_line(self, line):
"""
Split line into words, keeping words inside quotation marks together.
:param line: line to be split based on whitespace into words
:type line: str
:return: list of words obtained from splitting
:rtype: list
"""
return self.protect_quotes(line).strip().split()
def read(self):
"""
Read the contents of cif currently pointed by :attr:`~.CifIO.file_path`
and block :attr:`~.CifIO.data_block_header` and return them to a dict.
:return: A dictionary containing information read from .cif file.
:rtype: dict
"""
with open(self.file_path, 'r') as cif_file:
self.file_lines = cif_file.read().splitlines()
block_names = self.blocks.keys()
block_starts = self.blocks.values()
block_ends = list(block_starts)[1:] + [None]
for n, s, e in zip(block_names, block_starts, block_ends):
self.data[n] = CifBlock(self.parse_lines(s + 1, e))
return self.data
    @classmethod
    def protect_quotes(cls, string):
        """
        Substitute whitespace between matching quotation marks with substitutes
        and remove the outer quotation marks

        :param string: text in which whitespace will be substituted
        :type string: str
        :return: string where whitespace inside quotes were substituted
        :rtype: str
        """
        # see: https://stackoverflow.com/q/46967465/, https://regex101.com/
        # re.split with a 3-group capturing pattern interleaves non-matches
        # with the groups, so the quoted text lands at indices 2, 6, 10, ...
        split_by_quotes = cls.MATCHING_QUOTES_REGEX.split(string)
        quoted = split_by_quotes[2::4]
        for ws, sub in cls.WHITESPACE_SUBSTITUTES.items():
            quoted = [w.replace(ws, sub) for w in quoted]
        # Write the protected text back into place and reassemble the line.
        split_by_quotes[2::4] = quoted
        return ''.join(split_by_quotes)
@classmethod
def release_quote(cls, string):
"""
Change the substitute characters in supplied `string` back
to whitespace, remove matching outer quotation marks, and return string
:param string: text where whitespace will be reverted and quotes removed
:type string: str
:return: modified output string
:rtype: str
"""
new_str = ''.join(cls.MATCHING_OUTER_QUOTES_REGEX.split(string)[::2])
for ws, sub in cls.WHITESPACE_SUBSTITUTES.items():
new_str = new_str.replace(sub, ws)
return new_str
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
1&1 Cloud Server Compute driver
"""
import json
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionKey
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation, \
Node, NodeAuthPassword, NodeAuthSSHKey
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState
from libcloud.utils.py3 import httplib
from libcloud.compute.base import NodeDriver
from time import sleep
# Default host of the 1&1 Cloud Panel REST API.
API_HOST = 'cloudpanel-api.1and1.com'
# URL path prefix selecting API version 1.
API_VERSION = '/v1/'
# Public names exported by `from ... import *`.
__all__ = [
    'API_HOST',
    'API_VERSION',
    'OneAndOneResponse',
    'OneAndOneConnection',
    'OneAndOneNodeDriver'
]
class OneAndOneResponse(JsonResponse):
    """
    OneAndOne response parsing.
    """

    # HTTP statuses treated as success by success().
    valid_response_codes = [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """Raise on bad credentials; otherwise return an error description."""
        body = self.parse_body()
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(body['message'])
        if 'message' in body:
            return '%s (code: %s)' % (body['message'], self.status)
        # No message field: hand back the raw parsed body.
        return body

    def success(self):
        """Return True when the HTTP status denotes success."""
        return self.status in self.valid_response_codes
class OneAndOneConnection(ConnectionKey):
    """
    Connection class for the 1&1 driver
    """

    host = API_HOST
    api_prefix = API_VERSION
    responseCls = OneAndOneResponse

    def encode_data(self, data):
        """Serialize the request payload as JSON."""
        return json.dumps(data)

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request
        This method adds ``token`` and ``Content-Type`` to the request.
        """
        headers.update({
            'X-Token': self.key,
            'Content-Type': 'application/json',
        })
        return headers

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        """
        Some requests will use the href attribute directly.
        If this is not the case, then we should formulate the
        url based on the action specified.
        If we are using a full url, we need to remove the
        host and protocol components.
        """
        full_action = self.api_prefix + action.lstrip('/')
        parent = super(OneAndOneConnection, self)
        return parent.request(action=full_action,
                              params=params,
                              data=data,
                              headers=headers,
                              method=method,
                              raw=raw)
class OneAndOneNodeDriver(NodeDriver):
    """
    Base OneAndOne node driver.
    """
    connectionCls = OneAndOneConnection
    name = '1and1'
    website = 'http://www.1and1.com'
    type = Provider.ONEANDONE

    # Maps 1&1 server state strings to libcloud NodeState values.
    # NOTE(review): 'POWERING_OFF' -> PENDING and 'POWERED_OFF' -> STOPPING
    # look swapped relative to the state names -- confirm against the 1&1 API
    # state semantics before changing.
    NODE_STATE_MAP = {
        'POWERING_ON': NodeState.STARTING,
        'POWERING_OFF': NodeState.PENDING,
        'POWERED_OFF': NodeState.STOPPING,
        'POWERED_ON': NodeState.RUNNING,
        'REBOOTING': NodeState.REBOOTING,
        'CONFIGURING': NodeState.RECONFIGURING,
        'REMOVING': NodeState.UNKNOWN,
        'DEPLOYING': NodeState.STARTING,
    }

    """
    Core Functions
    """
def list_sizes(self):
"""
Lists all sizes
:return: A list of all configurable node sizes.
:rtype: ``list`` of :class:`NodeSize`
"""
sizes = []
fixed_instances = self._list_fixed_instances()
for value in fixed_instances:
node_size = self._to_node_size(value)
sizes.append(node_size)
return sizes
def list_locations(self):
"""
Lists all locations
:return: ``list`` of :class:`NodeLocation`
:rtype: ``list``
"""
datacenters = self.ex_list_datacenters()
locations = []
for values in datacenters:
node_size = self._to_location(values)
locations.append(node_size)
return locations
def list_images(self, image_type=None):
"""
:return: ``list`` of :class: `NodeImage`
:rtype: ``list``
"""
response = self.connection.request(
action='server_appliances',
method='GET'
)
return self._to_images(response.object, image_type)
def get_image(self, image_id):
response = self.connection.request(
action='server_appliances/%s' % image_id,
method='GET'
)
return self._to_image(response.object)
"""
Node functions
"""
def create_node(self,
name,
image,
ex_fixed_instance_size_id,
location=None,
auth=None,
ex_ip=None,
ex_monitoring_policy_id=None,
ex_firewall_policy_id=None,
ex_loadbalancer_id=None,
ex_description=None,
ex_power_on=None):
"""
Creates a node.
:param name: The name of the new node
:type name: `str`
:param ex_fixed_instance_size_id:
Fixed instance size ID from list_sizes
:type ex_fixed_instance_size_id: ``str``
:param location: 1&1 Data center Location
:type location: `NodeLocation`
:param ex_ip: IP address
:type ex_ip: `str`
:param ex_ssh_key: SSH Key
:type ex_ssh_key: `str`
:param password: Password
:type password: `str`
:param ex_monitoring_policy_id:
:type ex_firewall_policy_id: `str`
:param ex_firewall_policy_id:
:type ex_firewall_policy_id: `str`
:param ex_loadbalancer_id:
:type ex_loadbalancer_id: `str`
:param ex_description:
:type ex_description: `str`
:param ex_power_on:
:type ex_power_on: `bool`
:return: Instance of class ``Node``
:rtype: :class:`Node`
"""
body = {
'name': name,
'appliance_id': image.id,
'hardware': {
'fixed_instance_size_id': ex_fixed_instance_size_id
},
}
if location is not None:
body['datacenter_id'] = location.id
if ex_power_on is not None:
body['power_on'] = ex_power_on
if ex_description is not None:
body['description'] = ex_description
if ex_firewall_policy_id is not None:
body['firewall_policy_id'] = ex_firewall_policy_id
if ex_monitoring_policy_id is not None:
body['monitoring_policy_id'] = ex_monitoring_policy_id
if ex_loadbalancer_id is not None:
body['loadbalancer_id'] = ex_loadbalancer_id
if auth is not None:
if isinstance(auth, NodeAuthPassword):
body['password'] = <PASSWORD>
elif isinstance(auth, NodeAuthSSHKey):
body['rsa_key'] = auth.pubkey
if ex_ip is not None:
body['ip_id'] = ex_ip
response = self.connection.request(
action='servers',
data=body,
method='POST',
)
return self._to_node(response.object)
def list_nodes(self):
"""
List all nodes.
:return: ``list`` of :class:`Node`
:rtype: ``list``
"""
response = self.connection.request(
action='servers',
method='GET'
)
return self._to_nodes(response.object)
def destroy_node(self, node, ex_keep_ips=False):
"""
Destroys a node.
:param node: The node you wish to destroy.
:type volume: :class:`Node`
:param ex_keep_ips: True to keep all IP addresses assigned to the node
:type ex_keep_ips: : ``bool``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
self.ex_shutdown_server(node.id)
self._wait_for_state(node.id, 'POWERED_OFF')
response = self.connection.request(
action='servers/%s' % node.id,
params={'keep_ips': ex_keep_ips},
method='DELETE'
)
return self._to_node(response.object)
def reboot_node(self, node):
"""
Reboots the node.
:param node: The node you wish to destroy.
:type volume: :class:`Node`
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
shutdown_body = {
"action": "REBOOT",
"method": "HARDWARE"
}
response = self.connection.request(
action='servers/%s/status/action' % node.id,
data=shutdown_body,
method='PUT',
)
return self._to_node(response.object)
"""
Extension functions
"""
def ex_rename_server(self, server_id, name=None, description=None):
"""
Renames the server
:param server_id: ID of the server you want to rename
:param name: New name of the server
:type: ``str``
:param description: New description of the server
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {}
if name is not None:
body["name"] = name
if description is not None:
body["description"] = description
response = self.connection.request(
action='servers/%s' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
def ex_get_server_hardware(self, server_id):
"""
Gets all server hardware
:param server_id: Id of the server
:type: ``str``
:return: Server's hardware
:rtype: ``dict``
"""
response = self.connection.request(
action='servers/%s/hardware' % server_id,
method='GET'
)
return response.object
"""
Hardware operations
"""
def ex_modify_server_hardware(self, server_id,
fixed_instance_size_id=None, vcore=None,
cores_per_processor=None, ram=None):
"""
Modifies server's hardware
:param server_id:
:type: ``str``
:param fixed_instance_size_id: Id of the fixed instance size
:type: ``str``
:param vcore: Virtual cores count
:type: ``int``
:param cores_per_processor: Count of cores per procesor
:type: ``int``
:param ram: Amount of ram for the server
:type: ``int``
:return: Instance of class ``Node``
:type: :class: `Node`
"""
body = {}
if fixed_instance_size_id is not None:
body['fixed_instance_size_id'] = fixed_instance_size_id
if vcore is not None:
body['vcore'] = vcore
if cores_per_processor is not None:
body['cores_per_processor'] = cores_per_processor
if ram is not None:
body['ram'] = ram
response = self.connection.request(
action='servers/%s/hardware' % server_id,
data=body,
method='PUT'
)
return self._to_node(response.object)
"""
HDD operations
"""
def ex_modify_server_hdd(self, server_id, hdd_id=None, size=None):
"""
Modifies server hard disk drives
:param server_id: Id of the server
:type: ``str``
:param hdd_id: Id of the hard disk
:type: ``str``
:param size: Size of the hard disk
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
body = {}
if size is not None:
body['size'] = size
response = self.connection.request(
action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id),
data=body,
method='PUT'
)
return self._to_node(response.object)
def ex_add_hdd(self, server_id, size, is_main):
"""
Add a hard disk to the server
:param server_id: Id of the server
:type: ``str``
:param size: Size of the new disk
:type: ``str``
:param is_main: Indicates if the disk is going to be the boot disk
:type: ``boolean``
:return: Instance of class ``Node``
:type: :class: `Node`
"""
body = {
'size': size,
'is_main': is_main
}
response = self.connection.request(
action='servers/%s/hardware/hdds' % server_id,
data=body,
method='POST'
)
return self._to_node(response.object)
def ex_remove_hdd(self, server_id, hdd_id):
"""
Removes existing hard disk
:param server_id: Id of the server
:type: ``str``
:param hdd_id: Id of the hard disk
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
response = self.connection.request(
action='servers/%s/hardware/hdds/%s' % (server_id, hdd_id),
method='DELETE'
)
return self._to_node(response.object)
"""
Data center operations
"""
def ex_list_datacenters(self):
"""
Lists all data centers
:return: List of data centers
:rtype: ``dict``
"""
response = self.connection.request(
action='datacenters',
method='GET'
)
return response.object
def ex_get_server(self, server_id):
"""
Gets a server
:param server_id: Id of the server to be retrieved
:type: ``str``
:return: Instance of class ``Node``
:rtype: :class: `Node`
"""
response = self.connection.request(
action='servers/%s' % (server_id),
method='GET'
)
return self._to_node(response.object)
def ex_shutdown_server(self, server_id, method='SOFTWARE'):
"""
| |
[], "SRL L", 2),
(0xCB, 0x3E) : (0, [], [ MR(indirect="HL", action=SRL()), MW(indirect="HL") ], "SRL (HL)", 2),
(0xCB, 0x3F) : (0, [ SRL("A") ], [], "SRL A", 2),
(0xCB, 0x40) : (0, [ BIT(0, "B") ], [], "BIT 0,B", 2),
(0xCB, 0x41) : (0, [ BIT(0, "C") ], [], "BIT 0,C", 2),
(0xCB, 0x42) : (0, [ BIT(0, "D") ], [], "BIT 0,D", 2),
(0xCB, 0x43) : (0, [ BIT(0, "E") ], [], "BIT 0,E", 2),
(0xCB, 0x44) : (0, [ BIT(0, "H") ], [], "BIT 0,H", 2),
(0xCB, 0x45) : (0, [ BIT(0, "L") ], [], "BIT 0,L", 2),
(0xCB, 0x46) : (0, [], [ MR(indirect="HL", action=BIT(0)) ], "BIT 0,(HL)", 2),
(0xCB, 0x47) : (0, [ BIT(0, "A") ], [], "BIT 0,A", 2),
(0xCB, 0x48) : (0, [ BIT(1, "B") ], [], "BIT 1,B", 2),
(0xCB, 0x49) : (0, [ BIT(1, "C") ], [], "BIT 1,C", 2),
(0xCB, 0x4A) : (0, [ BIT(1, "D") ], [], "BIT 1,D", 2),
(0xCB, 0x4B) : (0, [ BIT(1, "E") ], [], "BIT 1,E", 2),
(0xCB, 0x4C) : (0, [ BIT(1, "H") ], [], "BIT 1,H", 2),
(0xCB, 0x4D) : (0, [ BIT(1, "L") ], [], "BIT 1,L", 2),
(0xCB, 0x4E) : (0, [], [ MR(indirect="HL", action=BIT(1)) ], "BIT 1,(HL)", 2),
(0xCB, 0x4F) : (0, [ BIT(1, "A") ], [], "BIT 1,A", 2),
(0xCB, 0x50) : (0, [ BIT(2, "B") ], [], "BIT 2,B", 2),
(0xCB, 0x51) : (0, [ BIT(2, "C") ], [], "BIT 2,C", 2),
(0xCB, 0x52) : (0, [ BIT(2, "D") ], [], "BIT 2,D", 2),
(0xCB, 0x53) : (0, [ BIT(2, "E") ], [], "BIT 2,E", 2),
(0xCB, 0x54) : (0, [ BIT(2, "H") ], [], "BIT 2,H", 2),
(0xCB, 0x55) : (0, [ BIT(2, "L") ], [], "BIT 2,L", 2),
(0xCB, 0x56) : (0, [], [ MR(indirect="HL", action=BIT(2)) ], "BIT 2,(HL)", 2),
(0xCB, 0x57) : (0, [ BIT(2, "A") ], [], "BIT 2,A", 2),
(0xCB, 0x58) : (0, [ BIT(3, "B") ], [], "BIT 3,B", 2),
(0xCB, 0x59) : (0, [ BIT(3, "C") ], [], "BIT 3,C", 2),
(0xCB, 0x5A) : (0, [ BIT(3, "D") ], [], "BIT 3,D", 2),
(0xCB, 0x5B) : (0, [ BIT(3, "E") ], [], "BIT 3,E", 2),
(0xCB, 0x5C) : (0, [ BIT(3, "H") ], [], "BIT 3,H", 2),
(0xCB, 0x5D) : (0, [ BIT(3, "L") ], [], "BIT 3,L", 2),
(0xCB, 0x5E) : (0, [], [ MR(indirect="HL", action=BIT(3)) ], "BIT 3,(HL)", 2),
(0xCB, 0x5F) : (0, [ BIT(3, "A") ], [], "BIT 3,A", 2),
(0xCB, 0x60) : (0, [ BIT(4, "B") ], [], "BIT 4,B", 2),
(0xCB, 0x61) : (0, [ BIT(4, "C") ], [], "BIT 4,C", 2),
(0xCB, 0x62) : (0, [ BIT(4, "D") ], [], "BIT 4,D", 2),
(0xCB, 0x63) : (0, [ BIT(4, "E") ], [], "BIT 4,E", 2),
(0xCB, 0x64) : (0, [ BIT(4, "H") ], [], "BIT 4,H", 2),
(0xCB, 0x65) : (0, [ BIT(4, "L") ], [], "BIT 4,L", 2),
(0xCB, 0x66) : (0, [], [ MR(indirect="HL", action=BIT(4)) ], "BIT 4,(HL)", 2),
(0xCB, 0x67) : (0, [ BIT(4, "A") ], [], "BIT 4,A", 2),
(0xCB, 0x68) : (0, [ BIT(5, "B") ], [], "BIT 5,B", 2),
(0xCB, 0x69) : (0, [ BIT(5, "C") ], [], "BIT 5,C", 2),
(0xCB, 0x6A) : (0, [ BIT(5, "D") ], [], "BIT 5,D", 2),
(0xCB, 0x6B) : (0, [ BIT(5, "E") ], [], "BIT 5,E", 2),
(0xCB, 0x6C) : (0, [ BIT(5, "H") ], [], "BIT 5,H", 2),
(0xCB, 0x6D) : (0, [ BIT(5, "L") ], [], "BIT 5,L", 2),
(0xCB, 0x6E) : (0, [], [ MR(indirect="HL", action=BIT(5)) ], "BIT 5,(HL)", 2),
(0xCB, 0x6F) : (0, [ BIT(5, "A") ], [], "BIT 5,A", 2),
(0xCB, 0x70) : (0, [ BIT(6, "B") ], [], "BIT 6,B", 2),
(0xCB, 0x71) : (0, [ BIT(6, "C") ], [], "BIT 6,C", 2),
(0xCB, 0x72) : (0, [ BIT(6, "D") ], [], "BIT 6,D", 2),
(0xCB, 0x73) : (0, [ BIT(6, "E") ], [], "BIT 6,E", 2),
(0xCB, 0x74) : (0, [ BIT(6, "H") ], [], "BIT 6,H", 2),
(0xCB, 0x75) : (0, [ BIT(6, "L") ], [], "BIT 6,L", 2),
(0xCB, 0x76) : (0, [], [ MR(indirect="HL", action=BIT(6)) ], "BIT 6,(HL)", 2),
(0xCB, 0x77) : (0, [ BIT(6, "A") ], [], "BIT 6,A", 2),
(0xCB, 0x78) : (0, [ BIT(7, "B") ], [], "BIT 7,B", 2),
(0xCB, 0x79) : (0, [ BIT(7, "C") ], [], "BIT 7,C", 2),
(0xCB, 0x7A) : (0, [ BIT(7, "D") ], [], "BIT 7,D", 2),
(0xCB, 0x7B) : (0, [ BIT(7, "E") ], [], "BIT 7,E", 2),
(0xCB, 0x7C) : (0, [ BIT(7, "H") ], [], "BIT 7,H", 2),
(0xCB, 0x7D) : (0, [ BIT(7, "L") ], [], "BIT 7,L", 2),
(0xCB, 0x7E) : (0, [], [ MR(indirect="HL", action=BIT(7)) ], "BIT 7,(HL)", 2),
(0xCB, 0x7F) : (0, [ BIT(7, "A") ], [], "BIT 7,A", 2),
(0xCB, 0x80) : (0, [ RES(0, "B") ], [], "RES 0,B", 2),
(0xCB, 0x81) : (0, [ RES(0, "C") ], [], "RES 0,C", 2),
(0xCB, 0x82) : (0, [ RES(0, "D") ], [], "RES 0,D", 2),
(0xCB, 0x83) : (0, [ RES(0, "E") ], [], "RES 0,E", 2),
(0xCB, 0x84) : (0, [ RES(0, "H") ], [], "RES 0,H", 2),
(0xCB, 0x85) : (0, [ RES(0, "L") ], [], "RES 0,L", 2),
(0xCB, 0x86) : (0, [], [ MR(indirect="HL", action=RES(0)), MW(indirect="HL") ], "RES 0,(HL)", 2),
(0xCB, 0x87) : (0, [ RES(0, "A") ], [], "RES 0,A", 2),
(0xCB, 0x88) : (0, [ RES(1, "B") ], [], "RES 1,B", 2),
(0xCB, 0x89) : (0, [ RES(1, "C") ], [], "RES 1,C", 2),
(0xCB, 0x8A) : (0, [ RES(1, "D") ], [], "RES 1,D", 2),
(0xCB, 0x8B) : (0, [ RES(1, "E") ], [], "RES 1,E", 2),
(0xCB, 0x8C) : (0, [ RES(1, "H") ], [], "RES 1,H", 2),
(0xCB, 0x8D) : (0, [ RES(1, "L") ], [], "RES 1,L", 2),
(0xCB, 0x8E) : (0, [], [ MR(indirect="HL", action=RES(1)), MW(indirect="HL") ], "RES 1,(HL)", 2),
(0xCB, 0x8F) : (0, [ RES(1, "A") ], [], "RES 1,A", 2),
(0xCB, 0x90) : (0, [ RES(2, "B") ], [], "RES 2,B", 2),
(0xCB, 0x91) : (0, [ RES(2, "C") ], [], "RES 2,C", 2),
(0xCB, 0x92) : (0, [ RES(2, "D") ], [], "RES 2,D", 2),
(0xCB, 0x93) : (0, [ RES(2, "E") ], [], "RES 2,E", 2),
(0xCB, 0x94) : (0, [ RES(2, "H") ], [], "RES 2,H", 2),
(0xCB, 0x95) : (0, [ RES(2, "L") ], [], "RES 2,L", 2),
(0xCB, 0x96) : (0, [], [ MR(indirect="HL", action=RES(2)), MW(indirect="HL") ], "RES 2,(HL)", 2),
(0xCB, 0x97) : (0, [ RES(2, "A") ], [], "RES 2,A", 2),
(0xCB, 0x98) : (0, [ RES(3, "B") ], [], "RES 3,B", 2),
(0xCB, 0x99) : (0, [ RES(3, "C") ], [], "RES 3,C", 2),
(0xCB, 0x9A) : (0, [ RES(3, "D") ], [], "RES 3,D", 2),
(0xCB, 0x9B) : (0, [ RES(3, "E") ], [], "RES 3,E", 2),
(0xCB, 0x9C) : (0, [ RES(3, "H") ], [], "RES 3,H", 2),
(0xCB, 0x9D) : (0, [ RES(3, "L") ], [], "RES 3,L", 2),
(0xCB, 0x9E) : (0, [], [ MR(indirect="HL", action=RES(3)), MW(indirect="HL") ], "RES 3,(HL)", 2),
(0xCB, 0x9F) : (0, [ RES(3, "A") ], [], "RES 3,A", 2),
(0xCB, 0xA0) : (0, [ RES(4, "B") ], [], "RES 4,B", 2),
(0xCB, 0xA1) : (0, [ RES(4, "C") ], [], "RES 4,C", 2),
(0xCB, 0xA2) : (0, [ RES(4, "D") ], [], "RES 4,D", 2),
(0xCB, 0xA3) : (0, [ RES(4, "E") ], [], "RES 4,E", 2),
(0xCB, 0xA4) : (0, [ RES(4, "H") ], [], "RES 4,H", 2),
(0xCB, 0xA5) : (0, [ RES(4, "L") ], [], "RES 4,L", 2),
(0xCB, 0xA6) : (0, [], [ MR(indirect="HL", action=RES(4)), MW(indirect="HL") ], "RES 4,(HL)", 2),
(0xCB, 0xA7) : (0, [ RES(4, "A") ], [], "RES 4,A", | |
<filename>lib/saq/cloudphish.py
# vim: sw=4:ts=4:et:cc=120
# constants used by cloudphish
import datetime
import hashlib
import json
import logging
import os, os.path
import pickle
import uuid
from urllib.parse import urlparse
import saq
from saq.analysis import RootAnalysis
from saq.constants import *
from saq.crawlphish import CrawlphishURLFilter
from saq.database import execute_with_retry, use_db
from saq.error import report_exception
from saq.util import workload_storage_dir, storage_dir_from_uuid
import pymysql.err
__all__ = [
'RESULT_OK',
'RESULT_ERROR',
'KEY_RESULT',
'KEY_DETAILS',
'KEY_STATUS',
'KEY_ANALYSIS_RESULT',
'KEY_HTTP_RESULT',
'KEY_HTTP_MESSAGE',
'KEY_SHA256_CONTENT',
'KEY_SHA256_URL',
'KEY_LOCATION',
'KEY_FILE_NAME',
'KEY_UUID',
'STATUS_NEW',
'STATUS_ANALYZING',
'STATUS_ANALYZED',
'SCAN_RESULT_UNKNOWN',
'SCAN_RESULT_ERROR',
'SCAN_RESULT_CLEAR',
'SCAN_RESULT_ALERT',
'SCAN_RESULT_PASS',
'hash_url',
'get_cached_analysis',
'create_analysis',
'initialize_url_filter',
'analyze_url',
'KEY_DETAILS_URL',
'KEY_DETAILS_SHA256_URL',
'KEY_DETAILS_ALERTABLE',
'KEY_DETAILS_CONTEXT',
'update_cloudphish_result',
'update_content_metadata',
'get_content_metadata',
]
# json schema
# KEY_RESULT: RESULT_OK | RESULT_ERROR
# KEY_DETAILS: str (reason for error)
# KEY_STATUS: STATUS_* (current analysis status of this url)
# KEY_ANALYSIS_RESULT: SCAN_RESULT_* (analysis result of the url)
# KEY_HTTP_RESULT: http status code (200, 404, etc...)
# KEY_HTTP_MESSAGE: server description of status code OR detailed reason for SCAN_RESULT_PASS
# KEY_SHA256_CONTENT: the sha256 hash of the content that was downloaded from this url
# KEY_LOCATION: the server hosting the content
# KEY_FILE_NAME: the name of the file that was downloaded from the url
# Overall result of an API request (distinct from the SCAN_RESULT_* verdicts).
RESULT_OK = 'OK'
RESULT_ERROR = 'ERROR'

# Keys of the JSON result schema (see the schema notes above).
KEY_RESULT = 'result'
KEY_DETAILS = 'details'
KEY_STATUS = 'status'
KEY_ANALYSIS_RESULT = 'analysis_result'
KEY_HTTP_RESULT = 'http_result'
KEY_HTTP_MESSAGE = 'http_message'
KEY_SHA256_CONTENT = 'sha256_content'
KEY_SHA256_URL = 'sha256_url'
KEY_LOCATION = 'location'
KEY_FILE_NAME = 'file_name'
KEY_UUID = 'uuid'

# Lifecycle states of a cloudphish analysis request.
STATUS_NEW = 'NEW'
STATUS_ANALYZING = 'ANALYZING'
STATUS_ANALYZED = 'ANALYZED'

# Possible analysis verdicts for a URL.
SCAN_RESULT_UNKNOWN = 'UNKNOWN'
SCAN_RESULT_ERROR = 'ERROR'
SCAN_RESULT_CLEAR = 'CLEAR'
SCAN_RESULT_ALERT = 'ALERT'
SCAN_RESULT_PASS = 'PASS'

# Keys used inside the RootAnalysis details dict.
KEY_DETAILS_URL = 'url'
KEY_DETAILS_SHA256_URL = 'sha256_url'
KEY_DETAILS_ALERTABLE = 'alertable'
KEY_DETAILS_CONTEXT = 'context'
# some utility functions
@use_db
def update_cloudphish_result(
    sha256_url,
    http_result_code=None,
    http_message=None,
    sha256_content=None,
    result=None,
    status=None,
    db=None, c=None):
    """Update the cloudphish_analysis_results row for the given URL hash.

    Only the columns whose keyword arguments are not None are updated.
    Returns the result of execute_with_retry, or None if there was
    nothing to update.
    """
    sql = []
    params = []
    if http_result_code is not None:
        sql.append('http_result_code = %s')
        params.append(http_result_code)
    if http_message is not None:
        sql.append('http_message = %s')
        # the http_message column is limited to 256 characters
        params.append(http_message[:256])
    if sha256_content is not None:
        sql.append('sha256_content = UNHEX(%s)')
        params.append(sha256_content)
    if result is not None:
        sql.append('result = %s')
        params.append(result)
    if status is not None:
        sql.append('status = %s')
        params.append(status)

    if not sql:
        logging.warning("update_cloudphish_result called for {} but nothing was passed in to update?".format(sha256_url))
        return

    params.append(sha256_url)
    sql = "UPDATE cloudphish_analysis_results SET {} WHERE sha256_url = UNHEX(%s)".format(', '.join(sql))
    # bug fix: the old message used .format(sql, params) with a single
    # placeholder so params was silently dropped; use lazy %-style args
    logging.debug("executing cloudphish update %s with %s", sql, params)
    return execute_with_retry(db, c, sql, tuple(params), commit=True)
@use_db
def update_content_metadata(sha256_content, node, file_name, db, c):
    """Insert or update the (node, file name) metadata for downloaded content.

    Uses an upsert so recording the same content hash twice is safe.
    """
    return execute_with_retry(db, c, """
INSERT INTO cloudphish_content_metadata ( sha256_content, node, name ) VALUES ( UNHEX(%s), %s, %s )
ON DUPLICATE KEY UPDATE node = %s, name = %s""", ( sha256_content, node, file_name, node, file_name ), commit=True)
@use_db
def get_content_metadata(sha256_content, db, c):
    """Return (node, file_name) for the given content hash, or None if unknown."""
    assert isinstance(sha256_content, str) and sha256_content
    c.execute("SELECT node, name FROM cloudphish_content_metadata WHERE sha256_content = UNHEX(%s)",
              sha256_content)
    row = c.fetchone()
    if row is None:
        return None

    # NOTE(review): the 'unicode_internal' codec is deprecated and was removed
    # in Python 3.12 -- confirm the stored encoding and migrate before upgrading.
    return row[0], row[1].decode('unicode_internal')
# global url filter
url_filter = None
def initialize_url_filter():
    """Create and load the module-level crawlphish URL filter."""
    global url_filter
    # initialize the crawlphish url filter
    filter_instance = CrawlphishURLFilter()
    # TODO schedule tasks to reload lists
    filter_instance.load()
    url_filter = filter_instance
    logging.debug("url filter loaded")
def hash_url(url):
    """Returns a sha256 hash of the given URL.

    Non-ASCII characters are ignored so the hash is stable across
    encodings.
    """
    digest = hashlib.sha256(url.encode('ascii', errors='ignore'))
    return digest.hexdigest()
class CloudphishAnalysisResult(object):
    """Holds the full outcome of a cloudphish analysis request for one URL."""

    def __init__(self, result, details, status=None, analysis_result=None, http_result=None, http_message=None,
                 sha256_content=None, sha256_url=None, location=None, file_name=None, uuid=None):
        # overall request outcome (RESULT_OK / RESULT_ERROR)
        self.result = result
        # analysis details dict on success, or an error string
        self.details = details
        self.status = status
        self.analysis_result = analysis_result
        self.http_result = http_result
        self.http_message = http_message
        self.sha256_content = sha256_content
        self.sha256_url = sha256_url
        self.location = location
        self.file_name = file_name
        self.uuid = uuid

    def json(self):
        """Serialize into the documented KEY_* JSON schema."""
        payload = {
            KEY_RESULT: self.result,
            KEY_DETAILS: self.details,
            KEY_STATUS: self.status,
            KEY_ANALYSIS_RESULT: self.analysis_result,
            KEY_HTTP_RESULT: self.http_result,
            KEY_HTTP_MESSAGE: self.http_message,
            KEY_SHA256_CONTENT: self.sha256_content,
            KEY_SHA256_URL: self.sha256_url,
            KEY_LOCATION: self.location,
            KEY_FILE_NAME: self.file_name,
            KEY_UUID: self.uuid,
        }
        return payload

    def __str__(self):
        return "CloudphishAnalysisResult(result:{},details:{},status:{},analysis_result:{},http_result:{}," \
               "http_message:{},sha256_content:{},sha256_url:{},location:{},file_name:{},uuid:{})".format(
               self.result, self.details, self.status, self.analysis_result, self.http_result, self.http_message,
               self.sha256_content, self.sha256_url, self.location, self.file_name, self.uuid)

    def __repr__(self):
        return str(self)
def get_cached_analysis(url):
    """Returns the CloudphishAnalysisResult of the cached analysis or None if analysis is not cached."""
    try:
        return _get_cached_analysis(url)
    except Exception as e:
        message = "Unable to get analysis for url {}: {}".format(url, e)
        logging.error(message)
        report_exception()
        # bug fix: was CloudphishAnalysisResults (trailing 's'), which raised
        # a NameError on the error path instead of returning an error result
        return CloudphishAnalysisResult(RESULT_ERROR, message)
@use_db
def _get_cached_analysis(url, db, c):
    """Look up a previously requested analysis for url; returns None if not cached."""
    sha256 = hash_url(url)
    # have we already requested and/or processed this URL before?
    c.execute("""SELECT
                     ar.status,
                     ar.result,
                     ar.http_result_code,
                     ar.http_message,
                     HEX(ar.sha256_content),
                     cm.node,
                     cm.name,
                     ar.uuid
                 FROM cloudphish_analysis_results AS ar
                 LEFT JOIN cloudphish_content_metadata AS cm ON ar.sha256_content = cm.sha256_content
                 WHERE sha256_url = UNHEX(%s)""", (sha256,))
    row = c.fetchone()
    if row:
        status, result, http_result, http_message, sha256_content, node, file_name, uuid = row
        if file_name:
            # NOTE(review): the 'unicode_internal' codec was removed in
            # Python 3.12 -- verify stored encoding before upgrading.
            file_name = file_name.decode('unicode_internal')

        # prefer the permanent storage location; fall back to the work dir
        storage_dir = storage_dir_from_uuid(uuid)
        if saq.CONFIG['engine']['work_dir'] and not os.path.isdir(storage_dir):
            storage_dir = workload_storage_dir(uuid)

        root_details = None
        if os.path.exists(storage_dir):
            try:
                root = RootAnalysis(storage_dir=storage_dir)
                root.load()
                root_details = root.details
            except Exception as e:
                # this isn't really an error -- another process may be in the middle of processing this url
                # the database contents should be correct though
                logging.debug("unable to load cloudphish analysis {}: {}".format(uuid, e))
                #report_exception()

        # keep track of the most popular URLs
        # old URLs get cleaned out
        c.execute("UPDATE cloudphish_url_lookup SET last_lookup = NOW() WHERE sha256_url = UNHEX(%s)",
                  (sha256,))
        db.commit()

        return CloudphishAnalysisResult(RESULT_OK, # result
                                        root_details, # details
                                        status=status,
                                        analysis_result=result,
                                        http_result=http_result,
                                        http_message=http_message,
                                        sha256_content=sha256_content,
                                        sha256_url=sha256,
                                        location=node,
                                        file_name=file_name,
                                        uuid=uuid)

    # if we have not then we return None
    return None
def create_analysis(url, reprocess, details):
    """Create an analysis request for url, returning an error result on failure."""
    try:
        # the url must at least be parsable
        urlparse(url)
        return _create_analysis(url, reprocess, details)
    except Exception as e:
        message = "unable to create analysis request for url {}: {}".format(url, e)
        logging.error(message)
        report_exception()
        return CloudphishAnalysisResult(RESULT_ERROR, message)
@use_db
def _create_analysis(url, reprocess, details, db, c):
    """Insert a new analysis request for url and schedule it for processing.

    If reprocess is True any *completed* cached analysis is deleted first.
    If a concurrent request already created a row for this url, the
    existing cached analysis is returned instead of creating a duplicate.
    """
    assert isinstance(url, str)
    assert isinstance(reprocess, bool)
    assert isinstance(details, dict)

    sha256_url = hash_url(url)

    if reprocess:
        # if we're reprocessing the url then we clear any existing analysis
        # IF the current analysis has completed
        # it's OK if we delete nothing here
        # bug fix: execute_with_retry requires the db connection and cursor
        # as its first two arguments (compare the INSERT call below)
        execute_with_retry(db, c, """DELETE FROM cloudphish_analysis_results
                                     WHERE sha256_url = UNHEX(%s) AND status = 'ANALYZED'""",
                           (sha256_url,), commit=True)

    # if we're at this point it means that when we asked the database for an entry from cloudphish_analysis_results
    # it was empty, OR, we cleared existing analysis
    # however, we could have multiple requests coming in at the same time for the same url
    # so we need to take that into account here

    # first we'll generate our analysis uuid we're going to use
    _uuid = str(uuid.uuid4())

    # so first we try to insert it
    try:
        execute_with_retry(db, c, ["""INSERT INTO cloudphish_analysis_results ( sha256_url, uuid, insert_date )
                                      VALUES ( UNHEX(%s), %s, NOW() )""",
                                   """INSERT INTO cloudphish_url_lookup ( sha256_url, url )
                                      VALUES ( UNHEX(%s), %s )"""],
                           [(sha256_url, _uuid),
                            (sha256_url, url)], commit=True)
    except pymysql.err.IntegrityError as e:
        # (<class 'pymysql.err.IntegrityError'>--(1062, "Duplicate entry
        # if we get a duplicate key entry here then it means that an entry was created between when we asked
        # and now
        if e.args[0] != 1062:
            raise e

        # so just return that one that was already created
        return get_cached_analysis(url)

    # at this point we've inserted an entry into cloudphish_analysis_results for this url
    # now we add it for processing to the workload
    root = RootAnalysis()
    root.uuid = _uuid
    root.storage_dir = workload_storage_dir(root.uuid)
    root.initialize_storage()
    root.analysis_mode = ANALYSIS_MODE_CLOUDPHISH

    # this is kind of a kludge but,
    # the company_id initially starts out as whatever the default is for this node
    # later, should the analysis turn into an alert, the company_id changes to whatever
    # is stored as the "d" field in the KEY_DETAILS_CONTEXT
    root.company_id = saq.COMPANY_ID
    root.tool = 'ACE - Cloudphish'
    root.tool_instance = saq.SAQ_NODE
    root.alert_type = ANALYSIS_TYPE_CLOUDPHISH
    root.description = 'ACE Cloudphish Detection - {}'.format(url)
    root.event_time = datetime.datetime.now()
    root.details = {
        KEY_DETAILS_URL: url,
        KEY_DETAILS_SHA256_URL: sha256_url,
        # this used to be configurable but it's always true now
        KEY_DETAILS_ALERTABLE: True,
        KEY_DETAILS_CONTEXT: details, # <-- optionally contains the source company_id
    }

    # the context can optionally contain tracking information (sent as the "t" POST variable)
    # this will be a list of dict({'type': o_type, 'value': o_value, 'time': o_time})
    if 't' in root.details[KEY_DETAILS_CONTEXT]:
        tracking = json.loads(root.details[KEY_DETAILS_CONTEXT]['t'])
        for o_dict in tracking:
            o = root.add_observable(o_dict['type'], o_dict['value'], o_time=o_dict['time'])
            o.add_tag("tracked")
            # allow delayed analysis on this observable
            # this currently matters for MessageIDAnalyzer
            o.add_directive(DIRECTIVE_DELAY)

    url_observable = root.add_observable(F_URL, url)
    if url_observable:
        url_observable.add_directive(DIRECTIVE_CRAWL)

    root.save()
    root.schedule()

    return get_cached_analysis(url)
def analyze_url(url, reprocess, ignore_filters, details):
"""Analyze the given url with cloudphish. If reprocess is True then the existing (cached) results are deleted
and the url is processed again."""
assert isinstance(url, str) and url
assert isinstance(reprocess, bool)
assert isinstance(ignore_filters, bool)
assert isinstance(details, dict)
result = None
# if we've not requested reprocessing then we get the cached results if they exist
if not reprocess:
result = get_cached_analysis(url)
if result is None:
# we do not have analysis for this url yet
# now we check to | |
<filename>notion2pg.py
#!/usr/bin/env python
# Notes on formulas
# -----------------
#
# There are four output types of formulas:
#
# 1. string
# 2. number
# 3. date — never a date range, unlike date properties
# 4. boolean
# Notes on rollups
# ----------------
#
# There are four signatures of rollup functions:
#
# 1. any -> array[any]
# * show_original
# * show_unique
# 2. any -> number
# * count / count_all
# * count_values
# * unique / count_unique_values
# * empty / count_empty
# * not_empty / count_not_empty
# * percent_empty
# * percent_not_empty
# 3. number -> number
# * sum
# * average
# * median
# * min
# * max
# * range
# 4. date -> date
# * earliest_date
# * latest_date
# * date_range
#
# Rollups returning arrays aren't implemented. Tables containing such rollups
# can stil be imported but these rollups will be ignored.
#
# Some functions have different names in the API / documentation. This is
# probably a documentation bug. We use the name that we get from the API.
import argparse
import datetime
import json
import logging
import os
import re
import time
import unicodedata
import httpx
import psycopg
logging.basicConfig(
format="%(asctime)s %(message)s",
level=logging.INFO,
)
DATE_RE = re.compile(r"[0-9]{4}-[0-9]{2}-[0-9]{2}")
def maybe_date(value):
    """Fix date values when Notion returns them as datetimes."""
    if value is None:
        return None
    midnight_suffix = "T00:00:00.000+00:00"
    # Switch to str.removesuffix when dropping Python 3.8.
    if value.endswith(midnight_suffix):
        return value[:-len(midnight_suffix)]
    return value
INVALID_IN_NAME_RE = re.compile("[^a-z0-9_]")
# Maximum total delay for a single call is 2047 seconds which should let Notion
# recover from most temporary issues.
DELAY = 1 # before HTTP requests when reading databases, for throttling
RETRIES = 10 # retry queries up to RETRIES times
BACKOFF = 2 # multiply DELAY by BACKOFF between retries
PAGE_SIZE = 64 # lower than the default of 100 to prevent timeouts
TIMEOUT = 120 # seconds :-( Notion's API isn't all that fast
def get_database(database_id, token):
    """Get properties of a Notion database.

    Raises RuntimeError when the Notion API reports an error.
    """
    t0 = time.perf_counter()
    data = httpx.get(
        f"https://api.notion.com/v1/databases/{database_id}",
        headers={
            "Authorization": f"Bearer {token}",
            "Notion-Version": "2021-08-16",
        },
        # consistency fix: use the same generous timeout as iter_database
        # instead of httpx's small default
        timeout=TIMEOUT,
    ).json()
    t1 = time.perf_counter()
    if data["object"] == "error":
        # bug fix: message was copy-pasted from iter_database and wrongly
        # talked about fetching "the next pages"
        logging.error(
            "Failed to fetch the database: Notion API error: HTTP %s: %s",
            data["status"],
            data["message"],
        )
        raise RuntimeError(f"HTTP {data['status']}: {data['message']}")
    logging.info(
        "Fetched Notion database %s in %.1f seconds",
        database_id,
        t1 - t0,
    )
    return data
def iter_database(database_id, token):
    """Iterate over the pages of a Notion database.

    Pages are fetched in batches of PAGE_SIZE, newest first.  Each batch
    is retried up to RETRIES times with exponential backoff
    (delay = DELAY * BACKOFF**attempt) on transport errors, JSON decode
    errors, and Notion API errors.
    """
    has_more = True
    query = {
        "sorts": [{"timestamp": "created_time", "direction": "descending"}],
        "page_size": PAGE_SIZE,
    }
    while has_more:
        t0 = time.perf_counter()
        delay = DELAY
        for retry in range(RETRIES):
            try:
                # sleeping before every attempt also throttles the happy
                # path, keeping us under Notion's rate limits
                time.sleep(delay)
                data = httpx.post(
                    f"https://api.notion.com/v1/databases/{database_id}/query",
                    headers={
                        "Authorization": f"Bearer {token}",
                        "Notion-Version": "2021-08-16",
                    },
                    json=query,
                    timeout=TIMEOUT,
                ).json()
            except httpx.RequestError as exc:
                logging.warning(
                    "Failed to fetch the next pages: HTTP request error: %s",
                    exc,
                )
                if retry == RETRIES - 1:
                    # out of retries: propagate the transport error
                    raise
                else:
                    delay *= BACKOFF
                    continue
            except json.JSONDecodeError as exc:
                logging.warning(
                    "Failed to parse response: JSON decode error: %s",
                    exc,
                )
                if retry == RETRIES - 1:
                    raise
                else:
                    delay *= BACKOFF
                    continue
            # Notion reports API errors inside the JSON body
            if data["object"] == "error":
                logging.error(
                    "Failed to fetch the next pages: Notion API error: HTTP %s: %s",
                    data["status"],
                    data["message"],
                )
                if retry == RETRIES - 1:
                    raise RuntimeError(f"HTTP {data['status']}: {data['message']}")
                else:
                    delay *= BACKOFF
                    continue
            break
        t1 = time.perf_counter()
        assert data["object"] == "list"
        logging.info(
            "Fetched %d Notion pages in %.1f seconds",
            len(data["results"]),
            t1 - t0,
        )
        # cursor-based pagination: keep asking until Notion says no more
        has_more = data["has_more"]
        query["start_cursor"] = data["next_cursor"]
        yield from data["results"]
def get_value(property):
    """Convert a Notion property value to a Python value.

    Args:
        property: a property object as returned by the Notion API; its
            "type" key names the payload key holding the actual value.

    Returns:
        A Python value whose shape depends on the property type (see the
        per-branch comments).  "formula" and "rollup" properties return a
        ``(subtype, value)`` pair so callers can dispatch on the result
        subtype.

    Raises:
        NotImplementedError: for property kinds or formula/rollup subtypes
            not handled here.
    """
    type_ = property["type"]
    if type_ == "title":
        # Optional[str] - empty titles collapse to None
        return "".join(t["plain_text"] for t in property["title"]) or None
    # Basic properties
    elif type_ == "rich_text":
        # Optional[str] - empty text collapses to None
        return "".join(t["plain_text"] for t in property["rich_text"]) or None
    elif type_ == "number":
        # Optional[Number]
        return property["number"]
    elif type_ == "select":
        # Optional[str]
        if property["select"] is None:
            return None
        return property["select"]["name"]
    elif type_ == "multi_select":
        # List[str]
        return [ms["name"] for ms in property["multi_select"]]
    elif type_ == "date":
        # Tuple[Optional[str], Optional[str]] - start and end date or datetime
        if property["date"] is None:
            return None, None
        # "The public API will always return the time_zone field as null when
        # rendering dates and time zone will be displayed as a UTC offset in
        # the start and end date fields."
        assert property["date"]["time_zone"] is None
        # NOTE(review): unlike the formula/rollup date branches below, the
        # raw strings are returned here without maybe_date() — confirm
        # whether that asymmetry is intentional.
        return property["date"]["start"], property["date"]["end"]
    elif type_ == "people":
        # List[str] - UUID of person
        return [p["id"] for p in property["people"]]
    elif type_ == "files":
        # List[str] - URL of the file
        files = []
        for f in property["files"]:
            url = f["file"]["url"]
            # Remove authentication information from files uploaded to Notion;
            # it is too short lived to be worth storing in a database.
            if "/secure.notion-static.com/" in url:
                url = url.partition("?")[0]
            files.append(url)
        return files
    elif type_ == "checkbox":
        # bool
        return property["checkbox"]
    elif type_ == "url":
        # Optional[str]
        return property["url"]
    elif type_ == "email":
        # Optional[str]
        return property["email"]
    elif type_ == "phone_number":
        # Optional[str]
        return property["phone_number"]
    # Advanced properties
    elif type_ == "formula":
        formula = property["formula"]
        subtype = formula["type"]
        if subtype == "string":
            # str
            return ("string", formula["string"])
        elif subtype == "number":
            # Optional[Number]
            return ("number", formula["number"])
        elif subtype == "date":
            # Tuple[Optional[str], NoneType] - start date or datetime
            if formula["date"] is None:
                return ("date", (None, None))
            assert formula["date"]["time_zone"] is None
            assert formula["date"]["end"] is None
            # Return the same format for consistency, even if end date is never set.
            # maybe_date() is defined elsewhere in this module — presumably
            # it normalizes the ISO 8601 string; TODO confirm.
            start_date = maybe_date(formula["date"]["start"])
            return ("date", (start_date, None))
        elif subtype == "boolean":
            # bool
            return ("boolean", formula["boolean"])
        raise NotImplementedError(f"unsupported formula: {json.dumps(formula)}")
    elif type_ == "relation":
        # List[str] - UUID of related object
        return [r["id"] for r in property["relation"]]
    elif type_ == "rollup":
        rollup = property["rollup"]
        subtype = rollup["type"]
        if subtype == "array":
            # Skip rollups returning arrays
            return ("array", [])
        elif subtype == "number":
            # Optional[Number]
            return ("number", rollup["number"])
        elif subtype == "date":
            # Tuple[Optional[str], Optional[str]] - start and end date or datetime
            if rollup["date"] is None:
                return ("date", (None, None))
            assert rollup["date"]["time_zone"] is None
            start_date = maybe_date(rollup["date"]["start"])
            end_date = maybe_date(rollup["date"]["end"])
            return ("date", (start_date, end_date))
        raise NotImplementedError(f"unsupported rollup: {json.dumps(rollup)}")
    elif type_ == "created_time":
        # Passed through verbatim from the API.
        return property["created_time"]
    elif type_ == "created_by":
        # UUID of the creating user.
        return property["created_by"]["id"]
    elif type_ == "last_edited_time":
        # Passed through verbatim from the API.
        return property["last_edited_time"]
    elif type_ == "last_edited_by":
        # UUID of the last editing user.
        return property["last_edited_by"]["id"]
    raise NotImplementedError(f"unsupported property: {json.dumps(property)}")
def convert(property, values):
"""Convert a Notion property to a PostgreSQL column."""
type_ = property["type"]
if type_ == "title":
return "text", values
# Basic properties
elif type_ == "rich_text":
return "text", values
elif type_ == "number":
if all(isinstance(value, int) for value in values if value is not None):
return "integer", values
else:
return "double precision", values
elif type_ == "select":
return "text", values
elif type_ == "multi_select":
return "text[]", values
elif type_ == "date":
if any(value[1] is not None for value in values):
# This is a range of dates or datetimes.
if all(
DATE_RE.fullmatch(value[0]) for value in values if value[0] is not None
) and all(
DATE_RE.fullmatch(value[1]) for value in values if value[1] is not None
):
return "daterange", values
else:
return "tstzrange", values
else:
# This is a date or datetime.
values = [value[0] for value in values]
if all(DATE_RE.fullmatch(value) for value in values if value is not None):
return "date", values
else:
return "timestamp with time zone", values
elif type_ == "people":
if all(len(value) <= 1 for value in values):
return "uuid", [value[0] if value else None for value in values]
else:
return "uuid[]", values
elif type_ == "files":
if all(len(value) <= 1 for value in values):
return "text", [value[0] if value else None for value in values]
else:
return "text[]", values
elif type_ == "checkbox":
return "boolean", values
elif type_ == "url":
return "text", values
elif type_ == "email":
return "text", values
elif type_ == "phone_number":
return "text", values
# Advanced properties
elif type_ == "formula":
(subtype,) = set(value[0] for value in values)
values = list(value[1] for value in values)
if subtype == "string":
return "text", values
elif subtype == "number":
return convert({"type": "number"}, values)
elif subtype == "date":
return convert({"type": "date"}, values)
elif subtype == "boolean":
return "boolean", values
formula = property["formula"]
raise NotImplementedError(f"unsupported formula: {json.dumps(formula)}")
elif type_ == "relation":
if all(len(value) <= 1 for value in values):
return "uuid", [value[0] if value else None for value in values]
else:
return "uuid[]", values
elif type_ == | |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default data for dynamic routing game."""
from open_spiel.python.games import dynamic_routing_utils
# The line network is a very simple network (O -> A -> D) with the goal of
# testing the routing game. There is no possible action and all cars will go
# from node O (being at node O means being on the link bef_O->O) to node D
# (being at node D means being on the link D->aft_D).
# Directed adjacency: bef_O -> O -> A -> D -> aft_D (see comment above for
# the role of the virtual bef_O / aft_D links).
# NOTE(review): "bef_O" maps to a bare string while every other node maps
# to a list of successors — presumably Network accepts both forms; confirm.
LINE_NETWORK = dynamic_routing_utils.Network({
    "bef_O": "O",
    "O": ["A"],
    "A": ["D"],
    "D": ["aft_D"],
    "aft_D": []
})
# Demand expressed as individual vehicles: two cars traveling O -> D.
LINE_NETWORK_VEHICLES_DEMAND = [
    dynamic_routing_utils.Vehicle("bef_O->O", "D->aft_D") for _ in range(2)
]
# Demand expressed as an aggregated origin-destination flow; the last two
# arguments are presumably (departure time, volume) = (0, 100) — confirm
# against OriginDestinationDemand's signature.
LINE_NETWORK_OD_DEMAND = [
    dynamic_routing_utils.OriginDestinationDemand("bef_O->O", "D->aft_D", 0,
                                                  100)
]
# The Braess network comes from the Braess paradox: Braess, D., 1968. "Uber ein
# Paradoxon aus der Verkehrsplanung". Unternehmensforschung 12, 258-268.
BRAESS_NUM_PLAYER = 5
# Topology O -> A -> {B, C} -> D -> E, including the A->B / C->D congested
# links and the B->C shortcut that triggers the paradox.  Per-link data:
# node_position (drawing coordinates), bpr_a/bpr_b (congestion
# coefficients), capacity, and free_flow_travel_time.
BRAESS_NETWORK = dynamic_routing_utils.Network(
    {
        "O": "A",
        "A": ["B", "C"],
        "B": ["C", "D"],
        "C": ["D"],
        "D": ["E"],
        "E": []
    },
    node_position={
        "O": (0, 0),
        "A": (1, 0),
        "B": (2, 1),
        "C": (2, -1),
        "D": (3, 0),
        "E": (4, 0)
    },
    # Only A->B and C->D are congestible (non-zero `a` coefficient).
    bpr_a_coefficient={
        "O->A": 0,
        "A->B": 1.0,
        "A->C": 0,
        "B->C": 0,
        "B->D": 0,
        "C->D": 1.0,
        "D->E": 0
    },
    bpr_b_coefficient={
        "O->A": 1.0,
        "A->B": 1.0,
        "A->C": 1.0,
        "B->C": 1.0,
        "B->D": 1.0,
        "C->D": 1.0,
        "D->E": 1.0
    },
    # Every link can hold all players at once.
    capacity={
        "O->A": BRAESS_NUM_PLAYER,
        "A->B": BRAESS_NUM_PLAYER,
        "A->C": BRAESS_NUM_PLAYER,
        "B->C": BRAESS_NUM_PLAYER,
        "B->D": BRAESS_NUM_PLAYER,
        "C->D": BRAESS_NUM_PLAYER,
        "D->E": BRAESS_NUM_PLAYER
    },
    free_flow_travel_time={
        "O->A": 0,
        "A->B": 1.0,
        "A->C": 2.0,
        "B->C": 0.25,
        "B->D": 2.0,
        "C->D": 1.0,
        "D->E": 0
    })
# Demand expressed as individual vehicles: every player travels O -> E.
BRAESS_NETWORK_VEHICLES_DEMAND = [
    dynamic_routing_utils.Vehicle("O->A", "D->E")
    for _ in range(BRAESS_NUM_PLAYER)
]
# Same demand expressed as one aggregated origin-destination flow.
BRAESS_NETWORK_OD_DEMAND = [
    dynamic_routing_utils.OriginDestinationDemand("O->A", "D->E", 0,
                                                  BRAESS_NUM_PLAYER)
]
# The Sioux Falls data comes from "An Efficient Approach to Solving the Road
# Network Equilibrium Traffic Assignment Problem" by <NAME> and <NAME>.
# Morlok (http://doi.org/10.1016/0041-1647(75)90030-1). We scale uniformly the
# data to decrease the number of time steps needed to cross the network. The
# demand and congestion functions data has been copied and pasted from the
# paper. The node position has been created from the paper's figure with a
# simple scale.
# Directed adjacency list of the 24-node Sioux Falls network:
# node -> list of successor nodes.
__SIOUX_FALLS_ADJACENCY = {
    "1": ["2", "3"],
    "2": ["1", "6"],
    "3": ["1", "4", "12"],
    "4": ["3", "5", "11"],
    "5": ["4", "6", "9"],
    "6": ["2", "5", "8"],
    "7": ["8", "18"],
    "8": ["6", "7", "9", "16"],
    "9": ["5", "8", "10"],
    "10": ["9", "11", "15", "16", "17"],
    "11": ["4", "10", "12", "14"],
    "12": ["3", "11", "13"],
    "13": ["12", "24"],
    "14": ["11", "15", "23"],
    "15": ["10", "14", "19", "22"],
    "16": ["8", "10", "17", "18"],
    "17": ["10", "16", "19"],
    "18": ["7", "16", "20"],
    "19": ["15", "17", "20"],
    "20": ["18", "19", "21", "22"],
    "21": ["20", "22", "24"],
    "22": ["15", "20", "21", "23"],
    "23": ["14", "22", "24"],
    "24": ["13", "21", "23"]
}
# Free-flow travel time per directed link, in the paper's (uniformly
# scaled) time units — see the scaling note above.
__SIOUX_FALLS_FREE_FLOW_TRAVEL_TIME = {
    "1->2": 6, "1->3": 4, "2->1": 6, "2->6": 5, "3->1": 4, "3->4": 4,
    "3->12": 4, "4->3": 4, "4->5": 2, "4->11": 6, "5->4": 2, "5->6": 4,
    "5->9": 5, "6->2": 5, "6->5": 4, "6->8": 2, "7->8": 3, "7->18": 2,
    "8->6": 2, "8->7": 3, "8->9": 10, "8->16": 5, "9->5": 5, "9->8": 10,
    "9->10": 3, "10->9": 3, "10->11": 5, "10->15": 6, "10->16": 4, "10->17": 8,
    "11->4": 6, "11->10": 5, "11->12": 6, "11->14": 4, "12->3": 4, "12->11": 6,
    "12->13": 3, "13->12": 3, "13->24": 4, "14->11": 4, "14->15": 5,
    "14->23": 4, "15->10": 6, "15->14": 5, "15->19": 3, "15->22": 3, "16->8": 5,
    "16->10": 4, "16->17": 2, "16->18": 3, "17->10": 8, "17->16": 2,
    "17->19": 2, "18->7": 2, "18->16": 3, "18->20": 4, "19->15": 3, "19->17": 2,
    "19->20": 4, "20->18": 4, "20->19": 4, "20->21": 6, "20->22": 5,
    "21->20": 6, "21->22": 2, "21->24": 3, "22->15": 3, "22->20": 5,
    "22->21": 2, "22->23": 4, "23->14": 4, "23->22": 4, "23->24": 2,
    "24->13": 4, "24->21": 3, "24->23": 2
}
# BPR `a` congestion coefficient per link, uniformly scaled by 1e-18
# (part of the scaling described in the comment block above).
__SIOUX_FALLS_BPR_A_COEFF = {
    "1->2": 2 * 1e-18,
    "1->3": 2 * 1e-18,
    "2->1": 2 * 1e-18,
    "2->6": 1240 * 1e-18,
    "3->1": 2 * 1e-18,
    "3->4": 6 * 1e-18,
    "3->12": 2 * 1e-18,
    "4->3": 6 * 1e-18,
    "4->5": 3 * 1e-18,
    "4->11": 1550 * 1e-18,
    "5->4": 3 * 1e-18,
    "5->6": 1000 * 1e-18,
    "5->9": 75 * 1e-18,
    "6->2": 1240 * 1e-18,
    "6->5": 1000 * 1e-18,
    "6->8": 520 * 1e-18,
    "7->8": 119 * 1e-18,
    "7->18": 1 * 1e-18,
    "8->6": 520 * 1e-18,
    "8->7": 119 * 1e-18,
    "8->9": 2306 * 1e-18,
    "8->16": 1156 * 1e-18,
    "9->5": 75 * 1e-18,
    "9->8": 2306 * 1e-18,
    "9->10": 11 * 1e-18,
    "10->9": 11 * 1e-18,
    "10->11": 75 * 1e-18,
    "10->15": 26 * 1e-18,
    "10->16": 1080 * 1e-18,
    "10->17": 1929 * 1e-18,
    "11->4": 1550 * 1e-18,
    "11->10": 75 * 1e-18,
    "11->12": 1550 * 1e-18,
    "11->14": 1061 * 1e-18,
    "12->3": 2 * 1e-18,
    "12->11": 1550 * 1e-18,
    "12->13": 1 * 1e-18,
    "13->12": 1 * 1e-18,
    "13->24": 893 * 1e-18,
    "14->11": 1061 * 1e-18,
    "14->15": 1085 * 1e-18,
    "14->23": 1020 * 1e-18,
    "15->10": 26 * 1e-18,
    "15->14": 1085 * 1e-18,
    "15->19": 10 * 1e-18,
    "15->22": 53 * 1e-18,
    "16->8": 1156 * 1e-18,
    "16->10": 1080 * 1e-18,
    "16->17": 401 * 1e-18,
    "16->18": 3 * 1e-18,
    "17->10": 1929 * 1e-18,
    "17->16": 401 * 1e-18,
    "17->19": 553 * 1e-18,
    "18->7": 1 * 1e-18,
    "18->16": 3 * 1e-18,
    "18->20": 2 * 1e-18,
    "19->15": 10 * 1e-18,
    "19->17": 553 * 1e-18,
    "19->20": 957 * 1e-18,
    "20->18": 2 * 1e-18,
    "20->19": 957 * 1e-18,
    "20->21": 1373 * 1e-18,
    "20->22": 1130 * 1e-18,
    "21->20": 1373 * 1e-18,
    "21->22": 401 * 1e-18,
    "21->24": 789 * 1e-18,
    "22->15": 53 * 1e-18,
    "22->20": 1130 * 1e-18,
    "22->21": 401 * 1e-18,
    "22->23": 960 * 1e-18,
    "23->14": 1020 * 1e-18,
    "23->22": 960 * 1e-18,
    "23->24": 451 * 1e-18,
    "24->13": 893 * 1e-18,
    "24->21": 789 * 1e-18,
    "24->23": 451 * 1e-18,
}
# 2-D drawing coordinates for each node, created from the paper's figure
# with a simple scale (see comment above).
__SIOUX_FALLS_NODES = {
    "1": (0, 9), "2": (5, 9), "3": (0, 8), "4": (1, 8), "5": (3, 8),
    "6": (5, 8), "7": (7, 6), "8": (5, 6), "9": (3, 6), "10": (3, 5),
    "11": (1, 5), "12": (0, 5), "13": (0, 0), "14": (1, 2), "15": (3, 2),
    "16": (5, 5), "17": (5, 4), "18": (7, 5), "19": (5, 2), "20": (5, 0),
    "21": (3, 0), "22": (3, 1), "23": (1, 1), "24": (1, 0)
}
__SIOUX_FALLS_DEMAND_AUX = [
("2", "1", 1), ("3", "1", 1), ("4", "1", 5), ("5", "1", 2),
("6", "1", 3), ("7", "1", 5), ("8", "1", 8), ("9", "1", 5),
("10", "1", 13), ("11", "1", 5), ("12", "1", 2), ("13", "1", 5),
("14", "1", 3), ("15", "1", 5), ("16", "1", 5), ("17", "1", 4),
("18", "1", 1), ("19", "1", 3), ("20", "1", 3), ("21", "1", 1),
("22", "1", 4), ("23", "1", 3), ("24", "1", 1), ("1", "2", 1),
("3", "2", 1), ("4", "2", 2), ("5", "2", 1), ("6", "2", 4),
("7", "2", 2), ("8", "2", 4), ("9", "2", 2), ("10", "2", 6),
("11", "2", 2), ("12", "2", 1), ("13", "2", 3), ("14", "2", 1),
("15", "2", 1), ("16", "2", 4), ("17", "2", 2), ("19", "2", 1),
("20", "2", 1), ("22", "2", 1), ("1", "3", 1), ("2", "3", 1),
("4", "3", 2), ("5", "3", 1), ("6", "3", 3), ("7", "3", 1),
("8", "3", 2), ("9", "3", 1), ("10", "3", 3), ("11", "3", 3),
("12", "3", 2), ("13", "3", 1), ("14", "3", 1), ("15", "3", 1),
("16", "3", 2), ("17", "3", 1), ("22", "3", 1), ("23", "3", 1),
("1", "4", 5), ("2", "4", 2), ("3", "4", 2), ("5", "4", 5),
("6", "4", 4), ("7", "4", 4), ("8", "4", 7), ("9", "4", 7),
("10", "4", 12), ("11", "4", 14), ("12", "4", 6), ("13", "4", 6),
("14", "4", 5), ("15", "4", 5), ("16", "4", 8), ("17", "4", 5),
("18", "4", 1), ("19", "4", 2), ("20", "4", 3), ("21", "4", 2),
("22", "4", 4), ("23", "4", 5), ("24", "4", 2), ("1", "5", 2),
("2", "5", 1), ("3", "5", 1), ("4", "5", | |
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I16:
self.Sub = iprot.readI16()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I16:
self.Subsub = iprot.readI16()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this MVersion to the given Thrift output protocol.

        Auto-generated Thrift code: field names, types, ids and their
        order must match the IDL — do not hand-edit.
        """
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MVersion')
        # Unset (None) fields are simply omitted from the wire format.
        if self.Major is not None:
            oprot.writeFieldBegin('Major', TType.I16, 1)
            oprot.writeI16(self.Major)
            oprot.writeFieldEnd()
        if self.Minor is not None:
            oprot.writeFieldBegin('Minor', TType.I16, 2)
            oprot.writeI16(self.Minor)
            oprot.writeFieldEnd()
        if self.Sub is not None:
            oprot.writeFieldBegin('Sub', TType.I16, 3)
            oprot.writeI16(self.Sub)
            oprot.writeFieldEnd()
        if self.Subsub is not None:
            oprot.writeFieldBegin('Subsub', TType.I16, 4)
            oprot.writeI16(self.Subsub)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
def validate(self):
if self.Major is None:
raise TProtocolException(message='Required field Major is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MDependency(object):
    """A dependency of an MMU on another versioned resource.

    Auto-generated Thrift struct — field ids, types and (de)serialization
    order must match the IDL; do not hand-edit read()/write().

    Attributes:
     - ID: identifier of the dependency (required, enforced by validate())
     - Type: dependency type code, Thrift i32 (required)
     - MinVersion: minimum compatible MVersion (required)
     - MaxVersion: maximum compatible MVersion (required)
     - ExcludedVersions: optional list of MVersion values to exclude
     - Name: optional human-readable name
    """

    def __init__(self, ID=None, Type=None, MinVersion=None, MaxVersion=None, ExcludedVersions=None, Name=None,):
        self.ID = ID
        self.Type = Type
        self.MinVersion = MinVersion
        self.MaxVersion = MaxVersion
        self.ExcludedVersions = ExcludedVersions
        self.Name = Name

    def read(self, iprot):
        """Deserialize this struct from the given Thrift input protocol."""
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.ID = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.Type = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.MinVersion = MVersion()
                    self.MinVersion.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRUCT:
                    self.MaxVersion = MVersion()
                    self.MaxVersion.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.LIST:
                    self.ExcludedVersions = []
                    (_etype68, _size65) = iprot.readListBegin()
                    for _i69 in range(_size65):
                        _elem70 = MVersion()
                        _elem70.read(iprot)
                        self.ExcludedVersions.append(_elem70)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.Name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given Thrift output protocol."""
        # Fast path: delegate to the C-accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MDependency')
        # Unset (None) fields are simply omitted from the wire format.
        if self.ID is not None:
            oprot.writeFieldBegin('ID', TType.STRING, 1)
            oprot.writeString(self.ID.encode('utf-8') if sys.version_info[0] == 2 else self.ID)
            oprot.writeFieldEnd()
        if self.Type is not None:
            oprot.writeFieldBegin('Type', TType.I32, 2)
            oprot.writeI32(self.Type)
            oprot.writeFieldEnd()
        if self.MinVersion is not None:
            oprot.writeFieldBegin('MinVersion', TType.STRUCT, 3)
            self.MinVersion.write(oprot)
            oprot.writeFieldEnd()
        if self.MaxVersion is not None:
            oprot.writeFieldBegin('MaxVersion', TType.STRUCT, 4)
            self.MaxVersion.write(oprot)
            oprot.writeFieldEnd()
        if self.ExcludedVersions is not None:
            oprot.writeFieldBegin('ExcludedVersions', TType.LIST, 5)
            oprot.writeListBegin(TType.STRUCT, len(self.ExcludedVersions))
            for iter71 in self.ExcludedVersions:
                iter71.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.Name is not None:
            oprot.writeFieldBegin('Name', TType.STRING, 6)
            oprot.writeString(self.Name.encode('utf-8') if sys.version_info[0] == 2 else self.Name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any required field is missing."""
        if self.ID is None:
            raise TProtocolException(message='Required field ID is unset!')
        if self.Type is None:
            raise TProtocolException(message='Required field Type is unset!')
        if self.MinVersion is None:
            raise TProtocolException(message='Required field MinVersion is unset!')
        if self.MaxVersion is None:
            raise TProtocolException(message='Required field MaxVersion is unset!')
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class MMUDescription(object):
"""
Attributes:
- Name
- ID
- AssemblyName
- MotionType
- Language
- Author
- Version
- Prerequisites
- Properties
- Dependencies
- Events
- LongDescription
- ShortDescription
- Parameters
- SceneParameters
- Vendor
- VendorDomain
- MmuUrl
- UpdateUrl
"""
def __init__(self, Name=None, ID=None, AssemblyName=None, MotionType=None, Language=None, Author=None, Version=None, Prerequisites=None, Properties=None, Dependencies=None, Events=None, LongDescription=None, ShortDescription=None, Parameters=None, SceneParameters=None, Vendor=None, VendorDomain=None, MmuUrl=None, UpdateUrl=None,):
self.Name = Name
self.ID = ID
self.AssemblyName = AssemblyName
self.MotionType = MotionType
self.Language = Language
self.Author = Author
self.Version = Version
self.Prerequisites = Prerequisites
self.Properties = Properties
self.Dependencies = Dependencies
self.Events = Events
self.LongDescription = LongDescription
self.ShortDescription = ShortDescription
self.Parameters = Parameters
self.SceneParameters = SceneParameters
self.Vendor = Vendor
self.VendorDomain = VendorDomain
self.MmuUrl = MmuUrl
self.UpdateUrl = UpdateUrl
    def read(self, iprot):
        """Deserialize this MMUDescription from the given Thrift protocol.

        Auto-generated Thrift code: field ids/types must match the IDL —
        do not hand-edit.  Note that field ids 5 and 10 are absent from
        the IDL and fall through to the generic skip branch.
        """
        # Fast path: delegate to the C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.Name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.ID = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.AssemblyName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.MotionType = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.Language = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRING:
                    self.Author = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRING:
                    self.Version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                # List of MConstraint structs.
                if ftype == TType.LIST:
                    self.Prerequisites = []
                    (_etype75, _size72) = iprot.readListBegin()
                    for _i76 in range(_size72):
                        _elem77 = MOSIM.mmi.constraints.ttypes.MConstraint()
                        _elem77.read(iprot)
                        self.Prerequisites.append(_elem77)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                # Map of string -> string.
                if ftype == TType.MAP:
                    self.Properties = {}
                    (_ktype79, _vtype80, _size78) = iprot.readMapBegin()
                    for _i82 in range(_size78):
                        _key83 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val84 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.Properties[_key83] = _val84
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 12:
                # List of MDependency structs.
                if ftype == TType.LIST:
                    self.Dependencies = []
                    (_etype88, _size85) = iprot.readListBegin()
                    for _i89 in range(_size85):
                        _elem90 = MDependency()
                        _elem90.read(iprot)
                        self.Dependencies.append(_elem90)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 13:
                # List of strings.
                if ftype == TType.LIST:
                    self.Events = []
                    (_etype94, _size91) = iprot.readListBegin()
                    for _i95 in range(_size91):
                        _elem96 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.Events.append(_elem96)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 14:
                if ftype == TType.STRING:
                    self.LongDescription = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 15:
                if ftype == TType.STRING:
                    self.ShortDescription = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 16:
                # List of MParameter structs.
                if ftype == TType.LIST:
                    self.Parameters = []
                    (_etype100, _size97) = iprot.readListBegin()
                    for _i101 in range(_size97):
                        _elem102 = MOSIM.mmi.core.ttypes.MParameter()
                        _elem102.read(iprot)
                        self.Parameters.append(_elem102)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 17:
                # List of MParameter structs.
                if ftype == TType.LIST:
                    self.SceneParameters = []
                    (_etype106, _size103) = iprot.readListBegin()
                    for _i107 in range(_size103):
                        _elem108 = MOSIM.mmi.core.ttypes.MParameter()
                        _elem108.read(iprot)
                        self.SceneParameters.append(_elem108)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 18:
                if ftype == TType.STRING:
                    self.Vendor = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 19:
                if ftype == TType.STRING:
                    self.VendorDomain = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 20:
                if ftype == TType.STRING:
                    self.MmuUrl = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 21:
                if ftype == TType.STRING:
                    self.UpdateUrl = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('MMUDescription')
if self.Name is not None:
oprot.writeFieldBegin('Name', TType.STRING, 1)
oprot.writeString(self.Name.encode('utf-8') if sys.version_info[0] == 2 else self.Name)
oprot.writeFieldEnd()
if self.ID is not None:
oprot.writeFieldBegin('ID', TType.STRING, 2)
oprot.writeString(self.ID.encode('utf-8') if sys.version_info[0] == 2 else self.ID)
oprot.writeFieldEnd()
if self.AssemblyName is not None:
oprot.writeFieldBegin('AssemblyName', TType.STRING, 3)
oprot.writeString(self.AssemblyName.encode('utf-8') if sys.version_info[0] == 2 else self.AssemblyName)
oprot.writeFieldEnd()
if self.MotionType is not None:
oprot.writeFieldBegin('MotionType', TType.STRING, 4)
oprot.writeString(self.MotionType.encode('utf-8') if sys.version_info[0] == 2 else self.MotionType)
oprot.writeFieldEnd()
if self.Language is not None:
oprot.writeFieldBegin('Language', TType.STRING, 6)
oprot.writeString(self.Language.encode('utf-8') if sys.version_info[0] == 2 else self.Language)
oprot.writeFieldEnd()
if self.Author is not None:
oprot.writeFieldBegin('Author', TType.STRING, 7)
oprot.writeString(self.Author.encode('utf-8') if sys.version_info[0] == 2 else self.Author)
oprot.writeFieldEnd()
if self.Version is not None:
oprot.writeFieldBegin('Version', TType.STRING, 8)
oprot.writeString(self.Version.encode('utf-8') if sys.version_info[0] == 2 else self.Version)
oprot.writeFieldEnd()
if self.Prerequisites is not None:
oprot.writeFieldBegin('Prerequisites', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.Prerequisites))
for iter109 in self.Prerequisites:
iter109.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.Properties is not None:
oprot.writeFieldBegin('Properties', TType.MAP, 11)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.Properties))
for kiter110, viter111 in self.Properties.items():
oprot.writeString(kiter110.encode('utf-8') if sys.version_info[0] == 2 else kiter110)
oprot.writeString(viter111.encode('utf-8') if sys.version_info[0] == 2 else viter111)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.Dependencies is not None:
oprot.writeFieldBegin('Dependencies', | |
out_channels: int,
kernel_size: typing.Union[
int, typing.Tuple[int, int], typing.Tuple[int, int, int]
] = 3,
stride: typing.Union[
int, typing.Tuple[int, int], typing.Tuple[int, int, int]
] = 1,
padding: typing.Union[
str, int, typing.Tuple[int, int], typing.Tuple[int, int, int]
] = "same",
dilation: typing.Union[
int, typing.Tuple[int, int], typing.Tuple[int, int, int]
] = 1,
bias: bool = True,
padding_mode: str = "zeros",
):
if out_channels % in_channels != 0:
raise ValueError(
"Depthwise separable convolution needs out_channels divisible by in_channels without remainder."
)
super().__init__(
module_name="Conv",
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=bias,
padding_mode=padding_mode,
)
class SeparableConv(torch.nn.Module):
    """Separable convolution layer (a.k.a. depthwise separable convolution).

    Based on input shape it either creates 1D, 2D or 3D separable convolution
    for inputs of shape 3D, 4D, 5D respectively (including batch as first dimension).

    Additional `same` `padding` mode was added and set as default.
    This mode preserves all dimensions excepts channels.

    `kernel_size` got a default value of `3`.

    .. note::
        **IMPORTANT**: `same` currently works only for odd values of `kernel_size`,
        `dilation` and `stride`. If any of those is even you should explicitly pad
        your input asymmetrically with `torch.functional.pad` or a-like.

    Parameters
    ----------
    in_channels : int
        Number of channels in the input image
    out_channels : int
        Number of channels produced by the convolution
    kernel_size : Union[int, Tuple[int, int], Tuple[int, int, int]], optional
        Size of the convolving kernel. User can specify `int` or 2-tuple (for `Conv2d`)
        or 3-tuple (for `Conv3d`). Default: `3`
    stride : Union[int, Tuple[int, int], Tuple[int, int, int]], optional
        Stride of the convolution. User can specify `int` or 2-tuple (for `Conv2d`)
        or 3-tuple (for `Conv3d`). Default: `1`
    padding : Union[str, int, Tuple[int, int], Tuple[int, int, int]], optional
        Padding added to both sides of the input. String "same" can be used with odd
        `kernel_size`, `stride` and `dilation`.
        User can specify `int` or 2-tuple (for `Conv2d`)
        or 3-tuple (for `Conv3d`). Default: `same`
    dilation : Union[int, Tuple[int, int], Tuple[int, int, int]], optional
        Spacing between kernel elements. User can specify `int` or 2-tuple (for `Conv2d`)
        or 3-tuple (for `Conv3d`). Default: `1`
    bias : bool, optional
        If ``True``, adds a learnable bias to the depthwise step. Default: ``True``
    padding_mode : string, optional
        Accepted values `zeros` and `circular` Default: `zeros`

    """

    # Documentation fixes vs. the previous revision: the docstring claimed
    # a default of `3` for `stride` (actual default is `1`) and claimed
    # the string "same" could be passed as `dilation` (it cannot; that
    # sentence was copy-pasted from the `padding` entry).

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = 3,
        stride: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = 1,
        padding: typing.Union[
            str, int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = "same",
        dilation: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
    ):
        super().__init__()

        self.in_channels: int = in_channels
        self.out_channels: int = out_channels
        self.kernel_size: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = kernel_size
        self.stride: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = stride
        self.padding: typing.Union[
            str, int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = padding
        self.dilation: typing.Union[
            int, typing.Tuple[int, int], typing.Tuple[int, int, int]
        ] = dilation
        self.bias: bool = bias
        self.padding_mode: str = padding_mode

        # Depthwise step: one spatial filter per input channel
        # (groups == in_channels), channel count unchanged.
        self.depthwise = Conv(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
            padding_mode=padding_mode,
        )
        # Pointwise step: 1x1 convolution mixing channels to out_channels.
        # NOTE(review): the pointwise conv is always bias-free — the `bias`
        # flag only affects the depthwise step; confirm this is intended.
        self.pointwise = Conv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=False,
            padding_mode=padding_mode,
        )

    def forward(self, inputs):
        """Apply the depthwise filter followed by the pointwise projection."""
        return self.pointwise(self.depthwise(inputs))
class ChannelShuffle(_dev_utils.modules.Representation):
    """Mix channels across convolution groups.

    Grouped convolutions keep each group's channels isolated from the
    others; interleaving the channel dimension between consecutive layers
    restores cross-group information flow using nothing but reshapes.

    Originally proposed in
    `ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices <https://arxiv.org/abs/1707.01083>`__

    Parameters
    ----------
    groups : int
        Number of groups used in the previous convolutional layer.

    """

    def __init__(self, groups: int):
        super().__init__()
        self.groups: int = groups

    def forward(self, inputs):
        batch = inputs.shape[0]
        spatial = inputs.shape[2:]
        # Split channels into (groups, channels_per_group), swap those two
        # axes so channels from different groups interleave, then restore
        # the original tensor shape.
        regrouped = inputs.reshape(batch, self.groups, -1, *spatial)
        shuffled = regrouped.transpose(1, 2)
        return shuffled.reshape(*inputs.shape)
class ChannelSplit(_dev_utils.modules.Representation):
    """Convenience layer splitting tensor using `p`.

    Returns two outputs, splitted accordingly to parameters.

    Parameters
    ----------
    p : float
        Percentage of channels to go into first group
    dim : int, optional
        Dimension along which input will be splitted. Default: `1` (channel dimension)

    Raises
    ------
    ValueError
        If ``p`` is not strictly between 0 and 1.
    """

    def __init__(self, p: float, dim: int = 1):
        super().__init__()
        if not 0.0 < p < 1.0:
            # NOTE(review): message mentions "fire module" — looks like a
            # copy-paste from a SqueezeNet-style layer; confirm intended text.
            raise ValueError(
                "Ratio of small expand fire module has to be between 0 and 1."
            )
        self.p: float = p
        self.dim: int = dim

    def forward(self, inputs):
        # Bug fix: the split size must be computed along `self.dim`; the
        # original hard-coded `inputs.shape[1]`, which is wrong whenever a
        # non-default `dim` is passed.
        return torch.split(inputs, int(inputs.shape[self.dim] * self.p), dim=self.dim)
class Residual(torch.nn.Module):
    """Residual connection adding input to output of provided module.

    Originally proposed by He et. al in `ResNet <www.arxiv.org/abs/1512.03385>`__

    For correct usage it is advised to keep input line (skip connection) without
    any layer or activation and implement transformations only in module arguments
    (as per `Identity Mappings in Deep Residual Networks <https://arxiv.org/pdf/1603.05027.pdf>`__).

    Parameters
    ----------
    module : torch.nn.Module
        Convolutional PyTorch module (or other compatible module).
        Shape of module's `inputs` has to be equal to it's `outputs`, both
        should be addable `torch.Tensor` instances.
    projection : torch.nn.Module, optional
        If shapes of `inputs` and `module` results are different, it's user
        responsibility to add custom `projection` module (usually `1x1` convolution).
        Default: `None`
    """

    def __init__(self, module: torch.nn.Module, projection: torch.nn.Module = None):
        super().__init__()
        self.module: torch.nn.Module = module
        self.projection: torch.nn.Module = projection

    def forward(self, inputs):
        output = self.module(inputs)
        if self.projection is not None:
            # Bug fix: the attribute is `projection`; the original referenced
            # `self.projections`, raising AttributeError whenever a projection
            # module was actually supplied.
            inputs = self.projection(inputs)
        return output + inputs
class Dense(torch.nn.Module):
    """Dense residual connection concatenating input channels and output channels of provided module.

    Originally proposed in `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`__

    Parameters
    ----------
    module : torch.nn.Module
        Convolutional PyTorch module (or other compatible module).
        Its output must be concatenable with its input along `dim`.
    dim : int, optional
        Dimension along which `input` and module's `output` will be concatenated.
        Default: `1` (channel-wise)
    """

    def __init__(self, module: torch.nn.Module, dim: int = 1):
        super().__init__()
        self.module: torch.nn.Module = module
        self.dim: int = dim

    def forward(self, inputs):
        # Bug fix: torch.cat takes a *sequence* of tensors as its first
        # argument; the original passed two tensors positionally, so the
        # second tensor landed in the `dim` slot and raised a TypeError.
        return torch.cat([self.module(inputs), inputs], dim=self.dim)
class Poly(torch.nn.Module):
    """Sum of repeated applications of one module (PolyInception block).

    For ``order`` equal to :math:`N` the computed mapping is

    .. math::

        I + F + F^2 + ... + F^N

    where :math:`I` is the identity mapping and :math:`F^k` denotes
    ``module`` applied :math:`k` times. Proposed in
    "PolyNet: A Pursuit of Structural Diversity in Very Deep Networks"
    (https://arxiv.org/abs/1611.05725 — NOTE(review): the previous
    docstring linked the DenseNet arXiv id; confirm citation).

    Parameters
    ----------
    module : torch.nn.Module
        Module whose output shape equals its input shape
        (for 2D convolution that would be :math:`(C, H, W)`).
    order : int, optional
        Order of the PolyInception module. ``1`` acts like ResNet,
        ``2`` was used in the original paper. Default: ``2``

    Raises
    ------
    ValueError
        If ``order`` is smaller than 1.
    """

    def __init__(self, module: torch.nn.Module, order: int = 2):
        super().__init__()
        if order < 1:
            raise ValueError("Order of Poly cannot be less than 1.")
        self.module: torch.nn.Module = module
        self.order: int = order

    def extra_repr(self):
        return f"order={self.order},"

    def forward(self, inputs):
        # Accumulate F(x), F(F(x)), ... up to `order` applications, then
        # add the identity term by stacking with the raw input and summing.
        powers = []
        current = inputs
        for _ in range(self.order):
            current = self.module(current)
            powers.append(current)
        return torch.stack([inputs] + powers, dim=0).sum(dim=0)
class MPoly(torch.nn.Module):
    """Apply multiple modules to input, chained, and sum all intermediate results.

    For ``poly_modules`` of length :math:`N` the computed mapping is

    .. math::

        I + F_1 + F_2(F_1) + ... + F_N(F_{N-1}...F_1)

    where :math:`I` is identity and consecutive :math:`F_k` are the modules
    applied to the output of the previous one. Proposed in
    "PolyNet: A Pursuit of Structural Diversity in Very Deep Networks"
    (https://arxiv.org/abs/1611.05725 — NOTE(review): the previous
    docstring linked the DenseNet arXiv id; confirm citation).

    Parameters
    ----------
    *poly_modules : torch.nn.Module
        Variable arg of modules to use. If empty, acts as an identity.
        For single module acts like `ResNet`. `2` was used in original paper.
        All modules need `inputs` and `outputs` of equal `shape`.
    """

    def __init__(self, *poly_modules: torch.nn.Module):
        super().__init__()
        # Annotation fixed: this is a ModuleList, not a single Module.
        self.poly_modules: torch.nn.ModuleList = torch.nn.ModuleList(poly_modules)

    def forward(self, inputs):
        # Bug fix: the documented contract says an empty `poly_modules`
        # acts as identity, but indexing `poly_modules[0]` raised IndexError.
        if not self.poly_modules:
            return inputs
        outputs = [self.poly_modules[0](inputs)]
        for module in self.poly_modules[1:]:
            outputs.append(module(outputs[-1]))
        return torch.stack([inputs] + outputs, dim=0).sum(dim=0)
class WayPoly(torch.nn.Module):
"""Apply multiple modules to input and sum.
It's equation for `poly_modules` length equal to :math:`N` could be expressed by
.. math::
I + F_1(I) | |
triples generated from an RDFa 1.1 source are not the same as for RDFa 1.0. (See the separate U{section in the RDFa 1.1 specification<http://www.w3.org/TR/rdfa-core/#major-differences-with-rdfa-syntax-1.0>} for some further details.)
This distiller’s default behavior is RDFa 1.1. However, if the source includes, in the top element of the file (e.g., the C{html} element) a C{@version} attribute whose value contains the C{RDFa 1.0} string, then the distiller switches to a RDFa 1.0 mode. (Although the C{@version} attribute is not required in RDFa 1.0, it is fairly commonly used.) Similarly, if the RDFa 1.0 DTD is used in the XHTML source, it will be taken into account (a very frequent setup is that an XHTML file is defined with that DTD and is served as text/html; pyRdfa will consider that file as XHTML5, i.e., parse it with the HTML5 parser, but interpret the RDFa attributes under the RDFa 1.0 rules).
Transformers
============
The package uses the concept of 'transformers': the parsed DOM tree is possibly
transformed I{before} performing the real RDFa processing. This transformer structure makes it possible to
add additional 'services' without distorting the core code of RDFa processing.
A transformer is a function with three arguments:
- C{node}: a DOM node for the top level element of the DOM tree
- C{options}: the current L{Options} instance
- C{state}: the current L{ExecutionContext} instance, corresponding to the top level DOM Tree element
The function may perform any type of change on the DOM tree; the typical behavior is to add or remove attributes on specific elements. Some transformations are included in the package and can be used as examples; see the L{transform} module of the distribution. These are:
- The C{@name} attribute of the C{meta} element is copied into a C{@property} attribute of the same element
- Interpreting the 'openid' references in the header. See L{transform.OpenID} for further details.
- Implementing the Dublin Core dialect to include DC statements from the header. See L{transform.DublinCore} for further details.
The user of the package may add these transformers to the L{Options} instance. Here is a possible usage with the “openid” transformer added to the call::
from pyRdfa.options import Options
from pyRdfa.transform.OpenID import OpenID_transform
options = Options(transformers=[OpenID_transform])
print pyRdfa(options=options).rdf_from_source('filename')
@summary: RDFa parser (distiller)
@requires: Python version 2.5 or up; 2.7 is preferred
@requires: U{RDFLib<http://rdflib.net>}; version 3.X is preferred.
@requires: U{html5lib<http://code.google.com/p/html5lib/>} for the HTML5 parsing (note that version 1.0b1 and 1.0b2 should be avoided, it may lead to unicode encoding problems)
@requires: U{httpheader<http://deron.meranda.us/python/httpheader/>}; however, a small modification had to make on the original file, so for this reason and to make distribution easier this module (single file) is added to the package.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@var builtInTransformers: List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
@var CACHE_DIR_VAR: Environment variable used to define cache directories for RDFa vocabularies in case the default setting does not work or is not appropriate.
@var rdfa_current_version: Current "official" version of RDFa that this package implements by default. This can be changed at the invocation of the package
@var uri_schemes: List of registered (or widely used) URI schemes; used for warnings...
"""
# Package metadata: distiller version, authorship and licensing information.
__version__ = "3.5.3"
__author__ = '<NAME>'
__contact__ = '<NAME>, <EMAIL>'
__license__ = 'W3C® SOFTWARE NOTICE AND LICENSE, http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231'
name = "pyRdfa3"
import sys
PY3 = (sys.version_info[0] >= 3)
if PY3 :
from io import StringIO
else :
from StringIO import StringIO
import os
import xml.dom.minidom
if PY3 :
from urllib.parse import urlparse
else :
from urlparse import urlparse
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
# Namespace, in the RDFLib sense, for the rdfa vocabulary
ns_rdfa = Namespace("http://www.w3.org/ns/rdfa#")
from .extras.httpheader import acceptable_content_type, content_type
from .transform.prototype import handle_prototypes
# Vocabulary terms for vocab reporting
RDFA_VOCAB = ns_rdfa["usesVocabulary"]
# Namespace, in the RDFLib sense, for the XSD Datatypes
ns_xsd = Namespace('http://www.w3.org/2001/XMLSchema#')
# Namespace, in the RDFLib sense, for the distiller vocabulary, used as part of the processor graph
ns_distill = Namespace("http://www.w3.org/2007/08/pyRdfa/vocab#")
# Module-wide debug switch; presumably toggled by hand for development runs
# (no code in this chunk sets it) — TODO confirm.
debug = False
#########################################################################################################
# Exception/error handling. Essentially, all the different exceptions are re-packaged into
# separate exception class, to allow for an easier management on the user level
class RDFaError(Exception):
    """Base class for error conditions defined by the RDFa 1.1 specification.

    Stores the message on ``self.msg``; adds no other functionality to
    the Exception class."""
    def __init__(self, msg):
        self.msg = msg
        super(RDFaError, self).__init__()
class FailedSource(RDFaError):
    """Raised when the original source cannot be accessed.

    Adds no new functionality to the Exception class; ``http_code`` is
    kept when the failure came from an HTTP retrieval."""
    def __init__(self, msg, http_code=None):
        self.http_code = http_code
        self.msg = msg
        RDFaError.__init__(self, msg)
class HTTPError(RDFaError):
    """Raised when HTTP problems are detected.

    Adds no new functionality to the Exception class; carries the HTTP
    status code alongside the message."""
    def __init__(self, http_msg, http_code):
        self.http_code = http_code
        self.msg = http_msg
        RDFaError.__init__(self, http_msg)
class ProcessingError(RDFaError):
    """Error found during processing; adds no functionality beyond RDFaError."""
    pass
class pyRdfaError(Exception):
    """Base class for error conditions *outside* the RDFa 1.1 specification."""
    pass
# Error and Warning RDFS classes
# URI terms used when adding warning/error triples to the processor graph.
RDFA_Error = ns_rdfa["Error"]
RDFA_Warning = ns_rdfa["Warning"]
RDFA_Info = ns_rdfa["Information"]
NonConformantMarkup = ns_rdfa["DocumentError"]
# NOTE(review): UnresolvablePrefix and UnresolvableReference map to the same
# ns_rdfa["UnresolvedCURIE"] term — presumably intentional (both are CURIE
# resolution failures), but confirm against the RDFa vocabulary.
UnresolvablePrefix = ns_rdfa["UnresolvedCURIE"]
UnresolvableReference = ns_rdfa["UnresolvedCURIE"]
UnresolvableTerm = ns_rdfa["UnresolvedTerm"]
VocabReferenceError = ns_rdfa["VocabReferenceError"]
PrefixRedefinitionWarning = ns_rdfa["PrefixRedefinition"]
# Distiller-specific (non-spec) terms live in the ns_distill namespace.
FileReferenceError = ns_distill["FileReferenceError"]
HTError = ns_distill["HTTPError"]
IncorrectPrefixDefinition = ns_distill["IncorrectPrefixDefinition"]
IncorrectBlankNodeUsage = ns_distill["IncorrectBlankNodeUsage"]
IncorrectLiteral = ns_distill["IncorrectLiteral"]
# Error message texts
# %-style templates; the placeholders are filled in at reporting time.
err_no_blank_node = "Blank node in %s position is not allowed; ignored"
err_redefining_URI_as_prefix = "'%s' a registered or an otherwise used URI scheme, but is defined as a prefix here; is this a mistake? (see, eg, http://en.wikipedia.org/wiki/URI_scheme or http://www.iana.org/assignments/uri-schemes.html for further information for most of the URI schemes)"
err_xmlns_deprecated = "The usage of 'xmlns' for prefix definition is deprecated; please use the 'prefix' attribute instead (definition for '%s')"
err_bnode_local_prefix = "The '_' local CURIE prefix is reserved for blank nodes, and cannot be defined as a prefix"
err_col_local_prefix = "The character ':' is not valid in a CURIE Prefix, and cannot be used in a prefix definition (definition for '%s')"
err_missing_URI_prefix = "Missing URI in prefix declaration for '%s' (in '%s')"
err_invalid_prefix = "Invalid prefix declaration '%s' (in '%s')"
err_no_default_prefix = "Default prefix cannot be changed (in '%s')"
err_prefix_and_xmlns = "@prefix setting for '%s' overrides the 'xmlns:%s' setting; may be a source of problem if same file is run through RDFa 1.0"
err_non_ncname_prefix = "Non NCNAME '%s' in prefix definition (in '%s'); ignored"
err_absolute_reference = "CURIE Reference part contains an authority part: %s (in '%s'); ignored"
err_query_reference = "CURIE Reference query part contains an unauthorized character: %s (in '%s'); ignored"
err_fragment_reference = "CURIE Reference fragment part contains an unauthorized character: %s (in '%s'); ignored"
err_lang = "There is a problem with language setting; either both xml:lang and lang used on an element with different values, or, for (X)HTML5, only xml:lang is used."
err_URI_scheme = "Unusual URI scheme used in <%s>; may that be a mistake, e.g., resulting from using an undefined CURIE prefix or an incorrect CURIE?"
err_illegal_safe_CURIE = "Illegal safe CURIE: %s; ignored"
err_no_CURIE_in_safe_CURIE = "Safe CURIE is used, but the value does not correspond to a defined CURIE: [%s]; ignored"
err_undefined_terms = "'%s' is used as a term, but has not been defined as such; ignored"
err_non_legal_CURIE_ref = "Relative URI is not allowed in this position (or not a legal CURIE reference) '%s'; ignored"
err_undefined_CURIE = "Undefined CURIE: '%s'; ignored"
err_prefix_redefinition = "Prefix '%s' (defined in the initial RDFa context or in an ancestor) is redefined"
err_unusual_char_in_URI = "Unusual character in uri: %s; possible error?"
#############################################################################################
from .state import ExecutionContext
from .parse import parse_one_node
from .options import Options
from .transform import top_about, empty_safe_curie, vocab_for_role
from .utils import URIOpener
from .host import HostLanguage, MediaTypes, preferred_suffixes, content_to_host_language
# Environment variable used to characterize cache directories for RDFa vocabulary files.
CACHE_DIR_VAR = "PyRdfaCacheDir"
# current "official" version of RDFa that this package implements. This can be changed at the invocation of the package
# NOTE(review): presumably "1.0" is the only other accepted value — confirm against the callers.
rdfa_current_version = "1.1"
# I removed schemes that would not appear as a prefix anyway, like iris.beep
# http://en.wikipedia.org/wiki/URI_scheme seems to be a good source of information
# as well as http://www.iana.org/assignments/uri-schemes.html
# There are some overlaps here, but better more than not enough...
# This comes from wikipedia
# Registered (or widely used) URI schemes, used to warn when a URI scheme is
# redefined as a CURIE prefix. Sources: http://en.wikipedia.org/wiki/URI_scheme
# and http://www.iana.org/assignments/uri-schemes.html.
# Bug fix: "rtsp" (the IANA-registered Real Time Streaming Protocol scheme)
# was missing; the list contained "rstp", which looks like a transposition
# typo. "rstp" is kept for backward compatibility — over-matching is harmless
# here, since the list only suppresses warnings.
registered_iana_schemes = [
    "aaa", "aaas", "acap", "cap", "cid", "crid", "data", "dav", "dict", "dns", "fax", "file", "ftp", "geo", "go",
    "gopher", "h323", "http", "https", "iax", "icap", "im", "imap", "info", "ipp", "iris", "ldap", "lsid",
    "mailto", "mid", "modem", "msrp", "msrps", "mtqp", "mupdate", "news", "nfs", "nntp", "opaquelocktoken",
    "pop", "pres", "prospero", "rstp", "rsync", "rtsp", "service", "shttp", "sieve", "sip", "sips", "sms", "snmp", "soap", "tag",
    "tel", "telnet", "tftp", "thismessage", "tn3270", "tip", "tv", "urn", "vemmi", "wais", "ws", "wss", "xmpp"
]
# This comes from wikipedia, too
unofficial_common = [
"about", "adiumxtra", "aim", "apt", "afp", "aw", "bitcoin", "bolo", "callto", "chrome", "coap",
"content", "cvs", "doi", "ed2k", "facetime", "feed", "finger", "fish", | |
P.ghWp__tilde__, P.G0 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2052})
# Auto-generated UFO model vertex definitions (ghost, gluon, chargino/
# neutralino and slepton interaction vertices). Each Vertex pairs a particle
# list with color/Lorentz structures and the coupling constants from C.
# NOTE(review): presumably emitted by a FeynRules-style UFO exporter — do not
# edit by hand; regenerate from the model instead. TODO confirm the generator.
V_104 = Vertex(name = 'V_104',
particles = [ P.ghWp, P.ghWp__tilde__, P.A0 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2044})
V_105 = Vertex(name = 'V_105',
particles = [ P.ghWp, P.ghWp__tilde__, P.a ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_3})
V_106 = Vertex(name = 'V_106',
particles = [ P.ghWp, P.ghWp__tilde__, P.Z ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_35})
V_107 = Vertex(name = 'V_107',
particles = [ P.ghWp, P.ghZ__tilde__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2053})
V_108 = Vertex(name = 'V_108',
particles = [ P.ghWp, P.ghZ__tilde__, P.H__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2047})
V_109 = Vertex(name = 'V_109',
particles = [ P.ghWp, P.ghZ__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_36})
V_110 = Vertex(name = 'V_110',
particles = [ P.ghZ, P.ghWm__tilde__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2054})
V_111 = Vertex(name = 'V_111',
particles = [ P.ghZ, P.ghWm__tilde__, P.H__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2046})
V_112 = Vertex(name = 'V_112',
particles = [ P.ghZ, P.ghWm__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_35})
V_113 = Vertex(name = 'V_113',
particles = [ P.ghZ, P.ghWp__tilde__, P.G__plus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2054})
V_114 = Vertex(name = 'V_114',
particles = [ P.ghZ, P.ghWp__tilde__, P.H__plus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_2046})
V_115 = Vertex(name = 'V_115',
particles = [ P.ghZ, P.ghWp__tilde__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_36})
V_116 = Vertex(name = 'V_116',
particles = [ P.ghZ, P.ghZ__tilde__, P.h02 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_1816})
V_117 = Vertex(name = 'V_117',
particles = [ P.ghZ, P.ghZ__tilde__, P.h01 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_1811})
V_118 = Vertex(name = 'V_118',
particles = [ P.ghG, P.ghG__tilde__, P.g ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_8})
V_119 = Vertex(name = 'V_119',
particles = [ P.g, P.g, P.g ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.VVV8 ],
couplings = {(0,0):C.GC_8})
V_120 = Vertex(name = 'V_120',
particles = [ P.g, P.g, P.g, P.g ],
color = [ 'f(-1,1,2)*f(3,4,-1)', 'f(-1,1,3)*f(2,4,-1)', 'f(-1,1,4)*f(2,3,-1)' ],
lorentz = [ L.VVVV4, L.VVVV6, L.VVVV7 ],
couplings = {(1,1):C.GC_11,(0,0):C.GC_11,(2,2):C.GC_11})
V_121 = Vertex(name = 'V_121',
particles = [ P.x1__minus__, P.x1__plus__, P.h02 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1888,(0,1):C.GC_1821})
V_122 = Vertex(name = 'V_122',
particles = [ P.x1__minus__, P.x1__plus__, P.h01 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1886,(0,1):C.GC_1819})
V_123 = Vertex(name = 'V_123',
particles = [ P.x1__minus__, P.x1__plus__, P.G0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2144,(0,1):C.GC_2064})
V_124 = Vertex(name = 'V_124',
particles = [ P.x1__minus__, P.x1__plus__, P.A0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2138,(0,1):C.GC_2058})
V_125 = Vertex(name = 'V_125',
particles = [ P.x2__minus__, P.x1__plus__, P.h02 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1892,(0,1):C.GC_1822})
V_126 = Vertex(name = 'V_126',
particles = [ P.x2__minus__, P.x1__plus__, P.h01 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1890,(0,1):C.GC_1820})
V_127 = Vertex(name = 'V_127',
particles = [ P.x2__minus__, P.x1__plus__, P.G0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2152,(0,1):C.GC_2065})
V_128 = Vertex(name = 'V_128',
particles = [ P.x2__minus__, P.x1__plus__, P.A0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2146,(0,1):C.GC_2059})
V_129 = Vertex(name = 'V_129',
particles = [ P.n1, P.x1__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1057,(0,1):C.GC_2060})
V_130 = Vertex(name = 'V_130',
particles = [ P.n2, P.x1__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1058,(0,1):C.GC_2061})
V_131 = Vertex(name = 'V_131',
particles = [ P.n3, P.x1__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1059,(0,1):C.GC_2062})
V_132 = Vertex(name = 'V_132',
particles = [ P.n4, P.x1__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1060,(0,1):C.GC_2063})
V_133 = Vertex(name = 'V_133',
particles = [ P.x1__minus__, P.x2__plus__, P.h02 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1889,(0,1):C.GC_1825})
V_134 = Vertex(name = 'V_134',
particles = [ P.x1__minus__, P.x2__plus__, P.h01 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1887,(0,1):C.GC_1823})
V_135 = Vertex(name = 'V_135',
particles = [ P.x1__minus__, P.x2__plus__, P.G0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2145,(0,1):C.GC_2072})
V_136 = Vertex(name = 'V_136',
particles = [ P.x1__minus__, P.x2__plus__, P.A0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2139,(0,1):C.GC_2066})
V_137 = Vertex(name = 'V_137',
particles = [ P.x2__minus__, P.x2__plus__, P.h02 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1893,(0,1):C.GC_1826})
V_138 = Vertex(name = 'V_138',
particles = [ P.x2__minus__, P.x2__plus__, P.h01 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1891,(0,1):C.GC_1824})
V_139 = Vertex(name = 'V_139',
particles = [ P.x2__minus__, P.x2__plus__, P.G0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2153,(0,1):C.GC_2073})
V_140 = Vertex(name = 'V_140',
particles = [ P.x2__minus__, P.x2__plus__, P.A0 ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_2147,(0,1):C.GC_2067})
V_141 = Vertex(name = 'V_141',
particles = [ P.n1, P.x2__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1061,(0,1):C.GC_2068})
V_142 = Vertex(name = 'V_142',
particles = [ P.n2, P.x2__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1062,(0,1):C.GC_2069})
V_143 = Vertex(name = 'V_143',
particles = [ P.n3, P.x2__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1063,(0,1):C.GC_2070})
V_144 = Vertex(name = 'V_144',
particles = [ P.n4, P.x2__plus__, P.G__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1064,(0,1):C.GC_2071})
V_145 = Vertex(name = 'V_145',
particles = [ P.n1, P.e__minus__, P.sl4__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_450})
V_146 = Vertex(name = 'V_146',
particles = [ P.n2, P.e__minus__, P.sl4__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_451})
V_147 = Vertex(name = 'V_147',
particles = [ P.n3, P.e__minus__, P.sl4__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_452})
V_148 = Vertex(name = 'V_148',
particles = [ P.n4, P.e__minus__, P.sl4__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_453})
V_149 = Vertex(name = 'V_149',
particles = [ P.n1, P.mu__minus__, P.sl5__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_471})
V_150 = Vertex(name = 'V_150',
particles = [ P.n2, P.mu__minus__, P.sl5__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_472})
V_151 = Vertex(name = 'V_151',
particles = [ P.n3, P.mu__minus__, P.sl5__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_473})
V_152 = Vertex(name = 'V_152',
particles = [ P.n4, P.mu__minus__, P.sl5__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_474})
V_153 = Vertex(name = 'V_153',
particles = [ P.x1__minus__, P.n1, P.H__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1065,(0,1):C.GC_2036})
V_154 = Vertex(name = 'V_154',
particles = [ P.x2__minus__, P.n1, P.H__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_1069,(0,1):C.GC_2040})
V_155 = Vertex(name = 'V_155',
particles = [ P.n1, P.n1, | |
<gh_stars>0
# coding: utf-8
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 3.22.0-937b9a1c-20201211-223043
"""
This doc lists APIs that you can use to interact with your IBM Blockchain Platform console
(IBP console)
"""
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class BlockchainV3(BaseService):
"""The blockchain V3 service."""
DEFAULT_SERVICE_URL = None
DEFAULT_SERVICE_NAME = 'blockchain'
@classmethod
def new_instance(cls,
                 service_name: str = DEFAULT_SERVICE_NAME,
                 ) -> 'BlockchainV3':
    """
    Build a blockchain client configured from external configuration.

    Looks up configuration for `service_name` in the environment to
    create the authenticator, constructs the client with it, then
    applies any remaining service-level configuration.
    """
    env_authenticator = get_authenticator_from_environment(service_name)
    instance = cls(env_authenticator)
    instance.configure_service(service_name)
    return instance
def __init__(self,
             authenticator: Authenticator = None,
             ) -> None:
    """
    Construct a new client for the blockchain service.

    :param Authenticator authenticator: The authenticator specifies the authentication mechanism.
           Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
           about initializing the authenticator of your choice.
    """
    super().__init__(service_url=self.DEFAULT_SERVICE_URL,
                     authenticator=authenticator)
#########################
# Manage component
#########################
def get_component(self,
    id: str,
    *,
    deployment_attrs: str = None,
    parsed_certs: str = None,
    cache: str = None,
    ca_attrs: str = None,
    **kwargs
) -> DetailedResponse:
    """
    Get component data.

    Get the IBP console's data on a component (peer, CA, orderer, or MSP).
    The component might be imported or created.

    :param str id: The `id` of the component to retrieve. Use the [Get all
           components](#list_components) API to determine the component id.
    :param str deployment_attrs: (optional) Set to 'included' to add Kubernetes
           deployment attributes ('resources', 'storage', 'zone', 'region',
           'admin_certs', etc.) to the response. Does not work on *imported*
           components; combine with `cache=skip` for up-to-date deployment data.
    :param str parsed_certs: (optional) Set to 'included' to add parsed PEM data
           (serial number, issuer, expiration, subject, subject alt names, etc.)
           alongside the base 64 encoded PEM string.
    :param str cache: (optional) Set to 'skip' to bypass local data and fetch
           live data wherever possible (expect longer response times).
    :param str ca_attrs: (optional) Set to 'included' to enrich CA responses with
           `/cainfo?ca=ca` data (`ca_name`, `root_cert`, `fabric_version`,
           `issuer_public_key`, `issued_known_msps`) and MSP responses with
           `issued_by_ca_id`. Only imported/created components are checked.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `GenericComponentResponse` object
    """
    if id is None:
        raise ValueError('id must be provided')

    sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                  service_version='V3',
                                  operation_id='get_component')
    headers = dict(sdk_headers)
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers['Accept'] = 'application/json'

    params = {
        'deployment_attrs': deployment_attrs,
        'parsed_certs': parsed_certs,
        'cache': cache,
        'ca_attrs': ca_attrs,
    }

    path_params = dict(zip(('id',), self.encode_path_vars(id)))
    url = '/ak/api/v3/components/{id}'.format(**path_params)
    request = self.prepare_request(method='GET',
                                   url=url,
                                   headers=headers,
                                   params=params)
    return self.send(request)
def remove_component(self,
    id: str,
    **kwargs
) -> DetailedResponse:
    """
    Remove imported component.

    Remove a single component from the IBP console.
    - On an **imported** component this removes it from the IBP console.
    - On a **created** component this removes it from the IBP console **but**
      does **not** delete it from the Kubernetes cluster where it resides,
      orphaning the Kubernetes deployment (if it exists). Use the
      [Delete component](#delete-component) API to delete the Kubernetes
      deployment and the IBP console data at once.

    :param str id: The `id` of the imported component to remove. Use the [Get
           all components](#list-components) API to determine the component id.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse with `dict` result representing a `DeleteComponentResponse` object
    """
    if id is None:
        raise ValueError('id must be provided')

    sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                  service_version='V3',
                                  operation_id='remove_component')
    headers = dict(sdk_headers)
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers['Accept'] = 'application/json'

    path_params = dict(zip(('id',), self.encode_path_vars(id)))
    url = '/ak/api/v3/components/{id}'.format(**path_params)
    request = self.prepare_request(method='DELETE',
                                   url=url,
                                   headers=headers)
    return self.send(request)
def delete_component(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Delete component.
Removes a single component from the IBP console **and** it deletes the Kubernetes
deployment.
- Using this api on an **imported** component will *error out* since its
Kubernetes deployment is unknown and cannot be removed. Instead use the [Remove
imported component](#remove-component) API to remove imported components.
- Using this api on a **created** component removes it from the IBP console
**and** it will delete the component from the Kubernetes cluster where it resides.
The Kubernetes delete must succeed before the component will be removed from the
IBP console.
:param str id: The `id` of the component to delete. Use the [Get all
components](#list_components) API to determine the id of the component to
be deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `DeleteComponentResponse` object
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V3',
operation_id='delete_component')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/ak/api/v3/kubernetes/components/{id}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def create_ca(self,
display_name: str,
config_override: 'CreateCaBodyConfigOverride',
*,
id: str = None,
resources: 'CreateCaBodyResources' = None,
storage: 'CreateCaBodyStorage' = None,
zone: str = None,
replicas: float = None,
tags: List[str] = None,
hsm: 'Hsm' = None,
region: str = None,
version: str = None,
**kwargs
) -> DetailedResponse:
"""
Create a CA.
Create a Hyperledger Fabric Certificate Authority (CA) in your Kubernetes cluster.
:param str display_name: A descriptive name for this CA. The IBP console
tile displays this name.
:param CreateCaBodyConfigOverride config_override: Set `config_override` to
create the root/initial enroll id and enroll secret as well as enabling
custom CA configurations (such as using postgres). See the [Fabric CA
configuration
file](https://hyperledger-fabric-ca.readthedocs.io/en/release-1.4/serverconfig.html)
for more information about each parameter.
The field `tlsca` is optional. The IBP console will copy the value of
`config_override.ca` into `config_override.tlsca` if
`config_override.tlsca` is omitted (which is recommended).
*The nested field **names** below are not case-sensitive.*.
:param str id: (optional) The unique identifier of this component. Must
start with a letter, be lowercase and only contain letters and numbers. If
        `id` is not provided a component id will be generated using the field
`display_name` as the base.
:param CreateCaBodyResources resources: (optional) CPU and memory
properties. This feature is not available if using a free Kubernetes
cluster.
:param CreateCaBodyStorage storage: (optional) Disk space properties. This
feature | |
"""\
basis sets for use with PyQuante
This program is part of the PyQuante quantum chemistry program suite.
"""
sto3g = \
{1: [('S',
[(3.4252509099999999, 0.15432897000000001),
(0.62391373000000006, 0.53532813999999995),
(0.16885539999999999, 0.44463454000000002)])],
2: [('S',
[(6.3624213899999997, 0.15432897000000001),
(1.1589229999999999, 0.53532813999999995),
(0.31364978999999998, 0.44463454000000002)])],
3: [('S',
[(16.119575000000001, 0.15432897000000001),
(2.9362007000000001, 0.53532813999999995),
(0.79465050000000004, 0.44463454000000002)]),
('S',
[(0.63628969999999996, -0.099967230000000004),
(0.14786009999999999, 0.39951282999999999),
(0.048088699999999998, 0.70011546999999996)]),
('P',
[(0.63628969999999996, 0.15591627),
(0.14786009999999999, 0.60768372000000004),
(0.048088699999999998, 0.39195739000000002)])],
4: [('S',
[(30.167871000000002, 0.15432897000000001),
(5.4951153000000001, 0.53532813999999995),
(1.4871927, 0.44463454000000002)]),
('S',
[(1.3148331, -0.099967230000000004),
(0.3055389, 0.39951282999999999),
(0.099370700000000006, 0.70011546999999996)]),
('P',
[(1.3148331, 0.15591627),
(0.3055389, 0.60768372000000004),
(0.099370700000000006, 0.39195739000000002)])],
5: [('S',
[(48.791113000000003, 0.15432897000000001),
(8.8873622000000001, 0.53532813999999995),
(2.4052669999999998, 0.44463454000000002)]),
('S',
[(2.2369561, -0.099967230000000004),
(0.51982050000000002, 0.39951282999999999),
(0.16906180000000001, 0.70011546999999996)]),
('P',
[(2.2369561, 0.15591627),
(0.51982050000000002, 0.60768372000000004),
(0.16906180000000001, 0.39195739000000002)])],
6: [('S',
[(71.616837000000004, 0.15432897000000001),
(13.045095999999999, 0.53532813999999995),
(3.5305122, 0.44463454000000002)]),
('S',
[(2.9412493999999998, -0.099967230000000004),
(0.68348310000000001, 0.39951282999999999),
(0.22228990000000001, 0.70011546999999996)]),
('P',
[(2.9412493999999998, 0.15591627),
(0.68348310000000001, 0.60768372000000004),
(0.22228990000000001, 0.39195739000000002)])],
7: [('S',
[(99.106168999999994, 0.15432897000000001),
(18.052312000000001, 0.53532813999999995),
(4.8856602000000002, 0.44463454000000002)]),
('S',
[(3.7804559000000002, -0.099967230000000004),
(0.87849659999999996, 0.39951282999999999),
(0.28571439999999998, 0.70011546999999996)]),
('P',
[(3.7804559000000002, 0.15591627),
(0.87849659999999996, 0.60768372000000004),
(0.28571439999999998, 0.39195739000000002)])],
8: [('S',
[(130.70931999999999, 0.15432897000000001),
(23.808861, 0.53532813999999995),
(6.4436083000000002, 0.44463454000000002)]),
('S',
[(5.0331513000000001, -0.099967230000000004),
(1.1695960999999999, 0.39951282999999999),
(0.38038899999999998, 0.70011546999999996)]),
('P',
[(5.0331513000000001, 0.15591627),
(1.1695960999999999, 0.60768372000000004),
(0.38038899999999998, 0.39195739000000002)])],
9: [('S',
[(166.67912999999999, 0.15432897000000001),
(30.360811999999999, 0.53532813999999995),
(8.2168206999999995, 0.44463454000000002)]),
('S',
[(6.4648032000000004, -0.099967230000000004),
(1.5022812000000001, 0.39951282999999999),
(0.48858849999999998, 0.70011546999999996)]),
('P',
[(6.4648032000000004, 0.15591627),
(1.5022812000000001, 0.60768372000000004),
(0.48858849999999998, 0.39195739000000002)])],
10: [('S',
[(207.01561000000001, 0.15432897000000001),
(37.708151000000001, 0.53532813999999995),
(10.205297, 0.44463454000000002)]),
('S',
[(8.2463151000000003, -0.099967230000000004),
(1.9162661999999999, 0.39951282999999999),
(0.62322929999999999, 0.70011546999999996)]),
('P',
[(8.2463151000000003, 0.15591627),
(1.9162661999999999, 0.60768372000000004),
(0.62322929999999999, 0.39195739000000002)])],
11: [('S',
[(250.77243000000001, 0.15432896730000001),
(45.678511, 0.53532814230000003),
(12.362387999999999, 0.44463454219999998)]),
('S',
[(12.040193, -0.099967229190000007),
(2.7978819000000001, 0.3995128261),
(0.90995800000000004, 0.70011546889999998)]),
('P',
[(12.040193, 0.15591627499999999),
(2.7978819000000001, 0.60768371860000003),
(0.90995800000000004, 0.3919573931)]),
('S',
[(1.4787406000000001, -0.21962036900000001),
(0.41256490000000001, 0.22559543360000001),
(0.16147510000000001, 0.90039842599999997)]),
('P',
[(1.4787406000000001, 0.01058760429),
(0.41256490000000001, 0.5951670053),
(0.16147510000000001, 0.46200101199999999)])],
12: [('S',
[(299.23739999999998, 0.15432896730000001),
(54.50647, 0.53532814230000003),
(14.751580000000001, 0.44463454219999998)]),
('S',
[(15.12182, -0.099967229190000007),
(3.5139870000000002, 0.3995128261),
(1.142857, 0.70011546889999998)]),
('P',
[(15.12182, 0.15591627499999999),
(3.5139870000000002, 0.60768371860000003),
(1.142857, 0.3919573931)]),
('S',
[(1.395448, -0.21962036900000001),
(0.38932600000000001, 0.22559543360000001),
(0.15237999999999999, 0.90039842599999997)]),
('P',
[(1.395448, 0.01058760429),
(0.38932600000000001, 0.5951670053),
(0.15237999999999999, 0.46200101199999999)])],
13: [('S',
[(351.42147670000003, 0.15432896730000001),
(64.011860670000004, 0.53532814230000003),
(17.324107609999999, 0.44463454219999998)]),
('S',
[(18.899396209999999, -0.099967229190000007),
(4.3918132329999997, 0.3995128261),
(1.4283539700000001, 0.70011546889999998)]),
('P',
[(18.899396209999999, 0.15591627499999999),
(4.3918132329999997, 0.60768371860000003),
(1.4283539700000001, 0.3919573931)]),
('S',
[(1.3954482930000001, -0.21962036900000001),
(0.38932653179999999, 0.22559543360000001),
(0.15237976589999999, 0.90039842599999997)]),
('P',
[(1.3954482930000001, 0.01058760429),
(0.38932653179999999, 0.5951670053),
(0.15237976589999999, 0.46200101199999999)])],
14: [('S',
[(407.79755139999997, 0.15432896730000001),
(74.280833049999998, 0.53532814230000003),
(20.103292289999999, 0.44463454219999998)]),
('S',
[(23.193656059999999, -0.099967229190000007),
(5.3897068709999996, 0.3995128261),
(1.7528999519999999, 0.70011546889999998)]),
('P',
[(23.193656059999999, 0.15591627499999999),
(5.3897068709999996, 0.60768371860000003),
(1.7528999519999999, 0.3919573931)]),
('S',
[(1.4787406219999999, -0.21962036900000001),
(0.41256488009999998, 0.22559543360000001),
(0.1614750979, 0.90039842599999997)]),
('P',
[(1.4787406219999999, 0.01058760429),
(0.41256488009999998, 0.5951670053),
(0.1614750979, 0.46200101199999999)])],
15: [('S',
[(468.3656378, 0.15432896730000001),
(85.313385589999996, 0.53532814230000003),
(23.089131559999998, 0.44463454219999998)]),
('S',
[(28.032639580000001, -0.099967229190000007),
(6.5141825769999997, 0.3995128261),
(2.1186143519999998, 0.70011546889999998)]),
('P',
[(28.032639580000001, 0.15591627499999999),
(6.5141825769999997, 0.60768371860000003),
(2.1186143519999998, 0.3919573931)]),
('S',
[(1.7431032310000001, -0.21962036900000001),
(0.48632137710000001, 0.22559543360000001),
(0.19034289090000001, 0.90039842599999997)]),
('P',
[(1.7431032310000001, 0.01058760429),
(0.48632137710000001, 0.5951670053),
(0.19034289090000001, 0.46200101199999999)])],
16: [('S',
[(533.1257359, 0.15432896730000001),
(97.109518300000005, 0.53532814230000003),
(26.281625420000001, 0.44463454219999998)]),
('S',
[(33.329751729999998, -0.099967229190000007),
(7.7451175210000001, 0.3995128261),
(2.5189525989999999, 0.70011546889999998)]),
('P',
[(33.329751729999998, 0.15591627499999999),
(7.7451175210000001, 0.60768371860000003),
(2.5189525989999999, 0.3919573931)]),
('S',
[(2.029194274, -0.21962036900000001),
(0.56614005180000004, 0.22559543360000001),
(0.22158337920000001, 0.90039842599999997)]),
('P',
[(2.029194274, 0.01058760429),
(0.56614005180000004, 0.5951670053),
(0.22158337920000001, 0.46200101199999999)])],
17: [('S',
[(601.34561359999998, 0.15432896730000001),
(109.5358542, 0.53532814230000003),
(29.644676860000001, 0.44463454219999998)]),
('S',
[(38.96041889, -0.099967229190000007),
(9.0535634770000009, 0.3995128261),
(2.9444998340000001, 0.70011546889999998)]),
('P',
[(38.96041889, 0.15591627499999999),
(9.0535634770000009, 0.60768371860000003),
(2.9444998340000001, 0.3919573931)]),
('S',
[(2.1293864949999999, -0.21962036900000001),
(0.59409342740000004, 0.22559543360000001),
(0.23252414099999999, 0.90039842599999997)]),
('P',
[(2.1293864949999999, 0.01058760429),
(0.59409342740000004, 0.5951670053),
(0.23252414099999999, 0.46200101199999999)])],
18: [('S',
[(674.44651839999995, 0.15432896730000001),
(122.8512753, 0.53532814230000003),
(33.248349449999999, 0.44463454219999998)]),
('S',
[(45.164243919999997, -0.099967229190000007),
(10.495199, 0.3995128261),
(3.4133644479999998, 0.70011546889999998)]),
('P',
[(45.164243919999997, 0.15591627499999999),
(10.495199, 0.60768371860000003),
(3.4133644479999998, 0.3919573931)]),
('S',
[(2.6213665179999999, -0.21962036900000001),
(0.73135460500000005, 0.22559543360000001),
(0.28624723559999998, 0.90039842599999997)]),
('P',
[(2.6213665179999999, 0.01058760429),
(0.73135460500000005, 0.5951670053),
(0.28624723559999998, 0.46200101199999999)])],
19: [('S',
[(771.51036810000005, 0.15432896730000001),
(140.53157659999999, 0.53532814230000003),
(38.033328990000001, 0.44463454219999998)]),
('S',
[(52.402039790000003, -0.099967229199999993),
(12.177107100000001, 0.3995128261),
(3.960373165, 0.70011546889999998)]),
('P',
[(52.402039790000003, 0.15591627499999999),
(12.177107100000001, 0.60768371860000003),
(3.960373165, 0.3919573931)]),
('S',
[(3.6515839849999998, -0.21962036900000001),
(1.0187826630000001, 0.22559543360000001),
(0.3987446295, 0.90039842599999997)]),
('P',
[(3.6515839849999998, 0.010587604299999999),
(1.0187826630000001, 0.5951670053),
(0.3987446295, 0.46200101199999999)]),
('S',
[(0.50398225050000001, -0.30884412150000001),
(0.18600114649999999, 0.0196064117),
(0.082140067430000005, 1.131034442)]),
('P',
[(0.50398225050000001, -0.12154686000000001),
(0.18600114649999999, 0.57152276040000005),
(0.082140067430000005, 0.54989494709999998)])],
20: [('S',
[(854.03249510000001, 0.15432896730000001),
(155.5630851, 0.53532814230000003),
(42.101441790000003, 0.44463454219999998)]),
('S',
[(59.560299440000001, -0.099967229199999993),
(13.840532700000001, 0.3995128261),
(4.5013707969999999, 0.70011546889999998)]),
('P',
[(59.560299440000001, 0.15591627499999999),
(13.840532700000001, 0.60768371860000003),
(4.5013707969999999, 0.3919573931)]),
('S',
[(4.3747062559999996, -0.21962036900000001),
(1.220531941, 0.22559543360000001),
(0.47770793, 0.90039842599999997)]),
('P',
[(4.3747062559999996, 0.010587604299999999),
(1.220531941, 0.5951670053),
(0.47770793, 0.46200101199999999)]),
('S',
[(0.45584897569999999, -0.30884412150000001),
(0.168236941, 0.0196064117),
(0.074295207000000002, 1.131034442)]),
('P',
[(0.45584897569999999, -0.12154686000000001),
(0.168236941, 0.57152276040000005),
(0.074295207000000002, 0.54989494709999998)])],
21: [('S',
[(941.66242499999998, 0.15432896730000001),
(171.5249862, 0.53532814230000003),
(46.421355159999997, 0.44463454219999998)]),
('S',
[(67.176687709999996, -0.099967229199999993),
(15.61041754, 0.3995128261),
(5.0769922779999996, 0.70011546889999998)]),
('P',
[(67.176687709999996, 0.15591627499999999),
(15.61041754, 0.60768371860000003),
(5.0769922779999996, 0.3919573931)]),
('S',
[(4.698159231, -0.2277635023),
(1.4330883130000001, 0.21754360440000001),
(0.55293002400000002, 0.91667696109999997)]),
('P',
[(4.698159231, 0.0049515111999999997),
(1.4330883130000001, 0.57776646909999996),
(0.55293002400000002, 0.4846460366)]),
('D',
[(0.55170006790000004, 0.2197679508),
(0.16828610550000001, 0.65554736270000002),
(0.064930011199999998, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
22: [('S',
[(1033.5712450000001, 0.15432896730000001),
(188.26629260000001, 0.53532814230000003),
(50.952206009999998, 0.44463454219999998)]),
('S',
[(75.251204599999994, -0.099967229199999993),
(17.486761619999999, 0.3995128261),
(5.6872376060000001, 0.70011546889999998)]),
('P',
[(75.251204599999994, 0.15591627499999999),
(17.486761619999999, 0.60768371860000003),
(5.6872376060000001, 0.3919573931)]),
('S',
[(5.3955354739999999, -0.2277635023),
(1.6458102960000001, 0.21754360440000001),
(0.63500477700000002, 0.91667696109999997)]),
('P',
[(5.3955354739999999, 0.0049515111999999997),
(1.6458102960000001, 0.57776646909999996),
(0.63500477700000002, 0.4846460366)]),
('D',
[(1.645981194, 0.2197679508),
(0.50207672800000003, 0.65554736270000002),
(0.19371680999999999, 0.28657325900000002)]),
('S',
[(0.71226402460000005, -0.30884412150000001),
(0.26287022030000001, 0.0196064117),
(0.1160862609, 1.131034442)]),
('P',
[(0.71226402460000005, -0.12154686000000001),
(0.26287022030000001, 0.57152276040000005),
(0.1160862609, 0.54989494709999998)])],
23: [('S',
[(1130.7625169999999, 0.15432896730000001),
(205.9698041, 0.53532814230000003),
(55.743467109999997, 0.44463454219999998)]),
('S',
[(83.783850110000003, -0.099967229199999993),
(19.469564930000001, 0.3995128261),
(6.3321067839999996, 0.70011546889999998)]),
('P',
[(83.783850110000003, 0.15591627499999999),
(19.469564930000001, 0.60768371860000003),
(6.3321067839999996, 0.3919573931)]),
('S',
[(6.1411512760000004, -0.2277635023),
(1.873246881, 0.21754360440000001),
(0.72275688250000003, 0.91667696109999997)]),
('P',
[(6.1411512760000004, 0.0049515111999999997),
(1.873246881, 0.57776646909999996),
(0.72275688250000003, 0.4846460366)]),
('D',
[(2.9648179269999999, 0.2197679508),
(0.90436396760000004, 0.65554736270000002),
(0.34893173370000002, 0.28657325900000002)]),
('S',
[(0.71226402460000005, -0.30884412150000001),
(0.26287022030000001, 0.0196064117),
(0.1160862609, 1.131034442)]),
('P',
[(0.71226402460000005, -0.12154686000000001),
(0.26287022030000001, 0.57152276040000005),
(0.1160862609, 0.54989494709999998)])],
24: [('S',
[(1232.3204499999999, 0.15432896730000001),
(224.46870820000001, 0.53532814230000003),
(60.749992509999998, 0.44463454219999998)]),
('S',
[(92.774624230000001, -0.099967229199999993),
(21.558827489999999, 0.3995128261),
(7.0115998099999999, 0.70011546889999998)]),
('P',
[(92.774624230000001, 0.15591627499999999),
(21.558827489999999, 0.60768371860000003),
(7.0115998099999999, 0.3919573931)]),
('S',
[(6.8994880959999998, -0.2277635023),
(2.104563782, 0.21754360440000001),
(0.81200613430000002, 0.91667696109999997)]),
('P',
[(6.8994880959999998, 0.0049515111999999997),
(2.104563782, 0.57776646909999996),
(0.81200613430000002, 0.4846460366)]),
('D',
[(4.2414792410000004, 0.2197679508),
(1.2937863599999999, 0.65554736270000002),
(0.49918299929999999, 0.28657325900000002)]),
('S',
[(0.75477805369999995, -0.30884412150000001),
(0.27856057080000002, 0.0196064117),
(0.1230152851, 1.131034442)]),
('P',
[(0.75477805369999995, -0.12154686000000001),
(0.27856057080000002, 0.57152276040000005),
(0.1230152851, 0.54989494709999998)])],
25: [('S',
[(1337.153266, 0.15432896730000001),
(243.56413649999999, 0.53532814230000003),
(65.917960620000002, 0.44463454219999998)]),
('S',
[(102.02200209999999, -0.099967229199999993),
(23.707719229999999, 0.3995128261),
(7.7104860979999996, 0.70011546889999998)]),
('P',
[(102.02200209999999, 0.15591627499999999),
(23.707719229999999, 0.60768371860000003),
(7.7104860979999996, 0.3919573931)]),
('S',
[(7.7019609219999996, -0.2277635023),
(2.349343572, 0.21754360440000001),
(0.90644978700000001, 0.91667696109999997)]),
('P',
[(7.7019609219999996, 0.0049515111999999997),
(2.349343572, 0.57776646909999996),
(0.90644978700000001, 0.4846460366)]),
('D',
[(5.4269504609999997, 0.2197679508),
(1.6553928680000001, 0.65554736270000002),
(0.63870203199999998, 0.28657325900000002)]),
('S',
[(0.67098228599999998, -0.30884412150000001),
(0.247634663, 0.0196064117),
(0.109358078, 1.131034442)]),
('P',
[(0.67098228599999998, -0.12154686000000001),
(0.247634663, 0.57152276040000005),
(0.109358078, 0.54989494709999998)])],
26: [('S',
[(1447.4004110000001, 0.15432896730000001),
(263.6457916, 0.53532814230000003),
(71.352840189999995, 0.44463454219999998)]),
('S',
[(111.91948910000001, -0.099967229199999993),
(26.00768236, 0.3995128261),
(8.4585054900000003, 0.70011546889999998)]),
('P',
[(111.91948910000001, 0.15591627499999999),
(26.00768236, 0.60768371860000003),
(8.4585054900000003, 0.3919573931)]),
('S',
[(8.5485697540000007, -0.2277635023),
(2.6075862500000002, 0.21754360440000001),
(1.00608784, 0.91667696109999997)]),
('P',
[(8.5485697540000007, 0.0049515111999999997),
(2.6075862500000002, 0.57776646909999996),
(1.00608784, 0.4846460366)]),
('D',
[(6.4118034750000001, 0.2197679508),
(1.955804428, 0.65554736270000002),
(0.75461015099999995, 0.28657325900000002)]),
('S',
[(0.59211568140000004, -0.30884412150000001),
(0.21852792539999999, 0.0196064117),
(0.096504235899999999, 1.131034442)]),
('P',
[(0.59211568140000004, -0.12154686000000001),
(0.21852792539999999, 0.57152276040000005),
(0.096504235899999999, 0.54989494709999998)])],
27: [('S',
[(1557.298704, 0.15432896730000001),
(283.66390289999998, 0.53532814230000003),
(76.770522339999999, 0.44463454219999998)]),
('S',
[(121.83447409999999, -0.099967229199999993),
(28.311711639999999, 0.3995128261),
(9.2078473209999991, 0.70011546889999998)]),
('P',
[(121.83447409999999, 0.15591627499999999),
(28.311711639999999, 0.60768371860000003),
(9.2078473209999991, 0.3919573931)]),
('S',
[(9.4808516780000005, -0.2277635023),
(2.8919619519999999, 0.21754360440000001),
(1.1158088269999999, 0.91667696109999997)]),
('P',
[(9.4808516780000005, 0.0049515111999999997),
(2.8919619519999999, 0.57776646909999996),
(1.1158088269999999, 0.4846460366)]),
('D',
[(7.6645273889999999, 0.2197679508),
(2.3379251509999999, 0.65554736270000002),
(0.90204420500000004, 0.28657325900000002)]),
('S',
[(0.59211568140000004, -0.30884412150000001),
(0.21852792539999999, 0.0196064117),
(0.096504235899999999, 1.131034442)]),
('P',
[(0.59211568140000004, -0.12154686000000001),
(0.21852792539999999, 0.57152276040000005),
(0.096504235899999999, 0.54989494709999998)])],
28: [('S',
[(1679.7710279999999, 0.15432896730000001),
(305.97238959999999, 0.53532814230000003),
(82.808069430000003, 0.44463454219999998)]),
('S',
[(132.85888990000001, -0.099967229199999993),
(30.87354878, 0.3995128261),
(10.041036269999999, 0.70011546889999998)]),
('P',
[(132.85888990000001, 0.15591627499999999),
(30.87354878, 0.60768371860000003),
(10.041036269999999, 0.3919573931)]),
('S',
[(10.330743350000001, -0.2277635023),
(3.151206003, 0.21754360440000001),
(1.2158332409999999, 0.91667696109999997)]),
('P',
[(10.330743350000001, 0.0049515111999999997),
(3.151206003, 0.57776646909999996),
(1.2158332409999999, 0.4846460366)]),
('D',
[(8.6277227550000006, 0.2197679508),
(2.6317304379999999, 0.65554736270000002),
(1.0154034190000001, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
29: [('S',
[(1801.80673, 0.15432896730000001),
(328.201345, 0.53532814230000003),
(88.824092280000002, 0.44463454219999998)]),
('S',
[(144.1212184, -0.099967229199999993),
(33.490671730000003, 0.3995128261),
(10.892205880000001, 0.70011546889999998)]),
('P',
[(144.1212184, 0.15591627499999999),
(33.490671730000003, 0.60768371860000003),
(10.892205880000001, 0.3919573931)]),
('S',
[(11.307754020000001, -0.2277635023),
(3.4492253970000002, 0.21754360440000001),
(1.330818388, 0.91667696109999997)]),
('P',
[(11.307754020000001, 0.0049515111999999997),
(3.4492253970000002, 0.57776646909999996),
(1.330818388, 0.4846460366)]),
('D',
[(9.6479119299999994, 0.2197679508),
(2.9429206539999999, 0.65554736270000002),
(1.1354702780000001, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
30: [('S',
[(1929.4323010000001, 0.15432896730000001),
(351.44850209999998, 0.53532814230000003),
(95.115680209999994, 0.44463454219999998)]),
('S',
[(155.84167550000001, -0.099967229199999993),
(36.214253909999996, 0.3995128261),
(11.777999339999999, 0.70011546889999998)]),
('P',
[(155.84167550000001, 0.15591627499999999),
(36.214253909999996, 0.60768371860000003),
(11.777999339999999, 0.3919573931)]),
('S',
[(12.28152744, -0.2277635023),
(3.7462573269999999, 0.21754360440000001),
(1.4454225409999999, 0.91667696109999997)]),
('P',
[(12.28152744, 0.0049515111999999997),
(3.7462573269999999, 0.57776646909999996),
(1.4454225409999999, 0.4846460366)]),
('D',
[(10.947370769999999, 0.2197679508),
(3.3392970179999999, 0.65554736270000002),
(1.288404602, 0.28657325900000002)]),
('S',
[(0.88971388539999996, -0.30884412150000001),
(0.32836037899999998, 0.0196064117),
(0.14500740549999999, 1.131034442)]),
('P',
[(0.88971388539999996, -0.12154686000000001),
(0.32836037899999998, 0.57152276040000005),
(0.14500740549999999, 0.54989494709999998)])],
31: [('S',
[(2061.424532, 0.15432896730000001),
(375.49105170000001, 0.53532814230000003),
(101.6225324, 0.44463454219999998)]),
('S',
[(167.76186799999999, -0.099967229199999993),
(38.984250279999998, 0.3995128261),
(12.678888130000001, 0.70011546889999998)]),
('P',
[(167.76186799999999, 0.15591627499999999),
(38.984250279999998, 0.60768371860000003),
(12.678888130000001, 0.3919573931)]),
('S',
[(12.6150552, -0.2277635023),
(3.8479939270000001, 0.21754360440000001),
(1.4846756839999999, 0.91667696109999997)]),
('P',
[(12.6150552, 0.0049515111999999997),
(3.8479939270000001, 0.57776646909999996),
(1.4846756839999999, 0.4846460366)]),
('D',
[(12.6150552, 0.2197679508),
(3.8479939270000001, 0.65554736270000002),
(1.4846756839999999, 0.28657325900000002)]),
('S',
[(0.79852437359999995, -0.30884412150000001),
(0.29470571410000002, 0.0196064117),
(0.13014515060000001, 1.131034442)]),
('P',
[(0.79852437359999995, -0.12154686000000001),
(0.29470571410000002, 0.57152276040000005),
(0.13014515060000001, 0.54989494709999998)])],
32: [('S',
[(2196.3842289999998, 0.15432896730000001),
(400.07412920000002, 0.53532814230000003),
(108.27567259999999, 0.44463454219999998)]),
('S',
[(180.389038, -0.099967229199999993),
(41.91853304, 0.3995128261),
(13.633207949999999, 0.70011546889999998)]),
('P',
[(180.389038, 0.15591627499999999),
(41.91853304, 0.60768371860000003),
(13.633207949999999, 0.3919573931)]),
('S',
[(14.196656190000001, -0.2277635023),
(4.3304326399999997, 0.21754360440000001),
(1.670815538, 0.91667696109999997)]),
('P',
[(14.196656190000001, 0.0049515111999999997),
(4.3304326399999997, 0.57776646909999996),
(1.670815538, 0.4846460366)]),
('D',
[(14.196656190000001, 0.2197679508),
(4.3304326399999997, 0.65554736270000002),
(1.670815538, 0.28657325900000002)]),
('S',
[(0.98583255999999997, -0.30884412150000001),
(0.36383421500000002, 0.0196064117),
(0.1606730254, 1.131034442)]),
('P',
[(0.98583255999999997, -0.12154686000000001),
(0.36383421500000002, | |
yielding strings, tuples or dicts.
keys: which keys of the tuple/dict to tokenize (by default: all)
vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.
vocab_file: Name of the vocabulary file.
vocab_dir: Directory which contains the vocabulary file.
n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused;
This is common for example when reserving the 0 for padding and 1 for EOS,
but it's only needed if these symbols are not already included (and thus
reserved) in the vocab_file.
debug: boolean, If true, prints debug information every power of 2 steps.
Yields:
Examples from stream with strings at `keys` replaced by np.arrays of
integers -- the tokenized version of these strings.
"""
vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)
debug_count = 0
for example in stream:
debug_count += 1
if isinstance(example, (list, tuple)):
new_example = []
for i, x in enumerate(example):
if keys is None or i in keys:
new_example.append(np.array(vocab.encode(x)) + n_reserved_ids)
else:
new_example.append(x)
output = tuple(new_example)
if debug and (debug_count & debug_count - 1 == 0):
logging.info('Tokenize Example[%d] is %r', debug_count, output)
yield output
elif isinstance(example, dict):
new_example = {}
for k in example:
if keys is None or k in keys:
new_example[k] = np.array(vocab.encode(example[k])) + n_reserved_ids
else:
new_example[k] = example[k]
if debug and (debug_count & debug_count - 1 == 0):
logging.info('Tokenize Example[%d] is %r', debug_count, new_example)
yield new_example
else:
output = np.array(vocab.encode(example)) + n_reserved_ids
if debug and (debug_count & debug_count - 1 == 0):
logging.info('Tokenize Example[%d] is %r', debug_count, output)
yield output
@gin.configurable()
def Tokenize(  # pylint: disable=invalid-name
    keys=None,
    vocab_type='subword',  # pylint: disable=invalid-name
    vocab_file=None,
    vocab_dir=None,
    n_reserved_ids=0,
    debug=False):
  """Returns a function that maps text to integer arrays; see `tokenize`."""
  def _tokenize_stream(generator):
    # Forward every configured argument to the underlying generator function.
    return tokenize(generator,
                    keys=keys,
                    vocab_type=vocab_type,
                    vocab_file=vocab_file,
                    vocab_dir=vocab_dir,
                    n_reserved_ids=n_reserved_ids,
                    debug=debug)
  return _tokenize_stream
def detokenize(x,
               vocab_type='subword',
               vocab_file=None,
               vocab_dir=None,
               n_reserved_ids=0):
  """Maps integer arrays to text; the opposite of `tokenize`.

  In many cases (all char- and subword-type vocabularies and most sentencepiece
  ones) the tokenization is invertible, so detokenize(tokenize(x)) = x. In some
  more rare cases this can remove some spacing, but it is still often useful
  to run detokenize to get a readable version for a tokenized string.

  Args:
    x: a list or numpy array of integers.
    vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.
    vocab_file: Name of the vocabulary file.
    vocab_dir: Directory which contains the vocabulary file.
    n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused;
      This is common for example when reserving the 0 for padding and 1 for EOS,
      but it's only needed if these symbols are not already included (and thus
      reserved) in the vocab_file.

  Returns:
    A string corresponding to the de-tokenized version of x.
  """
  vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)
  # Undo the reserved-id shift that `tokenize` applied before decoding.
  unshifted = np.array(x) - n_reserved_ids
  return str(vocab.decode(unshifted.tolist()))
def _to_unicode(s):
# Errors of the casting are ignored (e.g. sequences not allowed by UTF-8),
# in order not to stay with incomplete examples (with empty values).
return str(s, encoding='utf-8', errors='ignore')
def ConvertToUnicode(keys=None, debug=False):  # pylint: disable=invalid-name
  """Converts to Unicode UTF-8 elements of an example.

  Useful for when TFDS outputs byte arrays. All of the errors of the conversion
  are ignored.

  Args:
    keys: tuple/list of example dimensions to convert.
    debug: boolean, If true, prints debug information every power of 2 steps.

  Returns:
    Function converting chosen elements of an example to UTF-8.
  """
  def _process(stream):
    count = 0
    for example in stream:
      count += 1
      # Log only at powers of two so logging volume stays bounded.
      log_now = debug and (count & (count - 1) == 0)
      if isinstance(example, (list, tuple)):
        converted = tuple(
            _to_unicode(x) if keys is None or i in keys else x
            for i, x in enumerate(example))
      elif isinstance(example, dict):
        converted = {
            k: _to_unicode(v) if keys is None or k in keys else v
            for k, v in example.items()}
      else:
        # Bare (non-container) examples are always converted.
        converted = _to_unicode(example)
      if log_now:
        logging.info('Example[%d] is %r', count, converted)
      yield converted
  return _process
def vocab_size(vocab_type='subword',
               vocab_file=None,
               vocab_dir=None,
               n_reserved_ids=0):
  """Returns the size of the vocabulary (number of symbols used).

  This function can be used to set the size of the final layers of a model that
  needs to predict symbols from a given vocabulary. More precisely, if this
  function returns N then the last layer size should be set to at least N (it
  can be more). Note that this function does take reserved IDs into account.

  Args:
    vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.
    vocab_file: Name of the vocabulary file.
    vocab_dir: Directory which contains the vocabulary file.
    n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused.

  Returns:
    An integer, the number of symbols used (including reserved IDs).
  """
  # Reserved IDs sit below the vocabulary's own range, so they add directly.
  return _get_vocab(vocab_type, vocab_file, vocab_dir).vocab_size + n_reserved_ids
def _get_vocab(vocab_type='subword', vocab_file=None, vocab_dir=None):
"""Gets the vocabulary object for tokenization; see tokenize for details."""
if vocab_type not in [
'char', 'subword', 'sentencepiece', 'bert', 'bert-lowercase'
]:
raise ValueError(
'vocab_type must be "subword", "char", "sentencepiece", "bert" or "bert-lowercase" '
f'but got {vocab_type}')
if vocab_type == 'char':
# Note that we set num_reserved_ids=0 below. We could instead pass
# the value n_reserved_ids from tokenize here -- ByteTextEncoder does
# exactly the same thing as tokenize above, ie., adds num_reserved_ids.
return text_encoder.ByteTextEncoder(num_reserved_ids=0)
vocab_dir = vocab_dir or 'gs://trax-ml/vocabs/'
path = os.path.join(vocab_dir, vocab_file)
if vocab_type == 'subword':
return text_encoder.SubwordTextEncoder(path)
if vocab_type == 'bert':
return text_encoder.BertEncoder(path, do_lower_case=False)
if vocab_type == 'bert-lowercase':
return text_encoder.BertEncoder(path, do_lower_case=True)
assert vocab_type == 'sentencepiece'
return t5.data.SentencePieceVocabulary(sentencepiece_model_file=path,
extra_ids=0)
# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_no_augmentation_preprocess(dataset, training):
  """Scales cifar10 images to [0, 1] floats; no augmentation."""
  del training  # unused: identical preprocessing in train and eval

  def _scale(features, targets):
    # uint8 pixel values -> float32 in [0, 1]
    features['image'] = tf.cast(features['image'], tf.float32) / 255.0
    return features, targets

  return dataset.map(_scale)
def _cifar_augment_image(image):
  """Image augmentation suitable for CIFAR-10/100.

  As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).

  Args:
    image: a Tensor.

  Returns:
    Tensor of the same shape as image.
  """
  # Pad to 40x40, take a random 32x32 crop, then flip horizontally at random.
  padded = tf.image.resize_with_crop_or_pad(image, 40, 40)
  cropped = tf.image.random_crop(padded, [32, 32, 3])
  return tf.image.random_flip_left_right(cropped)
# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_augmentation_preprocess(dataset, training):
  """Preprocessing for cifar10 with augmentation (see below)."""

  def _augment(features, targets):
    features['image'] = _cifar_augment_image(features['image'])
    return features, targets

  def _scale(features, targets):
    # uint8 pixel values -> float32 in [0, 1]
    features['image'] = tf.cast(features['image'], tf.float32) / 255.0
    return features, targets

  # Random crop/flip augmentation only while training; scaling always.
  if training:
    dataset = dataset.map(_augment)
  return dataset.map(_scale)
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_augmentation_flatten_preprocess(dataset,
                                            training,
                                            predict_image_train_weight=0.01):
  """Preprocessing for cifar10 that flattens it and appends targets."""

  def _augment(features, targets):
    features['image'] = _cifar_augment_image(features['image'])
    return features, targets

  def _flatten_image(features, targets):
    """Concatenate the flat image with the class id; build a loss mask."""
    pixels = tf.cast(tf.reshape(features['image'], [-1]), tf.int64)
    label = tf.expand_dims(targets, axis=0)
    sequence = tf.concat([pixels, label], axis=0)
    # Image positions get a small loss weight during training (0 in eval);
    # the appended target position always gets weight 1.
    image_weight = predict_image_train_weight if training else 0.0
    pixel_mask = tf.cast(tf.ones_like(pixels), tf.float32) * image_weight
    label_mask = tf.cast(tf.ones_like(label), tf.float32)
    new_features = {
        'image': sequence,
        'mask': tf.concat([pixel_mask, label_mask], axis=0),
    }
    return new_features, sequence

  if training:
    dataset = dataset.map(_augment)
  return dataset.map(_flatten_image)
@gin.configurable(denylist=['dataset', 'training'])
def concat_preprocess(dataset, training, pad_symbol=0):
  """Pre-processing function that concatenates input and target for LM."""
  del training  # unused: identical behavior in train and eval

  def _concat(features, targets):
    inp = features['inputs']
    # One pad symbol shaped like a single input element.
    pad = tf.expand_dims(tf.zeros_like(inp[0]) + pad_symbol, axis=0)
    joined = tf.concat([pad, inp, pad, targets], axis=0)
    # Note: we're updating existing features dictionary here, so make sure
    # it is not re-used in some other ways outside of this function.
    features['inputs'] = joined
    return features, joined

  return dataset.map(_concat)
@gin.configurable(denylist=['dataset', 'training'])
def squeeze_targets_preprocess(dataset, training):
  """Pre-processing function that squeezes last axis of targets."""
  del training  # unused

  def _maybe_squeeze(features, targets):
    # Drop a trailing singleton dimension, e.g. (n, 1) -> (n,).
    if targets.shape[-1] == 1:
      targets = tf.squeeze(targets, axis=-1)
    return features, targets

  return dataset.map(_maybe_squeeze)
@gin.configurable(denylist=['dataset', 'training'])
def lm1b_preprocess(dataset,
                    training,
                    max_target_length=-1,
                    max_eval_target_length=-1):
  """Preprocessing for LM1B: filter out targets exceeding maximum length."""

  def _shorter_than(limit):
    # Keep examples whose target has at most `limit` tokens.
    return lambda _, target: tf.less(tf.shape(target)[0], limit + 1)

  if training and max_target_length > 0:
    dataset = dataset.filter(_shorter_than(max_target_length))
  if not training and max_eval_target_length > 0:
    dataset = dataset.filter(_shorter_than(max_eval_target_length))
  return dataset
# TODO(lukaszkaiser): find a single more abstract way of text pre-processing.
@gin.configurable(denylist=['dataset', 'training'])
def wmt_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
"""Preprocessing | |
It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.statistics_endpoint.call_with_http_info(**kwargs)
def statistics2variable(self, **kwargs):
"""statistics2variable # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.statistics2variable(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
name (str): [optional]
selection (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
searchcriterion (str): [optional] if omitted the server will use the default value of "Code"
sortcriterion (str): [optional] if omitted the server will use the default value of "Name"
pagelength (str): [optional] if omitted the server will use the default value of "100"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.statistics2variable_endpoint.call_with_http_info(**kwargs)
def table(self, **kwargs):
"""table # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.table(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
compress (str): [optional] if omitted the server will use the default value of "false"
transpose (str): [optional] if omitted the server will use the default value of "false"
startyear (str): [optional]
endyear (str): [optional]
timeslices (str): [optional]
regionalvariable (str): [optional]
regionalkey (str): [optional]
classifyingvariable1 (str): [optional]
classifyingkey1 (str): [optional]
classifyingvariable2 (str): [optional]
classifyingkey2 (str): [optional]
classifyingvariable3 (str): [optional]
classifyingkey3 (str): [optional]
job (str): [optional] if omitted the server will use the default value of "false"
stand (str): [optional] if omitted the server will use the default value of "01.01.1970 01:00"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.table_endpoint.call_with_http_info(**kwargs)
def table_meta(self, **kwargs):
"""table_meta # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.table_meta(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
password (str): [optional] if omitted the server will use the default value of "GAST"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.table_meta_endpoint.call_with_http_info(**kwargs)
def tablefile(self, **kwargs):
"""tablefile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.tablefile(async_req=True)
>>> result = thread.get()
Keyword Args:
username (str): [optional] if omitted the server will use the default value of "GAST"
            password (str): [optional] if omitted the server will use the default value of "GAST"
name (str): [optional]
area (str): [optional] if omitted the server will use the default value of "free"
compress (str): [optional] if omitted the server will use the default value of "false"
transpose (str): [optional] if omitted the server will use the default value of "false"
startyear (str): [optional]
endyear (str): [optional]
timeslices (str): [optional]
regionalvariable (str): [optional]
regionalkey (str): [optional]
classifyingvariable1 (str): [optional]
classifyingkey1 (str): [optional]
classifyingvariable2 (str): [optional]
classifyingkey2 (str): [optional]
classifyingvariable3 (str): [optional]
classifyingkey3 (str): [optional]
format (str): [optional] if omitted the server will use the default value of "csv"
job (str): [optional] if omitted the server will use the default value of "false"
stand (str): [optional] if omitted the server will use the default value of "01.01.1970 01:00"
language (str): [optional] if omitted the server will use the default value of "de"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a | |
<filename>nova/virt/libvirt/migration.py
# Copyright (c) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods to manage guests migration
"""
from collections import deque
from collections import namedtuple
from lxml import etree
from oslo_log import log as logging
import six
from nova import objects
from nova.virt import hardware
from nova.virt.libvirt import config as vconfig
from nova.compute import power_state
import nova.conf
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# TODO(berrange): hack to avoid a "import libvirt" in this file.
# Remove this and similar hacks in guest.py, driver.py, host.py
# etc in Ocata.
libvirt = None
# Bundle of callbacks supplied by the compute driver (see
# get_updated_guest_xml / _update_volume_xml below) so this module can
# build destination-host device configs without importing the driver.
DriverInterface = namedtuple(
    'DriverInterface', ['get_volume_config',
                        'get_guest_numa_config',
                        'get_guest_memory_backing_config',
                        'get_guest_cpu_config', ])
def graphics_listen_addrs(migrate_data):
    """Returns listen addresses of vnc/spice from a LibvirtLiveMigrateData"""
    has_vnc = migrate_data.obj_attr_is_set('graphics_listen_addr_vnc')
    has_spice = migrate_data.obj_attr_is_set('graphics_listen_addr_spice')
    # No graphics addresses at all -> signal "nothing to update" with None.
    if not (has_vnc or has_spice):
        return None
    addrs = {'vnc': None, 'spice': None}
    if has_vnc:
        addrs['vnc'] = str(migrate_data.graphics_listen_addr_vnc)
    if has_spice:
        addrs['spice'] = str(migrate_data.graphics_listen_addr_spice)
    return addrs
def serial_listen_addr(migrate_data):
    """Returns listen address serial from a LibvirtLiveMigrateData"""
    # NOTE (markus_z/dansmith): Our own from_legacy_dict() code can return
    # an object with nothing set here, depending on the compute RPC version
    # pin: a legacy dict over the wire converted to an object may leave
    # 'serial_listen_addr' unset.
    if not migrate_data.obj_attr_is_set('serial_listen_addr'):
        return None
    # NOTE (markus_z): The attribute is either an IP address string or
    # None. Do NOT apply str() here: str(None) would give the invalid
    # string value 'None'.
    return migrate_data.serial_listen_addr
# TODO(sahid): remove me for Q*
def serial_listen_ports(migrate_data):
    """Returns ports serial from a LibvirtLiveMigrateData"""
    if migrate_data.obj_attr_is_set('serial_listen_ports'):
        return migrate_data.serial_listen_ports
    return []
def get_updated_guest_xml(guest, migrate_data, driver_interface, instance):
    """Return the guest's migratable XML updated for the destination host.

    Fetches the source guest's XML and runs it through the per-subsystem
    updaters (graphics, serial, volumes, perf events, NUMA) in order.
    """
    doc = etree.fromstring(guest.get_xml_desc(dump_migratable=True))
    doc = _update_graphics_xml(doc, migrate_data)
    doc = _update_serial_xml(doc, migrate_data)
    doc = _update_volume_xml(doc, migrate_data, driver_interface)
    doc = _update_perf_events_xml(doc, migrate_data)
    doc = _update_numa_xml(doc, driver_interface, instance)
    return etree.tostring(doc)
def _update_graphics_xml(xml_doc, migrate_data):
    """Rewrite vnc/spice listen addresses to the destination host's."""
    addrs = graphics_listen_addrs(migrate_data)
    # change over listen addresses
    for dev in xml_doc.findall('./devices/graphics'):
        graphics_type = dev.get('type')
        if graphics_type not in ('vnc', 'spice'):
            continue
        # Address may appear as a <listen> child and/or a legacy
        # 'listen' attribute on <graphics>; update whichever is present.
        listen_elem = dev.find('listen')
        if listen_elem is not None:
            listen_elem.set('address', addrs[graphics_type])
        if dev.get('listen') is not None:
            dev.set('listen', addrs[graphics_type])
    return xml_doc
def _update_serial_xml(xml_doc, migrate_data):
    """Point serial/console TCP sources at the destination host and ports."""
    listen_addr = serial_listen_addr(migrate_data)
    listen_ports = serial_listen_ports(migrate_data)

    def set_listen_addr_and_port(source, listen_addr, serial_listen_ports):
        # The XML nodes can be empty, which would make checks like
        # "if source.get('host'):" different to an explicit check for
        # None. That's why we have to check for None in this method.
        if source.get('host') is not None:
            source.set('host', listen_addr)
        # Walk up to the device element to find the guest-visible port
        # index; it selects which destination host port to use.
        device = source.getparent()
        target = device.find("target")
        if target is not None and source.get('service') is not None:
            port_index = int(target.get('port'))
            # NOTE (markus_z): Previous releases might not give us the
            # ports yet, that's why we have this check here.
            if len(serial_listen_ports) > port_index:
                source.set('service', str(serial_listen_ports[port_index]))

    # This updates all "LibvirtConfigGuestSerial" devices
    for source in xml_doc.findall("./devices/serial[@type='tcp']/source"):
        set_listen_addr_and_port(source, listen_addr, listen_ports)

    # This updates all "LibvirtConfigGuestConsole" devices
    for source in xml_doc.findall("./devices/console[@type='tcp']/source"):
        set_listen_addr_and_port(source, listen_addr, listen_ports)
    return xml_doc
def _update_volume_xml(xml_doc, migrate_data, driver_interface):
    """Update XML using device information of destination host."""
    migrate_bdm_info = migrate_data.bdms
    # Update volume xml
    parser = etree.XMLParser(remove_blank_text=True)
    disk_nodes = xml_doc.findall('./devices/disk')
    # Map each block device's serial (volume id) to its migrate data.
    bdm_info_by_serial = {x.serial: x for x in migrate_bdm_info}
    for pos, disk_dev in enumerate(disk_nodes):
        serial_source = disk_dev.findtext('serial')
        bdm_info = bdm_info_by_serial.get(serial_source)
        # Skip disks without a serial or without usable destination info.
        if (serial_source is None or
            not bdm_info or not bdm_info.connection_info or
            serial_source not in bdm_info_by_serial):
            continue
        # Build the destination host's XML for this volume and parse it.
        conf = driver_interface.get_volume_config(
            bdm_info.connection_info, bdm_info.as_disk_info())
        xml_doc2 = etree.XML(conf.to_xml(), parser)
        serial_dest = xml_doc2.findtext('serial')
        # Compare source serial and destination serial number.
        # If these serial numbers match, continue the process.
        if (serial_dest and (serial_source == serial_dest)):
            LOG.debug("Find same serial number: pos=%(pos)s, "
                      "serial=%(num)s",
                      {'pos': pos, 'num': serial_source})
            for cnt, item_src in enumerate(disk_dev):
                # If source and destination have same item, update
                # the item using destination value.
                for item_dst in xml_doc2.findall(item_src.tag):
                    disk_dev.remove(item_src)
                    item_dst.tail = None
                    disk_dev.insert(cnt, item_dst)
            # If destination has additional items, these items should be
            # added here.
            # NOTE(review): 'cnt' carries over from the loop above —
            # presumably disk_dev always has children here; confirm.
            for item_dst in list(xml_doc2):
                item_dst.tail = None
                disk_dev.insert(cnt, item_dst)
    return xml_doc
def _update_perf_events_xml(xml_doc, migrate_data):
"""Update XML by the supported events of destination host."""
supported_perf_events = []
old_xml_has_perf = True
if 'supported_perf_events' in migrate_data:
supported_perf_events = migrate_data.supported_perf_events
perf_events = xml_doc.findall('./perf')
# remove perf events from xml
if not perf_events:
perf_events = etree.Element("perf")
old_xml_has_perf = False
else:
perf_events = perf_events[0]
for _, event in enumerate(perf_events):
perf_events.remove(event)
if not supported_perf_events:
return xml_doc
# add supported perf events
for e in supported_perf_events:
new_event = etree.Element("event", enabled="yes", name=e)
perf_events.append(new_event)
if not old_xml_has_perf:
xml_doc.append(perf_events)
return xml_doc
def find_job_type(guest, instance):
    """Determine the (likely) current migration job type

    :param guest: a nova.virt.libvirt.guest.Guest
    :param instance: a nova.objects.Instance

    Annoyingly when job type == NONE and migration is no longer running,
    we don't know whether we stopped because of failure or completion.
    We can distinguish these cases by seeing if the VM still exists &
    is running on the current host

    :returns: a libvirt job type constant
    """
    try:
        if not guest.is_active():
            LOG.debug("VM is shutoff, migration finished",
                      instance=instance)
            return libvirt.VIR_DOMAIN_JOB_COMPLETED
        LOG.debug("VM running on src, migration failed",
                  instance=instance)
        return libvirt.VIR_DOMAIN_JOB_FAILED
    except libvirt.libvirtError as ex:
        LOG.debug("Error checking domain status %(ex)s",
                  {"ex": ex}, instance=instance)
        # A vanished domain means the migration moved it off this host.
        if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
            LOG.debug("VM is missing, migration finished",
                      instance=instance)
            return libvirt.VIR_DOMAIN_JOB_COMPLETED
        LOG.info("Error %(ex)s, migration failed",
                 {"ex": ex}, instance=instance)
        return libvirt.VIR_DOMAIN_JOB_FAILED
def should_abort(instance, now,
                 progress_time, progress_timeout,
                 elapsed, completion_timeout,
                 migration_status):
    """Determine if the migration should be aborted

    :param instance: a nova.objects.Instance
    :param now: current time in secs since epoch
    :param progress_time: when progress was last made in secs since epoch
    :param progress_timeout: time in secs to allow for progress
    :param elapsed: total elapsed time of migration in secs
    :param completion_timeout: time in secs to allow for completion
    :param migration_status: current status of the migration

    Checks whether either the progress timeout or the completion timeout
    has been hit; a post-copy migration is never aborted.

    :returns: (result, fault): result is True if migration should be aborted,
              False otherwise; fault is the description of failure when abort
    """
    # Post-copy migrations are guaranteed to converge; never abort them.
    if migration_status == 'running (post-copy)':
        return False, None

    stalled_for = now - progress_time
    if progress_timeout != 0 and stalled_for > progress_timeout:
        LOG.warning("Live migration stuck for %d sec",
                    stalled_for, instance=instance)
        return True, "Live migration stuck for %d sec." % stalled_for

    if completion_timeout != 0 and elapsed > completion_timeout:
        LOG.warning("Live migration not completed after %d sec",
                    completion_timeout, instance=instance)
        return True, ("Live migration timeout after %d sec."
                      % completion_timeout)

    return False, None
def should_switch_to_postcopy(memory_iteration, current_data_remaining,
                              previous_data_remaining, migration_status):
    """Determine if the migration should be switched to postcopy mode

    :param memory_iteration: Number of memory iterations during the migration
    :param current_data_remaining: amount of memory to be transferred
    :param previous_data_remaining: previous memory to be transferred
    :param migration_status: current status of the migration

    After the first full memory-copy pass, checks whether pre-copy is still
    making progress; never switches if already in post-copy mode.

    :returns: True if migration should be switched to postcopy mode,
              False otherwise
    """
    if (migration_status == 'running (post-copy)' or
            previous_data_remaining <= 0):
        return False
    if memory_iteration <= 1:
        return False
    transferred = previous_data_remaining - current_data_remaining
    progress_percentage = round(transferred * 100 / previous_data_remaining)
    # Less than 10% of the remaining memory transferred per iteration
    # after the first pass means pre-copy is not converging.
    return progress_percentage < 10
def update_downtime(guest, instance,
olddowntime,
downtime_steps, elapsed):
"""Update max downtime if needed
:param guest: a nova.virt.libvirt.guest.Guest to set downtime for
:param instance: a nova.objects.Instance
:param olddowntime: current set downtime, or None
:param downtime_steps: list of | |
White":0xe8e6e7,
"Minuette":0xd47791,
"Minute Mauve":0xcfc9c8,
"Mirabella":0x886793,
"Mirabelle Yellow":0xf3be67,
"Miracle":0x898696,
"Miracle Bay":0x799292,
"Miracle Elixir":0x617ba6,
"Mirador":0xbcdccd,
"Mirage":0x373f43,
"Mirage Blue":0x636c77,
"Mirage Grey":0xabafae,
"Mirage Lake":0x4f938f,
"Mirage White":0xf5f4e6,
"Miranda's Spike":0x614251,
"Mirror Ball":0xd6d4d7,
"Mirror Lake":0x7aa8cb,
"Mirror Mirror":0xa8b0b2,
"Mirrored Willow":0x8e876e,
"Mischief Maker":0x954738,
"Mischief Mouse":0xb7bab9,
"Mischievous":0xdff2dd,
"Mischka":0xa5a9b2,
"Missed":0xeff0c0,
"Missing Link":0x6f5d57,
"Mission Bay Blue":0x9ba9ab,
"Mission Brown":0x775c47,
"Mission Control":0x818387,
"Mission Courtyard":0xf3d1b3,
"Mission Gold":0xb78d61,
"Mission Hills":0xb29c7f,
"Mission Jewel":0x456252,
"Mission Stone":0xdac5b6,
"Mission Tan":0xdac6a8,
"Mission Tile":0x874c3e,
"Mission Trail":0x857a64,
"Mission White":0xe2d8c2,
"Mission Wildflower":0x9e5566,
"Mississippi Mud":0x99886f,
"Mississippi River":0x3b638c,
"Missouri Mud":0xa6a19b,
"Mist Green":0xaacebc,
"Mist Grey":0xc4c4bc,
"Mist of Green":0xe3f1eb,
"Mist Spirit":0xe4ebe7,
"Mist Yellow":0xf8eed6,
"Misted Eve":0xa2b7cf,
"Misted Fern":0xe1ecd1,
"Misted Yellow":0xdab965,
"Mistletoe":0x8aa282,
"Mistletoe Kiss":0x98b489,
"Mistral":0xb8bfcc,
"Misty":0xcdd2d2,
"Misty Afternoon":0xc6dcc7,
"Misty Aqua":0xbcdbdb,
"Misty Beach Cattle":0xf1eedf,
"Misty Bead":0xd2d59b,
"Misty Blue":0xbfcdcc,
"Misty Blush":0xddc9c6,
"Misty Coast":0xd5d9d3,
"Misty Dawn":0xe4e5e0,
"Misty Grape":0x65434d,
"Misty Hillside":0xdce5cc,
"Misty Isle":0xc5e4dc,
"Misty Jade":0xbcd9c8,
"Misty Lake":0xc2d5c4,
"Misty Lavender":0xdbd9e1,
"Misty Lawn":0xdffae1,
"Misty Lilac":0xbcb4c4,
"Misty Meadow":0xbec0b0,
"Misty Moonstone":0xe5e0cc,
"Misty Moor":0x718981,
"Misty Morn":0xe7e1e3,
"Misty Morning":0xb2c8bd,
"Misty Moss":0xbbb477,
"Misty Mustard":0xf7ebd1,
"Misty Rose":0xffe4e1,
"Misty Surf":0xb5c8c9,
"Misty Valley":0xbdc389,
"Misty Violet":0xdbd7e4,
"Mitchell Blue":0x0d789f,
"Mithril":0x878787,
"Mithril Silver":0xbbbbc1,
"Mix Or Match":0xccccba,
"Mixed Berries":0x96819a,
"Mixed Berry Jam":0x6a4652,
"Mixed Fruit":0xf9bab2,
"Mixed Veggies":0x719166,
"Miyamoto Red":0xe4030f,
"Miyazaki Verdant":0x6fea3e,
"Mizu":0x70c1e0,
"Mizu Cyan":0xa7dbed,
"Mizuasagi Green":0x749f8d,
"Moat":0x3e6a6b,
"Mobster":0x605a67,
"Moby Dick":0xdde8ed,
"Moccasin":0xfbebd6,
"Mocha":0x9d7651,
"Mocha Accent":0x8d8171,
"Mocha Bisque":0x8c543a,
"Mocha Black":0x6f5b52,
"Mocha Brown":0x6b565e,
"Mocha Foam":0xbba28e,
"Mocha Glow":0x773322,
"Mocha Ice":0xdfd2ca,
"Mocha Latte":0x82715f,
"Mocha Light":0xd7cfc2,
"Mocha Magic":0x88796d,
"Mocha Mousse":0xa47864,
"Mocha Tan":0xac9680,
"Mocha Wisp":0x918278,
"Mochaccino":0x945200,
"Mochachino":0xbeaf93,
"Mochito":0x8efa00,
"Mock Orange":0xffa368,
"Mod Orange":0xd8583c,
"Modal":0x31a6d1,
"Modal Blue":0x40a6ac,
"Mode Beige":0x96711f,
"Moderate White":0xe9decf,
"Modern Blue":0xbad1e9,
"Modern Gray":0xd5cec2,
"Modern History":0xbea27d,
"Modern Ivory":0xf5ecdc,
"Modern Lavender":0xa8aab3,
"Modern Mint":0x88a395,
"Modern Mocha":0x9d8376,
"Modern Monument":0xd6d6d1,
"Modern Zen":0xe0deb2,
"Moderne Class":0x745b49,
"Modest Mauve":0x838492,
"Modest Violet":0xe9e4ef,
"Modest White":0xe6ddd4,
"Modestly Peach":0xeea59d,
"Modesty":0xd4c7d9,
"Modish Moss":0xc3b68b,
"Moegi Green":0xf19172,
"Moelleux Au Chocolat":0x553311,
"Moenkopi Soil":0xc8a692,
"Mogwa-Cheong Yellow":0xddcc00,
"M<NAME>":0xbfa59e,
"Mohair Pink":0xa78594,
"Mohair Soft Blue Grey":0x97b2b7,
"Mohalla":0xa79b7e,
"Moire":0xbeadb0,
"Moire Satin":0x665d63,
"Moist Gold":0xdbdb70,
"Moist Silver":0xe0e7dd,
"Moisty Mire":0x004422,
"Mojave Desert":0xc7b595,
"Mojave Dusk":0xb99178,
"Mojave Gold":0xbf9c65,
"Mojave Sunset":0xaa6a53,
"Mojito":0xe4f3e0,
"Mojo":0x97463c,
"Molasses":0x574a47,
"Molasses Cookie":0x8b714b,
"Moldy Ochre":0xd5a300,
"Mole":0x392d2b,
"Mole Grey":0x938f8a,
"Moleskin":0xb0a196,
"Molly Green":0xe3efe3,
"Molly Robins":0x4d8b72,
"Molten Bronze":0xc69c04,
"Molten Core":0xff5800,
"Molten Ice":0xe1ede6,
"Molten Lava":0xb5332e,
"Molten Lead":0x686a69,
"Mom's Apple Pie":0xeab781,
"Mom's Love":0xffd4bb,
"Momentum":0x746f5c,
"Momo Peach":0xf47983,
"M<NAME>":0x542d24,
"Mona Lisa":0xff9889,
"Monaco":0xabd4e6,
"Monaco Blue":0x274374,
"Monarch":0x6b252c,
"Monarch Gold":0xb7813c,
"Monarch Migration":0xbf764c,
"Monarch Orange":0xefa06b,
"Monarch Wing":0xff8d25,
"Monarch's Cocoon":0x8cb293,
"Monarchist":0x4b62d2,
"Monarchy":0x9093ad,
"Monastery Mantle":0x41363a,
"Monastic":0xaba9d2,
"Monastir":0xb78999,
"Moncur":0x9bb9ae,
"Mondo":0x554d42,
"Mondrian Blue":0x0f478c,
"Monet":0xc3cfdc,
"Monet Lily":0xcdd7e6,
"Monet Magic":0xc1acc3,
"Monet Moonrise":0xeef0d1,
"Monet's Lavender":0xdde0ea,
"Money":0x7b9a6d,
"Money Banks":0xaabe49,
"Money Tree":0xc9937a,
"Mongolian Plateau":0x777700,
"Mongoose":0xa58b6f,
"Monk's Cloth":0x6e6355,
"Monkey Island":0x553b39,
"Monkey Madness":0x63584c,
"Monks Robe":0x704822,
"Monogram":0x595747,
"Monologue":0xa1bcd8,
"Monorail Silver":0xb8bcbb,
"Monroe Kiss":0xdec1b8,
"Monsoon":0x7a7679,
"Monstera Deliciosa":0x75bf0a,
"Monstrous Green":0x22cc11,
"Mont Blanc":0x9eb6d8,
"Mont Blanc Peak":0xf2e7e7,
"Montage":0x8190a4,
"Montana":0x393b3c,
"Montana Grape":0x6c5971,
"Montana Sky":0x6ab0b9,
"Montauk Sands":0xbbad9e,
"Monte Carlo":0x7ac5b4,
"Montecito":0xb6a180,
"Montego Bay":0x3fbabd,
"Monterey Brown":0x946e5c,
"Monterey Chestnut":0x7d4235,
"Montezuma":0xd2cdb6,
"Montezuma Gold":0xeecc44,
"Montezuma Hills":0xa6b2a4,
"Montezuma's Castle":0xd9ad9e,
"Montreux Blue":0x5879a2,
"Montrose Rose":0x9d6a73,
"Monument":0x84898c,
"Monument Grey":0x7a807a,
"Monza":0xc7031e,
"Moo":0xfbe5bd,
"Mood Indigo":0x353a4c,
"Mood Lighting":0xffe7d5,
"Mood Mode":0x7f90cb,
"Moody Black":0x49555d,
"Moody Blue":0x8378c7,
"Moody Blues":0x586e75,
"Mooloolaba":0xc7b8a9,
"Moon Base":0x7d7d77,
"Moon Buggy":0xc7bdc1,
"Moon Dance":0xfaefbe,
"Moon Drop":0xddd5c9,
"Moon Dust":0xe0e6f0,
"Moon Glass":0xbcd1c7,
"Moon Glow":0xf5f3ce,
"Moon Goddess":0xcfc7d5,
"Moon Jellyfish":0x8eb8ce,
"Moon Lily":0xe6e6e7,
"Moon Mist":0xcecdb8,
"Moon Rise":0xf4f4e8,
"Moon Rock":0x958b84,
"Moon Rose":0xb9aba5,
"Moon Shell":0xe9e3d8,
"Moon Valley":0xfcf1de,
"Moon White":0xeaf4fc,
"Moon Yellow":0xf0c420,
"Moonbeam":0xcdc6bd,
"Moondance":0xe5decc,
"Moondoggie":0xf3debf,
"Moonglade Water":0x65ffff,
"Moonglow":0xf8e4c4,
"Moonless Night":0x2f2d30,
"Moonlight":0xf6eed5,
"Moonlight Blue":0x506886,
"Moonlight Green":0xd2e8d8,
"Moonlight Jade":0xc7e5df,
"Moonlight Melody":0xaf73b0,
"Moonlight White":0xf9f0de,
"Moonlight Yellow":0xe1c38b,
"Moonlit Beach":0xf9f0e6,
"Moonlit Mauve":0xd28fb0,
"Moonlit Ocean":0x293b4d,
"Moonlit Orchid":0x949194,
"Moonlit Pool":0x205a61,
"Moonlit Snow":0xeaeeec,
"Moonmist":0xc9d9e0,
"Moonquake":0x8d9596,
"Moonraker":0xc0b2d7,
"Moonrose":0xa53f48,
"Moonscape":0x725f69,
"Moonshade":0x5a6e9c,
"Moonshadow":0x9845b0,
"Moonstone":0x3aa8c1,
"Moonstone Blue":0x73a9c2,
"Moonstruck":0xfcf0c2,
"Moonwalk":0xbebec4,
"Moonwort":0xa5ae9f,
"Moor Oak Grey":0x6a584d,
"Moor Pond Green":0x3c6461,
"Moorland":0xa6ab9b,
"Moorland Heather":0xcc94be,
"Moorstone":0xcfd1ca,
"Moose Fur":0x725440,
"Moose Trail":0x6b5445,
"Moosewood":0x5d5744,
"Moot Green":0xa2db10,
"Moping Green":0x00ee33,
"Morado Purple":0x9955cc,
"Morality":0xb4cde5,
"Morass":0x726138,
"Moray":0xc8bd6a,
"Moray Eel":0x00a78b,
"Mordant Blue":0x2a6671,
"Mordant Red 19":0xae0c00,
"Mordian Blue":0x2f5684,
"More Maple":0xd0ab70,
"More Melon":0xe0e3c8,
"More Mint":0xe6e8c5,
"More Than A Week":0x8d8d8d,
"Morel":0x685c53,
"Morganite":0xdfcdc6,
"Morning at Sea":0x82979b,
"Morning Blue":0x8da399,
"Morning Blush":0xf9e8df,
"Morning Bread":0xe7e6de,
"Morning Breeze":0xd5e3de,
"Morning Calm":0xceeeef,
"Morning Dew":0xb0b9ac,
"Morning Dew White":0xc6dbd6,
"Morning Fog":0xd0dbd7,
"Morning Forest":0x6dae81,
"Morning Frost":0xebf4df,
"Morning Glory":0x9ed1d3,
"Morning Glory Pink":0xca99b7,
"Morning Glow":0xeef0d6,
"Morning Green":0x89bab2,
"Morning Haze":0xe0e8ed,
"Morning Light Wave":0xe0efe9,
"Morning Mist":0xe5edf1,
"Morning Mist Grey":0xada7b9,
"Morning Moon":0xf7eecf,
"Morning Moor":0xdad6ae,
"Morning Parlor":0xacc0bd,
"Morning Rush":0xdee4dc,
"Morning Shine":0xf8eaed,
"Morning Sigh":0xfce9de,
"Morning Sky":0xc7ecea,
"Morning Snow":0xf5f4ed,
"Morning Song":0xe4ece9,
"Morning Sun":0xf3e6ce,
"Morning Sunlight":0xfdefcc,
"Morning Tea":0xcabd94,
"Morning Wheat":0xe7d2a9,
"Morning Zen":0xcbcdb9,
"Morning's Egg":0xd9be77,
"Morningside":0xf3e2df,
"Mornington":0xdcc6b9,
"Moroccan Blue":0x0f4e67,
"Moroccan Blunt":0x75583d,
"Moroccan Brown":0x7c726c,
"Moroccan Dusk":0x6b5e5d,
"Moroccan Henna":0x6e5043,
"Moroccan Leather":0x6d4444,
"Moroccan Moonlight":0xeae0d4,
"Moroccan Ruby":0x8d504b,
"Moroccan Sky":0xbf7756,
"Moroccan Spice":0x8f623b,
"Morocco":0xb67267,
"Morocco Brown":0x442d21,
"Morocco Red":0x96453b,
"Morocco Sand":0xece3cc,
"<NAME>":0x8cb295,
"Morris Leaf":0xc2d3af,
"Morris Room Grey":0xada193,
"Morro Bay":0x546b78,
"Morrow White":0xfcfccf,
"Mortar":0x565051,
"Mortar Grey":0x9e9f9e,
"Mosaic Blue":0x00758f,
"Mosaic Green":0x599f68,
"Mosaic Tile":0x1c6b69,
"Moscow Midnight":0x204652,
"Moscow Mule":0xeecc77,
"Moscow Papyrus":0x937c00,
"Moselle Green":0x2e4e36,
"Mosque":0x005f5b,
"Moss":0x009051,
"Moss Beach":0x6b7061,
"Moss Brown":0x715b2e,
"Moss Cottage":0x42544c,
"Moss Covered":0x7a7e66,
"Moss Glen":0x4a473f,
"Moss Green":0x638b27,
"Moss Grey":0xafab97,
"Moss Ink":0xc7cac1,
"Moss Island":0xc8c6b4,
"Moss Landing":0x6d7e40,
"Moss Mist":0xdee1d3,
"Moss Point Green":0x7e8d60,
"Moss Print":0xafb796,
"Moss Ring":0x729067,
"Moss Rock":0x5e5b4d,
"Moss Rose":0x8f6d6b,
"Moss Stone":0xb4a54b,
"Moss Vale":0x38614c,
"Mossa":0xb4c2b6,
"Mosslands":0x779966,
"Mossleaf":0x8c9d8f,
"Mosstone":0x858961,
"Mossy":0x857349,
"Mossy Bank":0x8b8770,
"Mossy Bench":0x83a28f,
"Mossy Bronze":0x525f48,
"Mossy Cavern":0xa4a97b,
"Mossy Gold":0x9c9273,
"Mossy Green":0x5a7c46,
"Mossy Oak":0x848178,
"Mossy Pavement":0x908c7e,
"Mossy Rock":0xa9965d,
"Mossy Shade":0x7e6c44,
"Mossy Statue":0x828e74,
"Mossy White":0xe7f2de,
"Mossy Woods":0x7a9703,
"Mostly Metal":0x575e5f,
"Mote of Dust":0xc1c1c5,
"Moth":0xd2cbaf,
"Moth Green":0x007700,
"Moth Grey":0xdad3cb,
"Moth Mist":0xedebde,
"Moth Orchid":0xc875c4,
"Moth Pink":0xcfbdba,
"Moth Wing":0xccbca9,
"Moth's Wing":0xedf1db,
"Mother Earth":0x849c8d,
"Mother Lode":0xa28761,
"Mother Nature":0xbde1c4,
"Mother of Pearl":0xe9d4c3,
"Mother-Of-Pearl Green":0x8fd89f,
"Mother-Of-Pearl Pink":0xd1c4c6,
"Mother-Of-Pearl Silver":0xccd6e6,
"Motherland":0xbcb667,
"Mothra Wing":0xeedd82,
"Mothy":0xcebbb3,
"Motto":0x917c6f,
"Mount Eden":0xe7efe0,
"Mount Etna":0x3d484c,
"Mount Hyjal":0x3d703e,
"Mount Olive":0x716646,
"Mount Olympus":0xd4ffff,
"Mount Sterling":0xcad3d4,
"Mount Tam":0x7c7b6a,
"Mountain Air":0xe6e0e0,
"Mountain Ash":0xcc7700,
"Mountain Blueberry":0x3c4b6c,
"Mountain Bluebird":0x4c98c2,
"Mountain Crystal Silver":0xe2efe8,
"Mountain Dew":0xcfe2e0,
"Mountain Elk":0x867965,
"Mountain Falls":0xbdcac0,
"Mountain Fern":0x94b491,
"Mountain Fig":0x383c49,
"Mountain Flower Mauve":0x6c71a6,
"Mountain Fog":0xf4dbc7,
"Mountain Forest":0x4d663e,
"Mountain Green":0xb2b599,
"Mountain Grey":0xe8e3db,
"Mountain Haze":0x6c6e7e,
"Mountain Heather":0xeedae6,
"Mountain Lake":0x2d5975,
"Mountain Lake Azure":0x4cbca7,
"Mountain Lake Blue":0x85d4d4,
"Mountain Lake Green":0x75b996,
"Mountain Laurel":0xf4c8d5,
"Mountain Lichen":0xa7ae9e,
"Mountain Main":0x8db8d0,
"Mountain Maize":0xefcc7c,
"Mountain Meadow":0x30ba8f,
"Mountain Meadow Green":0x418638,
"Mountain Mint":0xa7e0c2,
"Mountain Mist":0xa09f9c,
"Mountain Morn":0xd4dcd1,
"Mountain Moss":0x94a293,
"Mountain Pass":0x5c6a6a,
"Mountain Pine":0x3b5257,
"Mountain Range Blue":0x53b8c9,
"Mountain Range Green":0x283123,
"Mountain Ridge":0x75665e,
"Mountain Road":0x868578,
"Mountain Sage":0xa3aa8c,
"Mountain Shade":0xb1ab9a,
"Mountain Spring":0xd9e1c1,
"Mountain Stream":0x96afb7,
"Mountain Trail":0x615742,
"Mountain View":0x2e3d30,
"Mountain's Majesty":0xd8d0e3,
"Mountbatten Pink":0x997a8d,
"Mourn Mountain Snow":0xe9eaeb,
"Mournfang Brown":0x6f5749,
"Mourning Dove":0x94908b,
"Mourning Violet":0x474354,
"Mouse Catcher":0x9e928f,
"Mouse Nose":0xffe5b4,
"Mouse Tail":0x727664,
"Mouse Trap":0xbeb1b0,
"Moussaka":0x6d2a13,
"Mousy Brown":0x5c4939,
"Mousy Indigo":0x5c544e,
"Moutarde de Bénichon":0xbf9005,
"Move Mint":0x4effcd,
"Mover & Shaker":0x9cce9e,
"Mover and Shaker":0x855d44,
"Movie Magic":0xb2bfd5,
"Movie Star":0xc52033,
"Mow the Lawn":0xa9b49a,
"Mown Grass":0x627948,
"Mown Hay":0xe6d3bb,
"Moxie":0xe5dad8,
"Mozart":0x485480,
"Mozzarella Covered Chorizo":0xe39b7a,
"Mr Frosty":0xa3c5db,
"Mr Mustard":0xe4b857,
"Mr. Glass":0xc0d5ef,
"Ms. Pac-Man Kiss":0xff00aa,
"MSU Green":0x18453b,
"Mt Burleigh":0x597766,
"Mt. Hood White":0xe7e9e6,
"Mt. Rushmore":0x7f8181,
"M<NAME>":0xf1f2d3,
"Mud":0x70543e,
"Mud Ball":0x966544,
"Mud Bath":0x7c6841,
"Mud Berry":0xd0c8c4,
"Mud Brown":0x60460f,
"Mud Green":0x606602,
"Mud House":0x847146,
"Mud Pack":0x9d9588,
"Mud Pink":0xdcc0c3,
"Mud Pots":0xb6b5b1,
"Mud Puddle":0x9d958b,
"Mud Room":0x60584b,
"Mud Yellow":0xc18136,
"Mud-Dell":0xa08b76,
"Mudbrick":0xa46960,
"Muddled Basil":0x5a5243,
"<NAME>":0x886806,
"Muddy Green":0x657432,
"<NAME>":0xe4b3cc,
"Muddy Olive":0x4b5d46,
"Muddy Quicksand":0xc3988b,
"Muddy River":0x715d3d,
"Muddy Rose":0xe2beb4,
"Muddy Waters":0xa9844f,
"Muddy Yellow":0xbfac05,
"Mudra":0xb8d0da,
"Mudskipper":0x897a69,
"Mudslide":0x84735f,
"Mudstone":0x84846f,
"Muesli":0x9e7e53,
"Muffin Magic":0xf9ddc7,
"Muffin Mix":0xf5e0d0,
"Mughal Green":0x448800,
"Mukluks":0xa38961,
"Mulberry":0x920a4e,
"Mulberry Brown":0x956f29,
"Mulberry Bush":0xad6ea0,
"Mulberry Mauve Black":0x463f60,
"Mulberry Mix":0x9f556c,
"Mulberry Purple":0x493c62,
"Mulberry Silk":0x94766c,
"Mulberry Stain":0xc6babe,
"Mulberry Thorn":0xc57f2e,
"Mulberry Wine":0x997c85,
"Mulberry Wood":0x5c0536,
"Mulberry Yogurt":0xc54b8c,
"Mulch":0x433937,
"Mule":0x827b77,
"Mule Fawn":0x884f40,
"Mulgore Mustard":0xc2b332,
"Mulled Cider":0xa18162,
"Mulled Grape":0x675a74,
"Mulled Wine":0x524d5b,
"Mulled Wine Red":0x3b2932,
"Mullen Pink":0xca4042,
"Mulling Spice":0xc18654,
"Multnomah Falls":0xccd0dd,
"Mulu Frog":0x55bb00,
"Mummy's Tomb":0x828e84,
"Munch On Melon":0xf23e67,
"Munchkin":0x9bb139,
"Munsell Blue":0x0093af,
"Munsell Yellow":0xefcc00,
"Muntok White Pepper":0xd2a172,
"Murano Soft Blue":0xc5d6ee,
"Murasaki":0x4f284b,
"Murasaki Purple":0x884898,
"Murdoch":0x5b8d6b,
"Murex":0x847eb1,
"Murky Green":0x6c7a0e,
"Murmur":0xd2d8d2,
"Murray Red":0x6b3c39,
"Muscat Blanc":0xebe2cf,
"Muscat Grape":0x5e5067,
"Muscatel":0x7b6a68,
"Muscovado Sugar":0x9b6957,
"Muse":0xa5857f,
"Museum":0x685951,
"Mushiao Green":0x2d4436,
"Mushroom":0xbdaca3,
"Mushroom Basket":0x977a76,
"Mushroom Bisque":0xcab49b,
"Mushroom Brown":0x906e58,
"Mushroom Risotto":0xdbd0ca,
"Mushroom White":0xf0e1cd,
"Musical Mist":0xf8eae6,
"Musk":0xcca195,
"Musk Deer":0x7e5b58,
"Musk Dusk":0xcfbfb9,
"Musk Memory":0x774548,
"Musket":0x7d6d39,
"Muskmelon":0xec935e,
"Muskrat":0x7e6f4f,
"Muslin":0xd3d1c4,
"Muslin Tint":0xe0cdb1,
"Mussel Green":0x24342a,
"Mussel White":0xf0e2de,
"Mustang":0x5e4a47,
"Mustard":0xceb301,
"Mustard Brown":0xac7e04,
"Mustard Crusted Salmon":0xef8144,
"Mustard Field":0xd8b076,
"Mustard Flower":0xd2bd0a,
"Mustard Gold":0xb08e51,
"Mustard Green":0xa8b504,
"Mustard Magic":0x857139,
"Mustard Musketeers":0xd5a129,
"Mustard Oil":0xd5bd66,
"Mustard On Toast":0xddcc33,
"Mustard Sauce":0xedbd68,
"Mustard Seed":0xc69f26,
"Mustard Seed Beige":0xc5a574,
"Mustard Yellow":0xe1ad01,
"Mutabilis":0xc29594,
"Muted Berry":0x91788c,
"Muted Blue":0x3b719f,
"Muted Clay":0xd29380,
"Muted Green":0x5fa052,
"Muted Lime":0xd1c87c,
"Muted Mauve":0xb3a9a3,
"Muted Mulberry":0x66626d,
"Muted Pink":0xd1768f,
"Muted Purple":0x805b87,
"Muted Sage":0x93907e,
"MVS Red":0xee0000,
"My Fair Lady":0xf3c4c2,
"My Love":0xe1c6a8,
"My Pink":0xd68b80,
"My Place or Yours?":0x4f434e,
"My Sin":0xfdae45,
"My Sweetheart":0xf8e7df,
"Mykonos":0x387abe,
"Mykonos Blue":0x005780,
"Myoga Purple":0xe0218a,
"Myrtle":0x21421e,
"Myrtle Deep Green":0x00524c,
"Myrtle Green":0x317873,
"Myrtle Pepper":0xb77961,
"Myself":0x8e6f76,
"Mystere":0x98817c,
"Mysteria":0x826f7a,
"Mysterioso":0x46394b,
"Mysterious":0x535e63,
"Mysterious Blue":0x3e7a85,
"Mysterious Mauve":0xa6a3a9,
"Mysterious Moss":0x6f6a52,
"Mysterious Night":0x5c6070,
"Mystery":0xa4cdcc,
"Mystic":0xd8ddda,
"Mystic Blue":0x48a8d0,
"Mystic Fog":0xeae9e1,
"Mystic Green":0xd8f878,
"Mystic Harbor":0xd2e4ee,
"Mystic Iris":0x8596d2,
"Mystic Light":0xdde5ec,
"Mystic Magenta":0xe02e82,
"Mystic Maroon":0xad4379,
"Mystic Mauve":0xdbb7ba,
"Mystic Melon":0xedebb4,
"Mystic Opal":0xfbddbe,
"Mystic Pool":0xd5dde2,
"Mystic Red":0xff5500,
"Mystic River":0xb7cae0,
"Mystic Tulip":0xf9b3a3,
"Mystic Turquoise":0x00877b,
"Mystical":0x5f4e72,
"Mystical Mist":0xe5e2e3,
"Mystical Purple":0x745d83,
"Mystical Sea":0xdce3d1,
"Mystical Shade":0x4c5364,
"Mystical Trip":0x7a6a75,
"Mystification":0x2a4071,
"Mystified":0xc9dbc7,
"Mystique":0xa598a0,
"Myth":0x657175,
"Mythic Forest":0x4a686c,
"Mythical":0x7e778e,
"Mythical Blue":0x93a8a7,
"Mythical Forest":0x398467,
"Mythical Orange":0xff7f49,
"Nacre":0xe8e2d4,
"Nadeshiko Pink":0xf6adc6,
"Nadia":0xafc9c0,
"Naga Morich":0xc90406,
"Naga Viper Pepper":0xed292b,
"Naggaroth Night":0x3d3354,
"Nǎi Yóu Sè Cream":0xfdedc3,
"Nail Polish Pink":0xbd4e84,
"Nairobi Dusk":0xd9a787,
"Naive Peach":0xfce7d3,
"Nakabeni Pink":0xc93756,
"Naked Lady":0xd6b3a9,
"Naked Light":0xe9b6c1,
"Naked Pink":0xd8c6d6,
"Naked Rose":0xebb5b3,
"Namakabe Brown":0x785e49,
"Namara Grey":0x7b7c7d,
"Namaste":0xbdd8c0,
"Namibia":0x7c6d61,
"Nana":0xa08da7,
"Nancy":0x57b8dc,
"Nandi Bear":0x8f423d,
"Nandor":0x4e5d4e,
"Nankeen":0xb89e82,
"Nano White":0xf2f0ea,
"Nanohanacha Gold":0xe3b130,
"Nantucket Dune":0xd0bfaa,
"Nantucket Mist":0xcabfbf,
"Nantucket Sands":0xb4a89a,
"Napa":0xa39a87,
"Napa Grape":0x5b5162,
"Napa Harvest":0x534853,
"Napa Sunset":0xcd915c,
"Napa Wine":0x5d4149,
"Napa Winery":0x6a5c7d,
"Napery":0xefddc1,
"Napier Green":0x2a8000,
"Naples Yellow":0xfada5f,
"Napoleon":0x404149,
"Nārangī Orange":0xff8050,
"Narcissus":0xc39449,
"Narcomedusae":0xe6e3d8,
"Nârenji Orange":0xffc14b,
"Narvik":0xe9e6dc,
"Narwhal Grey":0x080813,
"Nasake":0x746062,
"Nashi Pear Beige":0xedd4b1,
"Nasturtium":0xfe6347,
"Nasturtium Flower":0xe64d1d,
"Nasturtium Leaf":0x87b369,
"Nasturtium Shoot":0x869f49,
"Nasty Green":0x70b23f,
"Nasu Purple":0x5d21d0,
"Nataneyu Gold":0xa17917,
"Natchez":0xba9f95,
"Natchez Moss":0xb1a76f,
"National Anthem":0x3f6f98,
"Native Berry":0xdc6b67,
"Native Flora":0x9aa099,
"Native Hue of Resolution":0xd33300,
"Native Soil":0x887b69,
"Nato Blue":0x153043,
"NATO Olive":0x555548,
"Natrolite":0xebbc71,
"Natural":0xaa907d,
"Natural Almond":0xded2bb,
"Natural Bark":0x6d574d,
"Natural Bridge":0xa29171,
"Natural Candy Pink":0xe4717a,
"Natural Chamois":0xbba88b,
"Natural Choice":0xe3ded0,
"Natural Copper":0x8b655a,
"Natural Green":0xbccd91,
"Natural Grey":0xc4c0bb,
"Natural Indigo":0x003740,
"Natural Instinct Green":0x017374,
"Natural Leather":0xa80e00,
"Natural Light":0xf1ebc8,
"Natural Linen":0xecdfcf,
"Natural Pumice":0x4a4a43,
"Natural Radiance":0xe7dcc1,
"Natural Rice Beige":0xdcc39f,
"Natural Silk Grey":0xd3c5c0,
"Natural Spring":0xaa838b,
"Natural Steel":0x8a8287,
"Natural Stone":0xaea295,
"Natural Tan":0xdcd2c3,
"Natural Twine":0xdbc39b,
"Natural Whisper":0xf0e8cf,
"Natural White":0xfbede2,
"Natural Wool":0xfff6d7,
"Natural Yellow":0xeed88b,
"Natural Youth":0xd7e5b4,
"Naturale":0xf1e0cf,
"Naturalism":0x68685d,
"Naturalist Grey":0x8b8c83,
"Naturally Calm":0xced0d9,
"Nature":0xbfd5b3,
"Nature Apricot":0xfeb7a5,
"Nature Green":0x7daf94,
"Nature Retreat":0x7b8787,
"Nature Spirits":0xc8c8b4,
"Nature Surrounds":0x52634b,
"Nature Trail":0xe6d7bb,
"Nature's Delight":0xa6d292,
"Nature's Gate":0x666a60,
"Nature's Gift":0x99a399,
"Nature's Reflection":0xc5d4cd,
"Nature's Strength":0x117733,
"Naturel":0xcbc0ad,
"Naughty Hottie":0xba403a,
"Naughty Marietta":0xe3ccdc,
"Nauseous Blue":0x483d8b,
"Nautical":0x2e4a7d,
"Nautical Blue":0x1a5091,
"Nautical Star":0xaab5b7,
"Nautilus":0x273c5a,
"Navagio Bay":0x3183a0,
"Navajo":0xefdcc3,
"Navajo Turquoise":0x007c78,
"Navajo White":0xffdead,
"Naval":0x41729f,
"Naval Passage":0x386782,
"Navel":0xec8430,
"Navigate":0x008583,
"Navigator":0x5d83ab,
"Navy":0x01153e,
"Navy Black":0x263032,
"Navy Blazer":0x282d3c,
"Navy Blue":0x000080,
"Navy Cosmos":0x503b53,
"Navy Damask":0x425166,
"Navy Dark Blue":0x004c6a,
"Navy Green":0x35530a,
"Navy Peony":0x223a5e,
"Navy Purple":0x9556eb,
"Navy Teal":0x20576e,
"Navy Trim":0x203462,
"Neapolitan":0x9b7a78,
"Neapolitan Blue":0x4d7faa,
"Near Moon":0x5ee7df,
"Nearly Brown":0xa88e76,
"Nearly Peach":0xefded1,
"Nearsighted":0xc8d5dd,
"Nebula":0xa104c3,
"Nebula Outpost":0x922b9c,
"Nebulas Blue":0x2d62a3,
"Nebulous":0xc4b9b8,
"Nebulous White":0xdedfdc,
"Necklace Pearl":0xf6eeed,
"Necron Compound":0x828b8e,
"Necrotic Flesh":0x9faf6c,
"Nectar of the Gods":0x513439,
"Nectar Red":0x7f4c64,
"Nectarina":0xd38d72,
"Nectarine":0xff8656,
"Nectarous Nectarine":0xdd5566,
"Needlepoint Navy":0x546670,
"Nefarious Blue":0xc5ced8,
"Nefarious Mauve":0xe6d1dc,
"Negishi Green":0x938b4b,
"Negroni":0xeec7a2,
"Neighborly Peach":0xf3c1a3,
"Nelson's Milk Snake":0x933d41,
"Neo Mint":0xaaffcc,
"Neo Tokyo Grey":0xbec0c2,
"Neon Blue":0x04d9ff,
"Neon Boneyard":0xdfc5fe,
"Neon Carrot":0xff9933,
"Neon Fuchsia":0xfe4164,
"Neon Green":0x39ff14,
"Neon Light":0xffdf5e,
"Neon Pink":0xfe019a,
"Neon Purple":0xbc13fe,
"Neon Red":0xff073a,
"Neon Violet":0x674876,
"Neon Yellow":0xcfff04,
"Nepal":0x93aab9,
"Nephrite":0x6d9288,
"Neptune":0x007dac,
"Neptune Blue":0x2e5d9d,
"Neptune Green":0x7fbb9e,
"Neptune's Wrath":0x11425d,
"Nereus":0x4c793c,
"Nero":0x252525,
"Nero's Green":0x318181,
"Nervous Neon Pink":0xff6ec7,
"Nervy Hue":0xd7c65b,
"Nessie":0x716748,
"Nesting Dove":0xeeeada,
"Net Worker":0xb6a194,
"Netherworld":0x881111,
"Netsuke":0xe0cfb0,
"Nettle":0xbbac7d,
"Nettle Green":0x364c2e,
"Nettle Rash":0xe4f7e7,
"Network Gray":0xa0a5a7,
"Neutral Buff":0x9d928f,
"Neutral Green":0xaaa583,
"Neutral Grey":0x8e918f,
"Neutral Ground":0xe2daca,
"Neutral Peach":0xffe6c3,
"Neutral Valley":0x8b694d,
"Neutrino Blue":0x01248f,
"Nevada":0x666f6f,
"Nevada Morning":0xffd5a7,
"Nevada Sand":0xead5b9,
"Nevada Sky":0xa1d9e7,
"Never Cry Wolf":0x6e6455,
"Never Forget":0xa67283,
"Nevergreen":0x666556,
"Neverland":0x9ce5d6,
"Nevermind Nirvana":0x7bc8f6,
"New Age Blue":0x496ead,
"New Amber":0x6d3b24,
"New Bamboo":0xadac84,
"New Brick":0x934c3d,
"New Brick Red":0xcb4154,
"New Bulgarian Rose":0x482427,
"New Car":0x214fc6,
"New Chestnut":0xa28367,
"New Clay":0xefc1b5,
"New Colonial Yellow":0xd9ad7f,
"New Cork":0xb89b6b,
"New Cream":0xede0c0,
"New England Brick":0xad7065,
"New England Roast":0xaa7755,
"New Fawn":0xc9a171,
"New Foliage":0xc2bc90,
"New Forest":0x47514d,
"New Frond":0xbacca0,
"New Gold":0xead151,
"New Green":0xb5ac31,
"New Harvest Moon":0xeddfc7,
"New Hope":0xe2efc2,
"New House White":0xf1ede7,
"New Hunter":0x4a5f58,
"New Kenyan Copper":0x7c1c05,
"New Khaki":0xd9c7aa,
"New Life":0x7c916e,
"New Limerick":0x9dc209,
"New Love":0xc6bbdb,
"New Moss":0xc6d6c7,
"New Navy Blue":0x3b4a55,
"New Neutral":0xbec0aa,
"New Orleans":0xe4c385,
"New Penny":0xa27d66,
"New Roof":0x875251,
"New Shoot":0x869e3e,
"New Sled":0x933c3c,
"New Steel":0x738595,
"New Violet":0xd6c1dd,
"New Wave Green":0x11ff11,
"New Wave Pink":0xff22ff,
"New Wheat":0xd7b57f,
"New Wool":0xd6c3b9,
"New Yellow":0xe8c247,
"New York Pink":0xdd8374,
"New Youth":0xf0e1df,
"Newbury Moss":0x616550,
"Newburyport":0x445a79,
"Newman's Eye":0xb2c7e1,
"Newmarket Sausage":0xeae2dc,
"Newport Blue":0x1c8ac9,
"Newport Indigo":0x313d6c,
"Newsprint":0x756f6d,
"Niagara":0x29a98b,
"Niagara Falls":0xcbe3ee,
"Niagara Mist":0xc5e8ee,
"Niblet Green":0x7dc734,
"Nice Blue":0x107ab0,
"Nice Cream":0xfaecd1,
"Nice White":0xe6ddd5,
"Niche":0x65758f,
"Nick's Nook":0x909062,
"Nickel":0x929292,
"Nickel Ore Green":0x537e7e,
"Nicotine Gold":0xeebb33,
"Niebla Azul":0xb6c3c4,
"Nifty Turquoise":0x019187,
"Night Black":0x312f36,
"Night Bloom":0x613e3d,
"Night Blooming Jasmine":0xf9f7ec,
"Night Blue":0x040348,
"Night Brown":0x44281b,
"Night Brown Black":0x322d25,
"Night Club":0x494b4e,
"Night Dive":0x003355,
"Night Flight":0x434d5c,
"Night Fog":0x2d1962,
"Night Green":0x302f27,
"Night Grey":0x45444d,
"Night Gull Grey":0x615d5c,
"Night in the Woods":0x443300,
"Night Kite":0x005572,
"Night Market":0x4c6177,
"Night Mauve":0x5d3b41,
"Night Mission":0x5e5c50,
"Night Mode":0x234e86,
"Night Music":0x9c96af,
"Night Night":0x4f4f5e,
"Night Out":0x656a6e,
"Night Owl":0x5d7b89,
"Night Pearl":0x11ffbb,
"Night Red":0x3c2727,
"Night Rendezvous":0x66787e,
"Night Rider":0x332e2e,
"Night Romance":0x715055,
"Night Rose":0xb0807a,
"Night Shadz":0xa23d54,
"Night Shift":0x2a5c6a,
"Night Sky":0x2a2a35,
"Night Snow":0xaaccff,
"Night Tan":0xab967b,
"Night Thistle":0x6b7ba7,
"Night Tide":0x455360,
"Night Turquoise":0x003833,
"Night Watch":0x3c4f4e,
"Night White":0xe1e1dd,
"Night Wind":0xd7e2db,
"Night Wizard":0x313740,
"Nightfall":0x43535e,
"Nightfall in Suburbia":0x0011dd,
"Nighthawk":0x615452,
"Nightingale":0x5c4827,
"Nightingale Grey":0xbaaea3,
"Nightlife":0x27426b,
"Nightly Aurora":0x9beec1,
"Nightly Blade":0x5a7d9a,
"Nightly Escapade":0x0433ff,
"Nightly Expedition":0x221188,
"Nightly Ivy":0x444940,
"Nightly Silhouette":0x4f5b93,
"Nightly Violet":0x784384,
"Nightly Woods":0x013220,
"Nightmare":0x112211,
"Nightshade":0x3c464b,
"Nightshade Berries":0x1b1811,
"Nightshade Blue":0x293135,
"Nightshade Purple":0x535872,
"Nightshade Violet":0xa383ac,
"Nightshadow Blue":0x4e5368,
"Nihilakh Oxide":0xa0d6b4,
"Nīlā Blue":0x0055ff,
"Nile":0xb4bb85,
"Nile Blue":0x253f4e,
"Nile Clay":0x8b8174,
"Nile Green":0xa7c796,
"Nile Reed":0x968f5f,
"Nile River":0x9ab6a9,
"Nile Sand":0xbbad94,
"Nile Stone":0x61c9c1,
"Nilla Vanilla":0xf1ebe0,
"Nimbus Blue":0x4422ff,
"Nimbus Cloud":0xd5d5d8,
"Nina":0xf5e3ea,
"Nine Iron":0x46434a,
"Níng Méng Huáng Lemon":0xffef19,
"Ninja":0x020308,
"Ninja Turtle":0x94b1a9,
"Ninjin Orange":0xe5aa70,
"Nipple":0xbb7777,
"Nippon":0xbc002c,
"Nirvana":0xa2919b,
"Nirvana Jewel":0x64a5ad,
"Nisemurasaki Purple":0x43242a,
"Ní<NAME> <NAME>im":0x056eee,
"No More Drama":0xa33f40,
"No Need to Blush":0xffd6dd,
"No Way Rosé":0xfbaa95,
"No$GMB Yellow":0xf8e888,
"Nobel":0xa99d9d,
"Nobility Blue":0x414969,
"Noble Blue":0x697991,
"Noble Blush":0xe8b9b2,
"Noble Cause Purple":0x7e1e9c,
"Noble Crown":0x8d755d,
"Noble Fir":0x5a736d,
"Noble Grey":0xc1beb9,
"Noble Honor":0x69354f,
"Noble Knight":0x394d78,
"Noble Lilac":0xb28392,
"Noble Plum":0x871f78,
"Noble Purple":0xafb1c5,
"Noble Robe":0x807070,
"Noble Silver":0x73777f,
"Noble Tone":0x884967,
"Noblesse":0x524b50,
"Noctis":0x646b77,
"Nocturnal Flight":0x675754,
"Nocturnal Rose":0xcc6699,
"Nocturnal Sea":0x0e6071,
"Nocturne":0x7a4b56,
"Nocturne Blue":0x344d58,
"Nocturne Shade":0x356fad,
"Noghrei Silver":0xbdbebd,
"Nomad":0xa19986,
"Nomad Grey":0x7e736f,
"Nomadic":0xaf9479,
"Nomadic Desert":0xc7b198,
"Nomadic Dream":0xdbdedb,
"Nomadic Taupe":0xd2c6ae,
"Nomadic Travels":0xe0c997,
"Nominee":0x357567,
"Non Skid Grey":0x8a8daa,
"Non-Photo Blue":0xa4dded,
"Non-Stop Orange":0xdd8811,
"Nonchalant White":0xdeddd1,
"Nonpareil Apple":0xc1a65c,
"Noodle Arms":0xf5ddc4,
"Noodles":0xf9e3b4,
"Nor'wester":0x99a9ad,
"Nora's Forest":0x003333,
"Nordic":0x1d393c,
"Nordic Breeze":0xd3dde7,
"Nordic Grass Green":0x1fab58,
"Nordic Noir":0x003344,
"Nordland Blue":0x7e95ab,
"Nordland Light Blue":0x96aec5,
"Norfolk Green":0x2e4b3c,
"Norfolk Sky":0x6cbae7,
"Nori Green":0x112a12,
"Nori Seaweed Green":0x464826,
"<NAME>":0xe9c68e,
"Norse Blue":0x4ca5c7,
"North Atlantic":0x536d70,
"North Beach Blue":0x849c9d,
"North Cape Grey":0x7a9595,
"North Grey":0x6a7777,
"North Island":0xbcb6b4,
"North Rim":0xd8a892,
"North Sea":0x316c6b,
"North Sea Blue":0x343c4c,
"North Star":0xf2dea4,
"North Star Blue":0x223399,
"North Texas Green":0x059033,
"North Wind":0x48bdc1,
"North Woods":0x555a51,
"Northampton Trees":0x767962,
"Northeast Trail":0x948666,
"Northern Barrens Dust":0xde743c,
"Northern Beach":0xe9dad2,
"Northern Exposure":0xbfc7d4,
"Northern Glen":0x536255,
"Northern Landscape":0xc5c1a3,
"Northern Light Grey":0xa7aeb4,
"Northern Lights":0xe6f0ea,
"Northern Pond":0xa3b9cd,
"Northern Sky":0x8daccc,
"Northern Star":0xffffea,
"Northern Territory":0x5e463c,
"Northgate Green":0xaaa388,
"Northpointe":0x9e9181,
"Northrend":0xb9f2ff,
"Norway":0xa4b88f,
"Norwegian Blue":0x78888e,
"Norwich Green":0xacb597,
"Nosegay":0xffe6ec,
"Nosferatu":0xa9a8a8,
"Noshime Flower":0x426579,
"Nostalgia":0xd6b8bd,
"Nostalgia Rose":0xa4777e,
"Nostalgic":0x666c7e,
"Nostalgic Evening":0x47626f,
"Not a Cloud in Sight":0x85c8d3,
"Not My Fault":0x7e7d78,
"Not So Innocent":0x6a6968,
"Not Yo Cheese":0xffc12c,
"Notable Hue":0x8ba7bb,
"Notebook Paper":0xe8ebe6,
"Notes of Plum":0x770f05,
"Noteworthy":0xd9bacc,
"Nothing Less":0xf2deb9,
"Notice Me":0xba8686,
"Notorious":0xbda998,
"Notorious Neanderthal":0x664400,
"Nottingham Forest":0x585d4e,
"Nougat":0xb69885,
"Nougat Brown":0x7c503f,
"Nouveau Copper":0xa05b42,
"Nouveau Rose":0x996872,
"Nouveau-Riche":0xffbb77,
"Nouvelle White":0xe1dcda,
"Novel Lilac":0xc2a4c2,
"Novelle Peach":0xe7cfbd,
"Novelty Navy":0x515b62,
"November":0xbe7767,
"November Gold":0xf6b265,
"November Green":0x767764,
"November Leaf":0xf1b690,
"November Pink":0xede6e8,
"November Skies":0x7cafb9,
"November Storms":0x423f3b,
"Noxious":0x89a203,
"Nuance":0xe2e0d6,
"Nuclear Blast":0xbbff00,
"Nuclear Fallout":0xaa9900,
"Nuclear Mango":0xee9933,
"Nuclear Meltdown":0x44ee00,
"Nuclear Throne":0x00de00,
"Nuclear Waste":0x7cfc00,
"Nude":0xf2d3bc,
"Nude Flamingo":0xe58f7c,
"Nude Lips":0xb5948d,
"Nugget":0xbc9229,
"Nugget Gold":0xc89720,
"Nuisette":0xb48395,
"Nuit Blanche":0x1e488f,
"Nuln Oil":0x14100e,
"Nuln Oil Gloss":0x171310,
"Numbers":0x929bac,
"Numero Uno":0xe2e6de,
"Nurgle's Rot":0x9b8f22,
"Nurgling Green":0xb8cc82,
"Nursery":0xefd0d2,
"Nursery Green":0xedf0de,
"Nursery Pink":0xf4d8e8,
"Nurture":0xd7dcd5,
"Nurture Green":0x98b092,
"Nurturing":0xa1a97b,
"Nurude Brown":0x9d896c,
"Nut":0x9e8a6d,
"Nut Brown":0x86695e,
"Nut Cracker":0x816c5b,
"Nut Flavor":0xd7bea4,
"Nut Milk":0xd9ccc8,
"Nut Oil":0x775d38,
"Nuthatch":0x8e725f,
"Nuthatch Back":0x445599,
"Nutmeg":0x7e4a3b,
"Nutmeg Frost":0xecd9ca,
"Nutmeg Glow":0xd8b691,
"Nutmeg Wood Finish":0x683600,
"Nutria":0x75663e,
"Nutria Fur Brown":0x514035,
"Nutshell":0xa9856b,
"Nutter Butter":0xf7d4c6,
"Nutty Beige":0xd4bca3,
"Nutty Brown":0x8a6f44,
"Nyanza":0xe9ffdb,
"NYC Taxi":0xf7b731,
"Nyctophobia Blue":0x4d587a,
"Nylon":0xe9e3cb,
"Nymph Green":0xaec2a5,
"Nymph's Delight":0x7b6c8e,
"Nymphaeaceae":0xcee0e3,
"Nypd":0x5f6e77,
"O Fortuna":0xe1b8b5,
"O Tannenbaum":0x005522,
"O'Brien Orange":0xf3a347,
"O'grady Green":0x58ac8f,
"O'Neal Green":0x395643,
"Oak Barrel":0x715636,
"Oak Brown":0xa18d80,
"Oak Buff":0xcf9c63,
"Oak Creek":0x5d504a,
"Oak Harbour":0xcdb386,
"Oak Plank":0x5d4f39,
"Oak Ridge":0xc0b0ab,
"Oak Shaving":0xeed8c2,
"Oak Tone":0xd0c7b6,
"Oakley Apricot":0xe0b695,
"Oakmoss":0x6d7244,
"Oakwood":0xbda58b,
"Oakwood Brown":0x8f716e,
"Oarsman Blue":0x648d95,
"Oasis":0x0092a3,
"Oasis Sand":0xfcedc5,
"Oasis Spring":0x47a3c6,
"Oasis Stream":0xa2ebd8,
"Oat Cake":0xe1cab3,
"Oat Field":0xc0ad89,
"Oat Flour":0xf7e4cd,
"Oat Milk":0xdedacd,
"Oat Straw":0xf1d694,
"Oath":0x4a465a,
"Oatmeal":0xcbc3b4,
"Oatmeal Bath":0xddc7a2,
"Oatmeal Biscuit":0xb7a86d,
"Oatmeal Cookie":0xeadac6,
"Obi Lilac":0xb0a3b6,
"Object of Desire":0xb7a8a8,
"Objectivity":0xbbc6de,
"Obligation":0x54645c,
"Oblivion":0x000435,
"Obscure Ochre":0x88654e,
"Obscure Ogre":0x771908,
"Obscure Olive":0x4a5d23,
"Obscure Orange":0xbb5500,
"Obscure Orchid":0x9d0759,
"Observatory":0x008f70,
"Obsession":0xae9550,
"Obsidian":0x445055,
"Obsidian Brown":0x523e35,
"Obsidian Lava Black":0x382b46,
"Obsidian Red":0x372a38,
"Obsidian Shard":0x060313,
"Obsidian Shell":0x441166,
"Obsidian Stone":0x3c3f40,
"Obstinate Orange":0xd7552a,
"Obtrusive Orange":0xffb077,
"Ocean":0x005493,
"Ocean Abyss":0x221166,
"Ocean Air":0xdae4ed,
"Ocean Blue":0x009dc4,
"Ocean Boat Blue":0x0077be,
"Ocean Boulevard":0xa4c8c8,
"Ocean Breeze":0xd3e5eb,
"Ocean Bubble":0x8cadcd,
"Ocean Call":0x2b6c8e,
"Ocean City":0x7896ba,
"Ocean Crest":0xd6dddd,
"Ocean Cruise":0x9cd4e1,
"Ocean Current":0x537783,
"Ocean Depths":0x006175,
"Ocean Dream":0xd4dde2,
"Ocean Drive":0xb0bec5,
"Ocean Droplet":0xafc3bc,
"Ocean Foam":0xcac8b4,
"Ocean Frigate":0x7a7878,
"Ocean Front":0xb8e3ed,
"Ocean Green":0x3d9973,
"Ocean Kiss":0xa4c3c5,
"Ocean Liner":0x189086,
"Ocean Melody":0x7d999f,
"Ocean Mirage":0x00748f,
"Ocean Night":0x637195,
"Ocean Oasis":0x006c68,
"Ocean Pearl":0xd3cfbd,
"Ocean Ridge":0x7594b3,
"Ocean Sand":0xe4d5cd,
"Ocean Shadow":0x5b7886,
"Ocean Spray":0x005379,
"Ocean Storm":0x3f677e,
"Ocean Surf":0x79a2bd,
"Ocean Swell":0x727c7e,
"Ocean Trapeze":0x2e526a,
"Ocean Trip":0x62aeba,
"Ocean Tropic":0x67a6d4,
"Ocean View":0x729bb3,
"Ocean Wave":0x8ec5b6,
"Ocean Weed":0x6c6541,
"Oceanic":0x4f6d82,
"Oceanic Climate":0xbbc8c9,
"Oceano":0x9ad6e5,
"Oceanside":0x015a6b,
"Oceanus":0x90aba8,
"Ocelot":0xf1e2c9,
"Ocher":0xbf9b0c,
"Ochre":0xcc7722,
"Ochre Brown":0x9f7b3e,
"Ochre Maroon":0xcc7733,
"Ochre Revival":0xeec987,
"Ochre Spice":0xe96d03,
"Ochre Yellow":0xefcc83,
"Octagon Ocean":0x085b73,
"Octarine":0xccdd00,
"Octavius":0x37393e,
"October":0xc67533,
"October Bounty":0xe3c6a3,
"October Harvest":0xd1bb98,
"October Haze":0xf8ac8c,
"October Leaves":0x855743,
"October Sky":0x8fa2a2,
"Odd Pea Pod":0x357911,
"Ode to Green":0xb6e5d6,
"Ode to Joy":0x9d404a,
"Ode to Purple":0xa798c2,
"Odious Orange":0xffdfbf,
"Odyssey":0x374a5a,
"Odyssey Grey":0x434452,
"Odyssey Lilac":0xd5c6cc,
"Odyssey Plum":0xe1c2c5,
"Off Blue":0x5684ae,
"Off Broadway":0x433f3d,
"Off Green":0x6ba353,
"Off Shore":0xd1cccb,
"Off the Grid":0x9f9049,
"Off The Grid":0xb8aea4,
"Off White":0xffffe4,
"Off Yellow":0xf1f33f,
"Off-Road Green":0x003723,
"Offbeat":0xd6d0c6,
"Offbeat Green":0x9c8b1f,
"Office Blue Green":0x006c65,
"Office Green":0x00800f,
"Office Grey":0x635d54,
"Office Neon Light":0xff2277,
"Official Violet":0x2e4182,
"Offshore Mist":0xcad8d8,
"Often Orange":0xff714e,
"Ogen Melon":0xd7b235,
"Ogre Odor":0xfd5240,
"<NAME>":0x9da94b,
"O<NAME>lesh Wash":0xd1a14e,
"Oh Boy!":0xbbdaf8,
"Oh Dahling":0xedeec5,
"Oh My Gold":0xeebb55,
"Oh Pistachio":0xabca99,
"Oh So Pretty":0xeac7cb,
"Oil":0x313330,
"Oil Blue":0x658c88,
"Oil Green":0x80856d,
"Oil Of Lavender":0xc7bebe,
"Oil on Fire":0xff5511,
"Oil Rush":0x333144,
"Oil Slick":0x031602,
"Oil Yellow":0xc4a647,
"Oilcloth Green":0x83ba8e,
"Oiled Teak":0x6c5a51,
"Oiled Up Kardashian":0x996644,
"Oilseed Crops":0xc2be0e,
"Oily Steel":0x99aaaa,
"Oitake Green":0x5e644f,
"OK Corral":0xd07360,
"Oklahoma Wheat":0xf5e0ba,
"Okra":0xfdefe9,
"Okroshka":0x40533d,
"Old Amethyst":0x87868f,
"Old Army Helmet":0x616652,
"Old Asparagus":0x929000,
"Old Bamboo":0x769164,
"Old Benchmark":0x029386,
"Old Bone":0xdbc2ab,
"Old Boot":0x7c644b,
"Old | |
<gh_stars>1-10
from collections import OrderedDict
import logging
from pathlib import Path, PureWindowsPath
import uuid
import matplotlib.pyplot as plt
import numpy as np
from pkg_resources import parse_version
from scipy import interpolate
import alf.io
from brainbox.core import Bunch
import ibllib.dsp as dsp
import ibllib.exceptions as err
from ibllib.io import raw_data_loaders, spikeglx
from ibllib.io.extractors import biased_trials
from ibllib.io.extractors.base import (
BaseBpodTrialsExtractor,
BaseExtractor,
run_extractor_classes,
)
from ibllib.io.extractors.training_wheel import extract_wheel_moves
import ibllib.plots as plots
# Module-level logger shared by the extraction routines in this file.
_logger = logging.getLogger('ibllib')
SYNC_BATCH_SIZE_SAMPLES = 2 ** 18  # number of samples to read at once in bin file for sync
WHEEL_RADIUS_CM = 1 # stay in radians
# Rotary encoder resolution in ticks — presumably per revolution; TODO confirm against hardware spec.
WHEEL_TICKS = 1024
# Maximum tolerated clock drift between bpod and FPGA, in parts per million.
BPOD_FPGA_DRIFT_THRESHOLD_PPM = 150
# Default synchronisation channel maps, keyed by probe version ('3A' / '3B') then by
# binary file type ('ap' / 'nidq'). Values are the sync-trace channel indices for each
# input signal. Used as a fallback by get_ibl_sync_map() when no map file is found.
CHMAPS = {'3A':
          {'ap':
           {'left_camera': 2,
            'right_camera': 3,
            'body_camera': 4,
            'bpod': 7,
            'frame2ttl': 12,
            'rotary_encoder_0': 13,
            'rotary_encoder_1': 14,
            'audio': 15
            }
           },
          '3B':
          {'nidq':
           {'left_camera': 0,
            'right_camera': 1,
            'body_camera': 2,
            'imec_sync': 3,
            'frame2ttl': 4,
            'rotary_encoder_0': 5,
            'rotary_encoder_1': 6,
            'audio': 7,
            'bpod': 16},
           'ap':
           {'imec_sync': 6}
           },
          }
def get_ibl_sync_map(ef, version):
    """
    Gets default channel map for the version/binary file type combination

    :param ef: ibllib.io.spikeglx.glob_ephys_file dictionary with field 'ap' or 'nidq'
    :param version: probe version string, one of '3A' or '3B'
    :return: channel map dictionary
    :raises ValueError: if the version is unknown or `ef` has no usable binary field
    """
    default_chmap = None
    if version == '3A':
        default_chmap = CHMAPS['3A']['ap']
    elif version == '3B':
        if ef.get('nidq', None):
            default_chmap = CHMAPS['3B']['nidq']
        elif ef.get('ap', None):
            default_chmap = CHMAPS['3B']['ap']
    if default_chmap is None:
        # previously this fell through to an UnboundLocalError on the return line;
        # fail explicitly with a diagnostic message instead
        raise ValueError(f"No default channel map for version={version} with fields {list(ef.keys())}")
    # a map file next to the binary takes precedence over the built-in default
    return spikeglx.get_sync_map(ef['path']) or default_chmap
def _sync_to_alf(raw_ephys_apfile, output_path=None, save=False, parts=''):
    """
    Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset

    :param raw_ephys_apfile: bin file containing ephys data or spike
    :param output_path: output directory
    :param save: bool write to disk only if True
    :param parts: string or list of strings that will be appended to the filename before extension
    :return: Bunch of sync vectors (and the list of output files when save=True)
    """
    # handles input argument: support ibllib.io.spikeglx.Reader, str and pathlib.Path
    if isinstance(raw_ephys_apfile, spikeglx.Reader):
        sr = raw_ephys_apfile
    else:
        raw_ephys_apfile = Path(raw_ephys_apfile)
        sr = spikeglx.Reader(raw_ephys_apfile)
    # if no output, need a temp folder to swap for big files
    if not output_path:
        output_path = raw_ephys_apfile.parent
    file_ftcp = Path(output_path).joinpath(f'fronts_times_channel_polarity{str(uuid.uuid4())}.bin')
    # loop over chunks of the raw ephys file
    wg = dsp.WindowGenerator(sr.ns, SYNC_BATCH_SIZE_SAMPLES, overlap=1)
    # context manager guarantees the temp file is closed even if a read raises
    # (the original leaked the handle on exception)
    with open(file_ftcp, 'wb') as fid_ftcp:
        for sl in wg.slice:
            ss = sr.read_sync(sl)
            ind, fronts = dsp.fronts(ss, axis=0)
            # one row per front: time in seconds, channel index, polarity
            sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)]
            sav.tofile(fid_ftcp)
            # print progress
            wg.print_progress()
    # read back from the temp file and delete it
    tim_chan_pol = np.fromfile(str(file_ftcp))
    tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3))
    file_ftcp.unlink()
    sync = {'times': tim_chan_pol[:, 0],
            'channels': tim_chan_pol[:, 1],
            'polarities': tim_chan_pol[:, 2]}
    if save:
        out_files = alf.io.save_object_npy(output_path, sync, '_spikeglx_sync', parts=parts)
        return Bunch(sync), out_files
    else:
        return Bunch(sync)
def _assign_events_bpod(bpod_t, bpod_polarities, ignore_first_valve=True):
"""
From detected fronts on the bpod sync traces, outputs the synchronisation events
related to trial start and valve opening
:param bpod_t: numpy vector containing times of fronts
:param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:param ignore_first_valve (True): removes detected valve events at indices le 2
:return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in
"""
TRIAL_START_TTL_LEN = 2.33e-4
ITI_TTL_LEN = 0.4
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(bpod_polarities)) == 2))
# make sure that the first event is a rise
assert(bpod_polarities[0] == 1)
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(bpod_t)[::2]
# detect start trials event assuming length is 0.23 ms except the first trial
i_trial_start = np.r_[0, np.where(dt <= TRIAL_START_TTL_LEN)[0] * 2]
t_trial_start = bpod_t[i_trial_start]
# the last trial is a dud and should be removed
t_trial_start = t_trial_start[:-1]
# valve open events are between 50ms to 300 ms
i_valve_open = np.where(np.logical_and(dt > TRIAL_START_TTL_LEN,
dt < ITI_TTL_LEN))[0] * 2
if ignore_first_valve:
i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))
t_valve_open = bpod_t[i_valve_open]
# ITI events are above 400 ms
i_iti_in = np.where(dt > ITI_TTL_LEN)[0] * 2
i_iti_in = np.delete(i_iti_in, np.where(i_valve_open < 2))
i_iti_in = bpod_t[i_iti_in]
# # some debug plots when needed
# import matplotlib.pyplot as plt
# import ibllib.plots as plots
# plt.figure()
# plots.squares(bpod_t, bpod_fronts)
# plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.2, linewidth=0.5, color='g')
# plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.2, linewidth=0.5, color='r')
return t_trial_start, t_valve_open, i_iti_in
def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=1,
                                          coding='x4'):
    """
    Extracts the rotary encoder absolute position as a function of time from the
    fronts detected on the two quadrature channels. Output is in units of the
    radius parameter, by default radians.
    Coding options detailed here: http://www.ni.com/tutorial/7109/pt/
    Here output is clockwise from subject perspective.

    :param ta: time of fronts on channel A
    :param pa: polarity of fronts on channel A
    :param tb: time of fronts on channel B
    :param pb: polarity of fronts on channel B
    :param ticks: number of ticks corresponding to a full revolution (1024 for IBL rotary encoder)
    :param radius: radius of the wheel. Defaults to 1 for an output in radians
    :param coding: x1, x2 or x4 coding (IBL default is x4)
    :return: times vector and position vector
    """
    if coding == 'x1':
        # NOTE(review): searchsorted indices are re-used to index the polarity
        # vectors, as in the original implementation -- preserved verbatim
        up_a = np.searchsorted(tb, ta[pa == 1])
        up_a = up_a[up_a < ta.size]
        up_a = up_a[pa[up_a] == 1]
        up_b = np.searchsorted(ta, tb[pb == 1])
        up_b = up_b[up_b < tb.size]
        up_b = up_b[pb[up_b] == 1]
        t = np.r_[ta[up_a], tb[up_b]]
        steps = np.r_[up_a * 0 + 1, up_b * 0 - 1]
        order = np.argsort(t)
        position = np.cumsum(steps[order]) / ticks * np.pi * 2 * radius
        return t[order], position
    if coding == 'x2':
        # direction given by the level of B at each front of A
        steps = pb[np.searchsorted(tb, ta) - 1] * pa
        position = - np.cumsum(steps) / ticks * np.pi * 2 * radius / 2
        return ta, position
    if coding == 'x4':
        # every front on both channels counts; direction from the other channel
        steps = np.r_[pb[np.searchsorted(tb, ta) - 1] * pa,
                      -pa[np.searchsorted(ta, tb) - 1] * pb]
        t = np.r_[ta, tb]
        order = np.argsort(t)
        position = - np.cumsum(steps[order]) / ticks * np.pi * 2 * radius / 4
        return t, position
def _assign_events_audio(audio_t, audio_polarities, return_indices=False):
"""
From detected fronts on the audio sync traces, outputs the synchronisation events
related to tone in
:param audio_t: numpy vector containing times of fronts
:param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:param return_indices (False): returns indices of tones
:return: numpy arrays t_ready_tone_in, t_error_tone_in
:return: numpy arrays ind_ready_tone_in, ind_error_tone_in if return_indices=True
"""
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(audio_polarities)) == 2))
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(audio_t)[::2]
# detect ready tone by length below 110 ms
i_ready_tone_in = np.r_[np.where(dt <= 0.11)[0] * 2]
t_ready_tone_in = audio_t[i_ready_tone_in]
# error tones are events lasting from 400ms to 600ms
i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 1.2))[0] * 2
t_error_tone_in = audio_t[i_error_tone_in]
if return_indices:
return t_ready_tone_in, t_error_tone_in, i_ready_tone_in, i_error_tone_in
else:
return t_ready_tone_in, t_error_tone_in
def _assign_events_to_trial(t_trial_start, t_event, take='last'):
"""
Assign events to a trial given trial start times and event times.
Trials without an event
result in nan value in output time vector.
The output has a consistent size with t_trial_start and ready to output to alf.
:param t_trial_start: numpy vector of trial start times
:param t_event: numpy vector of event times to assign to trials
:param take: 'last' or 'first' (optional, default 'last'): index to take in case of duplicates
:return: numpy array of event times with the same shape of trial start.
"""
# make sure the events are sorted
try:
assert(np.all(np.diff(t_trial_start) >= 0))
except AssertionError:
raise ValueError('Trial starts vector not sorted')
try:
assert(np.all(np.diff(t_event) >= 0))
except AssertionError:
raise ValueError('Events vector is not sorted')
# remove events that happened before the first trial start
t_event = t_event[t_event >= t_trial_start[0]]
ind = np.searchsorted(t_trial_start, t_event) - 1
t_event_nans = np.zeros_like(t_trial_start) * np.nan
# select first or last element matching each trial start
if take == 'last':
iall, iu = np.unique(np.flip(ind), return_index=True)
t_event_nans[iall] = t_event[- (iu - ind.size + 1)]
elif take == 'first':
iall, iu = np.unique(ind, return_index=True)
t_event_nans[iall] = t_event[iu]
else: # if the index is arbitrary, needs to be numeric (could be negative if from the end)
iall = np.unique(ind)
minsize = take + 1 if take >= 0 else - take
# for each trial, take the takenth element if there are enough values in trial
for iu in iall:
| |
#!/usr/bin/env python
# Copyright 2017-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>
# <NAME>
# <NAME>
# <NAME> <<EMAIL>>
# <NAME>
#
import itertools
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.cc import eom_kccsd_ghf as eom_kgccsd
from pyscf.pbc.cc import kccsd
from pyscf.pbc.lib import kpts_helper
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM # noqa
from pyscf.pbc.cc import kintermediates_uhf
from pyscf.pbc.mp.kump2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx) # noqa
# Shorthand alias for pyscf's optimized tensor contraction routine.
einsum = lib.einsum
########################################
# EOM-IP-CCSD
########################################
def amplitudes_to_vector_ip(r1, r2, kshift, kconserv):
    """Pack spin-resolved IP-EOM amplitudes into a single flat vector.

    The aaa/bbb 2ph blocks are antisymmetric in the combined (k-point, occ)
    index pair, so only the strict lower triangle of those blocks is stored.

    :param r1: tuple (r1a, r1b) of 1h amplitudes
    :param r2: tuple (r2aaa, r2baa, r2abb, r2bbb) of 2h1p amplitudes
    :param kshift: k-point index of the ionization (unused in the packing)
    :param kconserv: momentum conservation table (unused in the packing)
    :return: 1D numpy array with all unique amplitudes concatenated
    """
    r1a, r1b = r1
    r2aaa, r2baa, r2abb, r2bbb = r2
    nkpts = r2aaa.shape[0]
    nkocc_a = nkpts * r1a.shape[0]
    nkocc_b = nkpts * r1b.shape[0]
    nvira = r2aaa.shape[-1]
    nvirb = r2bbb.shape[-1]
    # fold (ki, i) into one axis so a plain triangular index selects the
    # unique (ki, i) < (kj, j) entries
    flat_aaa = r2aaa.transpose(0, 2, 1, 3, 4).reshape(nkocc_a, nkocc_a, nvira)
    flat_bbb = r2bbb.transpose(0, 2, 1, 3, 4).reshape(nkocc_b, nkocc_b, nvirb)
    rows_a, cols_a = np.tril_indices(nkocc_a, -1)
    rows_b, cols_b = np.tril_indices(nkocc_b, -1)
    pieces = (r1a, r1b,
              flat_aaa[rows_a, cols_a].ravel(),
              r2baa.ravel(), r2abb.ravel(),
              flat_bbb[rows_b, cols_b].ravel())
    return np.hstack(pieces)
def vector_to_amplitudes_ip(vector, kshift, nkpts, nmo, nocc, kconserv):
    """Unpack a flat IP-EOM vector into spin-resolved (r1, r2) amplitudes.

    Inverse of amplitudes_to_vector_ip: the aaa/bbb 2h1p blocks are rebuilt
    from their stored strict lower triangle using antisymmetry in the
    combined (k-point, occ) index pair.

    :param vector: packed 1D amplitude vector
    :param kshift: k-point index of the ionization (not used in the unpacking)
    :param nkpts: number of k-points
    :param nmo: tuple (nmoa, nmob) of orbital counts per spin
    :param nocc: tuple (nocca, noccb) of occupied counts per spin
    :param kconserv: momentum conservation table (not used in the unpacking)
    :return: (r1, r2) with r1 = (r1a, r1b) and r2 = (r2aaa, r2baa, r2abb, r2bbb)
    """
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb
    # segment sizes of the packed vector, in storage order
    sizes = (nocca, noccb, (nkpts*nocca)*(nkpts*nocca-1)*nvira//2,
             nkpts**2*noccb*nocca*nvira, nkpts**2*nocca*noccb*nvirb,
             nkpts*noccb*(nkpts*noccb-1)*nvirb//2)
    sections = np.cumsum(sizes[:-1])
    r1a, r1b, r2a, r2baa, r2abb, r2b = np.split(vector, sections)
    r2a = r2a.reshape(nkpts*nocca*(nkpts*nocca-1)//2,nvira)
    r2b = r2b.reshape(nkpts*noccb*(nkpts*noccb-1)//2,nvirb)
    idxa, idya = np.tril_indices(nkpts*nocca, -1)
    idxb, idyb = np.tril_indices(nkpts*noccb, -1)
    r2aaa = np.zeros((nkpts*nocca,nkpts*nocca,nvira), dtype=r2a.dtype)
    r2aaa[idxa,idya] = r2a.copy()
    r2aaa[idya,idxa] = -r2a.copy()  # Fill in value : kj, j < ki, i
    r2aaa = r2aaa.reshape(nkpts,nocca,nkpts,nocca,nvira)
    r2aaa = r2aaa.transpose(0,2,1,3,4)
    r2baa = r2baa.reshape(nkpts,nkpts,noccb,nocca,nvira).copy()
    r2abb = r2abb.reshape(nkpts,nkpts,nocca,noccb,nvirb).copy()
    r2bbb = np.zeros((nkpts*noccb,nkpts*noccb,nvirb), dtype=r2b.dtype)
    r2bbb[idxb,idyb] = r2b.copy()
    r2bbb[idyb,idxb] = -r2b.copy()  # Fill in value : kj, j < ki, i
    r2bbb = r2bbb.reshape(nkpts,noccb,nkpts,noccb,nvirb)
    r2bbb = r2bbb.transpose(0,2,1,3,4)
    r1 = (r1a.copy(), r1b.copy())
    r2 = (r2aaa, r2baa, r2abb, r2bbb)
    return r1, r2
def ipccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2ph operators are of the form s_{ij}^{ b}, i.e. 'jb' indices are coupled

    Applies the similarity-transformed Hamiltonian to a packed IP-EOM trial
    vector at k-point ``kshift`` and returns the packed result Hr.

    :param eom: EOM-IP-KCCSD object (supplies nkpts and amplitude converters)
    :param vector: packed amplitude vector (see vector_to_amplitudes_ip)
    :param kshift: k-point index of the ionized electron
    :param imds: intermediates namespace; built via eom.make_imds() when None
    :param diag: unused here; kept for interface compatibility with the solver
    '''
    if imds is None: imds = eom.make_imds()
    t1, t2= imds.t1, imds.t2
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nocca, noccb, nvira, nvirb = t2ab.shape[3:]
    nmoa, nmob = nocca + nvira, noccb + nvirb
    kconserv = imds.kconserv
    nkpts = eom.nkpts
    r1, r2 = eom.vector_to_amplitudes(vector, kshift, nkpts, (nmoa, nmob), (nocca, noccb), kconserv)
    #nocc = eom.nocc
    #nmo = eom.nmo
    #nvir = (nmo[0]-nocc[0], nmo[1]-nocc[1])
    #nocca, noccb = nocc
    #nvira, nvirb = nvir
    #nkpts = eom.nkpts
    #r1, r2 = eom.vector_to_amplitudes(vector, nkpts, nmo[0]+nmo[1], nocc[0]+nocc[1]) # spin
    #spatial_r1, spatial_r2 = eom_kgccsd.spin2spatial_ip_doublet(r1, r2, kconserv, kshift, orbspin)
    #imds = imds._imds
    #t2aa, t2ab, t2bb = t2
    # k-point spin orbital version of ipccsd
    #Hr1 = -0.0*np.einsum('mi,m->i', imds.Foo[kshift], r1)
    #Hr2 = np.zeros_like(r2)
    r1a, r1b = r1
    r2aaa, r2baa, r2abb, r2bbb = r2
    #Foo term
    # -\sum_{kk,k} U_{kk,k,ki,i} s_{kk,k}
    Hr1a = -np.einsum('mi,m->i', imds.Foo[kshift], r1a)
    Hr1b = -np.einsum('MI,M->I', imds.FOO[kshift], r1b)
    #Fov term
    # \sum_{kL,kD,L,D} U_{kL,kD,L,D} S_{ki,i,kL,L}^{kD,D} + \sum_{kl,kd,l,d} U_{kl,kd,l,d} S_{ki,i,kl,l}^{kd,d}
    for km in range(nkpts):
        Hr1a += einsum('me,mie->i', imds.Fov[km], r2aaa[km,kshift])
        Hr1a -= einsum('ME,iME->i', imds.FOV[km], r2abb[kshift,km])
        Hr1b += einsum('ME,MIE->I', imds.FOV[km], r2bbb[km,kshift])
        Hr1b -= einsum('me,Ime->I', imds.Fov[km], r2baa[kshift,km])
    #Wooov
    # \sum_{kk,kl,kd,k,l,d} W_{kk,ki,kl,kd,k,i,l,d} s_{kl,kk,l,k}^{kd,d}
    # \sum_{kk,kL,kD,k,L,D} W_{kk,ki,kL,kD,k,i,L,D} s_{kL,kk,L,k}^{kD,D}
    for km in range(nkpts):
        for kn in range(nkpts):
            Hr1a += -0.5 * einsum('nime,mne->i', imds.Wooov[kn,kshift,km], r2aaa[km,kn])
            Hr1b += einsum('NIme,Nme->I', imds.WOOov[kn,kshift,km], r2baa[kn,km])
            Hr1b += -0.5 * einsum('NIME,MNE->I', imds.WOOOV[kn,kshift,km], r2bbb[km,kn])
            Hr1a += einsum('niME,nME->i', imds.WooOV[kn,kshift,km], r2abb[kn,km])
    # result dtype must accommodate complex intermediates and amplitudes
    dtype = np.result_type(Hr1a, *r2)
    Hr2aaa = np.zeros((nkpts, nkpts, nocca, nocca, nvira), dtype=dtype)
    Hr2baa = np.zeros((nkpts, nkpts, noccb, nocca, nvira), dtype=dtype)
    Hr2abb = np.zeros((nkpts, nkpts, nocca, noccb, nvirb), dtype=dtype)
    Hr2bbb = np.zeros((nkpts, nkpts, noccb, noccb, nvirb), dtype=dtype)
    # Fvv term
    # \sum_{kd,d} U_{kb,kd,b,d} S_{ki,kj,i,j}^{kd,d} = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    # \sum_{kD,D} S_{ki,kJ,i,J}^{kD,D} U_{kB,kD,B,D} = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    for kb, ki in itertools.product(range(nkpts),repeat=2):
        kj = kconserv[kshift,ki,kb]
        Hr2aaa[ki,kj] += lib.einsum('be,ije->ijb', imds.Fvv[kb], r2aaa[ki,kj])
        Hr2abb[ki,kj] += lib.einsum('BE,iJE->iJB', imds.FVV[kb], r2abb[ki,kj])
        Hr2bbb[ki,kj] += lib.einsum('BE,IJE->IJB', imds.FVV[kb], r2bbb[ki,kj])
        Hr2baa[ki,kj] += lib.einsum('be,Ije->Ijb', imds.Fvv[kb], r2baa[ki,kj])
    # Foo term
    # \sum_{kl,l} U_{kl,ki,l,i} s_{kl,kj,l,j}^{kb,b} = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    # \sum_{kl,l} U_{kl,kj,l,j} S_{ki,kl,i,l}^{kb,b} = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    # \sum_{kl,l} S_{kl,kJ,l,J}^{kB,B} U_{kl,ki,l,i} = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    # \sum_{KL,L} S_{ki,kL,i,L}^{kB,B} U_{kL,kJ,L,J} = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        tmpa = lib.einsum('mi,mjb->ijb', imds.Foo[ki], r2aaa[ki,kj])
        tmpb = lib.einsum('mj,mib->ijb', imds.Foo[kj], r2aaa[kj,ki])
        Hr2aaa[ki,kj] -= tmpa - tmpb
        Hr2abb[ki,kj] -= lib.einsum('mi,mJB->iJB', imds.Foo[ki], r2abb[ki,kj])
        Hr2abb[ki,kj] -= lib.einsum('MJ,iMB->iJB', imds.FOO[kj], r2abb[ki,kj])
        Hr2baa[ki,kj] -= lib.einsum('MI,Mjb->Ijb', imds.FOO[ki], r2baa[ki,kj])
        Hr2baa[ki,kj] -= lib.einsum('mj,Imb->Ijb', imds.Foo[kj], r2baa[ki,kj])
        tmpb = lib.einsum('MI,MJB->IJB', imds.FOO[ki], r2bbb[ki,kj])
        tmpa = lib.einsum('MJ,MIB->IJB', imds.FOO[kj], r2bbb[kj,ki])
        Hr2bbb[ki,kj] -= tmpb - tmpa
    # Wovoo term
    # \sum_{kk,k} W_{kk,kb,kj,ki,k,b,j,i} s_{kk,k} = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    # \sum_{kk,k} W_{kk,kB,ki,kJ,k,B,i,J} S_{kk,k} = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kshift, kj]
        Hr2aaa[ki,kj] -= einsum('mjbi,m->ijb', imds.Woovo[kshift,kj,kb], r1a)
        Hr2abb[ki,kj] += einsum('miBJ,m->iJB', imds.WooVO[kshift,ki,kb], r1a)
        Hr2baa[ki,kj] += einsum('MIbj,M->Ijb', imds.WOOvo[kshift,ki,kb], r1b)
        Hr2bbb[ki,kj] -= einsum('MJBI,M->IJB', imds.WOOVO[kshift,kj,kb], r1b)
    # Woooo term
    # \sum_{kk,kl,k,l} W_{kk,ki,kl,kj,k,i,l,j} S_{kk,kl,k,l}^{kb,b} = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    # \sum_{kk,kL,k,L} W_{kk,kL,ki,kJ,k,L,i,J} S_{kk,kl,k,L}^{kB,B} = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kshift, kj]
        for kn in range(nkpts):
            km = kconserv[kj, kn, ki]
            Hr2aaa[ki, kj] += .5 * lib.einsum('minj,mnb->ijb', imds.Woooo[km, ki, kn], r2aaa[km, kn])
            Hr2abb[ki, kj] += lib.einsum('miNJ,mNB->iJB', imds.WooOO[km, ki, kn], r2abb[km, kn])
            Hr2bbb[ki, kj] += .5 * lib.einsum('MINJ,MNB->IJB', imds.WOOOO[km, ki, kn], r2bbb[km, kn])
            Hr2baa[ki, kj] += lib.einsum('njMI,Mnb->Ijb', imds.WooOO[kn, kj, km], r2baa[km, kn])
    # T2 term
    # - \sum_{kc,c} t_{kj,ki,j,i}^{kb,kc,b,c} [ \sum_{kk,kL,kD,k,L,D} W_{kL,kk,kD,kc,L,k,D,c} S_{kk,kL,k,L}^{kD,D}
    #   + \sum{kk,kl,kd,k,l,d} W_{kl,kk,kd,kc,l,k,d,c} S_{kk,kl,k,l}^{kd,d} ] = (\bar{H}S)_{ki,kj,i,j}^{kb,b}
    #
    # - \sum_{kc,c} t_{ki,kJ,i,J}^{kc,kB,c,B} [ \sum_{kk,kL,kD,k,L,D} W_{kL,kk,kD,kc,L,k,D,c} S_{Kk,kL,k,L}^{kD,D}
    #   + \sum{kk,kl,kd,k,l,d} W_{kl,kk,kd,kc,l,k,d,c} S_{kk,kl,k,l}^{kd,d} ] = (\bar{H}S)_{ki,kJ,i,J}^{kB,B}
    tmp_aaa = lib.einsum('xymenf,xymnf->e', imds.Wovov[:,kshift,:], r2aaa)
    tmp_bbb = lib.einsum('xyMENF,xyMNF->E', imds.WOVOV[:,kshift,:], r2bbb)
    tmp_abb = lib.einsum('xymeNF,xymNF->e', imds.WovOV[:,kshift,:], r2abb)
    tmp_baa = np.zeros(tmp_bbb.shape, dtype=tmp_bbb.dtype)
    for km, kn in itertools.product(range(nkpts), repeat=2):
        kf = kconserv[kn, kshift, km]
        tmp_baa += lib.einsum('nfME, Mnf->E', imds.WovOV[kn, kf, km], r2baa[km, kn])
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kshift, kj]
        Hr2aaa[ki,kj] -= 0.5 * lib.einsum('e,jibe->ijb', tmp_aaa, t2aa[kj,ki,kb])
        Hr2aaa[ki,kj] -= lib.einsum('e,jibe->ijb', tmp_abb, t2aa[kj,ki,kb])
        Hr2abb[ki,kj] -= 0.5 * lib.einsum('e,iJeB->iJB', tmp_aaa, t2ab[ki,kj,kshift])
        Hr2abb[ki,kj] -= lib.einsum('e,iJeB->iJB', tmp_abb, t2ab[ki,kj,kshift])
        Hr2baa[ki,kj] -= 0.5 * lib.einsum('E,jIbE->Ijb', tmp_bbb, t2ab[kj,ki,kb])
        Hr2baa[ki,kj] -= lib.einsum('E,jIbE->Ijb', tmp_baa, t2ab[kj,ki,kb])
        Hr2bbb[ki,kj] -= 0.5 * lib.einsum('E,JIBE->IJB', tmp_bbb, t2bb[kj,ki,kb])
        Hr2bbb[ki,kj] -= lib.einsum('E,JIBE->IJB', tmp_baa, t2bb[kj,ki,kb])
    #idxoa = [np.where(orbspin[k][:nocca+noccb] == 0)[0] for k in range(nkpts)]
    #idxva = [np.where(orbspin[k][nocca+noccb:] == 0)[0] for k in range(nkpts)]
    #idxob = [np.where(orbspin[k][:nocca+noccb] == 1)[0] for k in range(nkpts)]
    #idxvb = [np.where(orbspin[k][nocca+noccb:] == 1)[0] for k in range(nkpts)]
    # Wovvo terms (ring diagrams), antisymmetrized over ij where needed:
    #     j \/ b   |  i
    #         ---  |
    #          /\  |
    #     m \/  e  |
    #        -------
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kshift, kj]
        for km in range(nkpts):
            ke = kconserv[km, kshift, ki]
            # \sum_{kL,kD,L,D} W_{kL,kD,kb,kj,L,D,b,j} S_{ki,kL,i,L}^{kb,b}
            # \sum_{kl,kd,l,d} W_{kl,kd,kb,kj,l,d,b,j} S_{ki,kl,i,l}^{kb,b}
            Hr2aaa[ki, kj] += lib.einsum('mebj,ime->ijb', imds.Wovvo[km, ke, kb],
                                         r2aaa[ki, km])
            Hr2aaa[ki, kj] += lib.einsum('MEbj,iME->ijb', imds.WOVvo[km, ke, kb],
                                         r2abb[ki, km])
            # P(ij)
            ke = kconserv[km, kshift, kj]
            Hr2aaa[ki, kj] -= lib.einsum('mebi,jme->ijb', imds.Wovvo[km, ke, kb],
                                         r2aaa[kj, km])
            Hr2aaa[ki, kj] -= lib.einsum('MEbi,jME->ijb', imds.WOVvo[km, ke, kb],
                                         r2abb[kj, km])
            # \sum_{kL,kD,L,D} W_{kL,kD,kb,kJ,L,D,b,J} S_{ki,kL,i,L}^{kD,D}
            # \sum_{kl,kd,l,d} W_{kl,kd,kB,kJ,l,d,B,J} S_{ki,kl,i,l}^{kd,d}
            ke = kconserv[km, kshift, ki]
            Hr2abb[ki, kj] += lib.einsum('meBJ,ime->iJB', imds.WovVO[km, ke, kb],
                                         r2aaa[ki, km])
            Hr2abb[ki, kj] += lib.einsum('MEBJ,iME->iJB', imds.WOVVO[km, ke, kb],
                                         r2abb[ki, km])
            ke = kconserv[km, kshift, kj]
            Hr2abb[ki, kj] -= lib.einsum('miBE,mJE->iJB', imds.WooVV[km, ki, kb],
                                         r2abb[km, kj])
            ke = kconserv[km, kshift, ki]
            Hr2baa[ki, kj] += lib.einsum('MEbj,IME->Ijb', imds.WOVvo[km, ke, kb],
                                         r2bbb[ki, km])
            Hr2baa[ki, kj] += lib.einsum('mebj,Ime->Ijb', imds.Wovvo[km, ke, kb],
                                         r2baa[ki, km])
            ke = kconserv[km, kshift, kj]
            Hr2baa[ki, kj] -= lib.einsum('MIbe,Mje->Ijb', imds.WOOvv[km, ki, kb],
                                         r2baa[km, kj])
            ke = kconserv[km, kshift, ki]
            Hr2bbb[ki, kj] += lib.einsum('MEBJ,IME->IJB', imds.WOVVO[km, ke, kb],
                                         r2bbb[ki, km])
            Hr2bbb[ki, kj] += lib.einsum('meBJ,Ime->IJB', imds.WovVO[km, ke, kb],
                                         r2baa[ki, km])
            # P(ij)
            ke = kconserv[km, kshift, kj]
            Hr2bbb[ki, kj] -= lib.einsum('MEBI,JME->IJB', imds.WOVVO[km, ke, kb],
                                         r2bbb[kj, km])
            Hr2bbb[ki, kj] -= lib.einsum('meBI,Jme->IJB', imds.WovVO[km, ke, kb],
                                         r2baa[kj, km])
    #spatial_Hr1 = [Hr1a, Hr1b]
    #spatial_Hr2 = [Hr2aaa, Hr2baa, Hr2abb, Hr2bbb]
    #spin_Hr1, spin_Hr2 = eom_kgccsd.spatial2spin_ip_doublet(spatial_Hr1, spatial_Hr2,
    #                                                        kconserv, kshift, orbspin)
    #Hr1 += spin_Hr1
    #Hr2 += spin_Hr2
    #vector = eom.amplitudes_to_vector(Hr1, Hr2)
    # repack the result blocks into a flat vector
    vector = amplitudes_to_vector_ip([Hr1a, Hr1b], [Hr2aaa, Hr2baa, Hr2abb, Hr2bbb], kshift, kconserv)
    return vector
def ipccsd_diag(eom, kshift, imds=None):
if imds is | |
# filename: polynomials_over_Zp.py
# -*- coding: utf-8 -*-
"""
"""
# =============================================================================
from sympy.polys import galoistools as gt
from sympy.ntheory import isprime
from sympy.polys.domains import ZZ
# =============================================================================
#teste
def _remove_trailing_zeros(lst):
"""
Removes any zeros at the end of the list.
"""
k=0
for k, value in enumerate( lst[::-1] ):
if value != 0:
break
lst_no_trailing_zeroes = lst if k == 0 else lst[:-k]
return lst_no_trailing_zeroes
def _represents_natural_number(string):
"""
Returns True if the input string is an integer >= 0, False otherwise.
"""
try:
i = int(string)
return i >= 0
except:
return False
def make_poly_ring(p):
"""
Arguments:
p - prime number >= 2.
Returns a class representing the ring of polynomials over the finite
field Z/(p) =: Zp.
"""
assert(isprime(p)), '%d is not a prime number' %p
class PolynomialsOverZp:
"""
A polynomial's attribute will be a list with coefficients listed in
increasing order of degree.
"""
#class attribute
mod_p = p
def __init__(self, lst):
# lists the remainders of the integer division by p
lst = [x % p for x in lst]
# removes any zeros at the end of the list
lst = _remove_trailing_zeros(lst)
self.coefs = lst
@classmethod
def from_dict(cls, dct):
"""
In case we want to instantiate a polynomial using a dict.
Makes it easier to create polynomials such as 1 + x^100.
It is assumed the dict has the form {..., power : coefficient, ...},
where type(power) = string and type(coefficient) = int.
Coefficients are assumed to be zero if they have no corresponding key.
"""
if len(dct) == 0:
result = cls([])
else:
assert(
all([_represents_natural_number(key) for key in dct.keys()])
), 'Invalid keys in dict'
powers = [int(x) for x in dct.keys()]
degree = max(powers)
# An m-degree polynomial has m+1 coefficients
lst = [0]*(degree+1)
# fills in the coefficients
for i in powers:
lst[i] = dct[str(i)]
result = cls(lst)
return result
@classmethod
def from_int(cls, int_):
"""
Allows the instatiation of a constant polynomial from an int type,
as in
f = PolynomialsOverZp(2).
Creates an element of the ring Zp viewed as a subring of Zp[X].
"""
assert(type(int_) == int), 'Input is not of type int.'
return cls([int_])
def is_zero(self):
return self.coefs == []
        def typesetter(func):
            """
            Decorator.
            Allows binary operations to accept several input types: class instances
            of polynomials, lists, dicts and ints.
            """
            def new_func(self, other):
                # in this case there is nothing to do, the default methods assume
                # that the input is a class instance
                if isinstance(other, PolynomialsOverZp):
                    result = func(self, other)
                # instantiates a polynomial from the list
                elif type(other) == list:
                    other = PolynomialsOverZp(other)
                    result = func(self, other)
                # instantiates a polynomial from the dict
                elif type(other) == dict:
                    other = PolynomialsOverZp.from_dict(other)
                    result = func(self, other)
                # TODO: check whether the int type causes problems in the
                # various methods (translated from the original Portuguese note)
                # instantiates a constant polynomial
                elif type(other) == int:
                    other = PolynomialsOverZp([other])
                    result = func(self, other)
                else:
                    error_msg = 'Not a valid polynomial.'
                    raise TypeError(error_msg)
                return result
            return new_func
@typesetter
def is_equal(self, other):
"""
Returns True if self = other in Zp[X], False otherwise. The equality
is coefficient-wise.
The method assumes that the argument "other" is a class instance,
but the typesetter decorator allows the following expressions to be
evaluated to True:
PolynomialsOverZp([1, 0, 1]).is_equal([1, 0, 1])
and
PolynomialsOverZp([1,0,1]).is_equal({'0': 1, '2': 1}).
"""
return self.coefs == other.coefs
def __eq__(self, other):
"""
Overriding the is_equal() method.
Allows expressions like the following to be evaluated to True:
Zp_X([1, 0, 1]) == Zp_X.from_dict({'0':1, '2':1}).
The decorator in the is_equal() method also allows
PolynomialsOverZp([1, 0, 1]) == [1, 0, 1]
and
PolynomialsOverZp([1,0,1]) == {'0': 1, '2': 1}.
"""
return self.is_equal(other)
@typesetter
def add(self, other):
"""
Polynomial sum in Zp[x].
The method assumes that the argument "other" is a class instance,
but the typesetter decorator allows the following:
PolynomialsOverZp([1, 0, 1]).add([0, 1, 0, 1])
and
PolynomialsOverZp([1, 0, 1]).add({'1' : 1, '3' : 1}).
"""
# the copy assures that the self instance is not changed.
self_coefs = self.coefs.copy()
# the functions that were implemented in sympy.polys.galoistools
# assume that the coefficients are in decreasing order of degree,
# unlike this class.
self_coefs.reverse()
# same as above
other_coefs = other.coefs.copy()
other_coefs.reverse()
result_coefs = gt.gf_add(self_coefs, other_coefs, p, ZZ)
# lists the coefficients in increasing order of degree
result_coefs.reverse()
result = PolynomialsOverZp(result_coefs)
return result
def __add__(self, other):
"""
Overriding. Allows to write expressions like f + g, where f and g
are class instances.
Also allows things like
f + [1, 0, 1]
and
f + {'100' : 1},
because of the use of the typesetter decorator in the add() method.
"""
return self.add(other)
def neg(self):
"""
Return the additive inverse of "self" in the ring Zp[X].
Remark: not necessary if p=2, since h = -h in Z2[X].
"""
return PolynomialsOverZp([(-n) % p for n in self.coefs])
def __neg__(self):
"""
Overriding. Allows one to write -h for some class instance h.
"""
return self.neg()
@typesetter
def sub(self, other):
"""
Polynomial difference in Zp[X].
The method assumes that the argument "other" is a class instance,
but the typesetter decorator allows the following:
PolynomialsOverZp([1, 0, 1]).sub([0, 1, 0, 1])
and
PolynomialsOverZp([1, 0, 1]).sub({'1' : 1, '3' : 1}).
"""
return self.add(other.neg())
def __sub__(self, other):
"""
Overriding. Allows to write expressions like f - g, where f and g
are class instances.
Also allows things like
f - [1, 0, 1]
and
f - {'100' : 1},
because of the use of the typesetter decorator in the sub() method.
"""
return self.sub(other)
#TODO: docstring
@typesetter
def mul(self, other):
"""
Polynomial multiplication in Zp[X].
"""
# the copy assures that the self instance is not changed
self_coefs = self.coefs.copy()
# the functions that were implemented in sympy.polys.galoistools
# assume that the coefficients are in decreasing order of degree,
# unlike this class
self_coefs.reverse()
# same as above
other_coefs = other.coefs.copy()
other_coefs.reverse()
result_coefs = gt.gf_mul(self_coefs, other_coefs, p, ZZ)
# lists the coefficients in increasing order of degree
result_coefs.reverse()
result = PolynomialsOverZp(result_coefs)
return result
def __mul__(self, other):
"""
Overriding. Allows to write expressions like f*g, where f and g
are class instances.
Also allows things like
f * [1, 0, 1]
and
f * {'100' : 1},
because of the use of the typesetter decorator in the mul() method.
"""
return self.mul(other)
#TODO: testar
#TODO: doctring
@typesetter
def div_mod(self, other):
"""
Division with remainder in Zp[X].
Recall that, since Zp is a field, then Zp[X] is an euclidean domain
and thus this division is possible whenever "other" != 0.
"""
# the copy assures that the self instance is not changed
self_coefs = self.coefs.copy()
# the functions that were implemented in sympy.polys.galoistools
# assume that the coefficients are in decreasing order of degree,
# unlike this class
self_coefs.reverse()
# same as above
other_coefs = other.coefs.copy()
other_coefs.reverse()
quot_coefs, remainder_coefs = gt.gf_div(self_coefs, other_coefs,
p, ZZ)
# lists the coefficients in increasing order of degree
quot_coefs.reverse()
remainder_coefs.reverse()
quot = PolynomialsOverZp(quot_coefs)
remainder = PolynomialsOverZp(remainder_coefs)
return quot, remainder
#TODO: docstring
def quotient(self, other):
"""
Returns the quotient of the euclidean division in Zp[X].
"""
result, _ = self.div_mod(other)
return result
#TODO: docstring
def __floordiv__(self, other):
"""
Overriding. Allows to write f // g.
"""
result = self.quotient(other)
return result
#TODO: testar
#TODO: docstring
def mod(self, other):
"""
Returns the remainder of the euclidean division in Zp[X].
"""
_, result = self.div_mod(other)
return result
def __mod__(self, other):
"""
Overriding. Allows to write f % g.
"""
return self.mod(other)
#TODO: testar
#TODO: docstring
def is_irreducible(self):
"""
Tests irreducibility.
"""
if self.is_zero():
return False
else:
# the | |
# repo: cottongin/coolchat-discord-bot -- file: cogs/scores.py
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pickle
import shlex
import aiohttp
import coloredlogs
import pendulum
import requests
import discord
from discord.ext import commands, tasks
from discord.utils import get
# Module-level logger; coloredlogs formats records as
# "[time] <name> LEVEL | message" using str.format-style placeholders (style='{').
LOGGER = logging.getLogger(__name__)
coloredlogs.install(
    level='DEBUG', logger=LOGGER,
    fmt="[{asctime}] <{name}> {levelname:>8} | {message}",
    datefmt='%Y-%m-%d %H:%M:%S',
    style='{'
)
class ScoresCog(commands.Cog, name="Scores"):
"""Scores Plugin featuring various scores-related commands"""
    def __init__(self, bot):
        """Set up MLB score-polling state for the cog.

        Restores previously persisted monitoring state from the bot's
        key/value store (key 'scores_db') and primes today's scoreboard JSON,
        then starts the two background polling loops.
        """
        self.bot = bot
        self.__name__ = __name__
        # upper bound for the adaptive polling back-off, in seconds (5 min)
        self.max_check = 5 * 60
        # NOTE(review): the doubled '&&' separators below are preserved from
        # the original stitch API URL -- confirm they are intentional
        self.base_mlb_url = (
            "https://bdfed.stitch.mlbinfra.com/bdfed/transform-mlb-scoreboard"
            "?stitch_env=prod"
            "&sortTemplate=4"
            "&sportId=1"
            "&startDate={date}&endDate={date}"
            "&gameType=E"
            "&&gameType=S"
            "&&gameType=R"
            "&&gameType=F"
            "&&gameType=D"
            "&&gameType=L"
            "&&gameType=W"
            "&&gameType=A"
            "&language=en"
            "&leagueId=104"
            "&&leagueId=103"
            "&contextTeamId="
        )
        self.date = pendulum.today()
        self.api_date = self.date.format("YYYY-MM-DD")
        # fetch_json_requests is defined elsewhere in this cog -- presumably a
        # thin requests-based JSON getter (TODO confirm)
        self.mlb_json = self.fetch_json_requests(self.base_mlb_url.format(
            date=self.api_date
        ))
        # gameUtils boolean flags grouped by the game state they represent
        self.states = {
            'live': ['isLive', 'isWarmup'],
            'ppd': ['isCancelled', 'isPostponed', 'isSuspended'],
            'delay': ['isDelayed', 'isInGameDelay'],
            'final': ['isFinal'],
        }
        # restore persisted state; fall back to empty on first run or bad blob
        try:
            _ = pickle.loads(self.bot.db.get('scores_db'))
        except Exception as err:
            LOGGER.debug(err)
            _ = {}
        self.monitored = _.get('monitored', {})
        self.mlb_games = _.get('mlb_games', {})
        self.games_start = _.get('games_start', [])
        self.games_end = _.get('games_end', [])
        self.games_ppd = _.get('games_ppd', [])
        self.dupes = _.get('dupes', [])
        # self.monitored = _.get('monitored')
        # self.mlb_games = {}
        # self.games_start = []
        # self.games_end = []
        # self.games_ppd = []
        # self.dupes = []
        # except Exception as e:
        #     LOGGER.debug(e)
        #     pass
        # classify today's games and start the background loops
        self._parse_mlb_json_into_gameIDs()
        self._check_date.start()
        self._check_games.start()
def cog_unload(self):
try:
del self.mlb_json
_ = {
'monitored': self.monitored,
'mlb_games': self.mlb_games,
'games_start': self.games_start,
'games_ppd': self.games_ppd,
'games_end': self.games_end,
'dupes': self.dupes,
}
__ = pickle.dumps(_)
self.bot.db.set('scores_db', __)
except Exception as err:
LOGGER.error(f"[1] {err}")
pass
del self.monitored
del self.mlb_games
del self.mlb_json
del self.games_start
del self.games_end
del self.games_ppd
del self.dupes
self._check_date.cancel()
self._check_games.cancel()
def _get_emoji(self, guild_query, emoji_query, mode=None):
emoji_name = f"{guild_query.lower()}_{emoji_query.lower()}"
guild = get(self.bot.guilds, name=guild_query.lower())
if not guild:
return ""
emoji = get(guild.emojis, name=emoji_name)
if not emoji:
return ""
if mode == "url":
emoji = emoji.url
else:
emoji = "{} ".format(emoji)
return emoji
    def _parse_mlb_json_into_gameIDs(self):
        """Classify today's games from the cached scoreboard JSON.

        Updates self.mlb_games (games to poll), self.games_start,
        self.games_ppd and self.games_end according to each game's
        'gameUtils' state flags.
        """
        if not self.mlb_json:
            return
        for game in self.mlb_json['dates'][0]['games']:
            def _states(state):
                # collect the gameUtils flags belonging to one state group
                return [game['gameUtils'].get(key) for key in self.states[state]]
            gid = str(game['gamePk'])
            if any(_states('live')) and not any(_states('ppd')) and not any(_states('final')):
                # live game: (re)enable polling and refresh the full feed
                if not self.mlb_games.get(gid):
                    self.mlb_games[gid] = {'check': True}
                    self.games_start.append(gid)
                else:
                    self.mlb_games[gid]['check'] = True
                # if not self.mlb_games.get(gid):
                self.mlb_games[gid]['full_json'] = self.fetch_json_requests(
                    f"https://statsapi.mlb.com/api/v1.1/game/{gid}/feed/live"
                )
            elif any(_states('ppd')) and not any(_states('delay')) and not any(_states('final')):
                # postponed/cancelled/suspended: stop tracking the game
                if not self.mlb_games.get(gid):
                    # self.mlb_games[gid] = {'check': False}
                    # self.games_ppd.append(gid)
                    continue
                # else:
                # self.mlb_games[gid]['check'] = False
                # self.mlb_games[gid]['ppd'] = True
                self.games_ppd.append(gid)
                self.mlb_games.pop(gid, None)
            elif any(_states('delay')) and not any(_states('ppd')):
                # delayed: keep the entry but pause polling
                if not self.mlb_games.get(gid):
                    self.mlb_games[gid] = {'check': False}
                    # self.games_ppd.append(gid)
                else:
                    self.mlb_games[gid]['check'] = False
                    self.mlb_games[gid]['delay'] = True
            elif any(_states('final')):
                # finished: move the game to the ended list
                self.mlb_games.pop(gid, None)
                self.games_end.append(gid)
            else:
                # none of the known states: prune entries no longer present in
                # today's feed
                for stale_game in self.mlb_games.copy():
                    if not self.mlb_games.get(stale_game):
                        continue
                    if str(stale_game) not in [str(x.get('gamePk')) for x in self.mlb_json['dates'][0]['games']]:
                        self.mlb_games.pop(str(stale_game), None)
                # NOTE(review): this also drops the *current* gid even though
                # it is part of today's feed -- confirm this is intentional
                self.mlb_games.pop(gid, None)
@tasks.loop(seconds=10)
async def _check_games(self):
try:
LOGGER.info("--------------------------------------")
# now = pendulum.now().format("DD-MMM HH:mm:ss")
LOGGER.info(f"checking games...")
if not self.mlb_games:
old_interval = self._check_games.seconds
if old_interval <= self.max_check:
new_interval = max(10, min(old_interval + 10, self.max_check))
self._check_games.change_interval(seconds=new_interval)
self._check_date.change_interval(seconds=new_interval)
LOGGER.debug("no games, back off timers [{}s -> {}s]".format(
old_interval,
new_interval,
))
else:
LOGGER.debug("no games, timers maxed out [{}s]".format(
self.max_check
))
return
else:
old_interval = self._check_games.seconds
if old_interval != 10:
self._check_games.change_interval(seconds=10)
self._check_date.change_interval(seconds=10)
LOGGER.debug("new games, resetting timers [10s]")
# check starting games
for gid in self.games_start.copy():
if not self.mlb_games.get(gid):
continue
data = self.mlb_games[gid].get('full_json')
if not data:
continue
gd = data['gameData']
away = gd['teams']['away']
home = gd['teams']['home']
away_lineup = []
home_lineup = []
away_players = data['liveData']['boxscore']['teams']['away']
home_players = data['liveData']['boxscore']['teams']['home']
for idx, player in enumerate(away_players['battingOrder']):
pd = away_players['players'].get("ID{}".format(player))
away_lineup.append("{}. {} ({})".format(
idx + 1,
pd['person']['fullName'],
pd['position']['abbreviation'],
))
away_pitcher = gd['probablePitchers']['away']['id']
home_pitcher = gd['probablePitchers']['home']['id']
away_lineup.append("SP: {} ({})".format(
away_players['players'].get(f"ID{away_pitcher}")['person']['fullName'],
away_players['players'].get(f"ID{away_pitcher}")['seasonStats']['pitching']['era'],
))
for idx, player in enumerate(home_players['battingOrder']):
pd = home_players['players'].get("ID{}".format(player))
home_lineup.append("{}. {} ({})".format(
idx + 1,
pd['person']['fullName'],
pd['position']['abbreviation'],
))
home_lineup.append("SP: {} ({})".format(
home_players['players'].get(f"ID{home_pitcher}")['person']['fullName'],
home_players['players'].get(f"ID{home_pitcher}")['seasonStats']['pitching']['era'],
))
weather = (
"**Weather Report**\n"
"🌡 {temp}°F\n"
"🪟 Conditions: {condition}\n"
"💨 Wind: {wind}"
).format(**gd.get('weather', {}))
venue = (
"**Location**\n"
"__{name}__ {location}\n"
"{details}"
).format(
name=gd.get('venue', {}).get('name', 'UNK'),
location="{city} {stateAbbrev}".format(**gd.get('venue', {}).get('location', {})),
details="🏟 {capacity:,} / {roofType} / {turfType}".format(
**gd.get('venue', {}).get('fieldInfo', {})
)
)
away_emoji = self._get_emoji('mlb', away['abbreviation'])
home_emoji = self._get_emoji('mlb', home['abbreviation'])
embed = discord.Embed(
title="{}{} ({}) @ {}{} ({}) is _starting soon_".format(
away_emoji,
away['teamName'],
"{}-{} {}".format(
away.get('record', {}).get('wins', 0),
away.get('record', {}).get('losses', 0),
away.get('record', {}).get('winningPercentage', '.000'),
),
home_emoji,
home['teamName'],
"{}-{} {}".format(
home.get('record', {}).get('wins', 0),
home.get('record', {}).get('losses', 0),
home.get('record', {}).get('winningPercentage', '.000'),
),
),
color=0x80AC5F,
description="\n".join([weather, venue]),
)
embed.add_field(
name="`{} Lineup`".format(away['abbreviation']),
value="\n".join(away_lineup)
)
embed.add_field(
name="`{} Lineup`".format(home['abbreviation']),
value="\n".join(home_lineup)
)
message = (
"{} ({}) @ {} ({}) is **starting soon**\n"
"{} Lineup: {}\n"
"{} Lineup: {}"
).format(
away['teamName'],
"{}-{} {}".format(
away.get('record', {}).get('wins', 0),
away.get('record', {}).get('losses', 0),
away.get('record', {}).get('winningPercentage', '.000'),
),
home['teamName'],
"{}-{} {}".format(
home.get('record', {}).get('wins', 0),
home.get('record', {}).get('losses', 0),
home.get('record', {}).get('winningPercentage', '.000'),
),
away['abbreviation'], " ".join(away_lineup),
home['abbreviation'], " ".join(home_lineup),
)
msg_hash = hash(gid + message)
if msg_hash not in self.dupes:
for channel in self.monitored:
try:
await self.bot.get_channel(channel).send(embed=embed)
except AttributeError as err:
LOGGER.error(f"[3c] {err}")
pass
self.dupes.append(msg_hash)
self.games_start.remove(gid)
# check ending games
for gid in self.games_end.copy():
if not self.mlb_games.get(gid):
continue
data = self.mlb_games[gid].get('full_json')
if not data:
continue
message = " is ending"
# SD 2 [H5 E0 LOB6] @ TEX 0 [H5 E0 LOB10] is final! 9/F (W: <NAME> (4.05/1-1) L: <NAME> (4.09/0-2) S: <NAME> (0.00/5-0))
away_team = data['gameData']['teams']['away']['teamName']
away_score = data['liveData']['linescore']['teams']['away']['runs']
home_team = data['gameData']['teams']['home']['teamName']
home_score = data['liveData']['linescore']['teams']['home']['runs']
inning = data['liveData']['linescore']['currentInning']
away_emoji = self._get_emoji('mlb', data['gameData']['teams']['away']['abbreviation'])
home_emoji = self._get_emoji('mlb', data['gameData']['teams']['home']['abbreviation'])
if away_score > home_score:
away_team = f"**{away_team}"
away_score = f"{away_score}**"
elif home_score > away_score:
home_team = f"**{home_team}"
home_score = f"{home_score}**"
embed = discord.Embed(
description="{}{} {} @ {}{} {} is final! {}/F".format(
away_emoji,
away_team,
away_score,
home_emoji,
home_team,
home_score,
inning
),
color=0xD0021B
)
msg_hash = hash(gid + message)
if msg_hash not in self.dupes:
for channel in self.monitored:
try:
await self.bot.get_channel(channel).send(embed=embed)
except AttributeError as err:
LOGGER.error(f"[3b] {err}")
pass
self.dupes.append(msg_hash)
self.games_end.remove(gid)
# check ongoing games
for gid, game in self.mlb_games.copy().items():
if not self.mlb_games.get(gid):
continue
if not self.mlb_games[gid].get('check'):
continue
LOGGER.debug(f"fetching json for {gid}")
new_json = await self.fetch_json(
f"http://statsapi.mlb.com/api/v1/game/{gid}/playByPlay"
)
self.mlb_games[gid]['new_json'] = new_json
if not game.get('old_json'):
self.mlb_games[gid]['old_json'] = new_json.copy()
for gid, game in self.mlb_games.copy().items():
if not self.mlb_games.get(gid):
continue
if not game.get('check'):
continue
old_plays = game['old_json']['scoringPlays']
new_plays = game['new_json']['scoringPlays']
swap = False
if old_plays == new_plays:
swap = False
continue
else:
swap = True
# LOGGER.debug(old_plays)
# LOGGER.debug(new_plays)
# LOGGER.debug(old_plays + new_plays)
all_plays = old_plays + new_plays
scoring_plays = [play for play in all_plays if all_plays.count(play) == 1]
LOGGER.debug(scoring_plays)
# scoring_plays = set(old_plays + new_plays)
for idx in scoring_plays:
scoring_play = game['new_json']['allPlays'][idx]
# details = None
# for gd in self.mlb_json['dates'][0]['games'].copy():
# if int(gd['gamePk']) == int(gid):
# details = gd
# LOGGER.debug((
# "{}\t[{}] found details ... "
# "mlb_json['{}'] ... {}").format(
# now,
# gid,
# gd['gamePk'],
# (int(gd['gamePk']) == int(gid)),
# ))
# break
details = game.get('full_json', {}).get('gameData', {})
halfInning = {
'bottom': '⬇',
'top': '⬆',
}
homer = False
event = ""
if scoring_play['result'].get('event'):
event = "{} · ".format(scoring_play['result']['event'].upper())
homer = True if scoring_play['result']['eventType'] == "home_run" else False
if homer:
hit_details = ""
for play in scoring_play['playEvents']:
if play.get('hitData'):
try:
hit_details = "**{launchSpeed} mph** · ∡{launchAngle}° · **{totalDistance} ft**".format(
**play['hitData']
)
except KeyError as err:
LOGGER.error(f"[2] {err}")
continue
break
else:
hit_details = ""
message = "{} {} - {}{}{}".format(
halfInning.get(scoring_play['about']['halfInning']),
self.make_ordinal(scoring_play['about']['inning']),
event,
scoring_play['result'].get('description', 'Error fetching scoring details'),
hit_details,
)
away_emoji = self._get_emoji('mlb', details['teams']['away']['abbreviation'])
home_emoji = self._get_emoji('mlb', details['teams']['home']['abbreviation'])
scoring_team = ""
scoring_team_emoji_url = ""
if details:
if scoring_play['about']['halfInning'] == "bottom":
home_tag = "**"
away_tag = ""
scoring_team = "{} · ".format(details['teams']['home']['abbreviation'])
scoring_team_emoji_url = self._get_emoji('mlb', details['teams']['home']['abbreviation'], 'url')
away_or_home = "away"
else:
home_tag = ""
away_tag = "**"
scoring_team = "{} · ".format(details['teams']['away']['abbreviation'])
scoring_team_emoji_url = self._get_emoji('mlb', details['teams']['away']['abbreviation'], 'url')
away_or_home = "home"
# linescore = game.get('full_json', {}) \
# .get('liveData', {}) \
# .get('linescore', {})
message = "{}{} {}{} @ {}{} {}{} - {}".format(
away_tag,
details['teams']['away']['abbreviation'],
# linescore['teams']['away']['runs'],
scoring_play['result'].get('awayScore', 0),
away_tag,
home_tag,
details['teams']['home']['abbreviation'],
# linescore['teams']['home']['runs'],
scoring_play['result'].get('homeScore', 0),
home_tag,
message,
)
scoring_player = [scoring_play['matchup']['batter']['id'], scoring_play['matchup']['batter']['fullName']]
pitcher_id = "ID{}".format(scoring_play['matchup']['pitcher']['id'])
# t = game.get('full_json')
# t = t.get('liveData')
# t = t.get('boxscore')
# t = t.get('teams')
# t = t.get(away_or_home)
# t = t.get('players')
# print(t, pitcher_id)
# t = t.get(pitcher_id)
# t = t.get('stats')
# t = t.get('pitching')
# t = t.get('numberOfPitches', 0)
# print(t)
num_pitches = game.get(
'full_json', {}).get(
'liveData', {}).get(
'boxscore', {}).get(
'teams', {}).get(
away_or_home, {}).get(
'players', {}).get(
pitcher_id, {}).get(
'stats', | |
define at line ' + str(line_number))
def parse_messages(self):
self.File.setup_iterator()
while(self.File.has_next()):
[line_number, line] = self.File.get_next()
if(line[0] == 'message'):
# Create new message object
message = Message.get_message(line)
# Extract message block
block = self.File.extract_block(line_number)
if(not block == None):
self.File.remove_lines(block.line_numbers)
# iterate through the message block
block.setup_iterator()
while(block.has_next()):
[block_line_number, block_line] = block.get_next()
try:
# Check different fields
field = RequiredField.get_field(block_line)
if(not field == None):
message.add_field(field)
block.remove_lines([block_line_number])
continue
field = RepeatedField.get_field(block_line)
if(not field == None):
message.add_field(field)
block.remove_lines([block_line_number])
continue
field = FixedRepeatedField.get_field(block_line)
if(not field == None):
message.add_field(field)
block.remove_lines([block_line_number])
continue
field = OptionalField.get_field(block_line)
if(not field == None):
message.add_field(field)
block.remove_lines([block_line_number])
continue
if(OneofField.is_field(block_line)): # check if oneof-field
oneof_block = block.extract_block(block_line_number)
field = OneofField.get_field(oneof_block)
message.add_field(field)
block.remove_lines(oneof_block.line_numbers)
continue
except Exception as e:
print("Exception at line " + str(block_line_number) + ":")
print(e)
print(traceback.format_exc())
sys.exit()
# Add message to Messages
Messages.append(message)
else:
raise Exception('Could not parse message at line ' + str(line_number))
sys.exit()
class OutputFile:
    """Accumulates generated text line by line and writes it out on demand."""
    def __init__(self, output_file):
        self.output_file = output_file
        self.file_output = ""
    def append_line(self, s = ""):
        """Append s (default: an empty line) plus a newline to the buffer."""
        self.file_output += s + "\n"
    def write_to_file(self):
        """Flush the accumulated buffer to self.output_file."""
        with open(self.output_file, "w") as f:
            f.write(self.file_output)
def search_size_type(size):
    """Return [type_name, byte_count] of the smallest unsigned integer type
    able to hold `size`, raising when even uint64 is too small."""
    for name, nbytes in (("uint8", 1), ("uint16", 2), ("uint32", 4), ("uint64", 8)):
        # 256**nbytes is the first value that does NOT fit in nbytes bytes.
        if size < (2 ** 8) ** nbytes:
            return [name, nbytes]
    raise Exception("Not found a valid size-type for size: " + str(size))
class C_Creator:
    """Emits the C implementation (.c) and header (.h) files for the parsed
    tinybuf messages: include guards, #defines, oneof tag macros, message
    structs, and the tb_field_t descriptor arrays used by the runtime.
    Python 2 module (uses print statements)."""
    def __init__(self, output_path, output_name, imports, defines, messages):
        # Generation happens eagerly in the constructor.
        self.output_path = output_path
        self.output_name = output_name
        self.imports = imports
        self.defines = defines
        self.messages = messages
        self.create()
    def create(self):
        """Build both output files in memory, then write them to disk."""
        print "Creating C/H-Files..."
        self.c_file = OutputFile(self.output_path + "/" + self.output_name + ".c")
        self.h_file = OutputFile(self.output_path + "/" + self.output_name + ".h")
        # First write the include-pattern for header files:
        self.h_file.append_line("#ifndef " + "__" + self.output_name.upper() + "_H")
        self.h_file.append_line("#define " + "__" + self.output_name.upper() + "_H")
        self.h_file.append_line()
        # Then write the imports
        self.h_file.append_line("#include <stdint.h>")
        self.h_file.append_line('#include "tinybuf.h"')
        self.c_file.append_line('#include "tinybuf.h"')
        self.c_file.append_line('#include "' + self.output_name + ".h" + '"')
        self.c_file.append_line()
        # First create the imports
        for imp in self.imports:
            self.h_file.append_line('#include "' + imp.name + '.h"')
        self.h_file.append_line()
        # Then create the defines
        for define in self.defines:
            self.h_file.append_line("#define " + define.name + " " + str(define.number))
        self.h_file.append_line()
        # Then create the oneof_tags from the messages
        for message in self.messages:
            self.create_oneof_tags(message)
        self.h_file.append_line()
        # Then create the structs from the messages
        for message in self.messages:
            self.create_struct(message)
            self.h_file.append_line()
        # Then create the field-arrays from the messages
        for message in self.messages:
            self.create_message_fields(message)
            self.c_file.append_line()
        # Finally close the header-file with #endif
        self.h_file.append_line()
        self.h_file.append_line("#endif")
        #print "H-File:"
        #print self.h_file.file_output
        #print "C-File:"
        #print self.c_file.file_output
        self.c_file.write_to_file()
        self.h_file.write_to_file()
    def create_oneof_tags(self, message):
        """Emit a '#define <Message>_<member>_tag N' for every oneof member."""
        for field in message.fields:
            if(isinstance(field, OneofField)): # Is oneof field
                for inner_field in field.inner_fields:
                    self.h_file.append_line("#define " + message.name + "_" + inner_field.name + "_tag " + str(inner_field.tag))
    def get_field_type_mapping(self, field_type):
        """Map a tinybuf scalar type name to its C type (stdint names);
        unknown names (message types) are passed through unchanged."""
        field_type_mapping = {"uint8": "uint8_t", "int8": "int8_t", "uint16": "uint16_t", "int16": "int16_t",
                              "uint32": "uint32_t", "int32": "int32_t", "uint64": "uint64_t", "int64": "int64_t",
                              "float": "float", "double": "double"}
        if field_type in field_type_mapping:
            return field_type_mapping[field_type]
        else: # For example in the case of message as field type
            return field_type
    def get_field_type_identifier(self, field_type):
        """Map a tinybuf scalar type name to its FIELD_TYPE_* constant;
        anything unrecognised is treated as a nested message."""
        field_type_identifier ={"uint8": FIELD_TYPE_UINT, "int8": FIELD_TYPE_INT, "uint16": FIELD_TYPE_UINT, "int16": FIELD_TYPE_INT,
                                "uint32": FIELD_TYPE_UINT, "int32": FIELD_TYPE_INT, "uint64": FIELD_TYPE_UINT, "int64": FIELD_TYPE_INT,
                                "float": FIELD_TYPE_FLOAT, "double": FIELD_TYPE_DOUBLE}
        if field_type in field_type_identifier:
            return field_type_identifier[field_type]
        else: # For example in the case of message as field type
            return FIELD_TYPE_MESSAGE
    def create_struct(self, message):
        """Emit the C 'typedef struct { ... } <Message>;' declaration.

        Optional fields get a has_<name> flag, repeated fields get a
        <name>_count member sized by search_size_type, and oneof fields
        become a which_<name> selector plus an anonymous union."""
        self.h_file.append_line("typedef struct {")
        for field in message.fields:
            if(isinstance(field, RequiredField)): # Is required field
                self.h_file.append_line("\t" + self.get_field_type_mapping(field.type) + " " + field.name + ";")
            elif(isinstance(field, OptionalField)): # Is optional field
                self.h_file.append_line("\t" + "uint8_t has_" + field.name + ";")
                self.h_file.append_line("\t" + self.get_field_type_mapping(field.type) + " " + field.name + ";")
            elif(isinstance(field, RepeatedField)): # Is repeated field
                [size_type, size_type_byte_number] = search_size_type(field.size)
                self.h_file.append_line("\t" + self.get_field_type_mapping(size_type) + " " + field.name + "_count;")
                self.h_file.append_line("\t" + self.get_field_type_mapping(field.type) + " " + field.name + "[" + str(field.size) + "];")
            elif(isinstance(field, FixedRepeatedField)): # Is fixed repeated field
                self.h_file.append_line("\t" + self.get_field_type_mapping(field.type) + " " + field.name + "[" + str(field.size) + "];")
            elif(isinstance(field, OneofField)): # Is oneof field
                self.h_file.append_line("\t" + "uint8_t which_" + field.name + ";")
                self.h_file.append_line("\t" + "union {")
                for inner_field in field.inner_fields:
                    self.h_file.append_line("\t\t" + self.get_field_type_mapping(inner_field.type) + " " + inner_field.name + ";")
                self.h_file.append_line("\t" + "} " + field.name + ";")
            else:
                raise Exception ("Field " + str(field) + " is not supported")
        self.h_file.append_line("} " + message.name + ";")
    def create_message_fields(self, message):
        """Emit the tb_field_t descriptor array for a message: extern
        declaration in the header, initialised array (one entry per field,
        oneof members counted individually, plus TB_LAST_FIELD) in the C file."""
        C_FIELD_TYPE_NAME = "tb_field_t"
        C_OFFSETOF_MAKRO_NAME = "tb_offsetof"
        C_MEMBERSIZE_MAKRO_NAME = "tb_membersize"
        C_DELTA_MAKRO_NAME = "tb_delta"
        C_LAST_FIELD_NAME = "TB_LAST_FIELD"
        # Determine the number of fields in the message
        num_fields = 0
        for field in message.fields:
            if(isinstance(field, OneofField)): # Is oneof field
                num_fields = num_fields + len(field.inner_fields)
            else:
                num_fields = num_fields + 1
        # Declare the field-array in the H-file
        self.h_file.append_line("extern const " + C_FIELD_TYPE_NAME + " " + message.name + "_fields[" + str(num_fields + 1) + "];")
        # Create the field-arrays in the C-file
        self.c_file.append_line("const " + C_FIELD_TYPE_NAME + " " + message.name + "_fields[" + str(num_fields + 1) + "] = {")
        for field in message.fields:
            if(isinstance(field, RequiredField)): # Is required field
                field_identifier = self.get_field_type_identifier(field.type) | FIELD_RULE_REQUIRED
                # Nested messages point at their own descriptor array.
                field_ptr = str("&" + field.type + "_fields") if (field_identifier & FIELD_TYPE_MESSAGE) else "NULL"
                s = "\t{" + str(field_identifier) + ", ";
                s += C_OFFSETOF_MAKRO_NAME + "(" + message.name + ", " + field.name + "), "
                s += "0, 0, "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "), 0, " + "0, 0, " + field_ptr + "},"
                self.c_file.append_line(s)
            elif(isinstance(field, OptionalField)): # Is optional field
                field_identifier = self.get_field_type_identifier(field.type) | FIELD_RULE_OPTIONAL
                field_ptr = str("&" + field.type + "_fields") if (field_identifier & FIELD_TYPE_MESSAGE) else "NULL"
                s = "\t{" + str(field_identifier) + ", ";
                s += C_OFFSETOF_MAKRO_NAME + "(" + message.name + ", " + field.name + "), "
                s += C_DELTA_MAKRO_NAME + "(" + message.name + ", " + "has_" + field.name + ", " + field.name + "), 1, "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "), 0, " + "0, 0, " + field_ptr + "},"
                self.c_file.append_line(s)
            elif(isinstance(field, RepeatedField)): # Is repeated field
                [size_type, size_type_byte_number] = search_size_type(field.size)
                field_identifier = self.get_field_type_identifier(field.type) | FIELD_RULE_REPEATED
                field_ptr = str("&" + field.type + "_fields") if (field_identifier & FIELD_TYPE_MESSAGE) else "NULL"
                s = "\t{" + str(field_identifier) + ", ";
                s += C_OFFSETOF_MAKRO_NAME + "(" + message.name + ", " + field.name + "), "
                s += C_DELTA_MAKRO_NAME + "(" + message.name + ", " + field.name + "_count" + ", " + field.name + "), " + str(size_type_byte_number) + ", "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "[0]" + "), "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + ")/" + C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "[0]" + "), "
                s += "0, 0, "
                s += field_ptr + "},"
                self.c_file.append_line(s)
            elif(isinstance(field, FixedRepeatedField)): # Is fixed repeated field
                field_identifier = self.get_field_type_identifier(field.type) | FIELD_RULE_FIXED_REPEATED
                field_ptr = str("&" + field.type + "_fields") if (field_identifier & FIELD_TYPE_MESSAGE) else "NULL"
                s = "\t{" + str(field_identifier) + ", ";
                s += C_OFFSETOF_MAKRO_NAME + "(" + message.name + ", " + field.name + "), "
                s += "0, 0, "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "[0]" + "), "
                s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + ")/" + C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "[0]" + "), "
                s += "0, 0, "
                s += field_ptr + "},"
                self.c_file.append_line(s)
            elif(isinstance(field, OneofField)): # Is oneof field
                for i in range(0, len(field.inner_fields)):
                    inner_field = field.inner_fields[i]
                    inner_field_identifier = self.get_field_type_identifier(inner_field.type) | FIELD_RULE_ONEOF
                    inner_field_ptr = str("&" + inner_field.type + "_fields") if (inner_field_identifier & FIELD_TYPE_MESSAGE) else "NULL"
                    s = "\t{" + str(inner_field_identifier) + ", ";
                    s += C_OFFSETOF_MAKRO_NAME + "(" + message.name + ", " + field.name + "." + inner_field.name + "), "
                    s += C_DELTA_MAKRO_NAME + "(" + message.name + ", " + "which_" + field.name + ", " + field.name + "." + inner_field.name + "), " + "1" + ", "
                    s += C_MEMBERSIZE_MAKRO_NAME + "(" + message.name + ", " + field.name + "." + inner_field.name + "), "
                    s += "0, "
                    s += str(inner_field.tag) + ", "
                    # Only the first oneof member carries the "first" marker.
                    s += "1" if i == 0 else "0"
                    s += ", " + inner_field_ptr + "},"
                    self.c_file.append_line(s)
            else:
                raise Exception ("Field " + str(field) + " is not supported")
        self.c_file.append_line("\t" + C_LAST_FIELD_NAME + ",")
        self.c_file.append_line("};")
class Python_Creator:
def __init__(self, output_path, output_name, imports, defines, messages, endianness):
self.output_path = output_path
self.output_name = output_name
self.imports = imports
self.defines = defines
self.messages = messages
self.endianness = endianness
self.create()
    def create(self):
        """Build the generated python module in memory and write it to disk:
        imports, defines, oneof tag constants, the _Ostream/_Istream stream
        helpers, and one class per message."""
        print "Creating python-file..."
        self.python_file = OutputFile(self.output_path + "/" + self.output_name + ".py")
        self.python_file.append_line("import struct")
        # First create the imports
        for imp in self.imports:
            self.python_file.append_line("from " + imp.name + " import *")
        self.python_file.append_line()
        # Then create the defines
        for define in self.defines:
            self.python_file.append_line(define.name + " = " + str(define.number))
        self.python_file.append_line()
        # Then create the oneof_tags from the messages
        for message in self.messages:
            self.create_oneof_tags(message)
        self.python_file.append_line()
        # Create Ostream-class (growable output byte buffer)
        self.python_file.append_line("class _Ostream:")
        self.python_file.append_line("\tdef __init__(self):")
        self.python_file.append_line("\t\tself.buf = b''")
        self.python_file.append_line("\tdef write(self, data):")
        self.python_file.append_line("\t\tself.buf += data")
        self.python_file.append_line()
        # Create Istream-class (consuming input byte buffer)
        self.python_file.append_line("class _Istream:")
        self.python_file.append_line("\tdef __init__(self, buf):")
        self.python_file.append_line("\t\tself.buf = buf")
        self.python_file.append_line("\tdef read(self, l):")
        self.python_file.append_line("\t\tif(l > len(self.buf)):")
        self.python_file.append_line('\t\t\traise Exception("Not enough bytes in Istream to read")')
        self.python_file.append_line("\t\tret = self.buf[0:l]")
        self.python_file.append_line("\t\tself.buf = self.buf[l:]")
        self.python_file.append_line("\t\treturn ret")
        self.python_file.append_line()
        # Then create the python-classes from the messages
        for message in self.messages:
            self.create_class(message)
            self.python_file.append_line()
        #print "Python file:"
        #print self.python_file.file_output
        self.python_file.write_to_file()
def create_oneof_tags(self, message):
for field in message.fields:
if(isinstance(field, OneofField)): # Is oneof field
for inner_field in field.inner_fields:
self.python_file.append_line(message.name + "_" + inner_field.name + "_tag = " + str(inner_field.tag))
def get_default_value(self, field_type):
field_type_values = {"uint8": "0", "int8": "0", "uint16": | |
<gh_stars>0
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import test
from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli
class InfortrendCLITestData(object):
    """CLI Test Data."""
    # Infortrend entry
    # Fake backend identifiers as the raidcmd CLI would report them.
    fake_lv_id = ['5DE94FF775D81C30', '1234567890']
    fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173',
                         '987654321', '123456789',
                         '2667FE351FC505AE', '53F3E98141A2E871']
    fake_pair_id = ['<KEY>', '095A184B0ED2DB10']
    fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB']
    fake_data_port_ip = ['172.27.0.1', '172.27.0.2',
                         '172.27.0.3', '172.27.0.4',
                         '172.27.0.5', '172.27.0.6']
    fake_model = ['DS S12F-G2852-6']
    fake_manage_port_ip = ['172.27.0.10']
    fake_system_id = ['DEEC']
    fake_host_ip = ['172.27.0.2']
    fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC']
    fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC',
                         '110123D02310DEEC', '120123D02310DEEC']
    fake_initiator_wwnns = ['2234567890123456', '2234567890543216']
    fake_initiator_wwpns = ['1234567890123456', '1234567890543216']
    fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123',
                          'iqn.1991-05.com.infortrend:pc456']
    fake_lun_map = [0, 1, 2]
    # cinder entry
    # provider_location encodes the decimal system id and partition id.
    test_provider_location = [(
        'system_id^%s@partition_id^%s') % (
            int(fake_system_id[0], 16), fake_partition_id[0]),
    ]
    test_volume = {
        'id': '5aa119a8-d25b-45a7-8d1b-88e127885635',
        'size': 1,
        'name': 'Part-1',
        'host': 'infortrend-server1@backend_1#LV-1',
        'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635',
        'provider_auth': None,
        'project_id': 'project',
        'display_name': None,
        'display_description': 'Part-1',
        'volume_type_id': None,
        'provider_location': test_provider_location[0],
        'volume_attachment': [],
    }
    test_dst_volume = {
        'id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
        'size': 1,
        'name': 'Part-1-Copy',
        'host': 'infortrend-server1@backend_1',
        'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
        'provider_auth': None,
        'project_id': 'project',
        'display_name': None,
        '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
        'display_description': 'Part-1-Copy',
        'volume_type_id': None,
        'provider_location': '',
        'volume_attachment': [],
    }
    test_ref_volume = {
        'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666',
        'size': 1,
    }
    test_ref_volume_with_import = {
        'source-name': 'import_into_openstack',
        'size': 1,
    }
    test_snapshot = {
        'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d',
        'volume_id': test_volume['id'],
        'volume_name': test_volume['name'],
        'volume_size': 2,
        'project_id': 'project',
        'display_name': None,
        'display_description': 'SI-1',
        'volume_type_id': None,
        'provider_location': fake_snapshot_id[0],
    }
    # iSCSI target IQNs derived from the decimal system id.
    test_iqn = [(
        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
            int(fake_system_id[0], 16), 1, 0, 1), (
        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
            int(fake_system_id[0], 16), 1, 0, 1),
    ]
    test_iscsi_properties = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_discovered': True,
            'target_portal': '%s:3260' % fake_data_port_ip[2],
            'target_iqn': test_iqn[0],
            'target_lun': fake_lun_map[0],
            'volume_id': test_volume['id'],
        },
    }
    test_iscsi_properties_with_mcs = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_discovered': True,
            'target_portal': '%s:3260' % fake_data_port_ip[0],
            'target_iqn': test_iqn[1],
            'target_lun': fake_lun_map[2],
            'volume_id': test_volume['id'],
        },
    }
    test_iqn_empty_map = [(
        'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % (
            int(fake_system_id[0], 16), 0, 0, 1),
    ]
    test_iscsi_properties_empty_map = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_discovered': True,
            'target_portal': '%s:3260' % fake_data_port_ip[0],
            'target_iqn': test_iqn_empty_map[0],
            'target_lun': fake_lun_map[0],
            'volume_id': test_volume['id'],
        },
    }
test_initiator_target_map = {
fake_initiator_wwpns[0]: fake_target_wwpns[0:2],
fake_initiator_wwpns[1]: fake_target_wwpns[0:2],
}
test_fc_properties = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': fake_lun_map[0],
'target_wwn': fake_target_wwpns[0:2],
'initiator_target_map': test_initiator_target_map,
},
}
test_initiator_target_map_specific_channel = {
fake_initiator_wwpns[0]: [fake_target_wwpns[1]],
fake_initiator_wwpns[1]: [fake_target_wwpns[1]],
}
test_fc_properties_with_specific_channel = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': fake_lun_map[0],
'target_wwn': [fake_target_wwpns[1]],
'initiator_target_map': test_initiator_target_map_specific_channel,
},
}
test_target_wwpns_map_multipath_r_model = [
fake_target_wwpns[0],
fake_target_wwpns[2],
fake_target_wwpns[1],
fake_target_wwpns[3],
]
test_initiator_target_map_multipath_r_model = {
fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:],
fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:],
}
test_fc_properties_multipath_r_model = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': fake_lun_map[0],
'target_wwn': test_target_wwpns_map_multipath_r_model[:],
'initiator_target_map':
test_initiator_target_map_multipath_r_model,
},
}
test_initiator_target_map_zoning = {
fake_initiator_wwpns[0].lower():
[x.lower() for x in fake_target_wwpns[0:2]],
fake_initiator_wwpns[1].lower():
[x.lower() for x in fake_target_wwpns[0:2]],
}
test_fc_properties_zoning = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': fake_lun_map[0],
'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]],
'initiator_target_map': test_initiator_target_map_zoning,
},
}
test_initiator_target_map_zoning_r_model = {
fake_initiator_wwpns[0].lower():
[x.lower() for x in fake_target_wwpns[1:3]],
fake_initiator_wwpns[1].lower():
[x.lower() for x in fake_target_wwpns[1:3]],
}
test_fc_properties_zoning_r_model = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': fake_lun_map[0],
'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]],
'initiator_target_map': test_initiator_target_map_zoning_r_model,
},
}
test_fc_terminate_conn_info = {
'driver_volume_type': 'fibre_channel',
'data': {
'initiator_target_map': test_initiator_target_map_zoning,
},
}
test_connector_iscsi = {
'ip': fake_host_ip[0],
'initiator': fake_initiator_iqn[0],
'host': 'infortrend-server1@backend_1',
}
test_connector_fc = {
'wwpns': fake_initiator_wwpns,
'wwnns': fake_initiator_wwnns,
'host': 'infortrend-server1@backend_1',
}
fake_pool = {
'pool_name': 'LV-2',
'pool_id': fake_lv_id[1],
'total_capacity_gb': 1000,
'free_capacity_gb': 1000,
'reserved_percentage': 0,
'QoS_support': False,
'thin_provisioning_support': False,
}
test_pools = [{
'pool_name': 'LV-1',
'pool_id': fake_lv_id[0],
'total_capacity_gb': round(857982.0 / 1024, 2),
'free_capacity_gb': round(841978.0 / 1024, 2),
'reserved_percentage': 0,
'QoS_support': False,
'max_over_subscription_ratio': 20.0,
'thin_provisioning_support': False,
'thick_provisioning_support': True,
'provisioned_capacity_gb':
round((400) / 1024, 2),
'infortrend_provisioning': 'full',
}]
test_volume_states = {
'volume_backend_name': 'infortrend_backend_1',
'vendor_name': 'Infortrend',
'driver_version': '99.99',
'storage_protocol': 'iSCSI',
'pools': test_pools,
}
test_host = {
'host': 'infortrend-server1@backend_1',
'capabilities': test_volume_states,
}
test_migrate_volume_states = {
'volume_backend_name': 'infortrend_backend_1',
'vendor_name': 'Infortrend',
'driver_version': '99.99',
'storage_protocol': 'iSCSI',
'pool_name': 'LV-1',
'pool_id': fake_lv_id[1],
'total_capacity_gb': round(857982.0 / 1024, 2),
'free_capacity_gb': round(841978.0 / 1024, 2),
'reserved_percentage': 0,
'QoS_support': False,
'infortrend_provisioning': 'full',
}
test_migrate_host = {
'host': 'infortrend-server1@backend_1#LV-2',
'capabilities': test_migrate_volume_states,
}
test_migrate_volume_states_2 = {
'volume_backend_name': 'infortrend_backend_1',
'vendor_name': 'Infortrend',
'driver_version': '99.99',
'storage_protocol': 'iSCSI',
'pool_name': 'LV-1',
'pool_id': fake_lv_id[1],
'total_capacity_gb': round(857982.0 / 1024, 2),
'free_capacity_gb': round(841978.0 / 1024, 2),
'reserved_percentage': 0,
'QoS_support': False,
'infortrend_provisioning': 'full',
}
test_migrate_host_2 = {
'host': 'infortrend-server1@backend_1#LV-1',
'capabilities': test_migrate_volume_states_2,
}
fake_host = {
'host': 'infortrend-server1@backend_1',
'capabilities': {},
}
fake_volume_id = [test_volume['id'], test_dst_volume['id']]
fake_lookup_map = {
'12345678': {
'initiator_port_wwn_list':
[x.lower() for x in fake_initiator_wwpns],
'target_port_wwn_list':
[x.lower() for x in fake_target_wwpns[0:2]],
},
}
fake_lookup_map_r_model = {
'12345678': {
'initiator_port_wwn_list':
[x.lower() for x in fake_initiator_wwpns[:]],
'target_port_wwn_list':
[x.lower() for x in fake_target_wwpns[1:3]],
},
}
test_new_type = {
'name': 'type0',
'qos_specs_id': None,
'deleted': False,
'extra_specs': {'infortrend_provisioning': 'thin'},
'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0',
}
test_diff = {'extra_specs': {'infortrend_provisioning': ('full', 'thin')}}
def get_fake_cli_failed(self):
    """Return raw CLI text for a failed command.

    Mimics the device output when no device is selected: an initial
    failure (return code 0x0001) followed by 'No selected device'
    (return code 0x000c).
    """
    return """
CLI: Failed
Return: 0x0001
CLI: No selected device
Return: 0x000c
"""
def get_fake_cli_failed_with_network(self):
    """Return raw CLI text for a failure caused by a network problem.

    Same leading failure as :meth:`get_fake_cli_failed` but the second
    message is 'No network' with return code 0x000b.
    """
    return """
CLI: Failed
Return: 0x0001
CLI: No network
Return: 0x000b
"""
def get_fake_cli_succeed(self):
    """Return raw CLI text for a successful command.

    Both the device-selection line and the command result report
    return code 0x0000.
    """
    return """
CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
Return: 0x0000
CLI: Successful: 0 mapping(s) shown
Return: 0x0000
"""
def get_test_show_empty_list(self):
    """Return a parsed CLI result with success code 0 and no entries."""
    empty_entries = []
    return 0, empty_entries
def get_test_show_snapshot(self, partition_id=None, snapshot_id=None):
    """Return a parsed snapshot listing as ``(rc, entries)``.

    When both *partition_id* and *snapshot_id* are given (truthy), a
    single entry built from them is returned; otherwise two entries
    built from this fixture's default partition/snapshot ids are
    returned.
    """
    if partition_id and snapshot_id:
        rows = [(partition_id, snapshot_id,
                 'Thu, Jan 09 01:33:11 2020', '1')]
    else:
        rows = [
            (self.fake_partition_id[0], self.fake_snapshot_id[0],
             'Thu, Jan 09 01:33:11 2020', '1'),
            (self.fake_partition_id[0], self.fake_snapshot_id[1],
             'Thu, Jan 09 01:35:50 2020', '2'),
        ]
    entries = [{
        'Map': 'No',
        'Partition-ID': part,
        'SI-ID': snap,
        'Name': '---',
        'Activated-time': activated,
        'Index': index,
    } for part, snap, activated, index in rows]
    return (0, entries)
def get_fake_show_snapshot(self):
    """Return raw 'show snapshot' CLI output listing two snapshot images.

    The leading backslash/slash lines imitate the CLI's progress
    spinner.  The ``%s`` placeholders are filled with this fixture's
    snapshot and partition ids.
    """
    msg = r"""
CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
Return: 0x0000
\/\/\/-
\
/
-
\
/
-
\/-\/- Index SI-ID Name Partition-ID Map Activated-time
---------------------------------------------------------------------------------
1 %s --- %s No Thu, Jan 09 01:33:11 2020
2 %s --- %s No Thu, Jan 09 01:35:50 2020
CLI: Successful: 2 snapshot image(s) shown
Return: 0x0000
"""
    # Per row the substitution order is: snapshot id, then partition id
    # (matching the SI-ID / Partition-ID column order in the header).
    return msg % (self.fake_snapshot_id[0],
                  self.fake_partition_id[0],
                  self.fake_snapshot_id[1],
                  self.fake_partition_id[0])
def get_test_show_snapshot_detail_filled_block(self):
return (0, [{
'Mapped': 'Yes',
'Created-time': 'Wed, Jun 10 10:57:16 2015',
'ID': self.fake_snapshot_id[0],
'Last-modification-time': 'Wed, Jun 10 10:57:16 2015',
'Description': '---',
'Total-filled-block': '1',
'LV-ID': self.fake_lv_id[0],
'Activation-schedule-time': 'Not Actived',
'Mapping': 'CH:0/ID:0/LUN:1',
'Index': '1',
'Used': '0',
'Name': '---',
'Valid-filled-block': '0',
'Partition-ID': self.fake_partition_id[0],
}])
def get_test_show_snapshot_detail(self):
return (0, [{
'Mapped': 'Yes',
'Created-time': 'Wed, Jun 10 10:57:16 2015',
'ID': self.fake_snapshot_id[0],
'Last-modification-time': 'Wed, Jun 10 10:57:16 2015',
'Description': '---',
'Total-filled-block': '0',
'LV-ID': self.fake_lv_id[0],
'Activation-schedule-time': 'Not Actived',
'Mapping': 'CH:0/ID:0/LUN:1',
'Index': '1',
'Used': '0',
'Name': '---',
'Valid-filled-block': '0',
'Partition-ID': self.fake_partition_id[0],
}])
def get_fake_show_snapshot_detail(self):
    """Return raw detailed 'show snapshot' CLI output for one image.

    The ``%s`` placeholders are filled with this fixture's snapshot,
    partition and logical-volume ids, in that order.
    """
    msg = """
CLI: Successful: Device(UID:25090, Name:, Model:DS 1016RE) selected.
Return: 0x0000
ID: %s
Index: 1
Name: ---
Partition-ID: %s
LV-ID: %s
Created-time: Wed, Jun 10 10:57:16 2015
Last-modification-time: Wed, Jun 10 10:57:16 2015
Activation-schedule-time: Not Actived
Used: 0
Valid-filled-block: 0
Total-filled-block: 0
Description: ---
Mapped: Yes
Mapping: CH:0/ID:0/LUN:1
CLI: Successful: 1 snapshot image(s) shown
Return: 0x0000
"""
    return msg % (self.fake_snapshot_id[0],
                  self.fake_partition_id[0],
                  self.fake_lv_id[0])
def get_test_show_net(self):
    """Return a parsed network-port listing as ``(rc, entries)``.

    Six data ports (slot A/B pairs for channel ids 1, 2 and 4) take
    their IPv4 addresses from ``self.fake_data_port_ip``; the last
    entry is the management port (id 32) with a fixed address.
    """
    ports = [
        ('slotA', '10D02380DEEC', '1', self.fake_data_port_ip[0]),
        ('slotB', '10D02390DEEC', '1', self.fake_data_port_ip[1]),
        ('slotA', '10D02340DEEC', '2', self.fake_data_port_ip[2]),
        ('slotB', '10D02350DEEC', '2', self.fake_data_port_ip[3]),
        ('slotA', '10D02310DEEC', '4', self.fake_data_port_ip[4]),
        ('slotB', '10D02320DEEC', '4', self.fake_data_port_ip[5]),
        ('---', '10D023077124', '32', '172.27.1.1'),
    ]
    entries = [{
        'Slot': slot,
        'MAC': mac,
        'ID': channel_id,
        'IPv4': ipv4,
        'Mode': 'Disabled',
        'IPv6': '---',
    } for slot, mac, channel_id, ipv4 in ports]
    return (0, entries)
def get_fake_show_net(self):
    """Return raw 'show net' CLI output for six data ports plus mgmt port.

    The ``%s`` placeholders are filled with the six
    ``self.fake_data_port_ip`` addresses in order.
    """
    msg = """
CLI: Successful: Device(UID:77124, Name:, Model:DS S12F-G2852-6) selected.
Return: 0x0000
ID MAC Mode IPv4 Mode IPv6 Slot
---------------------------------------------------------------
1 10D02380DEEC DHCP %s Disabled --- slotA
1 10D02390DEEC DHCP %s Disabled --- slotB
2 10D02340DEEC DHCP %s Disabled --- slotA
2 10D02350DEEC DHCP %s Disabled --- slotB
4 10D02310DEEC DHCP %s Disabled --- slotA
4 10D02320DEEC DHCP %s Disabled --- slotB
32 10D023077124 DHCP 172.27.1.1 Disabled --- ---
CLI: Successful: 2 record(s) found
Return: 0x0000
"""
    # NOTE(review): footer claims "2 record(s) found" while seven rows
    # are listed — presumably copied verbatim from device output;
    # confirm the parser does not rely on this count.
    return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1],
                  self.fake_data_port_ip[2], self.fake_data_port_ip[3],
                  self.fake_data_port_ip[4], self.fake_data_port_ip[5])
| |
Player.white): 2294607763040053819,
(Point(row=9, col=8), Player.black): 5178250685268040999,
(Point(row=9, col=8), Player.white): 3364679157508528694,
(Point(row=9, col=9), Player.black): 3553967243152693124,
(Point(row=9, col=9), Player.white): 3068058057478630282,
(Point(row=9, col=10), Player.black): 6184238169682215319,
(Point(row=9, col=10), Player.white): 532168233786036918,
(Point(row=9, col=11), Player.black): 3092733305208152615,
(Point(row=9, col=11), Player.white): 8489026477172734834,
(Point(row=9, col=12), Player.black): 8466596885845881353,
(Point(row=9, col=12), Player.white): 233550669275107378,
(Point(row=9, col=13), Player.black): 8603581440897487625,
(Point(row=9, col=13), Player.white): 9034915872764336272,
(Point(row=9, col=14), Player.black): 8064684217439572746,
(Point(row=9, col=14), Player.white): 5194590206921700316,
(Point(row=9, col=15), Player.black): 4306432062946416892,
(Point(row=9, col=15), Player.white): 2309616088345580816,
(Point(row=9, col=16), Player.black): 8258188514420365838,
(Point(row=9, col=16), Player.white): 6138395356060930650,
(Point(row=9, col=17), Player.black): 6672053316594332959,
(Point(row=9, col=17), Player.white): 3638024551847104885,
(Point(row=9, col=18), Player.black): 5720854717145016673,
(Point(row=9, col=18), Player.white): 1549651879493766372,
(Point(row=9, col=19), Player.black): 5340785436349693495,
(Point(row=9, col=19), Player.white): 2405856358043503939,
(Point(row=10, col=1), Player.black): 6012164709255398847,
(Point(row=10, col=1), Player.white): 812907851686256787,
(Point(row=10, col=2), Player.black): 7949402815097777999,
(Point(row=10, col=2), Player.white): 4758655524963025727,
(Point(row=10, col=3), Player.black): 2008566195663888707,
(Point(row=10, col=3), Player.white): 5225240679930537767,
(Point(row=10, col=4), Player.black): 6015740187305539517,
(Point(row=10, col=4), Player.white): 4186688677666698354,
(Point(row=10, col=5), Player.black): 5719852225235663443,
(Point(row=10, col=5), Player.white): 826011302979319411,
(Point(row=10, col=6), Player.black): 8242258740436620776,
(Point(row=10, col=6), Player.white): 6778969168835768563,
(Point(row=10, col=7), Player.black): 1649291342741470337,
(Point(row=10, col=7), Player.white): 8384196651020892748,
(Point(row=10, col=8), Player.black): 3219805316829239618,
(Point(row=10, col=8), Player.white): 6596907969102854814,
(Point(row=10, col=9), Player.black): 962723493104342355,
(Point(row=10, col=9), Player.white): 8968650977280178689,
(Point(row=10, col=10), Player.black): 1468664499759050832,
(Point(row=10, col=10), Player.white): 914485010029843310,
(Point(row=10, col=11), Player.black): 8501539474215164558,
(Point(row=10, col=11), Player.white): 8798578597941390750,
(Point(row=10, col=12), Player.black): 329776455727647811,
(Point(row=10, col=12), Player.white): 6706312749522123953,
(Point(row=10, col=13), Player.black): 9203013348898942872,
(Point(row=10, col=13), Player.white): 1188765220685651133,
(Point(row=10, col=14), Player.black): 912994098216386387,
(Point(row=10, col=14), Player.white): 1842201333713261434,
(Point(row=10, col=15), Player.black): 6041960408571691593,
(Point(row=10, col=15), Player.white): 6450489929744294489,
(Point(row=10, col=16), Player.black): 1169223709141443575,
(Point(row=10, col=16), Player.white): 1357962677395804436,
(Point(row=10, col=17), Player.black): 4646077370747128681,
(Point(row=10, col=17), Player.white): 3704161794426167131,
(Point(row=10, col=18), Player.black): 4573468167625885166,
(Point(row=10, col=18), Player.white): 6511588956916800774,
(Point(row=10, col=19), Player.black): 4329635167829691310,
(Point(row=10, col=19), Player.white): 4923893081858853917,
(Point(row=11, col=1), Player.black): 4811888116128159477,
(Point(row=11, col=1), Player.white): 718588455940040384,
(Point(row=11, col=2), Player.black): 7674812639233200986,
(Point(row=11, col=2), Player.white): 4331023918212183699,
(Point(row=11, col=3), Player.black): 3897432141041501556,
(Point(row=11, col=3), Player.white): 7276519453137688286,
(Point(row=11, col=4), Player.black): 6510030071580938266,
(Point(row=11, col=4), Player.white): 8357803872914616079,
(Point(row=11, col=5), Player.black): 1733557852229300439,
(Point(row=11, col=5), Player.white): 1887858117103523412,
(Point(row=11, col=6), Player.black): 2050309162712522735,
(Point(row=11, col=6), Player.white): 5134667517624134734,
(Point(row=11, col=7), Player.black): 1935323196572736143,
(Point(row=11, col=7), Player.white): 823776149897020121,
(Point(row=11, col=8), Player.black): 5183697550720478054,
(Point(row=11, col=8), Player.white): 2806161877752714854,
(Point(row=11, col=9), Player.black): 639365824564049855,
(Point(row=11, col=9), Player.white): 3105484068242501121,
(Point(row=11, col=10), Player.black): 8892143254196528380,
(Point(row=11, col=10), Player.white): 5166469057964800694,
(Point(row=11, col=11), Player.black): 292605160015608161,
(Point(row=11, col=11), Player.white): 7514567951599881472,
(Point(row=11, col=12), Player.black): 5109949366761472505,
(Point(row=11, col=12), Player.white): 310762339342946547,
(Point(row=11, col=13), Player.black): 2104946795875927320,
(Point(row=11, col=13), Player.white): 1235460730147809452,
(Point(row=11, col=14), Player.black): 2127497427209123011,
(Point(row=11, col=14), Player.white): 7161002026604519025,
(Point(row=11, col=15), Player.black): 59233478003238444,
(Point(row=11, col=15), Player.white): 5970353674800714203,
(Point(row=11, col=16), Player.black): 6665616474731630602,
(Point(row=11, col=16), Player.white): 152650024831436796,
(Point(row=11, col=17), Player.black): 934842009288313548,
(Point(row=11, col=17), Player.white): 8355856797725665648,
(Point(row=11, col=18), Player.black): 1477828799968727451,
(Point(row=11, col=18), Player.white): 2901806850750856486,
(Point(row=11, col=19), Player.black): 7573653378761984057,
(Point(row=11, col=19), Player.white): 7716881570339655225,
(Point(row=12, col=1), Player.black): 2568918803525316736,
(Point(row=12, col=1), Player.white): 5628114972944723995,
(Point(row=12, col=2), Player.black): 4423011986110571904,
(Point(row=12, col=2), Player.white): 1004637634072591177,
(Point(row=12, col=3), Player.black): 5461707783998271703,
(Point(row=12, col=3), Player.white): 638790804607383716,
(Point(row=12, col=4), Player.black): 2724719862170090354,
(Point(row=12, col=4), Player.white): 4421566525462598702,
(Point(row=12, col=5), Player.black): 8648906312566693100,
(Point(row=12, col=5), Player.white): 8270055473898685871,
(Point(row=12, col=6), Player.black): 1915749061367669068,
(Point(row=12, col=6), Player.white): 4876832281198499170,
(Point(row=12, col=7), Player.black): 46140495477523623,
(Point(row=12, col=7), Player.white): 5007714131133085886,
(Point(row=12, col=8), Player.black): 8743444844221333325,
(Point(row=12, col=8), Player.white): 908456928534611497,
(Point(row=12, col=9), Player.black): 6983096771564974099,
(Point(row=12, col=9), Player.white): 7430779576557722399,
(Point(row=12, col=10), Player.black): 3152448216524677614,
(Point(row=12, col=10), Player.white): 3180436423603504440,
(Point(row=12, col=11), Player.black): 5269207326232247596,
(Point(row=12, col=11), Player.white): 9212597296607879019,
(Point(row=12, col=12), Player.black): 7233784749342434823,
(Point(row=12, col=12), Player.white): 5359592143185856960,
(Point(row=12, col=13), Player.black): 4245159514132041091,
(Point(row=12, col=13), Player.white): 7483203165114225163,
(Point(row=12, col=14), Player.black): 2594209199301730105,
(Point(row=12, col=14), Player.white): 4330511663839218487,
(Point(row=12, col=15), Player.black): 1756401805040965301,
(Point(row=12, col=15), Player.white): 8443740481163251139,
(Point(row=12, col=16), Player.black): 8532686399133135373,
(Point(row=12, col=16), Player.white): 8648108130553396024,
(Point(row=12, col=17), Player.black): 9029748126466626304,
(Point(row=12, col=17), Player.white): 366767265422515200,
(Point(row=12, col=18), Player.black): 5634370494228795321,
(Point(row=12, col=18), Player.white): 3028166990928692045,
(Point(row=12, col=19), Player.black): 6128886499703201992,
(Point(row=12, col=19), Player.white): 7223805971713766564,
(Point(row=13, col=1), Player.black): 2496253901433903144,
(Point(row=13, col=1), Player.white): 1551493066650163627,
(Point(row=13, col=2), Player.black): 2672840148010019750,
(Point(row=13, col=2), Player.white): 5994868224347809475,
(Point(row=13, col=3), Player.black): 4628383235437429108,
(Point(row=13, col=3), Player.white): 6064829097099335572,
(Point(row=13, col=4), Player.black): 5413486961964612412,
(Point(row=13, col=4), Player.white): 4045128829439945174,
(Point(row=13, col=5), Player.black): 4712295427500302995,
(Point(row=13, col=5), Player.white): 248402942764617862,
(Point(row=13, col=6), Player.black): 999667821658964017,
(Point(row=13, col=6), Player.white): 8820974176258972514,
(Point(row=13, col=7), Player.black): 3476986111144434482,
(Point(row=13, col=7), Player.white): 7942783663244623527,
(Point(row=13, col=8), Player.black): 4234845736483310370,
(Point(row=13, col=8), Player.white): 4216551395056226649,
(Point(row=13, col=9), Player.black): 177265600322039330,
(Point(row=13, col=9), Player.white): 8990080846893998186,
(Point(row=13, col=10), Player.black): 3147286474627477037,
(Point(row=13, col=10), Player.white): 6471007499395169134,
(Point(row=13, col=11), Player.black): 1936045838813747550,
(Point(row=13, col=11), Player.white): 4644151093964356408,
(Point(row=13, col=12), Player.black): 4435131806849743185,
(Point(row=13, col=12), Player.white): 54569717782380574,
(Point(row=13, col=13), Player.black): 1508359860504878315,
(Point(row=13, col=13), Player.white): 6891830587880295481,
(Point(row=13, col=14), Player.black): 3762684221329973240,
(Point(row=13, col=14), Player.white): 3017599301607054197,
(Point(row=13, col=15), Player.black): 7503851802710739479,
(Point(row=13, col=15), Player.white): 2388026796289288600,
(Point(row=13, col=16), Player.black): 4460949656925498045,
(Point(row=13, col=16), Player.white): 6121498320763557205,
(Point(row=13, col=17), Player.black): 5454912114241197983,
(Point(row=13, col=17), Player.white): 5677909618846838515,
(Point(row=13, col=18), Player.black): 5464352961587447142,
(Point(row=13, col=18), Player.white): 7114550787615396229,
(Point(row=13, col=19), Player.black): 3485712230029563806,
(Point(row=13, col=19), Player.white): 6446897201405604194,
(Point(row=14, col=1), Player.black): 7528920080599778132,
(Point(row=14, col=1), Player.white): 5669940686033599629,
(Point(row=14, col=2), Player.black): 5228752949828262741,
(Point(row=14, col=2), Player.white): 4897727087692140998,
(Point(row=14, col=3), Player.black): 8331520318636135471,
(Point(row=14, col=3), Player.white): 6240564650456223273,
(Point(row=14, col=4), Player.black): 4147571497243833121,
(Point(row=14, col=4), Player.white): 5287851769372107454,
(Point(row=14, col=5), Player.black): 3929856055856918635,
(Point(row=14, col=5), Player.white): 4942395218791830311,
(Point(row=14, col=6), Player.black): 3419131812142225330,
(Point(row=14, col=6), Player.white): 3946706912536054517,
(Point(row=14, col=7), Player.black): 2216854722824718528,
(Point(row=14, col=7), Player.white): 5751840383776158854,
(Point(row=14, col=8), Player.black): 6039954508464465971,
(Point(row=14, col=8), Player.white): 4880067768099333403,
(Point(row=14, col=9), Player.black): 1391277164658920957,
(Point(row=14, col=9), Player.white): 7451285533597502911,
(Point(row=14, col=10), Player.black): 9114931203345433206,
(Point(row=14, col=10), Player.white): 9151177955674363973,
(Point(row=14, col=11), Player.black): 5286716825816304093,
(Point(row=14, col=11), Player.white): 8299991677645306266,
(Point(row=14, col=12), Player.black): 5195464055129666925,
(Point(row=14, col=12), Player.white): 7360092883003763047,
(Point(row=14, col=13), Player.black): 8091126652869256455,
(Point(row=14, col=13), Player.white): 859632643926738720,
(Point(row=14, col=14), Player.black): 1956576428005199048,
(Point(row=14, col=14), Player.white): 3373854362230632022,
(Point(row=14, col=15), Player.black): 6142182356819019331,
(Point(row=14, col=15), Player.white): 599882345282402750,
(Point(row=14, col=16), Player.black): 6495330481411294612,
(Point(row=14, col=16), Player.white): 4678318561357907878,
(Point(row=14, col=17), Player.black): 1613163634528474933,
(Point(row=14, col=17), Player.white): 2149394070598892722,
(Point(row=14, col=18), Player.black): 3613731393981415918,
(Point(row=14, col=18), Player.white): 670492166825950545,
(Point(row=14, col=19), Player.black): 3576884676864696223,
(Point(row=14, col=19), Player.white): 8013845603496590037,
(Point(row=15, col=1), Player.black): 4444720605802559994,
(Point(row=15, col=1), Player.white): 4690308037902459056,
(Point(row=15, col=2), Player.black): 198794688626774049,
(Point(row=15, col=2), Player.white): 754101414028084990,
(Point(row=15, col=3), Player.black): 9031834267182088450,
(Point(row=15, col=3), Player.white): 4955142136487065963,
(Point(row=15, col=4), Player.black): 5950356114654535765,
(Point(row=15, col=4), Player.white): 1673708508551501787,
(Point(row=15, col=5), Player.black): 3059942019337751169,
(Point(row=15, col=5), Player.white): 6236001435181494065,
(Point(row=15, col=6), Player.black): 3840642542836249231,
(Point(row=15, col=6), Player.white): 4494437239109309561,
(Point(row=15, col=7), Player.black): 3418524991248282035,
(Point(row=15, col=7), Player.white): 4088710049109644123,
(Point(row=15, col=8), Player.black): 3370871771055988106,
(Point(row=15, col=8), Player.white): 6205297688850580126,
(Point(row=15, col=9), Player.black): 2729132856797520113,
(Point(row=15, col=9), Player.white): 1872963579855844925,
(Point(row=15, col=10), Player.black): 4796469616123085704,
(Point(row=15, col=10), Player.white): 2737956312985495735,
(Point(row=15, col=11), Player.black): 7425165379650361569,
(Point(row=15, col=11), Player.white): 4744913956310027049,
(Point(row=15, col=12), Player.black): 4432452777664043550,
(Point(row=15, col=12), Player.white): 7149390510594997576,
(Point(row=15, col=13), Player.black): 1127377434164446909,
(Point(row=15, col=13), Player.white): 7139372936332834674,
(Point(row=15, col=14), Player.black): 8623185287159014896,
(Point(row=15, col=14), Player.white): 2749311309393092071,
(Point(row=15, col=15), Player.black): 3889399041204032560,
(Point(row=15, col=15), Player.white): 5440568414932730887,
(Point(row=15, col=16), Player.black): 7961130044126580240,
(Point(row=15, col=16), Player.white): 5522153684761811918,
(Point(row=15, col=17), Player.black): 1051894275733779402,
(Point(row=15, col=17), Player.white): 7775123139195000322,
(Point(row=15, col=18), Player.black): 405047372528169449,
(Point(row=15, col=18), Player.white): 6777693792695027781,
(Point(row=15, col=19), Player.black): 2707900765333949110,
(Point(row=15, col=19), Player.white): 1748580443724313214,
(Point(row=16, col=1), Player.black): 2310585590428078863,
(Point(row=16, col=1), Player.white): 4217038584119906912,
(Point(row=16, col=2), Player.black): 985202624705196243,
(Point(row=16, col=2), Player.white): 9151409094950142193,
(Point(row=16, col=3), Player.black): 750140955532969349,
(Point(row=16, col=3), Player.white): 2226854903279338690,
(Point(row=16, col=4), Player.black): 1753443221335135301,
(Point(row=16, col=4), Player.white): 112641559000638460,
(Point(row=16, col=5), Player.black): 2469310201348258380,
(Point(row=16, col=5), Player.white): 2933066886110832647,
(Point(row=16, col=6), Player.black): 8760290595356794758,
(Point(row=16, col=6), Player.white): 3938325596318156201,
(Point(row=16, col=7), Player.black): 351392362800699236,
(Point(row=16, col=7), Player.white): 5965923125256290133,
(Point(row=16, col=8), Player.black): 4226241391525466462,
(Point(row=16, col=8), Player.white): 2296189491115798113,
(Point(row=16, col=9), Player.black): 5818906966146137497,
(Point(row=16, col=9), Player.white): 2978814809930535951,
(Point(row=16, col=10), Player.black): 5221313999467075534,
(Point(row=16, col=10), Player.white): 5525586090026314079,
(Point(row=16, col=11), Player.black): 7150888125699917692,
(Point(row=16, col=11), Player.white): 7967098806646820797,
(Point(row=16, col=12), Player.black): 2809819457402689956,
(Point(row=16, col=12), Player.white): 7425517738724027933,
(Point(row=16, col=13), Player.black): 9043041476972987773,
(Point(row=16, col=13), Player.white): 4126568890528841830,
(Point(row=16, col=14), Player.black): 405195601380630992,
(Point(row=16, col=14), Player.white): 2859364994541233088,
(Point(row=16, col=15), Player.black): 9100706054278752322,
(Point(row=16, col=15), Player.white): 2698432353369862483,
(Point(row=16, col=16), Player.black): 233570791189122902,
(Point(row=16, col=16), Player.white): 1955797661194269678,
(Point(row=16, col=17), Player.black): 3215444208293145815,
(Point(row=16, col=17), Player.white): 7749899566907810501,
(Point(row=16, col=18), Player.black): 4434257155996611207,
(Point(row=16, col=18), Player.white): 5524138211073814901,
(Point(row=16, col=19), Player.black): 3879442844657515242,
(Point(row=16, col=19), Player.white): 4095000128396542164,
(Point(row=17, col=1), Player.black): 3117304355126357036,
(Point(row=17, col=1), Player.white): 8288379997552988803,
(Point(row=17, col=2), Player.black): 5322299816902250572,
(Point(row=17, col=2), Player.white): 7955260886908761297,
(Point(row=17, col=3), Player.black): 7425435021255007715,
(Point(row=17, col=3), Player.white): 3941956462330213296,
(Point(row=17, col=4), Player.black): 1667932789968984978,
(Point(row=17, col=4), Player.white): 7761984596295380582,
(Point(row=17, col=5), Player.black): 2614708018742137051,
(Point(row=17, col=5), Player.white): 5622728983365429270,
(Point(row=17, col=6), Player.black): 3166135214757456351,
(Point(row=17, col=6), Player.white): 1820234063889877538,
(Point(row=17, col=7), Player.black): 7600848454149943870,
(Point(row=17, col=7), Player.white): 2753551240055896921,
(Point(row=17, col=8), Player.black): 1106934710738997852,
(Point(row=17, col=8), Player.white): 4526974971888515393,
(Point(row=17, col=9), Player.black): 6549127170110706395,
(Point(row=17, col=9), Player.white): 5670307432083885437,
(Point(row=17, col=10), Player.black): 7309255636469744981,
(Point(row=17, col=10), Player.white): 4822125742706822460,
(Point(row=17, col=11), Player.black): 7220686973131194088,
(Point(row=17, col=11), Player.white): 3764391926682480440,
(Point(row=17, col=12), Player.black): 2913903513213258547,
(Point(row=17, col=12), Player.white): 6507070047539847251,
(Point(row=17, col=13), Player.black): 7872528269466684417,
(Point(row=17, col=13), Player.white): 4192693152170512736,
(Point(row=17, col=14), Player.black): 4559365332905827812,
(Point(row=17, col=14), Player.white): 3554627099004559818,
(Point(row=17, col=15), Player.black): 2545070106672825097,
(Point(row=17, col=15), | |
import os
import json
import tempfile
import logging
import contextlib
from maya import cmds
from . import capsule, xgen
from .vendor import capture
from .. import utils
log = logging.getLogger(__name__)
def export_fbx(out_path, selected=True):
    """Export the current selection (or the whole scene) to an FBX file.

    Args:
        out_path (str): Destination file path.
        selected (bool): Export only the selection when True.
    """
    from pymel.core import mel
    try:
        mel.FBXExport(f=out_path, s=selected)
    finally:
        # Always restore the FBX export preferences afterwards.
        mel.FBXResetExport()
@contextlib.contextmanager
def export_fbx_set_pointcache(cache_set_name):
    """Context manager preparing an FBX geometry-cache export.

    Creates a quick-select set named *cache_set_name* from the current
    selection, configures the FBX exporter to bake a point cache for
    that set, and deletes the set again on exit (even on error).
    """
    selection_set = cmds.sets(cmds.ls(sl=True), name=cache_set_name)
    cache_options = {
        "log": False,
        "ascii": True,
        "cameras": False,
        "lights": False,
        "cache_file": True,
        "cache_set": cache_set_name,
        "anim_only": False,
        "bake_anim": True,
        "key_reduce": True,
        "shapes": False,
        "skins": False,
        "input_conns": False,
    }
    fbx_export_settings(reset=True, **cache_options)
    try:
        yield
    finally:
        cmds.delete(selection_set)
def export_fbx_set_camera():
    """Reset FBX export preferences and enable camera-only export."""
    camera_options = {
        "log": False,
        "ascii": True,
        "cameras": True,
        "lights": False,
    }
    fbx_export_settings(reset=True, **camera_options)
def fbx_export_settings(reset=False, **kwargs):
    """Apply FBX export preferences through pymel's FBXExport* commands.

    Args:
        reset (bool): When True, restore all FBX export preferences to
            their defaults (``FBXResetExport``) before applying *kwargs*.
        **kwargs: Friendly option name -> value pairs.  Each name is
            translated to its ``FBXExport*`` command via the map below
            and invoked as ``command(v=value)``.

    Raises:
        KeyError: If an option name is not in the supported map.
    """
    from pymel.core import mel as pymel

    if reset:
        pymel.FBXResetExport()

    fbx_export_cmd_map = {
        "log": pymel.FBXExportGenerateLog,
        "ascii": pymel.FBXExportInAscii,
        "version": pymel.FBXExportFileVersion,
        "cameras": pymel.FBXExportCameras,
        "lights": pymel.FBXExportLights,
        "instances": pymel.FBXExportInstances,
        "referenced": pymel.FBXExportReferencedAssetsContent,
        "smoothing_groups": pymel.FBXExportSmoothingGroups,
        "smooth_mesh": pymel.FBXExportSmoothMesh,
        "tangents": pymel.FBXExportTangents,
        "triangulate": pymel.FBXExportTriangulate,
        "hardEdges": pymel.FBXExportHardEdges,
        "constraints": pymel.FBXExportConstraints,
        "input_conns": pymel.FBXExportInputConnections,
        "shapes": pymel.FBXExportShapes,
        "skins": pymel.FBXExportSkins,
        "skeleton": pymel.FBXExportSkeletonDefinitions,
        "anim_only": pymel.FBXExportAnimationOnly,
        "cache_file": pymel.FBXExportCacheFile,
        "cache_set": pymel.FBXExportQuickSelectSetAsCache,
        "bake_anim": pymel.FBXExportBakeComplexAnimation,
        "bake_start": pymel.FBXExportBakeComplexStart,
        "bake_end": pymel.FBXExportBakeComplexEnd,
        "bake_step": pymel.FBXExportBakeComplexStep,
        "bake_resample_all": pymel.FBXExportBakeResampleAll,
        "key_reduce": pymel.FBXExportApplyConstantKeyReducer,
    }

    for key, value in kwargs.items():
        try:
            setter = fbx_export_cmd_map[key]
        except KeyError:
            # Same exception type as before, but now it names the
            # offending option instead of surfacing a bare KeyError.
            raise KeyError("Unknown FBX export option: %r" % key)
        setter(v=value)
# The maya alembic export types
# Whitelist of AbcExport job flags accepted by `export_alembic`, mapped to
# the Python type(s) each value must have.  Options not listed here are
# silently discarded by `export_alembic`'s validation loop.
_alembic_options = {
    "startFrame": (int, float),
    "endFrame": (int, float),
    "frameRange": str,  # "start end"; overrides startFrame & endFrame
    "eulerFilter": bool,
    "frameRelativeSample": float,
    "noNormals": bool,
    "renderableOnly": bool,
    "step": float,
    "stripNamespaces": bool,
    "uvWrite": bool,
    "writeUVSets": bool,
    "wholeFrameGeo": bool,
    "worldSpace": bool,
    "writeVisibility": bool,
    "writeColorSets": bool,
    "writeFaceSets": bool,
    "writeCreases": bool,  # Maya 2015 Ext1+
    "dataFormat": str,
    "root": (list, tuple),
    "attr": (list, tuple),
    "attrPrefix": (list, tuple),
    "userAttr": (list, tuple),
    "melPerFrameCallback": str,
    "melPostJobCallback": str,
    "pythonPerFrameCallback": str,
    "pythonPostJobCallback": str,
    "selection": bool
}
def export_alembic(file,
                   startFrame=None,
                   endFrame=None,
                   selection=True,
                   uvWrite=True,
                   writeUVSets=True,
                   eulerFilter=False,
                   writeVisibility=True,
                   dataFormat="ogawa",
                   verbose=False,
                   **kwargs):
    """Extract a single Alembic Cache. (modified, from colorbleed config)

    Arguments:
        startFrame (float): Start frame of output. Ignored if `frameRange`
            provided.
        endFrame (float): End frame of output. Ignored if `frameRange`
            provided.
        frameRange (tuple or str): Two-tuple with start and end frame or a
            string formatted as: "startFrame endFrame". This argument
            overrides `startFrame` and `endFrame` arguments.
        dataFormat (str): The data format to use for the cache,
            defaults to "ogawa"
        verbose (bool): When on, outputs frame number information to the
            Script Editor or output window during extraction.
        noNormals (bool): When on, normal data from the original polygon
            objects is not included in the exported Alembic cache file.
        renderableOnly (bool): When on, any non-renderable nodes or
            hierarchy, such as hidden objects, are not included in the
            Alembic file. Defaults to False.
        stripNamespaces (bool): When on, any namespaces associated with the
            exported objects are removed from the Alembic file. For example,
            an object with the namespace taco:foo:bar appears as bar in the
            Alembic file.
        uvWrite (bool): When on, UV data from polygon meshes and subdivision
            objects are written to the Alembic file. Only the current UV map
            is included.
        worldSpace (bool): When on, the top node in the node hierarchy is
            stored as world space. By default, these nodes are stored as
            local space. Defaults to False.
        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered
            with an Euler filter. Euler filtering helps resolve
            irregularities in rotations especially if X, Y, and Z rotations
            exceed 360 degrees. Defaults to False.
        writeVisibility (bool): If this flag is present, visibility state
            will be stored in the Alembic file.
            Otherwise everything written out is treated as visible.
        wholeFrameGeo (bool): When on, geometry data at whole frames is
            sampled and written to the file. When off (default), geometry
            data is sampled at sub-frames and written to the file.

    Examples: (Copied from MEL cmd `AbcExport -help`)

        AbcExport -j
        "-root |group|foo -root |test|path|bar -file /tmp/test.abc"
        Writes out everything at foo and below and bar and below to
        `/tmp/test.abc`.
        foo and bar are siblings parented to the root of the Alembic scene.

        AbcExport -j
        "-frameRange 1 5 -step 0.5 -root |group|foo -file /tmp/test.abc"
        Writes out everything at foo and below to `/tmp/test.abc` sampling
        at frames: 1 1.5 2 2.5 3 3.5 4 4.5 5

        AbcExport -j
        "-fr 0 10 -frs -0.1 -frs 0.2 -step 5 -file /tmp/test.abc"
        Writes out everything in the scene to `/tmp/test.abc` sampling at
        frames: -0.1 0.2 4.9 5.2 9.9 10.2
        Note: The difference between your highest and lowest
        frameRelativeSample can not be greater than your step size.

        AbcExport -j
        "-step 0.25 -frs 0.3 -frs 0.60 -fr 1 5 -root foo -file test.abc"
        Is illegal because the highest and lowest frameRelativeSamples are
        0.3 frames apart.

        AbcExport -j
        "-sl -root |group|foo -file /tmp/test.abc"
        Writes out all selected nodes and it's ancestor nodes including up
        to foo.
        foo will be parented to the root of the Alembic scene.

    (NOTE) About alembic selection export

        Say we have a hierarchy `A > B > C > D > E`, A is root and E is leaf.

        when the export cmd is "-sl -root |A|B|C" and we select D, then we
        will get `C > D` exported.

        when the export cmd is "-sl" and we select D, then we will get
        `A > B > C > D` exported.

        when the export cmd is "-root |A|B|C", then we will get `C > D > E`
        exported.

        As you can see, flag `-sl` and `-root` are kind of end point and
        start point of the DAG chain.

        If there are multiple `-root`, and `-sl` has given, each root node
        must have it's descendant node been selected, or the root will not
        be exported.

    (NOTE) Scene unit

        Looks like Alembic in Maya always stores transformation in
        `centimeter`, and does not scale the value when importing into
        scenes that units are NOT set to `centimeter`. But does save
        deformation in absolute form.

        So one MUST ensure the scene units is set to `centimeter` when
        importing transformation (or connect a unitConversion node),
        otherwise the transform value will be incorrect.

    """
    # Ensure alembic exporter is loaded
    cmds.loadPlugin('AbcExport', quiet=True)

    # Alembic Exporter requires forward slashes
    file = file.replace('\\', '/')

    # Pass the start and end frame on as `frameRange` so that it
    # never conflicts with that argument
    if "frameRange" not in kwargs:
        # Fallback to maya timeline if no start or end frame provided.
        if startFrame is None:
            startFrame = cmds.playbackOptions(query=True, minTime=True)
        if endFrame is None:
            endFrame = cmds.playbackOptions(query=True, maxTime=True)

        # Explicit raise instead of `assert` so validation survives
        # running under `python -O`.
        if not isinstance(startFrame, _alembic_options["startFrame"]):
            raise TypeError("startFrame must be int or float, got: "
                            "{0!r}".format(startFrame))
        if not isinstance(endFrame, _alembic_options["endFrame"]):
            raise TypeError("endFrame must be int or float, got: "
                            "{0!r}".format(endFrame))
        kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame)
    else:
        # Allow conversion from tuple for `frameRange`
        frame_range = kwargs["frameRange"]
        if isinstance(frame_range, (list, tuple)):
            if len(frame_range) != 2:
                raise ValueError("frameRange must have exactly two values, "
                                 "got: {0!r}".format(frame_range))
            kwargs["frameRange"] = "{0} {1}".format(frame_range[0],
                                                    frame_range[1])

    # Assemble options
    options = {
        "selection": selection,
        "uvWrite": uvWrite,
        "writeUVSets": writeUVSets,
        "eulerFilter": eulerFilter,
        "writeVisibility": writeVisibility,
        "dataFormat": dataFormat
    }
    options.update(kwargs)

    # Validate options
    for key, value in options.copy().items():

        # Discard unknown options
        if key not in _alembic_options:
            options.pop(key)
            continue

        # Validate value type
        valid_types = _alembic_options[key]
        if not isinstance(value, valid_types):
            raise TypeError("Alembic option unsupported type: "
                            "{0} (expected {1})".format(value, valid_types))

    # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
    # `about -version` may return strings like "2018", "2020.4" or
    # "2016 Extension 2"; parse only the leading integer so int() cannot
    # raise on the suffix.
    maya_version = int(cmds.about(version=True).split()[0].split('.')[0])
    if maya_version >= 2018:
        options['autoSubd'] = options.pop('writeCreases', False)

    # Format the job string from options
    job_args = list()
    for key, value in options.items():
        if isinstance(value, (list, tuple)):
            for entry in value:
                job_args.append("-{} {}".format(key, entry))
        elif isinstance(value, bool):
            # Add only when state is set to True
            if value:
                job_args.append("-{0}".format(key))
        else:
            job_args.append("-{0} {1}".format(key, value))

    job_str = " ".join(job_args)
    job_str += ' -file "%s"' % file

    # Ensure output directory exists. `dirname` is empty when `file` has
    # no directory component; `os.makedirs("")` would raise, so skip it.
    parent_dir = os.path.dirname(file)
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    if verbose:
        log.debug("Preparing Alembic export with options: %s",
                  json.dumps(options, indent=4))
        log.debug("Extracting Alembic with job arguments: %s", job_str)

    # Perform extraction
    print("Alembic Job Arguments : {}".format(job_str))

    # Disable the parallel evaluation temporarily to ensure no buggy
    # exports are made. (PLN-31)
    # TODO: Make sure this actually fixes the issues
    with capsule.evaluation("off"):
        cmds.AbcExport(j=job_str, verbose=verbose)

    if verbose:
        log.debug("Extracted Alembic to: %s", file)

    return file
def export_gpu(out_path, startFrame, endFrame):
    """Export the current selection to a GPU cache (.abc) file.

    Args:
        out_path (str): Destination file path. Its directory and extension-less
            base name are passed to ``cmds.gpuCache`` separately.
        startFrame (int or float): First frame of the cached range.
        endFrame (int or float): Last frame of the cached range.
    """
    # Ensure the gpuCache exporter plug-in is loaded (the original comment
    # wrongly said "alembic exporter"); quiet=True avoids script-editor spam.
    cmds.loadPlugin("gpuCache", quiet=True)
    # Keep parity with the Alembic extractor above: make sure the output
    # directory exists before Maya tries to write into it.
    parent_dir = os.path.dirname(out_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    cmds.gpuCache(cmds.ls(sl=True, long=True),
                  startTime=startFrame,
                  endTime=endFrame,
                  optimize=True,
                  optimizationThreshold=40000,
                  writeMaterials=True,
                  writeUVs=True,
                  # "ogawa" is the modern, compact Alembic data format
                  dataFormat="ogawa",
                  saveMultipleFiles=False,
                  directory=os.path.dirname(out_path),
                  fileName=os.path.splitext(os.path.basename(out_path))[0]
                  )
def wrap_gpu(wrapper_path, gpu_files):
"""Wrapping GPU caches into a MayaAscii file
(NOTE) The file path of `gpu_files` should be a relative path, relative to
`wrapper_path`.
For example:
```python
wrapper_path = | |
= 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
def test_int_long_conversion(self):
class Perm(Flag):
EXEC = 1 << 0
WRITE = 1 << 1
READ = 1 << 2
MSB32 = 1 << 31
MSB64 = 1 << 63
# 32-bit system test
self.assertEqual(Perm.MSB32, Perm(0x80000000))
self.assertEqual(Perm.WRITE|Perm.MSB32, Perm(0x80000002))
# 64-bit system test
self.assertEqual(Perm.MSB64, Perm(0x8000000000000000))
self.assertEqual(Perm.MSB64|Perm.WRITE, Perm(0x8000000000000002))
class TestIntFlag(TestCase):
"""Tests of the IntFlags."""
    def setUp(self):
        """Create the IntFlag fixtures shared by the tests below."""
        # Classic permission bits; _order_ pins member definition order.
        class Perm(IntFlag):
            _order_ = 'R W X'
            R = 1 << 2
            W = 1 << 1
            X = 1 << 0
        #
        # Color mixes a zero member, single bits, and a composite alias.
        class Color(IntFlag):
            BLACK = 0
            RED = 1
            GREEN = 2
            BLUE = 4
            PURPLE = RED|BLUE
        #
        # Open deliberately breaks flag conventions: overlapping values
        # (AC == WO|RW) plus a large isolated bit.
        class Open(IntFlag):
            "not a good flag candidate"
            RO = 0
            WO = 1
            RW = 2
            AC = 3
            CE = 1<<19
        #
        self.Perm = Perm
        self.Color = Color
        self.Open = Open
    def test_set_name(self):
        """__set_name__ is invoked for descriptors defined in an Enum body."""
        class Descriptor(object):
            # minimal data descriptor that records its attribute name
            name = None
            def __get__(self, instance, owner_class=None):
                if instance is None:
                    return self
                else:
                    return instance.__dict__[self.name]
            def __set__(self, instance, value):
                instance.__dict__[self.name] = value
            def __set_name__(self, owner, name):
                self.name = name
        #
        class AnEnum(Enum):
            ONE = 1
            two = Descriptor()
        #
        # the descriptor was not turned into a member...
        self.assertEqual(list(AnEnum), [AnEnum.ONE])
        # ...but __set_name__ ran, and it behaves as a normal data descriptor
        self.assertEqual(AnEnum.two.name, 'two')
        AnEnum.ONE.two = 'three'
        self.assertEqual(AnEnum.ONE.two, 'three')
        self.assertEqual(AnEnum.ONE.__dict__['two'], 'three')
def test_private_names(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(len(Private), 0)
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertFalse(isinstance(Private._Private__corporal, Enum))
self.assertEqual(Private._Private__major_, 'Hoolihan')
self.assertFalse(isinstance(Private._Private__major_, Enum))
def test_membership(self):
Color = self.Color
Open = self.Open
self.assertRaises(TypeError, lambda: 'GREEN' in Color)
self.assertRaises(TypeError, lambda: 'RW' in Open)
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
self.assertRaises(TypeError, lambda: 2 in Color)
self.assertRaises(TypeError, lambda: 2 in Open)
def test_member_contains(self):
Color = self.Color
self.assertRaises(TypeError, lambda: 'test' in Color.RED)
self.assertRaises(TypeError, lambda: 1 in Color.RED)
self.assertTrue(Color.RED in Color.RED)
self.assertTrue(Color.RED in Color.PURPLE)
def test_name_lookup(self):
Color = self.Color
self.assertTrue(Color.RED is Color['RED'])
self.assertTrue(Color.RED|Color.GREEN is Color['RED|GREEN'])
self.assertTrue(Color.PURPLE is Color['RED|BLUE'])
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
    def test_str(self):
        """str() output for members, compositions, inversions, and zero."""
        Perm = self.Perm
        self.assertEqual(str(Perm.R), 'Perm.R')
        self.assertEqual(str(Perm.W), 'Perm.W')
        self.assertEqual(str(Perm.X), 'Perm.X')
        self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
        self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
        self.assertEqual(str(Perm(0)), 'Perm(0)')
        # inversion is displayed as the remaining set bits
        self.assertEqual(str(~Perm.R), 'Perm.W|X')
        self.assertEqual(str(~Perm.W), 'Perm.R|X')
        self.assertEqual(str(~Perm.X), 'Perm.R|W')
        self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
        self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
        self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
        # Open has zero/overlapping values, so str() collapses aliases
        Open = self.Open
        self.assertEqual(str(Open.RO), 'Open.RO')
        self.assertEqual(str(Open.WO), 'Open.WO')
        self.assertEqual(str(Open.AC), 'Open.AC')
        self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
        self.assertEqual(str(Open.WO | Open.CE), 'Open.WO|CE')
        self.assertEqual(str(~Open.RO), 'Open.WO|RW|CE')
        self.assertEqual(str(~Open.WO), 'Open.RW|CE')
        self.assertEqual(str(~Open.AC), 'Open.CE')
        self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
        self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
    def test_repr_strict(self):
        """With a STRICT boundary, out-of-range bit patterns raise ValueError."""
        class Perm(IntFlag):
            _order_ = 'R W X'
            R = 1 << 2
            W = 1 << 1
            X = 1 << 0
        Perm._boundary_ = aenum.STRICT
        self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
        self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
        self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
        self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
        self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
        self.assertEqual(repr(Perm(0)), '<Perm: 0>')
        self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
        self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
        self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
        self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
        self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm: 0>')
        #
        # any value containing bits outside R|W|X is rejected outright
        with self.assertRaisesRegex(ValueError, r'invalid value: 12'):
            repr(Perm.R | 8)
        with self.assertRaisesRegex(ValueError, r'invalid value: 12'):
            repr(~(Perm.R | 8))
        with self.assertRaisesRegex(ValueError, r'invalid value: -9'):
            repr(Perm(~8))
    def test_repr_conform(self):
        """With a CONFORM boundary, unknown bits are silently masked off."""
        class Perm(IntFlag):
            _order_ = 'R W X'
            R = 1 << 2
            W = 1 << 1
            X = 1 << 0
        Perm._boundary_ = aenum.CONFORM
        self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
        self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
        self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
        self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
        self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
        self.assertEqual(repr(Perm(0)), '<Perm: 0>')
        self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
        self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
        self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
        self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
        self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm: 0>')
        # bit 8 is not a defined flag, so it is dropped from the result
        self.assertEqual(repr(Perm.R | 8), '<Perm.R: 4>')
        self.assertEqual(repr(Perm(8)), '<Perm: 0>')
        self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: 3>')
        self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: 7>')
    def test_repr_eject(self):
        """With an EJECT boundary, out-of-range results fall back to plain int."""
        class Perm(IntFlag):
            _order_ = 'R W X'
            _boundary_ = EJECT
            R = 1 << 2
            W = 1 << 1
            X = 1 << 0
        self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
        self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
        self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
        self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
        self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
        self.assertEqual(repr(Perm(0)), '<Perm: 0>')
        self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
        self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
        self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
        self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
        self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm: 0>')
        # values with unknown bits are "ejected" as bare ints
        self.assertEqual(repr(Perm.R | 8), '12')
        self.assertEqual(repr(Perm(8)), '8')
        self.assertEqual(repr(~(Perm.R | 8)), '-13')
        self.assertEqual(repr(Perm(~8)), '-9')
    def test_repr_open(self):
        """repr() of the overlap-heavy Open flag under each boundary policy."""
        # STRICT: unknown bits (e.g. 4) raise ValueError
        class Open(IntFlag):
            "not a good flag candidate"
            RO = 0
            WO = 1
            RW = 2
            AC = 3
            CE = 1<<19
        Open._boundary_ = aenum.STRICT
        self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
        self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
        self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
        self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
        self.assertEqual(repr(Open.WO | Open.CE), '<Open.WO|CE: 524289>')
        self.assertEqual(repr(~Open.RO), '<Open.WO|RW|CE: 524291>')
        self.assertEqual(repr(~Open.WO), '<Open.RW|CE: 524290>')
        self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
        self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
        self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
        with self.assertRaisesRegex(ValueError, r'invalid value: -5'):
            repr(Open(~4))
        with self.assertRaisesRegex(ValueError, r'invalid value: 4'):
            repr(Open(4))
        #
        # CONFORM: unknown bits are masked away
        class Open(IntFlag):
            "not a good flag candidate"
            RO = 0
            WO = 1
            RW = 2
            AC = 3
            CE = 1<<19
        Open._boundary_ = aenum.CONFORM
        self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
        self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
        self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
        self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
        self.assertEqual(repr(Open.WO | Open.CE), '<Open.WO|CE: 524289>')
        self.assertEqual(repr(~Open.RO), '<Open.WO|RW|CE: 524291>')
        self.assertEqual(repr(~Open.WO), '<Open.RW|CE: 524290>')
        self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
        self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
        self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
        self.assertEqual(repr(Open(~4)), '<Open.WO|RW|CE: 524291>')
        self.assertEqual(repr(Open(4)), '<Open.RO: 0>')
        #
        # EJECT: out-of-range values come back as bare ints
        class Open(IntFlag):
            "not a good flag candidate"
            RO = 0
            WO = 1
            RW = 2
            AC = 3
            CE = 1<<19
        Open._boundary_ = aenum.EJECT
        self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
        self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
        self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
        self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
        self.assertEqual(repr(Open.WO | Open.CE), '<Open.WO|CE: 524289>')
        self.assertEqual(repr(~Open.RO), '<Open.WO|RW|CE: 524291>')
        self.assertEqual(repr(~Open.WO), '<Open.RW|CE: 524290>')
        self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
        self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
        self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
        self.assertEqual(repr(Open(~4)), '-5')
        self.assertEqual(repr(Open(4)), '4')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
    def test_and(self):
        """`&` across members, composites, ints keeps value and type correct."""
        Perm = self.Perm
        RW = Perm.R | Perm.W
        RX = Perm.R | Perm.X
        WX = Perm.W | Perm.X
        RWX = Perm.R | Perm.W | Perm.X
        values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
        # every pairing of single members, composites, and the empty flag
        for i in values:
            for j in values:
                self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
                self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
                self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
            # mixing with plain ints, in both operand orders
            for j in range(8):
                self.assertEqual(i & j, i.value & j)
                self.assertEqual((i & j).value, i.value & j)
                self.assertIs(type(i & j), Perm)
                self.assertEqual(j & i, j & i.value)
                self.assertEqual((j & i).value, j & i.value)
                self.assertIs(type(j & i), Perm)
        # AND with self or the full mask returns the identical member object
        for i in Perm:
            self.assertIs(i & i, i)
            self.assertIs(i & 7, i)
            self.assertIs(7 & i, i)
        Open = self.Open
        self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, | |
a voice channel'.format(ctx.author.name), color=color)
embed.set_author(name="Voice", icon_url=url_author_music)
await ctx.send(embed=embed)
else:
embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
embed.set_author(name="Walkman™", icon_url=url_author_music)
await ctx.send(embed=embed)
except AttributeError:
embed = discord.Embed(description='I am not connected to a voice channel'.format(ctx.author.name), color=color)
embed.set_author(name="Voice", icon_url=url_author_music)
await ctx.send(embed=embed)
@bot.command(aliases=["songinfo"])
async def fetch_current_song(ctx):
    """Show details for the song at the current queue index, with player controls."""
    number_of_requests()
    global server_index
    operation = "SELECT * FROM music_queue WHERE server={}".format(str(ctx.guild.id))
    cursor.execute(operation)
    server_queue = cursor.fetchall()
    if len(server_queue) <= 0:
        embed = discord.Embed(description="There are no songs in the queue currently 🤔")
        embed.set_author(name="Uh oh...", icon_url=url_author_music)
        await ctx.send(embed=embed)
    else:
        try:
            index = server_index[str(ctx.guild.id)]
            song_name, song_url = server_queue[index][0], server_queue[index][1]
            # Build the YouTube object once: the original constructed a fresh
            # pytube.YouTube for every property, re-fetching video metadata
            # over the network each time.
            yt = pytube.YouTube(url=song_url)
            embed = discord.Embed(description="**Song: **{a}\n**Index: **{b}\n**Views: **{c}\n**Description: **\n{d}".format(a=song_name, b=index, c=yt.views, d=yt.description), color=color)
            embed.set_thumbnail(url=yt.thumbnail_url)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            embed.set_author(name="Currently Playing", icon_url=url_author_music)
            player = await ctx.send(embed=embed)
            # Player-control reactions, in display order: previous, resume,
            # pause, next, repeat, stop, shuffle, current song, move up/down.
            for reaction in ("⏮", "▶", "⏸", "⏭", "🔂", "⏹", "🔀", "*️⃣", "🔼", "🔽"):
                await player.add_reaction(reaction)
        except Exception:
            # no current index for this guild (nothing played yet)
            embed = discord.Embed(description="Looks like you weren't playing anything before this so there is no current song. Use _p <name> / <index> to set a current song", color=color)
            embed.set_author(name="Uh oh...", icon_url=url_author_music)
            await ctx.send(embed=embed)
@bot.command(aliases=["prev","previous"])
async def previous_song(ctx):
    """Step the queue index back one song and play it."""
    number_of_requests()
    global server_index
    server_index[str(ctx.guild.id)] -= 1
    operation = "SELECT * FROM music_queue WHERE server={}".format(str(ctx.guild.id))
    cursor.execute(operation)
    server_queue = cursor.fetchall()
    voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
    if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
        try:
            index = server_index[str(ctx.guild.id)]
            URL_queue = youtube_download(ctx, server_queue[index][1])
            # The two original branches were identical except for voice.stop();
            # stop any current playback, then use a single play path.
            if ctx.voice_client.is_playing():
                voice.stop()
            # One pytube.YouTube instead of five: each fresh object re-fetches
            # video metadata over the network.
            yt = pytube.YouTube(url=server_queue[index][1])
            embed = discord.Embed(description="**Song: **{}".format(server_queue[index][0]).replace(" - YouTube", " "), color=color)
            embed.set_author(name="Now playing", icon_url=url_author_music)
            embed.set_thumbnail(url=yt.thumbnail_url)
            embed.add_field(name="Uploader", value=yt.author, inline=True)
            embed.add_field(name="Duration", value=time_converter(yt.length), inline=True)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            player = await ctx.send(embed=embed)
            voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
            # player-control reactions, in display order
            for reaction in ("⏮", "▶", "⏸", "⏭", "🔂", "⏹", "🔀", "*️⃣", "🔼", "🔽"):
                await player.add_reaction(reaction)
        except IndexError:
            embed = discord.Embed(description="Looks like there is no song at this index", color=color)
            embed.set_author(name="Oops...", icon_url=url_author_music)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
    else:
        embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
        embed.set_author(name="Walkman™", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["rep","repeat"])
async def repeat_song(ctx):
    """Replay the song at the current queue index from the beginning."""
    # Consistency fix: every sibling music command counts its invocation;
    # this one was missing the call.
    number_of_requests()
    operation = "SELECT * FROM music_queue WHERE server={}".format(str(ctx.guild.id))
    cursor.execute(operation)
    server_queue = cursor.fetchall()
    voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
    # NOTE(review): unlike its siblings, this command never verifies that the
    # author is in the voice channel -- confirm whether that is intentional.
    try:
        index = server_index[str(ctx.guild.id)]
        URL_queue = youtube_download(ctx, server_queue[index][1])
        # Both original branches were identical except for voice.stop().
        if ctx.voice_client.is_playing():
            voice.stop()
        # one metadata fetch instead of one per property access
        yt = pytube.YouTube(url=server_queue[index][1])
        embed = discord.Embed(description="**Song: **{}".format(server_queue[index][0]).replace(" - YouTube", " "), color=color)
        embed.set_author(name="Repeating Song", icon_url=url_author_music)
        embed.set_thumbnail(url=yt.thumbnail_url)
        embed.add_field(name="Uploader", value=yt.author, inline=True)
        embed.add_field(name="Duration", value=time_converter(yt.length), inline=True)
        embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
        player = await ctx.send(embed=embed)
        voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
        # player-control reactions, in display order
        for reaction in ("⏮", "▶", "⏸", "⏭", "🔂", "⏹", "🔀", "*️⃣", "🔼", "🔽"):
            await player.add_reaction(reaction)
    except Exception as e:
        embed = discord.Embed(description=str(e), color=color)
        embed.set_author(name="Error", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["skip","next"])
async def skip_song(ctx):
    """Advance the queue index by one song and play it."""
    number_of_requests()
    global server_index
    server_index[str(ctx.guild.id)] += 1
    operation = "SELECT * FROM music_queue WHERE server={}".format(str(ctx.guild.id))
    cursor.execute(operation)
    server_queue = cursor.fetchall()
    voice = discord.utils.get(bot.voice_clients, guild=ctx.guild)
    if ctx.author.id in [member.id for member in ctx.voice_client.channel.members]:
        try:
            index = server_index[str(ctx.guild.id)]
            URL_queue = youtube_download(ctx, server_queue[index][1])
            # The two original branches differed only by voice.stop() and an
            # inconsistent "Now Playing"/"Now playing" author string; stop
            # playback when needed and use one path with one canonical string.
            if ctx.voice_client.is_playing():
                voice.stop()
            # one pytube.YouTube -- each fresh object re-fetches metadata
            yt = pytube.YouTube(url=server_queue[index][1])
            embed = discord.Embed(description="**Song: **{}".format(server_queue[index][0]).replace(" - YouTube", " "), color=color)
            embed.set_author(name="Now Playing", icon_url=url_author_music)
            embed.set_thumbnail(url=yt.thumbnail_url)
            embed.add_field(name="Uploader", value=yt.author, inline=True)
            embed.add_field(name="Duration", value=time_converter(yt.length), inline=True)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            player = await ctx.send(embed=embed)
            voice.play(discord.FFmpegPCMAudio(URL_queue, **FFMPEG_OPTS))
            # player-control reactions, in display order
            for reaction in ("⏮", "▶", "⏸", "⏭", "🔂", "⏹", "🔀", "*️⃣", "🔼", "🔽"):
                await player.add_reaction(reaction)
        except IndexError:
            embed = discord.Embed(description="Looks like there is no song at this index", color=color)
            embed.set_author(name="Oops...", icon_url=url_author_music)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
    else:
        embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
        embed.set_author(name="Walkman™", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["pause"])
async def pause_song(ctx):
    """Pause the currently playing song, if any."""
    number_of_requests()
    voice_client = ctx.message.guild.voice_client
    is_paused = ctx.voice_client.is_paused()
    is_playing = ctx.voice_client.is_playing()
    listener_ids = [mem.id for mem in ctx.voice_client.channel.members]
    # guard clause: the author must share the voice channel with the bot
    if ctx.author.id not in listener_ids:
        embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
        embed.set_author(name="Walkman™", icon_url=url_author_music)
        await ctx.send(embed=embed)
        return
    try:
        if is_playing:
            voice_client.pause()
            message = await ctx.send("Song paused")
            await message.add_reaction("⏸")
        elif is_paused:
            embed = discord.Embed(description="Song is already paused ❗", color=color)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(description="No song playing currently ❗", color=color)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
    except Exception as e:
        embed = discord.Embed(description=str(e), color=color)
        embed.set_author(name="Error", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["resume","res"])
async def resume_song(ctx):
    """Resume a paused song."""
    number_of_requests()
    voice_client = ctx.message.guild.voice_client
    is_paused = ctx.voice_client.is_paused()
    is_playing = ctx.voice_client.is_playing()
    listener_ids = [member.id for member in ctx.voice_client.channel.members]
    # guard clause: the author must share the voice channel with the bot
    if ctx.author.id not in listener_ids:
        embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
        embed.set_author(name="Walkman™", icon_url=url_author_music)
        await ctx.send(embed=embed)
        return
    try:
        if is_paused:
            voice_client.resume()
            message = await ctx.send("Song resumed")
            await message.add_reaction("▶")
        elif is_playing:
            embed = discord.Embed(description="Song is not paused 🤔", color=color)
            embed.set_author(name="Walkman™", icon_url=url_author_music)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(description="Nothing is playing right now", color=color)
            embed.set_author(name="Walkman™", icon_url=url_author_music)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
    except Exception as e:
        embed = discord.Embed(description=str(e), color=color)
        embed.set_author(name="Error", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["stop","st"])
async def stop_song(ctx):
    """Stop playback entirely, whether playing or paused."""
    number_of_requests()
    voice_client = ctx.message.guild.voice_client
    is_paused = ctx.voice_client.is_paused()
    is_playing = ctx.voice_client.is_playing()
    listener_ids = [member.id for member in ctx.voice_client.channel.members]
    # guard clause: the author must share the voice channel with the bot
    if ctx.author.id not in listener_ids:
        embed = discord.Embed(description="{}, buddy, connect to a voice channel first 🔊".format(ctx.author.name), color=color)
        embed.set_author(name="Walkman™", icon_url=url_author_music)
        await ctx.send(embed=embed)
        return
    try:
        if is_playing or is_paused:
            voice_client.stop()
            message = await ctx.send("Song stopped")
            await message.add_reaction("⏹")
        else:
            embed = discord.Embed(description="Nothing is playing right now", color=color)
            embed.set_author(name="Walkman™", icon_url=url_author_music)
            embed.set_footer(text="Voice Channel Bitrate: {} kbps".format(ctx.guild.voice_client.channel.bitrate/1000))
            await ctx.send(embed=embed)
    except Exception as e:
        embed = discord.Embed(description=str(e), color=color)
        embed.set_author(name="Error", icon_url=url_author_music)
        await ctx.send(embed=embed)
@bot.command(aliases=["rem","remove"])
async def remove_song(ctx, index):
    """Remove the song at *index* from this server's queue."""
    global cursor
    number_of_requests()
    # NOTE(review): values are interpolated straight into SQL here (and
    # throughout this module); song URLs make this injectable -- switch to
    # the driver's parameterized queries.
    operation_view = 'SELECT * FROM music_queue WHERE server="{}"'.format(str(ctx.guild.id))
    cursor.execute(operation_view)
    songs = cursor.fetchall()
    try:
        song = songs[int(index)]
    except (IndexError, ValueError):
        # Consistency fix: the other queue commands report a bad index with
        # an embed; the original crashed on out-of-range or non-numeric input.
        embed = discord.Embed(description="Looks like there is no song at this index", color=color)
        embed.set_author(name="Oops...", icon_url=url_author_music)
        await ctx.send(embed=embed)
        return
    embed = discord.Embed(description="{}".format(song[0]), color=color)
    embed.set_author(name="Song removed", icon_url=url_author_music)
    await ctx.send(embed=embed)
    operation_remove = "DELETE FROM music_queue WHERE song_url = '{a}' AND server='{b}'".format(a=song[1], b=str(ctx.guild.id))
    cursor.execute(operation_remove)
@bot.command(aliases=["clear_queue","cq"])
async def clear_song_queue(ctx):
    """Delete every queued song for this server."""
    number_of_requests()
    global cursor
    guild_id = str(ctx.guild.id)
    # fetch first so we can tell the user when there is nothing to clear
    cursor.execute("SELECT * FROM music_queue WHERE server={}".format(guild_id))
    if cursor.fetchall():
        cursor.execute("DELETE FROM music_queue WHERE server={}".format(guild_id))
        message = await ctx.send("Queue Cleared")
        await message.add_reaction("✅")
    else:
        embed_empty = discord.Embed(description="Queue is already empty 🤷🏻♂️", color=color)
        embed_empty.set_author(name="Hmm...", icon_url=url_author_music)
        await ctx.send(embed=embed_empty)
# /////////////////////////////////////////// EXTRA //////////////////////////////////////////////////
@bot.command(aliases=["thwip"])
async def thwipper(ctx):
    """Send the signature *Thwip!* embed."""
    number_of_requests()
    embed = discord.Embed(title="*Thwip!*", color=color)
    await ctx.send(embed=embed)
@bot.command(aliases=["addbday"])
async def add_user_bday(ctx, member:discord.Member, month, day):
op_check = "SELECT mem_id FROM birthdays"
cursor.execute(op_check)
memIDs = cursor.fetchall()
try:
a = str([memID for memID in memIDs]).replace("('","").replace("',)","")
if str(member.id) not in a:
op_insert = "INSERT INTO birthdays(mem_id, mem_month, mem_day)VALUES('{a}',{b},{c})".format(a=member.id, b=month, c=day)
cursor.execute(op_insert)
await ctx.send(embed=discord.Embed(description="{}'s birthday added to database".format(member.display_name), color=color))
else:
await ctx.send(embed=discord.Embed(description="{}'s birthday is already added in my database".format(member.display_name), color=color))
| |
the
``LogEntry``
arg: from_log_id (osid.id.Id): the ``Id`` of the current
``Log``
arg: to_log_id (osid.id.Id): the ``Id`` of the destination
``Log``
raise: NotFound - ``log_entry_id, from_log_id,`` or
``to_log_id`` not found or ``log_entry_id`` not mapped
to ``from_log_id``
raise: NullArgument - ``log_entry_id, from_log_id,`` or
``to_log_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin
self.assign_log_entry_to_log(log_entry_id, to_log_id)
try:
self.unassign_log_entry_from_log(log_entry_id, from_log_id)
except: # something went wrong, roll back assignment to to_log_id
self.unassign_log_entry_from_log(log_entry_id, to_log_id)
raise
class LogLookupSession(abc_logging_sessions.LogLookupSession, osid_sessions.OsidSession):
"""This session provides methods for retrieving ``Log`` objects.
The ``Log`` represents a collection of log entries.
This session defines views that offer differing behaviors when
retrieving multiple objects.
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete set or is an error condition
Generally, the comparative view should be used for most applications
as it permits operation even if there is data that cannot be
accessed. For example, a browsing application may only need to
examine the ``Logs`` it can access, without breaking execution.
However, an assessment may only be useful if all ``Logs`` referenced
by it are available, and a test-taking application may sacrifice some
interoperability for the sake of precision.
"""
_session_namespace = 'logging.LogLookupSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
    """Initialize the log lookup session.

    Args:
        proxy: optional proxy, forwarded to ``OsidSession._init_catalog``
        runtime: optional runtime, forwarded to ``OsidSession._init_catalog``
        **kwargs: retained on the instance for later use

    If a cataloging manager is configured, catalog lookups are delegated
    to an underlying catalog lookup session, initialized in the
    comparative view.
    """
    OsidSession.__init__(self)
    OsidSession._init_catalog(self, proxy, runtime)
    if self._cataloging_manager is not None:
        # Delegate to the cataloging service when one is available.
        self._catalog_session = self._cataloging_manager.get_catalog_lookup_session()
        self._catalog_session.use_comparative_catalog_view()
    self._catalog_view = COMPARATIVE  # default view for this session
    self._kwargs = kwargs
def can_lookup_logs(self):
    """Tests if this user can perform ``Log`` lookups.

    A ``true`` result is only a hint and does not guarantee that a later
    lookup will be authorized.  A ``false`` result means every lookup
    method of this session would raise ``PermissionDenied``, so an
    application may choose not to offer lookup operations at all.

    return: (boolean) - ``false`` if lookup methods are not
            authorized, ``true`` otherwise
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.can_lookup_bins
    # NOTE: Real authentication hints are expected to be handled by a
    # service adapter layered above this implementation.
    if self._catalog_session is None:
        return True
    return self._catalog_session.can_lookup_catalogs()
def use_comparative_log_view(self):
    """Put this session into the comparative view.

    Lookup methods may then silently omit or translate elements (for
    example, because of authorization) rather than raising an error.
    Use this view when interoperability matters more than precision.

    *compliance: mandatory -- This method is must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.use_comparative_bin_view
    if self._catalog_session is not None:
        self._catalog_session.use_comparative_catalog_view()
    self._catalog_view = COMPARATIVE
def use_plenary_log_view(self):
    """Put this session into the plenary view.

    Lookup methods will then return exactly what was requested or raise
    an error.  Use this view when precision matters more than
    interoperability.

    *compliance: mandatory -- This method is must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.use_plenary_bin_view
    if self._catalog_session is not None:
        self._catalog_session.use_plenary_catalog_view()
    self._catalog_view = PLENARY
@utilities.arguments_not_none
def get_log(self, log_id):
    """Gets the ``Log`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound``
    results. Otherwise, the returned ``Log`` may have a different
    ``Id`` than requested, such as the case where a duplicate ``Id``
    was assigned to a ``Log`` and retained for compatibility.

    arg:    log_id (osid.id.Id): ``Id`` of the ``Log``
    return: (osid.logging.Log) - the log
    raise:  NotFound - ``log_id`` not found
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method is must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.get_bin
    # Delegate to the cataloging service when one is configured.
    if self._catalog_session is not None:
        return self._catalog_session.get_catalog(catalog_id=log_id)
    collection = JSONClientValidated('logging',
                                     collection='Log',
                                     runtime=self._runtime)
    # Need to consider how to best deal with the "phantom root" catalog issue
    if log_id.get_identifier() == PHANTOM_ROOT_IDENTIFIER:
        return self._get_phantom_root_catalog(cat_class=objects.Log, cat_name='Log')
    try:
        result = collection.find_one({'_id': ObjectId(self._get_id(log_id, 'logging').get_identifier())})
    except errors.NotFound:
        # Try creating an orchestrated Log. Let it raise errors.NotFound()
        result = self._create_orchestrated_cat(log_id, 'logging', 'Log')
    return objects.Log(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_logs_by_ids(self, log_ids):
    """Gets a ``LogList`` corresponding to the given ``IdList``.

    In plenary mode, the returned list contains all of the logs in the
    ``Id`` list, in order and including duplicates, or an error results
    if an ``Id`` is not found or inaccessible.  Otherwise, inaccessible
    ``Logs`` may be omitted and the result may be reordered or
    de-duplicated.

    arg:    log_ids (osid.id.IdList): the list of ``Ids`` to retrieve
    return: (osid.logging.LogList) - the returned ``Log list``
    raise:  NotFound - an ``Id was`` not found
    raise:  NullArgument - ``log_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.get_bins_by_ids_template
    # NOTE: This implementation currently ignores plenary view
    # Also, this should be implemented to use get_Log() instead of direct to database
    if self._catalog_session is not None:
        return self._catalog_session.get_catalogs_by_ids(catalog_ids=log_ids)
    object_ids = [ObjectId(log_id.get_identifier()) for log_id in log_ids]
    collection = JSONClientValidated('logging',
                                     collection='Log',
                                     runtime=self._runtime)
    cursor = collection.find({'_id': {'$in': object_ids}}).sort('_id', DESCENDING)
    return objects.LogList(cursor, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_logs_by_genus_type(self, log_genus_type):
    """Gets a ``LogList`` of the given genus ``Type``, excluding logs of
    genus types derived from it.

    In plenary mode, the returned list contains all known logs or an
    error results. Otherwise, the returned list may contain only those
    logs that are accessible through this session.

    arg:    log_genus_type (osid.type.Type): a log genus type
    return: (osid.logging.LogList) - the returned ``Log list``
    raise:  NullArgument - ``log_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.get_bins_by_genus_type_template
    # NOTE: This implementation currently ignores plenary view
    if self._catalog_session is not None:
        return self._catalog_session.get_catalogs_by_genus_type(catalog_genus_type=log_genus_type)
    collection = JSONClientValidated('logging',
                                     collection='Log',
                                     runtime=self._runtime)
    matches = collection.find({"genusTypeId": str(log_genus_type)}).sort('_id', DESCENDING)
    return objects.LogList(matches, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_logs_by_parent_genus_type(self, log_genus_type):
    """Gets a ``LogList`` of the given genus ``Type``, including any logs
    whose genus types are derived from it.

    In plenary mode, the returned list contains all known logs or an
    error results. Otherwise, the returned list may contain only those
    logs that are accessible through this session.

    arg:    log_genus_type (osid.type.Type): a log genus type
    return: (osid.logging.LogList) - the returned ``Log list``
    raise:  NullArgument - ``log_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Not yet supported by this implementation.
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_logs_by_record_type(self, log_record_type):
    """Gets a ``LogList`` containing the given log record ``Type``.

    In plenary mode, the returned list contains all known logs or an
    error results. Otherwise, the returned list may contain only those
    logs that are accessible through this session.

    arg:    log_record_type (osid.type.Type): a log record type
    return: (osid.logging.LogList) - the returned ``Log list``
    raise:  NullArgument - ``log_record_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Not yet supported by this implementation.
    raise errors.Unimplemented()
@utilities.arguments_not_none
def get_logs_by_provider(self, resource_id):
    """Gets a ``LogList`` for a given provider.

    In plenary mode, the returned list contains all known logs or an
    error results. Otherwise, the returned list may contain only those
    logs that are accessible through this session.

    arg:    resource_id (osid.id.Id): a resource ``Id``
    return: (osid.logging.LogList) - the returned ``Log list``
    raise:  NullArgument - ``resource_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Not yet supported by this implementation.
    raise errors.Unimplemented()
def get_logs(self):
"""Gets all ``Logs``.
In plenary mode, the returned list contains all known logs or an
error results. Otherwise, the returned list may contain only
those logs that are accessible through this session.
return: (osid.logging.LogList) - a list of ``Logs``
raise: OperationFailed - unable to complete request
| |
<reponame>thieu1995/mealpy
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 07:03, 18/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class BaseEO(Optimizer):
    """
    The original version of: Equilibrium Optimizer (EO)
        (Equilibrium Optimizer: A Novel Optimization Algorithm)
    Link:
        https://doi.org/10.1016/j.knosys.2019.105190
        https://www.mathworks.com/matlabcentral/fileexchange/73352-equilibrium-optimizer-eo

    Equation numbers in the comments below refer to the EO paper.
    """

    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem: the optimization problem definition (passed to Optimizer)
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, kwargs)
        self.nfe_per_epoch = pop_size  # fitness evaluations per epoch
        self.sort_flag = False
        self.epoch = epoch
        self.pop_size = pop_size
        ## Fixed parameter proposed by authors
        self.V = 1      # "volume" constant used in the concentration update (Eq. 16)
        self.a1 = 2     # exploration weight
        self.a2 = 1     # exploitation weight
        self.GP = 0.5   # generation probability

    def make_equilibrium_pool(self, list_equilibrium=None):
        # Extend the pool of best candidates with the mean of their
        # positions, so the pool holds the 4 elite solutions plus their
        # average (5 entries in total).
        pos_list = [item[self.ID_POS] for item in list_equilibrium]
        pos_mean = np.mean(pos_list, axis=0)
        fit = self.get_fitness_position(pos_mean)
        list_equilibrium.append([pos_mean, fit])
        return list_equilibrium

    def create_child(self, idx, pop_copy, c_pool, t):
        # Generate one candidate solution for population index ``idx``.
        current_agent = pop_copy[idx].copy()
        lamda = np.random.uniform(0, 1, self.problem.n_dims)  # lambda in Eq. 11
        r = np.random.uniform(0, 1, self.problem.n_dims)  # r in Eq. 11
        c_eq = c_pool[np.random.randint(0, len(c_pool))][self.ID_POS]  # random selection 1 of candidate from the pool
        f = self.a1 * np.sign(r - 0.5) * (np.exp(-lamda * t) - 1.0)  # Eq. 11
        r1 = np.random.uniform()
        r2 = np.random.uniform()  # r1, r2 in Eq. 15
        gcp = 0.5 * r1 * np.ones(self.problem.n_dims) * (r2 >= self.GP)  # Eq. 15
        g0 = gcp * (c_eq - lamda * current_agent[self.ID_POS])  # Eq. 14
        g = g0 * f  # Eq. 13
        pos_new = c_eq + (current_agent[self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)  # Eq. 16
        pos_new = self.amend_position_faster(pos_new)
        fit_new = self.get_fitness_position(pos_new)
        return [pos_new, fit_new]

    def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
        """
        Run one epoch of the algorithm over ``pop``.

        Args:
            mode (str): 'sequential', 'thread', 'process'
                + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
                + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
                + 'process': recommended for hard and big task (> 2 minutes for calculating objective)
        Returns:
            [position, fitness value]
        """
        pop_copy = pop.copy()
        pop_idx = np.array(range(0, self.pop_size))
        # ---------------- Memory saving------------------- make equilibrium pool
        # The 4 best solutions (by target fitness) seed the equilibrium pool.
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT][self.ID_TAR])
        c_eq_list = pop_sorted[:4].copy()
        c_pool = self.make_equilibrium_pool(c_eq_list)
        # Eq. 9: time parameter decreasing with the epoch counter
        t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)
        if mode == "thread":
            with parallel.ThreadPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop = [x for x in pop_child]
        elif mode == "process":
            with parallel.ProcessPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop = [x for x in pop_child]
        else:
            pop = [self.create_child(idx, pop_copy, c_pool, t) for idx in pop_idx]
        return pop
class ModifiedEO(BaseEO):
    """
    Original version of: Modified Equilibrium Optimizer (MEO)
        (An efficient equilibrium optimizer with mutation strategy for numerical optimization)
    Link:
        https://doi.org/10.1016/j.asoc.2020.106542

    Extends BaseEO with a mutation scheme and a search mechanism applied
    to the top third of the population after the standard EO update.
    """

    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem: the optimization problem definition (passed to BaseEO)
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, epoch, pop_size, **kwargs)
        self.nfe_per_epoch = 2*pop_size  # EO update + mutation/search evaluations
        self.sort_flag = False
        self.pop_len = int(self.pop_size / 3)  # size of each of the three sub-populations

    def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
        """
        Run one epoch: a BaseEO-style update followed by the MEO
        mutation and search mechanisms.

        Args:
            mode (str): 'sequential', 'thread', 'process'
                + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
                + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
                + 'process': recommended for hard and big task (> 2 minutes for calculating objective)
        Returns:
            [position, fitness value]
        """
        pop_copy = pop.copy()
        pop_idx = np.array(range(0, self.pop_size))
        # ---------------- Memory saving------------------- make equilibrium pool
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT][self.ID_TAR])
        c_eq_list = pop_sorted[:4].copy()
        c_pool = self.make_equilibrium_pool(c_eq_list)
        # Eq. 9: time parameter decreasing with the epoch counter
        t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)
        if mode == "thread":
            with parallel.ThreadPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop = [x for x in pop_child]
        elif mode == "process":
            with parallel.ProcessPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop = [x for x in pop_child]
        else:
            pop = [self.create_child(idx, pop_copy, c_pool, t) for idx in pop_idx]
        ## Sort the updated population based on fitness
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT][self.ID_TAR])
        pop_s1 = pop_sorted[:self.pop_len]  # elite third of the population
        pop_s2 = pop_s1.copy()
        pop_s3 = pop_s1.copy()
        ## Mutation scheme: Gaussian perturbation of the elite third (Eq. 12)
        for i in range(0, self.pop_len):
            pos_new = pop_s1[i][self.ID_POS] * (1 + np.random.normal(0, 1, self.problem.n_dims))  # Eq. 12
            pos_new = self.amend_position_faster(pos_new)
            fit_new = self.get_fitness_position(pos_new)
            pop_s2[i] = [pos_new, fit_new]
        ## Search Mechanism: explore around the best pool member relative
        ## to the elite mean position
        pos_s1_list = [item[self.ID_POS] for item in pop_s1]
        pos_s1_mean = np.mean(pos_s1_list, axis=0)
        for i in range(0, self.pop_len):
            pos_new = (c_pool[0][self.ID_POS] - pos_s1_mean) - np.random.random() * \
                      (self.problem.lb + np.random.random() * (self.problem.ub - self.problem.lb))
            pos_new = self.amend_position_faster(pos_new)
            fit_new = self.get_fitness_position(pos_new)
            pop_s3[i] = [pos_new, fit_new]
        ## Construct a new population from the three sub-populations and
        ## top it up from the equilibrium pool to restore pop_size
        pop = pop_s1 + pop_s2 + pop_s3
        temp = self.pop_size - len(pop)
        idx_selected = np.random.choice(range(0, len(c_pool)), temp, replace=False)
        for i in range(0, temp):
            pop.append(c_pool[idx_selected[i]])
        return pop
class AdaptiveEO(BaseEO):
    """
    Original version of: Adaptive Equilibrium Optimization (AEO)
        (A novel interdependence based multilevel thresholding technique using adaptive equilibrium optimizer)
    Link:
        https://doi.org/10.1016/j.engappai.2020.103836

    Extends BaseEO with an adaptive perturbation of below-average
    solutions and per-index memory saving between epochs.
    """

    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem: the optimization problem definition (passed to BaseEO)
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, epoch, pop_size, **kwargs)
        self.nfe_per_epoch = pop_size
        self.sort_flag = False
        # NOTE(review): pop_len is assigned but never read in this class;
        # kept for compatibility with the other EO variants.
        self.pop_len = int(self.pop_size / 3)

    def create_child(self, idx, pop_copy, c_pool, t):
        # Generate one candidate for index ``idx``; equation numbers refer
        # to the AEO paper (doi:10.1016/j.engappai.2020.103836).
        lamda = np.random.uniform(0, 1, self.problem.n_dims)
        r = np.random.uniform(0, 1, self.problem.n_dims)
        c_eq = c_pool[np.random.randint(0, len(c_pool))][self.ID_POS]  # random selection 1 of candidate from the pool
        f = self.a1 * np.sign(r - 0.5) * (np.exp(-lamda * t) - 1.0)  # Eq. 14
        r1 = np.random.uniform()
        r2 = np.random.uniform()
        gcp = 0.5 * r1 * np.ones(self.problem.n_dims) * (r2 >= self.GP)
        g0 = gcp * (c_eq - lamda * pop_copy[idx][self.ID_POS])
        g = g0 * f
        fit_average = np.mean([item[self.ID_FIT][self.ID_TAR] for item in pop_copy])  # Eq. 19
        pos_new = c_eq + (pop_copy[idx][self.ID_POS] - c_eq) * f + (g * self.V / lamda) * (1.0 - f)  # Eq. 9
        # Adaptive step: solutions at or below the population average are
        # perturbed multiplicatively to encourage exploration.
        if pop_copy[idx][self.ID_FIT][self.ID_TAR] >= fit_average:
            pos_new = np.multiply(pos_new, (0.5 + np.random.uniform(0, 1, self.problem.n_dims)))
        pos_new = self.amend_position_faster(pos_new)
        fit_new = self.get_fitness_position(pos_new)
        return [pos_new, fit_new]

    def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
        """
        Run one epoch of the adaptive EO update over ``pop``.

        Args:
            mode (str): 'sequential', 'thread', 'process'
                + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
                + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
                + 'process': recommended for hard and big task (> 2 minutes for calculating objective)
        Returns:
            [position, fitness value]
        """
        pop_copy = pop.copy()
        pop_idx = np.array(range(0, self.pop_size))
        # ---------------- Memory saving------------------- make equilibrium pool
        pop_sorted = sorted(pop, key=lambda item: item[self.ID_FIT][self.ID_TAR])
        c_eq_list = pop_sorted[:4].copy()
        c_pool = self.make_equilibrium_pool(c_eq_list)
        ## Memory saving, Eq 20, 21: keep the better of the previous and
        ## current solution at each index before generating children.
        if epoch != 0:
            for i in range(0, self.pop_size):
                pop_copy[i] = self.get_better_solution(pop[i], pop_copy[i])
        # Eq. 9: time parameter decreasing with the epoch counter.
        # (The original computed this twice, identically, before and after
        # the memory-saving loop; the redundant computation was removed.)
        t = (1 - epoch / self.epoch) ** (self.a2 * epoch / self.epoch)
        if mode == "thread":
            with parallel.ThreadPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop_copy = [x for x in pop_child]
        elif mode == "process":
            with parallel.ProcessPoolExecutor() as executor:
                pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, c_pool=c_pool, t=t), pop_idx)
                pop_copy = [x for x in pop_child]
        else:
            pop_copy = [self.create_child(idx, pop_copy, c_pool, t) for idx in pop_idx]
        return pop_copy
class LevyEO(BaseEO):
"""
My modified version of: Equilibrium Optimizer (EO)
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
    """
    Args:
        problem: the optimization problem definition (passed to BaseEO)
        epoch (int): maximum number of iterations, default = 10000
        pop_size (int): number of population size, default = 100
    """
    super().__init__(problem, epoch, pop_size, **kwargs)
    self.nfe_per_epoch = pop_size  # one fitness evaluation per individual per epoch
    self.sort_flag = False
def create_child_new(self, idx, pop_copy, c_pool, t, epoch, g_best):
if np.random.uniform() < 0.5:
lamda = np.random.uniform(0, 1, self.problem.n_dims) # lambda in Eq. 11
r = np.random.uniform(0, 1, self.problem.n_dims) # r in Eq. 11
c_eq = c_pool[np.random.randint(0, len(c_pool))][self.ID_POS] # random selection 1 of candidate from the pool
f = self.a1 * np.sign(r - 0.5) * (np.exp(-lamda * t) - 1.0) # Eq. 11
r1 = np.random.uniform()
| |
"""
Created on Tue July 13 12:01 2021
@author: juliaroquette
Spin evolution model implemented in the development of the study by
Roquette et al. 2021.
The class SpinEvolutionCode performs the spin evolution modeling of stars
in the mass range 0.1-1.3 Msun. For a details on the theoretical background
in the model, see detailed notes in SpinEvolutionModel.ipynb
"""
import numpy as np
import astropy.units as u
from scipy.interpolate import UnivariateSpline
from scipy.interpolate import interp1d
from astropy.constants import G
from FIREstars.StarEvolution import BHAC15_MassTrack
class SpinEvolutionCode:
"""
My spin evolution code
"""
CHI = 10.
P = 2.
M_o = 1.99e33 << u.g
OMEGA_o = u.def_unit(r'\Omega_\odot', 2.6*1e-6*u.Hz)
R_o = 6.96e10 << u.cm
I_o = 7e53 << u.g*(u.cm**2)
TIME_o = 4.55e9 << u.yr
TAU_CZ_o = 13.7798 << u.d # Value updated in the context of B15
# 12.9 << u.d
def __init__(self, t0):
    """
    Args:
        t0: initial time of the model, in years.  Clamped to the
            earliest age available in the BHAC15 grid if smaller.
    """
    #define constants
    self.baraffe = BHAC15_MassTrack() #loads the Baraffe+15 grid.
    self.t0 = t0 << u.yr
    # Makes sure that the initial time isn't before the initial time in
    # the BHAC15 grid
    if self.t0.value < 10**np.nanmin(self.baraffe.BHAC15['log_t_yr']):
        self.t0 = 10**np.nanmin(self.baraffe.BHAC15['log_t_yr']) << u.yr
        print('Minimum value must be inside the age range of the Baraffe \
Model, I reset it to {0}'.format(self.t0.value/1e6))
def interpolateBaraffe(self,t):
    """
    given a time t in yrs, interpolate parameters of interest
    (radius, moment of inertia, effective temperature) from the
    Baraffe+2015 models and update the local variables
    self.R, self.I and self.T
    --
    input:
        t: time in yrs
    """
    # bounds_error=False makes interp1d return np.nan outside the grid
    self.R, self.I, self.T = interp1d(self.baraffe.Age,
                                      (self.baraffe.Radius,
                                       self.baraffe.InertiaMomentum,
                                       self.baraffe.Teff),
                                      kind='linear', bounds_error=False
                                      )(t)
def update_dIdt(self, t):
    """
    calculates the derivative dI/dt at time t, via a cubic smoothing
    spline fitted to the BHAC15 inertia track (in cgs), and updates
    the ``self.dIdt`` attribute (g cm^2 / s)
    """
    y = self.baraffe.InertiaMomentum*self.M_o*self.R_o**2  # I in cgs
    x = (self.baraffe.Age*u.yr).to(u.s)
    spl = UnivariateSpline(x, y, k=3)
    self.dIdt = spl.derivative(n = 1)(t.to(u.s).value)*(u.cm**2)*u.g/u.s
def update_To(self):
    """Refresh the Matt+15 wind-torque scaling constant ``self.To`` (erg)
    from the current radius ``self.R`` and mass ``self.M``."""
    scaling = 6.3e30 * self.R ** 3.1 * self.M ** 0.5
    self.To = scaling << u.erg
def tau_w(self, Omega):
    """Wind torque for the rotation rate ``Omega``, using the current
    values of To, tau_cz and the saturation limit.  Saturated and
    unsaturated regimes use different scalings."""
    if Omega >= self.saturation_limit():
        # saturated regime
        return - self.To*(self.CHI**self.P)*(Omega.value)
    # unsaturated regime
    return - self.To*((self.tau_cz/self.TAU_CZ_o)**self.P)*\
           ((Omega.value)**(self.P + 1))
def tau_cz_CS11(self):
    """Set ``self.tau_cz`` (days) to the convective turnover timescale of
    Cranmer & Saar (2011), evaluated at the current Teff ``self.T``."""
    exponent = -(self.T/1952.5) - (self.T/6250.)**18
    self.tau_cz = (314.24*np.exp(exponent) + 0.002) << u.d
def time_update(self,t):
    """
    updates every time-dependent quantity at time t; the order matters:
    tau_cz needs T, and To needs R (both set by interpolateBaraffe)
    """
    self.interpolateBaraffe(t.value) #this will set the values of R,I&T
    self.tau_cz_CS11() # set tau_cz
    self.update_To() # set To
    self.update_dIdt(t) #get dIdt
def saturation_limit(self):
    """Return the saturation rotation rate, in OMEGA_o units, computed
    from the current convective turnover timescale."""
    ratio = self.CHI*self.TAU_CZ_o/self.tau_cz
    return ratio.value << self.OMEGA_o
def Euler_(self, f, Omega, t0, dt, wind=True, structure=True,
           breakup=True):
    """Advance Omega by one explicit-Euler step of size ``dt``.

    Args:
        f: derivative function dOmega/dt (same signature as ``self.f``)
        Omega: current rotation rate
        t0: current time
        dt: time step
        wind, structure: toggles forwarded to ``f``
        breakup: if True, cap the result at the break-up rotation rate

    Returns:
        (Omega_new, t0 + dt)
    """
    # BUG FIX: the ``f`` argument was previously ignored and ``self.f``
    # was hard-coded; every in-file caller passes ``self.f``, so using
    # the argument preserves existing behaviour while honouring the API.
    O = Omega + dt*f(Omega, wind = wind, structure = structure)
    if bool(breakup):
        # saturate Omega at the critical (break-up) value
        Ocrit = self.BreakUp()
        if (O > Ocrit):
            O = Ocrit
    return O, t0 + dt
def f_W(self, Omega):
    """Wind-torque term of dOmega/dt (Omega in solar units)."""
    inertia = self.I*self.M_o*self.R_o**2
    return self.tau_w(Omega)/inertia
def f_dIdt(self, Omega):
    """Structural term of dOmega/dt, driven by the changing moment of
    inertia."""
    inertia = self.I*self.M_o*self.R_o**2
    return - Omega*self.dIdt/inertia
def f(self, Omega, wind=True, structure=True):
    """Total dOmega/dt fed to the Euler stepper; each term is multiplied
    by a 0/1 factor so it can be toggled off."""
    w_on = bool(wind).real
    s_on = bool(structure).real
    return w_on*self.f_W(Omega) + s_on*self.f_dIdt(Omega)
def get_dt(self, Omega, e, structure=True, wind=True):
    """
    estimates the best timestep by comparing how much the wind and the
    structure terms would change Omega, scaled by the efficiency
    (tolerance) term e; the smaller of the two candidate steps wins.

    NOTE(review): if both ``wind`` and ``structure`` are False this
    method falls through all branches and returns ``None``; callers in
    this file always enable at least one term — confirm before relying
    on other combinations.
    """
    if bool(wind) and bool(structure):
        dt_s = - e*self.I*self.M_o*(self.R_o**2)/self.dIdt
        dt_w = e*self.I*self.M_o*(self.R_o**2)*(Omega)/self.tau_w(Omega)
        return min(abs(dt_s), abs(dt_w)).to(u.yr)
    elif not bool(wind) and bool(structure):
        return abs(e*self.I*self.M_o*(self.R_o**2)/self.dIdt)
    elif bool(wind) and not bool(structure):
        return abs(e*self.I*self.M_o*(self.R_o**2)*\
                   (Omega)/self.tau_w(Omega))
def BreakUp(self):
    """Break-up rotation rate for the current mass and age:
    Omega_crit = sqrt(G M / Re^3) with Re = 1.5 Rp
    (Baraffe-model solar values: Rs = 6.96e10 cm, Mo = 1.99e33 g).

    Returns the limit in OMEGA_o units.
    """
    gravity = self.M*self.M_o*G.cgs
    r_equatorial = 1.5*self.R*self.R_o
    return np.sqrt(gravity/r_equatorial**3) << self.OMEGA_o
def dOmegadt(self, M, Omega0, t, tau_d=0, e=0.1, wind=True,
             structure=True, snapshot=False, breakup=True):
    """
    Integrate the spin evolution with an adaptive explicit-Euler scheme.
    __
    input
    _
        M: stellar mass in solar masses
        Omega0: initial rotation (interpreted in OMEGA_o units)
        t: vector with the key timesteps (years); a scalar is accepted
        tau_d: disk-locking time in years; rotation is held constant
               until then
        e: [0,1] tolerance of minimal variation of Omega to define an
           intermediary timestep
        wind: [True] for activate wind term
        structure: [True] for activate structure term
        snapshot: [True] the model will return only data for the
                  timesteps listed in t
        breakup: [True] saturates the rotation at the break-up speed
    __
    output
        Omega and t with no units
    _
    """
    #define a local variable for the Mass
    self.M = M
    if M == 0.1:
        # nudge off the exact lower edge of the BHAC15 mass grid
        self.M += 0.000001
    # locally load the grid for M
    self.baraffe.getMassTrack(self.M)
    # define all initial parameters relevant to the spin evolution
    self.time_update(self.t0)
    # accept a scalar or a sequence for t
    try:
        len(t)
    except TypeError:
        t = np.array([t])
    else:
        t = np.array(t)
    Omega0 = Omega0 << self.OMEGA_o
    if Omega0 >= self.BreakUp():
        print('Initial rotation faster than Break-Up!')
    t = t << u.yr
    if bool(snapshot):
        # pre-allocate the output grid: t0 followed by the requested ages
        t_out = np.full(len(t) + 1, np.nan)
        t_out[0] = self.t0.value
        t_out[1:] = 1.*t
    if bool(snapshot): t_mas = []
    for i, T in enumerate(t):
        if T.value > np.nanmax(self.baraffe.Age): #makes sure I am not
            #going beyond Baraffe's models
            if bool(snapshot): t_mas.append(i + 1) # save the index
            t[i] = (np.nanmax(self.baraffe.Age) - 1e6) << u.yr
            print('Maximum value must be inside the age range of the \
Baraffe Model')
    tau_d= tau_d << u.yr
    if (tau_d != 0) & (tau_d.value < 1e5):
        print(r'Is $\tau_D$ in the right units?')
    # accumulators for the output time and rotation series
    t_ = []
    Omega_ = []
    t_.append(self.t0.value)
    Omega_.append(Omega0.value)
    tk_o = 1.*self.t0
    # test if spin-evolution needs to be calculated at all:
    if t.max() > tau_d:
        n = 0
        # test if tau_D is before t0:
        if tau_d > self.t0:
            # disk-locking phase: rotation held at Omega0 until tau_d
            while t[n] < tau_d:
                t_.append(t[n].value)
                Omega_.append(Omega0.value)
                tk_o = t[n]
                n += 1
            #next register the moment disk was lost:
            if not bool(snapshot):
                t_.append(tau_d.value)
                Omega_.append(Omega0.value)
            tk_o = tau_d
        #initiate the parameters
        self.time_update(tk_o)
        dt = self.get_dt(Omega0, e, wind = wind, structure=structure)
        for T in t[n:]:
            # adaptive sub-steps until the next requested age T
            while tk_o + dt < T:
                O, tk = self.Euler_(self.f, Omega0, tk_o, dt, wind=wind,
                                    structure=structure, breakup=breakup)
                if not bool(snapshot):
                    t_.append(tk.value)
                    Omega_.append(O.value)
                Omega0 = 1.*O
                tk_o = tk
                self.time_update(tk_o)
                dt = self.get_dt(Omega0, e, wind=wind,
                                 structure=structure)
            # final partial step landing exactly on T
            self.time_update(tk_o)
            dt = T - tk_o
            O,tk = self.Euler_(self.f, Omega0, tk_o, dt, wind=wind,
                               structure=structure, breakup=breakup)
            t_.append(tk.value)
            Omega_.append(O.value)
            Omega0 = 1.*O
            tk_o = tk
            self.time_update(tk_o)
            dt = self.get_dt(Omega0, e, wind=wind, structure=structure)
        t_ = np.array(t_)
        Omega_ = np.array(Omega_)
        if bool(snapshot):
            # report on the requested grid; ages clipped to the BHAC15
            # range are masked with NaN
            t_ = t_out
            Omega_[t_mas] = np.nan
        return t_, Omega_
    else:
        # disk locked for the whole requested range: constant rotation
        return np.insert(t.value, 0, self.t0.value, axis=0), \
               np.full(len(t) + 1, Omega0.value)
def get_BreakUp(self, M, t):
    """
    input:
        M in Msun
        t in years (scalar or array-like)
    Returns the Breakup limit as a function of time for a star with
    mass M in Msun, as a plain ndarray (in OMEGA_o units)
    """
    self.M = M
    if M == 0.1:
        # nudge off the exact lower edge of the BHAC15 mass grid
        self.M += 0.000001
    # locally load the grid for M
    self.baraffe.getMassTrack(self.M)
    # accept a scalar or a sequence for t
    try:
        len(t)
    except TypeError:
        t = np.array([t])
    else:
        t = np.array(t)
    t = t << u.yr
    O_crit = []
    for t_ in t:
        self.time_update(t_)
        O_crit.append(self.BreakUp().value)
    return np.array(O_crit)
def get_SaturationLimit(self, M, t):
    """
    input:
        M in Msun
        t in years (scalar or array-like)
    Returns the saturation limit as a function of time for a star with
    mass M in Msun, as a plain ndarray (in OMEGA_o units)
    """
    self.M = M
    if M == 0.1:
        # nudge off the exact lower edge of the BHAC15 mass grid
        self.M += 0.000001
    # locally load the grid for M
    self.baraffe.getMassTrack(self.M)
    # CONSISTENCY FIX: accept a scalar t as well as a sequence, exactly
    # as get_BreakUp() does (a bare np.array(scalar) is 0-d and cannot
    # be iterated below).
    try:
        len(t)
    except TypeError:
        t = np.array([t])
    else:
        t = np.array(t)
    t = t << u.yr
    O_sat = []
    for t_ in t:
        self.time_update(t_)
        O_sat.append(self.saturation_limit().value)
    return np.array(O_sat)
def isogyrochrone(self, initial_period, time, fuv=False, tau_d=False,
dm=0.025, e=0.01, tau_vis=1.0, wind=True,
structure=True, breakup=True, initial_age=False,
get_breakup_limit=False):
"""
Calculate "isogyrochrones" by running the spin evolution model for the
range of masses 0.1-1.3Mo, considering coeval stars sharing the same
initial condition
----
input:
initial_period: Initial Period for the isogyrochrones
time: Isogyrochrone ages in years. It can be
either a single value or an array of values
fuv [False]: Set as False if no FUV level is considered.
Otherwise, give a | |
\
If you don't specify stds then pure elements are assumed. Otherwise standards is a dictionary mapping an element to a composition."""
if isinstance(comp, epq.ISpectrumData):
cp = comp.getProperties()
t = cp.getCompositionWithDefault(epq.SpectrumProperties.MicroanalyticalComposition, None)
if not t:
t = cp.getCompositionWithDefault(epq.SpectrumProperties.StandardComposition, None)
comp = t
comp = material(comp)
if (comp <> None) and (comp.getElementCount() > 0):
oldStrat = epq.AlgorithmUser.getGlobalStrategy()
s = epq.Strategy()
s.addAlgorithm(epq.MassAbsorptionCoefficient, mac)
epq.AlgorithmUser.applyGlobalOverride(s)
props = epq.SpectrumProperties()
props.setDetector(det)
props.setNumericProperty(epq.SpectrumProperties.BeamEnergy, e0)
props.addAll(xtra)
print "Material\t%s" % (comp.descriptiveString(0))
print "Detector\t%s" % det
print "Algorithm\t%s" % alg.getName()
print "MAC\t%s" % alg.getAlgorithm(epq.MassAbsorptionCoefficient).getName()
print "E0\t%g keV" % props.getNumericProperty(epq.SpectrumProperties.BeamEnergy)
print "Take-off\t%g%s" % (jl.Math.toDegrees(epq.SpectrumUtils.getTakeOffAngle(props)), epq.SpectrumProperties.TakeOffAngle.getUnits())
for sp in xtra.getPropertySet():
print "%s\t%s" % (sp, xtra.getTextProperty(sp))
if stds:
conv = {}
for z, c in stds.iteritems():
conv[element(z)] = material(c)
stds = conv
mode = mode.lower()
if mode.startswith("w") or mode.startswith("ex"):
print "\nIUPAC\tSeigbahn\tStandard\tEnergy\t ZAF\t Z\t A\t F\tk-ratio"
mtr = (majorTransitions(comp, e0, thresh=0.8) if mode.startswith("w") else majorTransitions(comp, e0, epq.XRayTransition.ALL_TRANSITIONS, 0.99))
for xrt in mtr:
if epq.FromSI.keV(xrt.getEdgeEnergy()) > 0.99 * e0:
continue
try:
elm = xrt.getElement()
std = (stds.get(elm) if stds else None)
if std:
ww = std.weightFraction(elm, False)
zaf = alg.relativeZAF(comp, xrt, props, std)
else:
ww = 1.0
zaf = alg.relativeZAF(comp, xrt, props)
k = zaf[3] * comp.weightFraction(elm, False) / ww
eTr = epq.FromSI.keV(xrt.getEnergy())
print "%s\t%s\t%s\t%2.4f\t%1.4f\t%1.4f\t%1.4f\t%1.4f\t%1.6f" % (xrt, xrt.getSiegbahnName(), (std if std else "Pure %s" % elm.toAbbrev()), eTr, zaf[3], zaf[0], zaf[1], zaf[2], k)
except jl.Throwable, th:
th.printStackTrace()
print th.getMessage()
else:
print "\n%-15s\tStandard\tEnergy\t ZAF\t Z\t A\t F\tk-ratio" % "IUPAC"
for xrts in majorTransitionSets(det, comp, e0, 0.01):
z, a, f, zaf, w = 0.0, 0.0, 0.0, 0.0, 0.0
elm = xrts.getElement()
std = (stds.get(elm) if stds else None)
ww = (std.weightFraction(elm, False) if std else 1.0)
nComp = comp
if comp.weightFraction(elm, False) < 1.0e-8:
nComp = epq.Composition(comp)
nComp.addElement(elm, 1.0e-8)
for xrt in xrts:
if epq.FromSI.keV(xrt.getEdgeEnergy()) > 0.9 * e0:
continue
rzaf = (alg.relativeZAF(nComp, xrt, props, std) if std else alg.relativeZAF(nComp, xrt, props))
wgt = xrt.getWeight(epq.XRayTransition.NormalizeFamily)
z = z + wgt * rzaf[0]
a = a + wgt * rzaf[1]
f = f + wgt * rzaf[2]
zaf = zaf + wgt * rzaf[3]
w = w + wgt
eTr = epq.FromSI.keV(xrt.getEnergy())
if w < 1.0e-10:
continue
z, a, f, zaf = z / w, a / w, f / w, zaf / w
k = zaf * nComp.weightFraction(elm, False) / ww
print "%-15s\t%s\t%2.4f\t%1.4f\t%1.4f\t%1.4f\t%1.4f\t%1.6f" % (xrts, (std if std else "Pure %s" % elm.toAbbrev()), eTr, zaf, z, a, f, k)
# Restore previous algorithm preferences
epq.AlgorithmUser.applyGlobalOverride(oldStrat)
def getEfficiency(det):
    """getEfficiency(det).display()
    Return the efficiency of the detector (window + crystal) as a function of
    energy, wrapped as a ScriptableSpectrum, for the specified detector."""
    props = det.getDetectorProperties()
    calib = det.getCalibration()
    # Detector area property is in mm^2; scale to m^2 before normalizing
    area = props.getProperties().getNumericWithDefault(epq.SpectrumProperties.DetectorArea, 10.0) * 1.0e-6
    spec = epq.SpectrumUtils.toSpectrum(calib.getChannelWidth(), calib.getZeroOffset(), calib.getEfficiency(props))
    result = ScriptableSpectrum(epq.SpectrumUtils.scale(100.0 / area, spec))
    result.getProperties().setDetector(det)
    result.rename("Efficiency[%s]" % det)
    return result
def helpStr(arg, recurse=True):
    """helpStr(arg, recurse=True)
    Build a human-readable help string for 'arg': its doc string when one
    exists, otherwise a summary of the Java or Python class and its public
    methods.  Long package names are abbreviated to the script aliases
    (epq, ept, epu, nm, epdb, dt2, epd, jl, jio)."""
    res = ""
    da = dir(arg)
    # Has a doc string, return a doc string....
    if ('__doc__' in da) and (arg.__doc__ != None):
        return str(arg.__doc__)
    if ('__class__' in da):
        cn = str(arg.__class__)
        if (cn == "<type 'instancemethod'>"):
            # Java instance method, return an arglist
            res = "Java Method:"
            if "argslist" in da:
                i = 1
                # Each overload ("style") gets its own numbered entry.
                # (Renamed the loop variable: the original shadowed 'arg'.)
                for overload in arg.argslist:
                    if overload != None:
                        res = "%s\n Style %d\n %s" % (res, i, overload.data)
                        i = i + 1
            else:
                res = "%s\n No arguments: %s()" % (res, arg.__name__)
        elif(cn == "<type 'java.lang.Class'>"):
            # FIX: was str(arg)[7, -2] -- a tuple index, which raises
            # TypeError.  Slice off the "<type '...'>" decoration instead.
            res = "Java class: %s" % (str(arg)[7:-2])
            tmp = []  # FIX: was used below without ever being initialized
            for m in da:
                if (not str(m).startswith("_")) and recurse:
                    tmp.append(helpStr(eval("%s.%s" % (arg, m)), False))
            if len(tmp) > 0:
                res = "%s\nMethods:\n\t%s" % (res, "\n\t".join(tmp))
        elif cn.startswith("<type '"):
            res = "%sInstance of Java class %s" % (res, cn[7:-2])
            tmp = []
            for m in da:
                if (not str(m).startswith("_")) and recurse:
                    tmp.append(helpStr(eval("%s.%s" % (arg, m)), False))
            if len(tmp) > 0:
                res = "%s\nMethods:\n\t%s" % (res, "\n\t".join(tmp))
        elif cn.startswith("<class '"):
            res = "%sInstance of Python class %s" % (res, cn[8:-2])
            tmp = []
            for m in da:
                if (not str(m).startswith("_")) and recurse:
                    tmp.append(helpStr(eval("%s.%s" % (arg, m)), False))
            res = "%s\nMethods:\n\t%s" % (res, "\n\t ".join(tmp))
        else:
            if len(res) == 0:
                res = "No help available for %s" % str(arg)
        # Abbreviate package names to the script aliases.  FIX: the more
        # specific EPQLibrary.Detector pattern must run before the generic
        # EPQLibrary one; in the original order it could never match.
        res = res.replace("gov.nist.microanalysis.EPQLibrary.Detector", "epd")
        res = res.replace("gov.nist.microanalysis.EPQLibrary", "epq")
        res = res.replace("gov.nist.microanalysis.EPQTools", "ept")
        res = res.replace("gov.nist.microanalysis.Utility", "epu")
        res = res.replace("gov.nist.microanalysis.NISTMonte", "nm")
        res = res.replace("gov.nist.microanalysis.EPQDatabase", "epdb")
        res = res.replace("gov.nist.microanalysis.dtsa2", "dt2")
        res = res.replace("java.lang", "jl")
        res = res.replace("java.io", "jio")
    elif '__name__' in da:
        res = "%sAlias for %s" % (res, arg.__name__)
        tmp = []
        for m in da:
            if not str(m).startswith("__"):
                tmp.append("%s" % m)
        res = "%s\nChildren:\n\t%s" % (res, ", ".join(tmp))
    else:
        res = "%s\n%s" % (res, str(da))
    return res
def help(arg=None):
    """help(arg)
    Print a description of 'arg' built by helpStr(); with no argument,
    print the module doc string.  (Shadows the builtin help().)"""
    # NOTE: Python 2 / Jython print statements
    if arg:
        print helpStr(arg, True)
    else:
        print __doc__
def wrap(s):
    """wrap(s)
    Convert a spectrum to a ScriptableSpectrum object as required.
    Already-wrapped spectra are returned unchanged."""
    if isinstance(s, ScriptableSpectrum):
        return s
    return ScriptableSpectrum(s)
def unwrap(s):
    """unwrap(s)
    Convert a ScriptableSpectrum back to the raw spectrum object."""
    # Peel off nested ScriptableSpectrum wrappers until the raw spectrum remains
    while isinstance(s, ScriptableSpectrum):
        s = s.getWrapped()
    return s
def display(s):
    """display(s)
    Display a spectrum (or a list/tuple of spectra) in the spectrum plot
    window, unwrapping ScriptableSpectrum objects as necessary."""
    if isinstance(s, (list, tuple)):
        # Recurse so every spectrum in the collection is displayed
        for item in s:
            display(item)
        return
    raw = s.getWrapped() if isinstance(s, ScriptableSpectrum) else s
    DataManager.addSpectrum(raw, True)
def clearSpectra():
    """Clear the spectrum display (delegates to DataManager)."""
    DataManager.clearSelections()
def report(html):
    """report(html)
    Append the specific syntactically correct HTML to the end of the report in the report tab."""
    # html is passed through verbatim; the caller is responsible for escaping
    MainFrame.appendHTML(html)
def mac(mat, xx, alg=epq.MassAbsorptionCoefficient.Chantler2005):
    """mac(mat, xx, [alg=epq.MassAbsorptionCoefficient.Chantler2005])
    where mat is a material or element (by name, Material or Element object), xx is an element, atomic shell or x-ray transition and alg is the tabulation to use (by default Chantler2005). Displays the mass absorption coefficient for the specified element or x-ray transition in the specified material."""
    mat = material(mat)
    # Promote a bare Composition to a Material with a nominal 1 g/cc density
    if not isinstance(mat, epq.Material):
        mat = epq.Material(mat, epq.ToSI.gPerCC(1.0))
    if isinstance(xx, str):
        xx = element(xx)
    # NOTE: Python 2 / Jython print statements; output is a tab-separated table
    print "Density = %g g/cm^3" % epq.FromSI.gPerCC(mat.getDensity())
    print "XRT\tMAC\tu(MAC)\tRange"
    print "\t(cm^2/g)\t(cm^2/g)\t(um)"
    if isinstance(xx, epq.Element):
        # An Element: tabulate every transition that exists for this element
        for tr in range(epq.XRayTransition.KA1, epq.XRayTransition.N5N6 + 1):
            if epq.XRayTransition.exists(xx, tr):
                xrt = epq.XRayTransition(xx, tr)
                mm = alg.computeWithUncertaintyEstimate(mat, xrt)
                print "%s\t%g\t%g\t%g" % (xrt.toString(), alg.toCmSqrPerGram(mm.doubleValue()), alg.toCmSqrPerGram(mm.uncertainty()), 1.0e6 * alg.meanFreePath(mat, xrt.getEnergy()))
    elif isinstance(xx, epq.AtomicShell):
        # An AtomicShell: tabulate the transitions terminating in this shell
        for tr in range(epq.XRayTransition.getFirstIntoShell(xx), epq.XRayTransition.getLastIntoShell(xx)):
            if epq.XRayTransition.exists(xx, tr):
                xrt = epq.XRayTransition(xx, tr)
                mm = alg.computeWithUncertaintyEstimate(mat, xrt)
                print "%s\t%g\t%g\t%g" % (xrt.toString(), alg.toCmSqrPerGram(mm.doubleValue()), alg.toCmSqrPerGram(mm.uncertainty()), 1.0e6 * alg.meanFreePath(mat, xrt.getEnergy()))
    else:
        if isinstance(xx, epq.XRayTransition):
            # A single transition: one table row
            xrt = xx
            mm = alg.computeWithUncertaintyEstimate(mat, xrt)
            print "%s\t%g\t%g\t%g" % (xrt.toString(), alg.toCmSqrPerGram(mm.doubleValue()), alg.toCmSqrPerGram(mm.uncertainty()), 1.0e6 * alg.meanFreePath(mat, xrt.getEnergy()))
        else:
            print xx
def massFraction(massFractionMap, density=None):
    """massFraction(massFractionMap, density=None):
    massFraction({ "Si":0.25, "Fe":0.25, "O":0.5})
    Builds a Material or Composition object from mass fractions."""
    comp = epq.Composition()
    elements = []
    fractions = []
    # .iteritems(): Python 2 / Jython dict iteration
    for sym, frac in massFractionMap.iteritems():
        elements.append(element(sym))
        fractions.append(frac)
    comp.defineByWeightFraction(elements, fractions)
    # Supplying a density upgrades the Composition to a full Material
    return epq.Material(comp, epq.ToSI.gPerCC(density)) if density else comp
def getMac(elm, det=None, alg=epq.MassAbsorptionCoefficient.Chantler2005):
    """getMac(elm, [det=d1], [alg=epq.MassAbsorptionCoefficient.Chantler2005])
    Gets the mass absorption coefficient for the specified Element or Composition as a spectrum-like object. The det term (=d1) and the alg term (=epq.MassAbsorptionCoefficient.Chantler2005) are optional arguments."""
    if not det:
        det = findDetector("")
    elm = material(elm)
    if isinstance(det, epd.EDSDetector):
        # A real detector supplies its own binning
        chCount, chWidth, zeroOff = det.getChannelCount(), det.getChannelWidth(), det.getZeroOffset()
    else:
        # Otherwise 'det' is treated as a plain channel count with
        # 10 eV channels and zero offset (see displayMac)
        chCount, chWidth, zeroOff = det, 10.0, 0.0
    macs = jarray.zeros(chCount, 'd')
    for i in range(0, chCount):
        e = chWidth * i + zeroOff
        if e > 0.0:
            macs[i] = alg.toCmSqrPerGram(alg.compute(elm, epq.ToSI.eV(e)))
    res = epq.SpectrumUtils.toSpectrum(chWidth, zeroOff, macs)
    epq.SpectrumUtils.rename(res, "MAC[%s,%s]" % (elm, alg.getName()))
    return ScriptableSpectrum(res)
def displayMac(elm, eMax=20.48, alg=epq.MassAbsorptionCoefficient.Chantler2005):
    """displayMac(elm, eMax=20.48, alg=epq.MassAbsorptionCoefficient.Chantler2005)
    Displays the mass absorption coefficient for the specified Element or Composition in the spectrum display. The eMax term (=20.48) and the alg term (=epq.MassAbsorptionCoefficient.Chantler2005) are optional arguments."""
    # 0.01 keV (10 eV) channels -> channel count spanning 0..eMax keV
    nCh = int(eMax / 0.01)
    display(getMac(elm, nCh, alg))
def windowTransmission(name):
"""windowTransmission(name) or windowTransmission(window) or windowTransimission(detector)
Creates a spectrum object representing the transmission of the window specified by name (see epd.XRayWindowFactory.WindowTypes for a list)."""
if isinstance(name, str):
w = epd.XRayWindowFactory.createWindow(name)
elif isinstance(name, epd.EDSDetector):
w = name.getWindow()
elif isinstance(name, epd.IXRayWindowProperties):
w = name
if not isinstance(w, epd.IXRayWindowProperties):
print "%s is not a window, detector or window name" % name
return
chCx = | |
"""
The crypt module manages all of the cryptography functions for minions and
masters, encrypting and decrypting payloads, preparing messages, and
authenticating peers
"""
import base64
import binascii
import copy
import getpass
import hashlib
import hmac
import logging
import os
import random
import stat
import sys
import time
import traceback
import weakref
import salt.defaults.exitcodes
import salt.ext.tornado.gen
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.utils.crypt
import salt.utils.decorators
import salt.utils.event
import salt.utils.files
import salt.utils.rsax931
import salt.utils.sdb
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.version
from salt.exceptions import (
AuthenticationError,
MasterExit,
SaltClientError,
SaltReqTimeoutError,
)
from salt.ext import six
try:
from M2Crypto import RSA, EVP, BIO
HAS_M2 = True
except ImportError:
HAS_M2 = False
if not HAS_M2:
try:
from Cryptodome.Cipher import AES, PKCS1_OAEP, PKCS1_v1_5 as PKCS1_v1_5_CIPHER
from Cryptodome.Hash import SHA
from Cryptodome.PublicKey import RSA
from Cryptodome.Signature import PKCS1_v1_5
from Cryptodome import Random
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
if not HAS_M2 and not HAS_CRYPTO:
try:
from Crypto.Cipher import AES, PKCS1_OAEP, PKCS1_v1_5 as PKCS1_v1_5_CIPHER
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
# let this be imported, if possible
from Crypto import Random
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
log = logging.getLogger(__name__)
def dropfile(cachedir, user=None):
    """
    Set an AES dropfile to request the master update the publish session key

    :param str cachedir: directory in which the ``.dfn`` marker file is placed
    :param str user: optional system user who should own the dropfile
    """
    dfn = os.path.join(cachedir, ".dfn")
    # set a mask (to avoid a race condition on file creation) and store original.
    with salt.utils.files.set_umask(0o277):
        log.info("Rotating AES key")
        if os.path.isfile(dfn):
            log.info("AES key rotation already requested")
            return
        # NOTE(review): this branch looks unreachable -- the isfile() check
        # above already returned when the file exists.  Kept byte-identical;
        # confirm before removing.
        if os.path.isfile(dfn) and not os.access(dfn, os.W_OK):
            os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
        with salt.utils.files.fopen(dfn, "wb+") as fp_:
            fp_.write(b"")
        # Leave the marker owner-read-only
        os.chmod(dfn, stat.S_IRUSR)
        if user:
            # Best-effort chown to the requested user (POSIX only)
            try:
                import pwd
                uid = pwd.getpwnam(user).pw_uid
                os.chown(dfn, uid, -1)
            except (KeyError, ImportError, OSError):
                pass
def gen_keys(keydir, keyname, keysize, user=None, passphrase=None):
    """
    Generate a RSA public keypair for use with salt
    :param str keydir: The directory to write the keypair to
    :param str keyname: The type of salt server for whom this key should be written. (i.e. 'master' or 'minion')
    :param int keysize: The number of bits in the key
    :param str user: The user on the system who should own this keypair
    :param str passphrase: The passphrase which should be used to encrypt the private key
    :rtype: str
    :return: Path on the filesystem to the RSA private key
    """
    base = os.path.join(keydir, keyname)
    priv = "{}.pem".format(base)
    pub = "{}.pub".format(base)
    if HAS_M2:
        gen = RSA.gen_key(keysize, 65537, lambda: None)
    else:
        salt.utils.crypt.reinit_crypto()
        gen = RSA.generate(bits=keysize, e=65537)
    if os.path.isfile(priv):
        # Between first checking and the generation another process has made
        # a key! Use the winner's key
        return priv
    # Do not try writing anything, if directory has no permissions.
    if not os.access(keydir, os.W_OK):
        raise OSError(
            'Write access denied to "{}" for user "{}".'.format(
                os.path.abspath(keydir), getpass.getuser()
            )
        )
    # umask 0o277 -> the private key file is created owner-readable only
    with salt.utils.files.set_umask(0o277):
        if HAS_M2:
            # if passphrase is empty or None use no cipher
            if not passphrase:
                gen.save_pem(priv, cipher=None)
            else:
                gen.save_pem(
                    priv,
                    cipher="des_ede3_cbc",
                    callback=lambda x: salt.utils.stringutils.to_bytes(passphrase),
                )
        else:
            with salt.utils.files.fopen(priv, "wb+") as f:
                f.write(gen.exportKey("PEM", passphrase))
    if HAS_M2:
        gen.save_pub_key(pub)
    else:
        with salt.utils.files.fopen(pub, "wb+") as f:
            f.write(gen.publickey().exportKey("PEM"))
    os.chmod(priv, 0o400)
    if user:
        # Best-effort chown of both files to the requested user (POSIX only)
        try:
            import pwd
            uid = pwd.getpwnam(user).pw_uid
            os.chown(priv, uid, -1)
            os.chown(pub, uid, -1)
        except (KeyError, ImportError, OSError):
            # The specified user was not found, allow the backup systems to
            # report the error
            pass
    return priv
@salt.utils.decorators.memoize
def _get_key_with_evict(path, timestamp, passphrase):
    """
    Load a private key from disk. `timestamp` above is intended to be the
    timestamp of the file's last modification. This fn is memoized so if it is
    called with the same path and timestamp (the file's last modified time) the
    second time the result is returned from the memoiziation. If the file gets
    modified then the params are different and the key is loaded from disk.
    """
    log.debug("salt.crypt._get_key_with_evict: Loading private key")
    if HAS_M2:
        # M2Crypto expects a passphrase callback rather than a plain string
        key = RSA.load_key(path, lambda x: six.b(passphrase))
    else:
        with salt.utils.files.fopen(path) as f:
            key = RSA.importKey(f.read(), passphrase)
    return key
def get_rsa_key(path, passphrase):
    """
    Read a private key off the disk.

    Thin caching wrapper around :func:`_get_key_with_evict`: the file's
    mtime is part of the memoization key, so the cached key object is reused
    until the file changes on disk, at which point the new (path, mtime,
    passphrase) tuple forces a fresh load.
    """
    log.debug("salt.crypt.get_rsa_key: Loading private key")
    mtime = str(os.path.getmtime(path))
    return _get_key_with_evict(path, mtime, passphrase)
def get_rsa_pub_key(path):
    """
    Read a public key off the disk.
    """
    log.debug("salt.crypt.get_rsa_pub_key: Loading public key")
    if not HAS_M2:
        # PyCrypto/PyCryptodome parses the PEM text directly
        with salt.utils.files.fopen(path) as f:
            return RSA.importKey(f.read())
    # M2Crypto's BIO loader wants the generic "PUBLIC KEY" PEM header,
    # so strip the "RSA " marker before handing over the data.
    with salt.utils.files.fopen(path, "rb") as f:
        data = f.read().replace(b"RSA ", b"")
    return RSA.load_pub_key_bio(BIO.MemoryBuffer(data))
def sign_message(privkey_path, message, passphrase=None):
    """
    Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature.
    """
    key = get_rsa_key(privkey_path, passphrase)
    log.debug("salt.crypt.sign_message: Signing message.")
    msg_bytes = salt.utils.stringutils.to_bytes(message)
    if not HAS_M2:
        return PKCS1_v1_5.new(key).sign(SHA.new(msg_bytes))
    # M2Crypto path: hash explicitly, then sign the digest
    digester = EVP.MessageDigest("sha1")
    digester.update(msg_bytes)
    return key.sign(digester.final())
def verify_signature(pubkey_path, message, signature):
    """
    Use Crypto.Signature.PKCS1_v1_5 to verify the signature on a message.
    Returns True for valid signature.
    """
    log.debug("salt.crypt.verify_signature: Loading public key")
    pubkey = get_rsa_pub_key(pubkey_path)
    log.debug("salt.crypt.verify_signature: Verifying signature")
    msg_bytes = salt.utils.stringutils.to_bytes(message)
    if not HAS_M2:
        return PKCS1_v1_5.new(pubkey).verify(SHA.new(msg_bytes), signature)
    # M2Crypto path: hash explicitly, then verify against the digest
    digester = EVP.MessageDigest("sha1")
    digester.update(msg_bytes)
    return pubkey.verify(digester.final(), signature)
def gen_signature(priv_path, pub_path, sign_path, passphrase=None):
    """
    creates a signature for the given public-key with
    the given private key and writes it to sign_path

    :param str priv_path: path to the signing private key
    :param str pub_path: path to the public key to sign
    :param str sign_path: destination file for the base64-encoded signature
    :param str passphrase: optional passphrase for the private key
    :rtype: bool
    :return: True when the signature was written, False when *sign_path*
        already exists (the existing signature is left untouched)
    """
    # Refuse to overwrite an existing signature -- and check BEFORE doing
    # the (relatively expensive) read + sign work.  The original computed
    # the signature first and had a second, unreachable existence check.
    if os.path.isfile(sign_path):
        log.trace(
            "Signature file %s already exists, please remove it first and " "try again",
            sign_path,
        )
        return False
    with salt.utils.files.fopen(pub_path) as fp_:
        mpub_64 = fp_.read()
    mpub_sig = sign_message(priv_path, mpub_64, passphrase)
    mpub_sig_64 = binascii.b2a_base64(mpub_sig)
    log.trace(
        "Calculating signature for %s with %s",
        os.path.basename(pub_path),
        os.path.basename(priv_path),
    )
    with salt.utils.files.fopen(sign_path, "wb+") as sig_f:
        sig_f.write(salt.utils.stringutils.to_bytes(mpub_sig_64))
    log.trace("Wrote signature to %s", sign_path)
    return True
def private_encrypt(key, message):
    """
    Generate an M2Crypto-compatible signature

    :param Crypto.PublicKey.RSA._RSAobj key: The RSA key object
    :param str message: The message to sign
    :rtype: str
    :return: The signature, or an empty string if the signature operation failed
    """
    if not HAS_M2:
        # Emulate M2Crypto's ANSI X9.31 padding through the rsax931 helper
        signer = salt.utils.rsax931.RSAX931Signer(key.exportKey("PEM"))
        return signer.sign(message)
    return key.private_encrypt(message, salt.utils.rsax931.RSA_X931_PADDING)
def public_decrypt(pub, message):
    """
    Verify an M2Crypto-compatible signature

    :param Crypto.PublicKey.RSA._RSAobj key: The RSA public key object
    :param str message: The signed message to verify
    :rtype: str
    :return: The message (or digest) recovered from the signature, or an
        empty string if the verification failed
    """
    if not HAS_M2:
        # Emulate M2Crypto's ANSI X9.31 padding through the rsax931 helper
        verifier = salt.utils.rsax931.RSAX931Verifier(pub.exportKey("PEM"))
        return verifier.verify(message)
    return pub.public_decrypt(message, salt.utils.rsax931.RSA_X931_PADDING)
def pwdata_decrypt(rsa_key, pwdata):
    """
    Decrypt PKCS#1 v1.5 password data with the given RSA private key.

    :param str rsa_key: PEM-encoded RSA private key
    :param bytes pwdata: the ciphertext to decrypt
    :rtype: str
    :return: the decrypted password as a unicode string
    """
    if HAS_M2:
        key = RSA.load_key_string(salt.utils.stringutils.to_bytes(rsa_key, "ascii"))
        password = key.private_decrypt(pwdata, RSA.pkcs1_padding)
    else:
        # PyCrypto(dome) returns a random sentinel on padding failure instead
        # of raising, to blunt Bleichenbacher-style padding oracles
        dsize = SHA.digest_size
        sentinel = Random.new().read(15 + dsize)
        key_obj = RSA.importKey(rsa_key)
        key_obj = PKCS1_v1_5_CIPHER.new(key_obj)
        password = key_obj.decrypt(pwdata, sentinel)
    return salt.utils.stringutils.to_unicode(password)
class MasterKeys(dict):
"""
The Master Keys class is used to manage the RSA public key pair used for
authentication by the master.
It also generates a signing key-pair if enabled with master_sign_key_name.
"""
def __init__(self, opts):
super().__init__()
self.opts = opts
self.pub_path = os.path.join(self.opts["pki_dir"], "master.pub")
self.rsa_path = os.path.join(self.opts["pki_dir"], "master.pem")
key_pass = salt.utils.sdb.sdb_get(self.opts["key_pass"], self.opts)
self.key = self.__get_keys(passphrase=key_pass)
self.pub_signature = None
# set names for the signing key-pairs
if opts["master_sign_pubkey"]:
# if only the signature is available, use that
if opts["master_use_pubkey_signature"]:
self.sig_path = os.path.join(
self.opts["pki_dir"], opts["master_pubkey_signature"]
)
if os.path.isfile(self.sig_path):
with salt.utils.files.fopen(self.sig_path) as fp_:
self.pub_signature = fp_.read()
log.info(
"Read %s's signature from %s",
os.path.basename(self.pub_path),
self.opts["master_pubkey_signature"],
)
else:
log.error(
"Signing the master.pub key with a signature is "
"enabled but no signature file found at the defined "
"location %s",
self.sig_path,
)
log.error(
"The signature-file may be either named differently "
"or has to be created with 'salt-key --gen-signature'"
)
sys.exit(1)
# create a new signing key-pair to sign the masters
# auth-replies when a minion tries to connect
else:
key_pass = salt.utils.sdb.sdb_get(
self.opts["signing_key_pass"], self.opts
)
self.pub_sign_path = os.path.join(
self.opts["pki_dir"], opts["master_sign_key_name"] + ".pub"
)
self.rsa_sign_path = os.path.join(
self.opts["pki_dir"], opts["master_sign_key_name"] + ".pem"
)
self.sign_key = self.__get_keys(name=opts["master_sign_key_name"])
# We need __setstate__ and __getstate__ to avoid pickling errors since
# some of the member variables correspond to Cython objects which are
# not picklable.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
    def __setstate__(self, state):
        # Unpickling re-runs __init__ with the stored opts: the key objects
        # are re-created from disk because they are not picklable.
        self.__init__(state["opts"])
    def __getstate__(self):
        # Pickle only the opts; __setstate__ rebuilds everything else.
        return {"opts": self.opts}
def | |
# <gh_stars>10-100  (repository-metadata artifact; commented out so the module parses)
"""Test AdsSymbol class.
:author: <NAME> <<EMAIL>>
:license: MIT, see license file or https://opensource.org/licenses/MIT
:created on: 2020-11-16
"""
import time
from datetime import datetime, timedelta
import struct
from ctypes import sizeof, pointer
import unittest
from unittest import mock
import pyads
from pyads.testserver import AdsTestServer, AdvancedHandler, PLCVariable
from pyads import constants, AdsSymbol, bytes_from_dict
from tests.test_connection_class import create_notification_struct
# Connection coordinates for the local AdsTestServer; the values themselves
# are pretty arbitrary (loopback address, standard PLC runtime port).
TEST_SERVER_AMS_NET_ID = "127.0.0.1.1.1"
TEST_SERVER_IP_ADDRESS = "127.0.0.1"
TEST_SERVER_AMS_PORT = pyads.PORT_SPS1
class AdsSymbolTestCase(unittest.TestCase):
"""Testcase for ADS symbol class"""
    @classmethod
    def setUpClass(cls):
        # type: () -> None
        """Setup the ADS test server.

        The server and handler are shared by all tests; per-test state is
        reset in setUp()."""
        cls.handler = AdvancedHandler()
        cls.test_server = AdsTestServer(handler=cls.handler, logging=False)
        cls.test_server.start()
        # wait a bit otherwise error might occur
        time.sleep(1)
    @classmethod
    def tearDownClass(cls):
        # type: () -> None
        """Tear down the test server."""
        cls.test_server.stop()
        # wait a bit for server to shutdown
        time.sleep(1)
    def setUp(self):
        # type: () -> None
        """Establish connection to the test server."""
        # Clear test server and handler so request-count assertions start at 0
        self.test_server.request_history = []
        self.handler.reset()
        # Create PLC variable that is added by default
        self.test_var = PLCVariable(
            "TestDouble", bytes(8), ads_type=constants.ADST_REAL64, symbol_type="LREAL"
        )
        self.test_var.comment = "Some variable of type double"
        self.test_var_type = pyads.constants.PLCTYPE_LREAL  # Corresponds with "LREAL"
        self.handler.add_variable(self.test_var)
        # The connection is opened per-test, usually via `with self.plc:`
        self.plc = pyads.Connection(
            TEST_SERVER_AMS_NET_ID, TEST_SERVER_AMS_PORT, TEST_SERVER_IP_ADDRESS
        )
    def assertAdsRequestsCount(self, expected):
        # Custom assertion: the test server records every ADS request it
        # receives, so the history length is the number of round-trips made.
        real = len(self.test_server.request_history)
        self.assertEqual(
            expected,
            real,
            msg="Expected {} requests, but {} have been made".format(expected, real),
        )
    def test_init_by_name(self):
        """Test symbol creation by name.

        Name-only construction triggers a single symbol-info lookup."""
        with self.plc:
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
        # Verify looked up info
        self.assertEqual(self.test_var.name, symbol.name)
        self.assertEqual(self.test_var.index_group, symbol.index_group)
        self.assertEqual(self.test_var.index_offset, symbol.index_offset)
        self.assertEqual(self.test_var_type, symbol.plc_type)
        self.assertEqual(self.test_var.symbol_type, symbol.symbol_type)
        self.assertEqual(self.test_var.comment, symbol.comment)
        self.assertAdsRequestsCount(1)  # Only a single READWRITE must have
        # been made
    def test_init_by_name_array(self):
        """Test symbol creation when it's an array"""
        var = PLCVariable(
            "ArrayVar",
            struct.pack("<5h", *range(5)),
            ads_type=constants.ADST_INT16,  # dataType does not represent array unfortunately
            symbol_type="ARRAY [1..5] OF INT",  # Array looks like this in PLC
        )
        # Register the array variable with the handler manually
        self.handler.add_variable(var)
        self.plc.open()
        symbol = AdsSymbol(self.plc, name=var.name)
        # Verify looked up info
        self.assertEqual(var.name, symbol.name)
        self.assertEqual(var.index_group, symbol.index_group)
        self.assertEqual(var.index_offset, symbol.index_offset)
        self.assertEqual(constants.PLCTYPE_ARR_INT(5), symbol.plc_type)
        self.assertEqual(var.symbol_type, symbol.symbol_type)
        self.assertIsNone(symbol.comment)
        my_list = symbol.read()
        self.assertIsInstance(my_list, list)
        self.assertEqual(5, len(my_list))
        my_list[4] = 420
        symbol.write(my_list)  # Modify array
        my_list2 = symbol.read()  # Read again
        self.assertEqual(my_list, my_list2)
        self.assertAdsRequestsCount(4)  # A READWRITE (for info), READ,
        # WRITE AND a READ again
def test_init_by_name_matrix_style(self):
"""Test symbol creation when it's an array denoted as matrix
This is how an array originating from Simulink could look like.
"""
var = PLCVariable(
"ArrayVar",
struct.pack("<21b", *range(21)),
ads_type=constants.ADST_VOID,
symbol_type="matrix_21_int8_T", # Simulink array looks like this
index_group = 123,
index_offset = 100,
)
self.handler.add_variable(var)
self.plc.open()
symbol = AdsSymbol(
self.plc,
name=var.name,
index_group=var.index_group,
index_offset=var.index_offset,
symbol_type=var.symbol_type, # String initialization
) # No lookup
# Verify looked up info
self.assertEqual(constants.PLCTYPE_ARR_SINT(21), symbol.plc_type)
self.assertEqual(var.symbol_type, symbol.symbol_type)
self.assertAdsRequestsCount(0) # No requests
    def test_init_missing_datatype(self):
        """Test symbol creation when integer datatype is missing.

        The string symbol_type should be enough to resolve the plc_type."""
        # Modify variable type
        self.test_var.ads_type = 0
        self.test_var.plc_type = constants.PLCTYPE_SINT
        self.test_var.symbol_type = "SINT"
        # Variable is reference to database entry, so no saving required
        with self.plc:
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
        # Verify looked up info
        self.assertEqual(self.test_var.plc_type, symbol.plc_type)
        self.assertEqual(self.test_var.symbol_type, symbol.symbol_type)
        self.assertAdsRequestsCount(1)  # Only a single READWRITE must have
        # been made
    def test_init_invalid(self):
        """Test symbol creation with missing info.

        Indices without a name or type are not enough to define a symbol."""
        with self.plc:
            with self.assertRaises(ValueError):
                AdsSymbol(
                    self.plc,
                    index_group=self.test_var.index_group,
                    index_offset=self.test_var.index_offset,
                )
    def test_repr(self):
        """Test debug string"""
        with self.plc:
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
            text = str(symbol)
            self.assertIn(self.test_var.name, text)
            self.assertIn(self.test_var.symbol_type, text)  # Make sure name
            # and type are printed
    def test_type_resolve(self):
        """Test if PLCTYPE is resolved correctly"""
        with self.plc:
            # Initialization from a ctypes PLC type constant
            symbol_const = AdsSymbol(self.plc, "NonExistentVar", 123, 0,
                                     pyads.PLCTYPE_UDINT)
            self.assertEqual(constants.PLCTYPE_UDINT, symbol_const.plc_type)
            self.assertNotIsInstance(symbol_const.symbol_type, str)  # symbol_type
            # is not a neat human-readable string in this case
            # Initialization from the type's string name
            symbol_str = AdsSymbol(self.plc, "NonExistentVar", 123, 0, "UDINT")
            self.assertEqual(constants.PLCTYPE_UDINT, symbol_str.plc_type)
            self.assertEqual("UDINT", symbol_str.symbol_type)
            # An unrecognized type string leaves plc_type unset
            symbol_missing = AdsSymbol(
                self.plc, "NonExistentVar", 123, 0, "INCORRECT_TYPE"
            )
            self.assertIsNone(symbol_missing.plc_type)
        self.assertAdsRequestsCount(0)  # No requests
    def test_init_manual(self):
        """Test symbol without lookup.

        Fully-specified construction must not produce any server traffic."""
        with self.plc:
            # Create symbol while providing everything:
            symbol = AdsSymbol(
                self.plc,
                name=self.test_var.name,
                index_group=self.test_var.index_group,
                index_offset=self.test_var.index_offset,
                symbol_type=self.test_var_type,
            )
            self.assertAdsRequestsCount(0)  # No requests yet
            self.plc.write(
                self.test_var.index_group,
                self.test_var.index_offset,
                12.3,
                self.test_var_type,
            )
            self.assertEqual(12.3, symbol.read())
        self.assertAdsRequestsCount(2)  # Only a WRITE followed by a READ
    def test_init_invalid_type(self):
        """Test symbol lookup when type cannot be found
        There was a read/write check that verifies the plc_typ was not None,
        but this was removed."""
        var = PLCVariable(
            "UnknownType", b"\x00", ads_type=constants.ADST_VOID, symbol_type="non_existent_type", index_group=123,
            index_offset=100
        )
        self.handler.add_variable(var)
        with self.plc:
            # Create symbol while providing everything:
            symbol = AdsSymbol(self.plc, name=var.name)
            self.assertEqual(var.symbol_type, symbol.symbol_type)
            # plc_type stayed None, so the read fails deep inside pyads_ex
            with self.assertRaises(TypeError) as cm:
                # Error is thrown inside pyads_ex
                symbol.read()
            self.assertIn("NoneType", str(cm.exception))
        self.assertAdsRequestsCount(1)  # Only a WRITE followed by a READ
    def test_read_write_errors(self):
        """Test read/write on invalid AdsSymbol
        There was a read/write check that verifies the plc_typ was not None,
        but this was removed."""
        symbol = AdsSymbol(self.plc, "MySymbol", 123, 0, "BYTE")
        with self.assertRaises(ValueError) as cm:
            symbol.read()  # Cannot read with unopened Connection
        self.assertIn("missing or closed Connection", str(cm.exception))
        self.plc.open()
        # With the connection open, invalid indices surface as TypeErrors
        # raised inside pyads_ex rather than as explicit validation errors.
        symbol.index_offset = None  # Set index to something invalid
        # with self.assertRaises(ValueError) as cm:
        #     symbol.read()  # Cannot read with invalid index
        # self.assertIn('invalid values for', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            symbol.read()  # Catch error inside pyads_ex
        self.assertIn("integer is required", str(cm.exception))
        symbol.index_group = None
        with self.assertRaises(TypeError) as cm:
            symbol.read()  # Catch error inside pyads_ex
        self.assertIn("integer is required", str(cm.exception))
        symbol.index_offset = 'A'
        with self.assertRaises(TypeError) as cm:
            symbol.read()  # Catch error inside pyads_ex
        self.assertIn("integer is required", str(cm.exception))
        symbol.index_group = 'B'
        with self.assertRaises(TypeError) as cm:
            symbol.read()  # Catch error inside pyads_ex
        self.assertIn("integer is required", str(cm.exception))
    def test_read(self):
        """Test symbol value reading"""
        with self.plc:
            # Seed the server-side value first, then read it back via the symbol
            self.plc.write(
                self.test_var.index_group,
                self.test_var.index_offset,
                420.0,
                self.test_var_type,
            )
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
            self.assertEqual(420.0, symbol.read())
        self.assertAdsRequestsCount(3)  # WRITE, READWRITE for info and
        # final read
    def test_read_structure(self):
        """Test symbol value reading with structures."""
        structure_def = (
            ("i", pyads.PLCTYPE_INT, 1),
            ("s", pyads.PLCTYPE_STRING, 1),
        )
        values = {"i": 1, "s": "foo"}
        # Serialize the dict through the structure definition for the server
        data = bytes(bytes_from_dict(values, structure_def))
        self.handler.add_variable(PLCVariable("TestStructure", data, constants.ADST_VOID, symbol_type="TestStructure"))
        with self.plc:
            symbol = self.plc.get_symbol("TestStructure", structure_def=structure_def)
            read_values = symbol.read()
        self.assertEqual(values, read_values)
    def test_read_structure_array(self):
        """Test symbol value reading with an array of structures."""
        structure_def = (
            ("a", pyads.PLCTYPE_INT, 1),
            ("b", pyads.PLCTYPE_INT, 1),
            ("s", pyads.PLCTYPE_STRING, 1)
        )
        values = [{"a": 1, "b": 2, "s": "foo"}, {"a": 3, "b": 4, "s": "bar"}]
        # Serialize the list of dicts through the structure definition
        data = bytes(bytes_from_dict(values, structure_def))
        self.handler.add_variable(
            PLCVariable("TestStructure", data, constants.ADST_VOID, symbol_type="TestStructure"))
        with self.plc:
            symbol = self.plc.get_symbol("TestStructure", structure_def=structure_def, array_size=2)
            read_values = symbol.read()
        self.assertEqual(values, read_values)
    def test_write(self):
        """Test symbol value writing"""
        with self.plc:
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
            symbol.write(3.14)  # Write
            # Read the raw value back, bypassing the symbol layer
            r_value = self.plc.read(
                self.test_var.index_group,
                self.test_var.index_offset,
                self.test_var_type,
            )
        self.assertEqual(3.14, r_value)
        self.assertAdsRequestsCount(3)  # READWRITE for info, WRITE and
        # test read
    def test_write_structure(self):
        """Test symbol writing with structures."""
        structure_def = (
            ("i", pyads.PLCTYPE_INT, 1),
            ("s", pyads.PLCTYPE_STRING, 1),
        )
        values = {"i": 1, "s": "foo"}
        data = bytes(bytes_from_dict(values, structure_def))
        self.handler.add_variable(PLCVariable("TestStructure", data, constants.ADST_VOID, symbol_type="TestStructure"))
        # Write different values over the initial ones and verify round-trip
        write_values = {"i": 42, "s": "bar"}
        with self.plc:
            symbol = self.plc.get_symbol("TestStructure", structure_def=structure_def)
            symbol.write(write_values)
            read_values = symbol.read()
        self.assertEqual(write_values, read_values)
    def test_write_structure_array(self):
        """Test symbol writing with an array of structures."""
        structure_def = (
            ("a", pyads.PLCTYPE_INT, 1),
            ("b", pyads.PLCTYPE_INT, 1),
            ("s", pyads.PLCTYPE_STRING, 1)
        )
        values = [{"a": 1, "b": 2, "s": "foo"}, {"a": 3, "b": 4, "s": "bar"}]
        data = bytes(bytes_from_dict(values, structure_def))
        self.handler.add_variable(
            PLCVariable("TestStructure", data, constants.ADST_VOID, symbol_type="TestStructure"))
        # Write different values over the initial ones and verify round-trip
        write_values = [{"a": 42, "b": 43, "s": "hello"}, {"a": 44, "b": 45, "s": "world"}]
        with self.plc:
            symbol = self.plc.get_symbol("TestStructure", structure_def=structure_def, array_size=2)
            symbol.write(write_values)
            read_values = symbol.read()
        self.assertEqual(write_values, read_values)
    def test_value(self):
        """Test the buffer property"""
        with self.plc:
            symbol = AdsSymbol(self.plc, name=self.test_var.name)
            symbol.value = 420.0  # Shouldn't change anything yet
            self.assertAdsRequestsCount(1)  # Only a READWRITE for info
            symbol.write()
            self.assertAdsRequestsCount(2)  # Written from buffer
            symbol.read()
            # Repeated .value accesses must hit the local buffer only;
            # the loop result is deliberately unused.
            for i in range(10):
                custom_buffer = symbol.value
            self.assertEqual(420.0, symbol.value)
            self.assertAdsRequestsCount(3)  # Read only once
    def test_get_symbol(self):
        """Test symbol creation via the Connection.get_symbol() factory"""
        with self.plc:
            symbol = self.plc.get_symbol(self.test_var.name)
        # Verify looked up info
        self.assertEqual(self.test_var.name, symbol.name)
        self.assertAdsRequestsCount(1)  # Only a single READWRITE must have
        # been made
def test_string(self):
    """Test symbol with a string value"""
    # Register a 50-byte STRING variable on the test server.
    self.handler.add_variable(
        PLCVariable("my_text", bytes(50), ads_type=constants.ADST_STRING,
                    symbol_type="STRING(50)"))
    with self.plc:
        sym = self.plc.get_symbol("my_text")
        sym.write("I am a string!")
        # The string must round-trip through the symbol unchanged.
        self.assertEqual(sym.read(), "I am a string!")
def test_add_notification(self):
    """Test notification registering"""

    def noop_callback(*_):
        return

    with self.plc:
        sym = self.plc.get_symbol(self.test_var.name)
        handles = sym.add_device_notification(noop_callback)
        sym.del_device_notification(handles)
        # Exactly three requests: READWRITE (info), ADDNOTE and DELNOTE.
        self.assertAdsRequestsCount(3)
def test_add_notification_delete(self):
    """Test that symbol deletion cleans up a registered notification."""

    def my_callback(*_):
        return

    # Deliberately open without a `with` block:
    # with `self.plc: ... ` without del_device_notification causes a
    # socket write error, so the connection must outlive the symbol here.
    self.plc.open()
    symbol = self.plc.get_symbol(self.test_var.name)
    symbol.add_device_notification(my_callback)
    del symbol  # Force variable deletion; cleanup must delete the notification
    self.assertAdsRequestsCount(3)  # READWRITE, ADDNOTE and DELNOTE
def test_notification_callback(self):
| |
=16776960 # from enum XlRgbColor
# --- Auto-generated Excel COM enum constants ---
# Each line mirrors one member of the Excel enum named in its trailing
# comment (format matches win32com "makepy" output — presumably generated;
# TODO confirm). XlRgbColor values are BGR-packed integers (blue in the
# high byte: rgbBlue == 0xFF0000 == 16711680, rgbRed == 0x0000FF == 255).
# Do not edit by hand.
rgbAquamarine =13959039 # from enum XlRgbColor
rgbAzure =16777200 # from enum XlRgbColor
rgbBeige =14480885 # from enum XlRgbColor
rgbBisque =12903679 # from enum XlRgbColor
rgbBlack =0 # from enum XlRgbColor
rgbBlanchedAlmond =13495295 # from enum XlRgbColor
rgbBlue =16711680 # from enum XlRgbColor
rgbBlueViolet =14822282 # from enum XlRgbColor
rgbBrown =2763429 # from enum XlRgbColor
rgbBurlyWood =8894686 # from enum XlRgbColor
rgbCadetBlue =10526303 # from enum XlRgbColor
rgbChartreuse =65407 # from enum XlRgbColor
rgbCoral =5275647 # from enum XlRgbColor
rgbCornflowerBlue =15570276 # from enum XlRgbColor
rgbCornsilk =14481663 # from enum XlRgbColor
rgbCrimson =3937500 # from enum XlRgbColor
rgbDarkBlue =9109504 # from enum XlRgbColor
rgbDarkCyan =9145088 # from enum XlRgbColor
rgbDarkGoldenrod =755384 # from enum XlRgbColor
rgbDarkGray =11119017 # from enum XlRgbColor
rgbDarkGreen =25600 # from enum XlRgbColor
rgbDarkGrey =11119017 # from enum XlRgbColor
rgbDarkKhaki =7059389 # from enum XlRgbColor
rgbDarkMagenta =9109643 # from enum XlRgbColor
rgbDarkOliveGreen =3107669 # from enum XlRgbColor
rgbDarkOrange =36095 # from enum XlRgbColor
rgbDarkOrchid =13382297 # from enum XlRgbColor
rgbDarkRed =139 # from enum XlRgbColor
rgbDarkSalmon =8034025 # from enum XlRgbColor
rgbDarkSeaGreen =9419919 # from enum XlRgbColor
rgbDarkSlateBlue =9125192 # from enum XlRgbColor
rgbDarkSlateGray =5197615 # from enum XlRgbColor
rgbDarkSlateGrey =5197615 # from enum XlRgbColor
rgbDarkTurquoise =13749760 # from enum XlRgbColor
rgbDarkViolet =13828244 # from enum XlRgbColor
rgbDeepPink =9639167 # from enum XlRgbColor
rgbDeepSkyBlue =16760576 # from enum XlRgbColor
rgbDimGray =6908265 # from enum XlRgbColor
rgbDimGrey =6908265 # from enum XlRgbColor
rgbDodgerBlue =16748574 # from enum XlRgbColor
rgbFireBrick =2237106 # from enum XlRgbColor
rgbFloralWhite =15792895 # from enum XlRgbColor
rgbForestGreen =2263842 # from enum XlRgbColor
rgbFuchsia =16711935 # from enum XlRgbColor
rgbGainsboro =14474460 # from enum XlRgbColor
rgbGhostWhite =16775416 # from enum XlRgbColor
rgbGold =55295 # from enum XlRgbColor
rgbGoldenrod =2139610 # from enum XlRgbColor
rgbGray =8421504 # from enum XlRgbColor
rgbGreen =32768 # from enum XlRgbColor
rgbGreenYellow =3145645 # from enum XlRgbColor
rgbGrey =8421504 # from enum XlRgbColor
rgbHoneydew =15794160 # from enum XlRgbColor
rgbHotPink =11823615 # from enum XlRgbColor
rgbIndianRed =6053069 # from enum XlRgbColor
rgbIndigo =8519755 # from enum XlRgbColor
rgbIvory =15794175 # from enum XlRgbColor
rgbKhaki =9234160 # from enum XlRgbColor
rgbLavender =16443110 # from enum XlRgbColor
rgbLavenderBlush =16118015 # from enum XlRgbColor
rgbLawnGreen =64636 # from enum XlRgbColor
rgbLemonChiffon =13499135 # from enum XlRgbColor
rgbLightBlue =15128749 # from enum XlRgbColor
rgbLightCoral =8421616 # from enum XlRgbColor
rgbLightCyan =9145088 # from enum XlRgbColor
rgbLightGoldenrodYellow =13826810 # from enum XlRgbColor
rgbLightGray =13882323 # from enum XlRgbColor
rgbLightGreen =9498256 # from enum XlRgbColor
rgbLightGrey =13882323 # from enum XlRgbColor
rgbLightPink =12695295 # from enum XlRgbColor
rgbLightSalmon =8036607 # from enum XlRgbColor
rgbLightSeaGreen =11186720 # from enum XlRgbColor
rgbLightSkyBlue =16436871 # from enum XlRgbColor
rgbLightSlateGray =10061943 # from enum XlRgbColor
rgbLightSlateGrey =10061943 # from enum XlRgbColor
rgbLightSteelBlue =14599344 # from enum XlRgbColor
rgbLightYellow =14745599 # from enum XlRgbColor
rgbLime =65280 # from enum XlRgbColor
rgbLimeGreen =3329330 # from enum XlRgbColor
rgbLinen =15134970 # from enum XlRgbColor
rgbMaroon =128 # from enum XlRgbColor
rgbMediumAquamarine =11206502 # from enum XlRgbColor
rgbMediumBlue =13434880 # from enum XlRgbColor
rgbMediumOrchid =13850042 # from enum XlRgbColor
rgbMediumPurple =14381203 # from enum XlRgbColor
rgbMediumSeaGreen =7451452 # from enum XlRgbColor
rgbMediumSlateBlue =15624315 # from enum XlRgbColor
rgbMediumSpringGreen =10156544 # from enum XlRgbColor
rgbMediumTurquoise =13422920 # from enum XlRgbColor
rgbMediumVioletRed =8721863 # from enum XlRgbColor
rgbMidnightBlue =7346457 # from enum XlRgbColor
rgbMintCream =16449525 # from enum XlRgbColor
rgbMistyRose =14804223 # from enum XlRgbColor
rgbMoccasin =11920639 # from enum XlRgbColor
rgbNavajoWhite =11394815 # from enum XlRgbColor
rgbNavy =8388608 # from enum XlRgbColor
rgbNavyBlue =8388608 # from enum XlRgbColor
rgbOldLace =15136253 # from enum XlRgbColor
rgbOlive =32896 # from enum XlRgbColor
rgbOliveDrab =2330219 # from enum XlRgbColor
rgbOrange =42495 # from enum XlRgbColor
rgbOrangeRed =17919 # from enum XlRgbColor
rgbOrchid =14053594 # from enum XlRgbColor
rgbPaleGoldenrod =7071982 # from enum XlRgbColor
rgbPaleGreen =10025880 # from enum XlRgbColor
rgbPaleTurquoise =15658671 # from enum XlRgbColor
rgbPaleVioletRed =9662683 # from enum XlRgbColor
rgbPapayaWhip =14020607 # from enum XlRgbColor
rgbPeachPuff =12180223 # from enum XlRgbColor
rgbPeru =4163021 # from enum XlRgbColor
rgbPink =13353215 # from enum XlRgbColor
rgbPlum =14524637 # from enum XlRgbColor
rgbPowderBlue =15130800 # from enum XlRgbColor
rgbPurple =8388736 # from enum XlRgbColor
rgbRed =255 # from enum XlRgbColor
rgbRosyBrown =9408444 # from enum XlRgbColor
rgbRoyalBlue =14772545 # from enum XlRgbColor
rgbSalmon =7504122 # from enum XlRgbColor
rgbSandyBrown =6333684 # from enum XlRgbColor
rgbSeaGreen =5737262 # from enum XlRgbColor
rgbSeashell =15660543 # from enum XlRgbColor
rgbSienna =2970272 # from enum XlRgbColor
rgbSilver =12632256 # from enum XlRgbColor
rgbSkyBlue =15453831 # from enum XlRgbColor
rgbSlateBlue =13458026 # from enum XlRgbColor
rgbSlateGray =9470064 # from enum XlRgbColor
rgbSlateGrey =9470064 # from enum XlRgbColor
rgbSnow =16448255 # from enum XlRgbColor
rgbSpringGreen =8388352 # from enum XlRgbColor
rgbSteelBlue =11829830 # from enum XlRgbColor
rgbTan =9221330 # from enum XlRgbColor
rgbTeal =8421376 # from enum XlRgbColor
rgbThistle =14204888 # from enum XlRgbColor
rgbTomato =4678655 # from enum XlRgbColor
rgbTurquoise =13688896 # from enum XlRgbColor
rgbViolet =15631086 # from enum XlRgbColor
rgbWheat =11788021 # from enum XlRgbColor
rgbWhite =16777215 # from enum XlRgbColor
rgbWhiteSmoke =16119285 # from enum XlRgbColor
rgbYellow =65535 # from enum XlRgbColor
rgbYellowGreen =3329434 # from enum XlRgbColor
xlAlways =1 # from enum XlRobustConnect
xlAsRequired =0 # from enum XlRobustConnect
xlNever =2 # from enum XlRobustConnect
xlAllAtOnce =2 # from enum XlRoutingSlipDelivery
xlOneAfterAnother =1 # from enum XlRoutingSlipDelivery
xlNotYetRouted =0 # from enum XlRoutingSlipStatus
xlRoutingComplete =2 # from enum XlRoutingSlipStatus
xlRoutingInProgress =1 # from enum XlRoutingSlipStatus
xlColumns =2 # from enum XlRowCol
xlRows =1 # from enum XlRowCol
xlAutoActivate =3 # from enum XlRunAutoMacro
xlAutoClose =2 # from enum XlRunAutoMacro
xlAutoDeactivate =4 # from enum XlRunAutoMacro
xlAutoOpen =1 # from enum XlRunAutoMacro
xlDoNotSaveChanges =2 # from enum XlSaveAction
xlSaveChanges =1 # from enum XlSaveAction
xlExclusive =3 # from enum XlSaveAsAccessMode
xlNoChange =1 # from enum XlSaveAsAccessMode
xlShared =2 # from enum XlSaveAsAccessMode
xlLocalSessionChanges =2 # from enum XlSaveConflictResolution
xlOtherSessionChanges =3 # from enum XlSaveConflictResolution
xlUserResolution =1 # from enum XlSaveConflictResolution
xlScaleLinear =-4132 # from enum XlScaleType
xlScaleLogarithmic =-4133 # from enum XlScaleType
xlNext =1 # from enum XlSearchDirection
xlPrevious =2 # from enum XlSearchDirection
xlByColumns =2 # from enum XlSearchOrder
xlByRows =1 # from enum XlSearchOrder
xlWithinSheet =1 # from enum XlSearchWithin
xlWithinWorkbook =2 # from enum XlSearchWithin
xlSeriesNameLevelAll =-1 # from enum XlSeriesNameLevel
xlSeriesNameLevelCustom =-2 # from enum XlSeriesNameLevel
xlSeriesNameLevelNone =-3 # from enum XlSeriesNameLevel
xlChart =-4109 # from enum XlSheetType
xlDialogSheet =-4116 # from enum XlSheetType
xlExcel4IntlMacroSheet =4 # from enum XlSheetType
xlExcel4MacroSheet =3 # from enum XlSheetType
xlWorksheet =-4167 # from enum XlSheetType
xlSheetHidden =0 # from enum XlSheetVisibility
xlSheetVeryHidden =2 # from enum XlSheetVisibility
xlSheetVisible =-1 # from enum XlSheetVisibility
xlSizeIsArea =1 # from enum XlSizeRepresents
xlSizeIsWidth =2 # from enum XlSizeRepresents
xlSlicer =1 # from enum XlSlicerCacheType
xlTimeline =2 # from enum XlSlicerCacheType
xlSlicerCrossFilterHideButtonsWithNoData=4 # from enum XlSlicerCrossFilterType
xlSlicerCrossFilterShowItemsWithDataAtTop=2 # from enum XlSlicerCrossFilterType
xlSlicerCrossFilterShowItemsWithNoData=3 # from enum XlSlicerCrossFilterType
xlSlicerNoCrossFilter =1 # from enum XlSlicerCrossFilterType
xlSlicerSortAscending =2 # from enum XlSlicerSort
xlSlicerSortDataSourceOrder =1 # from enum XlSlicerSort
xlSlicerSortDescending =3 # from enum XlSlicerSort
xlSmartTagControlActiveX =13 # from enum XlSmartTagControlType
xlSmartTagControlButton =6 # from enum XlSmartTagControlType
xlSmartTagControlCheckbox =9 # from enum XlSmartTagControlType
xlSmartTagControlCombo =12 # from enum XlSmartTagControlType
xlSmartTagControlHelp =3 # from enum XlSmartTagControlType
xlSmartTagControlHelpURL =4 # from enum XlSmartTagControlType
xlSmartTagControlImage =8 # from enum XlSmartTagControlType
xlSmartTagControlLabel =7 # from enum XlSmartTagControlType
xlSmartTagControlLink =2 # from enum XlSmartTagControlType
xlSmartTagControlListbox =11 # from enum XlSmartTagControlType
xlSmartTagControlRadioGroup =14 # from enum XlSmartTagControlType
xlSmartTagControlSeparator =5 # from enum XlSmartTagControlType
xlSmartTagControlSmartTag =1 # from enum XlSmartTagControlType
xlSmartTagControlTextbox =10 # from enum XlSmartTagControlType
xlButtonOnly =2 # from enum XlSmartTagDisplayMode
xlDisplayNone =1 # from enum XlSmartTagDisplayMode
xlIndicatorAndButton =0 # from enum XlSmartTagDisplayMode
xlSortNormal =0 # from enum XlSortDataOption
xlSortTextAsNumbers =1 # from enum XlSortDataOption
xlPinYin =1 # from enum XlSortMethod
xlStroke =2 # from enum XlSortMethod
xlCodePage =2 # from enum XlSortMethodOld
xlSyllabary =1 # from enum XlSortMethodOld
xlSortOnCellColor =1 # from enum XlSortOn
xlSortOnFontColor =2 # from enum XlSortOn
xlSortOnIcon =3 # from enum XlSortOn
xlSortOnValues =0 # from enum XlSortOn
xlAscending =1 # from enum XlSortOrder
xlDescending =2 # from enum XlSortOrder
xlSortColumns =1 # from enum XlSortOrientation
xlSortRows =2 # from enum XlSortOrientation
xlSortLabels =2 # from enum XlSortType
xlSortValues =1 # from enum XlSortType
xlSourceAutoFilter =3 # from enum XlSourceType
xlSourceChart =5 # from enum XlSourceType
xlSourcePivotTable =6 # from enum XlSourceType
xlSourcePrintArea =2 # from enum XlSourceType
xlSourceQuery =7 # from enum XlSourceType
xlSourceRange =4 # from enum XlSourceType
xlSourceSheet =1 # from enum XlSourceType
xlSourceWorkbook =0 # from enum XlSourceType
xlSpanishTuteoAndVoseo =1 # from enum XlSpanishModes
xlSpanishTuteoOnly =0 # from enum XlSpanishModes
xlSpanishVoseoOnly =2 # from enum XlSpanishModes
xlSparkScaleCustom =3 # from enum XlSparkScale
xlSparkScaleGroup =1 # from enum XlSparkScale
xlSparkScaleSingle =2 # from enum XlSparkScale
xlSparkColumn =2 # from enum XlSparkType
xlSparkColumnStacked100 =3 # from enum XlSparkType
xlSparkLine =1 # from enum XlSparkType
xlSparklineColumnsSquare =2 # from enum XlSparklineRowCol
xlSparklineNonSquare =0 # from enum XlSparklineRowCol
xlSparklineRowsSquare =1 # from enum XlSparklineRowCol
xlSpeakByColumns =1 # from enum XlSpeakDirection
xlSpeakByRows =0 # from enum XlSpeakDirection
xlErrors =16 # from enum XlSpecialCellsValue
xlLogical =4 # from enum XlSpecialCellsValue
xlNumbers =1 # from enum XlSpecialCellsValue
xlTextValues =2 # from enum XlSpecialCellsValue
xlColorScaleBlackWhite =3 # from enum XlStdColorScale
xlColorScaleGYR =2 # from enum XlStdColorScale
xlColorScaleRYG =1 # from enum XlStdColorScale
xlColorScaleWhiteBlack =4 # from | |
threshold = 0.015 # acceptance thr when comparing //PARAM
matchings = list()
steps = 32 # steps on spin //PARAM
for i in range(steps):
trans = np.eye(4) # set transformation amtrix
trans[:3,:3] = source.get_rotation_matrix_from_xyz((0,0, -(np.pi/(steps/2))*i)) # add rotation
reg_p2l = o3d.pipelines.registration.evaluate_registration(source, target, threshold, trans) # evaluate registration
matchings.append(reg_p2l.fitness)
#print("- matching: " + str(reg_p2l.fitness))
#draw_registration_result(source, target, trans)
best_matching = max(matchings) # get best fitness
best_idx = matchings.index(best_matching) # get idx of rotation with best fitness
return best_matching, (360/steps)*(best_idx)
def print_chain(chain, maxs=None):
    """Display a chain of pixel coordinates as a binary image.

    Args:
        chain: sequence of index vectors; element [0] is used as the row
            and element [1] as the column of each lit pixel.
        maxs: optional precomputed per-axis maxima defining the image size.
            Computed from `chain` when omitted or empty. (The original
            default `maxs=np.array([])` was a mutable default argument;
            `None` is the safe equivalent and remains backward-compatible.)
    """
    if maxs is None or maxs.size == 0:
        maxs = np.amax(chain, axis=0)  # per-axis maxima of the chain
    # NOTE(review): the image shape is taken from maxs[1]/maxs[2] while the
    # pixels are indexed with v[0]/v[1]; this only lines up when callers
    # pass `maxs` from a 3-axis voxel grid. With the default path (maxs
    # computed from a 2-column chain) maxs[2] would raise IndexError --
    # TODO confirm intended axes against callers.
    matrix = np.zeros((maxs[1] + 1, maxs[2] + 1), dtype=int)
    for v in chain:  # original used enumerate() but never used the index
        matrix[v[0], v[1]] = 1
    plt.imshow(matrix)
    plt.show()
def get_connectivity(array):
    """Decompose a 2-D binary array into pixel chains and connexion points.

    Traces 1-pixel-wide curves with get_chain(), consuming `array`
    destructively: every visited pixel is zeroed so it cannot be re-found.

    Args:
        array: 2-D binary (0/1) array; MODIFIED IN PLACE.

    Returns:
        tuple: (list of traced chains as (N, 2) arrays,
                list of connexion points where chains fork or meet).
    """
    chains = list()
    connexions = list()
    # get starting point
    nonzero = np.transpose(np.nonzero(array)) # (row, col) of every set pixel
    mid = False # flag: True when forced to start in the middle of a chain
    start_points = list()
    for index in nonzero: # prefer an endpoint: a pixel with exactly 1 neighbour
        neighbour_list = get_neighbours(index, array, 1) # neighbour radius //PARAM
        if len(neighbour_list)==1: # first endpoint found becomes the start
            start_points.append(index)
            break
    if not start_points: # closed curve: no endpoints exist, start mid-chain
        mid = True # set to true the mid-chain start flag
        for index in nonzero: # for each nonzero
            neighbour_list = get_neighbours(index, array, 1) # //PARAM
            if len(neighbour_list)==2: # a regular mid-chain pixel
                start_points.append(index)
                break
    while start_points: # keep tracing while starting points remain
        start = start_points[0] # set start point of new chain
        start_points.pop(0) # delete start from start_points list
        array[start[0],start[1]] = 0 # consume start pixel (set to 0)
        if mid == True: # mid-chain start: trace both directions and join them
            mid = False # only the very first trace can be mid-chain
            neighbour_list = get_neighbours(start, array, 1) # both sides //PARAM
            chain0 = [np.array(start)] # seed chain with the start pixel
            # First direction: grow from one neighbour of `start`.
            chain1, connexion1, new_starts1 = get_chain(neighbour_list[0], array, chain0)
            for idx in chain1: # consume chain1 pixels (set to 0)
                array[idx[0], idx[1]] = 0
            if connexion1.size != 0: # a fork was hit
                connexions.append(connexion1) # save it
                array[connexion1[0], connexion1[1]] = 0 # consume connexion pixel
            start_points = start_points + new_starts1 # queue branches found at the fork
            for idx in new_starts1: # consume them so they are not re-found
                array[idx[0], idx[1]] = 0
            chain0 = [np.array(start)] # fresh seed for the opposite direction
            # Second direction: grow from the other neighbour of `start`.
            chain2, connexion2, new_starts2 = get_chain(neighbour_list[1], array, chain0)
            for idx in chain2:
                array[idx[0], idx[1]] = 0
            if connexion2.size != 0:
                connexions.append(connexion2)
                array[connexion2[0], connexion2[1]] = 0
            start_points = start_points + new_starts2
            for idx in new_starts2:
                array[idx[0], idx[1]] = 0
            if chain2.shape[0]>2: # chain2 longer than just start + first link
                chain2 = np.delete(chain2, 0, 0) # drop duplicated start point
            chain2 = np.flipud(chain2) # reverse so chain2 flows INTO start
            chain = np.vstack((chain2,chain1)) # join both halves into one chain
            chains.append(chain) # store chain
        else:
            chain, connexion, new_starts = get_chain(start, array, []) # trace one direction
            chains.append(chain) # store chain
            for idx in chain: # consume chain pixels
                array[idx[0],idx[1]] = 0
            if connexion.size != 0: # a fork was hit
                connexions.append(connexion)
                array[connexion[0],connexion[1]] = 0
            start_points = start_points + new_starts # queue branches from the fork
            for idx in new_starts: # consume them so they are not re-found
                array[idx[0], idx[1]] = 0
    return chains, connexions
def get_chain(start, array, chain=None):
    """Trace one pixel chain through `array` starting from `start`.

    Repeatedly steps from the last link to an unvisited 1-valued neighbour
    until the chain dead-ends (no neighbours left) or forks (several
    neighbours left).

    Args:
        start: (row, col) index of the first link.
        array: 2-D binary array being traced (read here, not modified).
        chain: optional list of already-visited links to extend. A fresh
            list is created when omitted -- the original `chain=[]` was a
            mutable default argument shared across calls.

    Returns:
        tuple: (chain as an (N, 2) ndarray,
                connexion point (empty ndarray when no fork was hit),
                list of new starting points discovered at a fork).
    """
    if chain is None:
        chain = []
    chain.append(start)  # the starting point is always the first new link
    connexion = np.array([])
    new_starts = list()
    growing = True  # renamed from `next`, which shadowed the builtin
    while growing:
        # Neighbours of the last link within distance 1  //PARAM
        neighbour_list = get_neighbours(chain[-1], array, 1)
        # Keep only neighbours not already in the chain, so the walk can
        # only advance forward (never step back onto itself).
        neighbour_list = [
            n for n in neighbour_list
            if not any(np.all(n == c) for c in chain)
        ]
        if len(neighbour_list) == 0:  # dead end: chain is complete
            growing = False
        elif len(neighbour_list) == 1:  # single continuation: keep walking
            chain.append(neighbour_list[0])
        else:  # fork: stop, record the junction and the new branches
            growing = False
            connexion = chain.pop()  # last link becomes the connexion point
            new_starts = neighbour_list  # remaining neighbours start new chains
    return np.array(chain), connexion, new_starts
def get_neighbours(idx, array, dist):
    """Return the active cells in a square window around `idx`.

    Scans the (2*dist+1)-sided window centred on `idx` (clamped to the
    array bounds) in row-major order and collects every position whose
    value equals 1, excluding `idx` itself.

    Args:
        idx: (row, col) centre index.
        array: 2-D array of 0/1 values.
        dist: Chebyshev radius of the window.

    Returns:
        list of np.array([row, col]) neighbour coordinates.
    """
    # Window bounds, clamped so we never index outside the array.
    r_lo = max(idx[0] - dist, 0)
    r_hi = min(idx[0] + dist + 1, array.shape[0])
    c_lo = max(idx[1] - dist, 0)
    c_hi = min(idx[1] + dist + 1, array.shape[1])
    found = list()
    for r in range(r_lo, r_hi):
        for c in range(c_lo, c_hi):
            if array[r, c] != 1:
                continue
            candidate = np.array([r, c])
            # The centre cell itself is never its own neighbour.
            if not np.array_equal(candidate, idx):
                found.append(candidate)
    return found
def get_info_connexions(connexions, chains):
connexions_info = list()
# get near chains for each connexions
for connexion in connexions: # for each connexion
near_chains_list = list()
for i, chain in enumerate(chains): # for each chain
d_to_start = distance.cityblock(connexion, chain[0]) # get distance from coneexion to start of chain
d_to_end = distance.cityblock(connexion, chain[-1]) # get distance from connexion to end of chain
if d_to_start <= 3 or d_to_end <= 3: # if distance < thr //PARAM (3 to reach further diagonally)
near_chains_list.append(i) # mark chain as near_chain
connexions_info.append([connexion, near_chains_list])
# merge connexions that are near
connexion_del_list = list()
new_connexions_info = list()
for i, c_info1 in enumerate(connexions_info): # for each connexion (i)
if i not in connexion_del_list: # if connexion i not marked to be deleted
for j, c_info2 in enumerate(connexions_info): # for each connexion (j)
if i != j: # if not same connexion
if j not in connexion_del_list: # if connexion j not marked to be deleted
d_to_c = distance.cityblock(c_info1[0],c_info2[0]) # compute distance between connexions
if d_to_c <= 3: # if distance < thr //PARAM (3 to reach further diagonally)
connexion_del_list.append(i) # mark to delete i
connexion_del_list.append(j) # mark to delete j
new_near_chains_list = list(set(c_info1[1]+c_info2[1])) # new near chain list as a set of both lists concatenated
new_connexions_info.append([c_info1[0], new_near_chains_list]) # append new connexion
for i in sorted(connexion_del_list, reverse=True): # delete marked connexions
del connexions_info[i]
connexions_info = connexions_info + new_connexions_info # concatenate remaining connexions with new connexions
#delete connexions with only one near chain, possible due to the deletion of short chains
connexion_del_list = list()
for i, c_info in enumerate(connexions_info):
if len(c_info[1])<=1:
connexion_del_list.append(i)
for i in sorted(connexion_del_list, reverse=True):
del connexions_info[i]
# delete connexions with only two near chains, possible due to the deletion of short chains, and merge chains
connexion_del_list = list()
for i, c_info in enumerate(connexions_info):
if len(c_info[1])==2:
if any(np.array_equal(chains[c_info[1][0]][0], x) for x in chains[c_info[1][1]]) == False and any(np.array_equal(chains[c_info[1][1]][0], x) for x in chains[c_info[1][0]]) == False: # avoid comparision between overlaping chians (always should pass)
connexion_del_list.append(i) # mark connexion to be deleted
# compute distances from connexion to start and end of both chins
d_to_start1 = distance.cityblock(c_info[0], chains[c_info[1][0]][0])
d_to_end1 = distance.cityblock(c_info[0], chains[c_info[1][0]][-1])
d_to_start2 = distance.cityblock(c_info[0], chains[c_info[1][1]][0])
d_to_end2 = distance.cityblock(c_info[0], chains[c_info[1][1]][-1])
# find where the connexions lies regarding both chains
union1 = [d_to_start1, d_to_end1].index(min([d_to_start1, d_to_end1]))
union2 = [d_to_start2, d_to_end2].index(min([d_to_start2, d_to_end2]))
chain1 = chains[c_info[1][0]]
chain2 = chains[c_info[1][1]]
# concatenate chains depending on where the connexion is
if union1 == 0:
if union2 == 0:
chain2 = np.flipud(chain2)
chain2 = np.vstack((chain2, c_info[0]))
new_chain = np.vstack((chain2, chain1))
else:
chain2 = np.vstack((chain2, c_info[0]))
new_chain = np.vstack((chain2, chain1))
else:
if union2 == 0:
chain1 = np.vstack((chain1, c_info[0]))
new_chain = np.vstack((chain1, chain2))
else:
chain2 = np.flipud(chain2)
chain1 = np.vstack((chain1, c_info[0]))
new_chain = np.vstack((chain1, chain2))
# both chains are now the concatenated chain, this allows further concatenation and is controlled by the previous if (if any(...))
chains[c_info[1][0]] = new_chain
chains[c_info[1][1]] = new_chain
for i in |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.