input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import bz2
import errno
import gzip
import os
import re
import select
import signal
import struct
import sys
import threading
import time
from xml.sax import saxutils
from conary.errors import ConaryError
try:
import fcntl
import termios
import tty
except ImportError:
fcntl = termios = tty = None # pyflakes=ignore
# Read-chunk size (bytes) used when draining the logged pty/pipe.
BUFFER=1024*4096
# Token types emitted by Lexer.emit() and dispatched by LogWriter.handleToken().
MARKER, FREETEXT, NEWLINE, CARRIAGE_RETURN, COMMAND, CLOSE = range(6)
# Characters treated as line terminators by the lexer.
LINEBREAKS = ('\r', '\n')
def callable(func):
    # Decorator that tags *func* so LogWriter.command() will dispatch to
    # it; untagged methods are ignored by command() for safety.
    # NOTE(review): this shadows the builtin callable() within this
    # module; kept as-is because every @callable use depends on the name.
    func._callable = True
    return func
def makeRecord(d):
    """Serialize dict *d* as a flat XML ``<record>`` element.

    Keys become element names and values their text content, emitted in
    sorted key order so output is deterministic.  Values are assumed to
    be XML-escaped already by the caller (see XmlLogWriter.log).
    """
    # d.items() works on both Python 2 and 3; the former d.iteritems()
    # is Python-2 only.  Joining parts also avoids quadratic string +=.
    parts = ["<record>"]
    for key, val in sorted(d.items()):
        parts.append("<%s>%s</%s>" % (key, val, key))
    parts.append("</record>")
    return "".join(parts)
def getTime():
    """Return an ISO8601-compliant time string.

    Time is expressed in UTC with millisecond precision, e.g.
    ``2021-05-27T12:34:56.789Z``.
    """
    curTime = time.time()
    # int() truncates exactly like the former long() did, and still
    # exists on Python 3 where long was removed.
    msecs = 1000 * (curTime - int(curTime))
    fmtStr = "%Y-%m-%dT%H:%M:%S.%%03dZ"
    return time.strftime(fmtStr, time.gmtime(curTime)) % msecs
def openPath(path):
    """Open *path* for writing, compressing according to its extension.

    A ``.bz2`` path yields a BZ2File whose flush() is a no-op (the bz2
    stream cannot usefully flush mid-compression), ``.gz`` yields a
    GzipFile, and anything else a plain text file.
    """
    class _NoFlushBZ2File(bz2.BZ2File):
        def flush(self):
            # Intentionally a no-op; see openPath docstring.
            pass

    if path.endswith('.bz2'):
        return _NoFlushBZ2File(path, 'w')
    elif path.endswith('.gz'):
        return gzip.GzipFile(path, 'w')
    else:
        return open(path, 'w')
class Lexer(object):
    """Tokenize a character stream into FREETEXT / NEWLINE /
    CARRIAGE_RETURN / COMMAND tokens.

    A command is a line that begins with *marker* (matched only directly
    after a newline); everything else is emitted as freetext plus
    explicit line-break tokens.  Each registered callback receives every
    (type, payload) token via emit().
    """
    def __init__(self, marker, callbacks = None):
        # String that introduces a command line.
        self.marker = marker
        self.callbacks = callbacks or []
        # Characters buffered since the last token was emitted.
        self.stream = ''
        self.mark = False
        self.markMatch = ''
        self.state = FREETEXT
    def registerCallback(self, callback):
        # Add a token consumer; invoked with every emitted token.
        self.callbacks.append(callback)
    def freetext(self, text):
        self.emit((FREETEXT, text))
    def newline(self):
        self.emit((NEWLINE, None))
    def carriageReturn(self):
        self.emit((CARRIAGE_RETURN, None))
    def command(self, text):
        # Payload is [name] or [name, argument-string].
        self.emit((COMMAND, text.split(None, 1)))
    def close(self):
        # newline is the only state that can be left half flushed
        if self.state == NEWLINE:
            self.newline()
        self.emit((CLOSE, None))
    def scan(self, sequence):
        """
        scan a sequence of characters, tokenizing it on the fly
        This code is implemented as a simple state machine.
        The general state rules are:
        Freetext can go to freetext or newline;
        Newline can go to newline, freetext or marker;
        Marker can go to marker, freetext, newline or command;
        Command can go to command or freetext.
        If anything is going to be emitted, it generally happens on
        state change.
        If the state machine finishes parsing and it is in freetext,
        it will flush with a freetext token."""
        for char in sequence:
            if self.state == FREETEXT:
                if char in LINEBREAKS:
                    # Flush buffered text before the line-break token.
                    if self.stream:
                        self.freetext(self.stream)
                        self.stream = ''
                    if char == '\n':
                        # Newline emission is deferred: the next char may
                        # start a marker, which swallows the newline.
                        self.state = NEWLINE
                    else:
                        # emit a CR token, but leave the state as FREETEXT
                        self.carriageReturn()
                else:
                    self.stream += char
            elif self.state == NEWLINE:
                if char in LINEBREAKS:
                    # this means two linebreaks in a row. emit the newline
                    self.newline()
                    self.stream = ''
                    if char == '\r':
                        self.carriageReturn()
                        self.state = FREETEXT
                else:
                    if self.marker.startswith(char):
                        # Possible command marker; buffer and keep the
                        # pending newline unemitted for now.
                        self.stream = char
                        self.state = MARKER
                    else:
                        # emit the newline we were holding
                        self.newline()
                        self.stream = char
                        self.state = FREETEXT
            elif self.state == MARKER:
                if char in LINEBREAKS:
                    # don't forget the newline that was held in abeyance to
                    # get into the marker state.
                    self.newline()
                    if self.stream:
                        self.freetext(self.stream)
                        self.stream = ''
                    if char == '\r':
                        self.carriageReturn()
                        self.state = FREETEXT
                    else:
                        self.state = NEWLINE
                else:
                    candidate = self.stream + char
                    self.stream += char
                    if self.stream == self.marker:
                        # Full marker matched: switch to command mode and
                        # silently drop the marker text and held newline.
                        self.stream = ''
                        self.state = COMMAND
                    else:
                        if not self.marker.startswith(candidate):
                            # Prefix match failed; release the held
                            # newline and fall back to freetext.
                            self.newline()
                            self.state = FREETEXT
            elif self.state == COMMAND:
                if char == '\n':
                    self.command(self.stream.lstrip())
                    self.stream = ''
                    self.state = FREETEXT
                else:
                    self.stream += char
        # Flush any trailing freetext once the input chunk is exhausted.
        if self.state == FREETEXT:
            if self.stream:
                self.freetext(self.stream)
                self.stream = ''
    def write(self, text):
        # File-object-style alias for scan().
        return self.scan(text)
    def flush(self):
        self.scan('')
    def emit(self, token):
        # Fan the token out to every registered callback.
        for callback in self.callbacks:
            callback(token)
class LogWriter(object):
    """Base class for log-token consumers.

    handleToken() receives (type, payload) tokens from a Lexer and
    dispatches them to the overridable handlers freetext(), newline(),
    carriageReturn(), command() and close().  Embedded log commands are
    routed by command() to methods tagged with the @callable decorator.
    """
    def handleToken(self, token):
        # Dispatch one lexer token to the matching handler.
        mode, param = token
        if mode == FREETEXT:
            self.freetext(param)
        elif mode == NEWLINE:
            self.newline()
        elif mode == CARRIAGE_RETURN:
            self.carriageReturn()
        elif mode == COMMAND:
            self.command(*param)
        elif mode == CLOSE:
            self.close()
    def freetext(self, text):
        # Handle a run of plain text; no-op in the base class.
        pass
    def write(self, text):
        # alias to freetext to define a more file-object-like interface
        return self.freetext(text)
    def flush(self):
        pass
    def newline(self):
        pass
    def carriageReturn(self):
        pass
    def start(self):
        # Called once before logging begins; subclasses open streams here.
        pass
    @callable
    def reportMissingBuildRequires(self, data):
        # *data* is a space-separated list of requirement names.
        self.freetext("warning: Suggested buildRequires additions: ['%s']"
                      %"', '".join(data.split(' ')))
        self.newline()
    @callable
    def reportExcessBuildRequires(self, data):
        self.freetext("info: Possible excessive buildRequires: ['%s']"
                      %"', '".join(data.split(' ')))
        self.newline()
    @callable
    def reportExcessSuperclassBuildRequires(self, data):
        self.freetext("info: Possible excessive superclass buildRequires: ['%s']"
                      %"', '".join(data.split(' ')))
        self.newline()
    def command(self, cmd, *args):
        """Dispatch command *cmd* to the like-named @callable method."""
        func = getattr(self.__class__, cmd, False)
        # silently ignore nonsensical calls because the logger loops over each
        # writer and passes the command separately to all of them
        if func and func.__dict__.get('_callable', False):
            try:
                return func(self, *args)
            except TypeError:
                # Probably the wrong number of arguments; make it
                # possible to debug the problem
                self.freetext('\nERROR: failed attempt to call'
                    ' function %s with arguments %s\n' %(cmd, repr(args)))
            # Fixed: "except Exception, e" is a syntax error on Python 3;
            # the "as" form below is valid on Python >= 2.6 as well.
            except Exception as e:
                # Unknown problem; provide information so that we can
                # debug it and fix it later
                self.freetext('\nERROR: unhandled exception %s: %s'
                    ' calling function %s with arguments %s\n' %(
                    str(e.__class__), str(e), cmd, repr(args)))
    def close(self):
        pass
class XmlLogWriter(LogWriter):
    """LogWriter that records each completed line as an XML <record>.

    Descriptor stack and extra record fields are kept in thread-local
    storage so concurrently-logging threads do not interleave state.
    """
    def __init__(self, path):
        # Thread-local home for descriptorStack / recordData.
        self.data = threading.local()
        # Monotonically increasing id stamped into each record.
        self.messageId = 0
        self.path = path
        self.logging = False
        # Text accumulated for the current line; flushed by newline().
        self.text = ''
        self.stream = None
        LogWriter.__init__(self)
    def flush(self):
        self.stream.flush()
    def start(self):
        # Open the output file and write the XML prologue.
        self.stream = openPath(self.path)
        print >> self.stream, '<?xml version="1.0"?>'
        print >> self.stream, \
            "<log xmlns='http://www.rpath.com/permanent/log-v1.xsd'>"
        self.log('begin log', 'DEBUG')
        self.stream.flush()
        self.logging = True
    def _getDescriptorStack(self):
        # Lazily create this thread's descriptor stack.
        if not hasattr(self.data, 'descriptorStack'):
            self.data.descriptorStack = []
        return self.data.descriptorStack
    def _getRecordData(self):
        # Lazily create this thread's extra record-field dict.
        if not hasattr(self.data, 'recordData'):
            self.data.recordData = {}
        return self.data.recordData
    def close(self):
        # Idempotent: a second close on an unstarted writer is a no-op.
        if not self.logging:
            return
        del self._getDescriptorStack()[:]
        self._getRecordData().clear()
        self.log('end log', 'DEBUG')
        print >> self.stream, "</log>"
        self.stream.flush()
        self.stream.close()
    def freetext(self, text):
        # Buffer text until a line break turns it into a record.
        self.text += text
    def newline(self):
        # A completed line becomes exactly one log record.
        if self.text:
            self.log(self.text)
            self.text = ''
    carriageReturn = newline
    def _getDescriptor(self):
        # Dotted path of all descriptors currently pushed.
        descriptorStack = self._getDescriptorStack()
        return '.'.join(descriptorStack)
    def log(self, message, levelname = 'INFO'):
        """Emit one <record> element for *message* at *levelname*."""
        # escape xml delimiters and newline characters
        message = saxutils.escape(message)
        message = message.replace('\n', '\\n')
        macros = {}
        recordData = self._getRecordData()
        macros.update(recordData)
        macros['time'] = getTime()
        macros['message'] = message
        macros['level'] = levelname
        macros['pid'] = os.getpid()
        threadName = threading.currentThread().getName()
        if threadName != 'MainThread':
            macros['threadName'] = threadName
        macros['messageId'] = self.messageId
        self.messageId += 1
        descriptor = self._getDescriptor()
        if descriptor:
            macros['descriptor'] = descriptor
        print >> self.stream, makeRecord(macros)
    @callable
    def pushDescriptor(self, descriptor):
        # Enter a nested logging context (e.g. a build phase).
        descriptorStack = self._getDescriptorStack()
        descriptorStack.append(descriptor)
    @callable
    def popDescriptor(self, descriptor = None):
        # Leave the innermost context; if *descriptor* is given, assert
        # that it matches what is actually popped.
        descriptorStack = self._getDescriptorStack()
        desc = descriptorStack.pop()
        if descriptor:
            assert descriptor == desc
        return desc
    @callable
    def addRecordData(self, *args):
        """Attach a key/value pair to every subsequent record.

        Accepts either one 'key value' string (when invoked through the
        lexer) or a separate (key, value) pair (direct calls).
        """
        if not args:
            # handle bad input
            return
        if len(args) < 2:
            # called via lexer
            key, val = args[0].split(None, 1)
        else:
            # called via xmllog:addRecordData
            key, val = args
        # Keys must be legal XML element names.
        if key[0].isdigit() or \
                not re.match('^\w[a-zA-Z0-9_.-]*$', key,
                             flags = re.LOCALE | re.UNICODE):
            raise RuntimeError("'%s' is not a legal XML name" % key)
        if isinstance(val, (str, unicode)):
            val = saxutils.escape(val)
        recordData = self._getRecordData()
        recordData[key] = val
    @callable
    def delRecordData(self, key):
        # Missing keys are ignored.
        recordData = self._getRecordData()
        recordData.pop(key, None)
    @callable
    def reportMissingBuildRequires(self, data):
        self.pushDescriptor('missingBuildRequires')
        self.log(data, levelname = 'WARNING')
        self.popDescriptor('missingBuildRequires')
    @callable
    def reportExcessBuildRequires(self, data):
        self.pushDescriptor('excessBuildRequires')
        self.log(data, levelname = 'INFO')
        self.popDescriptor('excessBuildRequires')
    @callable
    def reportExcessSuperclassBuildRequires(self, data):
        self.pushDescriptor('excessSuperclassBuildRequires')
        self.log(data, levelname = 'DEBUG')
        self.popDescriptor('excessSuperclassBuildRequires')
class FileLogWriter(LogWriter):
    """LogWriter that appends plain text to a file on disk.

    Nothing is written until start() has opened the stream; each write
    flushes immediately so the file tracks the build in real time.
    """
    def __init__(self, path):
        self.path = path
        self.stream = None
        LogWriter.__init__(self)
        self.logging = False
    def start(self):
        # Open the target file (compressed per extension) and enable output.
        self.stream = openPath(self.path)
        self.logging = True
    def freetext(self, text):
        if not self.logging:
            return
        self.stream.write(text)
        self.stream.flush()
    def newline(self):
        if not self.logging:
            return
        self.stream.write('\n')
        self.stream.flush()
    carriageReturn = newline
    def close(self):
        self.stream.close()
        self.logging = False
class StreamLogWriter(LogWriter):
    def __init__(self, stream = None):
        # Per-thread flag storage; hideLog suppresses output while the
        # 'environment' descriptor is active.
        self.data = threading.local()
        self.data.hideLog = False
        self.stream = stream
        LogWriter.__init__(self)
        # Column position within the current output line.
        self.index = 0
        self.closed = bool(self.stream)
    def start(self):
        # Default to stdout when no stream was supplied at construction.
        if not self.stream:
            self.stream = sys.stdout
    def freetext(self, text):
        # Suppressed while this thread's hideLog flag is set.
        if not self.data.__dict__.get('hideLog'):
            self.stream.write(text)
            self.stream.flush()
            # Track the column so carriageReturn() can pad the line.
            self.index += len(text)
    def newline(self):
        # Terminate the line and reset the column counter.
        if not self.data.__dict__.get('hideLog'):
            self.stream.write('\n')
            self.stream.flush()
        self.index = 0
    def carriageReturn(self):
        # Pad the current line with spaces before the CR so a shorter
        # follow-up line does not leave stale characters visible.
        if not self.data.__dict__.get('hideLog'):
            if (self.index % 80):
                # NOTE(review): pads toward column 78 while wrapping on
                # 80 -- presumably to stay clear of terminal auto-wrap;
                # confirm the 78/80 mismatch is intentional.
                spaces = 78 - (self.index % 80)
                self.stream.write(spaces * ' ')
            self.stream.write('\r')
            self.stream.flush()
        self.index = 0
    @callable
    def pushDescriptor(self, descriptor):
        # Entering the 'environment' context silences terminal output
        # for this thread (the environment dump is noise on screen).
        if descriptor == 'environment':
            self.data.hideLog = True
    @callable
    def popDescriptor(self, descriptor = None):
        # Leaving the 'environment' context re-enables terminal output.
        if descriptor is None:
            return
        if descriptor == 'environment':
            self.data.hideLog = False
@callable
def reportExcessSuperclassBuildRequires(self, data):
# This is really only for debugging Conary itself, and so is
| |
- name: query_body
# in: body
# description: 日志查询条件
# required: true
# type: json
# responses:
# '200':
# description: 返回点击列表
# schema:
# $ref: '#/definitions/clickItems'
# default:
# description: Error
# schema:
# $ref: '#/definitions/Error'
# """
# query_body = json.loads(self.request.body)
# from_time = query_body.get('from_time', 0)
# end_time = query_body.get('end_time', 0)
# key = query_body.get('key', '')
# if key:
# key = key.encode('utf-8')
# key_type = query_body.get('key_type', '')
# size = query_body.get('size', 20)
# query = query_body.get('query', [])
#
# if not (from_time and end_time and key and key_type):
# self.process_error(-1, "接口参数不能为空")
# return
# try:
# result = get_online_clicks(key, key_type, from_time, end_time, size, query)
# self.finish(json.dumps({"status":0, "values":result}))
# except Exception as e:
# traceback.print_exc()
# self.finish(json.dumps({"status":-1, "error":e.message}))
#
#
#class ClickListHandler(BaseHandler):
# REST_URL = '/platform/behavior/clicks'
#
# def post(self):
# """
# 获取时间段内的点击列表
# @API
# summary: 获取时间段内的点击列表
# description: 获取指定时间段内指定名单的所有点击资料
# tags:
# - platform
# parameters:
# - name: query_body
# in: body
# description: 日志查询条件
# required: true
# type: json
# responses:
# '200':
# description: 返回点击列表
# schema:
# $ref: '#/definitions/clickItems'
# default:
# description: Error
# schema:
# $ref: '#/definitions/Error'
# """
# query_body = json.loads(self.request.body)
# filter_cols = ["uri_stem", "sid", "uid"]
# from_time = query_body.get('from_time', 0)
# end_time = query_body.get('end_time', 0)
# key = query_body.get('key', '')
# if key:
# key = key.encode('utf-8')
# key_type = query_body.get('key_type', '')
# size = query_body.get('size', 20)
# query = query_body.get('query', [])
#
# if not (from_time and end_time and key and key_type):
# self.process_error(-1, "接口参数不能为空")
# return
#
# ts = int(from_time) / 1000.0
# end_ts = int(end_time) / 1000.0
# now = millis_now()
# now_in_hour_start = now / 1000 / 3600 * 3600
# try:
# db_lock.acquire()
# records = []
# errors = []
# if ts < now_in_hour_start:
# # 从离线持久化数据里面查找
# logger.debug(DEBUG_PREFIX+'从历史里面查找点击列表...')
# # 为了实现日志点击列表倒序,需要扫描整个时间段日志,再截取列表
# limit = 10000000
# ret, err = persist_utils.get_request_log(key, ts, key_type, query=query, end=end_ts, limit=limit)
# records = ret if ret else []
# if err:
# errors.append('%s: %s;' % (ts, err))
# self.finish(json.dumps({"status":0, "values":[]}))
# return
# # 过滤一条日志中任意uri_stem uid sid 字段是否包含query
# if records:
# logger.debug(DEBUG_PREFIX+'过滤关键词%s之前, 日志的大小是%s', query, len(records))
# else:
# logger.debug(DEBUG_PREFIX+'过滤关键词%s之前, 日志就为空了', query)
#
# # 过滤records @todo 优化,现在下面支持查询功能了, 至少可以用闭包弄个过滤函数
## records = filter_records(records, filter_cols, query)
# if records:
# logger.debug(DEBUG_PREFIX+'过滤关键词%s之后, 日志的大小是%s', query, len(records))
#
# if end_ts - 1 >= now_in_hour_start and len(records) <= size:
# logger.debug(DEBUG_PREFIX+'从当前小时里面查找点击列表...')
# latest_events = get_latest_events(key, key_type, fromtime=from_time, size=size * 2)
## print >> sys.stderr, "filter before",len(latest_events)
## print >> sys.stderr, latest_events[0]["timestamp"], fromtime
# logger.debug(DEBUG_PREFIX+u"返回的事件们是%s, type:%s", latest_events, type(latest_events))
## latest_events = filter( lambda x: x["timestamp"]>= int(fromtime), latest_events)
## print >> sys.stderr, "filter time",len(latest_events)
# # 过滤当前日志中任意uri_stem uid sid 字段是否包含query
## result.extend(latest_events)
# records.extend(filter_records(latest_events, filter_cols, query))
## print >> sys.stderr, "filter query", len(latest_events)
#
# # 合并日志,将日志更新到父日志中。日志中的固定字段不更改,notices字段合并,其余的特殊字段覆盖父日志
# record_dict = dict()
# for record in records:
# record_id = record['id']
# record_pid = record['pid']
#
# if record_pid in record_dict:
# pid_dict = record_dict[record_pid]
#
# for key, value in record.items():
# if key in Event_Schema['fixed']:
# continue
# elif key == 'notices':
# if record.get('notices', ''):
# if pid_dict.get('notices', ''):
# pid_dict['notices'] = ','.join([pid_dict['notices'], record['notices']])
# else:
# pid_dict['notices'] = record['notices']
# else:
# pid_dict[key] = value
#
# pid_dict['merged'] += 1
# record_dict[record_pid] = pid_dict
# else:
# record['merged'] = 1
# record_dict[record_id] = record
#
# # 根据每天记录的notice字段计算风险场景和风险值
# for _, record in record_dict.items():
# event_count = record.pop('merged', 1)
#
# if record.get('notices', None):
# score = dict()
# notices = list(set(record.get('notices', '').split(',')))
# for n in notices:
# weigh = cache.Strategy_Weigh_Cache.get(n, dict())
# if weigh:
# category = weigh['category']
#
# if category in score:
# score[category] += weigh['score']
# else:
# score[category] = weigh['score']
# record['category'] = score.keys()
# record['risk_score'] = max([int(value / event_count) for value in score.values()]) if score else 0
#
# # 去除log多余字段
# for attr in ['buff_endpoint', 'record_size', 'buff_startpoint']:
# record.pop(attr, None)
#
# result = record_dict.values()
# result.sort(key=lambda r: r["timestamp"], reverse=True)
# result = result[:size]
# self.finish(json.dumps({"status": 0, "values": result}))
#
# except Exception as e:
# traceback.print_exc()
# self.finish(json.dumps({"status": -1, "error": e.message}))
# finally:
# db_lock.release()
#
#
#class ContinuousRelatedStatHandler(BaseHandler):
#
# REST_URL = '/platform/behavior/continuous_related_statistic'
#
# @authenticated
# def get(self):
# """
# 获取时间段内的点击列表
#
# @API
# summary: 获取时间段内的每个小时的点击数
# description: ''
# tags:
# - platform
# parameters:
# - name: from_time
# in: query
# description: 起始时间
# required: true
# type: integer
# - name: end_time
# in: query
# description: 结束时间
# required: true
# type: integer
# - name: key
# in: query
# description: 关键字
# required: false
# type: string
# - name: key_type
# in: query
# description: 关键字类型
# required: false
# type: string
# responses:
# '200':
# description: 返回点击统计列表
# schema:
# $ref: '#/definitions/clickStatistics'
# default:
# description: Error
# schema:
# $ref: '#/definitions/Error'
# """
# from_time = int(self.get_argument('from_time', 0))
# end_time = int(self.get_argument('end_time', 0))
# key = self.get_argument('key', '')
# key_type = self.get_argument('key_type', '')
#
# if not (from_time and end_time):
# self.process_error(400, 'parameters error')
# ts = int(from_time) / 1000.0
# end_ts = int(end_time) / 1000.0
# now = millis_now()
# now_in_hour_start = now / 1000 / 3600 * 3600
# logger.debug(DEBUG_PREFIX+u"查询的时间范围是%s ~ %s", datetime.fromtimestamp(ts), datetime.fromtimestamp(end_ts))
#
# ContinuousDB.get_db()
# group_tags = ['ip', 'user', 'page', 'did', 'incident']
# if end_ts >= now_in_hour_start:
# timestamps = get_hour_strs_fromtimestamp(ts, now_in_hour_start-1)
# else:
# timestamps = get_hour_strs_fromtimestamp(ts, end_ts)
#
# timestamps = map(lambda x: str(x), timestamps)
## logger.debug(DEBUG_PREFIX+u"查询的时间戳: %s", timestamps)
# related_vars = dict(
# did=['did__visit__dynamic_distinct_ip__1h__slot', #did 关联ip数
# 'did__visit__dynamic_distinct_user__1h__slot',# did 关联user数
# 'did__visit__dynamic_distinct_page__1h__slot',# did 关联page数
# 'did__visit__incident_count__1h__slot'],#did 风险事件数
# user=['user__visit__dynamic_distinct_ip__1h__slot',# user 关联ip数
# 'user__visit__dynamic_distinct_did__1h__slot',# user 关联did数
# 'user__visit__dynamic_distinct_page__1h__slot',# user 关联page数
# 'user__visit__incident_count__1h__slot'],# user 风险事件数
# ip=['ip__visit__dynamic_distinct_did__1h__slot',# ip 关联did数
# 'ip__visit__dynamic_distinct_user__1h__slot',# ip 关联user数
# 'ip__visit__dynamic_distinct_page__1h__slot',# ip 关联page数
# 'ip__visit__incident_count__1h__slot'],# ip 风险事件数
# page=['page__visit__dynamic_distinct_ip__1h__slot',# page 关联ip数
# 'page__visit__dynamic_distinct_user__1h__slot',# page 关联user数
# 'page__visit__dynamic_distinct_did__1h__slot',# page 关联did数
# 'page__visit__incident_count__1h__slot'],)# page 风险事件数
#
# # var:col
# vars_col_dict = {
# 'did__visit__dynamic_distinct_ip__1h__slot': 'ip',
# 'did__visit__dynamic_distinct_user__1h__slot':'user',# did 关联user数
# 'did__visit__dynamic_distinct_page__1h__slot':'page',# did 关联page数
# 'did__visit__incident_count__1h__slot':'incident',#did 风险事件数
# 'user__visit__dynamic_distinct_ip__1h__slot':'ip',# user 关联ip数
# 'user__visit__dynamic_distinct_did__1h__slot':'did',# user 关联did数
# 'user__visit__dynamic_distinct_page__1h__slot':'page',# user 关联page数
# 'user__visit__incident_count__1h__slot':'incident',# user 风险事件数
# 'ip__visit__dynamic_distinct_did__1h__slot':'did',# ip 关联did数
# 'ip__visit__dynamic_distinct_user__1h__slot':'user',# ip 关联user数
# 'ip__visit__dynamic_distinct_page__1h__slot':'page',# ip 关联page数
# 'ip__visit__incident_count__1h__slot':'incident',# ip 风险事件数
# 'page__visit__dynamic_distinct_ip__1h__slot':'ip',# page 关联ip数
# 'page__visit__dynamic_distinct_user__1h__slot':'user',# page 关联user数
# 'page__visit__dynamic_distinct_did__1h__slot':'did',# page 关联did数
# 'page__visit__incident_count__1h__slot':'incident',# page 风险事件数
# 'total__visit__dynamic_distinct_did__1h__slot':'did',
# 'total__visit__incident_count__1h__slot':'incident',
# 'total__visit__dynamic_distinct_user__1h__slot':'user',
# 'total__visit__dynamic_distinct_ip__1h__slot':'ip',
# }
# try:
# if key_type and key:
# related_dim = [dim for dim in DIM_LIST if dim != key_type]
# click_var = '%s__visit__dynamic_count__1h__slot' % key_type
# incident_var = '%s__visit__incident_count__1h__slot' % key_type
# var_list = ['%s__visit__dynamic_distinct_%s__1h__slot' % (key_type, dim) for dim in related_dim]
# var_list.append(click_var)
# var_list.append(incident_var)
# if timestamps:
# records = ContinuousDB.query_many(key, key_type, timestamps, var_list)
# else:
# records = dict()
# else:
# click_var = 'total__visit__dynamic_count__1h__slot'
# related_vars = {dim: 'total__visit__dynamic_distinct_{}__1h__slot'.format(dim) for dim in ['did', 'ip', 'user']}
# related_vars['incident'] = 'total__visit__incident_count__1h__slot'
# tmp_var_list = related_vars.values()
# tmp_var_list.append(click_var)
# if timestamps:
# records = ContinuousDB.query_many('all', 'total', timestamps, tmp_var_list)
# else:
# records = dict()
#
# logger.debug(DEBUG_PREFIX+u"查询的key: %s, key_type:%s, 返回的查询结果是:%s", key, key_type, records)
# except Exception as e:
# traceback.print_exc()
# logger.error(e)
# self.process_error(400, 'fail to get incidents from metrics')
# return
#
# click_statistics = list()
# for ts in timestamps:
# r = records.get(ts, None) if records else None # if aerospike fail.
# if r is None:
# click_statistics.append(dict(count=0, time_frame=int(float(ts)*1000), related_count={tag: 0 for tag in group_tags}))
# continue
#
# t = dict(count=r.pop(click_var, 0), related_count=dict(), time_frame=int(float(ts)*1000))
# for k,v in r.iteritems():
# col = vars_col_dict.get(k)
# t['related_count'][col] = v
# click_statistics.append(t)
# try:
#
# ts = get_current_hour_timestamp()
# if end_time > ts:
# if key_type and key:
# click = get_latest_statistic(key, key_type, var_list)
# related_counts = dict()
# if click:
# for dim in related_dim:
# var = '%s__visit__dynamic_distinct_%s__1h__slot' % (key_type, dim)
# related_counts[dim] = len(click[var])
# related_counts['incident'] = int(click[incident_var])
# click_statistics.append(
# {'count': int(click[click_var]), 'related_count': related_counts, 'time_frame': ts})
# else:
# click_statistics.append(dict(count=0, time_frame=ts, related_count={tag: 0 for tag in group_tags}))
# else:
# click = get_latest_statistic(key='', key_type='', var_list=[click_var] + related_vars.values())
# if click:
# a_stat = dict()
# for tag, var in related_vars.iteritems():
# var_value = click[var]
# if isinstance(var_value, int):
# a_stat[tag] = var_value
# else:
# a_stat[tag] = len(var_value)
# click_statistics.append({'count': int(click[click_var]), 'related_count': a_stat, 'time_frame':ts})
# else:
# click_statistics.append(dict(count=0, time_frame=ts, related_count={tag: 0 for tag in group_tags}))
## records[ts] = {'count': int(click[click_var]), 'related_count': click_statistics}
#
# self.finish(json_dumps(click_statistics))
# return
# except Exception as e:
# traceback.print_exc()
# logger.error(e)
# self.process_error(400, 'fail to statistics click')
#
#
#class ContinuousTopRelatedStatHandler(BaseHandler):
#
# REST_URL = '/platform/behavior/continuous_top_related_statistic'
#
# @authenticated
# def get(self):
# """
# 获取指定时间点击数最高的7位用户点击量
#
# @API
# summary: 获取用户历史点击量
# description: ''
# tags:
# - platform
# parameters:
# - name: from_time
# in: query
# description: 起始时间
# required: true
# type: integer
# - name: end_time
# in: query
# description: 结束时间
# required: true
# type: integer
# - name: key
# in: query
# description: 关键字
# required: true
# type: string
# - name: key_type
# in: query
# description: 关键字类型
# required: true
# type: string
# responses:
# '200':
# description: 返回点击统计列表
# schema:
# $ref: '#/definitions/clickStatistics'
# default:
# description: Error
# schema:
# $ref: '#/definitions/Error'
# """
# from_time = int(self.get_argument('from_time', 0))
# end_time = int(self.get_argument('end_time', 0))
# key = self.get_argument('key', '')
# key_type = self.get_argument('key_type', '')
#
# if not (from_time and end_time and key and key_type):
# self.process_error(400, 'parameters error')
#
# interval = 60 * 60 * 1000
# db = 'default'
# metrics_name = 'click.related.{}'.format(key_type)
# top_related = get_current_top_related(key_type, key)
# top_related_keys = top_related.keys()
# if key_type == 'ip':
# group_tags = ['user']
# filter_tags = {'user': top_related_keys}
# else:
# group_tags = ['ip']
# filter_tags = {'ip': top_related_keys}
#
# try:
# metrics = MetricsAgent.get_instance().query(db, metrics_name, 'sum', from_time,
# end_time, interval, filter_tags, group_tags)
# click_statistics = {top: [] for top in top_related_keys}
# for time_frame in range(from_time, end_time, interval):
# clicks = metrics.get(time_frame, {})
# related_tops = {tags[0]: int(value) for tags, value in clicks.iteritems()}
#
# for top in top_related_keys:
# if top in related_tops:
# click_statistics[top].append(dict(time_frame=time_frame, count=related_tops[top]))
# else:
# click_statistics[top].append(dict(time_frame=time_frame, count=0))
#
# ts = get_current_hour_timestamp()
# if end_time > ts:
# for top, count in top_related.iteritems():
# click_statistics[top][-1]['count'] = count
#
# self.finish(json_dumps(click_statistics))
# except Exception as e:
# logger.error(e)
# self.process_error(400, 'fail to statistics click')
#
#
#def get_current_top_related(key_type, key):
# if key_type == 'ip':
# var = 'ip__visit__user_dynamic_count__1h__slot'
# else:
# var = '{}__visit__ip_dynamic_count__1h__slot'.format(key_type)
# variables = get_latest_statistic(key=key, key_type=key_type, var_list=[var])
# if variables:
# top_related = variables[var]
# sorted_top_related | |
"""
    def __init__(self):
        r"""
        :param IssueTypes: Issue details.
        :type IssueTypes: list of IssueTypeInfo
        :param EventsTotalCount: Total number of abnormal events.
        :type EventsTotalCount: int
        :param HealthScore: Health score.
        :type HealthScore: int
        :param HealthLevel: Health level, e.g. "HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK".
        :type HealthLevel: str
        """
        self.IssueTypes = None
        self.EventsTotalCount = None
        self.HealthScore = None
        self.HealthLevel = None
    def _deserialize(self, params):
        # Populate this model from the raw *params* response dict;
        # IssueTypes deserializes into a list of nested IssueTypeInfo.
        if params.get("IssueTypes") is not None:
            self.IssueTypes = []
            for item in params.get("IssueTypes"):
                obj = IssueTypeInfo()
                obj._deserialize(item)
                self.IssueTypes.append(obj)
        self.EventsTotalCount = params.get("EventsTotalCount")
        self.HealthScore = params.get("HealthScore")
        self.HealthLevel = params.get("HealthLevel")
        # Warn about response fields this model does not declare.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class HealthStatus(AbstractModel):
    """Instance health details."""

    def __init__(self):
        r"""
        :param HealthScore: Health score, out of 100.
        :type HealthScore: int
        :param HealthLevel: Health level: "HEALTH" - healthy; "SUB_HEALTH" - sub-healthy; "RISK" - risky; "HIGH_RISK" - high risk.
        :type HealthLevel: str
        :param ScoreLost: Total points deducted.
        :type ScoreLost: int
        :param ScoreDetails: Deduction details.
        Note: this field may return null, indicating no valid value.
        :type ScoreDetails: list of ScoreDetail
        """
        self.HealthScore = None
        self.HealthLevel = None
        self.ScoreLost = None
        self.ScoreDetails = None

    def _deserialize(self, params):
        # Fill attributes from the raw *params* response dict.
        self.HealthScore = params.get("HealthScore")
        self.HealthLevel = params.get("HealthLevel")
        self.ScoreLost = params.get("ScoreLost")
        raw_details = params.get("ScoreDetails")
        if raw_details is not None:
            self.ScoreDetails = []
            for entry in raw_details:
                detail = ScoreDetail()
                detail._deserialize(entry)
                self.ScoreDetails.append(detail)
        # Warn about response keys that are not modeled as attributes.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class InstanceBasicInfo(AbstractModel):
    """Basic instance information."""

    # Response fields copied verbatim into attributes of the same name.
    _FIELDS = ("InstanceId", "InstanceName", "Vip", "Vport",
               "Product", "EngineVersion")

    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Vip: Instance private IP.
        :type Vip: str
        :param Vport: Instance private port.
        :type Vport: int
        :param Product: Instance product.
        :type Product: str
        :param EngineVersion: Instance engine version.
        :type EngineVersion: str
        """
        for attr in self._FIELDS:
            setattr(self, attr, None)

    def _deserialize(self, params):
        # Fill attributes from the raw *params* response dict.
        for attr in self._FIELDS:
            setattr(self, attr, params.get(attr))
        # Warn about response keys that are not modeled as attributes.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class InstanceConfs(AbstractModel):
    """Instance configuration."""

    def __init__(self):
        r"""
        :param DailyInspection: Database inspection switch, Yes/No.
        :type DailyInspection: str
        :param OverviewDisplay: Instance overview switch, Yes/No.
        :type OverviewDisplay: str
        """
        self.DailyInspection = None
        self.OverviewDisplay = None

    def _deserialize(self, params):
        # Fill attributes from the raw *params* response dict.
        self.DailyInspection = params.get("DailyInspection")
        self.OverviewDisplay = params.get("OverviewDisplay")
        # Warn about response keys that are not modeled as attributes.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class InstanceInfo(AbstractModel):
    """Object describing an instance returned by the instance-list query."""

    # Fields copied verbatim from the response; InstanceConf is handled
    # separately because it deserializes into a nested model.
    _SIMPLE_FIELDS = (
        "InstanceId", "InstanceName", "Region", "HealthScore", "Product",
        "EventCount", "InstanceType", "Cpu", "Memory", "Volume",
        "EngineVersion", "Vip", "Vport", "Source", "GroupId", "GroupName",
        "Status", "UniqSubnetId", "DeployMode", "InitFlag", "TaskStatus",
        "UniqVpcId", "DeadlineTime", "IsSupported", "SecAuditStatus",
        "AuditPolicyStatus", "AuditRunningStatus")

    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Region: Region the instance belongs to.
        :type Region: str
        :param HealthScore: Health score.
        :type HealthScore: int
        :param Product: Product the instance belongs to.
        :type Product: str
        :param EventCount: Number of abnormal events.
        :type EventCount: int
        :param InstanceType: Instance type: 1:MASTER; 2:DR; 3:RO; 4:SDR.
        :type InstanceType: int
        :param Cpu: Number of cores.
        :type Cpu: int
        :param Memory: Memory, in MB.
        :type Memory: int
        :param Volume: Disk storage, in GB.
        :type Volume: int
        :param EngineVersion: Database version.
        :type EngineVersion: str
        :param Vip: Private network address.
        :type Vip: str
        :param Vport: Private network port.
        :type Vport: int
        :param Source: Access source.
        :type Source: str
        :param GroupId: Group ID.
        :type GroupId: str
        :param GroupName: Group name.
        :type GroupName: str
        :param Status: Instance status: 0: delivering; 1: running; 4: terminating; 5: isolated.
        :type Status: int
        :param UniqSubnetId: Unified subnet ID.
        :type UniqSubnetId: str
        :param DeployMode: cdb type.
        :type DeployMode: str
        :param InitFlag: cdb instance initialization flag: 0: not initialized; 1: initialized.
        :type InitFlag: int
        :param TaskStatus: Task status.
        :type TaskStatus: int
        :param UniqVpcId: Unified VPC ID.
        :type UniqVpcId: str
        :param InstanceConf: Instance inspection/overview status.
        :type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
        :param DeadlineTime: Resource expiration time.
        :type DeadlineTime: str
        :param IsSupported: Whether this is an instance supported by DBbrain.
        :type IsSupported: bool
        :param SecAuditStatus: Instance security audit log status: ON: enabled; OFF: not enabled.
        :type SecAuditStatus: str
        :param AuditPolicyStatus: Instance audit log status: ALL_AUDIT: full audit; RULE_AUDIT: rule audit; UNBOUND: audit not enabled.
        :type AuditPolicyStatus: str
        :param AuditRunningStatus: Instance audit log running status: normal: running; paused: suspended due to arrears.
        :type AuditRunningStatus: str
        """
        for attr in self._SIMPLE_FIELDS:
            setattr(self, attr, None)
        self.InstanceConf = None

    def _deserialize(self, params):
        # Fill attributes from the raw *params* response dict.
        for attr in self._SIMPLE_FIELDS:
            setattr(self, attr, params.get(attr))
        # InstanceConf deserializes into a nested InstanceConfs model.
        conf = params.get("InstanceConf")
        if conf is not None:
            self.InstanceConf = InstanceConfs()
            self.InstanceConf._deserialize(conf)
        # Warn about response keys that are not modeled as attributes.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class IssueTypeInfo(AbstractModel):
    """Metric information grouped by issue category.
    """

    def __init__(self):
        r"""
        :param IssueType: Metric category: AVAILABILITY (availability),
            MAINTAINABILITY (maintainability), PERFORMANCE (performance),
            RELIABILITY (reliability).
        :type IssueType: str
        :param Events: Abnormal events.
        :type Events: list of EventInfo
        :param TotalCount: Total number of abnormal events.
        :type TotalCount: int
        """
        self.IssueType = None
        self.Events = None
        self.TotalCount = None

    def _deserialize(self, params):
        """Populate attributes from a raw response dict.

        :param params: dict of raw response fields.
        """
        self.IssueType = params.get("IssueType")
        # "Events" holds a list of nested EventInfo models.
        if params.get("Events") is not None:
            self.Events = []
            for item in params.get("Events"):
                obj = EventInfo()
                obj._deserialize(item)
                self.Events.append(obj)
        self.TotalCount = params.get("TotalCount")
        # Warn about undeclared response keys.
        # (typo fix: "memeber"/"fileds" -> "member"/"fields")
        member_set = set(params.keys())
        for name in vars(self):
            member_set.discard(name)
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsRequest(AbstractModel):
    """KillMySqlThreads request structure.
    """

    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param Stage: Stage of the kill-session task. Valid values:
            "Prepare" (preparation stage), "Commit" (commit stage).
        :type Stage: str
        :param Threads: IDs of the SQL sessions to kill; used in the
            Prepare stage.
        :type Threads: list of int
        :param SqlExecId: Execution ID; used in the Commit stage.
        :type SqlExecId: str
        :param Product: Service product type. Valid values: "mysql"
            (TencentDB for MySQL), "cynosdb" (CynosDB for MySQL).
            Defaults to "mysql".
        :type Product: str
        """
        self.InstanceId = None
        self.Stage = None
        self.Threads = None
        self.SqlExecId = None
        self.Product = None

    def _deserialize(self, params):
        """Populate attributes from a raw request dict.

        :param params: dict of raw request fields.
        """
        self.InstanceId = params.get("InstanceId")
        self.Stage = params.get("Stage")
        self.Threads = params.get("Threads")
        self.SqlExecId = params.get("SqlExecId")
        self.Product = params.get("Product")
        # Warn about undeclared keys.
        # (typo fix: "memeber"/"fileds" -> "member"/"fields")
        member_set = set(params.keys())
        for name in vars(self):
            member_set.discard(name)
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsResponse(AbstractModel):
    """KillMySqlThreads response structure.
    """

    # Response fields copied verbatim from the API payload.
    _FIELDS = ("Threads", "SqlExecId", "RequestId")

    def __init__(self):
        r"""
        :param Threads: IDs of the SQL sessions that were killed.
        :type Threads: list of int
        :param SqlExecId: Execution ID output by the Prepare stage; used in
        the Commit stage to identify the sessions to kill.
        Note: this field may return null, indicating that no valid value
        could be obtained.
        :type SqlExecId: str
        :param RequestId: Unique request ID returned with every request;
        supply it when reporting an issue with this request.
        :type RequestId: str
        """
        for attr in self._FIELDS:
            setattr(self, attr, None)

    def _deserialize(self, params):
        """Copy the known response fields out of ``params``."""
        for attr in self._FIELDS:
            setattr(self, attr, params.get(attr))
class MailConfiguration(AbstractModel):
    """Email sending configuration.
    """

    def __init__(self):
        r"""
        :param SendMail: Whether to enable email sending: 0 - no; 1 - yes.
        :type SendMail: int
        :param Region: Region configuration, e.g. ["ap-guangzhou",
            "ap-shanghai"]. For the inspection email template, the regions
            whose inspection emails should be sent; for the subscription
            template, the region of the currently subscribed instance.
        :type Region: list of str
        :param HealthStatus: Health levels whose reports should be sent,
            e.g. ["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"].
        :type HealthStatus: list of str
        :param ContactPerson: Contact IDs. Contacts and contact groups
            cannot both be empty.
        :type ContactPerson: list of int
        :param ContactGroup: Contact group IDs. Contacts and contact
            groups cannot both be empty.
        :type ContactGroup: list of int
        """
        self.SendMail = None
        self.Region = None
        self.HealthStatus = None
        self.ContactPerson = None
        self.ContactGroup = None

    def _deserialize(self, params):
        """Populate attributes from a raw dict.

        :param params: dict of raw fields.
        """
        self.SendMail = params.get("SendMail")
        self.Region = params.get("Region")
        self.HealthStatus = params.get("HealthStatus")
        self.ContactPerson = params.get("ContactPerson")
        self.ContactGroup = params.get("ContactGroup")
        # Warn about undeclared keys.
        # (typo fix: "memeber"/"fileds" -> "member"/"fields")
        member_set = set(params.keys())
        for name in vars(self):
            member_set.discard(name)
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfRequest(AbstractModel):
    """ModifyDiagDBInstanceConf request structure.
    """

    def __init__(self):
        r"""
        :param InstanceConfs: Instance configuration, including inspection
            and overview switches.
        :type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
        :param Regions: Region in which the setting takes effect; "All"
            means all regions.
        :type Regions: str
        :param Product: Service product type. Valid values: "mysql"
            (TencentDB for MySQL), "cynosdb" (CynosDB for MySQL).
        :type Product: str
        :param InstanceIds: IDs of the instances whose inspection status
            should be changed.
        :type InstanceIds: list of str
        """
        self.InstanceConfs = None
        self.Regions = None
        self.Product = None
        self.InstanceIds = None

    def _deserialize(self, params):
        """Populate attributes from a raw request dict.

        :param params: dict of raw request fields.
        """
        # Nested model: deserialize into an InstanceConfs object.
        if params.get("InstanceConfs") is not None:
            self.InstanceConfs = InstanceConfs()
            self.InstanceConfs._deserialize(params.get("InstanceConfs"))
        self.Regions = params.get("Regions")
        self.Product = params.get("Product")
        self.InstanceIds = params.get("InstanceIds")
        # Warn about undeclared keys.
        # (typo fix: "memeber"/"fileds" -> "member"/"fields")
        member_set = set(params.keys())
        for name in vars(self):
            member_set.discard(name)
        if member_set:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfResponse(AbstractModel):
    """ModifyDiagDBInstanceConf response structure.
    """

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request;
        supply it when reporting an issue with this request.
        :type RequestId: str
        """
        # Only one field in this response.
        self.RequestId = None

    def _deserialize(self, params):
        """Read the request ID out of the raw response dict."""
        self.RequestId = params.get("RequestId")
class MonitorFloatMetric(AbstractModel):
"""监控数据(浮点型)
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
| |
> len(self.X_train):
self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]
# If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array
prep_return = 'df' if self.model.using_keras else 'np'
# Construct the preprocessor
prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\
missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)
# Setup a list to store steps for the sklearn pipeline
pipe_steps = [('preprocessor', prep)]
if self.model.dim_reduction:
# Construct the dimensionality reduction object
reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)
# Include dimensionality reduction in the pipeline steps
pipe_steps.append(('reduction', reduction))
self.model.estimation_step = 2
else:
self.model.estimation_step = 1
# If this is a Keras estimator, update the input shape and reshape the data if required
if self.model.using_keras:
# Update the input shape based on the final number of features after preprocessing
self._keras_update_shape(prep)
# Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments
self.model.estimator_kwargs['build_fn'] = self._keras_build_fn
self.model.estimator_kwargs['architecture'] = self.model.architecture
self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(10)
# Check than an identifier has been provided for sorting data if this is a sequence prediction problem
if self.model.lags or len(self.model.first_layer_kwargs["input_shape"]) > 1:
assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin(["identifier"])]) == 1, \
"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions."
# Cater for multi-step predictions
if self.model.prediction_periods > 1:
# Transform y to a vector of values equal to prediction_periods
self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)
# Drop values from x for which we don't have sufficient y values
self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]
# Add a pipeline step to update the input shape and reshape the data if required
# This transform will also add lag observations if specified through the lags parameter
# If lag_target is True, an additional feature will be created for each sample using the previous value of y
reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)
pipe_steps.append(('reshape', reshape))
self.model.estimation_step += self.model.estimation_step
# Avoid tensorflow error for keras models
# https://github.com/tensorflow/tensorflow/issues/14356
# https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor
kerasbackend.clear_session()
# Try assuming the pipeline involves a grid search
try:
# Construct an estimator
estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)
# Prepare the grid search using the previously set parameter grid
grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)
# Add grid search to the pipeline steps
pipe_steps.append(('grid_search', grid_search))
# Construct the sklearn pipeline using the list of steps
self.model.pipe = Pipeline(pipe_steps)
if self.model.validation in ["k-fold", "timeseries"]:
# Perform K-fold cross validation
self._cross_validate()
# Fit the training data to the pipeline
if self.model.using_keras:
# https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de
session = tf.Session()
kerasbackend.set_session(session)
with session.as_default():
with session.graph.as_default():
sys.stdout.write("\nMODEL: {}, INPUT SHAPE: {}\n\n".format(self.model.name, self.model.first_layer_kwargs['input_shape']))
y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()
self.model.pipe.fit(self.X_train, y)
else:
self.model.pipe.fit(self.X_train, self.y_train.values.ravel())
# Get the best parameters and the cross validation results
grid_search = self.model.pipe.named_steps['grid_search']
self.model.best_params = grid_search.best_params_
self.model.cv_results = grid_search.cv_results_
# Get the best estimator to add to the final pipeline
estimator = grid_search.best_estimator_
# Update the pipeline with the best estimator
self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)
except AttributeError:
# Construct an estimator
estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)
# Add the estimator to the pipeline steps
pipe_steps.append(('estimator', estimator))
# Construct the sklearn pipeline using the list of steps
self.model.pipe = Pipeline(pipe_steps)
if self.model.validation in ["k-fold", "timeseries"]:
# Perform K-fold cross validation
self._cross_validate()
# Fit the training data to the pipeline
if self.model.using_keras:
# https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de
session = tf.Session()
kerasbackend.set_session(session)
with session.as_default():
with session.graph.as_default():
sys.stdout.write("\nMODEL: {}, INPUT SHAPE: {}\n\n".format(self.model.name, self.model.first_layer_kwargs['input_shape']))
y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()
self.model.pipe.fit(self.X_train, y)
else:
self.model.pipe.fit(self.X_train, self.y_train.values.ravel())
if self.model.validation == "hold-out":
# Evaluate the model using the test data
self.calculate_metrics(caller="internal")
if self.model.calc_feature_importances:
# Select the dataset for calculating importances
if self.model.validation == "hold-out":
X = self.X_test
y = self.y_test # Already a numpy array after calculate_metrics
else:
X = self.X_train
y = self.y_train.values.ravel()
# Calculate model agnostic feature importances
self._calc_importances(X = X, y = y)
# Persist the model to disk
self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)
# Update the cache to keep this model in memory
self._update_cache()
# Prepare the output
if self.model.validation != "external":
message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\
time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\
"{0} model has a score of {1:.3f} against the test data."\
.format(self.model.estimator, self.model.score), self.model.score]]
else:
message = [[self.model.name, 'Model successfully trained and saved to disk.',\
time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\
"{0} model score unknown as test_size was <= 0."\
.format(self.model.estimator), np.NaN]]
self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])
# Send the reponse table description to Qlik
self._send_table_description("fit")
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(4)
# Finally send the response
return self.response
# TO DO : Allow for larger datasets by using partial fitting methods avaialble with some sklearn algorithmns
# def partial_fit(self):
def fit_transform(self, load_script=False):
"""
Fit the data to the model and then transform.
This method is meant to be used for unsupervised learning models for clustering and dimensionality reduction.
The models can be fit and tranformed through the load script or through chart expressions in Qlik.
The load_script flag needs to be set accordingly for the correct response.
"""
# Interpret the request data based on the expected row and column structure
row_template = ['strData', 'strData']
col_headers = ['model_name', 'n_features']
feature_col_num = 1
# An additional key field column is expected if the call is made through the load script
if load_script:
row_template = ['strData', 'strData', 'strData']
col_headers = ['model_name', 'key', 'n_features']
feature_col_num = 2
# Create a Pandas Data Frame for the request data
self.request_df = utils.request_df(self.request, row_template, col_headers)
# Initialize the persistent model
self.model = PersistentModel()
# Get the model name from the request dataframe
self.model.name = self.request_df.loc[0, 'model_name']
# Get the model from cache or disk
self._get_model()
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(3)
# Check that the estimator is an unsupervised ML algorithm
if self.model.estimator_type not in ["decomposer", "clusterer"]:
err = "Incorrect usage. The estimator specified is not a known decompostion or clustering algorithm: {0}".format(self.model.estimator)
raise Exception(err)
if load_script:
# Set the key column as the index
self.request_df.set_index("key", drop=False, inplace=True)
# Split the features provided as a string into individual columns
self.X = pd.DataFrame([x[feature_col_num].split("|") for x in self.request_df.values.tolist()], columns=self.model.features_df.loc[:,"name"].tolist(),\
index=self.request_df.index)
# Convert the data types based on feature definitions
self.X = utils.convert_types(self.X, self.model.features_df)
# Construct the preprocessor
prep = Preprocessor(self.model.features_df, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\
missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)
# Create a chache for the pipeline's transformers
# https://scikit-learn.org/stable/modules/compose.html#caching-transformers-avoid-repeated-computation
# cachedir = mkdtemp()
# Construct a sklearn pipeline
self.model.pipe = Pipeline([('preprocessor', prep)]) #, memory=cachedir)
if self.model.dim_reduction:
# Construct the dimensionality reduction object
reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)
# Include dimensionality reduction in the sklearn pipeline
self.model.pipe.steps.insert(1, ('reduction', reduction))
self.model.estimation_step = 2
else:
self.model.estimation_step = 1
# Construct an estimator
estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)
# Add the estimator to the sklearn pipeline
self.model.pipe.steps.append(('estimator', estimator))
# Fit the data to the pipeline
if self.model.estimator_type == "decomposer":
# If the estimator is a decomposer we apply the fit_transform method at the end of the pipeline
self.y = self.model.pipe.fit_transform(self.X)
# Prepare the response
self.response = pd.DataFrame(self.y, index=self.X.index)
elif self.model.estimator_type == "clusterer":
# If the estimator is a decomposer we apply the fit_predict method at the end of the pipeline
self.y = self.model.pipe.fit_predict(self.X)
# Prepare the response
self.response = pd.DataFrame(self.y, columns=["result"], index=self.X.index)
# Clear the cache directory setup for the pipeline's transformers
# rmtree(cachedir)
# Update the cache to keep this model in memory
self._update_cache()
if load_script:
# Add the key field column to the response
self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)
# If the function was called through the load script we return a Data Frame
if self.model.estimator_type == "decomposer":
self._send_table_description("reduce")
| |
start ( )
if 64 - 64: Ii1I / i1IIi % I1IiiI - o0oOOo0O0Ooo
if 11 - 11: I1ii11iIi11i - OoooooooOO
if 16 - 16: IiII % OoooooooOO - ooOoO0o * Ii1I - Ii1I
if 27 - 27: IiII + iIii1I11I1II1 / Oo0Ooo + OoO0O00 % Oo0Ooo + OoO0O00
if 77 - 77: Oo0Ooo * ooOoO0o % Ii1I
if 2 - 2: I11i / Oo0Ooo / Ii1I / I1ii11iIi11i / OoooooooOO
if ( os . path . exists ( "./lisp.config" ) == False ) :
lisp . lprint ( ( "./lisp.config does not exist, creating a copy " + "from lisp.config.example" ) )
if 22 - 22: iIii1I11I1II1 * I1IiiI / I11i + OoOoOO00
O0O0oOOo0O ( )
if 98 - 98: OOooOOo
if 69 - 69: II111iiii + Oo0Ooo - oO0o . Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1
if 75 - 75: OoO0O00 % OoooooooOO
if 16 - 16: O0 / i1IIi
if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
iIiI1I ( I1Ii11I1Ii1i )
if 2 - 2: o0oOOo0O0Ooo . Ii1I % OoOoOO00
threading . Thread ( target = lispconfig . lisp_config_process ,
args = [ Ooo ] ) . start ( )
if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
if 58 - 58: iII111i
threading . Thread ( target = ooO ,
args = [ bottle_port ] ) . start ( )
threading . Thread ( target = oOO0O0O0OO00oo , args = [ ] ) . start ( )
if 2 - 2: II111iiii + i1IIi
if 68 - 68: OOooOOo + Ii1I
if 58 - 58: IiII * Ii1I . i1IIi
if 19 - 19: oO0o
threading . Thread ( target = IiI1I1IIIi1i ,
args = [ Ooo ] ) . start ( )
if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
if 94 - 94: iIii1I11I1II1 + IiII
if 44 - 44: OoO0O00 + I11i % OoO0O00 + i1IIi + iII111i + O0
if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
threading . Thread ( target = iI1I1ii11IIi1 ) . start ( )
return ( True )
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
if 85 - 85: O0 * oO0o
if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
if 25 - 25: OoooooooOO . Ii1I % iII111i . IiII
if 67 - 67: OoooooooOO + I1Ii111 / ooOoO0o
if 75 - 75: IiII / OoooooooOO . I1IiiI + I1Ii111 - II111iiii
# NOTE(review): this module is machine-obfuscated; the "if N - N:"
# statements throughout are no-op filler inserted by the obfuscator and
# have no runtime effect. All uppercase/lowercase soup names are
# obfuscated module-level globals.
def I1i11 ( ) :
 # Shutdown helper: releases the four module-level handles owned by this
 # process via lisp.lisp_close_socket (presumably sockets -- TODO confirm
 # against the lisp module). The first two are named "lisp-core" /
 # "lisp-core-pkt"; the last two are closed anonymously.
 if 5 - 5: o0oOOo0O0Ooo - i11iIiiIii . IiII
 if 10 - 10: OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
 if 13 - 13: oO0o + OoOoOO00 % IiII % OoooooooOO
 if 22 - 22: I1Ii111
 lisp . lisp_close_socket ( Ooo , "lisp-core" )
 lisp . lisp_close_socket ( o0oOoO00o , "lisp-core-pkt" )
 lisp . lisp_close_socket ( I1Ii11I1Ii1i , "" )
 lisp . lisp_close_socket ( oOOoo00O0O , "" )
 return
# Obfuscator filler below (no-ops between definitions).
if 23 - 23: O0
if 41 - 41: i1IIi . OOooOOo / ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
if 14 - 14: I1ii11iIi11i - i11iIiiIii * I1Ii111
if 39 - 39: OoooooooOO
if 19 - 19: i11iIiiIii
if 80 - 80: I1IiiI
if 58 - 58: oO0o + I1ii11iIi11i % OoOoOO00
if 22 - 22: iIii1I11I1II1 - Ii1I / I1IiiI * IiII
if 26 - 26: o0oOOo0O0Ooo + OOooOOo - o0oOOo0O0Ooo + Oo0Ooo . oO0o
if 97 - 97: i1IIi
if 46 - 46: I1ii11iIi11i
if 30 - 30: OoO0O00 / O0 * o0oOOo0O0Ooo * I1Ii111 + OoooooooOO * iII111i
def iIiI1I ( lisp_socket ) :
if 23 - 23: I11i
Oo = open ( "./lisp.config" , "r" ) ; O0OOOOo0O = Oo . read ( ) ; Oo . close ( )
O0OOOOo0O = O0OOOOo0O . split ( "\n" )
if 36 - 36: IiII . iII111i - i1IIi + I1Ii111
if 54 - 54: OoooooooOO . oO0o - iII111i
if 76 - 76: I1Ii111
if 61 - 61: ooOoO0o / II111iiii * ooOoO0o * OoOoOO00 * I1Ii111 . i11iIiiIii
if 26 - 26: I1Ii111 / ooOoO0o - OoO0O00 . iIii1I11I1II1
O0o0OOo0o0o = False
for OOoO in O0OOOOo0O :
if ( OOoO [ 0 : 1 ] == "#-" and OOoO [ - 2 : - 1 ] == "-#" ) : break
if ( OOoO == "" or OOoO [ 0 ] == "#" ) : continue
if ( OOoO . find ( "decentralized-push-xtr = yes" ) == - 1 ) : continue
O0o0OOo0o0o = True
break
if 90 - 90: I11i
if ( O0o0OOo0o0o == False ) : return
if 95 - 95: OoO0O00
if 68 - 68: iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00 - II111iiii - iIii1I11I1II1
if 75 - 75: ooOoO0o . I1IiiI * II111iiii
if 99 - 99: iIii1I11I1II1 * I1ii11iIi11i + IiII
if 70 - 70: i1IIi % ooOoO0o . I1ii11iIi11i - IiII + OOooOOo
OO0o0oo = [ ]
o0oo0oOOOo00 = False
for OOoO in O0OOOOo0O :
if ( OOoO [ 0 : 1 ] == "#-" and OOoO [ - 2 : - 1 ] == "-#" ) : break
if ( OOoO == "" or OOoO [ 0 ] == "#" ) : continue
if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
if ( OOoO . find ( "lisp map-server" ) != - 1 ) :
o0oo0oOOOo00 = True
continue
if 37 - 37: OoO0O00 * I11i + Ii1I + I1ii11iIi11i * o0oOOo0O0Ooo
if ( OOoO [ 0 ] == "}" ) :
o0oo0oOOOo00 = False
continue
if 95 - 95: Ii1I - i11iIiiIii % i11iIiiIii - O0 * I1Ii111
if 81 - 81: II111iiii * I1IiiI % i1IIi * i11iIiiIii + OoOoOO00
if 100 - 100: i1IIi % Ii1I
if 55 - 55: I1IiiI + iII111i
if 85 - 85: oO0o + iII111i % iII111i / I11i . I1IiiI - OoOoOO00
if ( o0oo0oOOOo00 and OOoO . find ( "address = " ) != - 1 ) :
i1I11 = OOoO . split ( "address = " ) [ 1 ]
OoO00 = int ( i1I11 . split ( "." ) [ 0 ] )
if ( OoO00 >= 224 and OoO00 < 240 ) : OO0o0oo . append ( i1I11 )
if 57 - 57: Oo0Ooo - OoooooooOO % I1ii11iIi11i . OoO0O00 * II111iiii
if 72 - 72: I1Ii111 + ooOoO0o . IiII % II111iiii
if ( i1I11 == [ ] ) : return
if 58 - 58: ooOoO0o
if 45 - 45: o0oOOo0O0Ooo
if 67 - 67: iII111i + ooOoO0o
if 25 - 25: i1IIi - i11iIiiIii
Ii1IIi = getoutput ( 'ifconfig eth0 | egrep "inet "' )
if ( Ii1IIi == "" ) : return
i1IIII1II = Ii1IIi . split ( ) [ 1 ]
if 89 - 89: I11i % iII111i * Oo0Ooo / I1Ii111 * Oo0Ooo / ooOoO0o
if 14 - 14: i1IIi * iIii1I11I1II1 - Ii1I * OoOoOO00 - iII111i / oO0o
if 73 - 73: I1ii11iIi11i - OoOoOO00 * O0 - OoOoOO00 - OoO0O00
if 96 - 96: I1ii11iIi11i - O0
Ii111iIi1iIi = socket . inet_aton ( i1IIII1II )
for i1I11 in OO0o0oo :
lisp_socket . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 )
lisp_socket . setsockopt ( socket . IPPROTO_IP , socket . IP_MULTICAST_IF , Ii111iIi1iIi )
I1iO00O000oOO0oO = socket . inet_aton ( i1I11 ) + Ii111iIi1iIi
lisp_socket . setsockopt | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Translate messages from files and from agents to the matching engine
@author: ucaiado
Created on 10/24/2016
"""
import pprint
import numpy as np
def translate_trades_to_agent(row, my_book):
    '''
    Translate the instruction passed by an agent into trades messages to the
    environment.
    Walks the opposite side of the book from the best price toward the
    agent's limit price, emitting one "Passive" fill message per resting
    order consumed (fully or partially), plus one "Agressor" message for
    the agent itself per fill.
    :param row: dict. the message from the agent
    :param my_book: LimitOrderBook object. The order book of an instrument
    :return: list of dict. trade messages, or None when the price-level
        generator is empty.
    NOTE - *row* has the following form:
    ```
    row = {order_side, order_price, total_qty_order, instrumento_symbol,
    agent_id}
    ```
    NOTE(review): this module uses Python 2 constructs (long() below,
    print statements elsewhere in the file).
    '''
    # recover the price levels of interest
    l_msg = []
    gen_bk = None
    s_side = row['order_side']
    # NOTE(review): assumes my_book.best_bid / best_ask are (price, qty)
    # tuples -- confirm against the LimitOrderBook implementation.
    if row['order_side'] == 'Sell Order':
        # a sell crosses the bid book: slice prices from the agent's limit
        # up to just above the best bid, walking downward (reverse=True)
        f_best_price = my_book.best_bid[0]
        f_max = f_best_price + 0.01
        f_min = row['order_price']
        gen_bk = my_book.book_bid.price_tree.item_slice(f_min,
                                                        f_max,
                                                        reverse=True)
    else:
        # a buy crosses the ask book: slice from the best ask up to just
        # above the agent's limit, walking upward
        f_best_price = my_book.best_ask[0]
        f_max = row['order_price'] + 0.01
        f_min = f_best_price
        gen_bk = my_book.book_ask.price_tree.item_slice(f_min,
                                                        f_max,
                                                        reverse=False)
    # recover the price levels of interest
    b_stop = False
    # i_qty: quantity the agent still wants to trade
    i_qty = row['total_qty_order']
    if not gen_bk:
        return None
    for f_price, obj_price in gen_bk:
        if b_stop:
            break
        # iterate resting orders at this price level in priority order
        for idx_ord, obj_order in obj_price.order_tree.nsmallest(1000):
            order_aux = obj_order.d_msg
            # define how much should be traded
            i_qty_traded = order_aux['total_qty_order']  # remain
            i_qty_to_trade = min(i_qty, i_qty_traded)
            # check how many qty it still need to be traded
            i_qty -= i_qty_to_trade
            # define the status of the message
            if order_aux['total_qty_order'] == i_qty_to_trade:
                s_status = 'Filled'
            else:
                s_status = 'Partially Filled'
            assert i_qty >= 0, 'Qty traded smaller than 0'
            # create the message
            i_new_qty_traded = order_aux['traded_qty_order'] + i_qty_to_trade
            # ==== [DEBUG] ====
            s_err = 'the total qty traded should be "lqt" total qty order'
            assert i_qty_traded <= order_aux['org_total_qty_order'], s_err
            # =================
            s_action = 'SELL'
            # if one makes a trade at bid, it is a sell
            if s_side == 'Sell Order':
                s_action = 'BUY'
            # create a trade to fill that order
            # NOTE(review): d_new_msg and i_sec_order computed here are
            # never used before being overwritten/discarded below -- looks
            # like dead code; confirm before removing.
            d_new_msg = order_aux.copy()
            i_sec_order = my_book.i_sec_ask
            if order_aux['order_side'] == 'Buy Order':
                i_sec_order = my_book.i_sec_bid
            # historical orders consumed by the agent are relabelled so the
            # environment can distinguish them from file-replayed trades
            s_passive_action = order_aux['action']
            if s_passive_action == 'history':
                s_passive_action = 'traded_by_agent'
            i_org_qty = order_aux['org_total_qty_order']
            # passive-side fill message: mirrors the resting order's own
            # identifiers, with quantity fields updated for this fill
            d_rtn = {'action': s_passive_action,
                     'agent_id': order_aux['agent_id'],
                     'agressor_indicator': 'Passive',
                     'execution_type': 'Trade',
                     'idx': order_aux['idx'],
                     'instrumento_symbol': order_aux['instrumento_symbol'],
                     'is_today': order_aux['is_today'],
                     'member': order_aux['member'],
                     'order_date': order_aux['order_date'],
                     'order_datetime_entry': my_book.s_time[:-7],
                     'order_id': order_aux['order_id'],
                     'order_price': order_aux['order_price'],
                     'order_side': order_aux['order_side'],
                     'order_status': s_status,
                     'org_total_qty_order': i_org_qty,
                     'priority_indicator': order_aux['priority_indicator'],
                     'priority_seconds': my_book.f_time,
                     'priority_time': my_book.s_time[11:],
                     'secondary_order_id': order_aux['secondary_order_id'],
                     'seq_order_number': order_aux['seq_order_number'],
                     'session_date': my_book.s_time[:10],
                     'total_qty_order': i_qty_traded - i_qty_to_trade,
                     'traded_qty_order': i_new_qty_traded,
                     'order_qty': i_qty_to_trade}
            l_msg.append(d_rtn.copy())
            # check the id of the agressive side
            # create another message to update who took the action
            s_action = 'BUY'
            d_new_msg = my_book.d_bid.copy()
            # if one makes a trade at bid, it is a sell
            if s_side == 'Sell Order':
                s_action = 'SELL'
                d_new_msg = my_book.d_ask.copy()
            # mint fresh identifiers for the agent's synthetic order
            # (mutates my_book's counters as a side effect)
            my_book.i_my_order_id += 1
            s_sec_order_id = '{:015d}'.format(my_book.i_my_order_id)
            my_book.last_priority_id += 1
            i_priority_id = long(my_book.last_priority_id)
            my_book.i_last_order_id += 1  # NOTE: check that
            # aggressor-side message: priced at the passive order's price
            d_rtn = {'action': s_action,
                     'agent_id': row['agent_id'],
                     'agressor_indicator': 'Agressor',
                     'execution_type': 'Trade',
                     'idx': '000000000000000',
                     'instrumento_symbol': row['instrumento_symbol'],
                     'is_today': True,
                     'member': 9999,
                     'order_date': '2016-03-29',
                     'order_datetime_entry': my_book.s_time[:-7],
                     'order_id': my_book.i_last_order_id,  # i_priority_id,
                     'order_price': order_aux['order_price'],
                     'order_side': row['order_side'],
                     'order_status': 'Filled',
                     'org_total_qty_order': i_qty_to_trade,
                     'priority_indicator': i_priority_id,
                     'priority_seconds': my_book.f_time,
                     'priority_time': my_book.s_time[11:],
                     'secondary_order_id': s_sec_order_id,
                     'seq_order_number': s_sec_order_id,
                     'session_date': my_book.s_time[:10],
                     'total_qty_order': 0,
                     'traded_qty_order': i_qty_to_trade,
                     'order_qty': i_qty_to_trade}
            l_msg.append(d_rtn.copy())
            # agent fully filled: stop walking the book
            if i_qty == 0:
                b_stop = True
                break
    return l_msg
def translate_trades(row, my_book):
    '''
    Translate trade row into trades messages. Just translate the row if the
    trade occurs at the current best price
    :param row: dict. the original message from file
    :param my_book: LimitOrderBook object.
    :return: list of dict. synthetic correction messages plus (possibly)
        the original row. NOTE(review): returns None when the price-level
        generator is empty but an empty/partial list in other cases --
        callers must handle both; confirm whether this asymmetry is
        intentional.
    '''
    # recover the price levels of interest
    l_msg = []
    gen_bk = None
    if row['order_side'] == 'Buy Order':
        # trade hit the bid book: scan from the trade price up to just
        # above the best bid, walking downward (reverse=True)
        if not my_book.best_bid:
            return l_msg
        f_best_price = my_book.best_bid[0]
        f_max = f_best_price + 0.01
        f_min = row['order_price']
        gen_bk = my_book.book_bid.price_tree.item_slice(f_min,
                                                        f_max,
                                                        reverse=True)
    else:
        # trade hit the ask book: scan from the best ask up to just above
        # the trade price, walking upward
        if not my_book.best_ask:
            return l_msg
        f_best_price = my_book.best_ask[0]
        f_max = row['order_price'] + 0.01
        f_min = f_best_price
        gen_bk = my_book.book_ask.price_tree.item_slice(f_min,
                                                        f_max,
                                                        reverse=False)
    # recover the price levels of interest
    b_stop = False
    if not gen_bk:
        return None
    l_msg_debug = []
    for f_price, obj_price in gen_bk:
        # assert obj_price.order_tree.count <= 2, 'More than two offers'
        for idx_ord, obj_order in obj_price.order_tree.nsmallest(1000):
            # check if the order Id is different from the message
            d_compare = obj_order.d_msg
            if row['seq_order_number'] != d_compare['seq_order_number']:
                # a different order sits ahead of the traded one at this
                # price: emit a synthetic full fill to clear it first
                # create a trade to fill that order
                i_org_qty = d_compare['org_total_qty_order']
                i_traded_qty = d_compare['traded_qty_order']
                # === DEBUG ===
                l_msg_debug.append(d_compare)
                # =============
                # NOTE(review): i_sec_order is computed but never used --
                # looks like dead code; confirm before removing.
                i_sec_order = my_book.i_sec_ask
                if d_compare['order_side'] == 'Buy Order':
                    i_sec_order = my_book.i_sec_bid
                # correction message mirroring the stale order, marked as
                # fully traded so the book drops it
                d_rtn = {'action': 'correction_by_trade',
                         'agent_id': d_compare['agent_id'],
                         'agressor_indicator': 'Passive',
                         'execution_type': 'Trade',
                         'idx': '000000000000000',
                         'instrumento_symbol': d_compare['instrumento_symbol'],
                         'is_today': d_compare['is_today'],
                         'member': d_compare['member'],
                         'order_date': d_compare['order_date'],
                         'order_datetime_entry': my_book.s_time[:-7],
                         'order_id': d_compare['order_id'],
                         'order_price': d_compare['order_price'],
                         'order_side': d_compare['order_side'],
                         'order_status': 'Filled',
                         'org_total_qty_order': i_org_qty,
                         'priority_indicator': d_compare['priority_indicator'],
                         'priority_seconds': my_book.f_time,
                         'priority_time': my_book.s_time[11:],
                         'secondary_order_id': d_compare['secondary_order_id'],
                         'seq_order_number': d_compare['seq_order_number'],
                         'session_date': my_book.s_time[:10],
                         'total_qty_order': 0,
                         'traded_qty_order': i_org_qty,
                         'order_qty': i_org_qty - i_traded_qty}
                l_msg.append(d_rtn.copy())
            else:
                # found the order from the message itself: stop scanning
                b_stop = True
                break
        if b_stop:
            break
    # when the book does not guarantee secure changes and the traded order
    # was never found, pass the original row through untouched
    if not my_book.b_secure_changes:
        if not b_stop:
            return [row]
    l_msg.append(row)
    # === DEBUG ===
    if len(l_msg_debug) > 1:
        print 'translate_trades(): Order should not be here'
        pprint.pprint(l_msg_debug)
        print
    # =============
    return l_msg
def translate_row(idx, row, my_book, s_side=None):
'''
Translate a line from a file of the bloomberg level I data
:param idx: integer. Order entry step
:param row: dict. the original message from file
:param my_book: LimitOrderBook object.
:param s_side*: string. 'BID' or 'ASK'. Determine the side of the trade
'''
# check if is a valid hour
if row['priority_seconds'] < 10*60**2:
return [row]
if row['priority_seconds'] > 15*60**2 + 30*60:
return [row]
# check if there are other orders before the order traded
b_trade1 = (row['execution_type'] == 'Trade')
b_trade2 = (row['agressor_indicator'] == 'Passive')
if b_trade1 and b_trade2:
return translate_trades(row, my_book)
return [row]
def correct_books(my_book):
    '''
    Return if should correct something on the book and the modifications needed
    :param my_book: LimitOrderBook object.
    '''
    # check if it is a valid hour
    b_should_update, d_updates = False, {'BID': [], 'ASK': []}
    # NOTE(review): bitwise | on booleans works here, but `or` is conventional
    if (my_book.f_time < 10*60**2) | (my_book.f_time > 15*60**2 + 30*60):
        return b_should_update, d_updates
    # check if the prices have crossed themselfs
    b_test = True
    # make sure that there are prices in the both sides
    if my_book.book_ask.price_tree.count == 0:
        b_test = False
    if my_book.book_bid.price_tree.count == 0:
        b_test = False
    # a trade on either side resets the crossed-books counter and skips the
    # correction for this message
    if my_book.d_bid['execution_type'] == 'Trade':
        my_book.i_count_crossed_books = 0
        b_test = False
    if my_book.d_ask['execution_type'] == 'Trade':
        my_book.i_count_crossed_books = 0
        b_test = False
    if not my_book.best_bid:
        b_test = False
    if not my_book.best_ask:
        b_test = False
    # correct the books when they have crossed each other
    if not b_test:
        return b_should_update, d_updates
    # best_bid / best_ask are (price, qty) pairs — assumed; TODO confirm
    if my_book.best_bid[0] != 0 and my_book.best_ask[0] != 0 and b_test:
        if my_book.best_bid[0] >= my_book.best_ask[0]:
            my_book.i_count_crossed_books += 1
            # only act after the books stay crossed for more than 2 messages
            if my_book.i_count_crossed_books > 2:
                # as it doesnt know which side was there at first, it will
                # emulate a trade in the smaller side
                # TODO: Implement here when the books cross each other.
                # recover the price levels of interest
                l_msg = []
                i_idx_min = np.argmin([my_book.best_bid[1],
                                       my_book.best_ask[1]])
                l_book_min = [my_book.obj_best_bid, my_book.obj_best_ask]
                obj_price = l_book_min[i_idx_min]
                s_symbol = my_book.d_bid['instrumento_symbol']
                for idx_ord, obj_order in obj_price.order_tree.nsmallest(1000):
                    # check if the order Id is different from the message
                    d_compare = obj_order.d_msg
                    # create a trade to fill that order
                    i_org_qty = d_compare['org_total_qty_order']
                    i_traded_qty = d_compare['traded_qty_order']
                    i_sec_order_id = d_compare['secondary_order_id']
                    i_priority_id = d_compare['priority_indicator']
                    s_symbol = d_compare['instrumento_symbol']
                    # NOTE(review): i_sec_order is computed but never used in
                    # d_rtn below — verify intent
                    i_sec_order = my_book.i_sec_ask
                    if d_compare['order_side'] == 'Buy Order':
                        i_sec_order = my_book.i_sec_bid
                    # synthesize a full fill of the resting order
                    d_rtn = {'action': 'crossed_prices',
                             'agent_id': d_compare['agent_id'],
                             'agressor_indicator': 'Passive',
                             'execution_type': 'Trade',
                             'idx': '000000000000000',
                             'instrumento_symbol': s_symbol,
                             'is_today': d_compare['is_today'],
                             'member': d_compare['member'],
                             'order_date': d_compare['order_date'],
                             'order_datetime_entry': my_book.s_time[:-7],
                             'order_id': d_compare['order_id'],
                             'order_price': d_compare['order_price'],
                             'order_side': d_compare['order_side'],
                             'order_status': 'Filled',
                             'org_total_qty_order': i_org_qty,
                             'priority_indicator': i_priority_id,
                             'priority_seconds': my_book.f_time,
                             'priority_time': my_book.s_time[11:],
                             'secondary_order_id': i_sec_order_id,
                             'seq_order_number': d_compare['seq_order_number'],
                             'session_date': my_book.s_time[:10],
                             'total_qty_order': 0,
                             'traded_qty_order': i_org_qty,
                             'order_qty': i_org_qty - i_traded_qty}
                    l_msg.append(d_rtn.copy())
                d_updates[['BID', 'ASK'][i_idx_min]] = l_msg
                b_should_update = True
    return b_should_update, d_updates
def translate_cancel_to_agent(agent, s_instr, s_action, s_side, i_n_to_cancel):
    '''
    Return a list of messages to cancel in the main order book, given the
    parameters passed
    :param agent: Agent Object.
    :param s_side: string. Side of the LOB
    :param s_action: string.
    :param s_instr: string. Instrument to cancel orders
    :param i_n_to_cancel: integer. Number of order to cancel
    '''
    book_side = agent.d_order_tree[s_instr][s_side]
    # ASK side iterates from the largest keys, BID side from the smallest
    pick_orders = book_side.nlargest if s_side == 'ASK' else book_side.nsmallest
    l_msg = []
    for _, d_order in pick_orders(i_n_to_cancel):
        d_cancel = dict(d_order)
        d_cancel['order_status'] = 'Canceled'
        d_cancel['execution_type'] = 'Cancel'
        d_cancel['action'] = s_action
        l_msg.append(d_cancel)
    return l_msg
def translate_to_agent(agent, s_action, my_book, f_spread=0.10, | |
# Source: ross/stochastic/st_results.py (repository JuliaMota/ross)
"""STOCHASTIC ROSS plotting module.
This module returns graphs for each type of analyses in st_rotor_assembly.py.
"""
import numpy as np
from plotly import express as px
from plotly import graph_objects as go
from plotly import io as pio
from plotly.subplots import make_subplots
from ross.plotly_theme import tableau_colors
pio.renderers.default = "browser"
# set Plotly palette of colors
colors1 = px.colors.qualitative.Dark24
colors2 = px.colors.qualitative.Light24
class ST_CampbellResults:
    """Store stochastic results and provide plots for the Campbell Diagram.

    It's possible to visualize multiple harmonics in a single plot to check
    other speeds which also excite a specific natural frequency.
    All plots are drawn with Plotly and rendered according to
    ``plotly.io.renderers.default``.

    Parameters
    ----------
    speed_range : array
        Array with the speed range in rad/s.
    wd : array
        Array with the damped natural frequencies
    log_dec : array
        Array with the Logarithmic decrement

    Returns
    -------
    subplots : Plotly graph_objects.make_subplots()
        Plotly figure with diagrams for frequency and log dec.
    """

    def __init__(self, speed_range, wd, log_dec):
        self.speed_range = speed_range
        self.wd = wd
        self.log_dec = log_dec

    def plot_nat_freq(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
        """Plot the damped natural frequencies vs frequency.

        Parameters
        ----------
        percentile : list, optional
            Sequence of percentiles to compute, which must be between
            0 and 100 inclusive.
        conf_interval : list, optional
            Sequence of confidence intervals to compute, which must be between
            0 and 100 inclusive.
        harmonics: list, optional
            List with the harmonics to be plotted.
            The default is to plot 1x.
        kwargs : optional
            Additional key word arguments can be passed to change the plot
            (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
            *See Plotly Python Figure Reference for more information.

        Returns
        -------
        fig : Plotly graph_objects.Figure()
            The figure object with the plot.
        """
        default_values = dict(mode="lines")
        conf_interval = np.sort(conf_interval)
        percentile = np.sort(percentile)
        for k, v in default_values.items():
            kwargs.setdefault(k, v)
        fig = go.Figure()
        # x runs forward then backward so the confidence band can be drawn as
        # one closed polygon with fill="toself"
        x = np.concatenate((self.speed_range, self.speed_range[::-1]))
        # straight harmonic lines (1x, 2x, ...) crossing the mode curves
        for j, h in enumerate(harmonics):
            fig.add_trace(
                go.Scatter(
                    x=self.speed_range,
                    y=self.speed_range * h,
                    opacity=1.0,
                    name="{}x speed".format(h),
                    line=dict(width=3, color=colors1[j], dash="dashdot"),
                    legendgroup="speed{}".format(j),
                    hovertemplate=("Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"),
                    **kwargs,
                )
            )
        # one mean curve (plus optional percentile/confidence traces) per mode
        for j in range(self.wd.shape[0]):
            fig.add_trace(
                go.Scatter(
                    x=self.speed_range,
                    y=np.mean(self.wd[j], axis=1),
                    opacity=1.0,
                    name="Mean - Mode {}".format(j + 1),
                    line=dict(width=3, color=colors1[j]),
                    legendgroup="mean{}".format(j),
                    hovertemplate=("Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"),
                    **kwargs,
                )
            )
            for i, p in enumerate(percentile):
                fig.add_trace(
                    go.Scatter(
                        x=self.speed_range,
                        y=np.percentile(self.wd[j], p, axis=1),
                        opacity=0.6,
                        line=dict(width=2.5, color=colors2[j]),
                        name="percentile: {}%".format(p),
                        legendgroup="percentile{}{}".format(j, i),
                        hovertemplate=(
                            "Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"
                        ),
                        **kwargs,
                    )
                )
            for i, p in enumerate(conf_interval):
                # p% interval == the band between the (50 ± p/2) percentiles
                p1 = np.percentile(self.wd[j], 50 + p / 2, axis=1)
                p2 = np.percentile(self.wd[j], 50 - p / 2, axis=1)
                fig.add_trace(
                    go.Scatter(
                        x=x,
                        y=np.concatenate((p1, p2[::-1])),
                        line=dict(width=1, color=colors1[j]),
                        fill="toself",
                        fillcolor=colors1[j],
                        opacity=0.3,
                        name="confidence interval: {}% - Mode {}".format(p, j + 1),
                        legendgroup="conf{}{}".format(j, i),
                        hovertemplate=(
                            "Frequency: %{x:.3f}<br>" + "Frequency: %{y:.3f}"
                        ),
                        **kwargs,
                    )
                )
        fig.update_xaxes(
            title_text="<b>Rotor speed</b>",
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_yaxes(
            title_text="<b>Damped Natural Frequencies</b>",
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_layout(
            width=1200,
            height=900,
            plot_bgcolor="white",
            legend=dict(
                font=dict(family="sans-serif", size=14),
                bgcolor="white",
                bordercolor="black",
                borderwidth=2,
            ),
        )
        return fig

    def plot_log_dec(self, percentile=[], conf_interval=[], harmonics=[1], **kwargs):
        """Plot the log_dec vs frequency.

        Parameters
        ----------
        percentile : list, optional
            Sequence of percentiles to compute, which must be between
            0 and 100 inclusive.
        conf_interval : list, optional
            Sequence of confidence intervals to compute, which must be
            between 0 and 100 inclusive.
        harmonics: list, optional
            List with the harmonics to be plotted.
            The default is to plot 1x.
        kwargs : optional
            Additional key word arguments can be passed to change the plot
            (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
            *See Plotly Python Figure Reference for more information.

        Returns
        -------
        fig : Plotly graph_objects.Figure()
            The figure object with the plot.
        """
        default_values = dict(mode="lines")
        conf_interval = np.sort(conf_interval)
        percentile = np.sort(percentile)
        for k, v in default_values.items():
            kwargs.setdefault(k, v)
        fig = go.Figure()
        x = np.concatenate((self.speed_range, self.speed_range[::-1]))
        for j in range(self.log_dec.shape[0]):
            fig.add_trace(
                go.Scatter(
                    x=self.speed_range,
                    y=np.mean(self.log_dec[j], axis=1),
                    opacity=1.0,
                    name="Mean - Mode {}".format(j + 1),
                    line=dict(width=3, color=colors1[j]),
                    legendgroup="mean{}".format(j),
                    hovertemplate=("Frequency: %{x:.3f}<br>" + "Log Dec: %{y:.3f}"),
                    **kwargs,
                )
            )
            for i, p in enumerate(percentile):
                fig.add_trace(
                    go.Scatter(
                        x=self.speed_range,
                        y=np.percentile(self.log_dec[j], p, axis=1),
                        opacity=0.6,
                        line=dict(width=2.5, color=colors2[j]),
                        name="percentile: {}%".format(p),
                        legendgroup="percentile{}{}".format(j, i),
                        hoverinfo="none",
                        **kwargs,
                    )
                )
            for i, p in enumerate(conf_interval):
                p1 = np.percentile(self.log_dec[j], 50 + p / 2, axis=1)
                p2 = np.percentile(self.log_dec[j], 50 - p / 2, axis=1)
                fig.add_trace(
                    go.Scatter(
                        x=x,
                        y=np.concatenate((p1, p2[::-1])),
                        line=dict(width=1, color=colors1[j]),
                        fill="toself",
                        fillcolor=colors1[j],
                        opacity=0.3,
                        name="confidence interval: {}% - Mode {}".format(p, j + 1),
                        legendgroup="conf{}{}".format(j, i),
                        hoverinfo="none",
                        **kwargs,
                    )
                )
        fig.update_xaxes(
            title_text="<b>Rotor speed</b>",
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_yaxes(
            title_text="<b>Logarithmic decrement</b>",
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_layout(
            plot_bgcolor="white",
            width=1200,
            height=900,
            legend=dict(
                font=dict(family="sans-serif", size=14),
                bgcolor="white",
                bordercolor="black",
                borderwidth=2,
            ),
        )
        return fig

    def plot(self, percentile=[], conf_interval=[], *args, **kwargs):
        """Plot Campbell Diagram.

        This method plots the Campbell Diagram as two side-by-side subplots:
        damped natural frequencies (left) and log dec (right).

        Parameters
        ----------
        percentile : list, optional
            Sequence of percentiles to compute, which must be between
            0 and 100 inclusive.
        conf_interval : list, optional
            Sequence of confidence intervals to compute, which must be between
            0 and 100 inclusive.
        args: optional
            harmonics : list, optional
                List with the harmonics to be plotted.
                The default is to plot 1x.
        kwargs : optional
            Additional key word arguments can be passed to change the plot
            (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
            *See Plotly Python Figure Reference for more information.

        Returns
        -------
        subplots : Plotly graph_objects.make_subplots()
            Plotly figure with diagrams for frequency and log dec.
        """
        fig0 = self.plot_nat_freq(percentile, conf_interval, *args, **kwargs)
        # hide the duplicated legend entries of the second subplot
        default_values = dict(showlegend=False)
        for k, v in default_values.items():
            kwargs.setdefault(k, v)
        fig1 = self.plot_log_dec(percentile, conf_interval, *args, **kwargs)
        subplots = make_subplots(rows=1, cols=2)
        for data in fig0["data"]:
            subplots.add_trace(data, 1, 1)
        for data in fig1["data"]:
            subplots.add_trace(data, 1, 2)
        # fix: each subplot now receives the axes of ITS source figure; the
        # left panel previously got fig1's y-axis ("Logarithmic decrement")
        # instead of the natural-frequency axis.
        subplots.update_xaxes(fig0.layout.xaxis, row=1, col=1)
        subplots.update_yaxes(fig0.layout.yaxis, row=1, col=1)
        subplots.update_xaxes(fig1.layout.xaxis, row=1, col=2)
        subplots.update_yaxes(fig1.layout.yaxis, row=1, col=2)
        subplots.update_layout(
            plot_bgcolor="white",
            width=1800,
            height=900,
            legend=dict(
                font=dict(family="sans-serif", size=14),
                bgcolor="white",
                bordercolor="black",
                borderwidth=2,
            ),
        )
        return subplots
class ST_FrequencyResponseResults:
"""Store stochastic results and provide plots for Frequency Response.
Parameters
----------
speed_range : array
Array with the speed range in rad/s.
magnitude : array
Array with the frequencies, magnitude (dB) of the frequency
response for each pair input/output.
phase : array
Array with the frequencies, phase of the frequency
response for each pair input/output.
Returns
-------
subplots : Plotly graph_objects.make_subplots()
Plotly figure with amplitude vs frequency phase angle vs frequency.
"""
    def __init__(self, speed_range, magnitude, phase):
        """Store the arrays used by the plotting methods (see class docstring)."""
        self.speed_range = speed_range
        self.magnitude = magnitude
        self.phase = phase
    def plot_magnitude(
        self,
        percentile=[],
        conf_interval=[],
        units="mic-pk-pk",
        **kwargs,
    ):
        """Plot amplitude vs frequency.

        This method plots the frequency response magnitude given an output and
        an input using Plotly.

        Parameters
        ----------
        percentile : list, optional
            Sequence of percentiles to compute, which must be between
            0 and 100 inclusive.
        conf_interval : list, optional
            Sequence of confidence intervals to compute, which must be between
            0% and 100% inclusive.
        units : str, optional
            Unit system
            Default is "mic-pk-pk".
        kwargs : optional
            Additional key word arguments can be passed to change the plot
            (e.g. line=dict(width=4.0, color="royalblue"), opacity=1.0, ...)
            *See Plotly Python Figure Reference for more information.

        Returns
        -------
        fig : Plotly graph_objects.Figure()
            The figure object with the plot.
        """
        # units only affect the y-axis label, not the plotted values
        if units == "m":
            y_axis_label = "<b>Amplitude (m)</b>"
        elif units == "mic-pk-pk":
            y_axis_label = "<b>Amplitude (μ pk-pk)</b>"
        else:
            y_axis_label = "<b>Amplitude (dB)</b>"
        default_values = dict(mode="lines")
        conf_interval = np.sort(conf_interval)
        percentile = np.sort(percentile)
        for k, v in default_values.items():
            kwargs.setdefault(k, v)
        fig = go.Figure()
        # mean magnitude across the samples (axis=1)
        fig.add_trace(
            go.Scatter(
                x=self.speed_range,
                y=np.mean(self.magnitude, axis=1),
                opacity=1.0,
                name="Mean",
                line=dict(width=3, color="black"),
                legendgroup="mean",
                hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
                **kwargs,
            )
        )
        for i, p in enumerate(percentile):
            fig.add_trace(
                go.Scatter(
                    x=self.speed_range,
                    y=np.percentile(self.magnitude, p, axis=1),
                    opacity=0.6,
                    line=dict(width=2.5, color=colors2[i]),
                    name="percentile: {}%".format(p),
                    legendgroup="percentile{}".format(i),
                    hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
                    **kwargs,
                )
            )
        # forward + reversed x so each confidence band closes into a polygon
        x = np.concatenate((self.speed_range, self.speed_range[::-1]))
        for i, p in enumerate(conf_interval):
            # p% interval == the band between the (50 ± p/2) percentiles
            p1 = np.percentile(self.magnitude, 50 + p / 2, axis=1)
            p2 = np.percentile(self.magnitude, 50 - p / 2, axis=1)
            fig.add_trace(
                go.Scatter(
                    x=x,
                    y=np.concatenate((p1, p2[::-1])),
                    line=dict(width=1, color=colors1[i]),
                    fill="toself",
                    fillcolor=colors1[i],
                    opacity=0.5,
                    name="confidence interval: {}%".format(p),
                    legendgroup="conf{}".format(i),
                    hovertemplate=("Frequency: %{x:.2f}<br>" + "Amplitude: %{y:.2e}"),
                    **kwargs,
                )
            )
        fig.update_xaxes(
            title_text="<b>Frequency</b>",
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_yaxes(
            title_text=y_axis_label,
            title_font=dict(family="Arial", size=20),
            tickfont=dict(size=16),
            gridcolor="lightgray",
            showline=True,
            linewidth=2.5,
            linecolor="black",
            mirror=True,
        )
        fig.update_layout(
            plot_bgcolor="white",
            width=1200,
            height=900,
            legend=dict(
                font=dict(family="sans-serif", size=14),
                bgcolor="white",
                bordercolor="black",
                borderwidth=2,
            ),
        )
        return fig
def plot_phase(self, percentile=[], conf_interval=[], **kwargs):
"""Plot | |
    def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5, nb_harmonics=None):
        """Seasonality block: thetas are projected onto a Fourier basis.

        NOTE(review): thetas_dim is accepted but ignored — the theta dimension
        is forced to nb_harmonics (when given) or forecast_length; confirm this
        matches the intended N-BEATS configuration.
        """
        if nb_harmonics:
            super(SeasonalityBlock, self).__init__(units, nb_harmonics, device, backcast_length,
                                                   forecast_length, share_thetas=True)
        else:
            super(SeasonalityBlock, self).__init__(units, forecast_length, device, backcast_length,
                                                   forecast_length, share_thetas=True)
    def forward(self, x):
        """Run the shared Block stack, then map thetas through the seasonality basis."""
        x = super(SeasonalityBlock, self).forward(x)
        # theta_b/theta_f feed the Fourier-basis expansion over each linspace
        backcast = seasonality_model(self.theta_b_fc(x), self.backcast_linspace, self.device)
        forecast = seasonality_model(self.theta_f_fc(x), self.forecast_linspace, self.device)
        return backcast, forecast
class TrendBlock(Block):
    """N-BEATS block whose thetas are projected onto a polynomial trend basis."""

    def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5, nb_harmonics=None):
        # nb_harmonics is accepted only for signature parity with the other
        # block types; trend blocks do not use it
        super(TrendBlock, self).__init__(units, thetas_dim, device, backcast_length,
                                         forecast_length, share_thetas=True)

    def forward(self, x):
        hidden = super(TrendBlock, self).forward(x)
        backcast = trend_model(self.theta_b_fc(hidden), self.backcast_linspace, self.device)
        forecast = trend_model(self.theta_f_fc(hidden), self.forecast_linspace, self.device)
        return backcast, forecast
class GenericBlock(Block):
    """N-BEATS block with free (unconstrained) linear backcast/forecast heads."""

    def __init__(self, units, thetas_dim, device, backcast_length=10, forecast_length=5, nb_harmonics=None):
        super(GenericBlock, self).__init__(units, thetas_dim, device, backcast_length, forecast_length)
        # unconstrained projections from theta space to the output windows
        self.backcast_fc = nn.Linear(thetas_dim, backcast_length)
        self.forecast_fc = nn.Linear(thetas_dim, forecast_length)

    def forward(self, x):
        # generic architecture (paper section 3.3): ReLU on the thetas, then a
        # plain linear layer — no basis-function constraint
        hidden = super(GenericBlock, self).forward(x)
        backcast = self.backcast_fc(F.relu(self.theta_b_fc(hidden)))
        forecast = self.forecast_fc(F.relu(self.theta_f_fc(hidden)))
        return backcast, forecast
# plot utils. For plotting time series graphs
def plot_scatter(*args, **kwargs):
    """Draw the same data twice: as a connected line and as scatter markers."""
    for draw in (plt.plot, plt.scatter):
        draw(*args, **kwargs)
# simple batcher.
def data_generator(x_full, y_full, bs):
    """Yield aligned (x_batch, y_batch) pairs of at most *bs* rows, forever.

    Fixes the original behaviour where the 2-tuple ``(x_full, y_full)`` itself
    was passed to ``split()``: since ``len((x, y)) == 2``, every "batch" was
    the whole dataset and *bs* was silently ignored.

    :param x_full: sequence/array of input (backcast) windows.
    :param y_full: sequence/array of target (forecast) windows, same length.
    :param bs: integer batch size.
    """
    def split(arr, size):
        # chop *arr* into consecutive chunks of at most *size* elements
        arrays = []
        while len(arr) > size:
            slice_ = arr[:size]
            arrays.append(slice_)
            arr = arr[size:]
        arrays.append(arr)
        return arrays

    while True:
        # batch x and y in lockstep so each yielded pair stays aligned
        for rr in zip(split(x_full, bs), split(y_full, bs)):
            yield rr
###################################################################################################
# trainer. Training model for 200 steps for now
def train_200_grad_steps(data, device, net, optimiser, test_losses):
    """Train *net* for up to 200 gradient steps with early stopping.

    Resumes from CHECKPOINT_NAME, trains on batches drawn from *data*, and
    checkpoints whenever validation loss improves. Returns 0 when patience is
    exhausted (early stop); otherwise returns None after breaking at a
    200-step boundary.

    NOTE(review): x_val, y_val, CHECKPOINT_NAME and PATIENCE_PERIOD are read
    from module globals — confirm they are defined before this is called.
    """
    # start from the last recorded validation loss, capped at 1000
    previous_loss = min(test_losses[-1],1000)
    global_step = load(net, optimiser, CHECKPOINT_NAME)
    patience = PATIENCE_PERIOD
    for x_train_batch, y_train_batch in data:
        global_step += 1
        optimiser.zero_grad()
        net.train()
        _, forecast = net(torch.tensor(x_train_batch, dtype=torch.float))
        loss = F.mse_loss(forecast, torch.tensor(y_train_batch, dtype=torch.float))
        loss.backward()
        optimiser.step()
        ###### Early stopping implementation ######
        # validate after every single gradient step
        net.eval()
        _, forecast = net(torch.tensor(x_val, dtype = torch.float))
        val_loss = F.mse_loss(forecast, torch.tensor(y_val, dtype=torch.float))
        if val_loss <= previous_loss:
            # improvement: checkpoint and reset patience
            with torch.no_grad():
                save(net,optimiser, global_step, CHECKPOINT_NAME)
            previous_loss = val_loss
            patience = PATIENCE_PERIOD
        else:
            patience -= 1
            if patience == 0:
                # early-stop sentinel checked by the caller (flag == 0)
                return 0
        ######
        if global_step % 30 == 0:
            print('grad_step = ' + str(str(global_step).zfill(6)) + ', tr_loss = ' + str(loss.item()) + ', te_loss = ' + str(val_loss.item()))
        if global_step > 0 and global_step % 200 == 0:
            # checkpoint and hand control back every 200 steps
            with torch.no_grad():
                save(net, optimiser, global_step,CHECKPOINT_NAME)
            break
# loader/saver for checkpoints.
def load(model, optimiser, checkpoint_name):
    """Restore model/optimiser state from *checkpoint_name* if it exists.

    Returns the stored gradient-step counter, or 0 when no checkpoint file
    is present (fresh start).
    """
    if not os.path.exists(checkpoint_name):
        return 0
    checkpoint = torch.load(checkpoint_name)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimiser.load_state_dict(checkpoint['optimizer_state_dict'])
    grad_step = checkpoint['grad_step']
    print('Restored checkpoint from ' + str(checkpoint_name) + '.')
    return grad_step
def save(model, optimiser, grad_step, checkpoint_name):
    """Write a training checkpoint: step counter plus model/optimiser state."""
    state = {
        'grad_step': grad_step,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimiser.state_dict(),
    }
    torch.save(state, checkpoint_name)
# evaluate model on test data and produce some plots.
def eval_test(backcast_length, forecast_length, net, norm_constant, test_losses, x_test, y_test):
    """Evaluate *net* on the test set, record MSE, and plot 4 random samples.

    Appends the test MSE to *test_losses* (mutated in place) and draws a 2x2
    grid of backcast/target/forecast curves, de-normalised by *norm_constant*.
    NOTE(review): SHOW_DEVELOPMENT is a module global — plots only appear when
    it is truthy.
    """
    net.eval()
    _, forecast = net(torch.tensor(x_test, dtype=torch.float))
    test_losses.append(F.mse_loss(forecast, torch.tensor(y_test, dtype=torch.float)).item())
    p = forecast.detach().numpy()
    # 2x2 matplotlib subplot grid positions
    subplots = [221, 222, 223, 224]
    plt.figure(1)
    # plot 4 distinct random test samples
    for plot_id, i in enumerate(np.random.choice(range(len(x_test)), size=4, replace=False)):
        # undo the normalisation before plotting
        ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant
        plt.subplot(subplots[plot_id])
        plt.grid()
        plot_scatter(range(0, backcast_length), xx, color='b')
        plot_scatter(range(backcast_length, backcast_length + forecast_length), yy, color='g')
        plot_scatter(range(backcast_length, backcast_length + forecast_length), ff, color='r')
    if SHOW_DEVELOPMENT:
        plt.show()
# main
# Map the CLI stack-type names onto NBeatsNet block constants, one list of
# block constants per configured stack; unknown names fall back to trend.
STACK_TYPE = []
for l_stack_names in args.stack_type:
    l_blocks = []
    for s_block_name in l_stack_names:
        if s_block_name == "generic":
            l_blocks.append(NBeatsNet.GENERIC_BLOCK)
        elif s_block_name == "seasonality":
            l_blocks.append(NBeatsNet.SEASONALITY_BLOCK)
        else:
            l_blocks.append(NBeatsNet.TREND_BLOCK)
    STACK_TYPE.append(l_blocks)
#-----Training code------
if TESTING==False:
    # fresh run: discard any stale checkpoint from a previous grid search
    if os.path.isfile(CHECKPOINT_NAME):
        os.remove(CHECKPOINT_NAME)
    device = torch.device('cpu') # use the trainer.py to run on GPU.
    data = pd.read_csv(INPUT_FILE_PATH, index_col=0, parse_dates=True)
    data = data.values # just keep np array here for simplicity.
    norm_constant = args.norm_constant if args.norm_constant!=None else np.max(data) #norm_constant for normalizing the data to lie between 0 and 1
    # persist the normalising constant so the testing branch can reuse it
    model_dict = {"norm_constant": norm_constant}
    f = open(DICT_PATH,"wb")
    pickle.dump(model_dict, f)
    f.close()
    print("\nNormalizing constant for the data is: " + str(norm_constant))
    data = data / norm_constant # small leak to the test set here.
    #print(data)
    min_loss = 1000 #Startin minimum loss with a large value
    # hyper-parameter grids (module-level constants) — one model is trained
    # per combination of backcast length / stack type / blocks / units
    hiddenLayerUnits = HIDDEN_LAYER_UNITS
    #stackTypes = [[NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK], [NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK, NBeatsNet.GENERIC_BLOCK]] #In case you change this please change ThetaDims in the inner most for loop mentioned below.
    stackTypes = STACK_TYPE
    thetaDims = THETA_DIMS
    numBlocksPerStack = BLOCKS_PER_STACK # Number of blocks per stack
    backcastDays = BACKCAST_DAYS # Backcast length of 1-week, 2-weeks, 3-weeks
    batch_size = BATCH_SIZE
    shareWeightsInStack = SHARE_WEIGHTS_IN_STACK
    for days in backcastDays:
        # 96 = samples per day (15-minute resolution) — assumed; TODO confirm
        backcast_length = days * 96
        forecast_length = FORECAST_DAYS * 96 #forecast length is set for a forecast of 1 day
        x_train, y_train = [], []
        x_val, y_val = [], []
        #x_test, y_test = [], []
        # split on whole-day boundaries
        train_length = ceil((len(data)/96) * (1-VALIDATION_RATIO)) * 96
        val_length = ceil((len(data)/96) * (VALIDATION_RATIO)) * 96
        #test_length = len(data) - train_length - val_length
        print("\nLengths:\nFull: " + str(len(data)) + " Train: " + str(train_length) + " Val: " + str(val_length))
        #Divide the dataset here
        #Forming train-set in increments of 1 entry
        for i in range(backcast_length, train_length - forecast_length):
            x_train.append(data[i - backcast_length:i])
            y_train.append(data[i:i + forecast_length])
        #Forming validation-set in increments of 1 day i.e., 96 entries
        for i in range(train_length + backcast_length, len(data) - forecast_length, 96):
            x_val.append(data[i - backcast_length:i])
            y_val.append(data[i:i + forecast_length])
        x_train, y_train = np.array(x_train)[...,0], np.array(y_train)[...,0]
        x_val, y_val = np.array(x_val)[...,0], np.array(y_val)[...,0]
        #Printing shape of the formed train, test, and validation datasets
        print("\nShape of Resultant Time Series")
        print("Backcast of train & val: " + str(x_train.shape) + ", " + str(x_val.shape))
        print("Forecast of train & val: " + str(y_train.shape) + ", " + str(y_val.shape))
        print("\nTraining starting....")
        # grid search over stack type / blocks per stack / hidden units
        for a in range(len(stackTypes)):
            for b in range(len(numBlocksPerStack)):
                for c in range(len(hiddenLayerUnits)):
                    print("Iteration: " + "Backcast Days: " + str(days) + ", Stack Types: " + str(stackTypes[a]) + ", No. Blocks per Stack: " + str(numBlocksPerStack[b]) + ", Hidden Layer Units: " + str(hiddenLayerUnits[c]))
                    #Forming model by passing model parameters
                    net = NBeatsNet(device = device, stack_types=stackTypes[a], nb_blocks_per_stack=numBlocksPerStack[b], forecast_length=forecast_length, backcast_length=backcast_length, thetas_dims=thetaDims[a], share_weights_in_stack=shareWeightsInStack, hidden_layer_units=hiddenLayerUnits[c])
                    optimiser = optim.Adam(net.parameters())
                    #Save a temporary model to memorize its params and store it in a dictionary. Refer save_temp function
                    save(net,optimiser,0,CHECKPOINT_NAME)
                    # data generator forms by stocking up batch_size mnumber of time series together
                    # NOTE(review): this rebinds `data` from the ndarray to the
                    # generator, so later grid iterations reuse the exhausted
                    # name — verify this is intended
                    data = data_generator(x_train, y_train, batch_size)
                    # training
                    # model seems to converge well around ~2500 grad steps and starts to overfit a bit after.
                    test_losses = [] #Used to append test_losses at each validation step
                    for i in range(100):
                        eval_test(backcast_length, forecast_length, net, norm_constant, test_losses, x_val, y_val)
                        flag = train_200_grad_steps(data, device, net, optimiser, test_losses)
                        ##Below code is used for early-stopping the model. flag=0 means the model is to be early-stopped otherwise flag=1
                        if flag==0:
                            # reload the best checkpoint and save it under a
                            # name encoding the hyper-parameters + final loss
                            global_step = load(net,optimiser,CHECKPOINT_NAME)
                            #Open this to specify best model by parameters
                            if MODEL_PREFIX[-3:]==".th": MODEL_PREFIX = MODEL_PREFIX[:-3]
                            model_dict[MODEL_PREFIX+"[" + str(days) + "]_" + str(stackTypes[a]) + "_" + str(numBlocksPerStack[b]) + "_" + str(hiddenLayerUnits[c]) + "_" + str(test_losses[-1]) + ".th"] = [stackTypes[a], numBlocksPerStack[b], forecast_length, backcast_length, thetaDims[a], shareWeightsInStack, hiddenLayerUnits[c], batch_size]
                            f = open(DICT_PATH,"wb")
                            pickle.dump(model_dict, f)
                            f.close()
                            save(net ,optimiser, global_step, MODEL_PREFIX+"[" + str(days) + "]_" + str(stackTypes[a]) + "_" + str(numBlocksPerStack[b]) + "_" + str(hiddenLayerUnits[c]) + "_" + str(test_losses[-1]) + ".th")
                            print("Model saved as: " + MODEL_PREFIX+"[" + str(days) + "]_" + str(stackTypes[a]) + "_" + str(numBlocksPerStack[b]) + "_" + str(hiddenLayerUnits[c]) + "_" + str(test_losses[-1]) + ".th")
                            print("Stopped Early!")
                            break
#------Testing code------
else:
if os.path.isfile(CHECKPOINT_NAME):
os.remove(CHECKPOINT_NAME)
device = torch.device('cpu') # use the trainer.py to run on GPU.
#Reading model dictionary
f = open(DICT_PATH,"rb")
model_dict = pickle.load(f)
print(model_dict.keys())
data = pd.read_csv(INPUT_FILE_PATH, index_col=0, parse_dates=True)
data = data.values # just keep np array here for simplicity.
norm_constant = model_dict["norm_constant"] #norm_constant for normalizing the data to lie between 0 and 1
print("Normalizing constant for the data is: " + str(norm_constant))
data = data / norm_constant # small leak to the test set here.
#print(data)
backcast_length = model_dict[MODEL][3]
forecast_length = model_dict[MODEL][2]
x_test, y_test = [], []
test_length = len(data)
print("Test length: " + str(test_length))
#Forming test-set in increments of 1 day i.e., 96 entries
for i in range(backcast_length, test_length - forecast_length, 96):
x_test.append(data[i - backcast_length:i])
y_test.append(data[i:i + forecast_length])
x_test, y_test = np.array(x_test)[...,0], np.array(y_test)[...,0]
#Printing shape of the formed train, test, and validation datasets
print("\nShape of Resultant Time Series")
print("Backcast of test: " + str(x_test.shape))
print("Forecast of test: " + str(y_test.shape))
hiddenLayerUnits = model_dict[MODEL][6]
stackTypes = model_dict[MODEL][0] #In case you change this please change ThetaDims in the inner most for loop mentioned below.
thetaDims = model_dict[MODEL][4]
numBlocksPerStack = model_dict[MODEL][1] # Number of blocks per stack
batch_size = model_dict[MODEL][7]
shareWeightsInStack = model_dict[MODEL][5]
net = NBeatsNet(device = device, stack_types=stackTypes, nb_blocks_per_stack=numBlocksPerStack, forecast_length=forecast_length, backcast_length=backcast_length, thetas_dims=thetaDims, share_weights_in_stack=shareWeightsInStack, hidden_layer_units=hiddenLayerUnits)
optimiser = optim.Adam(net.parameters())
print("\nLoading model... " + str(MODEL))
load(net,optimiser,MODEL)
net.eval()
_, forecast = net(torch.tensor(x_test, dtype = torch.float))
test_loss = F.mse_loss(forecast, torch.tensor(y_test, dtype = torch.float)).item()
print("The MSE loss while testing: " +str(test_loss))
p = forecast.detach().numpy()
if COMPARISON_PLOTS:
print("Storing prediction | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------------
#
# ,--.
# | |-. ,---. ,---. ,--.--.,--,--,
# | .-. '| .-. :| .-. || .--'| \
# | `-' |\ --.' '-' '| | | || |
# `---' `----' `---' `--' `--''--'
#
# file: nested_tree_test
# desc: This test will test the nested tree implementation.
#
# author: <NAME>
# date: 02/06/2015
#---------------------------------------------------------------------------------
# Copyright (c) 2015 <NAME>
# All rights Reserved.
# Released Under the MIT Licence
#---------------------------------------------------------------------------------
import unittest
from beorn_lib.nested_tree import NestedTreeNode
from beorn_lib.nested_tree import NestedTree
def counter(reset=False):
    """Monotonic id generator backed by a function attribute.

    Returns 0 on the very first call or whenever *reset* is truthy;
    otherwise returns the previous id incremented by one.
    """
    if reset or not hasattr(counter, "this_id"):
        counter.this_id = 0
    else:
        counter.this_id += 1
    return counter.this_id
class TestPayload:
    """Minimal payload carried by tree nodes in the tests.

    Orders by the monotonically assigned my_id so payloads sort in
    creation order.
    """
    def __init__(self, reset=False):
        # my_id comes from the module-level counter(); reset restarts it at 0
        self.my_id = counter(reset)
        self.duration = 1
        self.start = 0
        self.end = 0
        self.no_walk = False

    def __lt__(self, other):
        return self.my_id < other.my_id
class TestNestedTree(unittest.TestCase):
""" Nested Tree Tests """
def __init__(self, testname = 'runTest', scm_type = None, scm_dir = None, test_data = None):
_ = test_data
_ = scm_type
_ = scm_dir
# initialise the test framework
super(TestNestedTree, self).__init__(testname)
def count_function(self, last_visited_node, node, value, levels, direction, parameter):
""" This is a test function that is used to count the number
of nodes that are encountered.
"""
if value is None:
value = 1
else:
value = value + 1
return (node, value, False)
def all_nodes_level_function(self, last_visited_node, node, value, levels, direction, parameter):
""" This function will collect the values from all nodes that
it encounters in the order that they were walked.
"""
if value is None:
value = [(levels, node.payload, 1)]
node.colour = 1
else:
value.append((levels, node.payload, len(value) + 1))
node.colour = len(value)
return (node, value, False)
def find_colours_function(self, last_visited_node, node, value, levels, direction, parameter):
""" This function will collect the values from all nodes that
it encounters in the order that they were walked.
"""
if node.colour == parameter:
value = node
node = None
return (node, value, False)
def all_nodes_function(self, last_visited_node, node, value, levels, direction, parameter):
""" This function will collect the values from all nodes that
it encounters in the order that they were walked.
"""
if value is None:
value = [node.payload]
else:
value.append(node.payload)
return (node, value, False)
def collect_function(self, last_visited_node, node, value, levels, direction, parameter):
""" This is a test function that is used to collect the data
from the nodes that it has visited. It will return the
list of nodes that it encounters.
"""
if not node.is_sub_node:
if value is None:
value = [node.payload]
else:
value.append(node.payload)
return (node, value, False)
def levels_function(self, last_visited_node, node, value, level, direction, parameter):
""" This is a test function that is used to render the levels information
it is used to test that the levels counting code works.
"""
if value is None:
node_list = []
else:
(_, node_list) = value
if node.payload is not None:
node_list.append((level, node.payload.my_id, direction))
value = (level, node_list)
return (node,value,False)
def plain_levels_function(self, last_visited_node, node, value, level, direction, parameter):
""" This is a test function that is used to render the levels information
it is used to test that the levels counting code works.
"""
if value is None:
node_list = []
lowest_level = 1
else:
(lowest_level, node_list) = value
if node.payload is not None:
node_list.append((level, node.payload))
if level > lowest_level:
lowest_level = level
value = (lowest_level, node_list)
return (node,value,False)
def short_walk_function(self,last_visited_node, node,value,levels,direction, parameter):
""" This is a test function that is used to collect the data
from the nodes that it has visited. It will collect all the
data for all the nodes, except those blocked by the short walk
then it will climb back up the tree and carry on walking.
It will return a list of the nodes that it visited.
"""
skip_children = False
if not node.is_sub_node:
if value is None:
value = [node.payload]
else:
value.append(node.payload)
# the magic short walk value - skip the children
if node.payload is not None and node.payload.no_walk:
skip_children = True
return (node,value,skip_children)
    def calculate_function(self,last_visted_node, node,value,levels,direction, parameter):
        """ This is a test function that is used to calculate a value
            from the nodes that it has visited.

            Schedules start/end times on each payload: a node starts where
            its predecessor started (DOWN) or ended (NEXT), and spans its
            own duration.  On the way back UP, the parent's end is extended
            to cover the last child's end before the next sibling is timed.
            Mutates node payloads in place; the returned value (1) is not
            meaningful to callers.
        """
        if direction == NestedTreeNode.DIRECTION_DOWN and not node.is_sub_node:
            # descending: first child starts with its parent's start time
            prev_payload = node.getPrevPayload()
            if prev_payload is None:
                # no predecessor at all: this is the very first node
                node.payload.start = 0
                node.payload.end = node.payload.duration
            else:
                node.payload.start = prev_payload.start
                node.payload.end = prev_payload.start + node.payload.duration
        elif direction == NestedTreeNode.DIRECTION_NEXT:
            # sibling step: start where the previous sibling finished
            prev_payload = node.getPrevPayload()
            node.payload.start = prev_payload.end
            node.payload.end = prev_payload.end + node.payload.duration
        elif direction == NestedTreeNode.DIRECTION_UP:
            prev_payload = node.getPrevPayload()
            # set the previous end
            if prev_payload.end < last_visted_node.payload.end:
                prev_payload.end = last_visted_node.payload.end
            if not node.is_sub_node:
                # now set the current nodes start and end (and the previous end)
                # have stepped up a level
                node.payload.start = last_visted_node.payload.end
                node.payload.end = last_visted_node.payload.end + node.payload.duration
        return (node,1,False)
def dump_endtime_function(self,last_visted_node, node,value,levels,direction, parameter):
""" This is a test function that is used to return the calculated values
from the nodes that it has visited.
"""
if not node.is_sub_node:
if value is None:
value = [(node.payload.my_id, node.payload.start, node.payload.end)]
else:
value.append((node.payload.my_id, node.payload.start, node.payload.end))
return (node,value,False)
def test_createTree(self):
""" Create Object Test
This is a simple test that checks that the tree object can be created
correctly.
This is an exploding tests, to see if the code functions.`
"""
graph = NestedTree()
self.assertTrue(True)
def test_createNode(self):
""" Create Node Test
This is a basic test to make sure that the object can be created.
This is an exploding test to see if the code works.
"""
node = NestedTreeNode(0)
self.assertTrue(True)
def test_addNodes(self):
""" Add Nodes.
This test adds three nodes in a row to see if the graph code works.
This is an exploding test to see if the code works.
"""
node1 = NestedTreeNode(1)
node2 = NestedTreeNode(2)
node3 = NestedTreeNode(3)
graph = NestedTree()
graph.addNodeAfter(node1)
node1.addNodeAfter(node2)
node2.addNodeAfter(node3)
self.assertTrue(graph.hasSibling())
self.assertTrue(node1.hasSibling())
self.assertTrue(node2.hasSibling())
def test_3NodeWalk(self):
""" 3 Node Walk.
This test adds three nodes in a row to see if the graph code works.
It then will walk the tree to see if the it finds all the nodes.
"""
node1 = NestedTreeNode(1)
node2 = NestedTreeNode(2)
node3 = NestedTreeNode(3)
graph = NestedTree()
graph.addNodeAfter(node1)
node1.addNodeAfter(node2)
node2.addNodeAfter(node3)
# we are only using the next function for this rest.
count = 0
count = graph.walkTree(self.count_function)
self.assertTrue(count == 3)
def test_5NodeWalk(self):
""" 5 Node Walk - with insert before.
This test adds three nodes in a row to see if the graph code works
it also inserts two more nodes before node 2 and node 3.
It then will walk the tree to see if the it finds all the nodes.
"""
node1 = NestedTreeNode(1)
node2 = NestedTreeNode(2)
node3 = NestedTreeNode(3)
node4 = NestedTreeNode(4)
node5 = NestedTreeNode(5)
graph = NestedTree()
# insert the three nodes
graph.addNodeAfter(node1)
node1.addNodeAfter(node2)
node2.addNodeAfter(node3)
# now insert the two extra nodes
node2.addNodeBefore(node4)
node3.addNodeBefore(node5)
# we are only using the next function for this rest.
count = 0
count = graph.walkTree(self.count_function)
self.assertTrue(count == 5)
def test_SingleChildNode(self):
""" Single Child Node with Walk.
This test will add a single child node.
It then will walk the tree to see if the it finds all the nodes.
"""
node1 = NestedTreeNode(1)
graph = NestedTree()
# insert the child node
graph.addChildNode(node1)
# we are only using the next function for this rest.
count = 0
count = graph.walkTree(self.count_function)
self.assertTrue(count == 1)
def build_simple_tree(self, graph):
""" Builds a simple tree
This test will create the following tree:
[root]
|
v
[1] -> [2] -> [11] -> [17] -> [18]
| |
| v
| [12] -> [13] -> [14] -> [15] -> [16]
|
v
[3] -> [4] -> [5] -> [9] -> [10]
|
v
[6] -> [7] -> [8]
This function returns a node that can used to amend the tree.
"""
# create the nodes
nodes = []
for i in range(19):
nodes.append(NestedTreeNode(i))
graph.addChildNode(nodes[1])
nodes[1].addNodeAfter(nodes[2])
nodes[2].addNodeAfter(nodes[11])
nodes[11].addNodeAfter(nodes[17])
nodes[17].addNodeAfter(nodes[18])
nodes[2].addChildNode(nodes[3])
nodes[3].addNodeAfter(nodes[4])
nodes[4].addNodeAfter(nodes[5])
nodes[5].addNodeAfter(nodes[9])
nodes[9].addNodeAfter(nodes[10])
nodes[5].addChildNode(nodes[6])
nodes[6].addNodeAfter(nodes[7])
nodes[7].addNodeAfter(nodes[8])
nodes[11].addChildNode(nodes[12])
nodes[12].addNodeAfter(nodes[13])
nodes[13].addNodeAfter(nodes[14])
nodes[14].addNodeAfter(nodes[15])
nodes[15].addNodeAfter(nodes[16])
return (nodes[14], nodes[16])
def test_TraceSimpleTree(self):
""" Trace A Simple Tree
The simple tree is used for this test.
It should be a walk this tree and return the nodes in the correct order.
"""
graph = NestedTree()
# now build the tree
self.build_simple_tree(graph)
# we are only using the next function for this rest.
value = graph.walkTree(self.collect_function)
self.assertTrue(value == list(range(1, 19)))
def test_ChildNodeInsertion(self):
"""
This tests that the child node insertions work as expected.
It will also check to see if they can be walked as expected.
"""
# test normal insertion - empty list
graph = NestedTree()
graph.addChildNode(NestedTreeNode(1))
self.assertEqual(graph.walkTree(self.all_nodes_function), [1], "Basic insert failed")
# test normal insertion - empty list - this is the same as above as is the default
graph = NestedTree()
graph.addChildNode(NestedTreeNode(1), NestedTreeNode.INSERT_END)
graph.addChildNode(NestedTreeNode(2), NestedTreeNode.INSERT_END)
graph.addChildNode(NestedTreeNode(3), NestedTreeNode.INSERT_END)
self.assertEqual(graph.walkTree(self.all_nodes_function), [1, 2, 3], "Basic append insert")
# test front insertion - empty list
graph = NestedTree()
graph.addChildNode(NestedTreeNode(3), NestedTreeNode.INSERT_FRONT)
graph.addChildNode(NestedTreeNode(2), NestedTreeNode.INSERT_FRONT)
graph.addChildNode(NestedTreeNode(1), NestedTreeNode.INSERT_FRONT)
self.assertEqual(graph.walkTree(self.all_nodes_function), [1, 2, 3], "Basic in front insert")
# test ascending insertion - empty list
graph = NestedTree()
graph.addChildNode(NestedTreeNode(8), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(2), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(3), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(7), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(6), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(5), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(1), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(4), NestedTreeNode.INSERT_ASCENDING)
graph.addChildNode(NestedTreeNode(5), NestedTreeNode.INSERT_ASCENDING)
self.assertEqual(graph.walkTree(self.all_nodes_function), [1, 2, 3, 4, 5, 5, 6, 7, 8], "Basic in front insert")
def test_ChildNodeInsertionDecending(self):
"""
This tests that the child node insertions work as expected.
It will also check to see if they can be walked as expected.
"""
# test descending insertion - empty list
graph = NestedTree()
graph.addChildNode(NestedTreeNode(1), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(4), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(5), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(6), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(7), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(3), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(8), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(2), NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(NestedTreeNode(5), NestedTreeNode.INSERT_DESCENDING)
self.assertEqual(graph.walkTree(self.all_nodes_function), [8, 7, 6, 5, 5, 4, 3, 2, 1], "Basic descending insert")
# test normal insertion - empty list
nodes = []
nodes.append(NestedTreeNode(TestPayload(True)))
for i in range(1, 10):
nodes.append(NestedTreeNode(TestPayload()))
graph = NestedTree()
graph.addChildNode(nodes[1], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[4], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[5], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[6], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[7], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[3], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[8], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[2], NestedTreeNode.INSERT_DESCENDING)
graph.addChildNode(nodes[5], NestedTreeNode.INSERT_DESCENDING)
nodes[7].addChildNode(nodes[9])
self.assertEqual((1, [(1, 8, 2), (1, 7, 3), (2, 9, 2), (1, 6, 1), (1, 5, 3), (1, 4, 3), (1, 3, 3), (1, 2, 3), (1, 1, 3)]), graph.walkTree(self.levels_function))
def build_tree(self, use_id = True, mark_no_walk = False):
"""
This function will create the following tree:
{} this indicate sub-tree nodes that have been created.
[root]
|
v
[1] -> [2] -> [24] -> | |
# repository: SpillChek2/tfrs
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import permissions
from rest_framework import mixins
from rest_framework import generics
from rest_framework_bulk import BulkCreateModelMixin
from . import serializers
from auditable.views import AuditableMixin
from .models.Audit import Audit
from .models.CreditTrade import CreditTrade
from .models.CreditTradeHistory import CreditTradeHistory
from .models.CreditTradeStatus import CreditTradeStatus
from .models.CreditTradeType import CreditTradeType
from .models.CreditTradeZeroReason import CreditTradeZeroReason
from .models.CurrentUserViewModel import CurrentUserViewModel
from .models.FuelSupplier import FuelSupplier
from .models.FuelSupplierActionsType import FuelSupplierActionsType
from .models.FuelSupplierAttachment import FuelSupplierAttachment
from .models.FuelSupplierAttachmentTag import FuelSupplierAttachmentTag
from .models.FuelSupplierBalance import FuelSupplierBalance
from .models.FuelSupplierCCData import FuelSupplierCCData
from .models.FuelSupplierContact import FuelSupplierContact
from .models.FuelSupplierHistory import FuelSupplierHistory
from .models.FuelSupplierStatus import FuelSupplierStatus
from .models.Notification import Notification
from .models.NotificationEvent import NotificationEvent
from .models.NotificationType import NotificationType
from .models.NotificationViewModel import NotificationViewModel
from .models.Permission import Permission
from .models.PermissionViewModel import PermissionViewModel
from .models.Role import Role
from .models.RolePermission import RolePermission
from .models.RolePermissionViewModel import RolePermissionViewModel
from .models.RoleViewModel import RoleViewModel
from .models.User import User
from .models.UserDetailsViewModel import UserDetailsViewModel
from .models.UserFavourite import UserFavourite
from .models.UserFavouriteViewModel import UserFavouriteViewModel
from .models.UserRole import UserRole
from .models.UserRoleViewModel import UserRoleViewModel
from .models.UserViewModel import UserViewModel
class credittradesBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
    """Endpoint for creating CreditTrade objects in bulk."""
    queryset = CreditTrade.objects.all()
    serializer_class = serializers.CreditTradeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Create a batch of CreditTrade objects from the request body."""
        return self.create(request, *args, **kwargs)
class credittradesGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """Endpoint for listing CreditTrade objects and creating new ones."""
    queryset = CreditTrade.objects.all()
    serializer_class = serializers.CreditTradeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return all CreditTrade objects."""
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Create a single CreditTrade object."""
        return self.create(request, *args, **kwargs)
class credittradesIdDeletePost(AuditableMixin,mixins.DestroyModelMixin, generics.GenericAPIView):
    """Endpoint for deleting a single CreditTrade object (delete-via-POST)."""
    queryset = CreditTrade.objects.all()
    serializer_class = serializers.CreditTradeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Delete the CreditTrade identified by the id in the URL."""
        return self.destroy(request, *args, **kwargs)
class credittradesIdGet(AuditableMixin,mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Endpoint for retrieving and updating a single CreditTrade object."""
    queryset = CreditTrade.objects.all()
    serializer_class = serializers.CreditTradeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return the CreditTrade identified by the id in the URL."""
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        """Replace the CreditTrade identified by the id in the URL."""
        return self.update(request, *args, **kwargs)
class credittradehistoriesBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
    """Endpoint for creating CreditTradeHistory objects in bulk."""
    queryset = CreditTradeHistory.objects.all()
    serializer_class = serializers.CreditTradeHistorySerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Create a batch of CreditTradeHistory objects from the request body."""
        return self.create(request, *args, **kwargs)
class credittradehistoriesGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """Endpoint for listing CreditTradeHistory objects and creating new ones."""
    queryset = CreditTradeHistory.objects.all()
    serializer_class = serializers.CreditTradeHistorySerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return all CreditTradeHistory objects."""
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Create a single CreditTradeHistory object."""
        return self.create(request, *args, **kwargs)
class credittradehistoriesIdDeletePost(AuditableMixin,mixins.DestroyModelMixin, generics.GenericAPIView):
    """Endpoint for deleting a single CreditTradeHistory object (delete-via-POST)."""
    queryset = CreditTradeHistory.objects.all()
    serializer_class = serializers.CreditTradeHistorySerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Delete the CreditTradeHistory identified by the id in the URL."""
        return self.destroy(request, *args, **kwargs)
class credittradehistoriesIdGet(AuditableMixin,mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Endpoint for retrieving and updating a single CreditTradeHistory object."""
    queryset = CreditTradeHistory.objects.all()
    serializer_class = serializers.CreditTradeHistorySerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return the CreditTradeHistory identified by the id in the URL."""
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        """Replace the CreditTradeHistory identified by the id in the URL."""
        return self.update(request, *args, **kwargs)
class credittradestatusesBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
    """Endpoint for creating CreditTradeStatus objects in bulk."""
    queryset = CreditTradeStatus.objects.all()
    serializer_class = serializers.CreditTradeStatusSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Create a batch of CreditTradeStatus objects from the request body."""
        return self.create(request, *args, **kwargs)
class credittradestatusesGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """Endpoint for listing CreditTradeStatus objects and creating new ones."""
    queryset = CreditTradeStatus.objects.all()
    serializer_class = serializers.CreditTradeStatusSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return all CreditTradeStatus objects."""
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Create a single CreditTradeStatus object."""
        return self.create(request, *args, **kwargs)
class credittradestatusesIdDeletePost(AuditableMixin,mixins.DestroyModelMixin, generics.GenericAPIView):
    """Endpoint for deleting a single CreditTradeStatus object (delete-via-POST)."""
    queryset = CreditTradeStatus.objects.all()
    serializer_class = serializers.CreditTradeStatusSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Delete the CreditTradeStatus identified by the id in the URL."""
        return self.destroy(request, *args, **kwargs)
class credittradestatusesIdGet(AuditableMixin,mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Endpoint for retrieving and updating a single CreditTradeStatus object."""
    queryset = CreditTradeStatus.objects.all()
    serializer_class = serializers.CreditTradeStatusSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return the CreditTradeStatus identified by the id in the URL."""
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        """Replace the CreditTradeStatus identified by the id in the URL."""
        return self.update(request, *args, **kwargs)
class credittradetypesBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
    """Endpoint for creating CreditTradeType objects in bulk."""
    queryset = CreditTradeType.objects.all()
    serializer_class = serializers.CreditTradeTypeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Create a batch of CreditTradeType objects from the request body."""
        return self.create(request, *args, **kwargs)
class credittradetypesGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """Endpoint for listing CreditTradeType objects and creating new ones."""
    queryset = CreditTradeType.objects.all()
    serializer_class = serializers.CreditTradeTypeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return all CreditTradeType objects."""
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Create a single CreditTradeType object."""
        return self.create(request, *args, **kwargs)
class credittradetypesIdDeletePost(AuditableMixin,mixins.DestroyModelMixin, generics.GenericAPIView):
    """Endpoint for deleting a single CreditTradeType object (delete-via-POST)."""
    queryset = CreditTradeType.objects.all()
    serializer_class = serializers.CreditTradeTypeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Delete the CreditTradeType identified by the id in the URL."""
        return self.destroy(request, *args, **kwargs)
class credittradetypesIdGet(AuditableMixin,mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Endpoint for retrieving and updating a single CreditTradeType object."""
    queryset = CreditTradeType.objects.all()
    serializer_class = serializers.CreditTradeTypeSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return the CreditTradeType identified by the id in the URL."""
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        """Replace the CreditTradeType identified by the id in the URL."""
        return self.update(request, *args, **kwargs)
class credittradezeroreasonBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
    """Endpoint for creating CreditTradeZeroReason objects in bulk."""
    queryset = CreditTradeZeroReason.objects.all()
    serializer_class = serializers.CreditTradeZeroReasonSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Create a batch of CreditTradeZeroReason objects from the request body."""
        return self.create(request, *args, **kwargs)
class credittradezeroreasonGet(AuditableMixin,mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
    """Endpoint for listing CreditTradeZeroReason objects and creating new ones."""
    queryset = CreditTradeZeroReason.objects.all()
    serializer_class = serializers.CreditTradeZeroReasonSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return all CreditTradeZeroReason objects."""
        return self.list(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Create a single CreditTradeZeroReason object."""
        return self.create(request, *args, **kwargs)
class credittradezeroreasonIdDeletePost(AuditableMixin,mixins.DestroyModelMixin, generics.GenericAPIView):
    """Endpoint for deleting a single CreditTradeZeroReason object (delete-via-POST)."""
    queryset = CreditTradeZeroReason.objects.all()
    serializer_class = serializers.CreditTradeZeroReasonSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def post(self, request, *args, **kwargs):
        """Delete the CreditTradeZeroReason identified by the id in the URL."""
        return self.destroy(request, *args, **kwargs)
class credittradezeroreasonIdGet(AuditableMixin,mixins.RetrieveModelMixin, mixins.UpdateModelMixin, generics.GenericAPIView):
    """Endpoint for retrieving and updating a single CreditTradeZeroReason object."""
    queryset = CreditTradeZeroReason.objects.all()
    serializer_class = serializers.CreditTradeZeroReasonSerializer
    lookup_field = 'id'
    # NOTE(review): AllowAny leaves this endpoint unauthenticated - confirm intended
    permission_classes = (permissions.AllowAny,)
    def get(self, request, *args, **kwargs):
        """Return the CreditTradeZeroReason identified by the id in the URL."""
        return self.retrieve(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        """Replace the CreditTradeZeroReason identified by the id in the URL."""
        return self.update(request, *args, **kwargs)
class fuelsuppliersBulkPost(AuditableMixin,BulkCreateModelMixin, generics.GenericAPIView):
"""
Bulk create / update a number of FuelSupplier object
"""
lookup_field = 'id'
permission_classes = (permissions.AllowAny,)
| |
#!/usr/bin/env python3
"""
See freq_response.md for details
"""
from dataclasses import dataclass
import fractions
import math
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from analysis import linearity, time_domain_response
from utils import utils
from unit_test import unit_test
from processor import ProcessorBase
from generation import signal_generation
# Commonly used mathematical constants, precomputed once at import time
PI = math.pi
HALF_PI = 0.5 * math.pi
TWOPI = 2.0 * math.pi
SQRT2 = math.sqrt(2.0)
INV_SQRT2 = 1.0 / SQRT2  # 1/sqrt(2), i.e. the -3 dB amplitude ratio
# Square wave has THD+N of sqrt(pi^2 / 8 - 1) ~= 0.483 ~= -6.32 dB
# https://en.wikipedia.org/wiki/Total_harmonic_distortion#Examples
SQUARE_THDN = utils.to_dB(math.sqrt((math.pi ** 2.) / 8 - 1))
# Registries of test callables; presumably collected by the unit_test
# framework imported above - confirm against unit_test module
_unit_tests_short = []
_unit_tests_full = []
@dataclass
class FreqResponse:
    """Results of a frequency-response measurement.

    Optional array fields are None when the corresponding measurement was
    not performed; array fields appear to be parallel to ``freqs`` -
    TODO confirm against the code that populates this.
    """
    freqs: np.ndarray  # Frequencies the response was measured at
    sample_rate: float
    amplitude: Optional[float] = None  # Amplitude frequency response was performed at (relevant for nonlinear systems)
    mag: Optional[np.ndarray] = None  # Magnitude
    rms: Optional[np.ndarray] = None  # RMS response (only relevant for nonlinear system)
    phase: Optional[np.ndarray] = None  # Phase response, in radians
    group_delay: Optional[np.ndarray] = None  # Group delay; units not shown here - presumably samples, confirm
    thdn: Optional[np.ndarray] = None  # THD + Noise (linear, not dB)
def dft_num_samples(
        freq: Union[int, float],
        sample_rate: Union[int, float],
        min_num_samples=0,
        max_num_samples: Optional[int]=None,
        maximize=False,
        round_up=False,
) -> int:
    """
    Determine optimum DFT size at a given frequency and sample rate, in order to get an exact number of cycles at the
    frequency, or as close as possible.

    :param freq: frequency, in whatever units you want (must be same units as sample rate)
    :param sample_rate: sample rate (in same units as freq)
    :param min_num_samples:
        Minimum number of samples; default 0 (i.e. no minimum).
        Actual practical minimum will always be at least 1 cycle
    :param max_num_samples:
        Maximum number of samples; default is sample_rate or period at frequency, whichever is larger
        Must be > (sample_rate/freq).
        Must be specified if maximize
    :param maximize:
        By default, will come up with the minimum possible number of samples that satisfies the criteria sequence;
        if maximize, will come up with the longest instead.
        Must explicitly specify max_num_samples if maximize
    :param round_up:
        if True, will always round up instead of rounding to nearest
    :raises ValueError: on invalid/inconsistent arguments, or when no valid
        size exists in [min_num_samples, max_num_samples]
    """
    if maximize and not max_num_samples:
        raise ValueError('Must provide max_num_samples if setting maximize')
    period = sample_rate / freq
    # Validate bounds (min_num_samples == 0 is allowed and means "no minimum")
    if min_num_samples < 0:
        raise ValueError('min_num_samples must be >= 0')
    elif not isinstance(min_num_samples, int):
        raise ValueError('min_num_samples must be integer')
    if max_num_samples is None:
        max_num_samples = int(math.ceil(max(sample_rate, period)))
    elif not isinstance(max_num_samples, int):
        raise ValueError('max_num_samples must be integer')
    elif max_num_samples <= 0:
        raise ValueError('max_num_samples (%g) must be > 0' % max_num_samples)
    elif max_num_samples <= min_num_samples:
        raise ValueError('max_num_samples (%g) must be > min_num_samples (%g)' % (max_num_samples, min_num_samples))
    eps = 1e-12
    # Translate the sample bounds into whole-cycle bounds at this frequency
    min_num_cycles = max(min_num_samples / period, 1.)
    min_num_cycles_int = int(math.ceil(min_num_cycles - eps))
    max_num_cycles = max_num_samples / period
    max_num_cycles_int = int(math.floor(max_num_cycles + eps))
    if max_num_cycles_int < 1:
        assert max_num_samples < period
        raise ValueError('max_num_samples (%u) must be >= period (%g)' % (max_num_samples, period))
    assert min_num_cycles_int * (period + eps) >= min_num_samples
    assert max_num_cycles_int * (period - eps) <= max_num_samples
    if max_num_cycles_int == min_num_cycles_int:
        # Special case: only 1 possible number of periods
        n_samples = max_num_cycles_int * period
        n_samples = int(math.ceil(n_samples) if round_up else round(n_samples))
        assert min_num_samples <= n_samples <= max_num_samples
        return n_samples
    elif max_num_cycles_int < min_num_cycles_int:
        # TODO: come up with good error message for this
        raise ValueError('freq %g, SR %g, min_num_cycles %f -> %u, max_num_cycles %f -> %u' % (
            freq, sample_rate,
            min_num_cycles, min_num_cycles_int, max_num_cycles, max_num_cycles_int
        ))
    assert max_num_samples >= period  # Should be guaranteed by above conditions
    # Express the period as a rational number so an integer number of cycles
    # lands on an (almost) integer number of samples
    freq = utils.integerize_if_int(freq)
    sample_rate = utils.integerize_if_int(sample_rate)
    if isinstance(freq, int) and isinstance(sample_rate, int):
        period_as_fraction = fractions.Fraction(sample_rate, freq)
    else:
        period_as_fraction = fractions.Fraction.from_float(period)
    period_as_fraction = period_as_fraction.limit_denominator(max_denominator=max_num_cycles_int)
    n_samples_ideal = period * period_as_fraction.denominator
    assert utils.approx_equal(period_as_fraction.numerator, n_samples_ideal, eps=0.5)
    if maximize:
        if 2*n_samples_ideal <= max_num_samples:
            """
            What's the largest integer we can multiply n_samples_ideal by to still be <= max_num_samples?
            n * k <= max
            k <= max / n
            k = floor(max / n)
            """
            n_samples_ideal *= math.floor(max_num_samples / n_samples_ideal)
    elif n_samples_ideal < min_num_samples:
        """
        What's the smallest integer we can multiply n_samples_ideal by to be >= min_num_samples?
        n * k >= min
        k >= min / n
        k = ceil(min / n)
        """
        n_samples_ideal *= math.ceil(min_num_samples / n_samples_ideal)
    n_samples = int(math.ceil(n_samples_ideal) if round_up else round(n_samples_ideal))
    if not (min_num_samples <= n_samples <= max_num_samples):
        raise AssertionError('Check n_samples (%i, from %g, fraction %s) in range (%i, %i) failed!' % (
            n_samples, n_samples_ideal, period_as_fraction, min_num_samples, max_num_samples))
    return n_samples
def _test_dft_num_samples():
    """Unit tests for dft_num_samples().

    Exercises integer, rational, and irrational combinations of frequency and
    sample rate, plus the min/max sample-count, maximize, and round_up options.
    Expected values are the precomputed optimal DFT lengths for each case.
    """
    from unit_test.unit_test import test_equal, test_threw
    """
    Perfect divisors
    """
    # 1 kHz @ 96 kHz
    # 1 period = 96 samples
    test_equal(dft_num_samples(1000, 96000), 96)
    test_equal(dft_num_samples(1000, 96000.), 96)
    test_equal(dft_num_samples(1000., 96000), 96)
    test_equal(dft_num_samples(1000., 96000.), 96)
    test_equal(dft_num_samples(1000., 96000., min_num_samples=100), 192)
    test_equal(dft_num_samples(1000., 96000., min_num_samples=384), 384)
    test_equal(dft_num_samples(1000., 96000., max_num_samples=400, maximize=True), 384)
    test_equal(dft_num_samples(1000., 96000., min_num_samples=380, max_num_samples=400), 384)
    # No integer number of whole 96-sample periods fits in [398, 400]
    test_threw(dft_num_samples, 1000., 96000., min_num_samples=398, max_num_samples=400)
    # 3.125 (25/8) @ 96 kHz
    # 1 period = 30,720 samples
    test_equal(dft_num_samples(3.125, 96000.), 30720)
    """
    Rational numbers
    """
    # 10 kHz @ 96 kHz
    # 1 period = 9.6 samples (48/5)
    test_equal(dft_num_samples(10000, 96000), 48)
    test_equal(dft_num_samples(10000, 96000, maximize=True, max_num_samples=96000), 96000)
    # 1 kHz @ 44.1 kHz
    # 1 period = 44.1 samples (441/10)
    test_equal(dft_num_samples(1000, 44100), 441)
    test_equal(dft_num_samples(1000, 44100, maximize=True, max_num_samples=44100), 44100)
    # 440 Hz @ 44.1 kHz
    # 1 period = 100.2272727 samples (2205/22)
    test_equal(dft_num_samples(440, 44100), 2205)
    test_equal(dft_num_samples(440, 44100, maximize=True, max_num_samples=44100), 44100)
    test_equal(dft_num_samples(440, 44100, max_num_samples=102), 100)
    test_equal(dft_num_samples(440, 44100, max_num_samples=102, round_up=True), 101)
    test_equal(dft_num_samples(440, 44100, max_num_samples=510, maximize=True), 401)
    test_equal(dft_num_samples(440, 44100, max_num_samples=510, round_up=True, maximize=True), 401)
    # 100.125 Hz @ 96 kHz
    # 1 period = 958.80 samples (256000/267)
    test_equal(dft_num_samples(100.125, 96000, max_num_samples=1000000), 256000)
    test_equal(dft_num_samples(100.125, 96000, max_num_samples=1000000, maximize=True), 768000)
    test_equal(dft_num_samples(100.125, 96000), 92045)
    # 3010 Hz @ 96 kHz
    # 1 period = 31.89 samples (9600/301)
    test_equal(dft_num_samples(3010, 96000), 9600)
    test_equal(dft_num_samples(3010, 96000, maximize=True, max_num_samples=96000), 96000)
    # 1001 Hz @ 96 kHz (coprime)
    # 1 period = 95.904 samples (96000/1001)
    test_equal(dft_num_samples(1001, 96000), 96000)
    test_equal(dft_num_samples(1001, 96000, maximize=True, max_num_samples=96000), 96000)
    # 1000.1 Hz @ 96 kHz
    # 1 period = 95.99 samples (960,000/10,001)
    test_equal(dft_num_samples(1000.1, 96000), 59994)
    test_equal(dft_num_samples(1000.1, 96000, maximize=True, max_num_samples=96000), 59994)
    """
    Irrational numbers
    """
    # 1000*pi Hz @ 96 kHz
    # 1 period = 30.5577 samples
    test_equal(dft_num_samples(1000*PI, 96000), 30955)
    """
    Rational numbers expressed as ratio of 2 irrational numbers
    """
    test_equal(dft_num_samples(1000*PI, 96000*PI), 96)
# Register with both the short and the full unit-test suites
_unit_tests_short.append(_test_dft_num_samples)
_unit_tests_full.append(_test_dft_num_samples)
def _single_freq_dft(
x: np.ndarray,
cos_sig: np.ndarray,
sin_sig: np.ndarray,
freq: Union[int, float],
sample_rate: Union[int, float],
mag=False,
phase=False,
adjust_num_samp=False,
normalize=False):
# TODO: use Goertzel algo instead
# FIXME: properly deal with boundary conditions - i.e. extra samples at end that don't fit into a complete cycle
# adjust_num_samp should mostly deal with that
if adjust_num_samp:
n_samp = dft_num_samples(freq, sample_rate, min_num_samples=(len(x) // 2), max_num_samples=len(x), maximize=True)
else:
n_samp = len(x)
dft_mult = cos_sig[:n_samp] - 1j * sin_sig[:n_samp]
xs = x[:n_samp] * dft_mult
xs = np.mean(xs) if normalize else sum(xs)
if mag and phase:
return np.abs(xs), np.angle(xs)
elif mag:
return np.abs(xs)
elif phase:
return np.angle(xs)
else:
return xs
def single_freq_dft(
        x: np.ndarray,
        freq: float,
        sample_rate=1.0,
        mag=True,
        phase=True,
        adjust_num_samp=False,
        normalize=False):
    """
    Compute the DFT of x at one arbitrary (not necessarily bin-aligned) frequency.

    :param x: input signal
    :param freq: analysis frequency, in the same units as sample_rate
    :param sample_rate: sample rate (default 1.0, i.e. freq is normalized)
    :param mag: include magnitude in the result
    :param phase: include phase in the result
    :param adjust_num_samp:
        if True, will not perform DFT on entire signal; rather, will find optimal number of samples to get as close
        to a zero-crossing as possible (though guaranteed to use at least half the samples).
        Recommend calling dft_num_samples to determine sample size instead, in order to get the optimal DFT size of the
        signal in the first place.
    :param normalize: divide by number of samples, i.e. return average power per sample instead of sum
    :return: (mag, phase) if mag and phase; magnitude if mag only; phase if phase only; complex result if neither
    """
    normalized_freq = freq / sample_rate
    cos_sig, sin_sig = signal_generation.gen_cos_sine(normalized_freq, len(x))
    return _single_freq_dft(
        x, cos_sig, sin_sig, freq, sample_rate,
        mag=mag, phase=phase, adjust_num_samp=adjust_num_samp, normalize=normalize)
def phase_to_group_delay(freqs: np.ndarray, phases_rad: np.ndarray, sample_rate: float) -> np.ndarray:
    """
    Convert a phase response into group delay.

    :param freqs: frequencies (Hz) at which phases were measured
    :param phases_rad: phase at each frequency, in radians (may be wrapped; will be unwrapped)
    :param sample_rate: sample rate, in Hz
    :return: group delay at each frequency, in seconds
    """

    def _leading_int(component: str) -> int:
        # Tolerate version components like '0rc1' or 'dev0'; int() on the raw
        # component raises ValueError for pre-release numpy versions.
        digits = ''
        for char in component:
            if not char.isdigit():
                break
            digits += char
        return int(digits) if digits else 0

    phases_rad_unwrapped = np.unwrap(phases_rad)
    freqs_rads_per_sample = (freqs / sample_rate) * TWOPI
    # np.gradient only accepts array-valued sample positions from numpy 1.13 onward;
    # compare (major, minor) as a tuple so the check is correct for any version.
    np_version = tuple(_leading_int(part) for part in np.__version__.split('.')[:2])
    if np_version < (1, 13):
        delay_samples = -np.gradient(phases_rad_unwrapped) / np.gradient(freqs_rads_per_sample)
    else:
        delay_samples = -np.gradient(phases_rad_unwrapped, freqs_rads_per_sample)
    return delay_samples / sample_rate
def get_ir_freq_response(
        ir: np.ndarray,
        freqs: Iterable,
        sample_rate,
        mag=True,
        phase=True,
        group_delay=True) -> FreqResponse:
    """
    Calculate frequency response based on impulse response
    :param ir: Impulse response
    :param freqs: frequencies to get response at. More frequencies will also lead to more precise group delay
    :param sample_rate: sample rate, in Hz
    :param mag: if False, does not calculate nor return magnitude
    :param phase: if False, does not calculate nor return phase
    :param group_delay: if False, does not calculate nor return group delay
    :return: frequency response of system
    :raises ValueError: if group_delay is requested without phase
    """
    if group_delay and not phase:
        raise ValueError('Must calculate phase to calculate group delay!')
    freqs = np.array(freqs)
    freq_resp = FreqResponse(freqs=freqs, sample_rate=sample_rate)
    if mag:
        freq_resp.mag = np.zeros(len(freqs))
    if phase:
        freq_resp.phase = np.zeros(len(freqs))
    for n, f_norm in enumerate(freqs / sample_rate):
        ret = single_freq_dft(ir, f_norm, mag=mag, phase=phase, adjust_num_samp=True)
        # single_freq_dft returns a (mag, phase) tuple only when both are
        # requested; otherwise it returns a bare scalar. The previous
        # ret[0] / ret[-1] indexing crashed on the scalar cases.
        if mag and phase:
            freq_resp.mag[n], freq_resp.phase[n] = ret
        elif mag:
            freq_resp.mag[n] = ret
        elif phase:
            freq_resp.phase[n] = ret
    if group_delay:
        # Must use the phase values before wrapping below, so unwrap is meaningful
        freq_resp.group_delay = phase_to_group_delay(freqs, freq_resp.phase, sample_rate)
    if phase:
        # Wrap phase into [-pi, pi)
        freq_resp.phase = ((freq_resp.phase + PI) % TWOPI) - PI
    return freq_resp
def _calc_thdn(y, f_norm, mag, phase, debug_assert=False):
    """Calculate a THD+N-style ratio: residual after removing the fundamental,
    relative to the fundamental amplitude.

    :param y: input signal
    :param f_norm: fundamental frequency, normalized (cycles per sample)
    :param mag: measured fundamental magnitude (peak amplitude)
    :param phase: measured fundamental phase, in radians
    :param debug_assert: if True, re-measure the synthesized fundamental via
        DFT and assert it matches the given mag/phase
    :return: rms(y - fundamental) * sqrt(2) / mag
    """
    # Subtract fundamental from signal
    # Convert phase from radians to the [0, 1) cycle fraction gen_sine expects
    phase01 = np.mod(phase / TWOPI, 1.0)
    fundamental = signal_generation.gen_sine(f_norm, n_samp=len(y), start_phase=phase01) * mag
    if debug_assert:
        # Sanity check: the synthesized fundamental should measure back to the
        # same magnitude/phase it was generated from
        debug_mag, debug_phase = single_freq_dft(fundamental, f_norm, mag=True, phase=True, normalize=True, adjust_num_samp=False)
        assert utils.approx_equal(debug_mag, mag, eps=0.001)
        assert utils.approx_equal(debug_phase, phase, eps=0.01)
    thdn_sig = y - fundamental
    # mag / SQRT2 is the RMS of a sine of peak amplitude mag, so this is
    # rms(residual) / rms(fundamental)
    return utils.rms(thdn_sig) * SQRT2 / mag
def get_discrete_sine_sweep_freq_response(
system: ProcessorBase,
freqs: Iterable,
sample_rate,
n_cycles=40.0,
n_samp_min: Optional[int]=None,
n_samp=None,
amplitude=1.0,
mag=True,
rms=True,
phase=True,
group_delay=None,
thdn=None) -> FreqResponse:
"""
Calculate frequency response by passing sine waves at various frequencies through system
Unlike impulse response analysis, this will work for nonlinear systems as well
(Of course, the definition of "frequency response" is ill-defined for a nonlinear system - see freq_response.md)
:param system: Processor to process
:param freqs: frequencies to get response at. More frequencies will also lead to more precise group delay
:param sample_rate: sample rate, in Hz
:param n_cycles: how many cycles of waveform to calculate over
:param n_samp_min: if using n_cycles, | |
src, 'target': targ} for src, targ, attrs in netx_edges]
edge_df = pd.DataFrame.from_records(netx_edge_list)
# TODO: This will blow if there are no edges or no nodes ... so will create_network_from_igraph() ... will R blow, too?
# Make sure critical attributes are strings
node_df['name'] = node_df['name'].astype(str)
edge_df['source'] = edge_df['source'].astype(str)
edge_df['target'] = edge_df['target'].astype(str)
if 'interaction' in edge_df.columns: edge_df['interaction'] = edge_df['interaction'].astype(str)
if len(node_df.index) == 0: node_df = None
if len(edge_df.index) == 0: edge_df = None
return create_network_from_data_frames(nodes=node_df, edges=edge_df, title=title, collection=collection,
base_url=base_url, node_id_list='name')
@cy_log
def create_network_from_data_frames(nodes=None, edges=None, title='From dataframe',
                                    collection='My Dataframe Network Collection', base_url=DEFAULT_BASE_URL, *,
                                    node_id_list='id', source_id_list='source', target_id_list='target',
                                    interaction_type_list='interaction'):
    """Create a network from data frames.

    Takes data frames for nodes and edges, as well as naming parameters to generate the JSON data format required by
    the "networks" POST operation via CyREST. Returns the network.suid and applies the preferred layout set in
    Cytoscape preferences.

    Notes:
        ``nodes`` should contain a column of character strings named: id. This name can be overridden by the arg:
        ``node_id_list``. Additional columns are loaded as node attributes. ``edges`` should contain columns of
        character strings named: source, target and interaction. These names can be overridden by args:
        source_id_list, target_id_list, interaction_type_list. Additional columns are loaded as edge attributes.
        The ``interaction`` list can contain a single value to apply to all rows; and if excluded altogether, the
        interaction type will be set to "interacts with". NOTE: attribute values of types (num) will be imported
        as (Double); (int) as (Integer); (chr) as (String); and (logical) as (Boolean). (Lists) will be imported as
        (Lists) in CyREST v3.9+.

        Note that the extra ``id`` column is created in the node table because the ``id`` column is mandatory in the
        cytoscape.js format, which is what is sent to Cytoscape.

    Args:
        nodes (DataFrame): see details and examples below; default NULL to derive nodes from edge sources and targets
        edges (DataFrame): see details and examples below; default NULL for disconnected set of nodes
        title (str): network name
        collection (str): network collection name
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.
        * :
        node_id_list (str): Name of column in ``nodes`` containing node id
        source_id_list (str): Name of column in ``edges`` containing source node name
        target_id_list (str): Name of column in ``edges`` containing target node name
        interaction_type_list (str): Name of column in ``edges`` containing interaction name

    Returns:
        int: The ``SUID`` of the new network

    Raises:
        ValueError: if server response has no JSON
        CyError: if network name or SUID doesn't exist
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> node_data = {'id':["node 0","node 1","node 2","node 3"],
        >>>              'group':["A","A","B","B"],
        >>>              'score':[20,10,15,5]}
        >>> nodes = df.DataFrame(data=node_data, columns=['id', 'group', 'score'])
        >>> edge_data = {'source':["node 0","node 0","node 0","node 2"],
        >>>              'target':["node 1","node 2","node 3","node 3"],
        >>>              'interaction':["inhibits","interacts","activates","interacts"],
        >>>              'weight':[5.1,3.0,5.2,9.9]}
        >>> edges = df.DataFrame(data=edge_data, columns=['source', 'target', 'interaction', 'weight'])
        >>>
        >>> create_network_from_data_frames(nodes, edges, title='From node & edge dataframe')
        1477
    """
    # TODO: Verify the above documentation

    def compute_edge_name(source, target, interaction):
        # Canonical Cytoscape edge name: "source (interaction) target"
        return source + ' (' + interaction + ') ' + target

    # Work on a copy so the caller's DataFrame is not mutated when default
    # interaction, name and SUID-key columns are added below.
    if edges is not None:
        edges = edges.copy()

    # Create a node list even if we have to use the edges lists to infer nodes
    if nodes is None:
        if edges is not None:
            # NOTE(review): node inference reads the literal 'source'/'target'
            # columns rather than source_id_list/target_id_list -- confirm intended
            id_list = []
            for source, target in zip(edges['source'].values, edges['target'].values):
                id_list.append(source)
                id_list.append(target)
            nodes = pd.DataFrame(data=id_list, columns=['id'])
        else:
            raise CyError('Must provide either nodes or edges')

    # create the JSON for a node list ... in cytoscape.js format
    # TODO: Verify that we really do need this 'id' field ... or maybe we can kill it afterward?
    json_nodes = [{'data': {'id': node}} for node in nodes[node_id_list]]

    # create the JSON for an edge list ... in cytoscape.js format
    json_edges = []
    if edges is not None:
        if interaction_type_list not in edges.columns: edges[interaction_type_list] = 'interacts with'
        edges_sub = edges[[source_id_list, target_id_list, interaction_type_list]]
        json_edges = [{'data': {'name': compute_edge_name(source, target, interaction), 'source': source,
                                'target': target, 'interaction': interaction}} for source, target, interaction in
                      zip(edges_sub[source_id_list], edges_sub[target_id_list], edges_sub[interaction_type_list])]

    # create the full JSON for a cytoscape.js-style network ... see http://manual.cytoscape.org/en/stable/Supported_Network_File_Formats.html#cytoscape-js-json
    # Note that no node or edge attributes are included in this version of the network
    json_network = {'data': [{'name': title}], 'elements': {'nodes': json_nodes, 'edges': json_edges}}

    # call Cytoscape to create this network and return the SUID
    network_suid = commands.cyrest_post('networks', parameters={'title': title, 'collection': collection},
                                        body=json_network, base_url=base_url)['networkSUID']

    # TODO: There appears to be a race condition here ... the view isn't set for a while. Without an explicit delay, the
    # "vizmap apply" command below fails for lack of a valid view. So, we'll retry
    # the problem operations until they succeed (see _delay_until_stable() calls below)

    # Keep cycling until Cytoscape is able to return table information ... safe after that
    _delay_until_stable(lambda: get_network_suid(network_suid, base_url=base_url) is not None,
                        'verifying network SUID', vote_count=10)

    # drop the SUID column if one is present
    nodes = nodes.drop(['SUID'], axis=1, errors='ignore')

    # load node attributes into Cytoscape network
    if len(set(nodes.columns) - {node_id_list}) != 0:
        tables.load_table_data(nodes, data_key_column=node_id_list, table_key_column=node_id_list, network=network_suid,
                               base_url=base_url)

    if edges is not None:
        # get rid of SUID column if one is present
        edges = edges.drop(['SUID'], axis=1, errors='ignore')

        # create edge name out of source/interaction/target
        edge_names = [compute_edge_name(source, target, interaction) for source, interaction, target in
                      zip(edges[source_id_list], edges[interaction_type_list], edges[target_id_list])]
        edges['name'] = edge_names

        # find out the SUID of each edge so it can be used in a multigraph if needed
        edges['data.key.column'] = edge_name_to_edge_suid(edge_names, network_suid, base_url=base_url, unique_list=True)

        # if the edge list looks real, add the edge attributes (if any)
        if len(set(edges.columns) - set(['source', 'target', 'interaction', 'name', 'data.key.column'])) != 0:
            tables.load_table_data(edges, data_key_column='data.key.column', table='edge', table_key_column='SUID',
                                   network=network_suid, base_url=base_url)

    narrate('Applying default style...')
    _delay_until_stable(lambda: commands.commands_post('vizmap apply styles="default"', base_url=base_url) is not None,
                        'apply vizmap')
    narrate('Applying preferred layout')
    _delay_until_stable(lambda: layouts.layout_network(network=network_suid) is not None,
                        'layout network')
    # TODO: Verify that attribute types are properly set in Cytoscape
    return network_suid
@cy_log
def import_network_from_tabular_file(file=None, first_row_as_column_names=False, start_load_row=1, column_type_list='s,i,t', delimiters='\\,,\t', base_url=DEFAULT_BASE_URL):
"""Loads a network from specified file.
Note:
To load a tabular file from cloud storage, use the file's URL and the ``sandbox_url_to`` function to download
the file to a sandbox, and then use ``import_network_from_tabular_file`` to load it from there.
Args:
file (str): Name of file in any of the supported tabular formats (e.g., csv, tsv, Excel, etc).
first_row_as_column_names (bool): True if first row contributes column names but no data values
start_load_row (int): 1-based row to start reading data ... after column name row, if present
column_type_list (str): comma-separated map of column types ordered by column index
(e.g. "source,target,interaction,source attribute,target attribute,edge attribute,skip" or just "s,t,i,sa,ta,ea,x"); defaults to "s,i,t"
delimiters (str): comma-separated list of characters that can separate columns ... ``\\\\,`` is a comma, ``\\t`` is a tab
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {"networks": [network suid], "views": [suid for views]} where networks and views lists have length 1
Raises:
CyError: if file cannot be found or loaded, or if error in tabular_params list
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> import_network_from_tabular_file('data/yeastHighQuality.sif') # import a SIF-formatted network
{'networks': [131481], 'views': [131850]}
>>> import_network_from_tabular_file('data/disease.net.default.xlsx') # import an Excel file that has no header row
{'networks': [131481], 'views': [131850]}
>>> import_network_from_tabular_file('data/disease.net.default.txt') # import a text file that has no header row
{'networks': [131481], 'views': [131850]}
>>> import_network_from_tabular_file('data/disease.net.interaction.txt', # import ' '-delimited header row and data
>>> first_row_as_column_names=True,
>>> start_load_row=1,
>>> column_type_list='s,t,x,i',
>>> delimiters=' ')
{'networks': [131481], 'views': [131850]}
"""
file = get_abs_sandbox_path(file)
# As of 3.9, the column_type_list is sufficient for specifying the layout of a data line. However,
# per CYTOSCAPE-12764, pre-3.9 Cytoscape has trouble with the "interaction" tag. To accommodate all
# Cytoscape versions, we provide explicit indexes for source, target and interaction columns.
| |
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pyright: reportUnknownMemberType=none
# This leads to too many false-positives around mocks.
import typing
from unittest import mock
import hikari
import pytest
import tanjun
@pytest.fixture()
def command() -> tanjun.abc.ExecutableCommand[typing.Any]:
    # Fluent mock: add_check returns the command itself, mirroring the real API.
    mock_command = mock.MagicMock(tanjun.abc.ExecutableCommand)
    mock_command.add_check.return_value = mock_command
    return mock_command
@pytest.fixture()
def context() -> tanjun.abc.Context:
    # Bare mocked context; individual tests configure whatever attributes they need.
    mock_context = mock.MagicMock(tanjun.abc.Context)
    return mock_context
class TestOwnerCheck:
    """Tests for tanjun.checks.OwnerCheck."""

    @pytest.mark.asyncio()
    async def test(self):
        """A truthy ownership result is passed straight through."""
        check = tanjun.checks.OwnerCheck(error_message=None, halt_execution=False)
        ctx = mock.Mock()
        dependency = mock.AsyncMock()
        dependency.check_ownership.return_value = True

        assert await check(ctx, dependency) is True

        dependency.check_ownership.assert_awaited_once_with(ctx.client, ctx.author)

    @pytest.mark.asyncio()
    async def test_when_false(self):
        """A falsy ownership result is returned as-is when no error behaviour is configured."""
        check = tanjun.checks.OwnerCheck(error_message=None, halt_execution=False)
        ctx = mock.Mock()
        dependency = mock.AsyncMock()
        dependency.check_ownership.return_value = False

        assert await check(ctx, dependency) is False

        dependency.check_ownership.assert_awaited_once_with(ctx.client, ctx.author)

    @pytest.mark.asyncio()
    async def test_when_false_and_error_message(self):
        """A falsy result raises CommandError when an error message is configured."""
        check = tanjun.checks.OwnerCheck(error_message="aye", halt_execution=False)
        ctx = mock.Mock()
        dependency = mock.AsyncMock()
        dependency.check_ownership.return_value = False

        with pytest.raises(tanjun.errors.CommandError, match="aye"):
            await check(ctx, dependency)

        dependency.check_ownership.assert_awaited_once_with(ctx.client, ctx.author)

    @pytest.mark.asyncio()
    async def test_when_false_and_halt_execution(self):
        """A falsy result raises HaltExecution when halt_execution is set."""
        check = tanjun.checks.OwnerCheck(error_message=None, halt_execution=True)
        ctx = mock.Mock()
        dependency = mock.AsyncMock()
        dependency.check_ownership.return_value = False

        with pytest.raises(tanjun.errors.HaltExecution):
            await check(ctx, dependency)

        dependency.check_ownership.assert_awaited_once_with(ctx.client, ctx.author)
class TestNsfwCheck:
    """Tests for tanjun.checks.NsfwCheck (passes only in NSFW channels or DMs)."""

    @pytest.mark.asyncio()
    async def test_when_is_dm(self):
        """DM contexts (no guild_id) pass without consulting any cache or REST."""
        mock_context = mock.Mock(guild_id=None)
        mock_cache = mock.AsyncMock()
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=mock_cache)
        assert result is True
        mock_context.cache.get_guild_channel.assert_not_called()
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_not_called()

    @pytest.mark.asyncio()
    async def test(self):
        """NSFW channel found in the local cache passes; REST and async cache untouched."""
        mock_context = mock.Mock()
        mock_context.cache.get_guild_channel.return_value.is_nsfw = True
        mock_cache = mock.AsyncMock()
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=mock_cache)
        assert result is True
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_not_called()

    @pytest.mark.asyncio()
    async def test_when_async_cache_raises_not_found(self):
        """EntryNotFound from the async cache propagates; no REST fallback is made."""
        mock_context = mock.Mock(cache=None, rest=mock.AsyncMock())
        mock_cache = mock.AsyncMock()
        mock_cache.get.side_effect = tanjun.dependencies.EntryNotFound
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        with pytest.raises(tanjun.dependencies.EntryNotFound):
            await check(mock_context, channel_cache=mock_cache)
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_called_once_with(mock_context.channel_id)

    @pytest.mark.asyncio()
    async def test_when_not_cache_bound_and_async_cache_hit(self):
        """With no local cache, an NSFW hit from the async cache passes without REST."""
        mock_context = mock.Mock(cache=None, rest=mock.AsyncMock())
        mock_cache = mock.AsyncMock()
        mock_cache.get.return_value.is_nsfw = True
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=mock_cache)
        assert result is True
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_called_once_with(mock_context.channel_id)

    @pytest.mark.asyncio()
    async def test_when_not_found_in_cache_and_async_cache_hit(self):
        """Local cache miss falls through to async cache; is_nsfw=None is treated as not NSFW."""
        mock_context = mock.Mock(rest=mock.AsyncMock())
        mock_context.cache.get_guild_channel.return_value = None
        mock_cache = mock.AsyncMock()
        mock_cache.get.return_value.is_nsfw = None
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=mock_cache)
        assert result is False
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_called_once_with(mock_context.channel_id)

    @pytest.mark.asyncio()
    async def test_when_not_cache_bound(self):
        """With no caches at all, the channel is fetched over REST."""
        mock_context = mock.Mock(cache=None, rest=mock.AsyncMock())
        mock_context.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=True)
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=None)
        assert result is True
        mock_context.rest.fetch_channel.assert_awaited_once_with(mock_context.channel_id)

    @pytest.mark.asyncio()
    async def test_when_not_found_in_cache(self):
        """Local cache miss plus async CacheMissError falls back to REST."""
        mock_context = mock.Mock(rest=mock.AsyncMock())
        mock_context.cache.get_guild_channel.return_value = None
        mock_context.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=True)
        mock_cache = mock.AsyncMock()
        mock_cache.get.side_effect = tanjun.dependencies.CacheMissError
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=mock_cache)
        assert result is True
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_awaited_once_with(mock_context.channel_id)
        mock_cache.get.assert_awaited_once_with(mock_context.channel_id)

    @pytest.mark.asyncio()
    async def test_when_false(self):
        """A non-NSFW channel (is_nsfw=None) fails the check without error behaviour."""
        mock_context = mock.Mock()
        mock_context.cache.get_guild_channel.return_value.is_nsfw = None
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=False)
        result = await check(mock_context, channel_cache=None)
        assert result is False
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_not_called()

    @pytest.mark.asyncio()
    async def test_when_false_and_error_message(self):
        """A non-NSFW channel raises CommandError when an error message is configured."""
        mock_context = mock.Mock()
        mock_context.cache.get_guild_channel.return_value.is_nsfw = False
        mock_cache = mock.AsyncMock()
        mock_cache.get.side_effect = tanjun.dependencies.CacheMissError
        check = tanjun.checks.NsfwCheck(error_message="meow me", halt_execution=False)
        with pytest.raises(tanjun.errors.CommandError, match="meow me"):
            await check(mock_context, channel_cache=mock_cache)
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_not_called()
        mock_cache.get.assert_not_called()

    @pytest.mark.asyncio()
    async def test_when_false_and_halt_execution(self):
        """A non-NSFW channel (resolved via REST) raises HaltExecution when configured."""
        mock_context = mock.Mock(rest=mock.AsyncMock())
        mock_context.cache.get_guild_channel.return_value = None
        mock_context.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=False)
        mock_cache = mock.AsyncMock()
        mock_cache.get.side_effect = tanjun.dependencies.CacheMissError
        check = tanjun.checks.NsfwCheck(error_message=None, halt_execution=True)
        with pytest.raises(tanjun.errors.HaltExecution):
            await check(mock_context, channel_cache=mock_cache)
        mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
        mock_context.rest.fetch_channel.assert_awaited_once_with(mock_context.channel_id)
        mock_cache.get.assert_awaited_once_with(mock_context.channel_id)
class TestSfwCheck:
@pytest.mark.asyncio()
async def test_when_is_dm(self):
mock_context = mock.Mock(guild_id=None)
mock_cache = mock.AsyncMock()
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=mock_cache)
assert result is True
mock_context.cache.get_guild_channel.assert_not_called()
mock_context.rest.fetch_channel.assert_not_called()
mock_cache.get.assert_not_called()
@pytest.mark.asyncio()
async def test(self):
mock_context = mock.Mock()
mock_context.cache.get_guild_channel.return_value.is_nsfw = False
mock_cache = mock.AsyncMock()
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=mock_cache)
assert result is True
mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
mock_context.rest.fetch_channel.assert_not_called()
mock_cache.get.assert_not_called()
@pytest.mark.asyncio()
async def test_when_not_cache_bound_and_async_cache_hit(self):
mock_context = mock.Mock(cache=None, rest=mock.AsyncMock())
mock_cache = mock.AsyncMock()
mock_cache.get.return_value.is_nsfw = False
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=mock_cache)
assert result is True
mock_context.rest.fetch_channel.assert_not_called()
mock_cache.get.assert_called_once_with(mock_context.channel_id)
@pytest.mark.asyncio()
async def test_when_not_found_in_cache_and_async_cache_hit(self):
mock_context = mock.Mock(rest=mock.AsyncMock())
mock_context.cache.get_guild_channel.return_value = None
mock_cache = mock.AsyncMock()
mock_cache.get.return_value.is_nsfw = None
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=mock_cache)
assert result is True
mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
mock_context.rest.fetch_channel.assert_not_called()
mock_cache.get.assert_called_once_with(mock_context.channel_id)
@pytest.mark.asyncio()
async def test_when_not_cache_bound(self):
mock_context = mock.Mock(cache=None, rest=mock.AsyncMock())
mock_context.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=True)
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=None)
assert result is False
mock_context.rest.fetch_channel.assert_awaited_once_with(mock_context.channel_id)
@pytest.mark.asyncio()
async def test_when_not_found_in_cache(self):
mock_context = mock.Mock(rest=mock.AsyncMock())
mock_context.cache.get_guild_channel.return_value = None
mock_context.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=True)
mock_cache = mock.AsyncMock()
mock_cache.get.side_effect = tanjun.dependencies.CacheMissError
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=mock_cache)
assert result is False
mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
mock_context.rest.fetch_channel.assert_awaited_once_with(mock_context.channel_id)
mock_cache.get.assert_awaited_once_with(mock_context.channel_id)
@pytest.mark.asyncio()
async def test_when_is_nsfw(self):
mock_context = mock.Mock()
mock_context.cache.get_guild_channel.return_value.is_nsfw = True
check = tanjun.checks.SfwCheck(error_message=None, halt_execution=False)
result = await check(mock_context, channel_cache=None)
assert result is False
mock_context.cache.get_guild_channel.assert_called_once_with(mock_context.channel_id)
mock_context.rest.fetch_channel.assert_not_called()
@pytest.mark.asyncio()
async def test_when_is_nsfw_and_error_message(self):
    """A configured error message surfaces as CommandError on failure."""
    ctx = mock.Mock()
    ctx.cache.get_guild_channel.return_value.is_nsfw = True
    async_cache = mock.AsyncMock()
    async_cache.get.side_effect = tanjun.dependencies.CacheMissError
    sfw_check = tanjun.checks.SfwCheck(error_message="meow me", halt_execution=False)

    with pytest.raises(tanjun.errors.CommandError, match="meow me"):
        await sfw_check(ctx, channel_cache=async_cache)

    ctx.cache.get_guild_channel.assert_called_once_with(ctx.channel_id)
    ctx.rest.fetch_channel.assert_not_called()
    async_cache.get.assert_not_called()
@pytest.mark.asyncio()
async def test_when_is_nsfw_and_halt_execution(self):
    """halt_execution raises HaltExecution after the REST fallback resolves NSFW."""
    ctx = mock.Mock(rest=mock.AsyncMock())
    ctx.cache.get_guild_channel.return_value = None
    ctx.rest.fetch_channel.return_value = mock.Mock(hikari.GuildChannel, is_nsfw=True)
    async_cache = mock.AsyncMock()
    async_cache.get.side_effect = tanjun.dependencies.CacheMissError
    sfw_check = tanjun.checks.SfwCheck(error_message=None, halt_execution=True)

    with pytest.raises(tanjun.errors.HaltExecution):
        await sfw_check(ctx, channel_cache=async_cache)

    ctx.cache.get_guild_channel.assert_called_once_with(ctx.channel_id)
    ctx.rest.fetch_channel.assert_awaited_once_with(ctx.channel_id)
    async_cache.get.assert_awaited_once_with(ctx.channel_id)
class TestDmCheck:
    """Tests for the synchronous DmCheck."""

    def test_for_dm(self):
        ctx = mock.Mock(guild_id=None)
        assert tanjun.checks.DmCheck()(ctx) is True

    def test_for_guild(self):
        check = tanjun.checks.DmCheck(halt_execution=False, error_message=None)
        assert check(mock.Mock(guild_id=3123)) is False

    def test_for_guild_when_halt_execution(self):
        check = tanjun.checks.DmCheck(halt_execution=True, error_message=None)
        with pytest.raises(tanjun.HaltExecution):
            check(mock.Mock(guild_id=3123))

    def test_for_guild_when_error_message(self):
        check = tanjun.checks.DmCheck(halt_execution=False, error_message="message")
        with pytest.raises(tanjun.CommandError, match="message"):
            check(mock.Mock(guild_id=3123))
class TestGuildCheck:
    """Tests for the synchronous GuildCheck."""

    def test_for_guild(self):
        ctx = mock.Mock(guild_id=123123)
        assert tanjun.checks.GuildCheck()(ctx) is True

    def test_for_dm(self):
        check = tanjun.checks.GuildCheck(halt_execution=False, error_message=None)
        assert check(mock.Mock(guild_id=None)) is False

    def test_for_dm_when_halt_execution(self):
        check = tanjun.checks.GuildCheck(halt_execution=True, error_message=None)
        with pytest.raises(tanjun.HaltExecution):
            check(mock.Mock(guild_id=None))

    def test_for_dm_when_error_message(self):
        check = tanjun.checks.GuildCheck(halt_execution=False, error_message="hi")
        with pytest.raises(tanjun.CommandError, match="hi"):
            check(mock.Mock(guild_id=None))
@pytest.mark.skip(reason="Not Implemented")
class TestAuthorPermissionCheck:
    # TODO: add coverage for AuthorPermissionCheck once tests are written.
    ...
@pytest.mark.skip(reason="Not Implemented")
class TestOwnPermissionCheck:
    # TODO: add coverage for OwnPermissionCheck once tests are written.
    ...
def test_with_dm_check(command: mock.Mock):
    """The bare decorator registers a DmCheck built with the default message."""
    with mock.patch.object(tanjun.checks, "DmCheck") as patched:
        result = tanjun.checks.with_dm_check(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=False, error_message="Command can only be used in DMs")
def test_with_dm_check_with_keyword_arguments(command: mock.Mock):
    """Keyword arguments are forwarded verbatim to DmCheck."""
    with mock.patch.object(tanjun.checks, "DmCheck") as patched:
        result = tanjun.checks.with_dm_check(halt_execution=True, error_message="message")(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=True, error_message="message")
def test_with_guild_check(command: mock.Mock):
    """The bare decorator registers a GuildCheck built with the default message."""
    with mock.patch.object(tanjun.checks, "GuildCheck") as patched:
        result = tanjun.checks.with_guild_check(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(
            halt_execution=False, error_message="Command can only be used in guild channels"
        )
def test_with_guild_check_with_keyword_arguments(command: mock.Mock):
    """Keyword arguments are forwarded verbatim to GuildCheck."""
    with mock.patch.object(tanjun.checks, "GuildCheck") as patched:
        result = tanjun.checks.with_guild_check(halt_execution=True, error_message="eee")(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=True, error_message="eee")
def test_with_nsfw_check(command: mock.Mock):
    """The bare decorator registers an async NsfwCheck with the default message."""
    with mock.patch.object(tanjun.checks, "NsfwCheck", return_value=mock.AsyncMock()) as patched:
        result = tanjun.checks.with_nsfw_check(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(
            halt_execution=False, error_message="Command can only be used in NSFW channels"
        )
def test_with_nsfw_check_with_keyword_arguments(command: mock.Mock):
    """Keyword arguments are forwarded verbatim to NsfwCheck."""
    with mock.patch.object(tanjun.checks, "NsfwCheck", return_value=mock.AsyncMock()) as patched:
        result = tanjun.checks.with_nsfw_check(halt_execution=True, error_message="banned!!!")(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=True, error_message="banned!!!")
def test_with_sfw_check(command: mock.Mock):
    """The bare decorator registers an async SfwCheck with the default message."""
    with mock.patch.object(tanjun.checks, "SfwCheck", return_value=mock.AsyncMock()) as patched:
        result = tanjun.checks.with_sfw_check(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(
            halt_execution=False, error_message="Command can only be used in SFW channels"
        )
def test_with_sfw_check_with_keyword_arguments(command: mock.Mock):
    """Keyword arguments are forwarded verbatim to SfwCheck."""
    with mock.patch.object(tanjun.checks, "SfwCheck", return_value=mock.AsyncMock()) as patched:
        result = tanjun.checks.with_sfw_check(halt_execution=True, error_message="bango")(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=True, error_message="bango")
def test_with_owner_check(command: mock.Mock):
    """The bare decorator registers an OwnerCheck built with the default message."""
    with mock.patch.object(tanjun.checks, "OwnerCheck") as patched:
        result = tanjun.checks.with_owner_check(command)

        assert result is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(halt_execution=False, error_message="Only bot owners can use this command")
def test_with_owner_check_with_keyword_arguments(command: mock.Mock):
    """Keyword arguments are forwarded verbatim to OwnerCheck."""
    sentinel = object()
    with mock.patch.object(tanjun.checks, "OwnerCheck", return_value=sentinel) as patched:
        result = tanjun.checks.with_owner_check(
            halt_execution=True,
            error_message="dango",
        )(command)

    assert result is command
    command.add_check.assert_called_once_with(patched.return_value)
    patched.assert_called_once_with(halt_execution=True, error_message="dango")
def test_with_author_permission_check(command: mock.Mock):
    """Positional permissions and keyword arguments reach AuthorPermissionCheck."""
    with mock.patch.object(tanjun.checks, "AuthorPermissionCheck") as patched:
        decorator = tanjun.checks.with_author_permission_check(435213, halt_execution=True, error_message="bye")

        assert decorator(command) is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(435213, halt_execution=True, error_message="bye")
def test_with_own_permission_check(command: mock.Mock):
    """Positional permissions and keyword arguments reach OwnPermissionCheck."""
    with mock.patch.object(tanjun.checks, "OwnPermissionCheck") as patched:
        decorator = tanjun.checks.with_own_permission_check(5412312, halt_execution=True, error_message="hi")

        assert decorator(command) is command
        command.add_check.assert_called_once_with(patched.return_value)
        patched.assert_called_once_with(5412312, halt_execution=True, error_message="hi")
def test_with_check(command: mock.Mock):
    """with_check registers the given callback on the command unchanged."""
    check_callback = mock.Mock()

    assert tanjun.checks.with_check(check_callback)(command) is command

    command.add_check.assert_called_once_with(check_callback)
@pytest.mark.asyncio()
async def test_all_checks():
mock_check_1 = mock.Mock()
mock_check_2 = mock.Mock()
mock_check_3 = mock.Mock()
mock_context = mock.Mock()
mock_context.call_with_async_di = mock.AsyncMock(return_value=True)
check = tanjun.checks.all_checks(mock_check_1, mock_check_2, mock_check_3)
result = await check(mock_context)
assert result is True
mock_context.call_with_async_di.assert_has_awaits(
[
mock.call(mock_check_1, mock_context),
mock.call(mock_check_2, mock_context),
mock.call(mock_check_3, mock_context),
| |
- m.x1051 == 0)
# Mass-balance constraints: each aggregate variable x520..x561 equals the sum
# of its seven disaggregated variables (blocks of 7 starting at x1052).
m.c718 = Constraint(expr= m.x520 - m.x1052 - m.x1053 - m.x1054 - m.x1055 - m.x1056 - m.x1057 - m.x1058 == 0)
m.c719 = Constraint(expr= m.x521 - m.x1059 - m.x1060 - m.x1061 - m.x1062 - m.x1063 - m.x1064 - m.x1065 == 0)
m.c720 = Constraint(expr= m.x522 - m.x1066 - m.x1067 - m.x1068 - m.x1069 - m.x1070 - m.x1071 - m.x1072 == 0)
m.c721 = Constraint(expr= m.x523 - m.x1073 - m.x1074 - m.x1075 - m.x1076 - m.x1077 - m.x1078 - m.x1079 == 0)
m.c722 = Constraint(expr= m.x524 - m.x1080 - m.x1081 - m.x1082 - m.x1083 - m.x1084 - m.x1085 - m.x1086 == 0)
m.c723 = Constraint(expr= m.x525 - m.x1087 - m.x1088 - m.x1089 - m.x1090 - m.x1091 - m.x1092 - m.x1093 == 0)
m.c724 = Constraint(expr= m.x526 - m.x1094 - m.x1095 - m.x1096 - m.x1097 - m.x1098 - m.x1099 - m.x1100 == 0)
m.c725 = Constraint(expr= m.x527 - m.x1101 - m.x1102 - m.x1103 - m.x1104 - m.x1105 - m.x1106 - m.x1107 == 0)
m.c726 = Constraint(expr= m.x528 - m.x1108 - m.x1109 - m.x1110 - m.x1111 - m.x1112 - m.x1113 - m.x1114 == 0)
m.c727 = Constraint(expr= m.x529 - m.x1115 - m.x1116 - m.x1117 - m.x1118 - m.x1119 - m.x1120 - m.x1121 == 0)
m.c728 = Constraint(expr= m.x530 - m.x1122 - m.x1123 - m.x1124 - m.x1125 - m.x1126 - m.x1127 - m.x1128 == 0)
m.c729 = Constraint(expr= m.x531 - m.x1129 - m.x1130 - m.x1131 - m.x1132 - m.x1133 - m.x1134 - m.x1135 == 0)
m.c730 = Constraint(expr= m.x532 - m.x1136 - m.x1137 - m.x1138 - m.x1139 - m.x1140 - m.x1141 - m.x1142 == 0)
m.c731 = Constraint(expr= m.x533 - m.x1143 - m.x1144 - m.x1145 - m.x1146 - m.x1147 - m.x1148 - m.x1149 == 0)
m.c732 = Constraint(expr= m.x534 - m.x1150 - m.x1151 - m.x1152 - m.x1153 - m.x1154 - m.x1155 - m.x1156 == 0)
m.c733 = Constraint(expr= m.x535 - m.x1157 - m.x1158 - m.x1159 - m.x1160 - m.x1161 - m.x1162 - m.x1163 == 0)
m.c734 = Constraint(expr= m.x536 - m.x1164 - m.x1165 - m.x1166 - m.x1167 - m.x1168 - m.x1169 - m.x1170 == 0)
m.c735 = Constraint(expr= m.x537 - m.x1171 - m.x1172 - m.x1173 - m.x1174 - m.x1175 - m.x1176 - m.x1177 == 0)
m.c736 = Constraint(expr= m.x538 - m.x1178 - m.x1179 - m.x1180 - m.x1181 - m.x1182 - m.x1183 - m.x1184 == 0)
m.c737 = Constraint(expr= m.x539 - m.x1185 - m.x1186 - m.x1187 - m.x1188 - m.x1189 - m.x1190 - m.x1191 == 0)
m.c738 = Constraint(expr= m.x540 - m.x1192 - m.x1193 - m.x1194 - m.x1195 - m.x1196 - m.x1197 - m.x1198 == 0)
m.c739 = Constraint(expr= m.x541 - m.x1199 - m.x1200 - m.x1201 - m.x1202 - m.x1203 - m.x1204 - m.x1205 == 0)
m.c740 = Constraint(expr= m.x542 - m.x1206 - m.x1207 - m.x1208 - m.x1209 - m.x1210 - m.x1211 - m.x1212 == 0)
m.c741 = Constraint(expr= m.x543 - m.x1213 - m.x1214 - m.x1215 - m.x1216 - m.x1217 - m.x1218 - m.x1219 == 0)
m.c742 = Constraint(expr= m.x544 - m.x1220 - m.x1221 - m.x1222 - m.x1223 - m.x1224 - m.x1225 - m.x1226 == 0)
m.c743 = Constraint(expr= m.x545 - m.x1227 - m.x1228 - m.x1229 - m.x1230 - m.x1231 - m.x1232 - m.x1233 == 0)
m.c744 = Constraint(expr= m.x546 - m.x1234 - m.x1235 - m.x1236 - m.x1237 - m.x1238 - m.x1239 - m.x1240 == 0)
m.c745 = Constraint(expr= m.x547 - m.x1241 - m.x1242 - m.x1243 - m.x1244 - m.x1245 - m.x1246 - m.x1247 == 0)
m.c746 = Constraint(expr= m.x548 - m.x1248 - m.x1249 - m.x1250 - m.x1251 - m.x1252 - m.x1253 - m.x1254 == 0)
m.c747 = Constraint(expr= m.x549 - m.x1255 - m.x1256 - m.x1257 - m.x1258 - m.x1259 - m.x1260 - m.x1261 == 0)
m.c748 = Constraint(expr= m.x550 - m.x1262 - m.x1263 - m.x1264 - m.x1265 - m.x1266 - m.x1267 - m.x1268 == 0)
m.c749 = Constraint(expr= m.x551 - m.x1269 - m.x1270 - m.x1271 - m.x1272 - m.x1273 - m.x1274 - m.x1275 == 0)
m.c750 = Constraint(expr= m.x552 - m.x1276 - m.x1277 - m.x1278 - m.x1279 - m.x1280 - m.x1281 - m.x1282 == 0)
m.c751 = Constraint(expr= m.x553 - m.x1283 - m.x1284 - m.x1285 - m.x1286 - m.x1287 - m.x1288 - m.x1289 == 0)
m.c752 = Constraint(expr= m.x554 - m.x1290 - m.x1291 - m.x1292 - m.x1293 - m.x1294 - m.x1295 - m.x1296 == 0)
m.c753 = Constraint(expr= m.x555 - m.x1297 - m.x1298 - m.x1299 - m.x1300 - m.x1301 - m.x1302 - m.x1303 == 0)
m.c754 = Constraint(expr= m.x556 - m.x1304 - m.x1305 - m.x1306 - m.x1307 - m.x1308 - m.x1309 - m.x1310 == 0)
m.c755 = Constraint(expr= m.x557 - m.x1311 - m.x1312 - m.x1313 - m.x1314 - m.x1315 - m.x1316 - m.x1317 == 0)
m.c756 = Constraint(expr= m.x558 - m.x1318 - m.x1319 - m.x1320 - m.x1321 - m.x1322 - m.x1323 - m.x1324 == 0)
m.c757 = Constraint(expr= m.x559 - m.x1325 - m.x1326 - m.x1327 - m.x1328 - m.x1329 - m.x1330 - m.x1331 == 0)
m.c758 = Constraint(expr= m.x560 - m.x1332 - m.x1333 - m.x1334 - m.x1335 - m.x1336 - m.x1337 - m.x1338 == 0)
m.c759 = Constraint(expr= m.x561 - m.x1339 - m.x1340 - m.x1341 - m.x1342 - m.x1343 - m.x1344 - m.x1345 == 0)
# Upper-bound constraints in groups of nine (pattern: three capped at 50,
# six capped at 100, then two variables skipped before the next group).
m.c760 = Constraint(expr= m.x1346 <= 50)
m.c761 = Constraint(expr= m.x1347 <= 50)
m.c762 = Constraint(expr= m.x1348 <= 50)
m.c763 = Constraint(expr= m.x1349 <= 100)
m.c764 = Constraint(expr= m.x1350 <= 100)
m.c765 = Constraint(expr= m.x1351 <= 100)
m.c766 = Constraint(expr= m.x1352 <= 100)
m.c767 = Constraint(expr= m.x1353 <= 100)
m.c768 = Constraint(expr= m.x1354 <= 100)
m.c769 = Constraint(expr= m.x1357 <= 50)
m.c770 = Constraint(expr= m.x1358 <= 50)
m.c771 = Constraint(expr= m.x1359 <= 50)
m.c772 = Constraint(expr= m.x1360 <= 100)
m.c773 = Constraint(expr= m.x1361 <= 100)
m.c774 = Constraint(expr= m.x1362 <= 100)
m.c775 = Constraint(expr= m.x1363 <= 100)
m.c776 = Constraint(expr= m.x1364 <= 100)
m.c777 = Constraint(expr= m.x1365 <= 100)
m.c778 = Constraint(expr= m.x1368 <= 50)
m.c779 = Constraint(expr= m.x1369 <= 50)
m.c780 = Constraint(expr= m.x1370 <= 50)
m.c781 = Constraint(expr= m.x1371 <= 100)
m.c782 = Constraint(expr= m.x1372 <= 100)
m.c783 = Constraint(expr= m.x1373 <= 100)
m.c784 = Constraint(expr= m.x1374 <= 100)
m.c785 = Constraint(expr= m.x1375 <= 100)
m.c786 = Constraint(expr= m.x1376 <= 100)
m.c787 = Constraint(expr= m.x1379 <= 50)
m.c788 = Constraint(expr= m.x1380 <= 50)
m.c789 = Constraint(expr= m.x1381 <= 50)
m.c790 = Constraint(expr= m.x1382 <= 100)
m.c791 = Constraint(expr= m.x1383 <= 100)
m.c792 = Constraint(expr= m.x1384 <= 100)
m.c793 = Constraint(expr= m.x1385 <= 100)
m.c794 = Constraint(expr= m.x1386 <= 100)
m.c795 = Constraint(expr= m.x1387 <= 100)
m.c796 = Constraint(expr= m.x1390 <= 50)
m.c797 = Constraint(expr= m.x1391 <= 50)
m.c798 = Constraint(expr= m.x1392 <= 50)
m.c799 = Constraint(expr= m.x1393 <= 100)
m.c800 = Constraint(expr= m.x1394 <= 100)
m.c801 = Constraint(expr= m.x1395 <= 100)
m.c802 = Constraint(expr= m.x1396 <= 100)
m.c803 = Constraint(expr= m.x1397 <= 100)
m.c804 = Constraint(expr= m.x1398 <= 100)
m.c805 = Constraint(expr= m.x1401 <= 50)
m.c806 = Constraint(expr= m.x1402 <= 50)
m.c807 = Constraint(expr= m.x1403 <= 50)
m.c808 = Constraint(expr= m.x1404 <= 100)
m.c809 = Constraint(expr= m.x1405 <= 100)
m.c810 = Constraint(expr= m.x1406 <= 100)
m.c811 = Constraint(expr= m.x1407 <= 100)
m.c812 = Constraint(expr= m.x1408 <= 100)
m.c813 = Constraint(expr= m.x1409 <= 100)
m.c814 = Constraint(expr= m.x1412 <= 50)
m.c815 = Constraint(expr= m.x1413 <= 50)
m.c816 = Constraint(expr= m.x1414 <= 50)
m.c817 = Constraint(expr= m.x1415 <= 100)
m.c818 = Constraint(expr= m.x1416 <= 100)
m.c819 = Constraint(expr= m.x1417 <= 100)
m.c820 = Constraint(expr= m.x1418 <= 100)
m.c821 = Constraint(expr= m.x1419 <= 100)
m.c822 = Constraint(expr= m.x1420 <= 100)
m.c823 = Constraint(expr= m.x1423 <= 50)
m.c824 = Constraint(expr= m.x1424 <= 50)
m.c825 = Constraint(expr= m.x1425 <= 50)
m.c826 = Constraint(expr= m.x1426 <= 100)
m.c827 = Constraint(expr= m.x1427 <= 100)
m.c828 = Constraint(expr= m.x1428 <= 100)
m.c829 = Constraint(expr= m.x1429 <= 100)
m.c830 = Constraint(expr= m.x1430 <= 100)
m.c831 = Constraint(expr= m.x1431 <= 100)
# Non-negativity constraints for x1434..x1456.
m.c832 = Constraint(expr= m.x1434 >= 0)
m.c833 = Constraint(expr= m.x1435 >= 0)
m.c834 = Constraint(expr= m.x1436 >= 0)
m.c835 = Constraint(expr= m.x1437 >= 0)
m.c836 = Constraint(expr= m.x1438 >= 0)
m.c837 = Constraint(expr= m.x1439 >= 0)
m.c838 = Constraint(expr= m.x1440 >= 0)
m.c839 = Constraint(expr= m.x1441 >= 0)
m.c840 = Constraint(expr= m.x1442 >= 0)
m.c841 = Constraint(expr= m.x1443 >= 0)
m.c842 = Constraint(expr= m.x1444 >= 0)
m.c843 = Constraint(expr= m.x1445 >= 0)
m.c844 = Constraint(expr= m.x1446 >= 0)
m.c845 = Constraint(expr= m.x1447 >= 0)
m.c846 = Constraint(expr= m.x1448 >= 0)
m.c847 = Constraint(expr= m.x1449 >= 0)
m.c848 = Constraint(expr= m.x1450 >= 0)
m.c849 = Constraint(expr= m.x1451 >= 0)
m.c850 = Constraint(expr= m.x1452 >= 0)
m.c851 = Constraint(expr= m.x1453 >= 0)
m.c852 = Constraint(expr= m.x1454 >= 0)
m.c853 = Constraint(expr= m.x1455 >= 0)
m.c854 = Constraint(expr= m.x1456 >= 0)
m.c855 = Constraint(expr= m.x1457 | |
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python
# Usage notes; this reassignment replaces the license docstring in __doc__.
__doc__ = \
"""
assay/compound querying
========================================================================
run the following command on the zscaled file
Example (PRED_DIR is the location of ZscalledAllAssays)
#time grep -aob ".$" $PRED_DIR/ZscaledAllAssays.csv > $PRED_DIR/indices
"""
class QueryCmpd:
    """Random-access reader for the compound-indexed (CID) prediction master table.

    The index file (loaded with joblib) maps compound IDs to byte offsets of
    their rows in the master table, so single rows can be read without
    scanning the whole file.

    Parameters
    ----------
    mtable_dir : str
        Directory containing the ``PLS_Predictions``/``RFR_Predictions`` sub-dirs.
    uncertainty : bool
        When true, read the RFR uncertainty table instead of the PLS predictions.
    """

    def __init__(self, mtable_dir, uncertainty=False):
        # Pick the table/index pair: RFR for uncertainty estimates, PLS otherwise.
        subdir = 'RFR_Predictions' if uncertainty else 'PLS_Predictions'
        self.index_file = '{}/{}/ZscaledAllAssaysCID_indices'.format(mtable_dir, subdir)
        self.MasterTable = '{}/{}/ZscaledAllAssaysCID.csv'.format(mtable_dir, subdir)
        # indices maps compound ID -> byte offset of its row in MasterTable.
        self.indices = joblib.load(self.index_file)
        self.fid = open(self.MasterTable)
        self.separator = ',' if self.MasterTable.endswith('.csv') else '\t'

    def close(self):
        """Close the underlying table file (safe to call repeatedly)."""
        if not self.fid.closed:
            self.fid.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def columns(self):
        """Return the assay column headers (first header cell excluded)."""
        self.fid.seek(0, 0)
        header = self.fid.readline().strip().split(self.separator)
        return header[1:]

    def idx(self):
        """Return all compound IDs known to the index (header row excluded)."""
        return list(self.indices.index)[1:]

    def get(self, cmpd, raw=False):
        """Return ``(row_name, values)`` for one compound.

        Empty fields become 0.0 unless ``raw`` is true, in which case the
        untouched string fields are returned.
        """
        offset = self.indices.loc[cmpd].values[0]
        self.fid.seek(offset, 0)
        line = self.fid.readline().strip().split(self.separator)
        if raw:
            return line[0], line[1:]
        return line[0], [float(x) if x != '' else 0.0 for x in line[1:]]
class QueryAssay:
    """Random-access reader for the assay-indexed (AID) prediction master table.

    Mirrors QueryCmpd but rows are assays; the joblib index maps assay IDs
    (coerced to strings) to byte offsets in the master table.

    Parameters
    ----------
    mtable_dir : str
        Directory containing the ``PLS_Predictions``/``RFR_Predictions`` sub-dirs.
    uncertainty : bool
        When true, read the RFR uncertainty table instead of the PLS predictions.
    """

    def __init__(self, mtable_dir, uncertainty=False):
        subdir = 'RFR_Predictions' if uncertainty else 'PLS_Predictions'
        self.index_file = '{}/{}/ZscaledAllAssaysAID_indices'.format(mtable_dir, subdir)
        self.MasterTable = '{}/{}/ZscaledAllAssaysAID.csv'.format(mtable_dir, subdir)
        # Force string keys so look-ups work for both int and str assay IDs.
        self.indices = joblib.load(self.index_file)
        self.indices.index = self.indices.index.map(str)
        self.fid = open(self.MasterTable)
        self.separator = ',' if self.MasterTable.endswith('.csv') else '\t'

    def close(self):
        """Close the underlying table file (safe to call repeatedly)."""
        if not self.fid.closed:
            self.fid.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def columns(self):
        """Return the compound column headers (first header cell excluded)."""
        self.fid.seek(0, 0)
        header = self.fid.readline().strip().split(self.separator)
        return header[1:]

    def idx(self):
        """Return all assay IDs known to the index (header row excluded)."""
        return list(self.indices.index)[1:]

    def get(self, assay, raw=False):
        """Return ``(row_name, values)`` for one assay.

        Unlike QueryCmpd.get, fields are converted with float() unconditionally
        (the assay table is assumed dense).
        """
        offset = self.indices.loc[assay].values[0]
        self.fid.seek(offset, 0)
        line = self.fid.readline().strip().split(self.separator)
        if raw:
            return line[0], line[1:]
        return line[0], [float(x) for x in line[1:]]
class QueryCustomCSV:
    """In-memory query over a user-supplied prediction table (CSV).

    Parameters
    ----------
    mtable : str
        Path to the prediction CSV (rows: compounds, columns: assay IDs).
    scale_stat : str
        Path to the per-assay scaling statistics CSV with ``mean_pred`` and
        ``stdev_pred`` columns.
    bool_ZpIC50 : bool
        When true, convert the table from pIC50 to Z-scaled values — but only
        if every column AID has an entry in the scaling statistics.
    """

    def __init__(self, mtable, scale_stat, bool_ZpIC50):
        table = pd.read_csv(mtable, header=0, index_col=0, sep=',')
        table.index = table.index.astype(str)
        stats = pd.read_csv(scale_stat, index_col=0, header=0, sep=',')
        if bool_ZpIC50:
            cols = list(table.columns)
            known = [str(i) for i in stats.index]
            # Only Z-scale when every column has scaling statistics.
            if not set(cols) - set(known):
                # NOTE(review): assumes AIDs are numeric — int() raises otherwise.
                int_cols = [int(c) for c in cols]
                means = stats.loc[int_cols, 'mean_pred'].to_numpy()
                stdevs = stats.loc[int_cols, 'stdev_pred'].to_numpy()
                table = table.sub(means, axis=1).div(stdevs, axis=1).round(3)
        self.df = table

    def columns(self):
        """Return the assay column labels."""
        return list(self.df.columns)

    def get_column(self, assay=False, idx=False):
        """Return the row index when ``idx`` is true, else one assay column."""
        if idx:
            return list(self.df.index)
        return self.df[assay]

    def get(self, cmpd):
        """Return ``(cmpd, row_values)`` for one compound ID."""
        return cmpd, list(self.df.loc[cmpd])
# test the compound's loading time
def get_CA(p, p_col, assays, cmpds):
    """Build an assays-by-compounds matrix by querying ``p`` per compound.

    Parameters
    ----------
    p : object
        Query object exposing ``get(cmpd) -> (name, vector)``.
    p_col : list
        Row labels of the vectors returned by ``p.get``.
    assays, cmpds : list
        Requested row (assay) and column (compound) labels.

    Compounds that fail to load are dropped from the result with a warning.
    """
    df_CA = pd.DataFrame(0.0, index=assays, columns=cmpds)
    missing = []
    for cmpd in cmpds:
        try:
            name, pqsar_vec = p.get(cmpd)
            df_get = pd.DataFrame([float(s) for s in pqsar_vec], index=p_col, columns=[name])
            df_CA[cmpd] = df_get.loc[assays]
        except Exception:  # narrowed from bare except: still best-effort per compound
            if cmpd in list(df_CA.columns):
                missing.append(cmpd)
            print('Warning! {} not found'.format(cmpd))
    df_CA.drop(missing, axis=1, inplace=True)
    return df_CA
def get_list(input_list):
    """Read the query file and return its unique index values as strings.

    CSV files are comma separated; anything else is treated as tab separated.
    """
    sep = ',' if input_list.endswith('.csv') else '\t'
    frame = pd.read_csv(input_list, header=0, index_col=0, sep=sep)
    return [str(value) for value in set(frame.index)]
def get_stat(df, thr, suffix='Zscore'):
    """Per-row summary of ``df``: count of cells >= ``thr``, min, mean (3 dp), max."""
    stats = pd.DataFrame(0.0, index=df.index, columns=[])
    stats['count_{}>{}'.format(suffix, thr)] = df.ge(thr).sum(axis=1)
    stats['min_{}'.format(suffix)] = df.min(axis=1)
    stats['mean_{}'.format(suffix)] = df.mean(axis=1).round(3)
    stats['max_{}'.format(suffix)] = df.max(axis=1)
    return stats
def check_AID(items, p_col):
    """Keep only the requested AIDs present in ``p_col``; abort if none remain."""
    valid = [aid for aid in items if aid in p_col]
    if not valid:
        print('Error! No AID was found')
        sys.exit(1)
    if len(valid) < len(items):
        unknown = [aid for aid in items if aid not in valid]
        print('Warning! AID {} not found'.format(unknown))
    return valid
def scale2pIC50(scale_stat, df, row_AID=True, uncertainty=False):
    """Convert Z-scaled values back to pIC50 using per-assay scaling stats.

    Parameters
    ----------
    scale_stat : str
        CSV with ``mean_pred``/``stdev_pred`` columns indexed by AID.
    df : pandas.DataFrame
        Z-scaled values; assays are rows when ``row_AID`` is true, else columns.
    row_AID : bool
        Orientation flag; the result keeps the input orientation.
    uncertainty : bool
        Uncertainties rescale by stdev only (no mean shift).
    """
    df_scale = pd.read_csv(scale_stat, dtype={'AID': str}, index_col=0, header=0, sep=',')
    df_scale.index = df_scale.index.map(str)
    if row_AID:
        # Work with AIDs on the columns so Series arithmetic aligns by label.
        df = df.T.copy()
    cols = list(df.columns)
    df_std = df_scale.loc[cols, 'stdev_pred']
    if uncertainty:
        df = df * df_std
    else:
        df_mean = df_scale.loc[cols, 'mean_pred']
        df = df * df_std + df_mean
    if row_AID:
        df = df.T
    # Output orientation matches the input (e.g. cpds in rows, AID in columns).
    return df
def tidy_view(df, args):
    """Sort and reorder the result table for presentation.

    Rows are sorted (descending) by the count-above-threshold and mean columns;
    when querying by assay the columns are sorted the same way, otherwise a
    temporary 'count_Zscore' row is used to sort compound columns.  Returns
    ``(df, cpd_idx)`` where cpd_idx lists the data rows preceding the first
    summary row (empty in the compound branch).

    NOTE(review): if no 'count_*'/'mean_*' column exists, count_Zscore /
    mean_Zscore are never bound and the sort below raises NameError — callers
    presumably always pass tables produced by get_stat; confirm.
    """
    # hide 3 columns: R^2_RF(ext), stdev_pred and mean_pred. I sort it vertically (descending) by Count_score>threshold and mean_score.
    # I sort it horizontally (descending) by the column count_score>Threshold.
    # If the columns are compounds, I calculate it in excel with =countif().
    cols = list(df.columns)
    # Locate the summary columns added by get_stat (Z-scaled or pIC50 naming).
    for c in cols:
        if 'count_Zscore' in c or 'count_pIC50' in c:
            count_Zscore = c
        if 'mean_Zscore' in c or 'mean_pIC50' in c:
            mean_Zscore = c
    df.sort_values(by=[count_Zscore, mean_Zscore], axis=0, inplace=True, ascending=False)
    cpd_idx = []
    if args.Assay or args.CA:
        # Columns already carry the summary rows: sort them directly.
        df.sort_values(by=[count_Zscore, mean_Zscore], axis=1, inplace=True, ascending=False)
        # Collect data rows up to the first summary row.
        for idx in df.index:
            if 'count_Zscore' in idx or 'count_pIC50' in idx:
                break
            cpd_idx.append(idx)
    else:
        # args.Compound: build a temporary count row to sort compound columns.
        cpds = []
        for c in cols:
            if 'count_Zscore' in c or 'count_pIC50' in c:
                break
            cpds.append(c)
        AID = list(df.index)
        # Threshold is embedded in the column label, e.g. 'count_Zscore>3.0'.
        thr = float(count_Zscore.split('>')[1])
        df.loc['count_Zscore'] = 0.0
        df.loc['count_Zscore'] = df.loc[AID, cpds][df.loc[AID, cpds] >= thr].count()
        df.loc['count_Zscore'] = [float(s) for s in df.loc['count_Zscore']]
        df = df.sort_values(by='count_Zscore', axis=1, ascending=False)
        df.drop('count_Zscore', axis=0, inplace=True)
    #tobe_moved = ['R^2_RF(ext)', 'stdev_pred', 'mean_pred', 'validity', 'delta_error_rate', 'efficiency', 'wt']
    # Push the scaling-stat columns to the far right of the table.
    tobe_moved = ['stdev_pred', 'mean_pred']
    cols = list(df.columns)
    left_cols = [c for c in cols if c not in tobe_moved]
    left_cols.extend(tobe_moved)
    df = df[left_cols]
    return(df, cpd_idx)
def save_query(df, args, out_csv, scale_stat, bool_ZpIC50):
    """Write the query result to ``out_csv``, optionally merged with errors.

    When args.Uncertainty is truthy, the matching uncertainty table is loaded
    (from the second comma-separated local CSV, or the RFR master table) and
    interleaved as '<col>_Error' columns.  Compound queries write one CSV;
    otherwise the data rows and summary rows are written as two appended
    sections separated by a blank line.
    """
    # Split labels into data columns/rows versus trailing 'count_*' summaries.
    cols = []
    rows = []
    for c in df.columns:
        if 'count_' in c:
            break
        cols.append(c)
    cols_left = [c for c in df.columns if c not in cols]
    for i in df.index:
        if 'count_' in i:
            break
        rows.append(i)
    if args.Uncertainty.lower() in ['true', 'yes', 't', 'y']:
        if args.Local:
            # Second item of the comma-separated pair is the PI80 (error) CSV.
            local_dir, basename = os.path.dirname(args.Local), os.path.basename(args.Local)
            local_csv_PI = os.path.join(local_dir, basename.split(',')[1])
            u = QueryCustomCSV(local_csv_PI, scale_stat, False)
            u_col = u.columns()
        else:
            u = QueryCmpd(args.Directory, uncertainty=True)
            u_col = u.columns()
        # Orientation of the error lookup depends on the query mode.
        df_error = get_CA(u, u_col, rows, cols) if args.Compound else get_CA(u, u_col, cols, rows)
        df_error = scale2pIC50(scale_stat, df_error, uncertainty=True) if bool_ZpIC50 == False else df_error
        df_error = df_error if args.Compound else df_error.T
        error_cols = list(df_error.columns)
        error_cols = [col + '_Error' for col in error_cols]
        df_error.columns = error_cols
        df = df.merge(df_error, how='left', right_index=True, left_index=True)
        assert len(cols) == len(error_cols)
        # Interleave value and error columns: v1, v1_Error, v2, v2_Error, ...
        cols_all = sum([[c, e] for c, e in zip(cols, error_cols)], []) + cols_left
        df = df[cols_all]
    if args.Compound:
        df.round(2).to_csv(out_csv, encoding='utf-8')
    else:
        # Data rows first, a blank line, then the summary rows without header.
        df.round(2).loc[rows].to_csv(out_csv, mode='a+', encoding='utf-8')
        with open(out_csv, 'a+') as fid:
            fid.writelines('\n')
        df.drop(rows, axis=0).round(2).to_csv(out_csv, mode='a+', header=False, encoding='utf-8')
def main():
description = """Querying individual screening of pQSAR by CID and/or AID
***usage examples
***input file (.txt or .csv): querying by compounds or assays, a header line followed by compounds or assays (one item per line)
querying by compounds and assays, a header line followed by CID at the first column and AID second column.
***querying by compounds (CID) with a threshold of 3 stdev above the mean
python grepMOA2.py -c -i cid.txt -d ../chembl_28 -t 3.0 -z True -o cid_out.csv
***querying by assays (AID) with a threshold of 3 stdev above the mean
python grepMOA2.py -a -i aid.txt -d ../chembl_28 -t 3.0 -z True -o aid_out.csv
***querying by compounds (CID) and assays (AID)
python grepMOA2.py -ca -i ca_id.txt -d ../chembl_28 -z True -o ca_id_out.csv
"""
epilog = """----------------------profile-QSAR application-----------------------
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=epilog)
parser.add_argument('-i', '--Input', action='store', help='Input file with header line followed by querying compounds\' ID and/or assays\' AIDs', metavar='')
parser.add_argument('-d', '--Directory', help='Directory contains modeling information, default is chembl_28', type=str, metavar='')
parser.add_argument('-u', '--Uncertainty', help='Query the predicted uncertainty (Error) at 80 percent confidence level', type=str, default='False', metavar='True/False')
parser.add_argument('-l', '--Local', help='Local file (csv) of custom prediction', type=str, metavar='')
parser.add_argument('-e', '--Experimental', help='False(default): Querying predicted values; True: Querying experimental values (always returns pIC50, -z is not applicable)', type=str, default='False', metavar='True/False')
#parser.add_argument('-z', '--ZpIC50', help='Z scaled predictions of real ones', action='store_true')
parser.add_argument('-z', '--ZpIC50', help='True (default): Threshold and predictions in Z-scaled values; False: original pIC50 (log molar).', type=str, default='True', metavar='True/False')
parser.add_argument('-t', '--Threshold', help='Threshold to filter out unqualified screening', metavar='')
parser.add_argument('-o', '--Output', help='Output file in csv format', default='Query_Output.csv', metavar='')
index_group = parser.add_mutually_exclusive_group()
index_group.add_argument('-c', '--Compound', action='store_true', help='Querying by compounds (one compound per row)')
index_group.add_argument('-a', '--Assay', action='store_true', help='Querying by assays (one assay per row)')
index_group.add_argument('-ca', '--CA', action='store_true', help='Querying by compounds(first column) and assays (second column)')
if len(sys.argv) < 4:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
Uncertainty = True if args.Uncertainty.lower() in ['true', 'yes', 't', 'y'] else False
if not args.Input:
print('Error: no input file ')
sys.exit(1)
# Default threshold is 3.0
#Thr = args.Threshold if args.Threshold else 3.0
# flag predicted uncertainty (error)
# Default output
bool_expr = True if args.Experimental.lower() in ['true', 'yes', 't', 'y'] else False
out_csv = args.Output if args.Output else 'Query_Output.csv'
if os.path.exists(out_csv):
os.remove(out_csv)
if bool_expr == False:
bool_ZpIC50 = True if args.ZpIC50.lower() in ['true', 'yes', 't', 'y'] else False
scale_stat = '{}/PLS_Predictions/ZscalingStats.csv'.format(args.Directory)
summary_table = '{}/Summary_pQSAR/SummaryPQSAR.csv'.format(args.Directory)
df_summary = pd.read_csv(summary_table, index_col=0, header=0)
df_summary.index = df_summary.index.map(str)
if args.Local:
if Uncertainty == True:
# we expect AllPLS_prediction.csv and AllPLS_prediction_PI80.csv are comma seperated.
# e.g., AllPLS_prediction.csv,AllPLS_prediction_PI80.csv
local_dir, basename = os.path.dirname(args.Local), os.path.basename(args.Local)
local_csv_act = os.path.join(local_dir, basename.split(',')[0])
else:
local_csv_act = args.Local
if local_csv_act.endswith('.csv'):
p = QueryCustomCSV(local_csv_act, scale_stat, bool_ZpIC50)
p_col = p.columns()
else:
print('Error! Need to be a custom predicted csv file')
sys.exit(1)
if args.Assay:
p_idx = p.get_column(idx=True)
else:
if args.Compound or args.CA:
p = QueryCmpd(args.Directory)
p_col = p.columns()
elif args.Assay:
p = QueryAssay(args.Directory)
p_idx = p.columns()
p_col = p.idx()
# get the list of compounds or assays
items = get_list(args.Input)
if args.Compound:
items = [it.strip() for it in items]
df_cmpd = pd.DataFrame(0.0, index=p_col, columns=items)
cmpd_new = []
for cmpd in items:
#print(cmpd, thr)
try:
name, pqsar_vec = p.get(cmpd)
df_cmpd[cmpd] = pqsar_vec
except:
cmpd_new.append(cmpd)
print('Warning! {} not found'.format(cmpd))
df_cmpd.drop(cmpd_new, axis=1, inplace=True)
if len(df_cmpd.index) == 0:
print('Error, No CID was found')
sys.exit(1)
if not args.Local and bool_ZpIC50 == False:
# convert back to pIC50
df_cmpd = scale2pIC50(scale_stat, df_cmpd)
if not args.Threshold:
thr= -10000
else:
thr = float(args.Threshold)
df_cmpd = df_cmpd.iloc[list(df_cmpd.max(axis=1) > thr), :].copy()
# If threshold argument is not provided, the whole result table will be saved by default.
if bool_ZpIC50 == False:
stats = get_stat(df_cmpd, thr, suffix='pIC50')
else:
#print(df_cmpd.iloc[0:5, 0:10])
stats = get_stat(df_cmpd, thr)
df_cmpd = df_cmpd.merge(stats, how='left', left_index=True, right_index=True)
df_cmpd.index = df_cmpd.index.map(str)
df_cmpd = df_cmpd.merge(df_summary, how='left', left_index=True, right_index=True)
df_cmpd.index.name = 'AID'
df_cmpd, _ = tidy_view(df_cmpd, args)
save_query(df_cmpd, args, out_csv, scale_stat, bool_ZpIC50)
if args.Assay:
items = check_AID(items, p_col)
df_assay = pd.DataFrame(0.0, index=p_idx, columns=items)
# filtering z | |
if self.isValidValue(v):
vl.append(self.citems[v])
continue # end of while(v)
if self.sweepRange[1] - v > 0:
if self.isValidValue(v):
vl.append(self.citems[v])
else:
return vl
else:
for v in self.valList:
if self.isValidValue(v):
vl.append(self.citems[v])
return vl
return vl
    def calcCaseNum(self):
        """
        Compute the number of parameter cases for a parameter survey.
        Returns -> number of parameter cases (0 when the parameter cannot
                   produce any case, e.g. incomplete sweep definition)
        """
        if self.type == Param.Type_INT or self.type == Param.Type_REAL or \
           self.type == Param.Type_CHOICE:
            c = 0
            if self.useRange:
                # Sweep mode: count valid values from min to max, step delta.
                if self.sweepRange[0] != None and self.sweepRange[1] != None \
                   and self.sweepRange[2] != None:
                    delta = 0
                    if self.type == Param.Type_REAL:
                        # float tolerance: 1% of the step size
                        delta = self.sweepRange[2] / 100.0
                    v = self.sweepRange[0]
                    if self.isValidValue(v):
                        c += 1
                    while v < self.sweepRange[1]:
                        v += self.sweepRange[2]
                        if v > self.sweepRange[1] + delta:
                            break
                        if self.isValidValue(v):
                            c += 1
                        continue # end of while(v)
                    # NOTE(review): after the loop either v >= sweepRange[1]
                    # (normal exit) or v > sweepRange[1] + delta (break), so
                    # this condition looks unreachable — confirm intent.
                    if self.sweepRange[1] - v > delta:
                        if self.isValidValue(v):
                            c += 1
                else:
                    # incomplete sweep definition -> no cases
                    return 0
            else:
                # Enumerated mode: count the valid listed values.
                for v in self.valList:
                    if self.isValidValue(v):
                        c += 1
            return c
        if self.type == Param.Type_BOOL:
            # a swept bool contributes both True and False
            if self.useRange:
                return 2
            else:
                return 1
        if self.type == Param.Type_STRING:
            # a string parameter is one case when non-empty, none otherwise
            if self.valList == [] or self.valList[0] == '':
                return 0
            else:
                return 1
        return 0
    def parseXML(self, xnp, group=''):
        """
        Parse an XML <param> node into this object.
        [in] xnp    XML data of the <param> node
        [in] group  name of the parameter group the <param> node belongs to
        Returns -> boolean (False only when xnp is not a <param> element;
                   malformed content is logged and skipped, returning True)
        """
        if not xnp.nodeType == xnp.ELEMENT_NODE or \
           not xnp.tagName == 'param':
            return False
        # name and type attributes are mandatory; skip (don't fail) without them
        if not xnp.hasAttribute('name') or not xnp.hasAttribute('type'):
            log.error(LogMsg(42, '<param> tag without name nor type '
                             + 'attribute found, ignored'))
            return True
        # optional disable attribute ("true"/"yes" or "false"/"no")
        if xnp.hasAttribute('disable'):
            val = xnp.getAttribute('disable').lower()
            if val in ['true', 'yes']:
                self.disable = True
                self.disable_gui = True
            elif val in ['false', 'no']:
                self.disable = False
                self.disable_gui = False
        p = self
        p.group = group
        p.name = xnp.getAttribute('name')
        # decode the type attribute into a Param.Type_* constant
        ltype = xnp.getAttribute('type')
        if ltype == 'int':
            p.type = Param.Type_INT
            p.arithPrec = 0
        elif ltype == 'real':
            p.type = Param.Type_REAL
            p.arithPrec = 6
        elif ltype == 'bool':
            p.type = Param.Type_BOOL
        elif ltype == 'string':
            p.type = Param.Type_STRING
        elif ltype == 'choice':
            p.type = Param.Type_CHOICE
        else:
            log.error(LogMsg(43, '<param> tag with invalid type: %s' % ltype
                             + ', ignored'))
            return True
        # child nodes: text accumulates into the description, elements are
        # dispatched on tag name below
        for cur in xnp.childNodes:
            if cur.nodeType == cur.TEXT_NODE:
                p.desc = p.desc + conv_text(cur.data.strip())
                continue
            if cur.nodeType == cur.ELEMENT_NODE:
                # <item>: one selectable entry (choice type only)
                if cur.tagName == 'item':
                    if p.type != Param.Type_CHOICE:
                        continue
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    p.citems.append(conv_text(cur.firstChild.data.strip()))
                    continue
                # <minmax>/<range>: value bounds (int/real only)
                if cur.tagName == 'minmax' or cur.tagName == 'range':
                    if p.type != Param.Type_INT and p.type != Param.Type_REAL:
                        continue
                    if cur.hasAttribute('min'):
                        val = cur.getAttribute('min')
                        if p.type == Param.Type_INT:
                            p.minmax[0] = int(val)
                        else:
                            p.minmax[0] = float(val)
                    if cur.hasAttribute('max'):
                        val = cur.getAttribute('max')
                        if p.type == Param.Type_INT:
                            p.minmax[1] = int(val)
                        else:
                            p.minmax[1] = float(val)
                    continue
                # <value>: current value(s); whitespace-separated list for
                # int/real/choice, single token for bool/string
                if cur.tagName == 'value':
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    val = conv_text(cur.firstChild.data.strip())
                    try:
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            vals = val.split()
                            p.valList = [0]*len(vals)
                            for i in range(len(vals)):
                                p.valList[i] = int(vals[i])
                            continue
                        if p.type == Param.Type_REAL:
                            vals = val.split()
                            p.valList = [0]*len(vals)
                            for i in range(len(vals)):
                                p.valList[i] = float(vals[i])
                            continue
                        if p.type == Param.Type_BOOL:
                            val = val.lower()
                            if val in ['true', 'yes']:
                                p.valList = [True]
                            elif val in ['false', 'no']:
                                p.valList = [False]
                            continue
                        if p.type == Param.Type_STRING:
                            p.valList = [val]
                            continue
                    except Exception, e:
                        log.error(LogMsg(44, 'invalid <value> tag found'))
                        log.error(str(e))
                        continue
                    continue
                # <depend>: inter-parameter dependency; each child <cond>
                # is parsed via parseCond()
                if cur.tagName == 'depend':
                    if not cur.hasAttribute('target'):
                        log.error(LogMsg(45, '<depend> tag without '
                                         + 'target attribute found, ignored'))
                        continue
                    targ = cur.getAttribute('target')
                    targ2 = None
                    if cur.hasAttribute('target2'):
                        targ2 = cur.getAttribute('target2')
                    for cur2 in cur.childNodes:
                        if cur2.nodeType != cur.ELEMENT_NODE:
                            continue
                        if cur2.tagName == 'cond':
                            if not self.parseCond(cur2, targ, targ2):
                                log.error(LogMsg(46, 'invalid <cond> tag found,'
                                                 + ' ignored'))
                                #return False
                            continue
                        continue
                    continue
                # <useRange>: sweep (range) mode on/off (not for strings)
                if cur.tagName == 'useRange':
                    if p.type == Param.Type_STRING:
                        continue
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    val = conv_text(cur.firstChild.data.strip()).lower()
                    if val in ['true', 'yes']:
                        p.useRange = True
                    elif val in ['false', 'no']:
                        p.useRange = False
                    continue
                # <sweepRange>: min/max/delta for the survey sweep
                if cur.tagName == 'sweepRange':
                    if p.type != Param.Type_INT and \
                       p.type != Param.Type_REAL and \
                       p.type != Param.Type_CHOICE:
                        continue
                    if cur.hasAttribute('min'):
                        val = cur.getAttribute('min')
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            p.sweepRange[0] = int(val)
                        else:
                            p.sweepRange[0] = float(val)
                    if cur.hasAttribute('max'):
                        val = cur.getAttribute('max')
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            p.sweepRange[1] = int(val)
                        else:
                            p.sweepRange[1] = float(val)
                    if cur.hasAttribute('delta'):
                        val = cur.getAttribute('delta')
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            p.sweepRange[2] = int(val)
                        else:
                            p.sweepRange[2] = float(val)
                    continue
                # <arithPrect>: decimal precision for reals
                # NOTE(review): tag name 'arithPrect' (sic) — looks like a typo
                # for 'arithPrec' but must match the writer side; confirm.
                if cur.tagName == 'arithPrect':
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    val = conv_text(cur.firstChild.data.strip())
                    if p.type == Param.Type_REAL:
                        vals = val.split()
                        try:
                            ndp = int(vals[0])
                            if ndp >= 0:
                                p.arithPrec = ndp
                        except:
                            pass
                    continue
                # <useExcept>: exclusion handling on/off
                if cur.tagName == 'useExcept':
                    if p.type != Param.Type_INT and \
                       p.type != Param.Type_REAL and \
                       p.type != Param.Type_CHOICE:
                        continue
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    val = conv_text(cur.firstChild.data.strip()).lower()
                    if val in ['true', 'yes']:
                        p.useExcept = True
                    elif val in ['false', 'no']:
                        p.useExcept = False
                    continue
                # <except>: explicit list of excluded values
                if cur.tagName == 'except':
                    if p.type != Param.Type_INT and \
                       p.type != Param.Type_REAL and \
                       p.type != Param.Type_CHOICE:
                        continue
                    self.useExceptList = True
                    if not cur.hasChildNodes():
                        continue
                    if cur.firstChild.nodeType != cur.TEXT_NODE:
                        continue
                    val = conv_text(cur.firstChild.data.strip())
                    try:
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            vals = val.split()
                            p.excepts = [0]*len(vals)
                            for i in range(len(vals)):
                                p.excepts[i] = int(vals[i])
                            continue
                        if p.type == Param.Type_REAL:
                            vals = val.split()
                            p.excepts = [0]*len(vals)
                            for i in range(len(vals)):
                                p.excepts[i] = float(vals[i])
                            continue
                    except Exception, e:
                        log.error(LogMsg(47, 'invalid <except> tag found'))
                        log.error(str(e))
                        continue
                    continue
                # <exceptRange>: min/max interval of excluded values
                if cur.tagName == 'exceptRange':
                    self.useExceptList = False
                    if p.type != Param.Type_INT and \
                       p.type != Param.Type_REAL and \
                       p.type != Param.Type_CHOICE:
                        continue
                    if cur.hasAttribute('min'):
                        val = cur.getAttribute('min')
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            p.exceptRange[0] = int(val)
                        else:
                            p.exceptRange[0] = float(val)
                    if cur.hasAttribute('max'):
                        val = cur.getAttribute('max')
                        if p.type == Param.Type_INT or \
                           p.type == Param.Type_CHOICE:
                            p.exceptRange[1] = int(val)
                        else:
                            p.exceptRange[1] = float(val)
                    continue
                continue # end of ELEMENT node
            continue # end of for(cur)
        return True
def parseCond(self, xnp, target, target2=None):
"""
XMLの<cond>ノードのパース
[in] xnp <cond>ノードのXMLデータ
[in] target <cond>ノードの親の<depend>ノードのtarget属性値
[in] target2 <cond>ノードの親の<depend>ノードのtarget2属性値
戻り値 -> 真偽値
"""
if not xnp.nodeType == xnp.ELEMENT_NODE or \
not xnp.tagName == 'cond':
return False
# create cond class object
c = Cond()
# parse <cond> node
if not c.parseXML(xnp, self, target, target2):
return False
self.depend_cond = c
return True
def outputXML(self, ofp, ofst=0):
"""
XMLの<param>ノードの出力
[in] ofp 出力先ファイル
[in] ofst オフセット量
戻り値 -> 真偽値
"""
ofs = ' ' * ofst
ofs2 = ' ' * (ofst+2)
ofs4 = ' ' * (ofst+4)
if self.type == Param.Type_INT:
ts = 'int'
elif self.type == Param.Type_REAL:
ts = 'real'
elif self.type == Param.Type_BOOL:
ts = 'bool'
elif self.type == Param.Type_STRING:
ts = 'string'
elif self.type == Param.Type_CHOICE:
ts = 'choice'
else:
return False
try:
if self.disable and self.disable_gui:
ofp.write(ofs + '<param name="%s" type="%s" disable="yes">\n' \
% (self.name, ts))
else:
ofp.write(ofs + '<param name="%s" type="%s">\n' \
% (self.name, ts))
except:
return False
# description
if self.desc != '':
ofp.write(ofs2 + self.desc + '\n')
# minmax
if self.type == Param.Type_INT or self.type == Param.Type_REAL:
if self.minmax[0] != None or self.minmax[1] != None:
ofp.write(ofs2 + '<range')
if self.minmax[0] != None:
ofp.write(' min="%s"' % str(self.minmax[0]))
if self.minmax[1] != None:
ofp.write(' max="%s"' % str(self.minmax[1]))
ofp.write('/>\n')
# citems
if self.type == Param.Type_CHOICE:
for item in self.citems:
ofp.write(ofs2 + '<item>%s</item>\n' % item)
# valList
if self.type == Param.Type_BOOL or self.type == Param.Type_STRING:
if self.valList != []:
ofp.write(ofs2 + '<value>%s</value>\n' % str(self.valList[0]))
else:
ofp.write(ofs2 + '<value></value>\n')
elif len(self.valList) > 0:
ofp.write(ofs2 + '<value>\n')
for v in self.valList:
ofp.write(ofs4 + str(v) + '\n')
ofp.write(ofs2 + '</value>\n')
# useRange
if self.type != Param.Type_STRING:
ofp.write(ofs2 + '<useRange>%s</useRange>\n' % str(self.useRange))
# sweepRange
if self.type == Param.Type_INT or self.type == Param.Type_REAL or \
self.type == Param.Type_CHOICE:
if self.sweepRange[0] != None or self.sweepRange[1] != None or \
self.sweepRange[2] != None:
ofp.write(ofs2 + '<sweepRange')
if self.sweepRange[0] != None:
ofp.write(' min="%s"' % str(self.sweepRange[0]))
if self.sweepRange[1] != None:
ofp.write(' max="%s"' % str(self.sweepRange[1]))
if self.sweepRange[2] != None:
ofp.write(' delta="%s"' % str(self.sweepRange[2]))
ofp.write('/>\n')
# arithPrec
if self.type == Param.Type_REAL and self.arithPrec != 6:
ofp.write(ofs2 + '<arithPrec>%d</arithPrec>\n' | |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import re
import datetime
import logging
from cleanliness import cleaner
from django.conf import settings
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import images
from common.models import Stream, StreamEntry, InboxEntry, Actor, Relation
from common.models import Subscription, Invite, OAuthConsumer, OAuthRequestToken
from common.models import OAuthAccessToken, Image, Activation
from common.models import KeyValue, Presence
from common.models import AbuseReport
from common.models import Task
from common.models import PRIVACY_PRIVATE, PRIVACY_CONTACTS, PRIVACY_PUBLIC
from common import clean
from common import clock
from common import exception
from common import imageutil
from common import mail
from common import memcache
from common import models
from common import normalize
from common import patterns
from common import properties
from common import throttle
from common import util
from common import validate
from common.protocol import sms
from common.protocol import xmpp
# Access levels, ordered from least to most privileged; comparisons are done
# by index into ACCESS_LEVELS (see has_access below).
NO_ACCESS = 'none'
READ_ACCESS = 'read'
WRITE_ACCESS = 'write'
DELETE_ACCESS = 'delete'
ADMIN_ACCESS = 'admin'
ACCESS_LEVELS = [NO_ACCESS,
                 READ_ACCESS,
                 WRITE_ACCESS,
                 DELETE_ACCESS,
                 ADMIN_ACCESS]
# Superuser actor used internally for privileged lookups.
ROOT = Actor(nick=settings.ROOT_NICK, type='user')
ROOT.access_level = ADMIN_ACCESS
# Max length of a message. Conciseness is a virtue.
# UIs should prevent posting longer messages. API will truncate
# posts longer than this.
MAX_POST_LENGTH = 140
# How many contacts we are willing to count to update an actor's
# contact_count or follower_count properties
CONTACT_COUNT_THRESHOLD = 100
# Maximum number of channels a user is allowed to admin at a time
MAX_ADMINS_PER_ACTOR = 48
# The default length of a task's visibility lock in seconds
DEFAULT_TASK_EXPIRE = 10
# The maximum number of followers to process per task iteration of inboxes
MAX_FOLLOWERS_PER_INBOX = 100
# The maximum number of followers we can notify per task iteration
MAX_NOTIFICATIONS_PER_TASK = 100
# The first notification type to handle
FIRST_NOTIFICATION_TYPE = 'im'
# Avatar size code -> (width, height) in pixels.
AVATAR_IMAGE_SIZES = { 'u': (30, 30),
                       't': (50, 50),
                       'f': (60, 60),
                       'm': (175, 175),
                       }
# Wrap utcnow so that it can be mocked in tests. We can't replace the function
# in the datetime module because it's an extension, not a python module.
utcnow = lambda: clock.utcnow()
# Namespace domain with dots escaped for use inside regexes.
RE_NS_DOMAIN = settings.NS_DOMAIN.replace('.', r'\.')
# Matches "#channel[@domain]<sep>message" style posts; groups: channel, message.
channel_post_re = re.compile(
    r'^(?P<channel>#[a-zA-Z][a-zA-Z0-9]{%d,%d}(?:@%s)?)'
    r':?\s+' # separator
    r'(?P<message>.*)' # message
    % (clean.NICK_MIN_LENGTH - 1, clean.NICK_MAX_LENGTH - 1, RE_NS_DOMAIN)
    )
# Captures the first word character of each whitespace-separated word.
smashed_title_re = re.compile(r'(?:(?:^|\s+)(\w))')
# little helper for code reuse
def _item_from_args_kw(f, allowed, args, kw):
  """Fetch an item via f(ROOT, key).

  The identifying key is the first truthy kw value whose name appears in
  `allowed`; if none is found, positional args[0] is used instead.

  allowed - list of allowable kw key names
  args - list of args
  kw - dict of key-value args
  """
  key = next((kw[name] for name in allowed if kw.get(name)), None)
  return f(ROOT, key if key else args[0])
def _actor_from_args_kw(allowed, args, kw):
  """Fetch an actor (via actor_get) identified by the first matching arg/kw."""
  return _item_from_args_kw(actor_get, allowed, args, kw)
def _entry_from_args_kw(allowed, args, kw):
  """Fetch an entry (via entry_get) identified by the first matching arg/kw."""
  return _item_from_args_kw(entry_get, allowed, args, kw)
def _stream_from_args_kw(allowed, args, kw):
  """Fetch a stream (via stream_get) identified by the first matching arg/kw."""
  return _item_from_args_kw(stream_get, allowed, args, kw)
# Better Access Control
def has_access(actor_ref, access_level):
  """True if actor_ref's access level is at least access_level."""
  if not actor_ref:
    return False
  # TODO(termie): the default should eventually be NO_ACCESS; it is
  #               DELETE_ACCESS for now so test setup stays terse and does
  #               not have to assign an access level on every fetched actor.
  granted = getattr(actor_ref, 'access_level', DELETE_ACCESS)
  return ACCESS_LEVELS.index(access_level) <= ACCESS_LEVELS.index(granted)
def actor_owns_actor(actor_ref, other_ref):
  """True if actor_ref owns other_ref.

  Ownership holds for: the actor itself, any admin, and (for channels)
  the channel's admins.
  """
  if not (actor_ref and other_ref):
    return False
  # actors own themselves
  if actor_ref.nick == other_ref.nick:
    return True
  # admins own anything
  if has_access(actor_ref, ADMIN_ACCESS):
    return True
  # a channel is owned by its admins
  return bool(other_ref.is_channel()
              and channel_has_admin(ROOT, other_ref.nick, actor_ref.nick))
def actor_owns_stream(actor_ref, stream_ref):
  """True if actor_ref owns the actor that owns stream_ref."""
  if not stream_ref:
    return False
  owner_ref = actor_get_safe(ROOT, stream_ref.owner)
  if not owner_ref:
    # dangling owner reference (deleted owner etc.); nobody owns the stream
    return False
  return actor_owns_actor(actor_ref, owner_ref)
def actor_owns_entry(actor_ref, entry_ref):
  """True if actor_ref owns entry_ref.

  An entry is owned by whoever owns its author, its stream owner, or —
  for comments — the parent entry (checked recursively; comments cannot
  themselves be commented on, so the recursion is one level deep).
  """
  if not entry_ref:
    return False
  author_ref = actor_get_safe(ROOT, entry_ref.actor)
  if not author_ref:
    # dangling author reference; shouldn't normally happen
    return False
  if actor_owns_actor(actor_ref, author_ref):
    return True
  stream_owner_ref = actor_get_safe(ROOT, entry_ref.owner)
  if not stream_owner_ref:
    # dangling owner reference; shouldn't normally happen
    return False
  if actor_owns_actor(actor_ref, stream_owner_ref):
    return True
  # comments inherit ownership from their parent entry
  if entry_ref.entry:
    parent_ref = entry_get_safe(ROOT, entry_ref.entry)
    if actor_owns_entry(actor_ref, parent_ref):
      return True
  return False
def actor_can_view_actor(actor_ref, other_ref):
  """True if actor_ref may view other_ref.

  Public actors are visible to everyone. Restricted actors are visible to
  their owners, to their contacts (plain users) or to their members
  (channels).
  """
  if not other_ref:
    return False
  if other_ref.is_public():
    return True
  # not public: an authenticated viewer is required from here on
  if not actor_ref:
    return False
  if actor_owns_actor(actor_ref, other_ref):
    return True
  if not other_ref.is_restricted():
    return False
  if other_ref.is_channel():
    # members may view a restricted channel (admins covered by ownership)
    return bool(channel_has_member(ROOT, other_ref.nick, actor_ref.nick))
  # contacts may view a restricted user
  return bool(actor_has_contact(ROOT, other_ref.nick, actor_ref.nick))
def actor_can_view_stream(actor_ref, stream_ref):
  """True if actor_ref may view stream_ref."""
  if not stream_ref:
    return False
  if stream_ref.is_public() or actor_owns_stream(actor_ref, stream_ref):
    return True
  if stream_ref.is_restricted():
    owner_ref = actor_get_safe(ROOT, stream_ref.owner)
    if actor_can_view_actor(actor_ref, owner_ref):
      return True
  # Comments streams are nominally private but comments take on the privacy
  # of the entries they are attached to, so the stream's existence is visible
  # to anybody without granting access to any actual comment. Imported data
  # uses both 'comment' and 'comments' as the type.
  return stream_ref.type in ('comment', 'comments')
def actor_can_view_entry(actor_ref, entry_ref):
  """True if actor_ref may view entry_ref.

  Top-level entries inherit visibility from their stream; comments inherit
  visibility from their parent entry.
  """
  if not entry_ref:
    return False
  if actor_owns_entry(actor_ref, entry_ref):
    return True
  if entry_ref.entry:
    # comment: visibility follows the parent entry
    parent_ref = entry_get_safe(ROOT, entry_ref.entry)
    return bool(actor_can_view_entry(actor_ref, parent_ref))
  # top-level entry: visibility follows its stream
  stream_ref = stream_get_safe(ROOT, entry_ref.stream)
  return bool(actor_can_view_stream(actor_ref, stream_ref))
# Better Access Control Decorators
def access_required(access_level):
  """Decorator factory: require api_user to hold at least access_level.

  Raises exception.ApiException(PERMISSION_ERROR) when the caller's access
  is insufficient; otherwise delegates to the wrapped function.
  """
  def _decorator(f):
    def _wrap(api_user, *args, **kw):
      if has_access(api_user, access_level):
        return f(api_user, *args, **kw)
      raise exception.ApiException(
          exception.PERMISSION_ERROR,
          'You need %s access or above to use this method' % access_level)
    # preserve the wrapped function's name and record the requirement
    _wrap.func_name = f.func_name
    _wrap.meta = append_meta(f, '%s_required' % access_level)
    return _wrap
  return _decorator
# Convenience decorators for the common access levels.
write_required = access_required(WRITE_ACCESS)
delete_required = access_required(DELETE_ACCESS)
admin_required = access_required(ADMIN_ACCESS)
def append_meta(f, key, value=None):
  """Append (key, value) to f's accumulated meta list and return the list.

  The list is created on first use and stored on the function object itself.
  """
  try:
    meta = f.meta
  except AttributeError:
    meta = f.meta = []
  meta.append((key, value))
  return meta
def owner_required(f):
  """Decorator: the caller must own the actor named by nick/owner/channel.

  Raises exception.ApiException(PRIVACY_ERROR) when the ownership check
  fails; otherwise delegates to the wrapped function.
  """
  def _wrap(api_user, *args, **kw):
    actor_ref = _actor_from_args_kw(['nick', 'owner', 'channel'], args, kw)
    if actor_owns_actor(api_user, actor_ref):
      # everything checks out, call the original function
      return f(api_user, *args, **kw)
    # TODO(termie): pretty obtuse message...
    raise exception.ApiException(exception.PRIVACY_ERROR,
                                 'Operation not allowed')
  _wrap.func_name = f.func_name
  _wrap.meta = append_meta(f, 'owner_required')
  return _wrap
def owner_required_by_target(f):
def _wrap(api_user, *args, **kw):
# TODO(termie): I don't really like that this looks at the second
# arg, it feels hacky.
target = kw.get('target')
if target is None:
target = args[1]
nick = util.get_user_from_topic(target)
actor_ref = actor_get_safe(ROOT, nick)
if not actor_ref:
raise exception.ApiException(0x00, 'Actor does not exist: %s' % nick)
if not actor_owns_actor(api_user, actor_ref):
# TODO(termie): pretty obtuse message...
raise exception.ApiException(exception.PRIVACY_ERROR,
'Operation not allowed')
# everything checks out, call the original function
return f(api_user, *args, **kw)
_wrap.func_name = f.func_name
_wrap.meta = | |
import re
import logging
from datetime import datetime
from sqlalchemy import func, or_, DATE
from dataactcore.models.domainModels import Zips, CityCode, ZipCity, DUNS
from dataactcore.models.stagingModels import PublishedAwardFinancialAssistance
from dataactcore.models.lookups import (ACTION_TYPE_DICT, ASSISTANCE_TYPE_DICT, CORRECTION_DELETE_IND_DICT,
RECORD_TYPE_DICT, BUSINESS_TYPE_DICT, BUSINESS_FUNDS_IND_DICT)
from dataactcore.utils.business_categories import get_business_categories
logger = logging.getLogger(__name__)
def get_zip_data(sess, zip_five, zip_four):
    """ Look up a Zips row for a 5- or 9-digit zip, plus a congressional
        district count for that zip.
        Args:
            sess: the current DB session
            zip_five: the 5-digit zip being checked
            zip_four: the last-4 zip extension being checked (may be falsy)
        Returns:
            A Zips object (or None if nothing matched) and a count of the
            congressional districts for the zip as 2 return values
    """
    # Prefer an exact 9-digit match; a unique 9-digit zip implies one district.
    zip_info = None
    cd_count = 1
    if zip_four:
        zip_info = sess.query(Zips).filter_by(zip5=zip_five, zip_last4=zip_four).first()
    if zip_info is None:
        # Fall back to the 5-digit zip, which may span several districts.
        zip_info = sess.query(Zips).filter_by(zip5=zip_five).first()
        cd_count = sess.query(Zips.congressional_district_no.label('cd_count')). \
            filter_by(zip5=zip_five).distinct().count()
    return zip_info, cd_count
def derive_cfda(obj, cfda_dict, job_id, detached_award_financial_assistance_id):
    """ Derive cfda_title from cfda_number using the CFDA lookup dict.
        Args:
            obj: a dictionary containing the details we need to derive from and to
            cfda_dict: a dictionary containing data for all CFDA objects keyed by cfda number
            job_id: the ID of the submission job
            detached_award_financial_assistance_id: the ID of the submission row
    """
    title = cfda_dict.get(obj['cfda_number'])
    obj['cfda_title'] = title
    if title:
        return
    # A missing title is logged with enough context to trace the row, but it
    # does not abort the derivation.
    logger.error({
        'message': 'CFDA title not found for CFDA number {}'.format(obj['cfda_number']),
        'message_type': 'BrokerError',
        'job_id': job_id,
        'detached_award_financial_assistance_id': detached_award_financial_assistance_id
    })
def derive_awarding_agency_data(obj, sub_tier_dict, office_dict):
    """ Derive awarding sub tier agency name, awarding agency name and code.
        Args:
            obj: a dictionary containing the details we need to derive from and to
            sub_tier_dict: a dictionary containing all the data for SubTierAgency objects keyed by sub tier code
            office_dict: a dictionary containing all the data for Office objects keyed by office code
    """
    obj['awarding_agency_code'] = None
    obj['awarding_agency_name'] = None
    obj['awarding_sub_tier_agency_n'] = None
    sub_tier_code = obj['awarding_sub_tier_agency_c']
    # With an office code but no sub tier code, derive the sub tier from the office
    if not sub_tier_code and obj['awarding_office_code']:
        office = office_dict.get(obj['awarding_office_code'])
        sub_tier_code = office['sub_tier_code']
        obj['awarding_sub_tier_agency_c'] = sub_tier_code
    # With a sub tier code (provided or derived), fill in the sub/top tier
    # names and the top tier code (FREC agencies use their FREC code)
    if sub_tier_code:
        sub_tier = sub_tier_dict.get(sub_tier_code)
        top_tier_code = sub_tier['frec_code'] if sub_tier['is_frec'] else sub_tier['cgac_code']
        obj['awarding_agency_code'] = top_tier_code
        obj['awarding_agency_name'] = sub_tier['agency_name']
        obj['awarding_sub_tier_agency_n'] = sub_tier['sub_tier_agency_name']
def derive_funding_agency_data(obj, sub_tier_dict, office_dict):
    """ Derive funding sub tier agency name, funding agency name and code.
        Args:
            obj: a dictionary containing the details we need to derive from and to
            sub_tier_dict: a dictionary containing all the data for SubTierAgency objects keyed by sub tier code
            office_dict: a dictionary containing all the data for Office objects keyed by office code
    """
    obj['funding_sub_tier_agency_na'] = None
    obj['funding_agency_name'] = None
    obj['funding_agency_code'] = None
    sub_tier_code = obj['funding_sub_tier_agency_co']
    # With an office code but no sub tier code, derive the sub tier from the office
    if not sub_tier_code and obj['funding_office_code']:
        office = office_dict.get(obj['funding_office_code'])
        sub_tier_code = office['sub_tier_code']
        obj['funding_sub_tier_agency_co'] = sub_tier_code
    # With a sub tier code (provided or derived), fill in the sub/top tier
    # names and the top tier code (FREC agencies use their FREC code)
    if sub_tier_code:
        sub_tier = sub_tier_dict.get(sub_tier_code)
        top_tier_code = sub_tier['frec_code'] if sub_tier['is_frec'] else sub_tier['cgac_code']
        obj['funding_agency_code'] = top_tier_code
        obj['funding_agency_name'] = sub_tier['agency_name']
        obj['funding_sub_tier_agency_na'] = sub_tier['sub_tier_agency_name']
def derive_ppop_state(obj, state_dict):
    """ Derive the normalized place of performance code and state code/name.
        Args:
            obj: a dictionary containing the details we need to derive from and to
            state_dict: a dictionary containing all the data for State objects keyed by state code
        Returns:
            Place of performance code, state code, and state name as 3 return
            values (all strings or None)
    """
    raw_code = obj['place_of_performance_code']
    ppop_code = raw_code.upper() if raw_code else None
    state_code = None
    state_name = None
    if ppop_code == '00*****':
        # special code meaning the award spans multiple states
        state_name = 'Multi-state'
    elif ppop_code and ppop_code != '00FORGN':
        # domestic code: the first two characters are the state abbreviation
        state_code = ppop_code[:2]
        state_name = state_dict.get(state_code)
    obj['place_of_perfor_state_code'] = state_code
    obj['place_of_perform_state_nam'] = state_name
    return ppop_code, state_code, state_name
def derive_ppop_location_data(obj, sess, ppop_code, ppop_state_code, county_dict):
    """ Deriving place of performance location values from zip4
        Args:
            obj: a dictionary containing the details we need to derive from and to
            sess: the current DB session
            ppop_code: place of performance code
            ppop_state_code: state code from the place of performance code
            county_dict: a dictionary containing all the data for County objects keyed by state code + county number
    """
    if obj['place_of_performance_zip4a'] and obj['place_of_performance_zip4a'] != 'city-wide':
        zip_five = obj['place_of_performance_zip4a'][:5]
        zip_four = None
        # if zip4 is 9 digits, set the zip_four value to the last 4 digits
        if len(obj['place_of_performance_zip4a']) > 5:
            zip_four = obj['place_of_performance_zip4a'][-4:]
        # NOTE(review): zip_info is None when the zip is absent from the Zips
        # table; the attribute accesses below would then raise. Presumably
        # earlier validation guarantees the zip exists — TODO confirm.
        zip_info, cd_count = get_zip_data(sess, zip_five, zip_four)
        # deriving ppop congressional district; '90' is the fallback used when
        # the district cannot be determined unambiguously
        if not obj['place_of_performance_congr']:
            if zip_info.congressional_district_no and cd_count == 1:
                obj['place_of_performance_congr'] = zip_info.congressional_district_no
            else:
                obj['place_of_performance_congr'] = '90'
        # deriving PrimaryPlaceOfPerformanceCountyName/Code
        obj['place_of_perform_county_co'] = zip_info.county_number
        obj['place_of_perform_county_na'] = county_dict.get(zip_info.state_abbreviation + zip_info.county_number)
        # deriving PrimaryPlaceOfPerformanceCityName
        # NOTE(review): .one() raises if ZipCity has no (or multiple) rows for
        # this zip — confirm ZipCity covers every zip present in Zips.
        city_info = sess.query(ZipCity).filter_by(zip_code=zip_five).one()
        obj['place_of_performance_city'] = city_info.city_name
    # if there is no ppop zip4, we need to try to derive county/city info from the ppop code
    elif ppop_code:
        # if ppop_code is in county format (e.g. "XX**123"),
        if re.match('^[A-Z]{2}\*\*\d{3}$', ppop_code):
            # getting county name
            county_code = ppop_code[-3:]
            obj['place_of_perform_county_co'] = county_code
            obj['place_of_perform_county_na'] = county_dict.get(ppop_state_code + county_code)
            obj['place_of_performance_city'] = None
        # if ppop_code is in city format (e.g. "XX12345", excluding "XX00000")
        elif re.match('^[A-Z]{2}\d{5}$', ppop_code) and not re.match('^[A-Z]{2}0{5}$', ppop_code):
            # getting city and county name
            city_code = ppop_code[-5:]
            # NOTE(review): .first() may return None for an unknown city code;
            # the attribute accesses below would then raise — TODO confirm.
            city_info = sess.query(CityCode).filter_by(city_code=city_code, state_code=ppop_state_code).first()
            obj['place_of_performance_city'] = city_info.feature_name
            obj['place_of_perform_county_co'] = city_info.county_number
            obj['place_of_perform_county_na'] = city_info.county_name
    # if there's no ppop code, just set them all to None
    else:
        obj['place_of_perform_county_co'] = None
        obj['place_of_perform_county_na'] = None
        obj['place_of_performance_city'] = None
def derive_le_location_data(obj, sess, ppop_code, state_dict, ppop_state_code, ppop_state_name, county_dict):
    """ Deriving legal entity location values
        Args:
            obj: a dictionary containing the details we need to derive from and to
            sess: the current DB session
            ppop_code: place of performance code
            state_dict: a dictionary containing all the data for State objects keyed by state code
            ppop_state_code: state code from the place of performance code
            ppop_state_name: state name from the code from place of performance code
            county_dict: a dictionary containing all the data for County objects keyed by state code + county number
    """
    # Deriving from zip code (record type is 2 or 3 in this case)
    if obj['legal_entity_zip5']:
        # legal entity city data
        # NOTE(review): .one() raises if ZipCity has no (or multiple) rows for
        # this zip — TODO confirm coverage.
        city_info = sess.query(ZipCity).filter_by(zip_code=obj['legal_entity_zip5']).one()
        obj['legal_entity_city_name'] = city_info.city_name
        # NOTE(review): zip_data may be None if the zip is absent from Zips;
        # the attribute accesses below would then raise — TODO confirm
        # upstream validation guarantees the zip exists.
        zip_data, cd_count = get_zip_data(sess, obj['legal_entity_zip5'], obj['legal_entity_zip_last4'])
        # deriving legal entity congressional district; '90' is the fallback
        # when the district cannot be determined unambiguously
        if not obj['legal_entity_congressional']:
            if zip_data.congressional_district_no and cd_count == 1:
                obj['legal_entity_congressional'] = zip_data.congressional_district_no
            else:
                obj['legal_entity_congressional'] = '90'
        # legal entity county data
        obj['legal_entity_county_code'] = zip_data.county_number
        obj['legal_entity_county_name'] = county_dict.get(zip_data.state_abbreviation + zip_data.county_number)
        # legal entity state data
        obj['legal_entity_state_code'] = zip_data.state_abbreviation
        obj['legal_entity_state_name'] = state_dict.get(zip_data.state_abbreviation.upper())
    # deriving legal entity stuff that's based on record type of 1
    # (ppop code must be in the format XX**###, XX*****, 00FORGN for these)
    if obj['record_type'] == 1:
        county_wide_pattern = re.compile("^[a-zA-Z]{2}\*{2}\d{3}$")
        state_wide_pattern = re.compile("^[a-zA-Z]{2}\*{5}$")
        obj['legal_entity_congressional'] = None
        if county_wide_pattern.match(ppop_code):
            # legal entity county data
            county_code = ppop_code[-3:]
            obj['legal_entity_county_code'] = county_code
            obj['legal_entity_county_name'] = county_dict.get(ppop_state_code + county_code)
        if county_wide_pattern.match(ppop_code) or state_wide_pattern.match(ppop_code):
            # legal entity state data
            obj['legal_entity_state_code'] = ppop_state_code
            obj['legal_entity_state_name'] = ppop_state_name
        # legal entity cd data
        if not obj['legal_entity_congressional'] and county_wide_pattern.match(ppop_code):
            obj['legal_entity_congressional'] = obj['place_of_performance_congr']
def derive_office_data(obj, office_dict, sess):
""" Deriving office data
Args:
obj: a dictionary containing the details we need to derive from and to
office_dict: a dictionary containing all the data for Office objects keyed by office code
sess: the current DB session
"""
# If we don't have an awarding office code, we need to copy it from the earliest transaction of that award
if not obj['awarding_office_code'] or not obj['funding_office_code']:
first_transaction = None
pafa = PublishedAwardFinancialAssistance
if obj['record_type'] == 1:
# Get the minimum action date for this uri/AwardingSubTierCode combo
min_action_date = sess.query(func.min(pafa.action_date).label("min_date")). \
filter(pafa.uri == obj['uri'], pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
pafa.is_active.is_(True), pafa.record_type == 1).one()
# If we have a minimum action date, get the office codes for the first entry that matches it
if min_action_date.min_date:
first_transaction = sess.query(pafa.awarding_office_code, pafa.funding_office_code,
pafa.award_modification_amendme).\
filter(pafa.uri == obj['uri'], pafa.is_active.is_(True),
pafa.awarding_sub_tier_agency_c == obj['awarding_sub_tier_agency_c'],
func.cast_as_date(pafa.action_date) == min_action_date.min_date,
pafa.record_type == 1).first()
else:
# Get the minimum action date | |
<filename>pyNA/src/aircraft.py
import pdb
import json
import numpy as np
import pandas as pd
from dataclasses import dataclass
from pyNA.src.settings import Settings
from scipy import interpolate
@dataclass
class Aircraft:
"""
Aircraft class containing vehicle constants and aerodynamics data.
"""
# Vehicle parameters
mtow : float # Max. take-off weight [kg]
n_eng : int # Number of engines installed on the aircraft [-]
comp_lst : list # List of airframe components to include in noise analysis [-]
# Airframe parameters
af_S_h : float # Horizontal tail area (0.0 ft2) [m2]
af_S_v : float # Vertical tail area (361.8 ft2) [m2]
af_S_w : float # Wing area (3863.0 ft2) [m2]
af_b_f : float # Flap span (25.74 ft) [m]
af_b_h : float # Horizontal tail span (0. ft) [m]
af_b_v : float # Vertical tail span (57.17 ft) [m]
af_b_w : float # Wing span (94.43 ft) [m]
# High-lift devices
af_S_f : float # Flap area (120.0 ft2) [m2]
af_s : int # Number of slots for trailing-edge flaps (min. is 1) [-]
# Landing gear
af_d_mg : float # Tire diameter of main landing gear (3.08 ft) [m]
af_d_ng : float # Tire diameter of nose landing gear (3.71 ft) [m]
af_l_mg : float # Main landing-gear strut length (7.5 ft) [m]
af_l_ng : float # Nose landing-gear strut length (6.0 ft) [m]
af_n_mg : float # Number of wheels per main landing gear [-]
af_n_ng : float # Number of wheels per nose landing gear [-]
af_N_mg : float # Number of main landing gear [-]
af_N_ng : float # Number of nose landing gear [-]
mu_r : float # Rolling resistance coefficient [-]
# Engine parameters
B_fan : int # Number of fan blades [-]
V_fan : int # Number of fan vanes [-]
RSS_fan : float # Rotor-stator spacing [%]
M_d_fan : float # Relative tip Mach number of fan at design [-]
inc_F_n : float # Thrust inclination angle [deg]
TS_lower : float # Min. power setting [-]
TS_upper : float # Max. power setting [-]
# Airframe configuration
af_clean_w : bool # Clean wing (1: yes / 0: no)
af_clean_h : bool # Clean horizontal tail (1: yes / 0: no)
af_clean_v: bool # Clean vertical tail (1: yes / 0: no)
af_delta_wing : bool # Delta wing (1: yes / 0: no)
# Aerodynamics and flight performance
alpha_0 : float # Wing mounting angle [deg]
k_rot : float # Rotation coefficient (v_rot/v_stall) [-]
v_max : float # Maximum climb-out velocity [m/s]
z_max: float # Maximum climb-out altitude [m]
def __init__(self, name: str, version: str, settings: Settings) -> None:
"""
Initialize Aircraft class.
:param name: aircraft name
:type name: str
:param version: aircraft version
:type version: str
:param settings: pyna settings
:type settings: Settings
:return: None
"""
# Initialize aircraft name
self.name = name
self.version = version
# Initialize aerodynamics deck and flight performance parameters
self.aero = dict()
# self.v_stall: float
# self.v_rot: float
# Load aircraft parameters
if len(self.version) != 0:
path = settings.pyNA_directory + '/cases/' + settings.case_name + '/aircraft/' + self.name + '_' + self.version + '.json'
else:
path = settings.pyNA_directory + '/cases/' + settings.case_name + '/aircraft/' + self.name + '.json'
with open(path) as f:
params = json.load(f)
Aircraft.set_aircraft_parameters(self, **params)
def set_aircraft_parameters(self, mtow: np.float64, n_eng: np.int64, comp_lst: list, af_S_h: np.float64, af_S_v: np.float64,
af_S_w: np.float64, af_b_f: np.float64, af_b_h: np.float64, af_b_v: np.float64, af_b_w: np.float64,
af_S_f: np.float64, af_s: np.int64, af_d_mg: np.float64, af_d_ng: np.float64, af_l_mg:np.float64,
af_l_ng: np.float64, af_n_mg: np.float64, af_n_ng: np.float64, af_N_mg: np.float64, af_N_ng: np.float64,
c_d_g: np.float64, mu_r: np.float64, B_fan: np.int64, V_fan: np.int64, RSS_fan: np.float64, M_d_fan: np.float64,
inc_F_n: np.float64, TS_lower: np.float64, TS_upper: np.float64, af_clean_w: bool, af_clean_h:bool, af_clean_v: bool,
af_delta_wing: bool, alpha_0: np.float64, k_rot: np.float64, v_max: np.float64, z_max: np.float64) -> None:
"""
Set the aircraft parameters in the aircraft class.
:param mtow: Max. take-off weight [kg]
:type mtow: np.float64
:param n_eng: Number of engines installed on the aircraft [-]
:type n_eng: np.int64
:param comp_lst: List of airframe components to include [-]
:type comp_lst: list
:param af_S_h: Horizontal tail area [m2]
:type af_S_h: np.float64
:param af_S_v: Vertical tail area [m2]
:type af_S_v: np.float64
:param af_S_w: Wing area [m2]
:type af_S_w: np.float64
:param af_b_f: Flap span [m]
:type af_b_f: np.float64
:param af_b_h: Horizontal tail span [m]
:type af_b_h: np.float64
:param af_b_v: Vertical tail span [m]
:type af_b_v: np.float64
:param af_b_w: Wing span [m]
:type af_b_w: np.float64
:param af_S_f: Flap area [m2]
:type af_S_f: np.float64
:param af_s: Number of slots for trailing-edge flaps (min. is 1) [-]
:type af_s: np.int64
:param af_d_mg: Tire diameter of main landing gear [m]
:type af_d_mg: np.float64
:param af_d_ng: Tire diameter of nose landing gear [m]
:type af_d_ng: np.float64
:param af_l_mg: Main landing-gear strut length [m]
:type af_l_mg: np.float64
:param af_l_ng: Nose landing-gear strut length [m]
:type af_l_ng: np.float64
:param af_n_mg: Number of wheels per main landing gear [-]
:type af_n_mg: np.int64
:param af_n_ng: Number of wheels per nose landing gear [-]
:type af_n_ng: np.int64
:param af_N_mg: Number of main landing gear [-]
:type af_N_mg: np.int64
:param af_N_ng: Number of nose landing gear [-]
:type af_N_ng: np.int64
:param: c_d_g: Landing gear drag coefficient [-]
:type c_d_g; np.float64
:param mu_r: Rolling resistance coefficient [-]
:type mu_r: np.float64
:param B_fan: Number of fan blades [-]
:type B_fan: np.int64
:param V_fan: Number of fan vanes [-]
:type V_fan: np.int64
:param RSS_fan: Rotor-stator spacing [%]
:type RSS_fan: np.float64
:param M_d_fan: Relative tip Mach number of fan at design [-]
:type M_d_fan: np.float64
:param inc_F_n: Thrust inclination angle [deg]
:type inc_F_n: np.float64
:param TS_lower: Min. power setting [-]
:type TS_lower: np.float64
:param TS_upper: Max. power setting [-]
:type TS_upper: np.float64
:param af_clean_w: Flag for clean wing configuration [-]
:type af_clean_w: bool
:param af_clean_h: Flag for clean horizontal tail configuration [-]
:type af_clean_h: bool
:param af_clean_v: Flag for clean vertical tail configuration [-]
:type af_clean_v: bool
:param af_delta_wing: Flag for delta wing configuration [-]
:type af_delta_wing: bool
:param alpha_0: Wing mounting angle [deg]
:type alpha_0: np.float64
:param k_rot: Rotation coefficient (v_rot/v_stall) [-]
:type k_rot: np.float64
:param v_max: Maximum climb-out velocity [m/s]
:type v_max: np.float64
:param z_max: Maximum climb-out altitude [m]
:type z_max: np.float64
:return: None
"""
self.mtow = mtow
self.n_eng = n_eng
self.comp_lst = comp_lst
self.af_S_h = af_S_h
self.af_S_v = af_S_v
self.af_S_w = af_S_w
self.af_b_f = af_b_f
self.af_b_h = af_b_h
self.af_b_v = af_b_v
self.af_b_w = af_b_w
self.af_S_f = af_S_f
self.af_s = af_s
self.af_d_mg = af_d_mg
self.af_d_ng = af_d_ng
self.af_l_mg = af_l_mg
self.af_l_ng = af_l_ng
self.af_n_mg = af_n_mg
self.af_n_ng = af_n_ng
self.af_N_mg = af_N_mg
self.af_N_ng = af_N_ng
self.c_d_g = c_d_g
self.mu_r = mu_r
self.B_fan = B_fan
self.V_fan = V_fan
self.RSS_fan = RSS_fan
self.M_d_fan = M_d_fan
self.inc_F_n = inc_F_n
self.TS_lower = TS_lower
self.TS_upper = TS_upper
self.af_clean_w = af_clean_w
self.af_clean_h = af_clean_h
self.af_clean_v = af_clean_v
self.af_delta_wing = af_delta_wing
self.alpha_0 = alpha_0
self.k_rot = k_rot
self.v_max = v_max
self.z_max = z_max
return None
def load_aerodynamics(self, settings: Settings) -> None:
"""
Load aerodynamic data from aerodynamics deck.
:param settings: pyNA settings
:type settings: Settingps
:return: None
"""
# Load aerodynamics deck
if settings.ac_name == 'stca':
# Load data
self.aero['alpha'] = np.array([-2., 0., 2., 4., 6., 8., 10., 12., 15., 18., 21., 23., 25.])
self.aero['theta_flaps'] = np.array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20., 22., 24., 26.])
self.aero['theta_slats'] = np.array([-26., -24., -22., -20., -18., -16., -14., -12., -10., -8., -6., -4., -2., 0.])
self.aero['c_l'] = np.load(settings.pyNA_directory + '/cases/' + settings.case_name + '/aircraft/c_l_stca.npy')
self.aero['c_l_max'] = np.load(settings.pyNA_directory + '/cases/' + settings.case_name + '/aircraft/c_l_max_' + settings.ac_name + '.npy')
self.aero['c_d'] = np.load(settings.pyNA_directory + '/cases/' + settings.case_name + '/aircraft/c_d_stca.npy')
# Compute minimum drag flap angle during groundroll
f_c_d = interpolate.RegularGridInterpolator((self.aero['alpha'], self.aero['theta_flaps'], self.aero['theta_slats']), self.aero['c_d'])
c_d_flaps = np.zeros(50)
theta_flaps_lst = np.linspace(self.aero['theta_flaps'][0], self.aero['theta_flaps'][-1], 50)
for i, theta in enumerate(theta_flaps_lst):
c_d_flaps[i] = f_c_d((-0.85, theta, settings.theta_slats))
self.aero['theta_flaps_c_d_min_gr'] = theta_flaps_lst[np.argmin(c_d_flaps)]
elif settings.ac_name == 'a10':
self.aero['alpha'] = np.array([-2, -1, 0, 1, 2, 3, 4, 5, | |
Nyinchung Monastery',
'Chharka Bon Monastery',
'Dolpo Zhur monastery',
'Myanmar Monastery',
'Dolpo Menmo monastery',
'SHIMEN MONASTERY',
'Drakngon Pema Choeling Monastery',
'Dolpo Jampa monastery',
'Dolpo Jaglung Ladang monastery',
'Choejung Palri Monastery',
'New Hungtram monastery',
'Namgung Monastery',
'Pema Dechan Ling (Buddha Statue)',
'Dolpo Mekhem monastery',
'Dolpo Balung Monastery དོལ་པོ་སྦ་ལུང་དགོན་པ།',
'Dolpo Kharba monastery',
'Dolpo Dakmar(Tragmar)monastery',
'Dolpo Shepchok Monastery དོལ་པོ་སྲིབ་ཕྱོགས་དགོན་པ།',
'Dolpo Shering Monastery',
'Sachen Monastery',
'Dolpo Gekar(Kegar) Ladang monastery',
'Dolpo Jaglung old monastery',
'Shree Pranidhi Purna Maha Vihar',
'葉才江秋佛寺 Yangjer Monastery',
'Yalbang Monastery',
'Kag Chode',
'Tibetan Buddhist Monastery Kamroo',
'Jetavana',
'Pal Saghon Dhungkar Choezom Monastrery',
'Buddha temple',
'Buddhist Monastery, Manali',
'Tibetan Monastery',
'Buddhist Shrine',
'Mindrolling Monastery',
'Buddhist Temple In Delhi, Buddhist Temple',
'Ladakh Budh Vihar',
'Dorje Drak Monastery',
'CAMBODIAN MONASTERY',
'Buddha Temple (Tibetian Temple) बौद्ध मन्दिर',
'Maitreya Buddha Vihar',
'Sh<NAME> - श्योर्शिङ गोम्बा',
'Tabo Monastery',
'Mahabodhi Statue of Buddha',
'Buddhist monastery Akhnoor',
'Drikung Kagyu Institute',
'Advaita Ashrama, Mayavati',
'Palpung Sherabling Monastic Seat',
'The Menri Monastery',
'Dip Tse Chok Ling',
'Kardang Monastery',
'Abhayarama Meditation Centre ( Enfield Buddhist Temple)',
'West London Buddhist Centre',
'Cambridge Buddhist Temple',
'Buddhist Priory (Reading)',
'Shinnyo En',
'Kadampa Meditation Centre',
'Letchworth Buddhist Temple',
'Mindfulness Meditation Buddhist Centre',
'Association for Insight Meditation',
'Letchworth Buddhist Vihara',
'Thrangu House',
'Hampshire Buddhist Society',
'Nichiren Shu Buddhist Temple Of Uk',
'Rangjung Yeshe Gomde UK Tibetan Buddhist Centre (Nyingma & Kagyu)',
'Taplow Court',
'Wat Saddhadhamma',
'Tibetan Buddhism Center for World Peace',
'Dhamma Buddhism Decoded a personal block',
'Wat Buddhamahawanaram',
'Wat Sanghabucha',
'Austin Buddhist Vihara',
'Austin Buddhist Center',
'Palri Pema Od Ling',
'Theravada Dhamma Society of America',
'San Antonio Zen Center',
'- Insight Meditation',
'SGI-USA San Antonio',
'Hindu Temple of San Antonio',
'Buddhist Meditation Center of Austin',
'The Alamo',
'Shambhala Meditation Center of San Antonio',
'Chua Lien Huu Tinh Do Temple',
'San Antonio Missions National Historical Park',
'San Antonio Meditation Group',
'Mission of Divine Mercy',
'Thien Hau Temple',
'Austin Shambhala Meditation Center',
'Unity Church of San Antonio',
'San Antonio Museum of Art',
'Kadampa Meditation Center Texas',
'Buddhist Rassmey Monastery',
'Lone Star Buddhist Meditation Center',
'Phu Van Monastery',
'Drepung Loseling Institute of Texas',
'Amitabha Buddhist Society',
'Viet-Nam Buddhist Center',
'Zeyavati Buddha Vihara',
'Dharma Spring Temple',
'Wat Phouthasamakhy Lao',
'Thubten Richen Ling Dharma Center',
'Chua Linh Son SW',
'Chùa Phật Giác-Enlightened Buddha Temple',
'Chung Mei Temple Columbarium',
'True Buddha Society of Houston',
'Watpasrithavorn',
'Texas Cambodian Buddhist',
'Fo Guang Shan Chung Mei Temple-Houston',
'Dawn Mountain',
'International Buddhist Progress',
'Vien Thong Buddhist Temple',
'Second Long Hoa Buddhist Temple',
'Phật Quang Vietnamese Buddhist Pagoda',
'Watpa Buddhayan Meditation Center',
'Từ bi Đạo Tràng Quán Thế Âm',
'Avatamsaka Buddhist Lotus Society',
'Won Buddhism of Houston Inc',
'Southwest Zen Academy',
'Diamond Way Buddhist Center Clear Lake',
'Diamond Way Buddhist Center Houston',
'Quang Duc Temple',
'Pháp Vân Monastery',
'Lien Hoa Temple',
'Tu Viện Toàn Giác Houston',
'Kadampa Meditation Center Houston',
'Teo Chew Temple',
'Nichiren Buddhist Sangha of Texas',
'Quang Chieu Zen Monastery',
'Zhong Yue Shaolin Temple',
'Houston Zen Center',
'Universal Door Meditation Center',
'Master Cau Chin Buddhist Temple - Office',
'American Bodhi Center',
'Buu Mon Buddhist Temple',
'Phuoc Minh Monastery',
'Wat Buddharatanaram',
'Tam Bao Meditation Center',
'Queen of Vietnam Catholic Church',
'Lutcher Memorial Church Building',
'The Art Studio Inc',
'SGI-USA New Orleans Buddhist Center',
'Wat Wimuttayaram Buddhist Temple',
'Tibetan House',
'Chua Bo De Buddhist Temple',
'New Orleans Zen Temple',
'Mid City Zen',
'Vietnamese Buddhist Congregation (Chùa Vạn-đức)',
'Chua Chan Nguyen',
'Magnolia Grove Monastery',
'ISKCON',
'Unity of New Orleans Spiritual Center',
'SGI-USA New Orleans Buddhist Community Center',
'New Orleans Baptist Theological Seminary',
'Faithful Word Assembly',
'New Orleans Museum of Art',
'Saint Charles Avenue Baptist Church',
'Baptist Center for Theology and Ministry',
'Sakya Centre Buddhist Monastery',
'Gelug Monastery (Tibetan)',
'Tashi Kyil Monastery',
'Sakya Gompa, Buddhist Monastery',
'Himalayan Nyinmapa Tibetan Buddhist Monastery',
'The Main Jonang Takten Phuntsok Choeling Buddhist Monastery',
'D<NAME>, Tashi Kyil Monastery',
'Buddhist monastery BULANDSHAHR',
'Yufosi',
'Shuanggui Temple',
'Wenshu Yuan Monastery',
'Luohan Temple',
'Kumchai Buddhist monastery Temple',
'Wat Pah Samarkki',
'Wat Nitadsasanakhun Buddhist Temple of Texas',
'Chua Linh Son Temple - Leander',
'Truc Lam Buddhist Temple (Chùa Trúc Lâm)',
'Jangchub Choeling Buddhist Dharma Center',
'Wat Buddhamahamunee',
'diamond way center',
'Hindu Temple of Central Texas',
'Sgi-Usa',
'Ch<NAME>',
'Cambodian Buddhist Temple',
'Vien Giac Buddhist Temple',
"Waco Baha'i Center- Baha'i Faith", 'Himalayan Tribal Buddhist Welfare society',
'Brelangi Monastery',
'Rarang Monastery',
'Lochawa La Khang (Samdug Choeling)',
'Hango Gompa',
'Tashigang Rong Gompa',
'Buddh Temple',
'Nako Gompa',
'Ponda Monastery',
'Moorang Monestry',
'Dechen Choekhor Buddhist Monastry',
'Nyingmapa Buddhist Temple',
'Nun Buddhist Monastery',
'Kinnaur Bodh Vihar',
'Sakya Kaza Gompa, Buddhist Monastery',
'Key Gompa (Key Monastery)',
'Buddh Mandir, Nigani, Nichar',
'Dungyur temple',
'Sarabai Monastery',
"Tibetan's Monastery", 'Nako Monastery Parking',
'Buddha Statue, Dorzong monastery, Jia',
'Shashur Buddhist Monastery. Keylong',
'K<NAME>',
'Nako - Buddhist Temple',
'Chhoskhorling Boudh Sewa Sang',
'Sh Aarya Tara Mahayana Budhist Temple',
'Tibetan Temple Gorkuwala',
'Zabsang Choekhorling Monastery',
'Chimet Druppai Ghatsal Apo Rinpoche Monastery',
'Gozzangwa Monastery',
'Tayul Gompa/Monastery',
'Ashoka Buddha Vihar',
'Shedup Choephelling Buddhist Temple',
'Sh<NAME>',
'KYEGU MONASTERY',
'Von Ngari Monastery',
'Ngor Monastery',
'Bhumang Jampaling Monastery',
'Kham Kathok Tibetan Settlement',
'Peace House',
'Wat Buddhametta: Tucson Buddhist Meditation Center',
'Awam Tibetan Buddhist Institute',
'D<NAME>gyal Ling',
'SGI-USA Tucson Buddhist Center',
'Dharma Flower Buddhist Temple - Pháp Hoa Phật Tự',
'Arizona Thammaram Monastery',
'Dharma Vista Zen Center',
'Trúc Lâm Hoa Từ Zen Monastery',
'Kadampa Meditation Center Arizona',
'Arizona Buddhist Temple',
'Wat Samakki of Arizona',
'Tucson Shambhala Meditation Center',
'Vien Minh Temple 圓明精舍',
'Arizona Thammaram Nyanasampanno Monastery',
'Wat Lao Thammaram',
'Chaiya Meditation Monastery',
'Mahapajapati Monastery',
'Zen Desert Sangha',
'Arizona International Buddhist Meditation Center INCORPORATION',
'Paduma Monastery',
'Wat Khmer Monastery Lake Elsinore',
'Trúc Lâm Đại Đăng Zen Monastery',
'Azusa Myanmar Monastery',
'Metta Forest Monastery',
'Thai Buddhist Temple of CA',
'Blessing, Prosperity & Longevity Monastery',
'Deer Park Monastery',
'Sambuddhaloka Buddhist Vihara',
'Biosphere 2',
'Western American Buddhist Association',
'Buddhist Temple of San Diego',
'Compassionate Dharma Cloud Monastery',
'WAT KHMER AZ LLC',
"St Anne's Convent", 'Middle Land Chan Monastery',
'Tinh Xa Giac Ly - Buddhist Monastery',
'Pao Fa Buddhist Temple',
'Dharma Vijaya Buddhist Vihara',
"Saint Anthony's Greek Orthodox Monastery", 'Ahmadiyya Muslim Community, Tucson Arizona',
'Buddhist Temple of America',
'Las Vegas Buddhist Sangha',
'The Village Church Tucson',
'Healing Arts Qigong Center',
'The Cambodian Buddhist Temple',
'San Xavier del Bac Mission',
'Dharma Treasure Buddhist Sangha',
'Kadampa Meditation Center New Mexico',
'Vairocana Zen Monastery',
'Minh Dang Quang Buddhist Temple',
'Diamond Mountain Retreat Center',
'Sri Lanka America Buddha Dhamma Society',
'Wat Buddhanimit Thai Buddhist Temple',
'Chua Phap Hoa',
'Awakening Enlightenment Buddhist Monastery',
'Hokoji',
'Taefumi Temple',
'Sozenji',
'Manpukuji',
'Taigan Temple',
'Renkoji',
'Senpukuji',
'藤の瀧園田光洋',
'Hokedakeyakushiji',
'Korinji',
'Ongakuji',
'Zentsuji',
'正願寺',
'創価学会 大城会館',
'創価学会 球磨会館',
'創価学会 人吉会館',
'創価学会 西都会館',
'創価学会 小林会館',
'Byodoji',
'Taiyozen Temple',
'Shinryusan Hekigan Temple',
'Konrenji',
'Ryuganji',
'Kotaiji Main Hall',
'Komyoji',
'Pannavasa Meditation Center',
'Dallas Buddhist Association',
'Wat Lao Siribuddhavas',
'Tinh Xa Ngoc Nhien',
'Chùa Từ Quang',
'Brahma Vihara Monastery',
'WAT Dallas Fort Worth-Texas',
'Karma Thegsum Choling Tibetan Buddhist Meditation Center',
'Burning Coals Intl Ministry',
'Shwephonepwint Burmese Dhamma Center',
'International Society for Krishna Consciousness',
'DFW Buddhist Vihara (Texas Buddhist Meditation Center)',
'Phap Quang Temple',
'Chua Tam-Bao',
'Dallas Museum Of Art',
'Rime Buddhist Center',
'Dallas Shambhala Meditation Center',
'WAT SANGHARATTANARAM BUDDHIST TEMPLE OF OKLAHOMA',
'George W. Bush Presidential Center',
'Compassion Buddhist Temple ( CHUA DAI BI )',
'Wat Buddhamahamunee of Arlington (วัดพุทธมหามุนี)',
'Bohyun Buddha Temple & Zen Center',
'Chua Bo De Dao Trang Meditation Center',
'Wat Lao Thepnimith of Fort Worth',
'Ch<NAME>',
'Bo De Dao Trang',
'佛光山達拉斯講堂 IBPS Dallas',
'Kalachakra Buddhist Meditation Center',
"Buddha's Light Private School", 'SGI USA Dallas Center',
'W<NAME>',
'Dallas Meditation Center',
'Mahamevnawa Buddhist Monastery, Germany',
'Ramakrishna Vedanta Society of North Texas',
'Chùa Huyền Quang',
'Karya Siddhi Hanuman Temple',
'Wat Busayadhammvanaram',
'SGI-USA',
'Radha Kalachandji Temple',
'Japanese Garden',
'<NAME>',
'Gotama Buddha Vihara',
'Milton Keynes Buddhist Vihara',
'Tibet Society',
'Sunnataram Monastery',
'Ocean of Peace Meditation Hall',
'Tibetan Meditation Center',
'Wat Sovannkiri',
'Dharma Bum Temple',
'Bao Son Buddhist Temple - Chùa Bảo Sơn',
'Chua Phap Vuong',
'Wat Ratanapanya',
'International Bodhisattva Sangha (IBS)',
'Rigpa San Diego',
'Phat Da Buddhist Congregation',
'CHÙA VIÊN QUANG',
'Van Hanh Temple',
'Dharma Meditation Temple (Thiền Viện Pháp Thuận)',
'Tinh Xa Ngoc Dang',
'Thabarwa Center USA',
'Tu Viện Tường Vân',
'Wat Lao Buddharam',
'Drikung Kyobpa Choling Monastery',
'Vedanta Society Of Southern California: Ramakrishna Monastery',
'Phap Vuong Monastery',
'Quan Am Monastery - Tu Viện Quan Âm',
'Quan The Am Thien Vien',
'Chinese Friendship Association',
'Aung Chan Thar Buddhist Monastery',
'Tu Vien Phat An- Phat An Monastery',
'Monastery of our lady of Guadalupe (Trinitarians of mary)',
'Vista Buddhist Temple',
'Tibetan Buddhist Dharma',
'San Diego Buddhist Association',
'Wat Lao Boubpharam-San Diego',
'Rosemead Buddhist Monastery',
'Zen San Diego',
'Higashi Honganji Buddhist Temple',
'Southern California Buddhist',
'Indo-Chinese Association',
'Parami Buddha Vihara',
'Chua Dieu Ngu Buddhist Temple',
'Cat Son Monastery',
'Chùa Phật Tuệ, Phat Tue Temple',
'Bupwahng Sa Korean Buddhist Temple',
'Wat Lao Buddhist of Riverside',
'Santivana Buddhist Assembly Tinh Lam Vien',
'Santidham Temple of Augusta',
'Chua Dieu Quang Buddhist Temple',
'Vô Biên Hạnh Buddhist Monastery',
'Perfect Enlightenment Monastery',
'Jun Dung Sa Buddhist Temple',
'Columbia Zen Buddhist Priory',
'Community of Khmer Buddhist Monks Temple - Wat Sao Sokh San',
'Dharma Jewel Monastery (法寶寺)',
'Charleston Tibetan Society',
'Puxian Temple',
'Georgia Buddhist Vihara',
'Wat Lao Buddha Phothisaram Inc',
'Buddhanara Temple',
'Wat Sumter Buddharam',
'Dorje Ling Buddhist Center',
'Myanmar Buddhist Association, Georgia Inc.',
'Wat Lao Houeikeo Indharam',
'Forest Dhamma Monastery',
'Vien Ngo Buddhist Zen Center',
'Wat Buddha Bucha Foundation',
'Wat Lao Buddhamoongcoon',
'Cambodian Buddhist Society Inc',
'Urban Dharma',
'Wat phramahajanaka',
'Kadampa Meditation Center North Carolina',
'Hindu Temple Society',
'Embracing Simplicity Meditation Center',
'Zen Center of Georgia',
'Chua Quang Minh',
'Van-Hanh Pagoda-NC Buddhist',
'Saints Mary & Martha Monastery',
'Chùa Đông Hưng- Buddhist Education Center - Dong Hung Temple',
'Emaho Foundation for Tibetan Buddhist Studies',
'Gamrosa',
'Wat Promkunaram',
'Vietnamese Buddhist Congregation',
'Fo Guang Shan Arizona Buddhist Center',
'Kadampa Meditation Center Phoenix',
'SGI-USA Buddhist Center',
'<NAME>',
'Garchen Buddhist Institute',
'Nhu Lai Thien Tu',
'Animal Liberation Temple',
'Amitabha Stupa and Peace Park',
'Phoenix Shambhala Meditation Center',
'Mesa Arizona Temple',
'Buddha Gate Monastery',
'Shreenathji Haveli Pushtimarg Temple',
'Aloka Vihara Forest Monastery',
'Los Angeles Buddhist Union',
'Buddha By The Sea',
'Meditation Learning Center',
'Wa-Konkan Taiko Drum Dojo of Ken Koshio',
'Wat Lao Buddha Rattanararm of Arizona',
'Monastery of Divine Mercy Ct. Vn. Inc.',
'Atlanta Buddhism Association',
'The Mystical Arts of Tibet',
'Trúc Lâm Bảo Chí Zen Buddhist Temple',
'Atlanta Soto Zen Center',
'Trairatanaram Temple',
'Nipponzan Myohoji Atlanta Dojo',
'Wat Dhamma LLC (វត្តធម្មជោតិការាម)',
'Conyers Monastery',
'Tu Vien Truc Lam',
'Thiền Viện Minh Đăng Quang',
'法宝寺',
'Chua Quan The Am - Georgia State',
'Hui Tz Tao Temple',
'PHẬT GIÁO HÒA HẢO',
'Hui Tz Tao Temple (慧慈佛院)',
'Monastery of The Holy Spirit',
'Chùa Quãng Ninh',
'Mietoville Academy Inc.',
'Losel Shedrup Ling of Knoxville',
'Monastery Of The Holy Spirit',
'Atlanta Shambhala Center',
'BAPS Shri Swaminarayan Mandir',
'Monastic Heritage Center',
'GSL Monastery',
'Diamond Zen Center 금강선원',
'Yokoji Zen Mountain Center',
'Wat PHRATHATPHANOM AMERICA',
'Dharmachakra Kadampa Buddhist | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Autor: <NAME>
@Tema: Pronosticador del Futbol Ecuatoriano
@Descripcion: Genere el archivo de respuesta html, la información contenida en una instancia de la clase info_liga
'''
import recoleccion_info
import graficos_liga
import os
import sys
# CLASE VOLCADO INFO
class volcado_info:
#Constructor que toma un objeto de clase info_liga basico (solo contiene info en el campo jornada)
#Se establece como salida estandar el fichero que seria index para la info de jornada en instancia_info_liga
def __init__(self, instancia_info_liga):
self.graficas=graficos_liga.graficos_liga(instancia_info_liga)
self.index="jornada"+str(instancia_info_liga.get_jornada())+".html"
self.optimos="optimosj"+str(instancia_info_liga.get_jornada())+".html"
self.dir_imgs="imgs"
try:
print("Cargando Datos Para El Pronosticador")
os.remove(self.index)
#os.remove(self.optimos)
except:
print("Generando INDEX...")
self.fsalida=open(self.index, 'a+')
sys.stdout=self.fsalida
#Metodo que genera todos los archivos de imagen de las graficas en un directorio dado
def genera_graficas(self):
self.graficas.set_graficos_ratios_todos_guarda(self.dir_imgs)
self.graficas.set_graficos_resultados_todos_guarda(self.dir_imgs)
self.graficas.set_grafico_marca_encaja_primero_todos_guarda(self.dir_imgs)
#Metodo que genera la lineas de estilo tocadas manualmente
def estilo_manual(self):
print("<STYLE type=text/css>")
print(".pstrong{ font-weight: bold; }")
print(".pgrande{ font-size:18; }")
print(".lider{ color: blue; font-weight: bold; font-size:18; }")
print(".champions{ color: grey; font-weight: bold; font-size:18; }")
print(".uefa{ color: orange; font-weight: bold; font-size:18; }")
print(".descenso{ color: red; font-weight: bold; font-size:18; }")
print(".img-grafica{ display: inline; float: right; -webkit-column-span: in-column; -moz-column-span: in-column;} ")
print(".img-ratios{ display: inline; float: left; -webkit-column-span: in-column; -moz-column-span: in-column;} ")
print(".navbar{ background: black; text-align:center; padding: 3 0;}")
print(".link-github{ color: #363636; font-weight: bold; font-size:14; }")
print(".link-inicio{ color: #363636; font-weight: bold; }")
print("</STYLE>")
#Metodo que vuelca en el archivo la deficion del estilo
def estilo(self):
print("<link href='css/bootstrap.min.css' rel='stylesheet'>")
print("<link href='css/agency.css' rel='stylesheet'>")
print("<link href='css/backtotop.css' rel='stylesheet'>")
self.estilo_manual()
#Metodo que vuelca en el archivo la deficion del estilo para los html de enfrentamientos
def estilo_partidos(self):
print("<link href='../css/bootstrap.min.css' rel='stylesheet'>")
print("<link href='../css/agency.css' rel='stylesheet'>")
self.estilo_manual()
#Metodo que vuelva en el archivo la cabecera al completo
def cabecera(self):
print("<html>")
print("<head>")
print("<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>")
print("<title>Información Jornada "+ str(self.graficas.get_instancia_info_liga().get_jornada())+"</title>")
self.estilo()
print("</head>")
def back_to_top(self):
print("<script type='text/javascript' src='http://ajax.googleapis.com/ajax/libs/jquery/1.4.3/jquery.min.js'></script> <script>$(document).ready(function(){ // hide #back-top first $('#back-top').hide(); // fade in #back-top $(function () { $(window).scroll(function () { if ($(this).scrollTop() > 100) { $('#back-top').fadeIn(); } else { $('#back-top').fadeOut(); } }); // scroll body to 0px on click $('#back-top a').click(function () { $('body,html').animate({ scrollTop: 0 }, 800); return false; }); });});</script>")
    # Writes the full HTML information section for one team (charts, league
    # position, goal stats, home/away splits). Does nothing for unknown teams.
    def info_equipo(self, e):
        """
        Dump the HTML stats section of team ``e`` to the current stdout.

        Nothing is printed when ``e`` is not among the league's teams.

        :param e: team name key as stored by info_liga. NOTE(review): the code
                  strips the last two characters for element ids / image names
                  and the first character for the goal dictionaries — presumably
                  quoting/suffix artifacts of the scraped data; confirm against
                  recoleccion_info.
        """
        if e in self.graficas.get_instancia_info_liga().get_equipos():
            # Anchor id: team name without its 2-char suffix and without spaces.
            print("<div class='container' id='%s'>" % e[:-2].replace(' ', ''))
            print("<div class='row'>")
            print("<br>")
            print("<h2 class='section-heading'>")
            print(e, ":")
            print("</h2>")
            print("</div>")
            # Ratios and results
            # Ratio chart next to the results chart, both from dir_imgs.
            print("<div class='row'>") # watch out ("ojo"): this row wraps both images
            g=self.dir_imgs+"/ratio"+e[:-2]+".png"
            print("<img src='%s' class='img-ratios img-responsive' width=450 />" % g)
            g=self.dir_imgs+"/resultados"+e[:-2]+".png"
            print("<img src='%s' class='img-grafica img-responsive' width=450/>" % g)
            # Classification record; judging by the prints below: c[0]=position,
            # c[1]=played, c[2]=points, c[3]=wins, c[4]=draws, c[5]=losses,
            # c[6]=goals for, c[7]=goals against — verify against get_clasif().
            c=self.graficas.get_instancia_info_liga().get_clasif()[e]
            posicion=int(c[0][:])
            print("</div>")
            print("<div class='row'>")
            print("<br><p class='pgrande'>Ha jugado ", c[1], "partidos; ", c[3], "victorias, ", c[4], "empates,", c[5], "derrotas; => ", c[2], "puntos.</p>")
            # Position | CSS colour class chosen from the league position.
            if posicion==1:
                print("<p class='posicion lider'> \tPosicion: ", c[0], " .Lider</p>")
            elif posicion==18 or posicion==19 or posicion==20:
                print("<p class='posicion descenso'> Posicion: ", c[0], " .En Descenso</p>")
            elif posicion ==2 or posicion==3 or posicion==4:
                print("<p class='posicion champions'> \tPosicion: ", c[0], " </p>")
            elif posicion==5 or posicion==6:
                print("<p class='posicion uefa'> \tPosicion: ", c[0], " </p>")
            else:
                print("<p class='posicion pgrande'> \tPosicion: ", c[0], "</p>")
            print("<p class='pgrande'>LLeva: ", c[6], "goles a favor. |", c[7], "goles en contra. | ", c[8],"</p>")
            print("</div>")
            print("<br>")
            # Overall goal stats (note the e[1:] key: first character stripped).
            gall=self.graficas.get_instancia_info_liga().get_golesTotal()[e[1:]]
            print("<div class='col-md-4'>") # multi-column layout starts here!
            print("<h3>GOLES:</h3>")
            print("<p class='pstrong' >Media goles por partido: ", gall[1], "</p>")
            print("<p>Partidos con: <br>más de <strong>0.5</strong> goles: ", gall[2], "<br>más de <strong>1.5</strong>: ", gall[3], "<br>más de <strong>2.5</strong>: ", gall[4], "<br>más de <strong>3.5</strong>: ", gall[5], "<br>más de <strong>4.5</strong>: ", gall[6], "</p>")
            # Goals and record at home
            gcasa=self.graficas.get_instancia_info_liga().get_golesLocal()[e[1:]]
            pcasa=self.graficas.get_instancia_info_liga().get_partidosCasa()[e]
            print("<p class='pstrong'>Como LOCAL: </p><p>", gcasa[0], "partidos: ", pcasa[0], "victorias, ", pcasa[1], " empates, ", pcasa[2], "derrotas. <br>", pcasa[3], "goles a favor. |", pcasa[4], "goles en contra.</p>" )
            print("<p>Media goles por partido en casa: ", gcasa[1], "</p>")
            print("<p>En casa. Partidos con: <br>más de <strong>0.5</strong> goles: ", gcasa[2], "<br>más de <strong>1.5</strong>: ", gcasa[3], "<br>más de <strong>2.5</strong>: ", gcasa[4], "<br>más de <strong>3.5</strong>: ", gcasa[5], "<br>más de <strong>4.5</strong>: ", gcasa[6], "</p>")
            # Goals and record away
            gfuera=self.graficas.get_instancia_info_liga().get_golesVisitante()[e[1:]]
            pfuera=self.graficas.get_instancia_info_liga().get_partidosFuera()[e]
            print("<p class='pstrong'>Como VISITANTE: </p><p>", gfuera[0], "partidos: ",pfuera[0], "victorias, ", pfuera[1], " empates, ", pfuera[2], "derrotas. ", pfuera[3], "goles a favor. |", pfuera[4], "goles en contra.</p>" )
            print("<p>Media goles por partido en fuera: ", gfuera[1], "</p>")
            print("<p>Fuera. Partidos con: <br>más de <strong>0.5</strong> goles: ", gfuera[2], "<br>más de <strong>1.5</strong>: ", gfuera[3], "<br>más de <strong>2.5</strong>: ", gfuera[4], "<br>más de <strong>3.5</strong>: ", gfuera[5], "<br>más de <strong>4.5</strong>: ", gfuera[6], "</p>")
            print("</div>")
            print("<div class='col-md-4'>")
            print("</div>")
            print("<div class='col-md-4'>")
            # Per-match aggregates for the team
            ip=self.graficas.get_instancia_info_liga().get_infopartidos()[e]
            print("<h3>Promedio de Goles Marcados/Encajados (c. 10 minutos)</h3>")
            print("<p>")
            print("Puntos por partido: <strong>", ip[0], "</strong><br>")
            print("Partidos con mas de <strong>2.5</strong> goles(a favor+en contra): <strong>", ip[1], "</strong><br>")
            print("Partidos <strong>sin encajar: ", ip[2], "</strong><br>")
            print("Partidos <strong>sin marcar: ", ip[3], "</strong><br>")
            print("Partidos <strong>ambos equipos marcan: ", ip[4], "</strong>")
            print("</p>")
            partidoscasa=self.graficas.get_instancia_info_liga().get_partidosCasa()[e]
            partidosfuera=self.graficas.get_instancia_info_liga().get_partidosFuera()[e]
            # Share of points won at home vs away (3 per win + 1 per draw over
            # total points). NOTE(review): divides by c[2] — would raise
            # ZeroDivisionError for a team with 0 points; confirm upstream data.
            ppcasa=(int(partidoscasa[0])*3+int(partidoscasa[1]))/int(c[2])
            ppfuera=(int(partidosfuera[0])*3+int(partidosfuera[1]))/int(c[2])
            print("<p>% puntos ganados en Casa: <strong>", ppcasa*100, '%</strong></p>')
            print("<p>% puntos ganados Fuera: <strong>", ppfuera*100, '%</strong></p>')
            # Shares of goals for/against split by home/away
            pgfavorcasa=int(partidoscasa[3])/int(c[6])
            pgfavorfuera=int(partidosfuera[3])/int(c[6])
            pgcontracasa=int(partidoscasa[4])/int(c[7])
            pgcontrafuera=int(partidosfuera[4])/int(c[7])
            print("<p>")
            print("% goles a favor como local:<strong>", pgfavorcasa*100, '%</strong> <br>| ', partidoscasa[3], "de", c[6], " anotados<br>")
            print("% goles en contra como local:<strong>", pgcontracasa*100, '%</strong> <br>| ', partidoscasa[4], "de", c[7], " encajados<br>")
            print("% goles a favor como visitante:<strong>", pgfavorfuera*100, '%</strong> <br>| ', partidosfuera[3], "de", c[6], " anotados<br>")
            print("% goles en contra como visitante:<strong>", pgcontrafuera*100, '%</strong> <br>| ', partidosfuera[4], "de", c[7], " encajados<br>")
            print("</p>")
            print("</div>")
            print("</div>")
# Builds the HTML page dedicated to one match of the upcoming round.
def info_enfrentamiento(self, partido):
    """Write to stdout a complete HTML page for one upcoming fixture.

    partido: sequence whose first two items are the home (``loc``) and
    away (``vis``) team names.  The last two characters of each name are
    stripped when building anchors/filenames (presumably a suffix tag in
    the team string -- TODO confirm against the data source).
    Relies on stdout having been redirected to the output file by the
    caller (see todo_index / todo_enfrentamientos).
    """
    loc=partido[0]
    vis=partido[1]
    print("<html>")
    print("<head>")
    print("<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>")
    print("<title>Jorná ", self.graficas.get_instancia_info_liga().get_jornada(), ": ", loc, "-", vis, "</title>")
    self.estilo_partidos()
    print("</head>")
    print("<body>")
    print("<div class='container'>")
    print("<div class='intro-text'>")
    print("<h2 class='section-heading'>Info siguiente jornada partido ", loc, "-", vis, ":</h2>")
    print("</div>")
    # Nav bar with every team name; each anchor jumps to that team's
    # section on the index page (this page lives one directory below it).
    print("<nav class='navbar navbar-default navbar-fixed-top'>")
    for e in sorted(self.graficas.get_instancia_info_liga().get_equipos()):
        print("<a class='page-scroll' href=%s#%s>%s</a>" % ('../'+self.index, e[:-2].replace(' ', ''), e) )
        print(" | ")
    print("</nav>")
    # Bar with links to every match page of the next round.
    print("<h5><a class='link-inicio' href='../%s'>Inicio</a></h5>" % (str(self.index)))
    print("<h4>Partidos siguiente jornada:</h4>")
    for p in self.graficas.get_instancia_info_liga().get_siguiente_jornada():
        enlace=p[0][1:-1].replace(' ', '')+"-"+p[1][1:-1].replace(' ', '')+".html"
        print("<span><a class='navbar navbar-default' href=%s >%s</a><span>" % (str(enlace), p[0]+"-"+p[1]))
    # NOTE(review): this closes a <nav> that was already closed above, so
    # the emitted HTML is unbalanced -- confirm the intended structure.
    print("</nav>")
    # START: information for the match under analysis.
    # (original marker: AQUI ES LA PARTE A MODIFICAR -- part to modify)
    print("<div class='row'>")
    print("<h2 class='section-heading'>")
    print(loc, "-", vis, " : Jornada ", str(self.graficas.get_instancia_info_liga().get_jornada()+1))
    print("</h2>")
    print("</div>")
    # Ratio charts (goals scored/conceded) for both teams.
    print("<div class='row'>")
    gl='../'+self.dir_imgs+"/ratio"+loc[:-2]+".png"
    gv='../'+self.dir_imgs+"/ratio"+vis[:-2]+".png"
    print("<img src='%s' class='img-ratios img-responsive' width=450 />" % gl)
    print("<img src='%s' class='img-ratios img-responsive' width=450 />" % gv)
    print("</div>")
    # Results charts.
    print("<br><br>")
    print("<div class='row'>")
    gl='../'+self.dir_imgs+"/resultados"+loc[:-2]+".png"
    gv='../'+self.dir_imgs+"/resultados"+vis[:-2]+".png"
    print("<img src='%s' class='img-ratios img-responsive' width=400/>" % gl)
    print("<img src='%s' class='img-ratios img-responsive' width=400/>" % gv)
    print("</div>")
    # Scores-first / concedes-first charts.
    print("<div class='row'>")
    g='../'+self.dir_imgs+"/marcaencaja"+loc[:-2]+".png"
    print("<img src='%s' width=400/>" % g)
    g='../'+self.dir_imgs+"/marcaencaja"+vis[:-2]+".png"
    print("<img src='%s' width=400/>" % g)
    '''
    print("</div>")
    print("<div class='col-md-3'>")
    print("<h4>%s</h4>" % (loc))
    '''
    # END: information for the analysed match.
    # NOTE(review): </body> is printed here but pie() still emits the
    # footer and </html>, so the footer lands outside <body> -- confirm.
    print("</body>")
    print("<br><br>")
    self.pie()
# Dumps the detailed section of every team, in alphabetical order.
def info_equipos(self):
    """Emit the per-team info block for each team, sorted by name."""
    equipos = self.graficas.get_instancia_info_liga().get_equipos()
    for equipo in sorted(equipos):
        self.info_equipo(equipo)
# Emits the <body> of the index page: global league info plus every team.
def cuerpo(self):
    """Write the index page body to stdout.

    Emits a fixed nav of team anchors, the links to the next round's
    per-match pages (under partidos/), and then delegates the per-team
    sections to info_equipos().  Assumes stdout is already redirected to
    the output file by the caller.
    """
    print("<body>")
    print("<div class='container'>")
    print("<div class='intro-text'>")
    print("<h2 class='section-heading'>Info actualizada tras final de jornada ", self.graficas.get_instancia_info_liga().get_jornada(), ":</h2>")
    print("</div>")
    self.back_to_top()
    print("<p id='back-top'><a href=%s ><span>Vuerta parriba</span></a></p>" % (self.index))
    # Nav bar with one anchor per team pointing at its section below.
    print("<nav class='navbar navbar-default navbar-fixed-top'>")
    for e in sorted(self.graficas.get_instancia_info_liga().get_equipos()):
        print("<a class='page-scroll' href=#%s>%s</a>" % (e[:-2].replace(' ', ''), e) )
        print(" | ")
    print("</nav>")
    print("<nav>")
    # Links to the per-match pages of the next round (files in partidos/).
    print("<h4>Partidos siguiente jornada:</h4>")
    for p in self.graficas.get_instancia_info_liga().get_siguiente_jornada():
        enlace='partidos/'+p[0][1:-1].replace(' ', '')+"-"+p[1][1:-1].replace(' ', '')+".html"
        print("<a class='navbar navbar-default' href=%s >%s</a>" % (str(enlace), p[0]+"-"+p[1]))
    print("</nav>")
    print("<div class='container'><h1>Info de todos los equipos: Jornada "+str(self.graficas.get_instancia_info_liga().get_jornada())+"</h1></div>")
    # Dump every team's detailed section.
    self.info_equipos()
    print("</div>") #Fin container (end of container)
    print("</body>")
# Emits the page footer and closes the document.
def pie(self):
    """Print the fixed footer markup and the closing </html> tag."""
    footer_lines = (
        "<footer>",
        "<div class='container'>",
        "<p>Web Creada Por Esperanza <NAME></p>",
        "</div>",
        "</footer>",
        "</html>",
    )
    for line in footer_lines:
        print(line)
# Dumps the complete information of every team into the index html file.
def todo_index(self):
    """Generate the charts and the whole index page, then restore stdout.

    Assumes stdout was redirected to self.fsalida beforehand; on completion
    stdout is pointed back at the real terminal and the file is closed.
    """
    self.genera_graficas()
    self.cabecera()
    self.cuerpo()
    self.pie()
    try:
        sys.stdout = sys.__stdout__
        self.fsalida.close()
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only genuine errors should be reported here.
    except Exception:
        print("No se ha podido cerrar", str(self.fsalida))
#Metodo que genera cada html correspondiente a cada partido de la siguiente jornada
def todo_enfrentamientos(self):
directorio='partidos/'
if not os.path.isdir(directorio):
os.makedirs(directorio)
for p in self.graficas.get_instancia_info_liga().get_siguiente_jornada():
enlace=directorio+p[0][1:-1].replace(' ', | |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import math
from copy import deepcopy
import numpy as np
from sympy import (Add, Float, Integer, Max, Min, Mul, Pow, UnevaluatedExpr,
factor, log, sqrt)
from graph.types.activations import ActivationParameters
from graph.types.fusions import FusionInputParameters, FusionOutputParameters
from graph.types.input_output import ConstantInputParameters
from graph.types.others import BinaryOpParameters, UnaryOpParameters
from graph.types.tensor_arithmetic import (MatrixAddParameters,
MatrixDivParameters,
MatrixMulParameters,
MatrixSubParameters)
from utils.stats_funcs import calc_bits
from .expr_state import ExprState
from .misc import (check_mul_overflow, check_overflow, equalize_scales,
from_node)
from .sympy_types import ATLShift, ATNorm, ATScale, HSigmoid, HTanh
LOG = logging.getLogger("nntool." + __name__)
BINARY_OP_MAPPING = {
"maximum": Max,
"minimum": Min,
"pow": Pow,
}
def generate_mul(var1, var2, scaling):
    """Build a multiply node for var1 * var2.

    Without scaling, just multiply and add the integer bit counts.
    With scaling: two not-yet-scaled operands produce a plain 8-bit
    product; otherwise the operands are first adjusted by
    check_mul_overflow and the q/scale of the product is the sum/product
    of the operands' q and scale.
    """
    if not scaling:
        return ExprState(Mul(var1.expr, var2.expr), var1.ibits + var2.ibits)
    if var1.is_not_scaled and var2.is_not_scaled:
        return ExprState(Mul(var1.expr, var2.expr), 8)
    var1, var2 = check_mul_overflow(var1, var2)
    product = Mul(var1.expr, var2.expr)
    return ExprState(product,
                     var1.ibits + var2.ibits,
                     q=var1.q + var2.q,
                     scale=var1.scale * var2.scale)
def generate_div(var1, var2, scaling):
    """Build a division node for var1 / var2.

    With scaling enabled, an unscaled constant integer divisor that is an
    exact power of two is folded into an arithmetic right shift (ATNorm);
    otherwise the dividend is pre-shifted left to preserve precision and
    normalised back after the divide.

    Raises ValueError on a constant zero divisor.
    """
    if scaling:
        if var1.is_not_scaled and var2.is_not_scaled:
            return ExprState(Mul(var1.expr, Pow(var2.expr, -1)), 8)
        if var2.is_not_scaled:
            # can it be turned into a shift
            val2 = var2.expr
            if val2.is_integer:
                if val2 == 0:
                    raise ValueError("division by zero")
                ival2 = int(val2)
                # BUGFIX: the previous test (`val2 % 2 == 0` with
                # `norm = val2 // 2`) accepted ANY even divisor and computed
                # the wrong shift for everything but 2 and 4 (e.g. /8 became
                # >>4, i.e. /16).  Only exact powers of two are shifts, and
                # the shift amount is log2(divisor).  /1 still takes the
                # general path below ("worst case length comes from divide
                # by one").
                if ival2 > 1 and ival2 & (ival2 - 1) == 0:
                    norm = ival2.bit_length() - 1
                    var1.length -= norm
                    # assumes ATNorm(expr, n) is an arithmetic shift right
                    # by n -- consistent with the length adjustment above.
                    var1.expr = ATNorm(var1.expr, norm)
                    return var1
        out_scale = (var1.scale * pow(2, -var1.q))/(var2.scale * pow(2, -var2.q))
        left_shift = min(31-var1.length, 16)
        right_shift = max(left_shift // 2, 8)
        # shift left available bits
        # shift back leaving at least end_bits of available
        # change in scale is pow(2, -left_shift + right_shift)
        # worst case length comes from divide by one
        return ExprState(
            ATNorm(
                Mul(
                    ATLShift(var1.expr, left_shift),
                    Pow(var2.expr, -1)
                ),
                right_shift),
            var1.ibits + left_shift - right_shift,
            q=0,
            scale=out_scale * pow(2, -left_shift + right_shift))
    return ExprState(Mul(var1.expr, Pow(var2.expr, -1)), min(var1.ibits - var2.ibits + 1, 0))
def generate_sqrt(var, scaling):
    """Build a square-root node for var.

    When scaling, the operand is first left-shifted into a Q15 fraction,
    the shift correction is folded into the output scale, and the result
    keeps half of the (shifted) integer bits.
    """
    if not scaling:
        return ExprState(sqrt(var.expr), int(math.ceil(var.ibits / 2)))
    if var.is_not_scaled:
        return ExprState(sqrt(var.expr), var.ibits)
    # adjust to Q15 fraction
    shift = 15 - var.length
    shifted = var.expr if shift == 0 else ATLShift(var.expr, shift)
    subexpr = sqrt(shifted)
    # The operand is now a Q15 fraction, so the scale carries the Q15
    # factor minus the shift correction.
    cor_scale = math.sqrt(var.scale * 2**(15 - shift))
    out_scale = cor_scale * pow(2, -15)
    return ExprState(subexpr,
                     int(math.ceil((var.ibits + shift) / 2)),
                     q=0,
                     scale=out_scale)
def generate_log(var, scaling):
    """Build a natural-log node for var.

    When scaling, the operand is left-shifted into a Q15 fraction and the
    log of the scale correction is added as a precomputed Q15 constant,
    leaving the output with a pure Q15 scale.
    """
    if scaling:
        if var.is_not_scaled:
            # BUGFIX: was `math.log(var.expr)`, which raises on a symbolic
            # operand -- the symbolic sympy `log` must be used here, matching
            # the scaled branch below and generate_sqrt's unscaled branch.
            return ExprState(log(var.expr), var.ibits)
        # adjust to Q15 fraction
        shift = 15 - var.length
        if shift == 0:
            subexpr = log(var.expr)
        else:
            subexpr = log(ATLShift(var.expr, shift))
        # log(a * b) == log(a) + log(b): fold the scale correction in as a
        # rounded Q15 integer constant added to the result.
        cor_scale = int(math.floor(math.log(var.scale * 2**(15-shift)) * pow(2, 15) + 0.5))
        # Scale is now 1 * Q15
        out_scale = pow(2, -15)
        return ExprState(UnevaluatedExpr(Add(subexpr, cor_scale)), int(math.ceil((var.ibits + shift) / 2)), q=0, scale=out_scale)
    return ExprState(log(var.expr), int(math.ceil(var.ibits / 2)))
def generate_tanh(var, scaling):
    """Build a hard-tanh node for var.

    When scaling we want to (a) represent 1 precisely and (b) guarantee
    the rescale to that representation cannot overflow.  The output scale
    is the largest power of two that is both <= the current scale and
    <= 2**-7.
    """
    if not scaling:
        return ExprState(HTanh(var.expr, None, None), var.ibits)
    # Closest power of two not above the current scale, capped at 2**-7.
    repr_pow = min(math.floor(math.log2(var.scale)), -7)
    new_scale = pow(2, repr_pow)
    new_q = 0
    # Check the rescaled maximum value still fits in 31 bits.
    cur_max_val = math.ceil(pow(2, var.ibits) * var.scale)
    rescaled_max = math.ceil(cur_max_val / new_scale)
    assert calc_bits(rescaled_max) + var.q <= 31, "risk of overflow in htanh"
    scaled_expr = ATScale.from_scales(var.expr, var.scale, new_scale,
                                      28 - var.length, to_q=new_q, from_q=var.q)
    return ExprState(HTanh(scaled_expr, new_q, new_scale),
                     abs(repr_pow) + 1,
                     q=new_q,
                     scale=new_scale)
def generate_sigmoid(var, scaling):
    """Build a hard-sigmoid node for var.

    HSigmoid represents its internal 1/6 constant in Q7 always, so the
    operand is overflow-checked against 7 extra bits and the output scale
    gains a 2**-7 factor.
    """
    if not scaling:
        return ExprState(HSigmoid(var.expr, None, None), var.ibits + 7)
    var = check_overflow(var, 7)
    return ExprState(HSigmoid(var.expr, var.q, var.scale),
                     var.ibits + 7,
                     q=var.q,
                     scale=var.scale * pow(2, -7))
def generate_pow(var1, var2, scaling):
    """Build a power node var1 ** var2.

    Only a restricted exponent set is supported (per the error text
    below): positive fractional constants, 0, 1, -2 or 2.  The exponent
    must be a numeric constant.
    """
    if scaling:
        # Pre-shift the base into a Q15 fraction for the general
        # fractional-exponent case at the bottom.
        shift1 = 15 - var1.length
        cor_scale1 = var1.scale * 2**(15 - shift1)
        subexpr1 = ATLShift(var1.expr, shift1) if shift1 != 0 else var1.expr
        if not var2.expr.is_number:
            raise NotImplementedError(
                "power is currently only supported with positive fractional constants, 0, 1, -2 or 2")
        # NOTE(review): every other test here inspects `.expr`; confirm
        # ExprState itself exposes `is_number`, otherwise this should
        # probably be `var1.expr.is_number`.
        if var1.is_number:
            return ExprState(var1.expr**var2.expr, 7)
        if var2.expr == 2:
            return generate_mul(var1, var1, scaling)
        # NOTE(review): exponent -2 is routed to sqrt; mathematically
        # x**-2 is 1/x**2, not sqrt(x) -- confirm this is a deliberate
        # encoding convention of the expression DSL.
        if var2.expr == -2:
            return generate_sqrt(var1, scaling)
        if var2.expr == 1:
            return var1
        if var2.expr == 0:
            return ExprState(1, 1, q=0, scale=1)
        if var2.expr > 1 or var2.expr < 0:
            raise NotImplementedError(
                "power is currently only supported with positive fractional constants, 0, 1, -2 or 2")
        # Fractional exponent in (0, 1): encode it as a rounded Q15 integer.
        arg2 = int(math.floor(var2.expr * pow(2, 15) + 0.5))
        out_scale = pow(cor_scale1, var2.expr) * pow(2, -15)
        return ExprState(Pow(subexpr1, arg2), 15, q=0, scale=out_scale)
    return ExprState(Pow(var1.expr, var2.expr), 15)
def compose_expression(G, node_symbol_map, node, scaling=False, first=True) -> ExprState:
""" Walks down an expression in the graph and returns a Sympy equivalent. If
scaling is True then the quantization and necessary scaling is dynamically created.
node_symbol_map contains a map of node to ExprState instances containing symbols and scaling."""
if not first and node in node_symbol_map:
var = deepcopy(node_symbol_map[node])
LOG.info("using cached %s %s.%s %s", node.name, var.ibits, var.q, var.scale)
return var
if isinstance(node, ActivationParameters):
var = compose_expression(
G, node_symbol_map, from_node(G, node, idx=0),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner: %s", node.name, var)
if node.activation in ["tanh", "htanh"]:
return generate_tanh(var, scaling)
if node.activation in ["sigmoid", "hsigmoid"]:
return generate_sigmoid(var, scaling)
raise NotImplementedError()
if isinstance(node, (MatrixAddParameters, MatrixSubParameters)):
var1 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=0),
scaling=scaling, first=False)
var2 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=1),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner1: %s \nInner2: %s", node.name, var1, var2)
if isinstance(node, MatrixSubParameters):
var2.expr = Mul(var2.expr, -1)
if scaling:
if var1.is_not_scaled and var2.is_not_scaled:
return ExprState(Add(var1.expr, var2.expr), 8)
var1, var2 = equalize_scales(var1, var2, threshold=30)
assert var1.q == var2.q, "safety check"
ibits = max(var1.ibits, var2.ibits)
if isinstance(node, MatrixSubParameters):
return ExprState(factor(Add(var1.expr, Mul(var2.expr, -1 * pow(2, var2.q)))), ibits, q=var1.q, scale=var1.scale)
# addition potentially adds a bit
ibits += 1
return ExprState(factor(Add(var1.expr, var2.expr)), ibits, q=var1.q, scale=var1.scale)
if isinstance(node, MatrixMulParameters):
var1 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=0),
scaling=scaling, first=False)
var2 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=1),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner1: %s \nInner2: %s", node.name, var1, var2)
return generate_mul(var1, var2, scaling)
if isinstance(node, MatrixDivParameters):
var1 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=0),
scaling=scaling, first=False)
var2 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=1),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner1: %s \nInner2: %s", node.name, var1, var2)
return generate_div(var1, var2, scaling)
if isinstance(node, BinaryOpParameters):
op = BINARY_OP_MAPPING.get(node.op_type)
if op is None:
raise NotImplementedError("%s is not implemented in expression fusion" % node.op_type)
var1 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=0),
scaling=scaling, first=False)
var2 = compose_expression(
G, node_symbol_map, from_node(G, node, idx=1),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner1: %s \nInner2: %s", node.name, var1, var2)
if node.op_type == "maximum" or node.op_type == "minimum":
if scaling:
if var1.is_not_scaled and var2.is_not_scaled:
return ExprState(op(var1.expr, var2.expr), max(var1.ibits, var2.ibits))
var1, var2 = equalize_scales(var1, var2)
return ExprState(op(var1.expr, var2.expr), max(var1.ibits, var2.ibits), q=var1.q, scale=var1.scale)
if node.op_type == "pow":
return generate_pow(var1, var2, scaling)
if isinstance(node, UnaryOpParameters):
var = compose_expression(
G, node_symbol_map, from_node(G, node),
scaling=scaling, first=False)
LOG.debug("Node: %s \nInner: %s", node.name, var)
if node.op_type == "sqrt":
return generate_sqrt(var, scaling)
if node.op_type == "log":
return generate_log(var, scaling)
raise NotImplementedError("%s is not implemented in expression fusion" % node.op_type)
if isinstance(node, FusionInputParameters):
LOG.debug("Node: %s", node.name)
return node_symbol_map[node]
if isinstance(node, FusionOutputParameters):
return compose_expression(G, node_symbol_map, from_node(G, node),
scaling=scaling, first=False)
if isinstance(node, ConstantInputParameters):
# only scalars are absorbed into expression
# cope with arrays, numpy arrays and scalars
# whether scaling or not always return the real number
# The scaling will occur later when the constant is used
value = np.array(node.value).flatten()[0]
LOG.debug("Node: %s Value: %s", node.name, value)
if value == math.floor(value):
return ExprState(Integer(value), 7)
return ExprState(Float(value), 7)
raise NotImplementedError("don't know | |
key in enumerate(pos_labels[:-2]):
pos_sliders[key] = widgets.IntSlider(value=0, min=0, max=pos_dims[ikey] - 1,
step=1, description='{} Step:'.format(key),
continuous_update=False)
def update_sho_plots(sho_quantity, step_ind):
    """Refresh the SHO spatial map for the chosen quantity and bias step.

    Closure over bias_slider, spatial_dict, step_chan, pc_sho_dset,
    bias_mat, ax_map and img_map from the enclosing visualizer scope.
    """
    bias_slider.set_xdata((step_ind, step_ind))
    spatial_dict[step_chan] = [step_ind]
    spatial_map = pc_sho_dset.slice(spatial_dict, as_scalar=False)[0][sho_quantity].squeeze()
    map_title = '{} - {}={}'.format(sho_quantity, step_chan, bias_mat[step_ind][0])
    ax_map.set_title(map_title)
    img_map.set_data(spatial_map.T)
    # Clip the color scale to mean +/- 3 sigma to suppress outliers.
    spat_mean = np.mean(spatial_map)
    spat_std = np.std(spatial_map)
    img_map.set_clim(vmin=spat_mean - 3 * spat_std, vmax=spat_mean + 3 * spat_std)
def update_resp_plot(resp_dict):
    """Redraw the response-vs-bias curves for the position in resp_dict.

    Closure over resp_func, pc_sho_dset, bias_mat, line_handles and
    ax_loop from the enclosing visualizer scope.
    """
    resp_vec = resp_func(pc_sho_dset.slice(resp_dict, as_scalar=False)[0].reshape(bias_mat.shape)).T
    for line_handle, data in zip(line_handles, resp_vec):
        line_handle.set_ydata(data)
    # Rescale the loop axis to the freshly assigned data.
    ax_loop.relim()
    ax_loop.autoscale_view()
def pos_picker(event):
    """Mouse-press callback: move the crosshair to the clicked pixel and
    refresh the response plot for that position.

    Ignores clicks that land outside the spatial map axes.
    """
    if not img_map.axes.in_axes(event):
        return
    # Snap the click to the nearest integer pixel index.
    xdata = int(round(event.xdata))
    ydata = int(round(event.ydata))
    resp_dict[pos_labels[-1]] = [xdata]
    resp_dict[pos_labels[-2]] = [ydata]
    crosshair.set_xdata(xdata)
    crosshair.set_ydata(ydata)
    update_resp_plot(resp_dict)
    fig.canvas.draw()
def pos_slider_update(slider):
    """Slider callback: push the extra position-dimension slider values
    into both slice dictionaries and refresh map and response plots.

    The `slider` argument is required by widgets.interact but unused; the
    current values are read directly from pos_sliders.
    """
    for key in pos_labels[:-2]:
        spatial_dict[key] = [pos_sliders[key].value]
        resp_dict[key] = [pos_sliders[key].value]
    step = bias_step_picker.value
    sho_quantity = sho_quantity_picker.value
    update_resp_plot(resp_dict)
    update_sho_plots(sho_quantity, step)
    fig.canvas.draw()
slider_dict = dict()
slider_dict['Bias Step'] = (0, bias_mat.shape[0] - 1, 1)
sho_quantity_picker = widgets.Dropdown(options=list(sho_dset_collapsed.dtype.names[:-1]),
description='SHO Quantity')
bias_step_picker = widgets.IntSlider(min=0, max=bias_mat.shape[0] - 1, step=1,
description='Bias Step')
fig_filename, _ = os.path.splitext(pc_sho_dset.file.filename)
display(save_fig_filebox_button(fig, fig_filename + '.png'))
for key, slider in pos_sliders.items():
widgets.interact(pos_slider_update, slider=slider)
cid = img_map.figure.canvas.mpl_connect('button_press_event', pos_picker)
widgets.interact(update_sho_plots, sho_quantity=sho_quantity_picker, step_ind=bias_step_picker)
return fig
def jupyter_visualize_be_spectrograms(pc_main, cmap=None):
    """
    Jupyter notebook ONLY function. Sets up a simple visualizer for raw BE data.
    Sliders for position indices can be used to visualize BE spectrograms (frequency, UDVS step).
    In the case of 2 spatial dimensions, a spatial map will be provided as well

    Parameters
    ----------
    pc_main : USIDataset
        Raw Band Excitation dataset
    cmap : String, or matplotlib.colors.LinearSegmentedColormap object (Optional)
        Requested color map

    Returns
    -------
    fig : matplotlib.figure.Figure
        The interactive figure
    """
    cmap = get_cmap_object(cmap)
    h5_pos_inds = pc_main.h5_pos_inds
    pos_dims = pc_main.pos_dim_sizes
    pos_labels = pc_main.pos_dim_labels
    h5_spec_vals = pc_main.h5_spec_vals
    h5_spec_inds = pc_main.h5_spec_inds
    spec_dims = pc_main.spec_dim_sizes
    spec_labels = pc_main.spec_dim_labels
    ifreq = spec_labels.index('Frequency')
    freqs_nd = reshape_to_n_dims(h5_spec_vals, h5_spec=h5_spec_inds)[0][ifreq].squeeze()
    freqs_2d = freqs_nd.reshape(freqs_nd.shape[0], -1) / 1000  # Convert to kHz
    # Number of UDVS steps = product of all spectroscopic dims except Frequency
    num_udvs_steps = int(np.prod([spec_dims[idim] for idim in range(len(spec_dims)) if idim != ifreq]))
    if len(pos_dims) >= 2:
        # Build initial slice dictionaries
        spatial_slice_dict = {'X': slice(None), 'Y': slice(None)}
        for key in pos_labels:
            if key in spatial_slice_dict.keys():
                continue
            else:
                spatial_slice_dict[key] = [0]
        spectrogram_slice_dict = {key: [0] for key in pos_labels}
        spatial_slice, _ = pc_main._get_pos_spec_slices(slice_dict=spatial_slice_dict)
        x_size = pos_dims[-1]
        y_size = pos_dims[-2]
        # Rows = Y, columns = X throughout.
        spatial_map = np.abs(np.reshape(pc_main[spatial_slice, 0], (y_size, x_size)))
        spectrogram = np.reshape(pc_main[0], (num_udvs_steps, -1))
        fig, axes = plt.subplots(ncols=3, figsize=(12, 4), subplot_kw={'adjustable': 'box'})
        spatial_img, spatial_cbar = plot_map(axes[0], np.abs(spatial_map), cmap=cmap)
        axes[0].set_aspect('equal')
        axes[0].set_xlabel(pos_labels[-1])
        axes[0].set_ylabel(pos_labels[-2])
        # Start the crosshair at the center of the map.
        xdata = int(0.5 * x_size)
        ydata = int(0.5 * y_size)
        crosshair = axes[0].plot(xdata, ydata, 'k+')[0]
        if len(spec_dims) > 1:
            amp_img, amp_cbar = plot_map(axes[1], np.abs(spectrogram), show_xy_ticks=True, cmap=cmap,
                                         extent=[freqs_2d[0, 0], freqs_2d[-1, 0], 0, num_udvs_steps])
            phase_img, phase_cbar = plot_map(axes[2], np.angle(spectrogram), show_xy_ticks=True, cmap=cmap,
                                             extent=[freqs_2d[0, 0], freqs_2d[-1, 0], 0, num_udvs_steps])
            phase_img.set_clim(vmin=-np.pi, vmax=np.pi)
            for axis in axes[1:3]:
                axis.set_ylabel('BE step')
                axis.axis('tight')
                x0, x1 = (freqs_2d[0, 0], freqs_2d[-1, 0])
                y0, y1 = (0, num_udvs_steps)
                axis.set_aspect(np.abs(x1 - x0) / np.abs(y1 - y0))
        else:
            # BE-Line: a single spectrum per position, plotted as lines.
            axes[1].set_ylabel('Amplitude (a. u.)')
            axes[2].set_ylabel('Phase (rad)')
            spectrogram = np.squeeze(spectrogram)
            amp_img = axes[1].plot(np.abs(spectrogram))[0]
            phase_img = axes[2].plot(np.angle(spectrogram))[0]
            amp_full = np.abs(pc_main[()])
            amp_mean = np.mean(amp_full)
            amp_std = np.std(amp_full)
            st_devs = 4
            axes[1].set_ylim([0, amp_mean + st_devs * amp_std])
            axes[2].set_ylim([-np.pi, np.pi])
        pos_heading = pos_labels[-1] + ': ' + str(xdata) + ', ' + \
                      pos_labels[-2] + ': ' + str(ydata) + ', '
        for dim_name in pos_labels[-3::-1]:
            pos_heading += dim_name + ': ' + str(spatial_slice_dict[dim_name]) + ', '
        axes[1].set_title('Amplitude \n' + pos_heading)
        axes[1].set_xlabel('Frequency (kHz)')
        axes[2].set_title('Phase \n' + pos_heading)
        axes[2].set_xlabel('Frequency (kHz)')
        fig.tight_layout()
        fig_filename, _ = os.path.splitext(pc_main.file.filename)
        display(save_fig_filebox_button(fig, fig_filename + '.png'))
        # Build sliders for any extra Position Dimensions
        pos_sliders = dict()
        for ikey, key in enumerate(pos_labels[:-2]):
            pos_sliders[key] = widgets.IntSlider(value=0, min=0, max=pos_dims[ikey] - 1,
                                                 step=1, description='{} Step:'.format(key),
                                                 continuous_update=False)

        def get_spatial_slice():
            # Translate crosshair position + extra-dimension sliders into a slice.
            xdata, ydata = crosshair.get_xydata().squeeze()
            spatial_slice_dict[pos_labels[-1]] = [int(xdata)]
            spatial_slice_dict[pos_labels[-2]] = [int(ydata)]
            for key in pos_labels[:-2]:
                spatial_slice_dict[key] = [pos_sliders[key].value]
            spatial_slice, _ = pc_main._get_pos_spec_slices(slice_dict=spatial_slice_dict)
            return spatial_slice

        def spec_index_unpacker(step):
            # Refresh the spatial map for spectroscopic index `step`.
            spatial_slice_dict[pos_labels[-1]] = slice(None)
            spatial_slice_dict[pos_labels[-2]] = slice(None)
            for key in pos_labels[:-2]:
                spatial_slice_dict[key] = [pos_sliders[key].value]
            spatial_slice, _ = pc_main._get_pos_spec_slices(slice_dict=spatial_slice_dict)
            # BUGFIX: was reshaped to (x_size, y_size) while the image above
            # was built from (y_size, x_size); the map was transposed for
            # non-square scans.
            spatial_map = np.abs(np.reshape(pc_main[spatial_slice, step], (y_size, x_size)))
            spatial_img.set_data(spatial_map)
            spat_mean = np.mean(spatial_map)
            spat_std = np.std(spatial_map)
            spatial_img.set_clim(vmin=spat_mean - 3 * spat_std, vmax=spat_mean + 3 * spat_std)
            spec_heading = ''
            for dim_ind, dim_name in enumerate(spec_labels):
                spec_heading += dim_name + ': ' + str(h5_spec_vals[dim_ind, step]) + ', '
            axes[0].set_title(spec_heading[:-2])
            fig.canvas.draw()

        def pos_picker(event):
            # Mouse-press callback: move the crosshair and refresh the spectrogram.
            if not spatial_img.axes.in_axes(event):
                return
            xdata = int(round(event.xdata))
            ydata = int(round(event.ydata))
            crosshair.set_xdata(xdata)
            crosshair.set_ydata(ydata)
            spatial_slice = get_spatial_slice()
            pos_heading = pos_labels[-1] + ': ' + str(xdata) + ', ' + \
                          pos_labels[-2] + ': ' + str(ydata) + ', '
            for dim_name in pos_labels[-3::-1]:
                pos_heading += dim_name + ': ' + str(spatial_slice_dict[dim_name]) + ', '
            axes[1].set_title('Amplitude \n' + pos_heading)
            axes[2].set_title('Phase \n' + pos_heading)
            spectrogram = np.reshape(pc_main[spatial_slice, :], (num_udvs_steps, -1))
            if len(spec_dims) > 1:
                amp_map = np.abs(spectrogram)
                amp_img.set_data(np.abs(spectrogram))
                phase_img.set_data(np.angle(spectrogram))
                amp_mean = np.mean(amp_map)
                amp_std = np.std(amp_map)
                amp_img.set_clim(vmin=amp_mean - 3 * amp_std, vmax=amp_mean + 3 * amp_std)
                # BUGFIX: the colorbars only exist in this (2D spectrogram)
                # branch; calling .changed() unconditionally raised a
                # NameError in the BE-Line case.
                amp_cbar.changed()
                phase_cbar.changed()
            else:
                amp_img.set_ydata(np.abs(spectrogram))
                phase_img.set_ydata(np.angle(spectrogram))
            fig.canvas.draw()

        def pos_slider_update(slider):
            # Extra-dimension slider callback: refresh both map and spectrogram.
            spatial_slice = get_spatial_slice()
            step = spec_index_slider.value
            spec_index_unpacker(step)
            # NOTE(review): xdata/ydata here are the initial center values
            # captured from the enclosing scope, not the current crosshair
            # position -- the heading may be stale after a click; verify.
            pos_heading = pos_labels[-1] + ': ' + str(xdata) + ', ' + \
                          pos_labels[-2] + ': ' + str(ydata) + ', '
            for dim_name in pos_labels[-3::-1]:
                pos_heading += dim_name + ': ' + str(spatial_slice_dict[dim_name]) + ', '
            axes[1].set_title('Amplitude \n' + pos_heading)
            axes[2].set_title('Phase \n' + pos_heading)
            spectrogram = np.reshape(pc_main[spatial_slice, :], (num_udvs_steps, -1))
            if len(spec_dims) > 1:
                amp_img.set_data(np.abs(spectrogram))
                phase_img.set_data(np.angle(spectrogram))
                # BUGFIX: same colorbar guard as in pos_picker above.
                amp_cbar.changed()
                phase_cbar.changed()
            else:
                amp_img.set_ydata(np.abs(spectrogram))
                phase_img.set_ydata(np.angle(spectrogram))
            fig.canvas.draw()

        # BUGFIX: max index is shape[1] - 1; the old max=shape[1] allowed an
        # out-of-range spectroscopic index at the top of the slider.
        spec_index_slider = widgets.IntSlider(value=0, min=0, max=pc_main.shape[1] - 1, step=1,
                                              description='Step')
        cid = spatial_img.figure.canvas.mpl_connect('button_press_event', pos_picker)
        widgets.interact(spec_index_unpacker, step=spec_index_slider)
        for key, slider in pos_sliders.items():
            widgets.interact(pos_slider_update, slider=slider)
        # plt.show()
    else:
        def plot_spectrogram(data, freq_vals):
            # Static amplitude/phase spectrogram pair for the 1D-position case.
            fig, axes = plt.subplots(ncols=2, figsize=(9, 5), sharey=True)
            im_handles = list()
            im_handles.append(axes[0].imshow(np.abs(data), cmap=cmap,
                                             extent=[freqs_2d[0, 0], freqs_2d[-1, 0],
                                                     data.shape[0], 0],
                                             interpolation='none'))
            axes[0].set_title('Amplitude')
            axes[0].set_ylabel('BE step')
            im_handles.append(axes[1].imshow(np.angle(data), cmap=cmap,
                                             extent=[freqs_2d[0, 0], freqs_2d[-1, 0],
                                                     data.shape[0], 0],
                                             interpolation='none'))
            axes[1].set_title('Phase')
            axes[0].set_xlabel('Frequency index')
            axes[1].set_xlabel('Frequency index')
            for axis in axes:
                axis.axis('tight')
                axis.set_ylim(0, data.shape[0])
            fig.tight_layout()
            return fig, axes, im_handles

        fig, axes, im_handles = plot_spectrogram(np.reshape(pc_main[0], (num_udvs_steps, -1)), freqs_2d)

        def position_unpacker(**kwargs):
            # BUGFIX: was `pos_dim_vals = range(...)` followed by item
            # assignment, which raises TypeError on Python 3 (range objects
            # are immutable).
            pos_dim_vals = [0] * len(pos_labels)
            for pos_dim_ind, pos_dim_name in enumerate(pos_labels):
                pos_dim_vals[pos_dim_ind] = kwargs[pos_dim_name]
            pix_ind = pos_dim_vals[0]
            # NOTE(review): this stride uses only the previous dimension's
            # size instead of a cumulative product of all faster-varying
            # dimensions -- verify correctness for > 2 position dimensions.
            for pos_dim_ind in range(1, len(pos_labels)):
                pix_ind += pos_dim_vals[pos_dim_ind] * pos_dims[pos_dim_ind - 1]
            spectrogram = np.reshape(pc_main[pix_ind], (num_udvs_steps, -1))
            im_handles[0].set_data(np.abs(spectrogram))
            im_handles[1].set_data(np.angle(spectrogram))
            display(fig)

        pos_dict = dict()
        for pos_dim_ind, dim_name in enumerate(pos_labels):
            pos_dict[dim_name] = (0, pos_dims[pos_dim_ind] - 1, 1)
        widgets.interact(position_unpacker, **pos_dict)
        display(fig)
    return fig
def jupyter_visualize_beps_loops(h5_projected_loops, h5_loop_guess, h5_loop_fit, step_chan='DC_Offset', cmap=None):
"""
Interactive plotting of the BE Loops
Parameters
----------
h5_projected_loops : h5py.Dataset
Dataset holding the loop projections
h5_loop_guess : h5py.Dataset
Dataset holding the loop guesses
h5_loop_fit : h5py.Dataset
Dataset holding the loop fits
step_chan : str, optional
The name of the Spectroscopic dimension to plot versus. Needs testing.
Default 'DC_Offset'
cmap : String, or matplotlib.colors.LinearSegmentedColormap object (Optional)
Requested color map
Returns
-------
None
"""
cmap = get_cmap_object(cmap)
# Prepare some variables for plotting loops fits and guesses
# Plot the Loop Guess and Fit Results
proj_nd, _ = reshape_to_n_dims(h5_projected_loops)
guess_nd, _ = reshape_to_n_dims(h5_loop_guess)
fit_nd, _ = reshape_to_n_dims(h5_loop_fit)
h5_projected_loops = h5_loop_guess.parent['Projected_Loops']
h5_proj_spec_inds = get_auxiliary_datasets(h5_projected_loops,
aux_dset_name='Spectroscopic_Indices')[-1]
h5_proj_spec_vals = get_auxiliary_datasets(h5_projected_loops,
aux_dset_name='Spectroscopic_Values')[-1]
h5_pos_inds = get_auxiliary_datasets(h5_projected_loops,
aux_dset_name='Position_Indices')[-1]
pos_nd, _ = reshape_to_n_dims(h5_pos_inds, h5_pos=h5_pos_inds)
pos_dims = list(pos_nd.shape[:h5_pos_inds.shape[1]])
pos_labels = get_attr(h5_pos_inds, 'labels')
# reshape the vdc_vec into DC_step by Loop
spec_nd, _ = reshape_to_n_dims(h5_proj_spec_vals, h5_spec=h5_proj_spec_inds)
loop_spec_dims = np.array(spec_nd.shape[1:])
loop_spec_labels = get_attr(h5_proj_spec_vals, 'labels')
spec_step_dim_ind = np.where(loop_spec_labels == step_chan)[0][0]
# # move the step dimension to be the first after all position dimensions
rest_loop_dim_order = list(range(len(pos_dims), len(proj_nd.shape)))
rest_loop_dim_order.pop(spec_step_dim_ind)
new_order = list(range(len(pos_dims))) + [len(pos_dims) + spec_step_dim_ind] + rest_loop_dim_order
new_spec_order = np.array(new_order[len(pos_dims):], dtype=np.uint32) - len(pos_dims)
# Also reshape the projected loops to Positions-DC_Step-Loop
final_loop_shape = pos_dims + [loop_spec_dims[spec_step_dim_ind]] + [-1]
proj_nd2 = np.moveaxis(proj_nd, spec_step_dim_ind + len(pos_dims), len(pos_dims))
proj_nd_3 = np.reshape(proj_nd2, final_loop_shape)
# Do the same for the guess and fit datasets
guess_3d = np.reshape(guess_nd, pos_dims + [-1])
fit_3d = np.reshape(fit_nd, pos_dims + [-1])
# Get the bias vector:
spec_nd2 = np.moveaxis(spec_nd[spec_step_dim_ind], spec_step_dim_ind, 0)
bias_vec = np.reshape(spec_nd2, final_loop_shape[len(pos_dims):])
# Shift the bias vector and the loops by a quarter cycle
shift_ind = int(-1 * bias_vec.shape[0] / 4)
bias_shifted = np.roll(bias_vec, shift_ind, axis=0)
proj_nd_shifted = np.roll(proj_nd_3, shift_ind, axis=len(pos_dims))
# This is just the visualizer:
loop_field_names = fit_nd.dtype.names
loop_field = loop_field_names[0]
loop_ind = 0
row_ind = 0
col_ind = 0
# Initial plot data
spatial_map = fit_3d[:, :, loop_ind][loop_field]
proj_data = proj_nd_shifted[col_ind, row_ind, :, loop_ind]
bias_data = bias_shifted[:, loop_ind]
guess_data = loop_fit_function(bias_data, np.array(list(guess_3d[col_ind, row_ind, loop_ind])))
fit_data = loop_fit_function(bias_data, np.array(list(fit_3d[col_ind, row_ind, loop_ind])))
fig = plt.figure(figsize=(12, 8))
ax_map | |
# source repository: alexbowers/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetInstanceResult',
'AwaitableGetInstanceResult',
'get_instance',
]
@pulumi.output_type
class GetInstanceResult:
"""
A collection of values returned by getInstance.
"""
def __init__(__self__, address=None, allocated_storage=None, auto_minor_version_upgrade=None, availability_zone=None, backup_retention_period=None, ca_cert_identifier=None, db_cluster_identifier=None, db_instance_arn=None, db_instance_class=None, db_instance_identifier=None, db_instance_port=None, db_name=None, db_parameter_groups=None, db_security_groups=None, db_subnet_group=None, enabled_cloudwatch_logs_exports=None, endpoint=None, engine=None, engine_version=None, hosted_zone_id=None, id=None, iops=None, kms_key_id=None, license_model=None, master_username=None, monitoring_interval=None, monitoring_role_arn=None, multi_az=None, option_group_memberships=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, publicly_accessible=None, replicate_source_db=None, resource_id=None, storage_encrypted=None, storage_type=None, tags=None, timezone=None, vpc_security_groups=None):
if address and not isinstance(address, str):
raise TypeError("Expected argument 'address' to be a str")
pulumi.set(__self__, "address", address)
if allocated_storage and not isinstance(allocated_storage, int):
raise TypeError("Expected argument 'allocated_storage' to be a int")
pulumi.set(__self__, "allocated_storage", allocated_storage)
if auto_minor_version_upgrade and not isinstance(auto_minor_version_upgrade, bool):
raise TypeError("Expected argument 'auto_minor_version_upgrade' to be a bool")
pulumi.set(__self__, "auto_minor_version_upgrade", auto_minor_version_upgrade)
if availability_zone and not isinstance(availability_zone, str):
raise TypeError("Expected argument 'availability_zone' to be a str")
pulumi.set(__self__, "availability_zone", availability_zone)
if backup_retention_period and not isinstance(backup_retention_period, int):
raise TypeError("Expected argument 'backup_retention_period' to be a int")
pulumi.set(__self__, "backup_retention_period", backup_retention_period)
if ca_cert_identifier and not isinstance(ca_cert_identifier, str):
raise TypeError("Expected argument 'ca_cert_identifier' to be a str")
pulumi.set(__self__, "ca_cert_identifier", ca_cert_identifier)
if db_cluster_identifier and not isinstance(db_cluster_identifier, str):
raise TypeError("Expected argument 'db_cluster_identifier' to be a str")
pulumi.set(__self__, "db_cluster_identifier", db_cluster_identifier)
if db_instance_arn and not isinstance(db_instance_arn, str):
raise TypeError("Expected argument 'db_instance_arn' to be a str")
pulumi.set(__self__, "db_instance_arn", db_instance_arn)
if db_instance_class and not isinstance(db_instance_class, str):
raise TypeError("Expected argument 'db_instance_class' to be a str")
pulumi.set(__self__, "db_instance_class", db_instance_class)
if db_instance_identifier and not isinstance(db_instance_identifier, str):
raise TypeError("Expected argument 'db_instance_identifier' to be a str")
pulumi.set(__self__, "db_instance_identifier", db_instance_identifier)
if db_instance_port and not isinstance(db_instance_port, int):
raise TypeError("Expected argument 'db_instance_port' to be a int")
pulumi.set(__self__, "db_instance_port", db_instance_port)
if db_name and not isinstance(db_name, str):
raise TypeError("Expected argument 'db_name' to be a str")
pulumi.set(__self__, "db_name", db_name)
if db_parameter_groups and not isinstance(db_parameter_groups, list):
raise TypeError("Expected argument 'db_parameter_groups' to be a list")
pulumi.set(__self__, "db_parameter_groups", db_parameter_groups)
if db_security_groups and not isinstance(db_security_groups, list):
raise TypeError("Expected argument 'db_security_groups' to be a list")
pulumi.set(__self__, "db_security_groups", db_security_groups)
if db_subnet_group and not isinstance(db_subnet_group, str):
raise TypeError("Expected argument 'db_subnet_group' to be a str")
pulumi.set(__self__, "db_subnet_group", db_subnet_group)
if enabled_cloudwatch_logs_exports and not isinstance(enabled_cloudwatch_logs_exports, list):
raise TypeError("Expected argument 'enabled_cloudwatch_logs_exports' to be a list")
pulumi.set(__self__, "enabled_cloudwatch_logs_exports", enabled_cloudwatch_logs_exports)
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if engine and not isinstance(engine, str):
raise TypeError("Expected argument 'engine' to be a str")
pulumi.set(__self__, "engine", engine)
if engine_version and not isinstance(engine_version, str):
raise TypeError("Expected argument 'engine_version' to be a str")
pulumi.set(__self__, "engine_version", engine_version)
if hosted_zone_id and not isinstance(hosted_zone_id, str):
raise TypeError("Expected argument 'hosted_zone_id' to be a str")
pulumi.set(__self__, "hosted_zone_id", hosted_zone_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if iops and not isinstance(iops, int):
raise TypeError("Expected argument 'iops' to be a int")
pulumi.set(__self__, "iops", iops)
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
pulumi.set(__self__, "kms_key_id", kms_key_id)
if license_model and not isinstance(license_model, str):
raise TypeError("Expected argument 'license_model' to be a str")
pulumi.set(__self__, "license_model", license_model)
if master_username and not isinstance(master_username, str):
raise TypeError("Expected argument 'master_username' to be a str")
pulumi.set(__self__, "master_username", master_username)
if monitoring_interval and not isinstance(monitoring_interval, int):
raise TypeError("Expected argument 'monitoring_interval' to be a int")
pulumi.set(__self__, "monitoring_interval", monitoring_interval)
if monitoring_role_arn and not isinstance(monitoring_role_arn, str):
raise TypeError("Expected argument 'monitoring_role_arn' to be a str")
pulumi.set(__self__, "monitoring_role_arn", monitoring_role_arn)
if multi_az and not isinstance(multi_az, bool):
raise TypeError("Expected argument 'multi_az' to be a bool")
pulumi.set(__self__, "multi_az", multi_az)
if option_group_memberships and not isinstance(option_group_memberships, list):
raise TypeError("Expected argument 'option_group_memberships' to be a list")
pulumi.set(__self__, "option_group_memberships", option_group_memberships)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if preferred_backup_window and not isinstance(preferred_backup_window, str):
raise TypeError("Expected argument 'preferred_backup_window' to be a str")
pulumi.set(__self__, "preferred_backup_window", preferred_backup_window)
if preferred_maintenance_window and not isinstance(preferred_maintenance_window, str):
raise TypeError("Expected argument 'preferred_maintenance_window' to be a str")
pulumi.set(__self__, "preferred_maintenance_window", preferred_maintenance_window)
if publicly_accessible and not isinstance(publicly_accessible, bool):
raise TypeError("Expected argument 'publicly_accessible' to be a bool")
pulumi.set(__self__, "publicly_accessible", publicly_accessible)
if replicate_source_db and not isinstance(replicate_source_db, str):
raise TypeError("Expected argument 'replicate_source_db' to be a str")
pulumi.set(__self__, "replicate_source_db", replicate_source_db)
if resource_id and not isinstance(resource_id, str):
raise TypeError("Expected argument 'resource_id' to be a str")
pulumi.set(__self__, "resource_id", resource_id)
if storage_encrypted and not isinstance(storage_encrypted, bool):
raise TypeError("Expected argument 'storage_encrypted' to be a bool")
pulumi.set(__self__, "storage_encrypted", storage_encrypted)
if storage_type and not isinstance(storage_type, str):
raise TypeError("Expected argument 'storage_type' to be a str")
pulumi.set(__self__, "storage_type", storage_type)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if timezone and not isinstance(timezone, str):
raise TypeError("Expected argument 'timezone' to be a str")
pulumi.set(__self__, "timezone", timezone)
if vpc_security_groups and not isinstance(vpc_security_groups, list):
raise TypeError("Expected argument 'vpc_security_groups' to be a list")
pulumi.set(__self__, "vpc_security_groups", vpc_security_groups)
@property
@pulumi.getter
def address(self) -> str:
"""
The hostname of the RDS instance. See also `endpoint` and `port`.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter(name="allocatedStorage")
def allocated_storage(self) -> int:
"""
Specifies the allocated storage size specified in gigabytes.
"""
return pulumi.get(self, "allocated_storage")
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> bool:
"""
Indicates that minor version patches are applied automatically.
"""
return pulumi.get(self, "auto_minor_version_upgrade")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
Specifies the name of the Availability Zone the DB instance is located in.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="backupRetentionPeriod")
def backup_retention_period(self) -> int:
"""
Specifies the number of days for which automatic DB snapshots are retained.
"""
return pulumi.get(self, "backup_retention_period")
@property
@pulumi.getter(name="caCertIdentifier")
def ca_cert_identifier(self) -> str:
"""
Specifies the identifier of the CA certificate for the DB instance.
"""
return pulumi.get(self, "ca_cert_identifier")
@property
@pulumi.getter(name="dbClusterIdentifier")
def db_cluster_identifier(self) -> str:
"""
If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.
"""
return pulumi.get(self, "db_cluster_identifier")
@property
@pulumi.getter(name="dbInstanceArn")
def db_instance_arn(self) -> str:
"""
The Amazon Resource Name (ARN) for the DB instance.
"""
return pulumi.get(self, "db_instance_arn")
@property
@pulumi.getter(name="dbInstanceClass")
def db_instance_class(self) -> str:
"""
Contains the name of the compute and memory capacity class of the DB instance.
"""
return pulumi.get(self, "db_instance_class")
@property
@pulumi.getter(name="dbInstanceIdentifier")
def db_instance_identifier(self) -> str:
return pulumi.get(self, "db_instance_identifier")
@property
@pulumi.getter(name="dbInstancePort")
def db_instance_port(self) -> int:
"""
Specifies the port that the DB instance listens on.
"""
return pulumi.get(self, "db_instance_port")
@property
@pulumi.getter(name="dbName")
def db_name(self) -> str:
"""
Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance.
"""
return pulumi.get(self, "db_name")
@property
@pulumi.getter(name="dbParameterGroups")
def db_parameter_groups(self) -> Sequence[str]:
"""
Provides the list of DB parameter groups applied to this DB instance.
"""
return pulumi.get(self, "db_parameter_groups")
@property
@pulumi.getter(name="dbSecurityGroups")
def db_security_groups(self) -> Sequence[str]:
"""
Provides List of DB security groups associated to this DB instance.
"""
return pulumi.get(self, "db_security_groups")
@property
@pulumi.getter(name="dbSubnetGroup")
def db_subnet_group(self) -> str:
"""
Specifies the name of the subnet group associated with the DB instance.
"""
return pulumi.get(self, "db_subnet_group")
@property
@pulumi.getter(name="enabledCloudwatchLogsExports")
def enabled_cloudwatch_logs_exports(self) -> Sequence[str]:
"""
List of log types to export to cloudwatch.
"""
return pulumi.get(self, "enabled_cloudwatch_logs_exports")
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The connection endpoint in `address:port` format.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter
def engine(self) -> str:
"""
Provides the name of the database engine to be used for this DB instance.
"""
return pulumi.get(self, "engine")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> str:
"""
Indicates the database engine version.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="hostedZoneId")
def hosted_zone_id(self) -> str:
"""
The canonical hosted zone ID of the DB instance (to be used in a Route 53 Alias record).
"""
return pulumi.get(self, "hosted_zone_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def iops(self) -> int:
"""
Specifies the Provisioned IOPS (I/O operations per second) | |
<filename>time_series/time_series.py
#!/usr/bin/env python
# coding: utf-8
# # Project description
#
# Sweet Lift Taxi company has collected historical data on taxi orders at airports. To attract more drivers during peak hours, we need to predict the amount of taxi orders for the next hour. Build a model for such a prediction.
#
# The RMSE metric on the test set should not be more than 48.
#
# ## Project instructions
#
# 1. Download the data and resample it by one hour.
# 2. Analyze the data.
# 3. Train different models with different hyperparameters. The test sample should be 10% of the initial dataset.
# 4. Test the data using the test sample and provide a conclusion.
#
# ## Data description
#
# The data is stored in file `taxi.csv`. The number of orders is in the '*num_orders*' column.
# ## Preparation
# In[1]:
# import libraries
import time
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import math
from statsmodels.tsa.seasonal import seasonal_decompose
import sklearn.linear_model
import sklearn.metrics
import sklearn.neighbors
import sklearn.preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors # for kNN
from sklearn.neighbors import KNeighborsClassifier
from IPython.display import display
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# In[2]:
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler as ss
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
# __Load data and conduct a basic check that it's free from obvious issues.__
# In[3]:
# load the data
# Load the taxi orders dataset: try the local development path first and
# fall back to the platform path. Catch only file-system errors (missing
# file, bad path) so that genuine CSV parsing bugs are not silently
# masked by the fallback, as the previous bare `except:` did.
try:
    df = pd.read_csv('/Users/rraven/Desktop/a_final_yandex/datasets/taxi.csv')
except OSError:
    df = pd.read_csv('/datasets/taxi.csv')
df.info()
# In[4]:
df.describe()
# In[5]:
df.head(2)
# In[6]:
df.tail(2)
# In[7]:
df.duplicated().sum()
# In[8]:
df.isnull().sum()
# In[9]:
df.shape
# __Observations__
# - Data is collected in 10 minute intervals between 2018-03-01 00:00:00 and 2018-08-31 23:50:00
# - There are 26596 instances and 2 columns, datetime and num_orders
# - No missing values or duplicate rows
# - Given that datetime datatype is an object --> we need to change it
#
# __Convert datetime datatype to datetime64__
# In[10]:
df['datetime'] = pd.to_datetime(df['datetime'])
df.dtypes
# __Set the table index equal to the datetime column__
# In[11]:
df = df.set_index(['datetime'])
df.info()
# __Sort data and then check if the dates and times are in chronological order__
# In[12]:
df.index.is_monotonic
# __Observations__
#
# - Index is set to datetime
# - Data is in chronological order
#
# __Resample the data by 1 hour__
# In[13]:
df = df.resample('1H').sum()
df
# ## Analysis
# __Explore the data for patterns__
# In[14]:
df.plot(figsize=(20,5))
plt.title('Sum of the number of orders per hour per date\n', fontsize=18)
plt.show()
# In[15]:
df_roll_10 = df.copy()
df_roll_10['rolling_mean'] = df_roll_10.rolling(10).mean()
df_roll_10.plot(figsize=(20,5))
plt.title('Sum of the number of orders per hour per date using a rolling mean of 10\n', fontsize=18)
plt.show()
# In[16]:
df_roll_20 = df.copy()
df_roll_20['rolling_mean'] = df_roll_20.rolling(20).mean()
df_roll_20.plot(figsize=(20,5))
plt.title('Sum of the number of orders per hour per date using a rolling mean of 20\n', fontsize=18)
plt.show()
# __Observations__
# - We note fluctuations across the 6 month time span, but are not able to demonstrate patterns, seasonality, or trends
# - As we increase the rolling mean from 10 to 20 we do note a stronger smoothing of the curve and the overall increase in July and August
# - Due to the granularity of hourly data for 6 months of data, we need additional tools to investigate trends, seasonality, and residuals
#
# __Use seasonal_decompose to better understand the time series__
# In[17]:
decomposed = seasonal_decompose(df)
plt.figure(figsize=(20, 30))
plt.subplot(311)
decomposed.trend.plot(ax=plt.gca())
plt.title('Trend line for the number of orders by hour\n', fontsize=18)
plt.subplot(312)
decomposed.seasonal.plot(ax=plt.gca())
plt.title('Seasonality line for the number of orders by hour\n', fontsize=18)
plt.subplot(313)
decomposed.resid.plot(ax=plt.gca())
plt.title('Residuals line for the number of orders by hour\n', fontsize=18)
plt.tight_layout();
# __Observations__
# - As we suspected from the rolling mean charts, the overall trend demonstrates the number of orders per hour is increasing as the months progress
# - While we are limited to looking at only 6 months of data, we will not be able to consider monthly or yearly seasonality
# - The regularity of the seasonality graph does suggest there is a pattern --> look for any seasonality in subsections of the df
# - The Residuals mostly stay close to zero suggesting a proper decomposition. There is some variability, but it is likely noise
#
# __Decompose the first and last weeks to look for seasonality__
# In[18]:
df_firstwk = df['2018-03-01':'2018-03-07']
decomposed = seasonal_decompose(df_firstwk)
plt.figure(figsize=(20, 30))
plt.subplot(312)
decomposed.seasonal.plot(ax=plt.gca())
plt.title('Seasonality line for first week\n', fontsize=18)
plt.tight_layout();
# In[19]:
df_lastwk = df['2018-08-25':'2018-08-31']
decomposed = seasonal_decompose(df_lastwk)
plt.figure(figsize=(20, 30))
plt.subplot(312)
decomposed.seasonal.plot(ax=plt.gca())
plt.title('Seasonality line for the last week\n', fontsize=18)
plt.tight_layout();
# In[20]:
df_firstday = df['2018-03-01':'2018-03-02']
decomposed = seasonal_decompose(df_firstday)
plt.figure(figsize=(20, 30))
plt.subplot(312)
decomposed.seasonal.plot(ax=plt.gca())
plt.title('Seasonality line for first day\n', fontsize=18)
plt.tight_layout();
# In[21]:
df_lastday = df['2018-08-30':'2018-08-31']
decomposed = seasonal_decompose(df_lastday)
plt.figure(figsize=(20, 30))
plt.subplot(312)
decomposed.seasonal.plot(ax=plt.gca())
plt.title('Seasonality line for first day\n', fontsize=18)
plt.tight_layout();
# __Observations__
# - We observe a definite cyclic pattern for days in the first and last week, and we can extrapolate from the pattern of the 6 month Seasonality graph that this same pattern follows through the time period
# - The lowest demand occurs at 6 am and 7 pm. Demand fluctuates throughout the 24 hour cycle, but has peaks around midnight and 5 pm.
#
# __Prepare new features from data for better predictions and begin model training__
# <div class="alert alert-warning">
# <b>Reviewer's comment</b>
#
# Great, you visualized the data, did a seasonal decomposition and noted some patterns. One suggestion: it's better to look at a slice of the decomposed data on the whole dataset, rather than to look at a decomposed slice of the data. That way we are looking at long term patterns
#
# </div>
# ## Training
# In[22]:
def make_features(data, max_lag, rolling_mean_size, target_col='num_orders'):
    """Add calendar, lag, and rolling-window features to ``data`` in place.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame with a DatetimeIndex; new feature columns are added in place.
    max_lag : int
        Number of lag columns (``lag_1`` .. ``lag_<max_lag>``) derived from
        the target column.
    rolling_mean_size : int
        Window size for the rolling mean / std features.
    target_col : str, optional
        Column to derive lag and rolling features from. Defaults to
        ``'num_orders'``, preserving the original hard-coded behaviour.

    Returns
    -------
    pandas.DataFrame
        The same (mutated) frame, returned to allow chaining; the original
        version returned ``None`` implicitly.
    """
    # Calendar features straight from the DatetimeIndex.
    data['month'] = data.index.month
    data['day'] = data.index.day
    data['dayofweek'] = data.index.dayofweek
    data['hourofday'] = data.index.hour
    # Lagged copies of the target: lag_k holds the value k steps earlier.
    for lag in range(1, max_lag + 1):
        data['lag_{}'.format(lag)] = data[target_col].shift(lag)
    # shift() before rolling() keeps the current observation out of its own
    # window, preventing target leakage into the features.
    shifted = data[target_col].shift()
    data['rolling_mean'] = shifted.rolling(rolling_mean_size).mean()
    data['rolling_std'] = shifted.rolling(rolling_mean_size).std()
    return data
make_features(df, 10, 5)
df.head()
# __Split the data using num_orders as the target__
# In[23]:
df = df.dropna(how='any', axis=0)
X = df.drop('num_orders', axis=1)
y = df['num_orders']
# note instructions demand 10% of dataset reserved for test dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False, test_size=0.1)
print('Shape X_train:', X_train.shape)
print('Shape X_test:', X_test.shape)
print('Shape y_train:', y_train.shape)
print('Shape y_test:', y_test.shape)
print('First train set value:', X_train.index.min())
print('Last train set value:', X_train.index.max())
print('First test set value:', X_test.index.min())
print('Last test set value:', X_test.index.max())
# __Observations__
# - The dataset has been split correctly, reserved the specified 10% for test data set
# - The training dataset has 3965 instances
# - The test dataset consists of 441 instances
# - Since this is a time series forecast, the dataset cannot be shuffled and the training data must precede the testing data, which we\'ve verified
# - The training data runs from 3/1/18 to 8/13/18 while the test data runs from 8/13/18 to 8/31/18
#
# __Scale data and select models for regression analysis using RMSE to select the best model__
# In[24]:
sc = ss()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# In[25]:
tscv = TimeSeriesSplit() # to be used to split into train and validation
# In[26]:
def rmse(actual, predict):
    """Return the root-mean-squared error of ``predict`` against ``actual``."""
    diff = np.array(predict) - np.array(actual)
    return np.sqrt(np.mean(diff ** 2))
rmse_scorer = make_scorer(rmse, greater_is_better = False)
# __Observations__
# - We scaled the data
# - We instantiated the TimeSeriesSplit to divide the training dataset into train and validation segments
# - We created the rmse_scorer so we can compare our models
#
# __Create baseline model and random forest regressor model__
# In[27]:
model = DummyRegressor(strategy='median')
baseline = np.mean(abs(cross_val_score(model, X_train, y_train, cv=tscv, scoring = rmse_scorer)))
baseline
# In[28]:
rf = RandomForestRegressor(random_state=12345)
rf_original = np.mean(abs(cross_val_score(rf, X_train, y_train, cv=tscv, scoring = rmse_scorer)))
rf_original
# In[29]:
get_ipython().run_cell_magic('time', '', 'params = {"n_estimators" : [500, 700],\n "max_depth" : [6, 7, 8, 9, 10]}\n\ngsSVR = GridSearchCV(estimator=rf, cv=tscv, param_grid=params, n_jobs=-1, verbose=0, scoring=rmse_scorer)\ngsSVR.fit(X_train, y_train)\nSVR_best = gsSVR.best_estimator_\nprint(abs(gsSVR.best_score_))')
# In[30]:
get_ipython().run_cell_magic('time', '', "best_param = pd.DataFrame(gsSVR.best_params_, index=[0])\nrf_tuned = abs(gsSVR.best_score_)\nbest_param['score'] = rf_tuned\n\nbest_param")
# __Observations__
# - The RMSE for the dummy regressor model is over 40, but we anticipate a higher RMSE when we run the test data set because of the increased variability and volume for August
# - Even without hyperparameter tuning, our random forest regressor model performs markedly better by the RMSE metric
# - The best parameters of 10 max_depth and 500 n_estimators slightly improves/lowers the RMSE score
#
# __Create LightGBM model__
# In[31]:
get_ipython().run_cell_magic('time', '', 'lgbm = LGBMRegressor(random_state=12345)\nlgbm_original = np.mean(abs(cross_val_score(lgbm, X_train, y_train, cv=tscv, scoring = rmse_scorer)))\nlgbm_original')
# In[34]:
get_ipython().run_cell_magic('time', '', 'params = {"n_estimators" : [500, 700],\n "max_depth" : [6, 7, 8, 9, 10]}\n\ngsSVR = GridSearchCV(estimator=lgbm, cv=tscv, param_grid=params, n_jobs=-1, verbose=0, scoring=rmse_scorer)\ngsSVR.fit(X_train, y_train)\nSVR_best = gsSVR.best_estimator_\nprint(abs(gsSVR.best_score_))')
# | |
check was introduced.
"""
previous_last_logged_in_datetime = (
user_services.get_user_settings(self.viewer_id).last_logged_in)
self.assertIsNotNone(previous_last_logged_in_datetime)
current_datetime = datetime.datetime.utcnow()
mocked_datetime_utcnow = current_datetime - datetime.timedelta(days=1)
with self.mock_datetime_utcnow(mocked_datetime_utcnow):
user_services.record_user_logged_in(self.viewer_id)
user_settings = user_services.get_user_settings(self.viewer_id)
last_logged_in = user_settings.last_logged_in
# After logging in and requesting a URL, the last_logged_in property is
# changed.
self.login(self.VIEWER_EMAIL)
self.get_html_response(feconf.LIBRARY_INDEX_URL)
self.assertLess(
last_logged_in,
user_services.get_user_settings(self.viewer_id).last_logged_in)
self.logout()
def test_last_logged_in_only_updated_if_enough_time_has_elapsed(self):
    """A login inside the update cooldown must not bump last_logged_in;
    a login after the cooldown must.
    """
    # The last logged-in time has already been set when the user
    # registered.
    previous_last_logged_in_datetime = (
        user_services.get_user_settings(self.viewer_id).last_logged_in)
    self.assertIsNotNone(previous_last_logged_in_datetime)
    current_datetime = datetime.datetime.utcnow()
    # 11 hours later: presumably still inside the cooldown window (looks
    # like 12 hours) -- TODO confirm against user_services.
    mocked_datetime_utcnow = current_datetime + datetime.timedelta(hours=11)
    with self.mock_datetime_utcnow(mocked_datetime_utcnow):
        self.login(self.VIEWER_EMAIL)
        self.get_html_response(feconf.LIBRARY_INDEX_URL)
    # Not enough time has elapsed, so the timestamp is unchanged.
    self.assertEqual(
        user_services.get_user_settings(self.viewer_id).last_logged_in,
        previous_last_logged_in_datetime)
    self.logout()
    # 13 hours later: beyond the cooldown, so the timestamp advances.
    mocked_datetime_utcnow = current_datetime + datetime.timedelta(hours=13)
    with self.mock_datetime_utcnow(mocked_datetime_utcnow):
        self.login(self.VIEWER_EMAIL)
        self.get_html_response(feconf.LIBRARY_INDEX_URL)
    self.assertGreater(
        user_services.get_user_settings(self.viewer_id).last_logged_in,
        previous_last_logged_in_datetime)
    self.logout()
class LastExplorationEditedIntegrationTests(test_utils.GenericTestBase):
    """Integration tests checking that the time the user last edited an
    exploration updates correctly.
    """

    # Id of the exploration created in setUp and edited by the tests.
    EXP_ID = 'exp'

    def setUp(self):
        """Create users for creating and editing an exploration, plus the
        exploration itself (owned by the owner, edited by the editor).
        """
        super(LastExplorationEditedIntegrationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')

    def test_legacy_user(self):
        """Test the case of a user who edits an exploration for the first
        time after the last-edited time check was introduced.
        """
        # A legacy user has no last_edited_an_exploration timestamp yet.
        editor_settings = user_services.get_user_settings(self.editor_id)
        self.assertIsNone(editor_settings.last_edited_an_exploration)
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        # The first edit sets the timestamp.
        editor_settings = user_services.get_user_settings(self.editor_id)
        self.assertIsNotNone(editor_settings.last_edited_an_exploration)

    def test_last_exp_edit_time_gets_updated(self):
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        # Decrease last exploration edited time by 13 hours.
        user_settings = user_services.get_user_settings(self.editor_id)
        mocked_datetime_utcnow = (
            user_settings.last_edited_an_exploration -
            datetime.timedelta(hours=13))
        with self.mock_datetime_utcnow(mocked_datetime_utcnow):
            user_services.record_user_edited_an_exploration(self.editor_id)
        editor_settings = user_services.get_user_settings(self.editor_id)
        previous_last_edited_an_exploration = (
            editor_settings.last_edited_an_exploration)
        self.assertIsNotNone(previous_last_edited_an_exploration)
        # The editor edits the exploration 13 hours after it was created.
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'new objective'
            })], 'Test edit 2')
        # Make sure last exploration edited time gets updated.
        editor_settings = user_services.get_user_settings(self.editor_id)
        self.assertGreater(
            (editor_settings.last_edited_an_exploration),
            previous_last_edited_an_exploration)
class LastExplorationCreatedIntegrationTests(test_utils.GenericTestBase):
    """Integration tests checking that the time the user last created an
    exploration updates correctly.
    """

    # Ids of the two explorations created during the tests.
    EXP_ID_A = 'exp_a'
    EXP_ID_B = 'exp_b'

    def setUp(self):
        """Create a user for creating explorations."""
        super(LastExplorationCreatedIntegrationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

    def test_legacy_user(self):
        """Test the case of a user who creates an exploration for the first
        time after the last-created time check was introduced.
        """
        # A legacy user has no last_created_an_exploration timestamp yet.
        owner_settings = user_services.get_user_settings(self.owner_id)
        self.assertIsNone(owner_settings.last_created_an_exploration)
        self.save_new_valid_exploration(
            self.EXP_ID_A, self.owner_id, end_state_name='End')
        # The first creation sets the timestamp.
        owner_settings = user_services.get_user_settings(self.owner_id)
        self.assertIsNotNone(owner_settings.last_created_an_exploration)

    def test_last_exp_edit_time_gets_updated(self):
        self.save_new_valid_exploration(
            self.EXP_ID_A, self.owner_id, end_state_name='End')
        # Decrease last exploration created time by 13 hours.
        user_settings = user_services.get_user_settings(self.owner_id)
        with self.mock_datetime_utcnow(
            user_settings.last_created_an_exploration -
            datetime.timedelta(hours=13)):
            user_services.record_user_created_an_exploration(self.owner_id)
        owner_settings = user_services.get_user_settings(self.owner_id)
        previous_last_created_an_exploration = (
            owner_settings.last_created_an_exploration)
        self.assertIsNotNone(previous_last_created_an_exploration)
        # The creator creates another exploration 13 hours later.
        self.save_new_valid_exploration(
            self.EXP_ID_B, self.owner_id, end_state_name='End')
        # Make sure that last exploration created time gets updated.
        owner_settings = user_services.get_user_settings(self.owner_id)
        self.assertGreater(
            (owner_settings.last_created_an_exploration),
            previous_last_created_an_exploration)
class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
    """Test the functionality related to updating the community contribution
    stats.
    """

    REVIEWER_1_EMAIL = '<EMAIL>'
    REVIEWER_2_EMAIL = '<EMAIL>'

    def _assert_community_contribution_stats(
            self, question_reviewer_count,
            translation_reviewer_counts_by_lang_code):
        """Asserts that the current community contribution stats match the
        given expected reviewer counts.

        Every test in this class only grants or revokes review rights, so
        the suggestion counts are always expected to stay at their default
        values (0 question suggestions and no translation suggestions).

        Args:
            question_reviewer_count: int. The expected number of question
                reviewers.
            translation_reviewer_counts_by_lang_code: dict(str, int). The
                expected translation reviewer counts, keyed by language
                code.
        """
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(
            stats.question_reviewer_count, question_reviewer_count)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code,
            translation_reviewer_counts_by_lang_code)
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})

    def _assert_community_contribution_stats_is_in_default_state(self):
        """Checks if the community contribution stats is in its default
        state.
        """
        self._assert_community_contribution_stats(0, {})

    def setUp(self):
        super(
            CommunityContributionStatsUnitTests, self).setUp()
        self.signup(self.REVIEWER_1_EMAIL, 'reviewer1')
        self.reviewer_1_id = self.get_user_id_from_email(
            self.REVIEWER_1_EMAIL)
        self.signup(self.REVIEWER_2_EMAIL, 'reviewer2')
        self.reviewer_2_id = self.get_user_id_from_email(
            self.REVIEWER_2_EMAIL)

    def test_grant_reviewer_translation_reviewing_rights_increases_count(self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')

        self._assert_community_contribution_stats(0, {'hi': 1})

    def test_grant_reviewer_translation_multi_reviewing_rights_increases_count(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')

        self._assert_community_contribution_stats(0, {'hi': 1, 'en': 1})

    def test_grant_reviewer_existing_translation_reviewing_rights_no_count_diff(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        # Assert that the translation reviewer count increased by one.
        self._assert_community_contribution_stats(0, {'hi': 1})

        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')

        # Assert that the translation reviewer count did not change because
        # the reviewer already had the permissions.
        self._assert_community_contribution_stats(0, {'hi': 1})

    def test_remove_all_reviewer_translation_reviewing_rights_decreases_count(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        # Assert that the translation reviewer count increased by one.
        self._assert_community_contribution_stats(0, {'hi': 1})

        user_services.remove_translation_review_rights_in_language(
            self.reviewer_1_id, 'hi')

        # Assert that the translation reviewer count decreased by one after
        # the rights were removed.
        self._assert_community_contribution_stats_is_in_default_state()

    def test_remove_some_reviewer_translation_reviewing_rights_decreases_count(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        # Assert that the translation reviewer count increased by one.
        self._assert_community_contribution_stats(0, {'hi': 1, 'en': 1})

        user_services.remove_translation_review_rights_in_language(
            self.reviewer_1_id, 'hi')

        # Assert that the translation reviewer count decreased by one after
        # the rights were removed.
        self._assert_community_contribution_stats(0, {'en': 1})

    def test_remove_translation_contribution_reviewer_decreases_count(self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        # Assert that the translation reviewer count increased by one.
        self._assert_community_contribution_stats(0, {'hi': 1, 'en': 1})

        user_services.remove_contribution_reviewer(self.reviewer_1_id)

        # Assert that the translation reviewer counts decreased by one after
        # the contribution reviewer was removed.
        self._assert_community_contribution_stats_is_in_default_state()

    def test_grant_reviewer_question_reviewing_rights_increases_count(self):
        user_services.allow_user_to_review_question(self.reviewer_1_id)

        self._assert_community_contribution_stats(1, {})

    def test_grant_reviewer_existing_question_reviewing_rights_no_count_diff(
            self):
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the question reviewer count increased by one.
        self._assert_community_contribution_stats(1, {})

        user_services.allow_user_to_review_question(self.reviewer_1_id)

        # Assert that the question reviewer count did not change because the
        # reviewer already had the permissions.
        self._assert_community_contribution_stats(1, {})

    def test_remove_reviewer_question_reviewing_rights_decreases_count(
            self):
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the question reviewer count increased by one.
        self._assert_community_contribution_stats(1, {})

        user_services.remove_question_review_rights(self.reviewer_1_id)

        # Assert that the question reviewer count decreased by one after the
        # rights were removed.
        self._assert_community_contribution_stats_is_in_default_state()

    def test_remove_question_contribution_reviewer_decreases_count(
            self):
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the question reviewer count increased by one.
        self._assert_community_contribution_stats(1, {})

        user_services.remove_contribution_reviewer(self.reviewer_1_id)

        # Assert that the question reviewer count decreased by one after the
        # contribution reviewer was removed.
        self._assert_community_contribution_stats_is_in_default_state()

    def test_grant_reviewer_multiple_reviewing_rights_increases_counts(self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        user_services.allow_user_to_review_question(self.reviewer_1_id)

        self._assert_community_contribution_stats(1, {'hi': 1, 'en': 1})

    def test_grant_multiple_reviewers_multi_reviewing_rights_increases_counts(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_2_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_2_id, 'fr')
        user_services.allow_user_to_review_question(self.reviewer_2_id)

        self._assert_community_contribution_stats(
            2, {'hi': 2, 'en': 1, 'fr': 1})

    def test_remove_question_rights_from_multi_rights_reviewer_updates_count(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the counts were updated before the question rights are
        # removed.
        self._assert_community_contribution_stats(1, {'hi': 1, 'en': 1})

        user_services.remove_question_review_rights(self.reviewer_1_id)

        # Only the question reviewer count drops; translation rights remain.
        self._assert_community_contribution_stats(0, {'hi': 1, 'en': 1})

    def test_remove_translation_rights_from_multi_rights_reviewer_updates_count(
            self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the counts were updated before the translation rights
        # are removed.
        self._assert_community_contribution_stats(1, {'hi': 1})

        user_services.remove_translation_review_rights_in_language(
            self.reviewer_1_id, 'hi')
        self.process_and_flush_pending_tasks()

        # Only the translation reviewer count drops; question rights remain.
        self._assert_community_contribution_stats(1, {})

    def test_remove_multi_rights_contribution_reviewer_decreases_counts(self):
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'hi')
        user_services.allow_user_to_review_translation_in_language(
            self.reviewer_1_id, 'en')
        user_services.allow_user_to_review_question(self.reviewer_1_id)
        # Assert that the counts were updated before the contribution
        # reviewer is removed.
        self._assert_community_contribution_stats(1, {'hi': 1, 'en': 1})

        user_services.remove_contribution_reviewer(self.reviewer_1_id)

        self._assert_community_contribution_stats_is_in_default_state()

    def test_grant_reviewer_voiceover_reviewing_permissions_does_nothing(self):
        # Granting reviewers voiceover reviewing permissions does not change
        # the counts because voiceover suggestions are currently not offered
        # on the Contributor Dashboard.
        user_services.allow_user_to_review_voiceover_in_language(
            self.reviewer_1_id, 'hi')

        self._assert_community_contribution_stats_is_in_default_state()

    def test_remove_reviewer_voiceover_reviewing_permissions_does_nothing(self):
        # Removing reviewers voiceover reviewing permissions does not change
        # the counts because voiceover suggestions are currently not offered
        # on the Contributor Dashboard.
        user_services.allow_user_to_review_voiceover_in_language(
            self.reviewer_1_id, 'hi')
        self._assert_community_contribution_stats_is_in_default_state()

        user_services.remove_voiceover_review_rights_in_language(
            self.reviewer_1_id, 'hi')

        self._assert_community_contribution_stats_is_in_default_state()
class UserContributionReviewRightsTests(test_utils.GenericTestBase):
    """Tests for assigning contribution review rights to users and querying
    the rights they hold.
    """

    # Credentials for the three user roles exercised by these tests.
    TRANSLATOR_EMAIL = '<EMAIL>'
    TRANSLATOR_USERNAME = 'translator'
    VOICE_ARTIST_EMAIL = '<EMAIL>'
    VOICE_ARTIST_USERNAME = 'voiceartist'
    QUESTION_REVIEWER_EMAIL = '<EMAIL>'
    QUESTION_REVIEWER_USERNAME = 'questionreviewer'
def setUp(self):
super(UserContributionReviewRightsTests, self).setUp()
self.signup(self.TRANSLATOR_EMAIL, self.TRANSLATOR_USERNAME)
self.translator_id = self.get_user_id_from_email(self.TRANSLATOR_EMAIL)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.signup(
self.QUESTION_REVIEWER_EMAIL, self.QUESTION_REVIEWER_USERNAME)
self.question_reviewer_id = (
self.get_user_id_from_email(self.QUESTION_REVIEWER_EMAIL))
def test_assign_user_review_translation_suggestion_in_language(self):
self.assertFalse(
user_services.can_review_translation_suggestions(
self.translator_id))
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
self.assertTrue(
user_services.can_review_translation_suggestions(
self.translator_id, language_code='hi'))
def test_translation_review_assignement_adds_language_in_sorted_order(self):
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'hi')
user_contribution_rights = user_services.get_user_contribution_rights(
self.translator_id)
self.assertEqual(
user_contribution_rights.can_review_translation_for_language_codes,
['hi'])
user_services.allow_user_to_review_translation_in_language(
self.translator_id, 'en')
user_contribution_rights = user_services.get_user_contribution_rights(
self.translator_id)
self.assertEqual(
user_contribution_rights.can_review_translation_for_language_codes,
['en', 'hi'])
def test_assign_user_review_voiceover_application_in_language(self):
self.assertFalse(
user_services.can_review_voiceover_applications(
self.voice_artist_id))
user_services.allow_user_to_review_voiceover_in_language(
self.voice_artist_id, 'hi')
self.assertTrue(
user_services.can_review_voiceover_applications(
self.voice_artist_id, language_code='hi'))
| |
= _bacnet.UNITS_DEGREES_KELVIN_PER_MINUTE
# SWIG auto-generated re-exports of BACnet enumeration constants from the
# underlying _bacnet extension module. Each pair of lines first registers the
# constant with SWIG (*_swigconstant) and then mirrors its value as a
# module-level name so callers can use e.g. UNITS_PERCENT directly.
# NOTE(review): generated code — do not edit by hand; regenerate via SWIG.

# Temperature-related engineering units.
_bacnet.UNITS_DEGREES_FAHRENHEIT_swigconstant(_bacnet)
UNITS_DEGREES_FAHRENHEIT = _bacnet.UNITS_DEGREES_FAHRENHEIT
_bacnet.UNITS_DEGREE_DAYS_CELSIUS_swigconstant(_bacnet)
UNITS_DEGREE_DAYS_CELSIUS = _bacnet.UNITS_DEGREE_DAYS_CELSIUS
_bacnet.UNITS_DEGREE_DAYS_FAHRENHEIT_swigconstant(_bacnet)
UNITS_DEGREE_DAYS_FAHRENHEIT = _bacnet.UNITS_DEGREE_DAYS_FAHRENHEIT
_bacnet.UNITS_DELTA_DEGREES_FAHRENHEIT_swigconstant(_bacnet)
UNITS_DELTA_DEGREES_FAHRENHEIT = _bacnet.UNITS_DELTA_DEGREES_FAHRENHEIT
_bacnet.UNITS_DELTA_DEGREES_KELVIN_swigconstant(_bacnet)
UNITS_DELTA_DEGREES_KELVIN = _bacnet.UNITS_DELTA_DEGREES_KELVIN
# Time units.
_bacnet.UNITS_YEARS_swigconstant(_bacnet)
UNITS_YEARS = _bacnet.UNITS_YEARS
_bacnet.UNITS_MONTHS_swigconstant(_bacnet)
UNITS_MONTHS = _bacnet.UNITS_MONTHS
_bacnet.UNITS_WEEKS_swigconstant(_bacnet)
UNITS_WEEKS = _bacnet.UNITS_WEEKS
_bacnet.UNITS_DAYS_swigconstant(_bacnet)
UNITS_DAYS = _bacnet.UNITS_DAYS
_bacnet.UNITS_HOURS_swigconstant(_bacnet)
UNITS_HOURS = _bacnet.UNITS_HOURS
_bacnet.UNITS_MINUTES_swigconstant(_bacnet)
UNITS_MINUTES = _bacnet.UNITS_MINUTES
_bacnet.UNITS_SECONDS_swigconstant(_bacnet)
UNITS_SECONDS = _bacnet.UNITS_SECONDS
_bacnet.UNITS_HUNDREDTHS_SECONDS_swigconstant(_bacnet)
UNITS_HUNDREDTHS_SECONDS = _bacnet.UNITS_HUNDREDTHS_SECONDS
_bacnet.UNITS_MILLISECONDS_swigconstant(_bacnet)
UNITS_MILLISECONDS = _bacnet.UNITS_MILLISECONDS
# Torque and velocity units.
_bacnet.UNITS_NEWTON_METERS_swigconstant(_bacnet)
UNITS_NEWTON_METERS = _bacnet.UNITS_NEWTON_METERS
_bacnet.UNITS_MILLIMETERS_PER_SECOND_swigconstant(_bacnet)
UNITS_MILLIMETERS_PER_SECOND = _bacnet.UNITS_MILLIMETERS_PER_SECOND
_bacnet.UNITS_MILLIMETERS_PER_MINUTE_swigconstant(_bacnet)
UNITS_MILLIMETERS_PER_MINUTE = _bacnet.UNITS_MILLIMETERS_PER_MINUTE
_bacnet.UNITS_METERS_PER_SECOND_swigconstant(_bacnet)
UNITS_METERS_PER_SECOND = _bacnet.UNITS_METERS_PER_SECOND
_bacnet.UNITS_METERS_PER_MINUTE_swigconstant(_bacnet)
UNITS_METERS_PER_MINUTE = _bacnet.UNITS_METERS_PER_MINUTE
_bacnet.UNITS_METERS_PER_HOUR_swigconstant(_bacnet)
UNITS_METERS_PER_HOUR = _bacnet.UNITS_METERS_PER_HOUR
_bacnet.UNITS_KILOMETERS_PER_HOUR_swigconstant(_bacnet)
UNITS_KILOMETERS_PER_HOUR = _bacnet.UNITS_KILOMETERS_PER_HOUR
_bacnet.UNITS_FEET_PER_SECOND_swigconstant(_bacnet)
UNITS_FEET_PER_SECOND = _bacnet.UNITS_FEET_PER_SECOND
_bacnet.UNITS_FEET_PER_MINUTE_swigconstant(_bacnet)
UNITS_FEET_PER_MINUTE = _bacnet.UNITS_FEET_PER_MINUTE
_bacnet.UNITS_MILES_PER_HOUR_swigconstant(_bacnet)
UNITS_MILES_PER_HOUR = _bacnet.UNITS_MILES_PER_HOUR
# Volume units.
_bacnet.UNITS_CUBIC_FEET_swigconstant(_bacnet)
UNITS_CUBIC_FEET = _bacnet.UNITS_CUBIC_FEET
_bacnet.UNITS_CUBIC_METERS_swigconstant(_bacnet)
UNITS_CUBIC_METERS = _bacnet.UNITS_CUBIC_METERS
_bacnet.UNITS_IMPERIAL_GALLONS_swigconstant(_bacnet)
UNITS_IMPERIAL_GALLONS = _bacnet.UNITS_IMPERIAL_GALLONS
_bacnet.UNITS_MILLILITERS_swigconstant(_bacnet)
UNITS_MILLILITERS = _bacnet.UNITS_MILLILITERS
_bacnet.UNITS_LITERS_swigconstant(_bacnet)
UNITS_LITERS = _bacnet.UNITS_LITERS
_bacnet.UNITS_US_GALLONS_swigconstant(_bacnet)
UNITS_US_GALLONS = _bacnet.UNITS_US_GALLONS
# Volumetric flow units.
_bacnet.UNITS_CUBIC_FEET_PER_SECOND_swigconstant(_bacnet)
UNITS_CUBIC_FEET_PER_SECOND = _bacnet.UNITS_CUBIC_FEET_PER_SECOND
_bacnet.UNITS_CUBIC_FEET_PER_MINUTE_swigconstant(_bacnet)
UNITS_CUBIC_FEET_PER_MINUTE = _bacnet.UNITS_CUBIC_FEET_PER_MINUTE
_bacnet.UNITS_CUBIC_FEET_PER_HOUR_swigconstant(_bacnet)
UNITS_CUBIC_FEET_PER_HOUR = _bacnet.UNITS_CUBIC_FEET_PER_HOUR
_bacnet.UNITS_CUBIC_METERS_PER_SECOND_swigconstant(_bacnet)
UNITS_CUBIC_METERS_PER_SECOND = _bacnet.UNITS_CUBIC_METERS_PER_SECOND
_bacnet.UNITS_CUBIC_METERS_PER_MINUTE_swigconstant(_bacnet)
UNITS_CUBIC_METERS_PER_MINUTE = _bacnet.UNITS_CUBIC_METERS_PER_MINUTE
_bacnet.UNITS_CUBIC_METERS_PER_HOUR_swigconstant(_bacnet)
UNITS_CUBIC_METERS_PER_HOUR = _bacnet.UNITS_CUBIC_METERS_PER_HOUR
_bacnet.UNITS_IMPERIAL_GALLONS_PER_MINUTE_swigconstant(_bacnet)
UNITS_IMPERIAL_GALLONS_PER_MINUTE = _bacnet.UNITS_IMPERIAL_GALLONS_PER_MINUTE
_bacnet.UNITS_MILLILITERS_PER_SECOND_swigconstant(_bacnet)
UNITS_MILLILITERS_PER_SECOND = _bacnet.UNITS_MILLILITERS_PER_SECOND
_bacnet.UNITS_LITERS_PER_SECOND_swigconstant(_bacnet)
UNITS_LITERS_PER_SECOND = _bacnet.UNITS_LITERS_PER_SECOND
_bacnet.UNITS_LITERS_PER_MINUTE_swigconstant(_bacnet)
UNITS_LITERS_PER_MINUTE = _bacnet.UNITS_LITERS_PER_MINUTE
_bacnet.UNITS_LITERS_PER_HOUR_swigconstant(_bacnet)
UNITS_LITERS_PER_HOUR = _bacnet.UNITS_LITERS_PER_HOUR
_bacnet.UNITS_US_GALLONS_PER_MINUTE_swigconstant(_bacnet)
UNITS_US_GALLONS_PER_MINUTE = _bacnet.UNITS_US_GALLONS_PER_MINUTE
_bacnet.UNITS_US_GALLONS_PER_HOUR_swigconstant(_bacnet)
UNITS_US_GALLONS_PER_HOUR = _bacnet.UNITS_US_GALLONS_PER_HOUR
# Angular and temperature rate-of-change units.
_bacnet.UNITS_DEGREES_ANGULAR_swigconstant(_bacnet)
UNITS_DEGREES_ANGULAR = _bacnet.UNITS_DEGREES_ANGULAR
_bacnet.UNITS_DEGREES_CELSIUS_PER_HOUR_swigconstant(_bacnet)
UNITS_DEGREES_CELSIUS_PER_HOUR = _bacnet.UNITS_DEGREES_CELSIUS_PER_HOUR
_bacnet.UNITS_DEGREES_CELSIUS_PER_MINUTE_swigconstant(_bacnet)
UNITS_DEGREES_CELSIUS_PER_MINUTE = _bacnet.UNITS_DEGREES_CELSIUS_PER_MINUTE
_bacnet.UNITS_DEGREES_FAHRENHEIT_PER_HOUR_swigconstant(_bacnet)
UNITS_DEGREES_FAHRENHEIT_PER_HOUR = _bacnet.UNITS_DEGREES_FAHRENHEIT_PER_HOUR
_bacnet.UNITS_DEGREES_FAHRENHEIT_PER_MINUTE_swigconstant(_bacnet)
UNITS_DEGREES_FAHRENHEIT_PER_MINUTE = _bacnet.UNITS_DEGREES_FAHRENHEIT_PER_MINUTE
# Miscellaneous physical units.
_bacnet.UNITS_JOULE_SECONDS_swigconstant(_bacnet)
UNITS_JOULE_SECONDS = _bacnet.UNITS_JOULE_SECONDS
_bacnet.UNITS_KILOGRAMS_PER_CUBIC_METER_swigconstant(_bacnet)
UNITS_KILOGRAMS_PER_CUBIC_METER = _bacnet.UNITS_KILOGRAMS_PER_CUBIC_METER
_bacnet.UNITS_KW_HOURS_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_KW_HOURS_PER_SQUARE_METER = _bacnet.UNITS_KW_HOURS_PER_SQUARE_METER
_bacnet.UNITS_KW_HOURS_PER_SQUARE_FOOT_swigconstant(_bacnet)
UNITS_KW_HOURS_PER_SQUARE_FOOT = _bacnet.UNITS_KW_HOURS_PER_SQUARE_FOOT
_bacnet.UNITS_MEGAJOULES_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_MEGAJOULES_PER_SQUARE_METER = _bacnet.UNITS_MEGAJOULES_PER_SQUARE_METER
_bacnet.UNITS_MEGAJOULES_PER_SQUARE_FOOT_swigconstant(_bacnet)
UNITS_MEGAJOULES_PER_SQUARE_FOOT = _bacnet.UNITS_MEGAJOULES_PER_SQUARE_FOOT
_bacnet.UNITS_NO_UNITS_swigconstant(_bacnet)
UNITS_NO_UNITS = _bacnet.UNITS_NO_UNITS
_bacnet.UNITS_NEWTON_SECONDS_swigconstant(_bacnet)
UNITS_NEWTON_SECONDS = _bacnet.UNITS_NEWTON_SECONDS
_bacnet.UNITS_NEWTONS_PER_METER_swigconstant(_bacnet)
UNITS_NEWTONS_PER_METER = _bacnet.UNITS_NEWTONS_PER_METER
# Concentration, percentage and rate units.
_bacnet.UNITS_PARTS_PER_MILLION_swigconstant(_bacnet)
UNITS_PARTS_PER_MILLION = _bacnet.UNITS_PARTS_PER_MILLION
_bacnet.UNITS_PARTS_PER_BILLION_swigconstant(_bacnet)
UNITS_PARTS_PER_BILLION = _bacnet.UNITS_PARTS_PER_BILLION
_bacnet.UNITS_PERCENT_swigconstant(_bacnet)
UNITS_PERCENT = _bacnet.UNITS_PERCENT
_bacnet.UNITS_PERCENT_OBSCURATION_PER_FOOT_swigconstant(_bacnet)
UNITS_PERCENT_OBSCURATION_PER_FOOT = _bacnet.UNITS_PERCENT_OBSCURATION_PER_FOOT
_bacnet.UNITS_PERCENT_OBSCURATION_PER_METER_swigconstant(_bacnet)
UNITS_PERCENT_OBSCURATION_PER_METER = _bacnet.UNITS_PERCENT_OBSCURATION_PER_METER
_bacnet.UNITS_PERCENT_PER_SECOND_swigconstant(_bacnet)
UNITS_PERCENT_PER_SECOND = _bacnet.UNITS_PERCENT_PER_SECOND
_bacnet.UNITS_PER_MINUTE_swigconstant(_bacnet)
UNITS_PER_MINUTE = _bacnet.UNITS_PER_MINUTE
_bacnet.UNITS_PER_SECOND_swigconstant(_bacnet)
UNITS_PER_SECOND = _bacnet.UNITS_PER_SECOND
_bacnet.UNITS_PSI_PER_DEGREE_FAHRENHEIT_swigconstant(_bacnet)
UNITS_PSI_PER_DEGREE_FAHRENHEIT = _bacnet.UNITS_PSI_PER_DEGREE_FAHRENHEIT
_bacnet.UNITS_RADIANS_swigconstant(_bacnet)
UNITS_RADIANS = _bacnet.UNITS_RADIANS
_bacnet.UNITS_RADIANS_PER_SECOND_swigconstant(_bacnet)
UNITS_RADIANS_PER_SECOND = _bacnet.UNITS_RADIANS_PER_SECOND
_bacnet.UNITS_REVOLUTIONS_PER_MINUTE_swigconstant(_bacnet)
UNITS_REVOLUTIONS_PER_MINUTE = _bacnet.UNITS_REVOLUTIONS_PER_MINUTE
_bacnet.UNITS_SQUARE_METERS_PER_NEWTON_swigconstant(_bacnet)
UNITS_SQUARE_METERS_PER_NEWTON = _bacnet.UNITS_SQUARE_METERS_PER_NEWTON
_bacnet.UNITS_WATTS_PER_METER_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_WATTS_PER_METER_PER_DEGREE_KELVIN = _bacnet.UNITS_WATTS_PER_METER_PER_DEGREE_KELVIN
_bacnet.UNITS_WATTS_PER_SQUARE_METER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_WATTS_PER_SQUARE_METER_DEGREE_KELVIN = _bacnet.UNITS_WATTS_PER_SQUARE_METER_DEGREE_KELVIN
# Mass-ratio and density units.
_bacnet.UNITS_PER_MILLE_swigconstant(_bacnet)
UNITS_PER_MILLE = _bacnet.UNITS_PER_MILLE
_bacnet.UNITS_GRAMS_PER_GRAM_swigconstant(_bacnet)
UNITS_GRAMS_PER_GRAM = _bacnet.UNITS_GRAMS_PER_GRAM
_bacnet.UNITS_KILOGRAMS_PER_KILOGRAM_swigconstant(_bacnet)
UNITS_KILOGRAMS_PER_KILOGRAM = _bacnet.UNITS_KILOGRAMS_PER_KILOGRAM
_bacnet.UNITS_GRAMS_PER_KILOGRAM_swigconstant(_bacnet)
UNITS_GRAMS_PER_KILOGRAM = _bacnet.UNITS_GRAMS_PER_KILOGRAM
_bacnet.UNITS_MILLIGRAMS_PER_GRAM_swigconstant(_bacnet)
UNITS_MILLIGRAMS_PER_GRAM = _bacnet.UNITS_MILLIGRAMS_PER_GRAM
_bacnet.UNITS_MILLIGRAMS_PER_KILOGRAM_swigconstant(_bacnet)
UNITS_MILLIGRAMS_PER_KILOGRAM = _bacnet.UNITS_MILLIGRAMS_PER_KILOGRAM
_bacnet.UNITS_GRAMS_PER_MILLILITER_swigconstant(_bacnet)
UNITS_GRAMS_PER_MILLILITER = _bacnet.UNITS_GRAMS_PER_MILLILITER
_bacnet.UNITS_GRAMS_PER_LITER_swigconstant(_bacnet)
UNITS_GRAMS_PER_LITER = _bacnet.UNITS_GRAMS_PER_LITER
_bacnet.UNITS_MILLIGRAMS_PER_LITER_swigconstant(_bacnet)
UNITS_MILLIGRAMS_PER_LITER = _bacnet.UNITS_MILLIGRAMS_PER_LITER
_bacnet.UNITS_MICROGRAMS_PER_LITER_swigconstant(_bacnet)
UNITS_MICROGRAMS_PER_LITER = _bacnet.UNITS_MICROGRAMS_PER_LITER
_bacnet.UNITS_GRAMS_PER_CUBIC_METER_swigconstant(_bacnet)
UNITS_GRAMS_PER_CUBIC_METER = _bacnet.UNITS_GRAMS_PER_CUBIC_METER
_bacnet.UNITS_MILLIGRAMS_PER_CUBIC_METER_swigconstant(_bacnet)
UNITS_MILLIGRAMS_PER_CUBIC_METER = _bacnet.UNITS_MILLIGRAMS_PER_CUBIC_METER
_bacnet.UNITS_MICROGRAMS_PER_CUBIC_METER_swigconstant(_bacnet)
UNITS_MICROGRAMS_PER_CUBIC_METER = _bacnet.UNITS_MICROGRAMS_PER_CUBIC_METER
_bacnet.UNITS_NANOGRAMS_PER_CUBIC_METER_swigconstant(_bacnet)
UNITS_NANOGRAMS_PER_CUBIC_METER = _bacnet.UNITS_NANOGRAMS_PER_CUBIC_METER
_bacnet.UNITS_GRAMS_PER_CUBIC_CENTIMETER_swigconstant(_bacnet)
UNITS_GRAMS_PER_CUBIC_CENTIMETER = _bacnet.UNITS_GRAMS_PER_CUBIC_CENTIMETER
# Radioactivity and radiation-dose units.
_bacnet.UNITS_BECQUERELS_swigconstant(_bacnet)
UNITS_BECQUERELS = _bacnet.UNITS_BECQUERELS
_bacnet.UNITS_MEGABECQUERELS_swigconstant(_bacnet)
UNITS_MEGABECQUERELS = _bacnet.UNITS_MEGABECQUERELS
_bacnet.UNITS_GRAY_swigconstant(_bacnet)
UNITS_GRAY = _bacnet.UNITS_GRAY
_bacnet.UNITS_MILLIGRAY_swigconstant(_bacnet)
UNITS_MILLIGRAY = _bacnet.UNITS_MILLIGRAY
_bacnet.UNITS_MICROGRAY_swigconstant(_bacnet)
UNITS_MICROGRAY = _bacnet.UNITS_MICROGRAY
_bacnet.UNITS_SIEVERTS_swigconstant(_bacnet)
UNITS_SIEVERTS = _bacnet.UNITS_SIEVERTS
_bacnet.UNITS_MILLISIEVERTS_swigconstant(_bacnet)
UNITS_MILLISIEVERTS = _bacnet.UNITS_MILLISIEVERTS
_bacnet.UNITS_MICROSIEVERTS_swigconstant(_bacnet)
UNITS_MICROSIEVERTS = _bacnet.UNITS_MICROSIEVERTS
_bacnet.UNITS_MICROSIEVERTS_PER_HOUR_swigconstant(_bacnet)
UNITS_MICROSIEVERTS_PER_HOUR = _bacnet.UNITS_MICROSIEVERTS_PER_HOUR
# Sound, turbidity and other measurement units.
_bacnet.UNITS_DECIBELS_A_swigconstant(_bacnet)
UNITS_DECIBELS_A = _bacnet.UNITS_DECIBELS_A
_bacnet.UNITS_NEPHELOMETRIC_TURBIDITY_UNIT_swigconstant(_bacnet)
UNITS_NEPHELOMETRIC_TURBIDITY_UNIT = _bacnet.UNITS_NEPHELOMETRIC_TURBIDITY_UNIT
_bacnet.UNITS_PH_swigconstant(_bacnet)
UNITS_PH = _bacnet.UNITS_PH
_bacnet.UNITS_GRAMS_PER_SQUARE_METER_swigconstant(_bacnet)
UNITS_GRAMS_PER_SQUARE_METER = _bacnet.UNITS_GRAMS_PER_SQUARE_METER
_bacnet.UNITS_MINUTES_PER_DEGREE_KELVIN_swigconstant(_bacnet)
UNITS_MINUTES_PER_DEGREE_KELVIN = _bacnet.UNITS_MINUTES_PER_DEGREE_KELVIN
# Sentinel values marking the end of the standard units table and the
# proprietary unit number range.
_bacnet.MAX_UNITS_swigconstant(_bacnet)
MAX_UNITS = _bacnet.MAX_UNITS
_bacnet.UNITS_PROPRIETARY_RANGE_MIN_swigconstant(_bacnet)
UNITS_PROPRIETARY_RANGE_MIN = _bacnet.UNITS_PROPRIETARY_RANGE_MIN
_bacnet.UNITS_PROPRIETARY_RANGE_MAX_swigconstant(_bacnet)
UNITS_PROPRIETARY_RANGE_MAX = _bacnet.UNITS_PROPRIETARY_RANGE_MAX
# BACnet polarity enumeration.
_bacnet.POLARITY_NORMAL_swigconstant(_bacnet)
POLARITY_NORMAL = _bacnet.POLARITY_NORMAL
_bacnet.POLARITY_REVERSE_swigconstant(_bacnet)
POLARITY_REVERSE = _bacnet.POLARITY_REVERSE
_bacnet.MAX_POLARITY_swigconstant(_bacnet)
MAX_POLARITY = _bacnet.MAX_POLARITY
# BACnet program-request enumeration.
_bacnet.PROGRAM_REQUEST_READY_swigconstant(_bacnet)
PROGRAM_REQUEST_READY = _bacnet.PROGRAM_REQUEST_READY
_bacnet.PROGRAM_REQUEST_LOAD_swigconstant(_bacnet)
PROGRAM_REQUEST_LOAD = _bacnet.PROGRAM_REQUEST_LOAD
_bacnet.PROGRAM_REQUEST_RUN_swigconstant(_bacnet)
PROGRAM_REQUEST_RUN = _bacnet.PROGRAM_REQUEST_RUN
_bacnet.PROGRAM_REQUEST_HALT_swigconstant(_bacnet)
PROGRAM_REQUEST_HALT = _bacnet.PROGRAM_REQUEST_HALT
_bacnet.PROGRAM_REQUEST_RESTART_swigconstant(_bacnet)
PROGRAM_REQUEST_RESTART = _bacnet.PROGRAM_REQUEST_RESTART
_bacnet.PROGRAM_REQUEST_UNLOAD_swigconstant(_bacnet)
PROGRAM_REQUEST_UNLOAD = _bacnet.PROGRAM_REQUEST_UNLOAD
# BACnet program-state enumeration.
_bacnet.PROGRAM_STATE_IDLE_swigconstant(_bacnet)
PROGRAM_STATE_IDLE = _bacnet.PROGRAM_STATE_IDLE
_bacnet.PROGRAM_STATE_LOADING_swigconstant(_bacnet)
PROGRAM_STATE_LOADING = _bacnet.PROGRAM_STATE_LOADING
_bacnet.PROGRAM_STATE_RUNNING_swigconstant(_bacnet)
PROGRAM_STATE_RUNNING = _bacnet.PROGRAM_STATE_RUNNING
_bacnet.PROGRAM_STATE_WAITING_swigconstant(_bacnet)
PROGRAM_STATE_WAITING = _bacnet.PROGRAM_STATE_WAITING
_bacnet.PROGRAM_STATE_HALTED_swigconstant(_bacnet)
PROGRAM_STATE_HALTED = _bacnet.PROGRAM_STATE_HALTED
_bacnet.PROGRAM_STATE_UNLOADING_swigconstant(_bacnet)
PROGRAM_STATE_UNLOADING = _bacnet.PROGRAM_STATE_UNLOADING
# BACnet program-error enumeration (plus proprietary range bounds).
_bacnet.PROGRAM_ERROR_NORMAL_swigconstant(_bacnet)
PROGRAM_ERROR_NORMAL = _bacnet.PROGRAM_ERROR_NORMAL
_bacnet.PROGRAM_ERROR_LOAD_FAILED_swigconstant(_bacnet)
PROGRAM_ERROR_LOAD_FAILED = _bacnet.PROGRAM_ERROR_LOAD_FAILED
_bacnet.PROGRAM_ERROR_INTERNAL_swigconstant(_bacnet)
PROGRAM_ERROR_INTERNAL = _bacnet.PROGRAM_ERROR_INTERNAL
_bacnet.PROGRAM_ERROR_PROGRAM_swigconstant(_bacnet)
PROGRAM_ERROR_PROGRAM = _bacnet.PROGRAM_ERROR_PROGRAM
_bacnet.PROGRAM_ERROR_OTHER_swigconstant(_bacnet)
PROGRAM_ERROR_OTHER = _bacnet.PROGRAM_ERROR_OTHER
_bacnet.PROGRAM_ERROR_PROPRIETARY_MIN_swigconstant(_bacnet)
PROGRAM_ERROR_PROPRIETARY_MIN = _bacnet.PROGRAM_ERROR_PROPRIETARY_MIN
_bacnet.PROGRAM_ERROR_PROPRIETARY_MAX_swigconstant(_bacnet)
PROGRAM_ERROR_PROPRIETARY_MAX = _bacnet.PROGRAM_ERROR_PROPRIETARY_MAX
# BACnet restart-reason enumeration (plus proprietary range bounds).
_bacnet.RESTART_REASON_UNKNOWN_swigconstant(_bacnet)
RESTART_REASON_UNKNOWN = _bacnet.RESTART_REASON_UNKNOWN
_bacnet.RESTART_REASON_COLDSTART_swigconstant(_bacnet)
RESTART_REASON_COLDSTART = _bacnet.RESTART_REASON_COLDSTART
_bacnet.RESTART_REASON_WARMSTART_swigconstant(_bacnet)
RESTART_REASON_WARMSTART = _bacnet.RESTART_REASON_WARMSTART
_bacnet.RESTART_REASON_DETECTED_POWER_LOST_swigconstant(_bacnet)
RESTART_REASON_DETECTED_POWER_LOST = _bacnet.RESTART_REASON_DETECTED_POWER_LOST
_bacnet.RESTART_REASON_DETECTED_POWER_OFF_swigconstant(_bacnet)
RESTART_REASON_DETECTED_POWER_OFF = _bacnet.RESTART_REASON_DETECTED_POWER_OFF
_bacnet.RESTART_REASON_HARDWARE_WATCHDOG_swigconstant(_bacnet)
RESTART_REASON_HARDWARE_WATCHDOG = _bacnet.RESTART_REASON_HARDWARE_WATCHDOG
_bacnet.RESTART_REASON_SOFTWARE_WATCHDOG_swigconstant(_bacnet)
RESTART_REASON_SOFTWARE_WATCHDOG = _bacnet.RESTART_REASON_SOFTWARE_WATCHDOG
_bacnet.RESTART_REASON_SUSPENDED_swigconstant(_bacnet)
RESTART_REASON_SUSPENDED = _bacnet.RESTART_REASON_SUSPENDED
_bacnet.RESTART_REASON_PROPRIETARY_MIN_swigconstant(_bacnet)
RESTART_REASON_PROPRIETARY_MIN = _bacnet.RESTART_REASON_PROPRIETARY_MIN
_bacnet.RESTART_REASON_PROPRIETARY_MAX_swigconstant(_bacnet)
RESTART_REASON_PROPRIETARY_MAX = _bacnet.RESTART_REASON_PROPRIETARY_MAX
# BACnet property-states choice tags.
_bacnet.PROP_STATE_BOOLEAN_VALUE_swigconstant(_bacnet)
PROP_STATE_BOOLEAN_VALUE = _bacnet.PROP_STATE_BOOLEAN_VALUE
_bacnet.PROP_STATE_BINARY_VALUE_swigconstant(_bacnet)
PROP_STATE_BINARY_VALUE = _bacnet.PROP_STATE_BINARY_VALUE
_bacnet.PROP_STATE_EVENT_TYPE_swigconstant(_bacnet)
PROP_STATE_EVENT_TYPE = _bacnet.PROP_STATE_EVENT_TYPE
_bacnet.PROP_STATE_POLARITY_swigconstant(_bacnet)
PROP_STATE_POLARITY = _bacnet.PROP_STATE_POLARITY
_bacnet.PROP_STATE_PROGRAM_CHANGE_swigconstant(_bacnet)
PROP_STATE_PROGRAM_CHANGE = _bacnet.PROP_STATE_PROGRAM_CHANGE
_bacnet.PROP_STATE_PROGRAM_STATE_swigconstant(_bacnet)
PROP_STATE_PROGRAM_STATE = _bacnet.PROP_STATE_PROGRAM_STATE
_bacnet.PROP_STATE_REASON_FOR_HALT_swigconstant(_bacnet)
PROP_STATE_REASON_FOR_HALT = _bacnet.PROP_STATE_REASON_FOR_HALT
_bacnet.PROP_STATE_RELIABILITY_swigconstant(_bacnet)
PROP_STATE_RELIABILITY = _bacnet.PROP_STATE_RELIABILITY
_bacnet.PROP_STATE_EVENT_STATE_swigconstant(_bacnet)
PROP_STATE_EVENT_STATE = _bacnet.PROP_STATE_EVENT_STATE
_bacnet.PROP_STATE_SYSTEM_STATUS_swigconstant(_bacnet)
PROP_STATE_SYSTEM_STATUS = _bacnet.PROP_STATE_SYSTEM_STATUS
_bacnet.PROP_STATE_UNITS_swigconstant(_bacnet)
PROP_STATE_UNITS = _bacnet.PROP_STATE_UNITS
_bacnet.PROP_STATE_UNSIGNED_VALUE_swigconstant(_bacnet)
PROP_STATE_UNSIGNED_VALUE = _bacnet.PROP_STATE_UNSIGNED_VALUE
_bacnet.PROP_STATE_LIFE_SAFETY_MODE_swigconstant(_bacnet)
PROP_STATE_LIFE_SAFETY_MODE = _bacnet.PROP_STATE_LIFE_SAFETY_MODE
_bacnet.PROP_STATE_LIFE_SAFETY_STATE_swigconstant(_bacnet)
PROP_STATE_LIFE_SAFETY_STATE = _bacnet.PROP_STATE_LIFE_SAFETY_STATE
_bacnet.PROP_STATE_RESTART_REASON_swigconstant(_bacnet)
PROP_STATE_RESTART_REASON = _bacnet.PROP_STATE_RESTART_REASON
_bacnet.PROP_STATE_DOOR_ALARM_STATE_swigconstant(_bacnet)
PROP_STATE_DOOR_ALARM_STATE = _bacnet.PROP_STATE_DOOR_ALARM_STATE
_bacnet.PROP_STATE_ACTION_swigconstant(_bacnet)
PROP_STATE_ACTION = _bacnet.PROP_STATE_ACTION
_bacnet.PROP_STATE_DOOR_SECURED_STATUS_swigconstant(_bacnet)
PROP_STATE_DOOR_SECURED_STATUS = _bacnet.PROP_STATE_DOOR_SECURED_STATUS
_bacnet.PROP_STATE_DOOR_STATUS_swigconstant(_bacnet)
PROP_STATE_DOOR_STATUS = _bacnet.PROP_STATE_DOOR_STATUS
_bacnet.PROP_STATE_DOOR_VALUE_swigconstant(_bacnet)
PROP_STATE_DOOR_VALUE = _bacnet.PROP_STATE_DOOR_VALUE
_bacnet.PROP_STATE_FILE_ACCESS_METHOD_swigconstant(_bacnet)
PROP_STATE_FILE_ACCESS_METHOD = _bacnet.PROP_STATE_FILE_ACCESS_METHOD
_bacnet.PROP_STATE_LOCK_STATUS_swigconstant(_bacnet)
PROP_STATE_LOCK_STATUS = _bacnet.PROP_STATE_LOCK_STATUS
_bacnet.PROP_STATE_LIFE_SAFETY_OPERATION_swigconstant(_bacnet)
PROP_STATE_LIFE_SAFETY_OPERATION = _bacnet.PROP_STATE_LIFE_SAFETY_OPERATION
_bacnet.PROP_STATE_MAINTENANCE_swigconstant(_bacnet)
PROP_STATE_MAINTENANCE = _bacnet.PROP_STATE_MAINTENANCE
_bacnet.PROP_STATE_NODE_TYPE_swigconstant(_bacnet)
PROP_STATE_NODE_TYPE = _bacnet.PROP_STATE_NODE_TYPE
_bacnet.PROP_STATE_NOTIFY_TYPE_swigconstant(_bacnet)
PROP_STATE_NOTIFY_TYPE = _bacnet.PROP_STATE_NOTIFY_TYPE
_bacnet.PROP_STATE_SECURITY_LEVEL_swigconstant(_bacnet)
PROP_STATE_SECURITY_LEVEL = _bacnet.PROP_STATE_SECURITY_LEVEL
_bacnet.PROP_STATE_SHED_STATE_swigconstant(_bacnet)
PROP_STATE_SHED_STATE = _bacnet.PROP_STATE_SHED_STATE
_bacnet.PROP_STATE_SILENCED_STATE_swigconstant(_bacnet)
PROP_STATE_SILENCED_STATE = _bacnet.PROP_STATE_SILENCED_STATE
_bacnet.PROP_STATE_ACCESS_EVENT_swigconstant(_bacnet)
PROP_STATE_ACCESS_EVENT = _bacnet.PROP_STATE_ACCESS_EVENT
_bacnet.PROP_STATE_ZONE_OCCUPANCY_STATE_swigconstant(_bacnet)
PROP_STATE_ZONE_OCCUPANCY_STATE = _bacnet.PROP_STATE_ZONE_OCCUPANCY_STATE
_bacnet.PROP_STATE_ACCESS_CRED_DISABLE_REASON_swigconstant(_bacnet)
PROP_STATE_ACCESS_CRED_DISABLE_REASON = _bacnet.PROP_STATE_ACCESS_CRED_DISABLE_REASON
_bacnet.PROP_STATE_ACCESS_CRED_DISABLE_swigconstant(_bacnet)
PROP_STATE_ACCESS_CRED_DISABLE = _bacnet.PROP_STATE_ACCESS_CRED_DISABLE
_bacnet.PROP_STATE_AUTHENTICATION_STATUS_swigconstant(_bacnet)
PROP_STATE_AUTHENTICATION_STATUS = _bacnet.PROP_STATE_AUTHENTICATION_STATUS
_bacnet.RELIABILITY_NO_FAULT_DETECTED_swigconstant(_bacnet)
RELIABILITY_NO_FAULT_DETECTED = _bacnet.RELIABILITY_NO_FAULT_DETECTED
_bacnet.RELIABILITY_NO_SENSOR_swigconstant(_bacnet)
RELIABILITY_NO_SENSOR = _bacnet.RELIABILITY_NO_SENSOR
_bacnet.RELIABILITY_OVER_RANGE_swigconstant(_bacnet)
RELIABILITY_OVER_RANGE = _bacnet.RELIABILITY_OVER_RANGE
_bacnet.RELIABILITY_UNDER_RANGE_swigconstant(_bacnet)
RELIABILITY_UNDER_RANGE = _bacnet.RELIABILITY_UNDER_RANGE
_bacnet.RELIABILITY_OPEN_LOOP_swigconstant(_bacnet)
RELIABILITY_OPEN_LOOP = _bacnet.RELIABILITY_OPEN_LOOP
_bacnet.RELIABILITY_SHORTED_LOOP_swigconstant(_bacnet)
RELIABILITY_SHORTED_LOOP = _bacnet.RELIABILITY_SHORTED_LOOP
_bacnet.RELIABILITY_NO_OUTPUT_swigconstant(_bacnet)
RELIABILITY_NO_OUTPUT = _bacnet.RELIABILITY_NO_OUTPUT
_bacnet.RELIABILITY_UNRELIABLE_OTHER_swigconstant(_bacnet)
RELIABILITY_UNRELIABLE_OTHER = _bacnet.RELIABILITY_UNRELIABLE_OTHER
_bacnet.RELIABILITY_PROCESS_ERROR_swigconstant(_bacnet)
RELIABILITY_PROCESS_ERROR = _bacnet.RELIABILITY_PROCESS_ERROR
_bacnet.RELIABILITY_MULTI_STATE_FAULT_swigconstant(_bacnet)
RELIABILITY_MULTI_STATE_FAULT = _bacnet.RELIABILITY_MULTI_STATE_FAULT
_bacnet.RELIABILITY_CONFIGURATION_ERROR_swigconstant(_bacnet)
RELIABILITY_CONFIGURATION_ERROR = _bacnet.RELIABILITY_CONFIGURATION_ERROR
_bacnet.RELIABILITY_MEMBER_FAULT_swigconstant(_bacnet)
RELIABILITY_MEMBER_FAULT = _bacnet.RELIABILITY_MEMBER_FAULT
_bacnet.RELIABILITY_COMMUNICATION_FAILURE_swigconstant(_bacnet)
RELIABILITY_COMMUNICATION_FAILURE = _bacnet.RELIABILITY_COMMUNICATION_FAILURE
_bacnet.RELIABILITY_TRIPPED_swigconstant(_bacnet)
RELIABILITY_TRIPPED = _bacnet.RELIABILITY_TRIPPED
_bacnet.RELIABILITY_PROPRIETARY_MIN_swigconstant(_bacnet)
RELIABILITY_PROPRIETARY_MIN = _bacnet.RELIABILITY_PROPRIETARY_MIN
_bacnet.RELIABILITY_PROPRIETARY_MAX_swigconstant(_bacnet)
RELIABILITY_PROPRIETARY_MAX = _bacnet.RELIABILITY_PROPRIETARY_MAX
_bacnet.EVENT_CHANGE_OF_BITSTRING_swigconstant(_bacnet)
EVENT_CHANGE_OF_BITSTRING = _bacnet.EVENT_CHANGE_OF_BITSTRING
_bacnet.EVENT_CHANGE_OF_STATE_swigconstant(_bacnet)
EVENT_CHANGE_OF_STATE = _bacnet.EVENT_CHANGE_OF_STATE
_bacnet.EVENT_CHANGE_OF_VALUE_swigconstant(_bacnet)
EVENT_CHANGE_OF_VALUE = _bacnet.EVENT_CHANGE_OF_VALUE
_bacnet.EVENT_COMMAND_FAILURE_swigconstant(_bacnet)
EVENT_COMMAND_FAILURE = _bacnet.EVENT_COMMAND_FAILURE
_bacnet.EVENT_FLOATING_LIMIT_swigconstant(_bacnet)
EVENT_FLOATING_LIMIT = _bacnet.EVENT_FLOATING_LIMIT
_bacnet.EVENT_OUT_OF_RANGE_swigconstant(_bacnet)
EVENT_OUT_OF_RANGE = _bacnet.EVENT_OUT_OF_RANGE
_bacnet.EVENT_CHANGE_OF_LIFE_SAFETY_swigconstant(_bacnet)
EVENT_CHANGE_OF_LIFE_SAFETY = _bacnet.EVENT_CHANGE_OF_LIFE_SAFETY
_bacnet.EVENT_EXTENDED_swigconstant(_bacnet)
EVENT_EXTENDED = _bacnet.EVENT_EXTENDED
_bacnet.EVENT_BUFFER_READY_swigconstant(_bacnet)
EVENT_BUFFER_READY = _bacnet.EVENT_BUFFER_READY
_bacnet.EVENT_UNSIGNED_RANGE_swigconstant(_bacnet)
EVENT_UNSIGNED_RANGE = _bacnet.EVENT_UNSIGNED_RANGE
_bacnet.EVENT_PROPRIETARY_MIN_swigconstant(_bacnet)
EVENT_PROPRIETARY_MIN = _bacnet.EVENT_PROPRIETARY_MIN
_bacnet.EVENT_PROPRIETARY_MAX_swigconstant(_bacnet)
EVENT_PROPRIETARY_MAX = _bacnet.EVENT_PROPRIETARY_MAX
_bacnet.FILE_RECORD_ACCESS_swigconstant(_bacnet)
FILE_RECORD_ACCESS = _bacnet.FILE_RECORD_ACCESS
_bacnet.FILE_STREAM_ACCESS_swigconstant(_bacnet)
FILE_STREAM_ACCESS = _bacnet.FILE_STREAM_ACCESS
_bacnet.FILE_RECORD_AND_STREAM_ACCESS_swigconstant(_bacnet)
FILE_RECORD_AND_STREAM_ACCESS = _bacnet.FILE_RECORD_AND_STREAM_ACCESS
_bacnet.MIN_LIFE_SAFETY_MODE_swigconstant(_bacnet)
MIN_LIFE_SAFETY_MODE = _bacnet.MIN_LIFE_SAFETY_MODE
_bacnet.LIFE_SAFETY_MODE_OFF_swigconstant(_bacnet)
LIFE_SAFETY_MODE_OFF = _bacnet.LIFE_SAFETY_MODE_OFF
_bacnet.LIFE_SAFETY_MODE_ON_swigconstant(_bacnet)
LIFE_SAFETY_MODE_ON = _bacnet.LIFE_SAFETY_MODE_ON
_bacnet.LIFE_SAFETY_MODE_TEST_swigconstant(_bacnet)
LIFE_SAFETY_MODE_TEST = _bacnet.LIFE_SAFETY_MODE_TEST
_bacnet.LIFE_SAFETY_MODE_MANNED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_MANNED = _bacnet.LIFE_SAFETY_MODE_MANNED
_bacnet.LIFE_SAFETY_MODE_UNMANNED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_UNMANNED = _bacnet.LIFE_SAFETY_MODE_UNMANNED
_bacnet.LIFE_SAFETY_MODE_ARMED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_ARMED = _bacnet.LIFE_SAFETY_MODE_ARMED
_bacnet.LIFE_SAFETY_MODE_DISARMED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_DISARMED = _bacnet.LIFE_SAFETY_MODE_DISARMED
_bacnet.LIFE_SAFETY_MODE_PREARMED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_PREARMED = _bacnet.LIFE_SAFETY_MODE_PREARMED
_bacnet.LIFE_SAFETY_MODE_SLOW_swigconstant(_bacnet)
LIFE_SAFETY_MODE_SLOW = _bacnet.LIFE_SAFETY_MODE_SLOW
_bacnet.LIFE_SAFETY_MODE_FAST_swigconstant(_bacnet)
LIFE_SAFETY_MODE_FAST = _bacnet.LIFE_SAFETY_MODE_FAST
_bacnet.LIFE_SAFETY_MODE_DISCONNECTED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_DISCONNECTED = _bacnet.LIFE_SAFETY_MODE_DISCONNECTED
_bacnet.LIFE_SAFETY_MODE_ENABLED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_ENABLED = _bacnet.LIFE_SAFETY_MODE_ENABLED
_bacnet.LIFE_SAFETY_MODE_DISABLED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_DISABLED = _bacnet.LIFE_SAFETY_MODE_DISABLED
_bacnet.LIFE_SAFETY_MODE_AUTOMATIC_RELEASE_DISABLED_swigconstant(_bacnet)
LIFE_SAFETY_MODE_AUTOMATIC_RELEASE_DISABLED = _bacnet.LIFE_SAFETY_MODE_AUTOMATIC_RELEASE_DISABLED
_bacnet.LIFE_SAFETY_MODE_DEFAULT_swigconstant(_bacnet)
LIFE_SAFETY_MODE_DEFAULT = _bacnet.LIFE_SAFETY_MODE_DEFAULT
_bacnet.MAX_LIFE_SAFETY_MODE_swigconstant(_bacnet)
MAX_LIFE_SAFETY_MODE = _bacnet.MAX_LIFE_SAFETY_MODE
_bacnet.LIFE_SAFETY_MODE_PROPRIETARY_MIN_swigconstant(_bacnet)
LIFE_SAFETY_MODE_PROPRIETARY_MIN = _bacnet.LIFE_SAFETY_MODE_PROPRIETARY_MIN
_bacnet.LIFE_SAFETY_MODE_PROPRIETARY_MAX_swigconstant(_bacnet)
LIFE_SAFETY_MODE_PROPRIETARY_MAX = _bacnet.LIFE_SAFETY_MODE_PROPRIETARY_MAX
_bacnet.LIFE_SAFETY_OP_NONE_swigconstant(_bacnet)
LIFE_SAFETY_OP_NONE = _bacnet.LIFE_SAFETY_OP_NONE
_bacnet.LIFE_SAFETY_OP_SILENCE_swigconstant(_bacnet)
LIFE_SAFETY_OP_SILENCE = _bacnet.LIFE_SAFETY_OP_SILENCE
_bacnet.LIFE_SAFETY_OP_SILENCE_AUDIBLE_swigconstant(_bacnet)
LIFE_SAFETY_OP_SILENCE_AUDIBLE = _bacnet.LIFE_SAFETY_OP_SILENCE_AUDIBLE
_bacnet.LIFE_SAFETY_OP_SILENCE_VISUAL_swigconstant(_bacnet)
LIFE_SAFETY_OP_SILENCE_VISUAL = _bacnet.LIFE_SAFETY_OP_SILENCE_VISUAL
_bacnet.LIFE_SAFETY_OP_RESET_swigconstant(_bacnet)
LIFE_SAFETY_OP_RESET = _bacnet.LIFE_SAFETY_OP_RESET
_bacnet.LIFE_SAFETY_OP_RESET_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_OP_RESET_ALARM = _bacnet.LIFE_SAFETY_OP_RESET_ALARM
_bacnet.LIFE_SAFETY_OP_RESET_FAULT_swigconstant(_bacnet)
LIFE_SAFETY_OP_RESET_FAULT = _bacnet.LIFE_SAFETY_OP_RESET_FAULT
_bacnet.LIFE_SAFETY_OP_UNSILENCE_swigconstant(_bacnet)
LIFE_SAFETY_OP_UNSILENCE = _bacnet.LIFE_SAFETY_OP_UNSILENCE
_bacnet.LIFE_SAFETY_OP_UNSILENCE_AUDIBLE_swigconstant(_bacnet)
LIFE_SAFETY_OP_UNSILENCE_AUDIBLE = _bacnet.LIFE_SAFETY_OP_UNSILENCE_AUDIBLE
_bacnet.LIFE_SAFETY_OP_UNSILENCE_VISUAL_swigconstant(_bacnet)
LIFE_SAFETY_OP_UNSILENCE_VISUAL = _bacnet.LIFE_SAFETY_OP_UNSILENCE_VISUAL
_bacnet.LIFE_SAFETY_OP_PROPRIETARY_MIN_swigconstant(_bacnet)
LIFE_SAFETY_OP_PROPRIETARY_MIN = _bacnet.LIFE_SAFETY_OP_PROPRIETARY_MIN
_bacnet.LIFE_SAFETY_OP_PROPRIETARY_MAX_swigconstant(_bacnet)
LIFE_SAFETY_OP_PROPRIETARY_MAX = _bacnet.LIFE_SAFETY_OP_PROPRIETARY_MAX
_bacnet.MIN_LIFE_SAFETY_STATE_swigconstant(_bacnet)
MIN_LIFE_SAFETY_STATE = _bacnet.MIN_LIFE_SAFETY_STATE
_bacnet.LIFE_SAFETY_STATE_QUIET_swigconstant(_bacnet)
LIFE_SAFETY_STATE_QUIET = _bacnet.LIFE_SAFETY_STATE_QUIET
_bacnet.LIFE_SAFETY_STATE_PRE_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_PRE_ALARM = _bacnet.LIFE_SAFETY_STATE_PRE_ALARM
_bacnet.LIFE_SAFETY_STATE_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_ALARM = _bacnet.LIFE_SAFETY_STATE_ALARM
_bacnet.LIFE_SAFETY_STATE_FAULT_swigconstant(_bacnet)
LIFE_SAFETY_STATE_FAULT = _bacnet.LIFE_SAFETY_STATE_FAULT
_bacnet.LIFE_SAFETY_STATE_FAULT_PRE_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_FAULT_PRE_ALARM = _bacnet.LIFE_SAFETY_STATE_FAULT_PRE_ALARM
_bacnet.LIFE_SAFETY_STATE_FAULT_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_FAULT_ALARM = _bacnet.LIFE_SAFETY_STATE_FAULT_ALARM
_bacnet.LIFE_SAFETY_STATE_NOT_READY_swigconstant(_bacnet)
LIFE_SAFETY_STATE_NOT_READY = _bacnet.LIFE_SAFETY_STATE_NOT_READY
_bacnet.LIFE_SAFETY_STATE_ACTIVE_swigconstant(_bacnet)
LIFE_SAFETY_STATE_ACTIVE = _bacnet.LIFE_SAFETY_STATE_ACTIVE
_bacnet.LIFE_SAFETY_STATE_TAMPER_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TAMPER = _bacnet.LIFE_SAFETY_STATE_TAMPER
_bacnet.LIFE_SAFETY_STATE_TEST_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TEST_ALARM = _bacnet.LIFE_SAFETY_STATE_TEST_ALARM
_bacnet.LIFE_SAFETY_STATE_TEST_ACTIVE_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TEST_ACTIVE = _bacnet.LIFE_SAFETY_STATE_TEST_ACTIVE
_bacnet.LIFE_SAFETY_STATE_TEST_FAULT_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TEST_FAULT = _bacnet.LIFE_SAFETY_STATE_TEST_FAULT
_bacnet.LIFE_SAFETY_STATE_TEST_FAULT_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TEST_FAULT_ALARM = _bacnet.LIFE_SAFETY_STATE_TEST_FAULT_ALARM
_bacnet.LIFE_SAFETY_STATE_HOLDUP_swigconstant(_bacnet)
LIFE_SAFETY_STATE_HOLDUP = _bacnet.LIFE_SAFETY_STATE_HOLDUP
_bacnet.LIFE_SAFETY_STATE_DURESS_swigconstant(_bacnet)
LIFE_SAFETY_STATE_DURESS = _bacnet.LIFE_SAFETY_STATE_DURESS
_bacnet.LIFE_SAFETY_STATE_TAMPER_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TAMPER_ALARM = _bacnet.LIFE_SAFETY_STATE_TAMPER_ALARM
_bacnet.LIFE_SAFETY_STATE_ABNORMAL_swigconstant(_bacnet)
LIFE_SAFETY_STATE_ABNORMAL = _bacnet.LIFE_SAFETY_STATE_ABNORMAL
_bacnet.LIFE_SAFETY_STATE_EMERGENCY_POWER_swigconstant(_bacnet)
LIFE_SAFETY_STATE_EMERGENCY_POWER = _bacnet.LIFE_SAFETY_STATE_EMERGENCY_POWER
_bacnet.LIFE_SAFETY_STATE_DELAYED_swigconstant(_bacnet)
LIFE_SAFETY_STATE_DELAYED = _bacnet.LIFE_SAFETY_STATE_DELAYED
_bacnet.LIFE_SAFETY_STATE_BLOCKED_swigconstant(_bacnet)
LIFE_SAFETY_STATE_BLOCKED = _bacnet.LIFE_SAFETY_STATE_BLOCKED
_bacnet.LIFE_SAFETY_STATE_LOCAL_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_LOCAL_ALARM = _bacnet.LIFE_SAFETY_STATE_LOCAL_ALARM
_bacnet.LIFE_SAFETY_STATE_GENERAL_ALARM_swigconstant(_bacnet)
LIFE_SAFETY_STATE_GENERAL_ALARM = _bacnet.LIFE_SAFETY_STATE_GENERAL_ALARM
_bacnet.LIFE_SAFETY_STATE_SUPERVISORY_swigconstant(_bacnet)
LIFE_SAFETY_STATE_SUPERVISORY = _bacnet.LIFE_SAFETY_STATE_SUPERVISORY
_bacnet.LIFE_SAFETY_STATE_TEST_SUPERVISORY_swigconstant(_bacnet)
LIFE_SAFETY_STATE_TEST_SUPERVISORY = _bacnet.LIFE_SAFETY_STATE_TEST_SUPERVISORY
_bacnet.MAX_LIFE_SAFETY_STATE_swigconstant(_bacnet)
MAX_LIFE_SAFETY_STATE = _bacnet.MAX_LIFE_SAFETY_STATE
_bacnet.LIFE_SAFETY_STATE_PROPRIETARY_MIN_swigconstant(_bacnet)
LIFE_SAFETY_STATE_PROPRIETARY_MIN = _bacnet.LIFE_SAFETY_STATE_PROPRIETARY_MIN
_bacnet.LIFE_SAFETY_STATE_PROPRIETARY_MAX_swigconstant(_bacnet)
LIFE_SAFETY_STATE_PROPRIETARY_MAX = _bacnet.LIFE_SAFETY_STATE_PROPRIETARY_MAX
_bacnet.SILENCED_STATE_UNSILENCED_swigconstant(_bacnet)
SILENCED_STATE_UNSILENCED = _bacnet.SILENCED_STATE_UNSILENCED
_bacnet.SILENCED_STATE_AUDIBLE_SILENCED_swigconstant(_bacnet)
SILENCED_STATE_AUDIBLE_SILENCED = _bacnet.SILENCED_STATE_AUDIBLE_SILENCED
_bacnet.SILENCED_STATE_VISIBLE_SILENCED_swigconstant(_bacnet)
SILENCED_STATE_VISIBLE_SILENCED = _bacnet.SILENCED_STATE_VISIBLE_SILENCED
_bacnet.SILENCED_STATE_ALL_SILENCED_swigconstant(_bacnet)
SILENCED_STATE_ALL_SILENCED = _bacnet.SILENCED_STATE_ALL_SILENCED
_bacnet.SILENCED_STATE_PROPRIETARY_MIN_swigconstant(_bacnet)
SILENCED_STATE_PROPRIETARY_MIN = _bacnet.SILENCED_STATE_PROPRIETARY_MIN
_bacnet.SILENCED_STATE_PROPRIETARY_MAX_swigconstant(_bacnet)
SILENCED_STATE_PROPRIETARY_MAX = _bacnet.SILENCED_STATE_PROPRIETARY_MAX
_bacnet.MAINTENANCE_NONE_swigconstant(_bacnet)
MAINTENANCE_NONE = _bacnet.MAINTENANCE_NONE
_bacnet.MAINTENANCE_PERIODIC_TEST_swigconstant(_bacnet)
MAINTENANCE_PERIODIC_TEST = _bacnet.MAINTENANCE_PERIODIC_TEST
_bacnet.MAINTENANCE_NEED_SERVICE_OPERATIONAL_swigconstant(_bacnet)
MAINTENANCE_NEED_SERVICE_OPERATIONAL = _bacnet.MAINTENANCE_NEED_SERVICE_OPERATIONAL
_bacnet.MAINTENANCE_NEED_SERVICE_INOPERATIVE_swigconstant(_bacnet)
MAINTENANCE_NEED_SERVICE_INOPERATIVE = _bacnet.MAINTENANCE_NEED_SERVICE_INOPERATIVE
_bacnet.MAINTENANCE_PROPRIETARY_MIN_swigconstant(_bacnet)
MAINTENANCE_PROPRIETARY_MIN = _bacnet.MAINTENANCE_PROPRIETARY_MIN
_bacnet.MAINTENANCE_PROPRIETARY_MAX_swigconstant(_bacnet)
MAINTENANCE_PROPRIETARY_MAX = _bacnet.MAINTENANCE_PROPRIETARY_MAX
_bacnet.NOTIFY_ALARM_swigconstant(_bacnet)
NOTIFY_ALARM = _bacnet.NOTIFY_ALARM
_bacnet.NOTIFY_EVENT_swigconstant(_bacnet)
NOTIFY_EVENT = _bacnet.NOTIFY_EVENT
_bacnet.NOTIFY_ACK_NOTIFICATION_swigconstant(_bacnet)
NOTIFY_ACK_NOTIFICATION = _bacnet.NOTIFY_ACK_NOTIFICATION
_bacnet.OBJECT_ANALOG_INPUT_swigconstant(_bacnet)
OBJECT_ANALOG_INPUT = _bacnet.OBJECT_ANALOG_INPUT
_bacnet.OBJECT_ANALOG_OUTPUT_swigconstant(_bacnet)
OBJECT_ANALOG_OUTPUT = _bacnet.OBJECT_ANALOG_OUTPUT
_bacnet.OBJECT_ANALOG_VALUE_swigconstant(_bacnet)
OBJECT_ANALOG_VALUE = _bacnet.OBJECT_ANALOG_VALUE
_bacnet.OBJECT_BINARY_INPUT_swigconstant(_bacnet)
OBJECT_BINARY_INPUT = _bacnet.OBJECT_BINARY_INPUT
_bacnet.OBJECT_BINARY_OUTPUT_swigconstant(_bacnet)
OBJECT_BINARY_OUTPUT = _bacnet.OBJECT_BINARY_OUTPUT
_bacnet.OBJECT_BINARY_VALUE_swigconstant(_bacnet)
OBJECT_BINARY_VALUE = _bacnet.OBJECT_BINARY_VALUE
_bacnet.OBJECT_CALENDAR_swigconstant(_bacnet)
OBJECT_CALENDAR = _bacnet.OBJECT_CALENDAR
_bacnet.OBJECT_COMMAND_swigconstant(_bacnet)
OBJECT_COMMAND = _bacnet.OBJECT_COMMAND
_bacnet.OBJECT_DEVICE_swigconstant(_bacnet)
OBJECT_DEVICE = _bacnet.OBJECT_DEVICE
_bacnet.OBJECT_EVENT_ENROLLMENT_swigconstant(_bacnet)
OBJECT_EVENT_ENROLLMENT = _bacnet.OBJECT_EVENT_ENROLLMENT
_bacnet.OBJECT_FILE_swigconstant(_bacnet)
OBJECT_FILE = _bacnet.OBJECT_FILE
_bacnet.OBJECT_GROUP_swigconstant(_bacnet)
OBJECT_GROUP = _bacnet.OBJECT_GROUP
_bacnet.OBJECT_LOOP_swigconstant(_bacnet)
OBJECT_LOOP = _bacnet.OBJECT_LOOP
_bacnet.OBJECT_MULTI_STATE_INPUT_swigconstant(_bacnet)
OBJECT_MULTI_STATE_INPUT = _bacnet.OBJECT_MULTI_STATE_INPUT
_bacnet.OBJECT_MULTI_STATE_OUTPUT_swigconstant(_bacnet)
OBJECT_MULTI_STATE_OUTPUT = _bacnet.OBJECT_MULTI_STATE_OUTPUT
_bacnet.OBJECT_NOTIFICATION_CLASS_swigconstant(_bacnet)
OBJECT_NOTIFICATION_CLASS = _bacnet.OBJECT_NOTIFICATION_CLASS
_bacnet.OBJECT_PROGRAM_swigconstant(_bacnet)
OBJECT_PROGRAM = _bacnet.OBJECT_PROGRAM
_bacnet.OBJECT_SCHEDULE_swigconstant(_bacnet)
OBJECT_SCHEDULE = _bacnet.OBJECT_SCHEDULE
_bacnet.OBJECT_AVERAGING_swigconstant(_bacnet)
OBJECT_AVERAGING = _bacnet.OBJECT_AVERAGING
_bacnet.OBJECT_MULTI_STATE_VALUE_swigconstant(_bacnet)
OBJECT_MULTI_STATE_VALUE = _bacnet.OBJECT_MULTI_STATE_VALUE
_bacnet.OBJECT_TRENDLOG_swigconstant(_bacnet)
OBJECT_TRENDLOG = _bacnet.OBJECT_TRENDLOG
_bacnet.OBJECT_LIFE_SAFETY_POINT_swigconstant(_bacnet)
OBJECT_LIFE_SAFETY_POINT = _bacnet.OBJECT_LIFE_SAFETY_POINT
_bacnet.OBJECT_LIFE_SAFETY_ZONE_swigconstant(_bacnet)
OBJECT_LIFE_SAFETY_ZONE = _bacnet.OBJECT_LIFE_SAFETY_ZONE
_bacnet.OBJECT_ACCUMULATOR_swigconstant(_bacnet)
OBJECT_ACCUMULATOR = _bacnet.OBJECT_ACCUMULATOR
_bacnet.OBJECT_PULSE_CONVERTER_swigconstant(_bacnet)
OBJECT_PULSE_CONVERTER = _bacnet.OBJECT_PULSE_CONVERTER
_bacnet.OBJECT_EVENT_LOG_swigconstant(_bacnet)
OBJECT_EVENT_LOG = _bacnet.OBJECT_EVENT_LOG
_bacnet.OBJECT_GLOBAL_GROUP_swigconstant(_bacnet)
OBJECT_GLOBAL_GROUP = _bacnet.OBJECT_GLOBAL_GROUP
_bacnet.OBJECT_TREND_LOG_MULTIPLE_swigconstant(_bacnet)
OBJECT_TREND_LOG_MULTIPLE = _bacnet.OBJECT_TREND_LOG_MULTIPLE
_bacnet.OBJECT_LOAD_CONTROL_swigconstant(_bacnet)
OBJECT_LOAD_CONTROL = _bacnet.OBJECT_LOAD_CONTROL
_bacnet.OBJECT_STRUCTURED_VIEW_swigconstant(_bacnet)
OBJECT_STRUCTURED_VIEW = _bacnet.OBJECT_STRUCTURED_VIEW
_bacnet.OBJECT_ACCESS_DOOR_swigconstant(_bacnet)
OBJECT_ACCESS_DOOR = _bacnet.OBJECT_ACCESS_DOOR
_bacnet.OBJECT_ACCESS_CREDENTIAL_swigconstant(_bacnet)
OBJECT_ACCESS_CREDENTIAL = _bacnet.OBJECT_ACCESS_CREDENTIAL
_bacnet.OBJECT_ACCESS_POINT_swigconstant(_bacnet)
OBJECT_ACCESS_POINT = _bacnet.OBJECT_ACCESS_POINT
_bacnet.OBJECT_ACCESS_RIGHTS_swigconstant(_bacnet)
OBJECT_ACCESS_RIGHTS = _bacnet.OBJECT_ACCESS_RIGHTS
_bacnet.OBJECT_ACCESS_USER_swigconstant(_bacnet)
OBJECT_ACCESS_USER = _bacnet.OBJECT_ACCESS_USER
_bacnet.OBJECT_ACCESS_ZONE_swigconstant(_bacnet)
OBJECT_ACCESS_ZONE = _bacnet.OBJECT_ACCESS_ZONE
_bacnet.OBJECT_CREDENTIAL_DATA_INPUT_swigconstant(_bacnet)
OBJECT_CREDENTIAL_DATA_INPUT = _bacnet.OBJECT_CREDENTIAL_DATA_INPUT
_bacnet.OBJECT_NETWORK_SECURITY_swigconstant(_bacnet)
OBJECT_NETWORK_SECURITY = _bacnet.OBJECT_NETWORK_SECURITY
_bacnet.OBJECT_BITSTRING_VALUE_swigconstant(_bacnet)
OBJECT_BITSTRING_VALUE = _bacnet.OBJECT_BITSTRING_VALUE
_bacnet.OBJECT_CHARACTERSTRING_VALUE_swigconstant(_bacnet)
OBJECT_CHARACTERSTRING_VALUE = _bacnet.OBJECT_CHARACTERSTRING_VALUE
_bacnet.OBJECT_DATE_PATTERN_VALUE_swigconstant(_bacnet)
OBJECT_DATE_PATTERN_VALUE = _bacnet.OBJECT_DATE_PATTERN_VALUE
_bacnet.OBJECT_DATE_VALUE_swigconstant(_bacnet)
OBJECT_DATE_VALUE = _bacnet.OBJECT_DATE_VALUE
_bacnet.OBJECT_DATETIME_PATTERN_VALUE_swigconstant(_bacnet)
OBJECT_DATETIME_PATTERN_VALUE = _bacnet.OBJECT_DATETIME_PATTERN_VALUE
_bacnet.OBJECT_DATETIME_VALUE_swigconstant(_bacnet)
OBJECT_DATETIME_VALUE = _bacnet.OBJECT_DATETIME_VALUE
_bacnet.OBJECT_INTEGER_VALUE_swigconstant(_bacnet)
OBJECT_INTEGER_VALUE = _bacnet.OBJECT_INTEGER_VALUE
_bacnet.OBJECT_LARGE_ANALOG_VALUE_swigconstant(_bacnet)
OBJECT_LARGE_ANALOG_VALUE = _bacnet.OBJECT_LARGE_ANALOG_VALUE
_bacnet.OBJECT_OCTETSTRING_VALUE_swigconstant(_bacnet)
OBJECT_OCTETSTRING_VALUE = _bacnet.OBJECT_OCTETSTRING_VALUE
_bacnet.OBJECT_POSITIVE_INTEGER_VALUE_swigconstant(_bacnet)
OBJECT_POSITIVE_INTEGER_VALUE = _bacnet.OBJECT_POSITIVE_INTEGER_VALUE
_bacnet.OBJECT_TIME_PATTERN_VALUE_swigconstant(_bacnet)
OBJECT_TIME_PATTERN_VALUE = _bacnet.OBJECT_TIME_PATTERN_VALUE
_bacnet.OBJECT_TIME_VALUE_swigconstant(_bacnet)
OBJECT_TIME_VALUE = _bacnet.OBJECT_TIME_VALUE
_bacnet.OBJECT_NOTIFICATION_FORWARDER_swigconstant(_bacnet)
OBJECT_NOTIFICATION_FORWARDER = _bacnet.OBJECT_NOTIFICATION_FORWARDER
_bacnet.OBJECT_ALERT_ENROLLMENT_swigconstant(_bacnet)
OBJECT_ALERT_ENROLLMENT = _bacnet.OBJECT_ALERT_ENROLLMENT
_bacnet.OBJECT_CHANNEL_swigconstant(_bacnet)
OBJECT_CHANNEL = _bacnet.OBJECT_CHANNEL
_bacnet.OBJECT_LIGHTING_OUTPUT_swigconstant(_bacnet)
OBJECT_LIGHTING_OUTPUT = _bacnet.OBJECT_LIGHTING_OUTPUT
_bacnet.OBJECT_PROPRIETARY_MIN_swigconstant(_bacnet)
OBJECT_PROPRIETARY_MIN = _bacnet.OBJECT_PROPRIETARY_MIN
_bacnet.OBJECT_PROPRIETARY_MAX_swigconstant(_bacnet)
OBJECT_PROPRIETARY_MAX = _bacnet.OBJECT_PROPRIETARY_MAX
_bacnet.MAX_BACNET_OBJECT_TYPE_swigconstant(_bacnet)
MAX_BACNET_OBJECT_TYPE = _bacnet.MAX_BACNET_OBJECT_TYPE
_bacnet.SEGMENTATION_BOTH_swigconstant(_bacnet)
SEGMENTATION_BOTH = _bacnet.SEGMENTATION_BOTH
_bacnet.SEGMENTATION_TRANSMIT_swigconstant(_bacnet)
SEGMENTATION_TRANSMIT = _bacnet.SEGMENTATION_TRANSMIT
_bacnet.SEGMENTATION_RECEIVE_swigconstant(_bacnet)
SEGMENTATION_RECEIVE = _bacnet.SEGMENTATION_RECEIVE
_bacnet.SEGMENTATION_NONE_swigconstant(_bacnet)
SEGMENTATION_NONE = _bacnet.SEGMENTATION_NONE
_bacnet.MAX_BACNET_SEGMENTATION_swigconstant(_bacnet)
MAX_BACNET_SEGMENTATION = _bacnet.MAX_BACNET_SEGMENTATION
_bacnet.VT_CLASS_DEFAULT_swigconstant(_bacnet)
VT_CLASS_DEFAULT = _bacnet.VT_CLASS_DEFAULT
_bacnet.VT_CLASS_ANSI_X34_swigconstant(_bacnet)
VT_CLASS_ANSI_X34 = _bacnet.VT_CLASS_ANSI_X34
_bacnet.VT_CLASS_DEC_VT52_swigconstant(_bacnet)
VT_CLASS_DEC_VT52 = _bacnet.VT_CLASS_DEC_VT52
_bacnet.VT_CLASS_DEC_VT100_swigconstant(_bacnet)
VT_CLASS_DEC_VT100 = _bacnet.VT_CLASS_DEC_VT100
_bacnet.VT_CLASS_DEC_VT220_swigconstant(_bacnet)
VT_CLASS_DEC_VT220 = _bacnet.VT_CLASS_DEC_VT220
_bacnet.VT_CLASS_HP_700_94_swigconstant(_bacnet)
VT_CLASS_HP_700_94 = _bacnet.VT_CLASS_HP_700_94
_bacnet.VT_CLASS_IBM_3130_swigconstant(_bacnet)
VT_CLASS_IBM_3130 = _bacnet.VT_CLASS_IBM_3130
_bacnet.VT_CLASS_PROPRIETARY_MIN_swigconstant(_bacnet)
VT_CLASS_PROPRIETARY_MIN = _bacnet.VT_CLASS_PROPRIETARY_MIN
_bacnet.VT_CLASS_PROPRIETARY_MAX_swigconstant(_bacnet)
VT_CLASS_PROPRIETARY_MAX = _bacnet.VT_CLASS_PROPRIETARY_MAX
_bacnet.CHARACTER_ANSI_X34_swigconstant(_bacnet)
CHARACTER_ANSI_X34 = _bacnet.CHARACTER_ANSI_X34
_bacnet.CHARACTER_UTF8_swigconstant(_bacnet)
CHARACTER_UTF8 = _bacnet.CHARACTER_UTF8
_bacnet.CHARACTER_MS_DBCS_swigconstant(_bacnet)
CHARACTER_MS_DBCS = _bacnet.CHARACTER_MS_DBCS
_bacnet.CHARACTER_JISC_6226_swigconstant(_bacnet)
CHARACTER_JISC_6226 = _bacnet.CHARACTER_JISC_6226
_bacnet.CHARACTER_UCS4_swigconstant(_bacnet)
CHARACTER_UCS4 = _bacnet.CHARACTER_UCS4
_bacnet.CHARACTER_UCS2_swigconstant(_bacnet)
CHARACTER_UCS2 = _bacnet.CHARACTER_UCS2
_bacnet.CHARACTER_ISO8859_swigconstant(_bacnet)
CHARACTER_ISO8859 = _bacnet.CHARACTER_ISO8859
_bacnet.MAX_CHARACTER_STRING_ENCODING_swigconstant(_bacnet)
MAX_CHARACTER_STRING_ENCODING = _bacnet.MAX_CHARACTER_STRING_ENCODING
_bacnet.BACNET_APPLICATION_TAG_NULL_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_NULL = _bacnet.BACNET_APPLICATION_TAG_NULL
_bacnet.BACNET_APPLICATION_TAG_BOOLEAN_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_BOOLEAN = _bacnet.BACNET_APPLICATION_TAG_BOOLEAN
_bacnet.BACNET_APPLICATION_TAG_UNSIGNED_INT_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_UNSIGNED_INT = _bacnet.BACNET_APPLICATION_TAG_UNSIGNED_INT
_bacnet.BACNET_APPLICATION_TAG_SIGNED_INT_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_SIGNED_INT = _bacnet.BACNET_APPLICATION_TAG_SIGNED_INT
_bacnet.BACNET_APPLICATION_TAG_REAL_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_REAL = _bacnet.BACNET_APPLICATION_TAG_REAL
_bacnet.BACNET_APPLICATION_TAG_DOUBLE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DOUBLE = _bacnet.BACNET_APPLICATION_TAG_DOUBLE
_bacnet.BACNET_APPLICATION_TAG_OCTET_STRING_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_OCTET_STRING = _bacnet.BACNET_APPLICATION_TAG_OCTET_STRING
_bacnet.BACNET_APPLICATION_TAG_CHARACTER_STRING_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_CHARACTER_STRING = _bacnet.BACNET_APPLICATION_TAG_CHARACTER_STRING
_bacnet.BACNET_APPLICATION_TAG_BIT_STRING_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_BIT_STRING = _bacnet.BACNET_APPLICATION_TAG_BIT_STRING
_bacnet.BACNET_APPLICATION_TAG_ENUMERATED_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_ENUMERATED = _bacnet.BACNET_APPLICATION_TAG_ENUMERATED
_bacnet.BACNET_APPLICATION_TAG_DATE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DATE = _bacnet.BACNET_APPLICATION_TAG_DATE
_bacnet.BACNET_APPLICATION_TAG_TIME_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_TIME = _bacnet.BACNET_APPLICATION_TAG_TIME
_bacnet.BACNET_APPLICATION_TAG_OBJECT_ID_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_OBJECT_ID = _bacnet.BACNET_APPLICATION_TAG_OBJECT_ID
_bacnet.BACNET_APPLICATION_TAG_RESERVE1_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_RESERVE1 = _bacnet.BACNET_APPLICATION_TAG_RESERVE1
_bacnet.BACNET_APPLICATION_TAG_RESERVE2_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_RESERVE2 = _bacnet.BACNET_APPLICATION_TAG_RESERVE2
_bacnet.BACNET_APPLICATION_TAG_RESERVE3_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_RESERVE3 = _bacnet.BACNET_APPLICATION_TAG_RESERVE3
_bacnet.MAX_BACNET_APPLICATION_TAG_swigconstant(_bacnet)
MAX_BACNET_APPLICATION_TAG = _bacnet.MAX_BACNET_APPLICATION_TAG
_bacnet.BACNET_APPLICATION_TAG_EMPTYLIST_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_EMPTYLIST = _bacnet.BACNET_APPLICATION_TAG_EMPTYLIST
_bacnet.BACNET_APPLICATION_TAG_WEEKNDAY_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_WEEKNDAY = _bacnet.BACNET_APPLICATION_TAG_WEEKNDAY
_bacnet.BACNET_APPLICATION_TAG_DATERANGE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DATERANGE = _bacnet.BACNET_APPLICATION_TAG_DATERANGE
_bacnet.BACNET_APPLICATION_TAG_DATETIME_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DATETIME = _bacnet.BACNET_APPLICATION_TAG_DATETIME
_bacnet.BACNET_APPLICATION_TAG_TIMESTAMP_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_TIMESTAMP = _bacnet.BACNET_APPLICATION_TAG_TIMESTAMP
_bacnet.BACNET_APPLICATION_TAG_ERROR_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_ERROR = _bacnet.BACNET_APPLICATION_TAG_ERROR
_bacnet.BACNET_APPLICATION_TAG_DEVICE_OBJECT_PROPERTY_REFERENCE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DEVICE_OBJECT_PROPERTY_REFERENCE = _bacnet.BACNET_APPLICATION_TAG_DEVICE_OBJECT_PROPERTY_REFERENCE
_bacnet.BACNET_APPLICATION_TAG_DEVICE_OBJECT_REFERENCE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DEVICE_OBJECT_REFERENCE = _bacnet.BACNET_APPLICATION_TAG_DEVICE_OBJECT_REFERENCE
_bacnet.BACNET_APPLICATION_TAG_OBJECT_PROPERTY_REFERENCE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_OBJECT_PROPERTY_REFERENCE = _bacnet.BACNET_APPLICATION_TAG_OBJECT_PROPERTY_REFERENCE
_bacnet.BACNET_APPLICATION_TAG_DESTINATION_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_DESTINATION = _bacnet.BACNET_APPLICATION_TAG_DESTINATION
_bacnet.BACNET_APPLICATION_TAG_RECIPIENT_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_RECIPIENT = _bacnet.BACNET_APPLICATION_TAG_RECIPIENT
_bacnet.BACNET_APPLICATION_TAG_COV_SUBSCRIPTION_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_COV_SUBSCRIPTION = _bacnet.BACNET_APPLICATION_TAG_COV_SUBSCRIPTION
_bacnet.BACNET_APPLICATION_TAG_CALENDAR_ENTRY_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_CALENDAR_ENTRY = _bacnet.BACNET_APPLICATION_TAG_CALENDAR_ENTRY
_bacnet.BACNET_APPLICATION_TAG_WEEKLY_SCHEDULE_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_WEEKLY_SCHEDULE = _bacnet.BACNET_APPLICATION_TAG_WEEKLY_SCHEDULE
_bacnet.BACNET_APPLICATION_TAG_SPECIAL_EVENT_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_SPECIAL_EVENT = _bacnet.BACNET_APPLICATION_TAG_SPECIAL_EVENT
_bacnet.BACNET_APPLICATION_TAG_READ_ACCESS_SPECIFICATION_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_READ_ACCESS_SPECIFICATION = _bacnet.BACNET_APPLICATION_TAG_READ_ACCESS_SPECIFICATION
_bacnet.BACNET_APPLICATION_TAG_LIGHTING_COMMAND_swigconstant(_bacnet)
BACNET_APPLICATION_TAG_LIGHTING_COMMAND = _bacnet.BACNET_APPLICATION_TAG_LIGHTING_COMMAND
_bacnet.PDU_TYPE_CONFIRMED_SERVICE_REQUEST_swigconstant(_bacnet)
PDU_TYPE_CONFIRMED_SERVICE_REQUEST = _bacnet.PDU_TYPE_CONFIRMED_SERVICE_REQUEST
_bacnet.PDU_TYPE_UNCONFIRMED_SERVICE_REQUEST_swigconstant(_bacnet)
PDU_TYPE_UNCONFIRMED_SERVICE_REQUEST = _bacnet.PDU_TYPE_UNCONFIRMED_SERVICE_REQUEST
_bacnet.PDU_TYPE_SIMPLE_ACK_swigconstant(_bacnet)
PDU_TYPE_SIMPLE_ACK = _bacnet.PDU_TYPE_SIMPLE_ACK
_bacnet.PDU_TYPE_COMPLEX_ACK_swigconstant(_bacnet)
PDU_TYPE_COMPLEX_ACK = _bacnet.PDU_TYPE_COMPLEX_ACK
_bacnet.PDU_TYPE_SEGMENT_ACK_swigconstant(_bacnet)
PDU_TYPE_SEGMENT_ACK = _bacnet.PDU_TYPE_SEGMENT_ACK
_bacnet.PDU_TYPE_ERROR_swigconstant(_bacnet)
PDU_TYPE_ERROR = _bacnet.PDU_TYPE_ERROR
_bacnet.PDU_TYPE_REJECT_swigconstant(_bacnet)
PDU_TYPE_REJECT = _bacnet.PDU_TYPE_REJECT
_bacnet.PDU_TYPE_ABORT_swigconstant(_bacnet)
PDU_TYPE_ABORT = _bacnet.PDU_TYPE_ABORT
_bacnet.SERVICE_CONFIRMED_ACKNOWLEDGE_ALARM_swigconstant(_bacnet)
SERVICE_CONFIRMED_ACKNOWLEDGE_ALARM = _bacnet.SERVICE_CONFIRMED_ACKNOWLEDGE_ALARM
_bacnet.SERVICE_CONFIRMED_COV_NOTIFICATION_swigconstant(_bacnet)
SERVICE_CONFIRMED_COV_NOTIFICATION = _bacnet.SERVICE_CONFIRMED_COV_NOTIFICATION
_bacnet.SERVICE_CONFIRMED_EVENT_NOTIFICATION_swigconstant(_bacnet)
SERVICE_CONFIRMED_EVENT_NOTIFICATION = _bacnet.SERVICE_CONFIRMED_EVENT_NOTIFICATION
_bacnet.SERVICE_CONFIRMED_GET_ALARM_SUMMARY_swigconstant(_bacnet)
SERVICE_CONFIRMED_GET_ALARM_SUMMARY = _bacnet.SERVICE_CONFIRMED_GET_ALARM_SUMMARY
# SWIG-generated constant bridging: for each BACnet enum value, first invoke
# the generated <NAME>_swigconstant hook on the low-level _bacnet extension
# module, then mirror the resulting value as a module-level attribute of the
# same name so callers can reference it directly from this wrapper module.
for _swig_const_name in (
    'SERVICE_CONFIRMED_GET_ENROLLMENT_SUMMARY',
    'SERVICE_CONFIRMED_GET_EVENT_INFORMATION',
    'SERVICE_CONFIRMED_SUBSCRIBE_COV',
    'SERVICE_CONFIRMED_SUBSCRIBE_COV_PROPERTY',
    'SERVICE_CONFIRMED_LIFE_SAFETY_OPERATION',
    'SERVICE_CONFIRMED_ATOMIC_READ_FILE',
    'SERVICE_CONFIRMED_ATOMIC_WRITE_FILE',
    'SERVICE_CONFIRMED_ADD_LIST_ELEMENT',
    'SERVICE_CONFIRMED_REMOVE_LIST_ELEMENT',
    'SERVICE_CONFIRMED_CREATE_OBJECT',
    'SERVICE_CONFIRMED_DELETE_OBJECT',
    'SERVICE_CONFIRMED_READ_PROPERTY',
    'SERVICE_CONFIRMED_READ_PROP_CONDITIONAL',
    'SERVICE_CONFIRMED_READ_PROP_MULTIPLE',
    'SERVICE_CONFIRMED_READ_RANGE',
    'SERVICE_CONFIRMED_WRITE_PROPERTY',
    'SERVICE_CONFIRMED_WRITE_PROP_MULTIPLE',
    'SERVICE_CONFIRMED_DEVICE_COMMUNICATION_CONTROL',
    'SERVICE_CONFIRMED_PRIVATE_TRANSFER',
    'SERVICE_CONFIRMED_TEXT_MESSAGE',
    'SERVICE_CONFIRMED_REINITIALIZE_DEVICE',
    'SERVICE_CONFIRMED_VT_OPEN',
    'SERVICE_CONFIRMED_VT_CLOSE',
    'SERVICE_CONFIRMED_VT_DATA',
    'SERVICE_CONFIRMED_AUTHENTICATE',
    'SERVICE_CONFIRMED_REQUEST_KEY',
    'MAX_BACNET_CONFIRMED_SERVICE',
    'SERVICE_UNCONFIRMED_I_AM',
    'SERVICE_UNCONFIRMED_I_HAVE',
    'SERVICE_UNCONFIRMED_COV_NOTIFICATION',
    'SERVICE_UNCONFIRMED_EVENT_NOTIFICATION',
    'SERVICE_UNCONFIRMED_PRIVATE_TRANSFER',
    'SERVICE_UNCONFIRMED_TEXT_MESSAGE',
    'SERVICE_UNCONFIRMED_TIME_SYNCHRONIZATION',
    'SERVICE_UNCONFIRMED_WHO_HAS',
    'SERVICE_UNCONFIRMED_WHO_IS',
    'SERVICE_UNCONFIRMED_UTC_TIME_SYNCHRONIZATION',
    'SERVICE_UNCONFIRMED_WRITE_GROUP',
    'MAX_BACNET_UNCONFIRMED_SERVICE',
    'SERVICE_SUPPORTED_ACKNOWLEDGE_ALARM',
    'SERVICE_SUPPORTED_CONFIRMED_COV_NOTIFICATION',
    'SERVICE_SUPPORTED_CONFIRMED_EVENT_NOTIFICATION',
    'SERVICE_SUPPORTED_GET_ALARM_SUMMARY',
    'SERVICE_SUPPORTED_GET_ENROLLMENT_SUMMARY',
    'SERVICE_SUPPORTED_GET_EVENT_INFORMATION',
    'SERVICE_SUPPORTED_SUBSCRIBE_COV',
    'SERVICE_SUPPORTED_SUBSCRIBE_COV_PROPERTY',
    'SERVICE_SUPPORTED_LIFE_SAFETY_OPERATION',
    'SERVICE_SUPPORTED_ATOMIC_READ_FILE',
    'SERVICE_SUPPORTED_ATOMIC_WRITE_FILE',
    'SERVICE_SUPPORTED_ADD_LIST_ELEMENT',
    'SERVICE_SUPPORTED_REMOVE_LIST_ELEMENT',
    'SERVICE_SUPPORTED_CREATE_OBJECT',
    'SERVICE_SUPPORTED_DELETE_OBJECT',
    'SERVICE_SUPPORTED_READ_PROPERTY',
    'SERVICE_SUPPORTED_READ_PROP_CONDITIONAL',
    'SERVICE_SUPPORTED_READ_PROP_MULTIPLE',
    'SERVICE_SUPPORTED_READ_RANGE',
    'SERVICE_SUPPORTED_WRITE_PROPERTY',
    'SERVICE_SUPPORTED_WRITE_PROP_MULTIPLE',
    'SERVICE_SUPPORTED_WRITE_GROUP',
    'SERVICE_SUPPORTED_DEVICE_COMMUNICATION_CONTROL',
    'SERVICE_SUPPORTED_PRIVATE_TRANSFER',
    'SERVICE_SUPPORTED_TEXT_MESSAGE',
    'SERVICE_SUPPORTED_REINITIALIZE_DEVICE',
    'SERVICE_SUPPORTED_VT_OPEN',
    'SERVICE_SUPPORTED_VT_CLOSE',
    'SERVICE_SUPPORTED_VT_DATA',
    'SERVICE_SUPPORTED_AUTHENTICATE',
    'SERVICE_SUPPORTED_REQUEST_KEY',
    'SERVICE_SUPPORTED_I_AM',
    'SERVICE_SUPPORTED_I_HAVE',
    'SERVICE_SUPPORTED_UNCONFIRMED_COV_NOTIFICATION',
    'SERVICE_SUPPORTED_UNCONFIRMED_EVENT_NOTIFICATION',
    'SERVICE_SUPPORTED_UNCONFIRMED_PRIVATE_TRANSFER',
    'SERVICE_SUPPORTED_UNCONFIRMED_TEXT_MESSAGE',
    'SERVICE_SUPPORTED_TIME_SYNCHRONIZATION',
    'SERVICE_SUPPORTED_UTC_TIME_SYNCHRONIZATION',
    'SERVICE_SUPPORTED_WHO_HAS',
    'SERVICE_SUPPORTED_WHO_IS',
    'BVLC_RESULT',
    'BVLC_WRITE_BROADCAST_DISTRIBUTION_TABLE',
    'BVLC_READ_BROADCAST_DIST_TABLE',
    'BVLC_READ_BROADCAST_DIST_TABLE_ACK',
    'BVLC_FORWARDED_NPDU',
    'BVLC_REGISTER_FOREIGN_DEVICE',
    'BVLC_READ_FOREIGN_DEVICE_TABLE',
    'BVLC_READ_FOREIGN_DEVICE_TABLE_ACK',
    'BVLC_DELETE_FOREIGN_DEVICE_TABLE_ENTRY',
    'BVLC_DISTRIBUTE_BROADCAST_TO_NETWORK',
    'BVLC_ORIGINAL_UNICAST_NPDU',
    'BVLC_ORIGINAL_BROADCAST_NPDU',
    'MAX_BVLC_FUNCTION',
    'BVLC_RESULT_SUCCESSFUL_COMPLETION',
    'BVLC_RESULT_WRITE_BROADCAST_DISTRIBUTION_TABLE_NAK',
    'BVLC_RESULT_READ_BROADCAST_DISTRIBUTION_TABLE_NAK',
    'BVLC_RESULT_REGISTER_FOREIGN_DEVICE_NAK',
    'BVLC_RESULT_READ_FOREIGN_DEVICE_TABLE_NAK',
    'BVLC_RESULT_DELETE_FOREIGN_DEVICE_TABLE_ENTRY_NAK',
    'BVLC_RESULT_DISTRIBUTE_BROADCAST_TO_NETWORK_NAK',
    'STATUS_FLAG_IN_ALARM',
    'STATUS_FLAG_FAULT',
    'STATUS_FLAG_OVERRIDDEN',
    'STATUS_FLAG_OUT_OF_SERVICE',
    'LOG_STATUS_LOG_DISABLED',
    'LOG_STATUS_BUFFER_PURGED',
    'LOG_STATUS_LOG_INTERRUPTED',
    'LOGGING_TYPE_POLLED',
    'LOGGING_TYPE_COV',
    'LOGGING_TYPE_TRIGGERED',
    'ACKNOWLEDGMENT_FILTER_ALL',
    'ACKNOWLEDGMENT_FILTER_ACKED',
    'ACKNOWLEDGMENT_FILTER_NOT_ACKED',
    'EVENT_STATE_FILTER_OFFNORMAL',
    'EVENT_STATE_FILTER_FAULT',
    'EVENT_STATE_FILTER_NORMAL',
    'EVENT_STATE_FILTER_ALL',
    'EVENT_STATE_FILTER_ACTIVE',
    'SELECTION_LOGIC_AND',
    'SELECTION_LOGIC_OR',
    'SELECTION_LOGIC_ALL',
    'RELATION_SPECIFIER_EQUAL',
    'RELATION_SPECIFIER_NOT_EQUAL',
    'RELATION_SPECIFIER_LESS_THAN',
    'RELATION_SPECIFIER_GREATER_THAN',
    'RELATION_SPECIFIER_LESS_THAN_OR_EQUAL',
    'RELATION_SPECIFIER_GREATER_THAN_OR_EQUAL',
    'COMMUNICATION_ENABLE',
    'COMMUNICATION_DISABLE',
    'COMMUNICATION_DISABLE_INITIATION',
    'MAX_BACNET_COMMUNICATION_ENABLE_DISABLE',
    'MESSAGE_PRIORITY_NORMAL',
    'MESSAGE_PRIORITY_URGENT',
    'MESSAGE_PRIORITY_CRITICAL_EQUIPMENT',
    'MESSAGE_PRIORITY_LIFE_SAFETY',
    'NETWORK_MESSAGE_WHO_IS_ROUTER_TO_NETWORK',
    'NETWORK_MESSAGE_I_AM_ROUTER_TO_NETWORK',
    'NETWORK_MESSAGE_I_COULD_BE_ROUTER_TO_NETWORK',
    'NETWORK_MESSAGE_REJECT_MESSAGE_TO_NETWORK',
    'NETWORK_MESSAGE_ROUTER_BUSY_TO_NETWORK',
    'NETWORK_MESSAGE_ROUTER_AVAILABLE_TO_NETWORK',
    'NETWORK_MESSAGE_INIT_RT_TABLE',
    'NETWORK_MESSAGE_INIT_RT_TABLE_ACK',
    'NETWORK_MESSAGE_ESTABLISH_CONNECTION_TO_NETWORK',
    'NETWORK_MESSAGE_DISCONNECT_CONNECTION_TO_NETWORK',
    'NETWORK_MESSAGE_INVALID',
    'REINITIALIZED_STATE_COLD_START',
    'REINITIALIZED_STATE_WARM_START',
    'REINITIALIZED_STATE_START_BACKUP',
    'REINITIALIZED_STATE_END_BACKUP',
    'REINITIALIZED_STATE_START_RESTORE',
    'REINITIALIZED_STATE_END_RESTORE',
    'REINITIALIZED_STATE_ABORT_RESTORE',
    'REINITIALIZED_STATE_IDLE',
    'ABORT_REASON_OTHER',
    'ABORT_REASON_BUFFER_OVERFLOW',
    'ABORT_REASON_INVALID_APDU_IN_THIS_STATE',
    'ABORT_REASON_PREEMPTED_BY_HIGHER_PRIORITY_TASK',
    'ABORT_REASON_SEGMENTATION_NOT_SUPPORTED',
    'MAX_BACNET_ABORT_REASON',
    'FIRST_PROPRIETARY_ABORT_REASON',
    'LAST_PROPRIETARY_ABORT_REASON',
    'REJECT_REASON_OTHER',
    'REJECT_REASON_BUFFER_OVERFLOW',
    'REJECT_REASON_INCONSISTENT_PARAMETERS',
    'REJECT_REASON_INVALID_PARAMETER_DATA_TYPE',
    'REJECT_REASON_INVALID_TAG',
    'REJECT_REASON_MISSING_REQUIRED_PARAMETER',
    'REJECT_REASON_PARAMETER_OUT_OF_RANGE',
    'REJECT_REASON_TOO_MANY_ARGUMENTS',
    'REJECT_REASON_UNDEFINED_ENUMERATION',
    'REJECT_REASON_UNRECOGNIZED_SERVICE',
    'MAX_BACNET_REJECT_REASON',
    'FIRST_PROPRIETARY_REJECT_REASON',
    'LAST_PROPRIETARY_REJECT_REASON',
    'ERROR_CLASS_DEVICE',
    'ERROR_CLASS_OBJECT',
    'ERROR_CLASS_PROPERTY',
    'ERROR_CLASS_RESOURCES',
    'ERROR_CLASS_SECURITY',
    'ERROR_CLASS_SERVICES',
    'ERROR_CLASS_VT',
    'ERROR_CLASS_COMMUNICATION',
    'MAX_BACNET_ERROR_CLASS',
    'FIRST_PROPRIETARY_ERROR_CLASS',
    'LAST_PROPRIETARY_ERROR_CLASS',
    'ERROR_CODE_OTHER',
    'ERROR_CODE_DEVICE_BUSY',
    'ERROR_CODE_CONFIGURATION_IN_PROGRESS',
    'ERROR_CODE_OPERATIONAL_PROBLEM',
    'ERROR_CODE_DYNAMIC_CREATION_NOT_SUPPORTED',
    'ERROR_CODE_NO_OBJECTS_OF_SPECIFIED_TYPE',
    'ERROR_CODE_OBJECT_DELETION_NOT_PERMITTED',
    'ERROR_CODE_OBJECT_IDENTIFIER_ALREADY_EXISTS',
    'ERROR_CODE_READ_ACCESS_DENIED',
    'ERROR_CODE_UNKNOWN_OBJECT',
    'ERROR_CODE_UNSUPPORTED_OBJECT_TYPE',
    'ERROR_CODE_CHARACTER_SET_NOT_SUPPORTED',
    'ERROR_CODE_DATATYPE_NOT_SUPPORTED',
    'ERROR_CODE_INCONSISTENT_SELECTION_CRITERION',
    'ERROR_CODE_INVALID_ARRAY_INDEX',
    'ERROR_CODE_INVALID_DATA_TYPE',
    'ERROR_CODE_NOT_COV_PROPERTY',
    'ERROR_CODE_OPTIONAL_FUNCTIONALITY_NOT_SUPPORTED',
    'ERROR_CODE_PROPERTY_IS_NOT_AN_ARRAY',
    'ERROR_CODE_UNKNOWN_PROPERTY',
    'ERROR_CODE_VALUE_OUT_OF_RANGE',
    'ERROR_CODE_WRITE_ACCESS_DENIED',
    'ERROR_CODE_NO_SPACE_FOR_OBJECT',
    'ERROR_CODE_NO_SPACE_TO_ADD_LIST_ELEMENT',
    'ERROR_CODE_NO_SPACE_TO_WRITE_PROPERTY',
    'ERROR_CODE_AUTHENTICATION_FAILED',
    'ERROR_CODE_INCOMPATIBLE_SECURITY_LEVELS',
    'ERROR_CODE_INVALID_OPERATOR_NAME',
    'ERROR_CODE_KEY_GENERATION_ERROR',
    'ERROR_CODE_PASSWORD_FAILURE',
    'ERROR_CODE_SECURITY_NOT_SUPPORTED',
    'ERROR_CODE_TIMEOUT',
    'ERROR_CODE_COV_SUBSCRIPTION_FAILED',
    'ERROR_CODE_DUPLICATE_NAME',
    'ERROR_CODE_DUPLICATE_OBJECT_ID',
    'ERROR_CODE_FILE_ACCESS_DENIED',
    'ERROR_CODE_INCONSISTENT_PARAMETERS',
    'ERROR_CODE_INVALID_CONFIGURATION_DATA',
    'ERROR_CODE_INVALID_FILE_ACCESS_METHOD',
    'ERROR_CODE_INVALID_FILE_START_POSITION',
    'ERROR_CODE_INVALID_PARAMETER_DATA_TYPE',
    'ERROR_CODE_INVALID_TIME_STAMP',
    'ERROR_CODE_MISSING_REQUIRED_PARAMETER',
    'ERROR_CODE_PROPERTY_IS_NOT_A_LIST',
    'ERROR_CODE_SERVICE_REQUEST_DENIED',
    'ERROR_CODE_UNKNOWN_VT_CLASS',
    'ERROR_CODE_UNKNOWN_VT_SESSION',
    'ERROR_CODE_NO_VT_SESSIONS_AVAILABLE',
    'ERROR_CODE_VT_SESSION_ALREADY_CLOSED',
    'ERROR_CODE_VT_SESSION_TERMINATION_FAILURE',
    'ERROR_CODE_RESERVED1',
    'ERROR_CODE_ABORT_BUFFER_OVERFLOW',
    'ERROR_CODE_ABORT_INVALID_APDU_IN_THIS_STATE',
):
    getattr(_bacnet, _swig_const_name + '_swigconstant')(_bacnet)
    globals()[_swig_const_name] = getattr(_bacnet, _swig_const_name)
# Keep the module namespace clean: the loop variable is an implementation
# detail, not part of the exported constant set.
del _swig_const_name
_bacnet.ERROR_CODE_ABORT_PREEMPTED_BY_HIGHER_PRIORITY_TASK_swigconstant(_bacnet)
ERROR_CODE_ABORT_PREEMPTED_BY_HIGHER_PRIORITY_TASK | |
gds_collector_=None):
if nodeName_ == 'Id':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Id')
value_ = self.gds_validate_string(value_, node, 'Id')
self.Id = value_
self.Id_nsprefix_ = child_.prefix
elif nodeName_ == 'Description':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Description')
value_ = self.gds_validate_string(value_, node, 'Description')
self.Description = value_
self.Description_nsprefix_ = child_.prefix
elif nodeName_ == 'NumberOfHandlingUnits' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'NumberOfHandlingUnits')
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'NumberOfHandlingUnits')
self.NumberOfHandlingUnits = ival_
self.NumberOfHandlingUnits_nsprefix_ = child_.prefix
elif nodeName_ == 'AssociatedDocumentDetails':
obj_ = AssociatedEnterpriseDocumentDetail.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.AssociatedDocumentDetails.append(obj_)
obj_.original_tagname_ = 'AssociatedDocumentDetails'
elif nodeName_ == 'TrackingNumberUnits':
obj_ = TrackingNumberUnit.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TrackingNumberUnits.append(obj_)
obj_.original_tagname_ = 'TrackingNumberUnits'
elif nodeName_ == 'HandlingUnit':
obj_ = UploadedDangerousGoodsHandlingUnit.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.HandlingUnit = obj_
obj_.original_tagname_ = 'HandlingUnit'
# end class UploadedDangerousGoodsHandlingUnitGroup
class UploadedDangerousGoodsShipmentDetail(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, Attributes=None, Origin=None, Destination=None, CarrierCode=None, ServiceType=None, ShipDate=None, Offeror=None, Signatory=None, InfectiousSubstanceResponsibleContact=None, EmergencyContactNumber=None, AircraftCategoryType=None, AdditionalHandling=None, MasterTrackingId=None, gds_collector_=None, **kwargs_):
        """Build an UploadedDangerousGoodsShipmentDetail binding object.

        All child elements default to None (or [] for the repeated
        Attributes element).  CarrierCode and AircraftCategoryType are
        checked against their xsd enumerations at construction time, and a
        ShipDate supplied as a string is parsed as an ISO 'YYYY-MM-DD' date.
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        # generateDS threads the parent element through **kwargs_.
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Attributes is a repeated element, so each instance gets a fresh list.
        if Attributes is None:
            self.Attributes = []
        else:
            self.Attributes = Attributes
        self.Attributes_nsprefix_ = None
        self.Origin = Origin
        self.Origin_nsprefix_ = None
        self.Destination = Destination
        self.Destination_nsprefix_ = None
        self.CarrierCode = CarrierCode
        self.validate_CarrierCodeType(self.CarrierCode)
        self.CarrierCode_nsprefix_ = None
        self.ServiceType = ServiceType
        self.ServiceType_nsprefix_ = None
        # BaseStrType_ is presumably the str/basestring alias generateDS
        # defines at module level — TODO confirm at the top of the file.
        if isinstance(ShipDate, BaseStrType_):
            initvalue_ = datetime_.datetime.strptime(ShipDate, '%Y-%m-%d').date()
        else:
            initvalue_ = ShipDate
        self.ShipDate = initvalue_
        self.ShipDate_nsprefix_ = None
        self.Offeror = Offeror
        self.Offeror_nsprefix_ = None
        self.Signatory = Signatory
        self.Signatory_nsprefix_ = None
        self.InfectiousSubstanceResponsibleContact = InfectiousSubstanceResponsibleContact
        self.InfectiousSubstanceResponsibleContact_nsprefix_ = None
        self.EmergencyContactNumber = EmergencyContactNumber
        self.EmergencyContactNumber_nsprefix_ = None
        self.AircraftCategoryType = AircraftCategoryType
        self.validate_DangerousGoodsAircraftCategoryType(self.AircraftCategoryType)
        self.AircraftCategoryType_nsprefix_ = None
        self.AdditionalHandling = AdditionalHandling
        self.AdditionalHandling_nsprefix_ = None
        self.MasterTrackingId = MasterTrackingId
        self.MasterTrackingId_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, UploadedDangerousGoodsShipmentDetail)
if subclass is not None:
return subclass(*args_, **kwargs_)
if UploadedDangerousGoodsShipmentDetail.subclass:
return UploadedDangerousGoodsShipmentDetail.subclass(*args_, **kwargs_)
else:
return UploadedDangerousGoodsShipmentDetail(*args_, **kwargs_)
factory = staticmethod(factory)
    # Generated accessor pairs: plain attribute get/set for each child
    # element, plus list helpers for the repeated Attributes element.
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Attributes(self):
        return self.Attributes
    def set_Attributes(self, Attributes):
        self.Attributes = Attributes
    def add_Attributes(self, value):
        self.Attributes.append(value)
    def insert_Attributes_at(self, index, value):
        self.Attributes.insert(index, value)
    def replace_Attributes_at(self, index, value):
        self.Attributes[index] = value
    def get_Origin(self):
        return self.Origin
    def set_Origin(self, Origin):
        self.Origin = Origin
    def get_Destination(self):
        return self.Destination
    def set_Destination(self, Destination):
        self.Destination = Destination
    def get_CarrierCode(self):
        return self.CarrierCode
    def set_CarrierCode(self, CarrierCode):
        # NOTE(review): unlike __init__, this setter does not re-run
        # validate_CarrierCodeType on the new value.
        self.CarrierCode = CarrierCode
    def get_ServiceType(self):
        return self.ServiceType
    def set_ServiceType(self, ServiceType):
        self.ServiceType = ServiceType
    def get_ShipDate(self):
        return self.ShipDate
    def set_ShipDate(self, ShipDate):
        # NOTE(review): no string-to-date coercion here, in contrast to __init__.
        self.ShipDate = ShipDate
    def get_Offeror(self):
        return self.Offeror
    def set_Offeror(self, Offeror):
        self.Offeror = Offeror
    def get_Signatory(self):
        return self.Signatory
    def set_Signatory(self, Signatory):
        self.Signatory = Signatory
    def get_InfectiousSubstanceResponsibleContact(self):
        return self.InfectiousSubstanceResponsibleContact
    def set_InfectiousSubstanceResponsibleContact(self, InfectiousSubstanceResponsibleContact):
        self.InfectiousSubstanceResponsibleContact = InfectiousSubstanceResponsibleContact
    def get_EmergencyContactNumber(self):
        return self.EmergencyContactNumber
    def set_EmergencyContactNumber(self, EmergencyContactNumber):
        self.EmergencyContactNumber = EmergencyContactNumber
    def get_AircraftCategoryType(self):
        return self.AircraftCategoryType
    def set_AircraftCategoryType(self, AircraftCategoryType):
        self.AircraftCategoryType = AircraftCategoryType
    def get_AdditionalHandling(self):
        return self.AdditionalHandling
    def set_AdditionalHandling(self, AdditionalHandling):
        self.AdditionalHandling = AdditionalHandling
    def get_MasterTrackingId(self):
        return self.MasterTrackingId
    def set_MasterTrackingId(self, MasterTrackingId):
        self.MasterTrackingId = MasterTrackingId
def validate_UploadedDangerousGoodsShipmentAttributeType(self, value):
result = True
# Validate type UploadedDangerousGoodsShipmentAttributeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['MANUAL_SHIPPING_LABEL']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on UploadedDangerousGoodsShipmentAttributeType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_CarrierCodeType(self, value):
result = True
# Validate type CarrierCodeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['FDXC', 'FDXE', 'FDXG', 'FXCC', 'FXFR', 'FXSP']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on CarrierCodeType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_DangerousGoodsAircraftCategoryType(self, value):
result = True
# Validate type DangerousGoodsAircraftCategoryType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['CARGO_AIRCRAFT_ONLY', 'PASSENGER_AND_CARGO_AIRCRAFT']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DangerousGoodsAircraftCategoryType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.Attributes or
self.Origin is not None or
self.Destination is not None or
self.CarrierCode is not None or
self.ServiceType is not None or
self.ShipDate is not None or
self.Offeror is not None or
self.Signatory is not None or
self.InfectiousSubstanceResponsibleContact is not None or
self.EmergencyContactNumber is not None or
self.AircraftCategoryType is not None or
self.AdditionalHandling is not None or
self.MasterTrackingId is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsShipmentDetail', pretty_print=True):
        """Write this element (and, via exportChildren, its children) as XML
        to *outfile* at the given indent *level*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('UploadedDangerousGoodsShipmentDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # An element parsed under a substituted tag keeps that original tag.
        if self.original_tagname_ is not None and name_ == 'UploadedDangerousGoodsShipmentDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='UploadedDangerousGoodsShipmentDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='UploadedDangerousGoodsShipmentDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No populated children: emit a self-closing tag instead.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='UploadedDangerousGoodsShipmentDetail'):
        # This element declares no XML attributes, so there is nothing to write.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='UploadedDangerousGoodsShipmentDetail', fromsubclass_=False, pretty_print=True):
        """Write the child elements in schema order: simple (string/date)
        children are serialized inline; complex children delegate to their
        own export() method."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for Attributes_ in self.Attributes:
            namespaceprefix_ = self.Attributes_nsprefix_ + ':' if (UseCapturedNS_ and self.Attributes_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAttributes>%s</%sAttributes>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(Attributes_), input_name='Attributes')), namespaceprefix_ , eol_))
        if self.Origin is not None:
            namespaceprefix_ = self.Origin_nsprefix_ + ':' if (UseCapturedNS_ and self.Origin_nsprefix_) else ''
            self.Origin.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Origin', pretty_print=pretty_print)
        if self.Destination is not None:
            namespaceprefix_ = self.Destination_nsprefix_ + ':' if (UseCapturedNS_ and self.Destination_nsprefix_) else ''
            self.Destination.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Destination', pretty_print=pretty_print)
        if self.CarrierCode is not None:
            namespaceprefix_ = self.CarrierCode_nsprefix_ + ':' if (UseCapturedNS_ and self.CarrierCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sCarrierCode>%s</%sCarrierCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CarrierCode), input_name='CarrierCode')), namespaceprefix_ , eol_))
        if self.ServiceType is not None:
            namespaceprefix_ = self.ServiceType_nsprefix_ + ':' if (UseCapturedNS_ and self.ServiceType_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sServiceType>%s</%sServiceType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ServiceType), input_name='ServiceType')), namespaceprefix_ , eol_))
        if self.ShipDate is not None:
            namespaceprefix_ = self.ShipDate_nsprefix_ + ':' if (UseCapturedNS_ and self.ShipDate_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            # ShipDate is a date value, so it goes through gds_format_date
            # rather than the string/quote_xml path.
            outfile.write('<%sShipDate>%s</%sShipDate>%s' % (namespaceprefix_ , self.gds_format_date(self.ShipDate, input_name='ShipDate'), namespaceprefix_ , eol_))
        if self.Offeror is not None:
            namespaceprefix_ = self.Offeror_nsprefix_ + ':' if (UseCapturedNS_ and self.Offeror_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sOfferor>%s</%sOfferor>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Offeror), input_name='Offeror')), namespaceprefix_ , eol_))
        if self.Signatory is not None:
            namespaceprefix_ = self.Signatory_nsprefix_ + ':' if (UseCapturedNS_ and self.Signatory_nsprefix_) else ''
            self.Signatory.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Signatory', pretty_print=pretty_print)
        if self.InfectiousSubstanceResponsibleContact is not None:
            namespaceprefix_ = self.InfectiousSubstanceResponsibleContact_nsprefix_ + ':' if (UseCapturedNS_ and self.InfectiousSubstanceResponsibleContact_nsprefix_) else ''
            self.InfectiousSubstanceResponsibleContact.export(outfile, level, namespaceprefix_, namespacedef_='', name_='InfectiousSubstanceResponsibleContact', pretty_print=pretty_print)
        if self.EmergencyContactNumber is not None:
            namespaceprefix_ = self.EmergencyContactNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.EmergencyContactNumber_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sEmergencyContactNumber>%s</%sEmergencyContactNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.EmergencyContactNumber), input_name='EmergencyContactNumber')), namespaceprefix_ , eol_))
        if self.AircraftCategoryType is not None:
            namespaceprefix_ = self.AircraftCategoryType_nsprefix_ + ':' if (UseCapturedNS_ and self.AircraftCategoryType_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAircraftCategoryType>%s</%sAircraftCategoryType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.AircraftCategoryType), input_name='AircraftCategoryType')), namespaceprefix_ , eol_))
        if self.AdditionalHandling is not None:
            namespaceprefix_ = self.AdditionalHandling_nsprefix_ + ':' if (UseCapturedNS_ and self.AdditionalHandling_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAdditionalHandling>%s</%sAdditionalHandling>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.AdditionalHandling), input_name='AdditionalHandling')), namespaceprefix_ , eol_))
        if self.MasterTrackingId is not None:
            namespaceprefix_ = self.MasterTrackingId_nsprefix_ + ':' if (UseCapturedNS_ and self.MasterTrackingId_nsprefix_) else ''
            self.MasterTrackingId.export(outfile, level, namespaceprefix_, namespacedef_='', name_='MasterTrackingId', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ | |
+ 10213.28554621100 * x)
L1 += 0.00000001154 * mu.cost(4.14326179244 + 6261.74020952440 * x)
L1 += 0.00000001306 * mu.cost(3.67645557029 + 2301.58581590939 * x)
L1 += 0.00000001145 * mu.cost(5.12292846748 + 4562.46099302120 * x)
L1 += 0.00000001275 * mu.cost(5.14333847164 + 2693.60159338500 * x)
L1 += 0.00000000914 * mu.cost(1.22398892152 + 103.09277421860 * x)
L1 += 0.00000000955 * mu.cost(1.52875141393 + 3369.06161416760 * x)
L1 += 0.00000000908 * mu.cost(0.48223420834 + 13358.92658845020 * x)
L1 += 0.00000000892 * mu.cost(1.35161136807 + 1214.73501932060 * x)
L1 += 0.00000000998 * mu.cost(3.29665881950 + 3178.14579056760 * x)
L1 += 0.00000000980 * mu.cost(1.69212466625 + 43.71891230500 * x)
L1 += 0.00000000919 * mu.cost(2.45445889752 + 13916.01910964160 * x)
L1 += 0.00000000899 * mu.cost(5.94990531529 + 12168.00269657460 * x)
L1 += 0.00000000891 * mu.cost(5.63297246408 + 10021.90459040220 * x)
L1 += 0.00000000922 * mu.cost(3.91483430303 + 9225.53927328300 * x)
L1 += 0.00000000930 * mu.cost(0.24073004700 + 6923.95345737360 * x)
L1 += 0.00000000923 * mu.cost(1.10396074450 + 29.42950853600 * x)
L1 += 0.00000000816 * mu.cost(4.65198282005 + 2707.82868738660 * x)
L1 += 0.00000000791 * mu.cost(6.08595583868 + 2384.32327072920 * x)
L1 += 0.00000000820 * mu.cost(4.80968546763 + 533.21408344360 * x)
L1 += 0.00000000758 * mu.cost(2.06012386134 + 12935.85151592320 * x)
L1 += 0.00000000738 * mu.cost(1.03564559078 + 14314.16811304980 * x)
L1 += 0.00000000908 * mu.cost(0.69862047595 + 16173.37116840440 * x)
L1 += 0.00000000700 * mu.cost(4.08788234610 + 5202.35827933520 * x)
L1 += 0.00000000829 * mu.cost(2.01062325398 + 9866.41688066520 * x)
L1 += 0.00000000887 * mu.cost(6.04145842617 + 10021.76996979660 * x)
L1 += 0.00000000768 * mu.cost(1.39532897827 + 8273.82086703240 * x)
L1 += 0.00000000873 * mu.cost(4.45446600602 + 3316.73398895200 * x)
L1 += 0.00000000673 * mu.cost(3.02440642752 + 1039.02661079040 * x)
L1 += 0.00000000660 * mu.cost(2.83410276989 + 107.66352393860 * x)
L1 += 0.00000000838 * mu.cost(0.39195370222 + 10551.52824519400 * x)
L1 += 0.00000000708 * mu.cost(3.27560955336 + 18984.29263000960 * x)
L1 += 0.00000000658 * mu.cost(6.01853128902 + 26724.89941359840 * x)
L1 += 0.00000000685 * mu.cost(1.98132615912 + 1228.96211332220 * x)
L1 += 0.00000000595 * mu.cost(0.10260171285 + 111.18664228760 * x)
L1 += 0.00000000563 * mu.cost(1.59138368358 + 2391.43681773000 * x)
L1 += 0.00000000555 * mu.cost(2.70801962190 + 4555.34744602040 * x)
L1 += 0.00000000696 * mu.cost(2.89276686038 + 2648.45482547300 * x)
L1 += 0.00000000587 * mu.cost(4.56017988729 + 6680.24453233140 * x)
L1 += 0.00000000540 * mu.cost(2.86002662919 + 5459.37628707820 * x)
L1 += 0.00000000530 * mu.cost(5.64877399946 + 6034.21402008480 * x)
L1 += 0.00000000520 * mu.cost(0.20012848836 + 13760.59871020740 * x)
L1 += 0.00000000552 * mu.cost(5.08766140543 + 1903.43681250120 * x)
L1 += 0.00000000512 * mu.cost(3.21411265909 + 11081.21921028860 * x)
L1 += 0.00000000641 * mu.cost(5.19459033638 + 6048.44111408640 * x)
L1 += 0.00000000677 * mu.cost(3.87723948458 + 13517.87010623340 * x)
L1 += 0.00000000534 * mu.cost(0.23224033336 + 51.28033786241 * x)
L1 += 0.00000000670 * mu.cost(3.69368226469 + 3335.08950239240 * x)
L1 += 0.00000000486 * mu.cost(2.41879628327 + 3364.49086444760 * x)
L1 += 0.00000000500 * mu.cost(4.31447859057 + 3344.49376205780 * x)
L1 += 0.00000000481 * mu.cost(1.56481992611 + 1964.83862685400 * x)
L1 += 0.00000000504 * mu.cost(2.47456295599 + 3863.18984479360 * x)
L1 += 0.00000000523 * mu.cost(0.65856269237 + 853.19638175200 * x)
L1 += 0.00000000481 * mu.cost(0.12971954679 + 66.48740891440 * x)
L1 += 0.00000000535 * mu.cost(2.98601678918 + 8270.29774868340 * x)
L1 += 0.00000000450 * mu.cost(2.02303462834 + 13362.43245314700 * x)
L1 += 0.00000000448 * mu.cost(5.59827312967 + 149.56319713460 * x)
L1 += 0.00000000519 * mu.cost(2.75931838722 + 3503.07906283200 * x)
L1 += 0.00000000534 * mu.cost(4.77352933347 + 1118.75579210280 * x)
L1 += 0.00000000450 * mu.cost(4.05380888708 + 13362.46696045140 * x)
L1 += 0.00000000439 * mu.cost(4.83194205477 + 3116.26763099790 * x)
L1 += 0.00000000567 * mu.cost(5.67483490268 + 227.47613278900 * x)
L1 += 0.00000000459 * mu.cost(3.44555998004 + 6702.00024889200 * x)
L1 += 0.00000000545 * mu.cost(2.01193901951 + 7910.18696672180 * x)
L1 += 0.00000000425 * mu.cost(2.79854459343 + 433.71173787680 * x)
L1 += 0.00000000429 * mu.cost(4.30113040289 + 16858.48253293320 * x)
L1 += 0.00000000409 * mu.cost(0.05448009540 + 3304.58456002240 * x)
L1 += 0.00000000434 * mu.cost(5.70806855136 + 21.85082932640 * x)
L1 += 0.00000000399 * mu.cost(4.93233684937 + 9779.10867612540 * x)
L1 += 0.00000000532 * mu.cost(1.31038986189 + 6660.44945790720 * x)
L1 += 0.00000000398 * mu.cost(5.31345458361 + 13119.72110282519 * x)
L1 += 0.00000000459 * mu.cost(2.53671963587 + 74.78159856730 * x)
L1 += 0.00000000384 * mu.cost(2.29906801437 + 12310.18132361080 * x)
L1 += 0.00000000467 * mu.cost(5.12562716972 + 1596.18644228460 * x)
L1 += 0.00000000516 * mu.cost(5.84767782422 + 1052.26838318840 * x)
L1 += 0.00000000414 * mu.cost(4.75409582610 + 3981.49003408200 * x)
L1 += 0.00000000365 * mu.cost(3.73271671549 + 5518.75014899180 * x)
L1 += 0.00000000367 * mu.cost(0.13506394328 + 56.89837493560 * x)
L1 += 0.00000000459 * mu.cost(0.15582180531 + 9381.93999378540 * x)
L1 += 0.00000000392 * mu.cost(2.15845463651 + 3980.50971301380 * x)
L1 += 0.00000000396 * mu.cost(1.48538591462 + 17924.91069982040 * x)
L1 += 0.00000000456 * mu.cost(0.64517343174 + 6816.28993343500 * x)
L1 += 0.00000000358 * mu.cost(5.87219240658 + 3607.21946842160 * x)
L1 += 0.00000000490 * mu.cost(0.65766946042 + 3376.64029337720 * x)
L1 += 0.00000000365 * mu.cost(1.91816243676 + 3347.65866339780 * x)
L1 += 0.00000000397 * mu.cost(1.80006148744 + 7895.95987272020 * x)
L1 += 0.00000000336 * mu.cost(2.14687780119 + 6677.63442474780 * x)
L1 += 0.00000000340 * mu.cost(2.88185925998 + 17256.63153634140 * x)
L1 += 0.00000000334 * mu.cost(6.13670038311 + 5724.93569742900 * x)
L1 += 0.00000000339 * mu.cost(4.08527025169 + 664.75604513000 * x)
L1 += 0.00000000432 * mu.cost(2.52188285182 + 18454.60166491500 * x)
L1 += 0.00000000336 * mu.cost(4.22863444521 + 6696.47732458460 * x)
L1 += 0.00000000342 * mu.cost(5.96724705923 + 3546.79797513700 * x)
L1 += 0.00000000326 * mu.cost(4.02557052581 + 6872.67311951120 * x)
L1 += 0.00000000323 * mu.cost(5.05444843838 + 3237.51965248120 * x)
L1 += 0.00000000324 * mu.cost(2.89151245241 + 8329.67161059700 * x)
L1 += 0.00000000321 * mu.cost(6.25886976298 + 10235.13637553740 * x)
L1 += 0.00000000333 * mu.cost(2.57725424455 + 6684.81528205140 * x)
L1 += 0.00000000356 * mu.cost(6.27424874986 + 8671.96987044060 * x)
L1 += 0.00000000319 * mu.cost(5.05665355586 + 36.60536530420 * x)
L1 += 0.00000000305 * mu.cost(3.88755666972 + 7107.82304427560 * x)
L1 += 0.00000000322 * mu.cost(6.28125601341 + 16706.58525184800 * x)
L1 += 0.00000000334 * mu.cost(3.15240620873 + 11216.28429032400 * x)
L1 += 0.00000000287 * mu.cost(6.16467002771 + 3973.39616601300 * x)
L1 += 0.00000000283 * mu.cost(2.67802456636 + 3877.41693879520 * x)
L1 += 0.00000000283 * mu.cost(1.66293157090 + 1692.16566950240 * x)
L1 += 0.00000000276 * mu.cost(2.94210551399 + 3415.39402526710 * x)
L1 += 0.00000000275 * mu.cost(0.53418048945 + 17395.21973472580 * x)
L1 += 0.00000000355 * mu.cost(3.31406527401 + 10022.81760116760 * x)
L1 += 0.00000000311 * mu.cost(1.50310910269 + 6660.86953400080 * x)
L1 += 0.00000000269 * mu.cost(1.84517097065 + 11780.49035851620 * x)
L1 += 0.00000000270 * mu.cost(4.42425307819 + 310.84079886840 * x)
L1 += 0.00000000275 * mu.cost(3.58464612058 + 128.01884333740 * x)
L1 += 0.00000000275 * mu.cost(2.22523539580 + 3017.10701004240 * x)
L1 += 0.00000000312 * mu.cost(5.15950395287 + 7255.56965173440 * x)
L1 += 0.00000000299 * mu.cost(0.72552273097 + 155.35308913140 * x)
L1 += 0.00000000353 * mu.cost(5.70047798350 + 16460.33352952499 * x)
L1 += 0.00000000267 * mu.cost(5.97864271046 + 9499.25986200560 * x)
L1 += 0.00000000270 * mu.cost(0.77063210836 + 11236.57229942000 * x)
L1 += 0.00000000339 * mu.cost(3.36092148900 + 5625.36604155940 * x)
L1 += 0.00000000315 * mu.cost(2.33795159922 + 3281.23856478620 * x)
L1 += 0.00000000247 * mu.cost(3.71002922076 + 7373.38245462640 * x)
L1 += 0.00000000328 * mu.cost(0.18162415648 + 5618.31980486140 * x)
L1 += 0.00000000247 * mu.cost(6.27486009856 + 15508.61512327440 * x)
L1 += 0.00000000292 * mu.cost(0.14989609091 + 16304.91313009080 * x)
L1 += 0.00000000326 * mu.cost(4.53606745007 + 2178.13772229200 * x)
L1 += 0.00000000286 * mu.cost(5.47710043383 + 9168.64089834740 * x)
L1 += 0.00000000246 * mu.cost(1.49838712480 + 15110.46611986620 * x)
L1 += 0.00000000262 * mu.cost(2.58821936465 + 3336.73109134180 * x)
L1 += 0.00000000244 * mu.cost(0.84015413449 + 16062.18452611680 * x)
L1 += 0.00000000245 * mu.cost(0.37772563756 + 12721.57209941700 * x)
L1 += 0.00000000250 * mu.cost(2.26824758119 + 6784.31762761820 * x)
L1 += 0.00000000248 * mu.cost(6.22740483254 + 13149.15061136120 * x)
L1 += 0.00000000255 * mu.cost(4.93078809107 + 14158.74771361560 * x)
L1 += 0.00000000240 * mu.cost(6.15843594225 + 19800.94595622480 * x)
L1 += 0.00000000249 * mu.cost(5.47044926479 + 4407.10790388980 * x)
L1 += 0.00000000235 * mu.cost(5.38750866169 + 76.26607127560 * x)
L1 += 0.00000000258 * mu.cost(6.10384464886 + 2480.30249794700 * x)
L1 += 0.00000000306 * mu.cost(5.35546231697 + 2766.26762836500 * x)
L1 += 0.00000000236 * mu.cost(5.25670707064 + 13171.00144068760 * x)
L1 += 0.00000000224 * mu.cost(4.52466909993 + 12566.15169998280 * x)
L1 += 0.00000000220 * mu.cost(5.83694256642 + 13936.79450513400 * x)
L1 += 0.00000000271 * mu.cost(1.42460945147 + 14054.60730802600 | |
# 338
None, # 339
None, # 340
None, # 341
None, # 342
None, # 343
None, # 344
None, # 345
None, # 346
None, # 347
None, # 348
None, # 349
None, # 350
None, # 351
None, # 352
None, # 353
None, # 354
None, # 355
None, # 356
None, # 357
None, # 358
None, # 359
None, # 360
None, # 361
None, # 362
None, # 363
None, # 364
None, # 365
None, # 366
None, # 367
None, # 368
None, # 369
None, # 370
None, # 371
None, # 372
None, # 373
None, # 374
None, # 375
None, # 376
None, # 377
None, # 378
None, # 379
None, # 380
None, # 381
None, # 382
None, # 383
None, # 384
None, # 385
None, # 386
None, # 387
None, # 388
None, # 389
None, # 390
None, # 391
None, # 392
None, # 393
None, # 394
None, # 395
None, # 396
None, # 397
None, # 398
None, # 399
None, # 400
None, # 401
None, # 402
None, # 403
None, # 404
None, # 405
None, # 406
None, # 407
None, # 408
None, # 409
None, # 410
None, # 411
None, # 412
None, # 413
None, # 414
None, # 415
None, # 416
None, # 417
None, # 418
None, # 419
None, # 420
None, # 421
None, # 422
None, # 423
None, # 424
None, # 425
None, # 426
None, # 427
None, # 428
None, # 429
None, # 430
None, # 431
None, # 432
None, # 433
None, # 434
None, # 435
None, # 436
None, # 437
None, # 438
None, # 439
None, # 440
None, # 441
None, # 442
None, # 443
None, # 444
None, # 445
None, # 446
None, # 447
None, # 448
None, # 449
None, # 450
None, # 451
None, # 452
None, # 453
None, # 454
None, # 455
None, # 456
None, # 457
None, # 458
None, # 459
None, # 460
None, # 461
None, # 462
None, # 463
None, # 464
None, # 465
None, # 466
None, # 467
None, # 468
None, # 469
None, # 470
None, # 471
None, # 472
None, # 473
None, # 474
None, # 475
None, # 476
None, # 477
None, # 478
None, # 479
None, # 480
None, # 481
None, # 482
None, # 483
None, # 484
None, # 485
None, # 486
None, # 487
None, # 488
None, # 489
None, # 490
None, # 491
None, # 492
None, # 493
None, # 494
None, # 495
None, # 496
None, # 497
None, # 498
None, # 499
None, # 500
None, # 501
None, # 502
None, # 503
None, # 504
None, # 505
None, # 506
None, # 507
None, # 508
None, # 509
None, # 510
None, # 511
None, # 512
None, # 513
None, # 514
None, # 515
None, # 516
None, # 517
None, # 518
None, # 519
None, # 520
(521, TType.DOUBLE, 'requested_memonheap', None, None, ), # 521
(522, TType.DOUBLE, 'requested_memoffheap', None, None, ), # 522
(523, TType.DOUBLE, 'requested_cpu', None, None, ), # 523
(524, TType.DOUBLE, 'assigned_memonheap', None, None, ), # 524
(525, TType.DOUBLE, 'assigned_memoffheap', None, None, ), # 525
(526, TType.DOUBLE, 'assigned_cpu', None, None, ), # 526
)
  def __init__(self, id=None, name=None, uptime_secs=None, status=None, num_tasks=None, num_workers=None, num_executors=None, topology_conf=None, id_to_spout_agg_stats=None, id_to_bolt_agg_stats=None, sched_status=None, topology_stats=None, owner=None, debug_options=None, replication_count=None, requested_memonheap=None, requested_memoffheap=None, requested_cpu=None, assigned_memonheap=None, assigned_memoffheap=None, assigned_cpu=None,):
    """Plain field holder generated from the thrift struct definition.

    Every field defaults to None, which read()/write() interpret as
    "not present on the wire".
    """
    # NOTE: generated code — 'id' deliberately mirrors the thrift field name
    # even though it shadows the builtin; do not rename.
    self.id = id
    self.name = name
    self.uptime_secs = uptime_secs
    self.status = status
    self.num_tasks = num_tasks
    self.num_workers = num_workers
    self.num_executors = num_executors
    self.topology_conf = topology_conf
    self.id_to_spout_agg_stats = id_to_spout_agg_stats
    self.id_to_bolt_agg_stats = id_to_bolt_agg_stats
    self.sched_status = sched_status
    self.topology_stats = topology_stats
    self.owner = owner
    self.debug_options = debug_options
    self.replication_count = replication_count
    self.requested_memonheap = requested_memonheap
    self.requested_memoffheap = requested_memoffheap
    self.requested_cpu = requested_cpu
    self.assigned_memonheap = assigned_memonheap
    self.assigned_memoffheap = assigned_memoffheap
    self.assigned_cpu = assigned_cpu
  def read(self, iprot):
    """Deserialize this struct from the thrift protocol *iprot*.

    Uses the C fastbinary decoder when the accelerated binary protocol is
    active; otherwise walks the fields generically, matching on (fid,
    ftype) and skipping anything unrecognized for forward compatibility.
    Python 2 codebase: note xrange and the byte-string .decode('utf-8').
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.uptime_secs = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.status = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.num_tasks = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.num_workers = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.num_executors = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.STRING:
          self.topology_conf = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 9:
        # map<string, ComponentAggregateStats> keyed by spout id.
        if ftype == TType.MAP:
          self.id_to_spout_agg_stats = {}
          (_ktype361, _vtype362, _size360 ) = iprot.readMapBegin()
          for _i364 in xrange(_size360):
            _key365 = iprot.readString().decode('utf-8')
            _val366 = ComponentAggregateStats()
            _val366.read(iprot)
            self.id_to_spout_agg_stats[_key365] = _val366
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 10:
        # map<string, ComponentAggregateStats> keyed by bolt id.
        if ftype == TType.MAP:
          self.id_to_bolt_agg_stats = {}
          (_ktype368, _vtype369, _size367 ) = iprot.readMapBegin()
          for _i371 in xrange(_size367):
            _key372 = iprot.readString().decode('utf-8')
            _val373 = ComponentAggregateStats()
            _val373.read(iprot)
            self.id_to_bolt_agg_stats[_key372] = _val373
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 11:
        if ftype == TType.STRING:
          self.sched_status = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 12:
        if ftype == TType.STRUCT:
          self.topology_stats = TopologyStats()
          self.topology_stats.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 13:
        if ftype == TType.STRING:
          self.owner = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 14:
        if ftype == TType.STRUCT:
          self.debug_options = DebugOptions()
          self.debug_options.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 15:
        if ftype == TType.I32:
          self.replication_count = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 521:
        if ftype == TType.DOUBLE:
          self.requested_memonheap = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 522:
        if ftype == TType.DOUBLE:
          self.requested_memoffheap = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 523:
        if ftype == TType.DOUBLE:
          self.requested_cpu = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 524:
        if ftype == TType.DOUBLE:
          self.assigned_memonheap = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 525:
        if ftype == TType.DOUBLE:
          self.assigned_memoffheap = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 526:
        if ftype == TType.DOUBLE:
          self.assigned_cpu = iprot.readDouble()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip its payload for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TopologyPageInfo')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8'))
oprot.writeFieldEnd()
if self.uptime_secs is not None:
oprot.writeFieldBegin('uptime_secs', TType.I32, 3)
oprot.writeI32(self.uptime_secs)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRING, 4)
oprot.writeString(self.status.encode('utf-8'))
oprot.writeFieldEnd()
if self.num_tasks is not None:
oprot.writeFieldBegin('num_tasks', TType.I32, 5)
oprot.writeI32(self.num_tasks)
oprot.writeFieldEnd()
if self.num_workers is not None:
oprot.writeFieldBegin('num_workers', TType.I32, 6)
oprot.writeI32(self.num_workers)
oprot.writeFieldEnd()
if self.num_executors is not None:
oprot.writeFieldBegin('num_executors', TType.I32, 7)
oprot.writeI32(self.num_executors)
oprot.writeFieldEnd()
if self.topology_conf is not None:
oprot.writeFieldBegin('topology_conf', TType.STRING, 8)
oprot.writeString(self.topology_conf.encode('utf-8'))
oprot.writeFieldEnd()
if self.id_to_spout_agg_stats is not None:
oprot.writeFieldBegin('id_to_spout_agg_stats', TType.MAP, 9)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.id_to_spout_agg_stats))
for kiter374,viter375 in self.id_to_spout_agg_stats.items():
oprot.writeString(kiter374.encode('utf-8'))
viter375.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.id_to_bolt_agg_stats is not None:
oprot.writeFieldBegin('id_to_bolt_agg_stats', TType.MAP, 10)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.id_to_bolt_agg_stats))
for kiter376,viter377 in self.id_to_bolt_agg_stats.items():
oprot.writeString(kiter376.encode('utf-8'))
viter377.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.sched_status is not None:
oprot.writeFieldBegin('sched_status', TType.STRING, 11)
oprot.writeString(self.sched_status.encode('utf-8'))
oprot.writeFieldEnd()
if self.topology_stats is not None:
oprot.writeFieldBegin('topology_stats', TType.STRUCT, 12)
self.topology_stats.write(oprot)
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 13)
oprot.writeString(self.owner.encode('utf-8'))
oprot.writeFieldEnd()
if self.debug_options is not None:
oprot.writeFieldBegin('debug_options', TType.STRUCT, 14)
self.debug_options.write(oprot)
oprot.writeFieldEnd()
if self.replication_count is not None:
oprot.writeFieldBegin('replication_count', TType.I32, 15)
oprot.writeI32(self.replication_count)
oprot.writeFieldEnd()
if self.requested_memonheap is not None:
oprot.writeFieldBegin('requested_memonheap', TType.DOUBLE, 521)
oprot.writeDouble(self.requested_memonheap)
oprot.writeFieldEnd()
if self.requested_memoffheap is not None:
oprot.writeFieldBegin('requested_memoffheap', TType.DOUBLE, 522)
oprot.writeDouble(self.requested_memoffheap)
oprot.writeFieldEnd()
if self.requested_cpu is not None:
oprot.writeFieldBegin('requested_cpu', TType.DOUBLE, 523)
oprot.writeDouble(self.requested_cpu)
oprot.writeFieldEnd()
if self.assigned_memonheap is not None:
oprot.writeFieldBegin('assigned_memonheap', TType.DOUBLE, 524)
oprot.writeDouble(self.assigned_memonheap)
oprot.writeFieldEnd()
if self.assigned_memoffheap is not None:
oprot.writeFieldBegin('assigned_memoffheap', TType.DOUBLE, 525)
| |
<reponame>mbosio85/ediva
## How we measure the similarity between two HPO term lists using the IC (information content) of each node.
## We have a DAG structure.
## The goal is to output a 'semantic distance' for each gene.
# Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2756558/ [but different]:
# with this metric two equal nodes have distance '0';
# the maximum distance is -2*log(1/tot) ~~ 25
import networkx as nx
import cPickle as pickle
import numpy as np
import math
import random
def calc_me(DG, a, b, PW =False):
    """Return the IC-based semantic distance between HPO terms a and b.

    DG : HPO graph whose nodes carry an 'IC' attribute and whose edges carry
         a 'dist' weight (built in generate_HPO_graph)
    a,b: HPO term ids; obsolete ids are mapped through 'replaced_by'
    PW : accepted but currently unused

    Conceptually IC(a) + IC(b) - 2*IC(MICA) where MICA is the common ancestor
    with maximal information content; implemented as a weighted shortest path.
    Unknown terms yield the maximum possible distance (2 * max IC).
    """
    #actual calculation of IC distance
    #return IC(a) + IC(b) -2*IC(MICA)
    # MICA = Max IC Ancestor
    if any(x not in DG.nodes()for x in [a,b]):
        #means one key is not in the DG nodes,
        # it can happen so we need to be safe
        #return max possible value
        return 2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
    #check for obsolete nodes
    #substitute by the replacement if obsolete
    a = DG.node[a].get('replaced_by',a)
    b = DG.node[b].get('replaced_by',b)
    if any(x not in DG.nodes()for x in [a,b]):
        #the replacement itself may be missing as well:
        #return max possible value
        return 2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
    if a==b :
        return 0.0
    # Earlier explicit MICA-based implementation, kept for reference:
    #   IC_a = DG.node[a]['IC']
    #   IC_b = DG.node[b]['IC']
    #   ancestors_a = list(nx.ancestors(DG,a)); ancestors_a.append(a)
    #   ancestors_b = list(nx.ancestors(DG,b)); ancestors_b.append(b)
    #   common_ancestors = list(set(ancestors_a) & set(ancestors_b))
    #   ancestors_val = [DG.node[x]['IC'] for x in common_ancestors]
    #   distance = IC_a + IC_b -2.0*max(ancestors_val)
    offset =1000
    # NOTE(review): each edge weight is offset + |IC difference| (see
    # generate_HPO_graph), so "% offset" strips the hop-count component and
    # keeps only the accumulated IC part -- assumes that part stays < 1000;
    # confirm for very long paths.
    distance = nx.shortest_path_length(DG,a,b,weight='dist')%offset
    print distance
    return distance
def list_distance(DG,Q,G,Query_distances):
#idea is :
# for each query HPO calculate all distances
# store them in a dict with HPOs as keys
# value is the minimum value of distance on the query HPOs
# So than for the list of genes it's enough to
# collect the values at columns names
# and if missing set '1'
#cover cases where no HPO from Query
# or no HPO provided, or no HPO
# associated with the gene
if 'NONE' in Q or 'NONE' in G:
return (0,Query_distances)
if len(Q) <1 or len(G) < 1:
return (0,Query_distances)
offset =1000
if Query_distances == 0:
# #build it
for k_q in Q:
if k_q not in DG.nodes():
#missing node (obsolete not updated or just wrong value)
continue
k_q = DG.node[k_q].get('replaced_by',k_q)
distance = nx.shortest_path_length(DG,k_q,weight='dist')
if Query_distances ==0:
Query_distances = {key: float(value)%offset for (key, value) in distance.items()}
print 'calc whole dist'
else:
for k in Query_distances.keys():
try:
Query_distances[k] = min([Query_distances[k] , float(distance[k])%offset] )
except:
Query_distances[k] = float(Query_distances[k])%offset
if Query_distances == 0:
#can happen when the original list has no updated HPO or wrong values
return (0,0)
Query_distances['maxval']=2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))
#now I have the query distances value
# map the genes HPO and extract values.
# missing one : print it and add it to the db
#results = []
maxval = Query_distances['maxval']
results = [Query_distances.get(q_g,maxval) for q_g in G]
#for q_g in G:
# q_g = DG.node[q_g].get('replaced_by',q_g)
# results.append(Query_distances.get(q_g,2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))))
final_value = np.mean(results)/maxval
if final_value > 1:
final_value = 1 #borderline cases whiere go up an down to get to the other node
return (1-final_value,Query_distances)
def calc_distance(DG,query,gene,Query_distances=0):
    """DEPRECATED -- asymmetric Distance(Query, Gene).

    query : HPO list supplied by the user
    gene  : HPO list associated with one gene
    Returns 1 minus the normalized mean best distance, or 0 when either
    list is empty.  Query_distances is accepted for interface compatibility
    and ignored.
    """
    # guard: with an empty query or gene list there is nothing to compare
    if not query or not gene:
        return 0
    # avg [ sum_{t_1 in Q} min_{t_2 in G} ( IC(t_1) + IC(t_2) - 2*IC(MICA(t_1,t_2)) ) ]
    # the graph nodes carry the IC values consumed by calc_me
    best_per_term = [
        min(float(calc_me(DG, q_term, g_term)) for g_term in gene)
        for q_term in query
    ]
    # normalize by the maximum possible distance (twice the largest IC)
    # so the score is bounded by 1
    max_ic = max(d['IC'] for n, d in DG.nodes_iter(data=True))
    normalized = np.mean(best_per_term) / (2 * max_ic)
    return (1 - normalized)
def check_qualtiy(DG):
    """Sanity-check the IC annotation: ancestors should never be more informative.

    For every node, prints each (node, ancestor, IC difference) triple where
    the ancestor's IC exceeds the node's own IC.  Returns None.
    (The misspelled name 'qualtiy' is kept: callers may rely on it.)
    """
    #find if all ancestors have IC <= sons
    # if not, why :
    for node in DG:
        ancestors = nx.ancestors(DG,node)
        # positive difference == ancestor claims more information than its
        # descendant, which should not happen with cumulative counts
        ancestors_val = [DG.node[x]['IC'] - DG.node[node]['IC'] for x in ancestors]
        problematic = [i for i, e in enumerate(ancestors_val) if e > 0]
        for i in problematic:
            print node
            print list(ancestors)[i]
            print ancestors_val[i]
    return None
def get_DG_edges(HPO, outfile):
    """Parse an hp.obo ontology file and pickle a dict describing the HPO DAG edges.

    HPO     : path to the hp.obo file
    outfile : path of the pickle file to write

    The pickled dict maps each HPO id to the list of its 'is_a' parents and
    carries two extra keys:
      'replacements' : list of (obsolete_id, replacement_id) tuples
      'alternatives' : list of (canonical_id, alt_id) tuples
    Obsolete records without a replacement are not stored as nodes.
    """
    #download data
    #wget https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo
    #then call this python ... hp.obo myHPO_edges.pk
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle             # Python 3 fallback
    listfile = HPO
    out_HPO = dict()
    replacements =[]
    alternatives =[]
    token = False     # True once at least one record header has been seen
    obsolete =False   # current record is obsolete with no usable replacement
    with open(listfile) as rd:
        for line in rd:
            if line.startswith('id: HP:'):
                # a new record starts: flush the previous one first
                if token and not obsolete:
                    out_HPO[name]=parents
                    if repl !='':
                        replacements.append((name,repl))
                token=True
                name = line.strip().split('id: ')[1]
                parents = []
                repl =''
                obsolete =False
            elif line.startswith('is_a:'):
                parents.append(line.strip().split('is_a: ')[1].split(' !')[0])
            elif line.startswith('replaced_by:'):
                #add a field to say it's replaced
                repl = line.strip().split('replaced_by: ')[1]
                obsolete =False #a replacement means we can backtrack it
            elif line.startswith('is_obsolete:'):
                obsolete =True
            elif line.startswith('alt_id:'):
                #alternative ids are added later with a replacement field
                # pointing at the most common (canonical) id
                alt = line.strip().split('alt_id: ')[1]
                alternatives.append((name,alt))
            elif line.startswith('consider:'):
                alt = line.strip().split('consider: ')[1]
                alternatives.append((alt,name))
                obsolete =False #means we can backtrack it
    # BUGFIX: flush the final record with the same rules as in-loop records.
    # The old code stored it unconditionally (even when obsolete), never
    # recorded its replacement, and raised NameError on an empty file.
    if token and not obsolete:
        out_HPO[name] = parents
        if repl != '':
            replacements.append((name, repl))
    out_HPO['replacements'] = replacements
    out_HPO['alternatives'] = alternatives
    # close the output explicitly (the old code leaked the handle)
    with open(outfile, 'wb') as wfh:
        pickle.dump(out_HPO, wfh)
def generate_HPO_graph(edges_file,counts,output):
    """Build the HPO graph with per-node IC values and per-edge weights, then pickle it.

    edges_file : pickle produced by get_DG_edges (HPO id -> parent list,
                 plus 'replacements' and 'alternatives')
    counts     : tab-separated file "HPO_id<TAB>count" (see shell recipe below)
    output     : path of the pickle file receiving the final (undirected) graph

    Each node gets 'count' (raw annotation count), 'IC' (-log(freq)), and
    optionally 'replaced_by'; each edge gets 'dist' = offset + |IC delta|.
    """
    offset =1000 #penalization for the distance
    #usage: python me edges.pk ontology.txt graph.pk
    # counts as wget wget http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab
    # awk -F '\t' '{print $5}' < phenotype_annotation.tab | sort |uniq -c | awk '{print $2 "\t" $1}' > HPO_counts.txt
    #idea is a graph with attribute the IC value per node
    # calculated
    # generate graph with counts:
    counts_d=dict()
    tot = 0
    with open(counts) as rd:
        for line in rd:
            ff=line.strip().split('\t')
            counts_d[ff[0]] = int(ff[1])
            tot += int(ff[1])
    print tot
    # load dict with edges
    edges =pickle.load(open(edges_file,'rb'))
    print( len(edges.keys()))
    #get replacements of obsolete nodes
    replacements = dict(edges.get('replacements',[]))
    tmpval = edges.pop('replacements',None)
    #let's build a graph
    DG = nx.DiGraph()
    #populate with alternatives
    #mark alternatives as replaced, it's the same for us.
    alternatives = edges.get('alternatives',[])
    tmpval = edges.pop('alternatives',None)
    # DG.add_edges_from([(1,2)])
    for k in edges.keys():
        DG.add_node(k)
        DG.node[k]['count']=0.0
        # edges point parent -> child
        ancestors = [(x,k) for x in edges[k]]
        DG.add_edges_from(ancestors)
        if k in replacements.keys():
            DG.node[k]['replaced_by']=replacements[k]
        # provisional IC: as rare as possible; refined below from counts
        DG.node[k]['IC'] = -math.log(1.0/tot)
    #nx.set_node_attributes(DG, 0,'count',)
    print 'edges'
    print DG.number_of_edges()
    print 'nodes'
    print DG.number_of_nodes()
    for k in DG.nodes():
        DG.node[k]['count']=0.0
    #populate with raw counts
    for k in counts_d.keys():
        DG.node[k]['count'] = counts_d[k]
    DG.nodes(data='count')
    #now fill it with the actual value:
    # a node's effective count is its own plus all of its descendants'
    for k in edges.keys():
        desc = nx.descendants(DG,k)
        count = DG.node[k]['count']
        for i in desc:
            count += DG.node[i]['count']
        if count >0 :
            DG.node[k]['IC'] = -math.log(float(count)/tot)
        else :
            DG.node[k]['IC'] = -math.log(1.0/tot) #missing nodes, set as rare as possible
    # add edges weight: offset dominates (hop count), IC delta refines
    for a,b in DG.edges():
        DG[a][b]['dist']=offset+abs(DG.node[a]['IC'] - DG.node[b]['IC'])
    #alternatives fill in IC and count, marked as replaced by the canonical id
    for node,k in alternatives:
        DG.add_node(k)
        DG.node[k]['count']=0.0
        DG.node[k]['replaced_by']=node
        DG.node[k]['IC'] = DG.node[node]['IC']
    #count is the IC of the node then : IC = information content
    # distances are symmetric for our purposes, so store undirected
    G = DG.to_undirected()
    DG= G
    pickle.dump(DG,open(output,'wb'))
    return None
def generate_gene_2_HPO_dict(HPO_info,outfile):
    """Build and pickle the mapping gene symbol -> list of associated HPO ids.

    HPO_info : path to ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt
               (entrez-gene-id<TAB>entrez-gene-symbol<TAB>HPO-Term-Name<TAB>HPO-Term-ID)
    outfile  : path of the pickle file to write
    Returns None.
    """
    #download from HPO charite ALL_FREQ gene to phenotype
    #wget http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/lastStableBuild/artifact/annotation/ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt
    gene_2_HPO = dict()
    with open(HPO_info) as rd:
        for line in rd:
            if line.startswith('#'):
                continue  # header / comment line
            ff = line.strip().split('\t')
            key = ff[1]   # gene symbol
            HPO = ff[-1]  # HPO term id
            to_add = gene_2_HPO.get(key, [])
            to_add.append(HPO)
            # de-duplicate; note the resulting order is arbitrary (set order)
            gene_2_HPO[key] = list(set(to_add))
    # BUGFIX: close the output handle explicitly; the old
    # `pickle.dump(..., open(...))` leaked it and relied on refcounting to flush
    with open(outfile, 'wb') as wfh:
        pickle.dump(gene_2_HPO, wfh)
    return None
def extract_HPO_related_to_gene(gene_2_HPO,gene):
    """Return the list of HPO ids associated with *gene*.

    gene_2_HPO may be either the mapping itself (a dict) or the path of a
    pickle file containing it; unknown genes yield an empty list.
    """
    if type(gene_2_HPO) is dict:
        mapping = gene_2_HPO
    else:
        # a file path was given: load the pickled mapping from disk
        mapping = pickle.load(open(gene_2_HPO,'rb'))
    return mapping.get(gene, [])
def alter_HPO_list(DG,HPO):
#way to get a list of HPO
# for each one of these you can
# - keep it
# - choose an ancestor
# - choose a descendant
# - remove it
# - choose a HPO unrelated
# all with same priority
out_list =[]
toadd =''
for hpo in HPO:
if | |
for the lowest section type, if there are section types in `otext.tf`.
But more types can be declared as verselike, e.g. `halfverse` in the
[bhsa](https://github.com/annotation/app-bhsa/blob/master/code/config.yaml).
---
### `wrap`
Pretty: whether the child displays may be wrapped.
Default:
: boolean
* `true` if the children form a row, such rows may be wrapped
* `false` if the children form a column;
such columns may not be wrapped (into several columns)
!!! hint
For some types in
[uruk](https://github.com/annotation/app-uruk/blob/master/code/config.yaml)
it is needed to deviate from the default.
---
## `writing`
Code for triggering special fonts, see `tf.writing`.
Default:
: string `''`
---
"""
import re
import types
from ..parameters import URL_GH, URL_NB, URL_TFDOC
from ..core.helpers import console, mergeDictOfSets
from .options import INTERFACE_OPTIONS
from .helpers import parseFeatures, transitiveClosure, showDict, ORIG, NORMAL
# Matches {var} placeholders in URL/doc templates (used by _fillInDefined).
VAR_PATTERN = re.compile(r"\{([^}]+)\}")
# Per writing-system defaults: language name and text direction.
WRITING_DEFAULTS = dict(
    akk=dict(language="akkadian", direction="ltr",),
    hbo=dict(language="hebrew", direction="rtl",),
    syc=dict(language="syriac", direction="rtl",),
    ara=dict(language="arabic", direction="rtl",),
    grc=dict(language="greek", direction="ltr",),
    cld=dict(language="aramaic", direction="ltr",),
)
# Empty writing code: no special language, left-to-right.
WRITING_DEFAULTS[""] = dict(language="", direction="ltr",)
FONT_BASE = (
    "https://github.com/annotation/text-fabric/blob/master/tf/server/static/fonts"
)
# Keys allowed inside a textFormats entry.
METHOD = "method"
STYLE = "style"
DESCEND = "descend"
FMT_KEYS = {METHOD, STYLE}
# CSS class names for the various text formats.
DEFAULT_CLS = "txtn"
DEFAULT_CLS_SRC = "txto"
DEFAULT_CLS_ORIG = "txtu"
DEFAULT_CLS_TRANS = "txtt"
DEFAULT_CLS_PHONO = "txtp"
# (format name, css class) pairs; NORMAL and ORIG come from .helpers.
FORMAT_CLS = (
    (NORMAL, DEFAULT_CLS),
    (ORIG, DEFAULT_CLS_ORIG),
    ("trans", DEFAULT_CLS_TRANS),
    ("source", DEFAULT_CLS_SRC),
    ("phono", DEFAULT_CLS_PHONO),
)
# Display defaults per node level: higher levels flow horizontally,
# level 0 vertically; flow in turn determines wrap/stretch defaults.
LEVEL_DEFAULTS = dict(
    level={
        4: dict(flow="hor"),
        3: dict(flow="hor"),
        2: dict(flow="hor"),
        1: dict(flow="hor"),
        0: dict(flow="ver"),
    },
    flow=dict(ver=dict(wrap=False, stretch=False), hor=dict(wrap=True, stretch=True)),
    wrap=None,
    stretch=None,
)
# Default value for the 'relative' location of TF data inside a repo.
RELATIVE_DEFAULT = "tf"
# Keys that may occur in a moduleSpec entry of provenanceSpec.
MSPEC_KEYS = set(
    """
    org
    repo
    relative
    corpus
    docUrl
    doi
""".strip().split()
)
# (key, default) pairs for the provenanceSpec section.
# BUGFIX: the duplicate ("webLexId", None) and ("webHint", None) entries
# have been removed; each key now appears exactly once.
PROVENANCE_DEFAULTS = (
    ("org", None),
    ("repo", None),
    ("relative", RELATIVE_DEFAULT),
    ("graphicsRelative", None),
    ("version", None),
    ("moduleSpecs", ()),
    ("zip", None),
    ("corpus", "TF dataset (unspecified)"),
    ("doi", None),
    ("webBase", None),
    ("webHint", None),
    ("webLang", None),
    ("webLexId", None),
    ("webOffset", None),
    ("webFeature", None),
    ("webUrl", None),
    ("webUrlZeros", None),
    ("webUrlLex", None),
)
DOC_DEFAULTS = (
("docRoot", "{urlGh}"),
("docExt", ".md"),
("docBase", "{docRoot}/{org}/{repo}/blob/master/docs"),
("docPage", "home"),
("docUrl", "{docBase}/{docPage}{docExt}"),
("featureBase", "{docBase}/features/<feature>{docExt}"),
("featurePage", "home"),
("charUrl", "{tfDoc}/writing/{language}.html"),
("charText", "How TF features represent text"),
)
DATA_DISPLAY_DEFAULTS = (
("excludedFeatures", set(), False),
("noneValues", {None}, False),
("sectionSep1", " ", False),
("sectionSep2", ":", False),
("textFormats", {}, True),
("browseNavLevel", None, True),
("browseContentPretty", False, False),
("showVerseInTuple", False, False),
("exampleSection", None, True),
("exampleSectionHtml", None, True),
)
TYPE_KEYS = set(
"""
base
children
condense
features
featuresBare
flow
graphics
hidden
isBig
label
level
lexOcc
lineNumber
stretch
template
transform
verselike
wrap
""".strip().split()
)
HOOKS = """
transform
afterChild
plainCustom
prettyCustom
""".strip().split()
class AppCurrent:
    """Mutable bag of app settings that remembers every key ever set."""

    def __init__(self, specs):
        self.allKeys = set()
        self.update(specs)

    def update(self, specs):
        """Store each (key, value) pair as an attribute and record the key."""
        for (key, value) in specs.items():
            self.allKeys.add(key)
            setattr(self, key, value)

    def get(self, k, v):
        """Return attribute k, or v when it is absent."""
        return getattr(self, k, v)

    def set(self, k, v):
        """Set attribute k to v and record the key."""
        self.allKeys.add(k)
        setattr(self, k, v)
class Check:
    """Validator for app configuration settings.

    Collects human-readable messages in self.errors; call report() after a
    batch of checks to print them (via console) and reset the list.
    """

    def __init__(self, app, withApi):
        self.app = app
        # withApi: when True the TF API is loaded, so feature/node-type
        # checks can be performed
        self.withApi = withApi
        self.errors = []

    def checkSetting(self, k, v, extra=None):
        """Validate one setting k=v; extra carries pre-parsed payloads for some keys."""
        app = self.app
        withApi = self.withApi
        errors = self.errors
        dKey = self.dKey
        specs = app.specs
        interfaceDefaults = {inf[0]: inf[1] for inf in INTERFACE_OPTIONS}
        if withApi:
            customMethods = app.customMethods
            api = app.api
            F = api.F
            T = api.T
            Fall = api.Fall
            allNodeFeatures = set(Fall())
            nTypes = F.otype.all
            sectionTypes = T.sectionTypes
            if k in {"template", "label"}:
                (template, feats) = extra
                if template is not True and type(template) is not str:
                    errors.append(f"{k} must be `true` or a string")
                for feat in feats:
                    if feat not in allNodeFeatures:
                        if feat not in customMethods.transform.get(dKey, {}):
                            errors.append(f"{k}: feature {feat} not loaded")
            elif k in {"featuresBare", "features"}:
                feats = extra[0]
                tps = extra[1].values()
                for feat in feats:
                    if feat not in allNodeFeatures:
                        errors.append(f"{k}: feature {feat} not loaded")
                for tp in tps:
                    if tp not in nTypes:
                        errors.append(f"{k}: node type {tp} not present")
            elif k == "exclude":
                # BUGFIX: was `type(v) is not dict()`, which compares the type
                # against a dict *instance* and therefore always flagged
                if type(v) is not dict:
                    errors.append(f"{k}: must be a dict of features and values")
                for feat in v:
                    if feat not in allNodeFeatures:
                        errors.append(f"{k}: feature {feat} not loaded")
            elif k == "base":
                pass
            elif k == "lineNumber":
                if v not in allNodeFeatures:
                    errors.append(f"{k}: feature {v} not loaded")
            elif k == "browseNavLevel":
                legalValues = set(range(len(sectionTypes)))
                if v not in legalValues:
                    # BUGFIX: the legal values are ints; join needs strings
                    allowed = ",".join(str(i) for i in sorted(legalValues))
                    errors.append(f"{k} must be an integer in {allowed}")
            elif k == "children":
                if type(v) is not str and type(v) is not list:
                    errors.append(f"{k} must be a (list of) node types")
                else:
                    v = {v} if type(v) is str else set(v)
                    for tp in v:
                        if tp not in nTypes:
                            errors.append(f"{k}: node type {tp} not present")
            elif k in {"lexOcc"}:
                if type(v) is not str or v not in nTypes:
                    errors.append(f"{k}: node type {v} not present")
            elif k == "transform":
                for (feat, method) in extra.items():
                    # a string here means the named app method was not resolved
                    if type(method) is str:
                        errors.append(f"{k}:{feat}: {method}() not implemented in app")
            elif k == "style":
                if type(v) is not str or v.lower() != v:
                    errors.append(f"{k} must be an all lowercase string")
            elif k in interfaceDefaults:
                allowed = self.extra[k]
                if not allowed and v is not None:
                    errors.append(
                        f"{k}={v} is not useful (dataset lacks relevant features)"
                    )
            elif k == "textFormats":
                formatStyle = specs["formatStyle"]
                if type(v) is dict:
                    for (fmt, fmtInfo) in v.items():
                        for (fk, fv) in fmtInfo.items():
                            if fk not in FMT_KEYS:
                                errors.append(f"{k}: {fmt}: illegal key {fk}")
                                continue
                            if fk == METHOD:
                                (descendType, func) = T.splitFormat(fv)
                                func = f"fmt_{func}"
                                if not hasattr(app, func):
                                    errors.append(
                                        f"{k}: {fmt} needs unimplemented method {func}"
                                    )
                            elif fk == STYLE:
                                if fv not in formatStyle:
                                    if fv.lower() != fv:
                                        errors.append(
                                            f"{k}: {fmt}: style {fv}"
                                            f" must be all lowercase"
                                        )
                else:
                    errors.append(f"{k} must be a dictionary")
        else:
            if k in {"excludedFeatures", "noneValues"}:
                if type(v) is not list:
                    errors.append(f"{k} must be a list")
            elif k in {
                "sectionSep1",
                "sectionSep2",
                "exampleSection",
                "exampleSectionHtml",
            }:
                if type(v) is not str:
                    errors.append(f"{k} must be a string")
            elif k == "writing":
                legalValues = set(WRITING_DEFAULTS)
                if v not in legalValues:
                    allowed = ",".join(legalValues - {""})
                    errors.append(f"{k} must be the empty string or one of {allowed}")
            elif k in {"direction", "language"}:
                # BUGFIX: iterate the per-writing default dicts, not the dict's
                # keys (which are plain strings and cannot be indexed by k)
                legalValues = {w[k] for w in WRITING_DEFAULTS.values()}
                if v not in legalValues:
                    allowed = ",".join(legalValues)
                    errors.append(f"{k} must be one of {allowed}")
            elif k in {
                "browseContentPretty",
                "base",
                "condense",
                "graphics",
                "hidden",
                "isBig",
                "showVerseInTuple",
                "stretch",
                "verselike",
                "wrap",
            }:
                legalValues = {True, False}
                if v not in legalValues:
                    allowed = "true,false"
                    errors.append(f"{k} must be a boolean in {allowed}")
            elif k == "flow":
                legalValues = {"hor", "ver"}
                if v not in legalValues:
                    allowed = ",".join(legalValues)
                    errors.append(f"{k} must be a value in {allowed}")
            elif k == "level":
                # BUGFIX: was `range(len(4))` (a TypeError); the legal levels
                # are the keys of LEVEL_DEFAULTS["level"], i.e. 0..4
                legalValues = set(range(len(LEVEL_DEFAULTS["level"])))
                if v not in legalValues:
                    # BUGFIX: join needs strings, legalValues are ints
                    allowed = ",".join(str(i) for i in sorted(legalValues))
                    errors.append(f"{k} must be an integer in {allowed}")

    def checkGroup(self, cfg, defaults, dKey, postpone=set(), extra=None):
        """Validate every setting under cfg[dKey] against the allowed keys."""
        self.cfg = cfg
        self.defaults = defaults
        self.dKey = dKey
        self.extra = extra
        # BUGFIX: errors were collected in a fresh *local* list and silently
        # discarded; collect them on the instance so report() can see them
        errors = self.errors
        errors.clear()
        dSource = cfg.get(dKey, {})
        for (k, v) in dSource.items():
            if k in defaults:
                if k not in postpone:
                    self.checkSetting(k, v)
            else:
                errors.append(f"Illegal parameter `{k}` with value {v}")

    def checkItem(self, cfg, dKey):
        """Validate the single top-level setting cfg[dKey], if present."""
        self.cfg = cfg
        self.dKey = dKey
        errors = self.errors
        errors.clear()
        if dKey in cfg:
            self.checkSetting(dKey, cfg[dKey])

    def report(self):
        """Print the collected errors (if any) and reset the error list."""
        errors = self.errors
        dKey = self.dKey
        if errors:
            console(f"App config error(s) in {dKey}:", error=True)
            for msg in errors:
                console(f"\t{msg}", error=True)
        self.errors = []
def _fillInDefined(template, data):
val = template.format(**data)
return None if "None" in val else val
def setAppSpecs(app, cfg, reset=False):
if not reset:
app.customMethods = AppCurrent({hook: {} for hook in HOOKS})
specs = dict(urlGh=URL_GH, urlNb=URL_NB, tfDoc=URL_TFDOC,)
app.specs = specs
specs.update(cfg)
if "apiVersion" not in specs:
specs["apiVersion"] = None
checker = Check(app, False)
dKey = "writing"
checker.checkItem(cfg, dKey)
checker.report()
value = cfg.get(dKey, "")
specs[dKey] = value
for (k, v) in WRITING_DEFAULTS[value].items():
specs[k] = v
extension = f" {value}" if value else ""
defaultClsOrig = f"{DEFAULT_CLS_ORIG}{extension}"
specs.update(extension=extension, defaultClsOrig=defaultClsOrig)
for (dKey, defaults) in (
("provenanceSpec", PROVENANCE_DEFAULTS),
("docs", DOC_DEFAULTS),
):
checker.checkGroup(cfg, {d[0] for d in defaults}, dKey)
checker.report()
dSource = cfg.get(dKey, {})
for (k, v) in defaults:
val = dSource.get(k, v)
val = (
None
if val is None
else _fillInDefined(val, specs)
# else val.format(**specs)
if type(val) is str
else val
)
specs[k] = val
if dKey == "provenanceSpec":
moduleSpecs = specs["moduleSpecs"] or []
for moduleSpec in moduleSpecs:
for k in MSPEC_KEYS:
if k in moduleSpec:
v = moduleSpec[k]
if k == "docUrl" and v is not None:
# v = v.format(**specs)
v = _fillInDefined(v, specs)
moduleSpec[k] = v
else:
moduleSpec[k] = (
specs.get(k, None)
if k in {"org", "repo"}
else RELATIVE_DEFAULT
if k == "relative"
else None
)
specs[dKey] = {k[0]: specs[k[0]] for k in defaults}
if specs["zip"] is None:
org = specs["org"]
repo = | |
ll > 0.49 * length[ii]:
r = 0.49 * length[ii] / tt[ii]
ll = 0.49 * length[ii]
else:
r = radii[jj][ii]
if ll > 0.49 * length[ii + 1]:
r = 0.49 * length[ii + 1] / tt[ii]
new_points.extend(
r * dvec[ii] / ct[ii]
+ self.polygons[jj][ii]
+ numpy.vstack((r * numpy.cos(a), r * numpy.sin(a))).transpose()
)
else:
new_points.append(self.polygons[jj][ii])
self.polygons[jj] = numpy.array(new_points)
if len(new_points) > max_points:
fracture = True
if fracture:
self.fracture(max_points, precision)
return self
def translate(self, dx, dy):
"""
Translate this polygon.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `PolygonSet`
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
return self
def mirror(self, p1, p2=(0, 0)):
"""
Mirror the polygons over a line through points 1 and 2
Parameters
----------
p1 : array-like[2]
first point defining the reflection line
p2 : array-like[2]
second point defining the reflection line
Returns
-------
out : `PolygonSet`
This object.
"""
origin = numpy.array(p1)
vec = numpy.array(p2) - origin
vec_r = vec * (2 / numpy.inner(vec, vec))
self.polygons = [
numpy.outer(numpy.inner(points - origin, vec_r), vec) - points + 2 * origin
for points in self.polygons
]
return self
class Polygon(PolygonSet):
    """
    Polygonal geometric object.

    Parameters
    ----------
    points : array-like[N][2]
        Coordinates of the vertices of the polygon.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).

    Notes
    -----
    The last point should not be equal to the first (polygons are
    automatically closed).
    The original GDSII specification supports only a maximum of 199
    vertices per polygon.

    Examples
    --------
    >>> triangle_pts = [(0, 40), (15, 40), (10, 50)]
    >>> triangle = gdspy.Polygon(triangle_pts)
    >>> myCell.add(triangle)
    """

    __slots__ = "layers", "datatypes", "polygons"

    def __init__(self, points, layer=0, datatype=0):
        # a Polygon is a PolygonSet holding exactly one vertex array
        self.polygons = [numpy.array(points)]
        self.layers = [layer]
        self.datatypes = [datatype]

    def __str__(self):
        template = "Polygon ({} vertices, layer {}, datatype {})"
        return template.format(
            len(self.polygons[0]), self.layers[0], self.datatypes[0]
        )
class Rectangle(PolygonSet):
    """
    Rectangular geometric object.

    Parameters
    ----------
    point1 : array-like[2]
        Coordinates of a corner of the rectangle.
    point2 : array-like[2]
        Coordinates of the corner of the rectangle opposite to `point1`.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).

    Examples
    --------
    >>> rectangle = gdspy.Rectangle((0, 0), (10, 20))
    >>> myCell.add(rectangle)
    """

    __slots__ = "layers", "datatypes", "polygons"

    def __init__(self, point1, point2, layer=0, datatype=0):
        self.layers = [layer]
        self.datatypes = [datatype]
        x0, y0 = point1[0], point1[1]
        x1, y1 = point2[0], point2[1]
        # vertices in the same order as always: corner, up, across, down
        self.polygons = [numpy.array([[x0, y0], [x0, y1], [x1, y1], [x1, y0]])]

    def __str__(self):
        fmt = (
            "Rectangle (({0[0]}, {0[1]}) to ({1[0]}, {1[1]}), layer {2}, datatype {3})"
        )
        return fmt.format(
            self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
        )

    def __repr__(self):
        fmt = "Rectangle(({0[0]}, {0[1]}), ({1[0]}, {1[1]}), {2}, {3})"
        return fmt.format(
            self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
        )
class Round(PolygonSet):
    """
    Circular geometric object.
    Represent a circle, ellipse, ring or their sections.
    Parameters
    ----------
    center : array-like[2]
        Coordinates of the center of the circle/ring.
    radius : number, array-like[2]
        Radius of the circle/outer radius of the ring. To build an
        ellipse an array of 2 numbers can be used, representing the
        radii in the horizontal and vertical directions.
    inner_radius : number, array-like[2]
        Inner radius of the ring. To build an elliptical hole, an array
        of 2 numbers can be used, representing the radii in the
        horizontal and vertical directions.
    initial_angle : number
        Initial angle of the circular/ring section (in *radians*).
    final_angle : number
        Final angle of the circular/ring section (in *radians*).
    tolerance : float
        Approximate curvature resolution. The number of points is
        automatically calculated.
    number_of_points : integer or None
        Manually define the number of vertices that form the object
        (polygonal approximation). Overrides `tolerance`.
    max_points : integer
        If the number of points in the element is greater than
        `max_points`, it will be fractured in smaller polygons with
        at most `max_points` each. If `max_points` is zero no fracture
        will occur.
    layer : integer
        The GDSII layer number for this element.
    datatype : integer
        The GDSII datatype for this element (between 0 and 255).
    Notes
    -----
    The original GDSII specification supports only a maximum of 199
    vertices per polygon.
    Examples
    --------
    >>> circle = gdspy.Round((30, 5), 8)
    >>> ell_ring = gdspy.Round((50, 5), (8, 7), inner_radius=(5, 4))
    >>> pie_slice = gdspy.Round((30, 25), 8, initial_angle=0,
    ...                         final_angle=-5.0*numpy.pi/6.0)
    >>> arc = gdspy.Round((50, 25), 8, inner_radius=5,
    ...                   initial_angle=-5.0*numpy.pi/6.0,
    ...                   final_angle=0)
    """
    __slots__ = "layers", "datatypes", "polygons"
    def __init__(
        self,
        center,
        radius,
        inner_radius=0,
        initial_angle=0,
        final_angle=0,
        tolerance=0.01,
        number_of_points=None,
        max_points=199,
        layer=0,
        datatype=0,
    ):
        # Elliptical outer boundary: keep both radii and use the larger one
        # for the tolerance-based point-count estimate below.
        if hasattr(radius, "__iter__"):
            orx, ory = radius
            radius = max(radius)
            def outer_transform(a):
                # Map a geometric angle to the ellipse parameter angle.
                # NOTE(review): r re-adds the multiple of 2*pi that arctan2
                # strips, so multi-turn angle ranges survive -- confirm.
                r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
                t = numpy.arctan2(orx * numpy.sin(a), ory * numpy.cos(a)) + r
                t[a == numpy.pi] = numpy.pi
                return t
        else:
            orx = ory = radius
            def outer_transform(a):
                # circle: no parameter-angle correction needed
                return a
        # Same treatment for an elliptical inner boundary (ring hole).
        if hasattr(inner_radius, "__iter__"):
            irx, iry = inner_radius
            inner_radius = max(inner_radius)
            def inner_transform(a):
                r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
                t = numpy.arctan2(irx * numpy.sin(a), iry * numpy.cos(a)) + r
                t[a == numpy.pi] = numpy.pi
                return t
        else:
            irx = iry = inner_radius
            def inner_transform(a):
                return a
        # Backwards compatibility: a float number_of_points used to mean a
        # curvature tolerance.
        if isinstance(number_of_points, float):
            warnings.warn(
                "[GDSPY] Use of a floating number as number_of_points is deprecated in favor of tolerance.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            tolerance = number_of_points
            number_of_points = None
        # Derive the vertex count from the requested curvature tolerance;
        # rings need points on both boundaries, hence the doubling.
        if number_of_points is None:
            full_angle = (
                2 * numpy.pi
                if final_angle == initial_angle
                else abs(final_angle - initial_angle)
            )
            number_of_points = max(
                3,
                1 + int(0.5 * full_angle / numpy.arccos(1 - tolerance / radius) + 0.5),
            )
            if inner_radius > 0:
                number_of_points *= 2
        # Fracture into `pieces` polygons so none exceeds max_points vertices.
        pieces = (
            1
            if max_points == 0
            else int(numpy.ceil(number_of_points / float(max_points)))
        )
        number_of_points = number_of_points // pieces
        self.layers = [layer] * pieces
        self.datatypes = [datatype] * pieces
        self.polygons = [numpy.zeros((number_of_points, 2)) for _ in range(pieces)]
        # A fractured full circle must still sweep the whole turn.
        if final_angle == initial_angle and pieces > 1:
            final_angle += 2 * numpy.pi
        angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
        oang = outer_transform(angles)
        iang = inner_transform(angles)
        for ii in range(pieces):
            if oang[ii + 1] == oang[ii]:
                # Degenerate angular span: emit a full circle/ring.
                if inner_radius <= 0:
                    t = (
                        numpy.arange(number_of_points)
                        * 2.0
                        * numpy.pi
                        / number_of_points
                    )
                    self.polygons[ii][:, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:, 1] = numpy.sin(t) * ory + center[1]
                else:
                    # Ring: outer boundary forward, inner boundary backward.
                    n2 = number_of_points // 2
                    n1 = number_of_points - n2
                    t = numpy.arange(n1) * 2.0 * numpy.pi / (n1 - 1)
                    self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
                    t = numpy.arange(n2) * -2.0 * numpy.pi / (n2 - 1)
                    self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
                    self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
            else:
                if inner_radius <= 0:
                    # Pie slice: first vertex is the center itself.
                    t = numpy.linspace(oang[ii], oang[ii + 1], number_of_points - 1)
                    self.polygons[ii][1:, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][1:, 1] = numpy.sin(t) * ory + center[1]
                    self.polygons[ii][0] += center
                else:
                    # Ring section: outer arc forward, inner arc backward.
                    n2 = number_of_points // 2
                    n1 = number_of_points - n2
                    t = numpy.linspace(oang[ii], oang[ii + 1], n1)
                    self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
                    self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
                    t = numpy.linspace(iang[ii + 1], iang[ii], n2)
                    self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
                    self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
    def __str__(self):
        # Summary of the (possibly fractured) approximation.
        return ("Round ({} polygons, {} vertices, layers {}, datatypes {})").format(
            len(self.polygons),
            sum([len(p) for p in self.polygons]),
            list(set(self.layers)),
            list(set(self.datatypes)),
        )
class Text(PolygonSet):
"""
Polygonal text object.
Each letter is formed by a series of polygons.
Parameters
----------
text : string
The text to be converted in geometric objects.
size : number
Height of the character. The width of a character and the
distance between characters are this value multiplied by 5 / 9
and 8 / 9, respectively. For vertical text, the distance is
multiplied by 11 / 9.
position : array-like[2]
Text position (lower left corner).
horizontal : bool
If True, the text is written from left to right; if
False, from top to bottom.
angle | |
# <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SolarEdge ModbusTCP
#
# Source: https://github.com/addiejanssen/domoticz-solaredge-modbustcp-plugin
# Author: <NAME> (https://addiejanssen.com)
# License: MIT
#
"""
<plugin key="SolarEdge_ModbusTCP" name="SolarEdge ModbusTCP" author="<NAME>" version="1.0.6" externallink="https://github.com/addiejanssen/domoticz-solaredge-modbustcp-plugin">
<params>
<param field="Address" label="Inverter IP Address" width="150px" required="true" />
<param field="Port" label="Inverter Port Number" width="100px" required="true" default="502" />
<param field="Mode1" label="Add missing devices" width="100px" required="true" default="Yes" >
<options>
<option label="Yes" value="Yes" default="true" />
<option label="No" value="No" />
</options>
</param>
<param field="Mode2" label="Interval" width="100px" required="true" default="5" >
<options>
<option label="5 seconds" value="5" default="true" />
<option label="10 seconds" value="10" />
<option label="20 seconds" value="20" />
<option label="30 seconds" value="30" />
<option label="60 seconds" value="60" />
</options>
</param>
<param field="Mode5" label="Log level" width="100px">
<options>
<option label="Normal" value="Normal" default="true" />
<option label="Extra" value="Extra"/>
</options>
</param>
<param field="Mode6" label="Debug" width="100px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import solaredge_modbus
import json
from datetime import datetime, timedelta
from enum import IntEnum, unique, auto
from pymodbus.exceptions import ConnectionException
#
# Domoticz shows graphs with intervals of 5 minutes.
# When collecting information from the inverter more frequently than that, it makes no sense to only show the last value.
#
# The Average class can be used to calculate the average value based on a sliding window of samples.
# The number of samples stored depends on the interval used to collect the value from the inverter itself.
#
class Average:
    """Sliding-window average of inverter samples.

    Domoticz graphs values in 5 minute intervals; when the inverter is
    polled more often than that, the average over the window is reported
    instead of only the most recent sample.
    """

    def __init__(self):
        # Sliding window of already-scaled sample values.
        self.samples = []
        # Default heartbeat is 10 seconds -> 30 samples per 5 minutes.
        self.max_samples = 30

    def set_max_samples(self, max):
        """Resize the sliding window; at least one sample is always kept.

        NOTE: the parameter name shadows the builtin ``max``; kept for
        backward compatibility with keyword callers.
        """
        self.max_samples = max
        if self.max_samples < 1:
            self.max_samples = 1

    def update(self, new_value, scale = 0):
        """Append ``new_value`` scaled by ``10 ** scale`` and trim the window."""
        self.samples.append(new_value * (10 ** scale))
        # Drop the oldest samples until the window fits again.
        while (len(self.samples) > self.max_samples):
            del self.samples[0]
        Domoticz.Debug("Average: {} - {} values".format(self.get(), len(self.samples)))

    def get(self):
        """Return the average of the stored samples (0 when empty).

        Guarding the empty case avoids a ZeroDivisionError when get()
        is called before the first update().
        """
        if not self.samples:
            return 0
        return sum(self.samples) / len(self.samples)
#
# Domoticz shows graphs with intervals of 5 minutes.
# When collecting information from the inverter more frequently than that, it makes no sense to only show the last value.
#
# The Maximum class can be used to calculate the highest value based on a sliding window of samples.
# The number of samples stored depends on the interval used to collect the value from the inverter itself.
#
class Maximum:
    """Sliding-window maximum of inverter samples.

    Domoticz graphs values in 5 minute intervals; when the inverter is
    polled more often than that, the highest value in the window is
    reported instead of only the most recent sample.
    """

    def __init__(self):
        # Sliding window of already-scaled sample values.
        self.samples = []
        # Default heartbeat is 10 seconds -> 30 samples per 5 minutes.
        self.max_samples = 30

    def set_max_samples(self, max):
        """Resize the sliding window; at least one sample is always kept.

        NOTE: the parameter name shadows the builtin ``max``; kept for
        backward compatibility with keyword callers.
        """
        self.max_samples = max
        if self.max_samples < 1:
            self.max_samples = 1

    def update(self, new_value, scale = 0):
        """Append ``new_value`` scaled by ``10 ** scale`` and trim the window."""
        self.samples.append(new_value * (10 ** scale))
        # Drop the oldest samples until the window fits again.
        while (len(self.samples) > self.max_samples):
            del self.samples[0]
        Domoticz.Debug("Maximum: {} - {} values".format(self.get(), len(self.samples)))

    def get(self):
        """Return the highest stored sample (0 when empty).

        Guarding the empty case avoids a ValueError from max() when
        get() is called before the first update().
        """
        if not self.samples:
            return 0
        return max(self.samples)
#
# The Unit class lists all possible pieces of information that can be retrieved from the inverter.
#
# Not all inverters will support all these options.
# The class is used to generate a unique id for each device in Domoticz.
#
@unique
class Unit(IntEnum):
    """Domoticz device unit ids for every value the inverter can expose.

    Not every inverter supports all of these. The numeric values must
    stay stable, because Domoticz identifies existing devices by their
    unit number.
    """

    STATUS = 1
    VENDOR_STATUS = 2
    CURRENT = 3
    P1_CURRENT = 4
    P2_CURRENT = 5
    P3_CURRENT = 6
    P1_VOLTAGE = 7
    P2_VOLTAGE = 8
    P3_VOLTAGE = 9
    P1N_VOLTAGE = 10
    P2N_VOLTAGE = 11
    P3N_VOLTAGE = 12
    POWER_AC = 13
    FREQUENCY = 14
    POWER_APPARENT = 15
    POWER_REACTIVE = 16
    POWER_FACTOR = 17
    ENERGY_TOTAL = 18
    CURRENT_DC = 19
    VOLTAGE_DC = 20
    POWER_DC = 21
    TEMPERATURE = 22
#
# The plugin is using a few tables to setup Domoticz and to process the feedback from the inverter.
# The Column class is used to easily identify the columns in those tables.
#
@unique
class Column(IntEnum):
    """Column indices into the inverter lookup tables below.

    Each row of those tables describes one Domoticz device; these names
    make indexing into a row self-describing.
    """

    ID = 0
    NAME = 1
    TYPE = 2
    SUBTYPE = 3
    SWITCHTYPE = 4
    OPTIONS = 5
    MODBUSNAME = 6
    MODBUSSCALE = 7
    FORMAT = 8
    PREPEND = 9
    LOOKUP = 10
    MATH = 11
#
# This table represents a single phase inverter.
#
SINGLE_PHASE_INVERTER = [
    # Each row describes one Domoticz device for a single phase inverter.
    # TYPE/SUBTYPE/SWITCHTYPE are raw Domoticz device type codes.
    # MODBUSNAME/MODBUSSCALE name the solaredge_modbus register (and its
    # scale register) the value is read from; FORMAT is applied to the
    # scaled value. PREPEND names another Unit whose value is prepended
    # to this one (presumably for Domoticz "power;energy" kWh devices -
    # TODO confirm). LOOKUP maps raw values to display text; MATH holds
    # an Average()/Maximum() accumulator, or None for raw values.
    # ID, NAME, TYPE, SUBTYPE, SWITCHTYPE, OPTIONS, MODBUSNAME, MODBUSSCALE, FORMAT, PREPEND, LOOKUP, MATH
    [Unit.STATUS, "Status", 0xF3, 0x13, 0x00, {}, "status", None, "{}", None, solaredge_modbus.INVERTER_STATUS_MAP, None ],
    [Unit.VENDOR_STATUS, "Vendor Status", 0xF3, 0x13, 0x00, {}, "vendor_status", None, "{}", None, None, None ],
    [Unit.CURRENT, "Current", 0xF3, 0x17, 0x00, {}, "current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1_CURRENT, "P1 Current", 0xF3, 0x17, 0x00, {}, "p1_current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1_VOLTAGE, "P1 Voltage", 0xF3, 0x08, 0x00, {}, "p1_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1N_VOLTAGE, "P1-N Voltage", 0xF3, 0x08, 0x00, {}, "p1n_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_AC, "Power", 0xF8, 0x01, 0x00, {}, "power_ac", "power_ac_scale", "{:.2f}", None, None, Average() ],
    [Unit.FREQUENCY, "Frequency", 0xF3, 0x1F, 0x00, { "Custom": "1;Hz" }, "frequency", "frequency_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_APPARENT, "Power (Apparent)", 0xF3, 0x1F, 0x00, { "Custom": "1;VA" }, "power_apparent", "power_apparent_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_REACTIVE, "Power (Reactive)", 0xF3, 0x1F, 0x00, { "Custom": "1;VAr" }, "power_reactive", "power_reactive_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_FACTOR, "Power Factor", 0xF3, 0x06, 0x00, {}, "power_factor", "power_factor_scale", "{:.2f}", None, None, Average() ],
    [Unit.ENERGY_TOTAL, "Total Energy", 0xF3, 0x1D, 0x04, {}, "energy_total", "energy_total_scale", "{};{}", Unit.POWER_AC, None, None ],
    [Unit.CURRENT_DC, "DC Current", 0xF3, 0x17, 0x00, {}, "current_dc", "current_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.VOLTAGE_DC, "DC Voltage", 0xF3, 0x08, 0x00, {}, "voltage_dc", "voltage_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_DC, "DC Power", 0xF8, 0x01, 0x00, {}, "power_dc", "power_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.TEMPERATURE, "Temperature", 0xF3, 0x05, 0x00, {}, "temperature", "temperature_scale", "{:.2f}", None, None, Maximum() ]
]
#
# This table represents a three phase inverter.
#
THREE_PHASE_INVERTER = [
    # Each row describes one Domoticz device for a three phase inverter
    # (per-phase current and voltage rows in addition to the totals).
    # TYPE/SUBTYPE/SWITCHTYPE are raw Domoticz device type codes.
    # MODBUSNAME/MODBUSSCALE name the solaredge_modbus register (and its
    # scale register) the value is read from; FORMAT is applied to the
    # scaled value. PREPEND names another Unit whose value is prepended
    # to this one (presumably for Domoticz "power;energy" kWh devices -
    # TODO confirm). LOOKUP maps raw values to display text; MATH holds
    # an Average()/Maximum() accumulator, or None for raw values.
    # ID, NAME, TYPE, SUBTYPE, SWITCHTYPE, OPTIONS, MODBUSNAME, MODBUSSCALE, FORMAT, PREPEND, LOOKUP, MATH
    [Unit.STATUS, "Status", 0xF3, 0x13, 0x00, {}, "status", None, "{}", None, solaredge_modbus.INVERTER_STATUS_MAP, None ],
    [Unit.VENDOR_STATUS, "Vendor Status", 0xF3, 0x13, 0x00, {}, "vendor_status", None, "{}", None, None, None ],
    [Unit.CURRENT, "Current", 0xF3, 0x17, 0x00, {}, "current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1_CURRENT, "P1 Current", 0xF3, 0x17, 0x00, {}, "p1_current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P2_CURRENT, "P2 Current", 0xF3, 0x17, 0x00, {}, "p2_current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P3_CURRENT, "P3 Current", 0xF3, 0x17, 0x00, {}, "p3_current", "current_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1_VOLTAGE, "P1 Voltage", 0xF3, 0x08, 0x00, {}, "p1_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P2_VOLTAGE, "P2 Voltage", 0xF3, 0x08, 0x00, {}, "p2_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P3_VOLTAGE, "P3 Voltage", 0xF3, 0x08, 0x00, {}, "p3_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P1N_VOLTAGE, "P1-N Voltage", 0xF3, 0x08, 0x00, {}, "p1n_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P2N_VOLTAGE, "P2-N Voltage", 0xF3, 0x08, 0x00, {}, "p2n_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.P3N_VOLTAGE, "P3-N Voltage", 0xF3, 0x08, 0x00, {}, "p3n_voltage", "voltage_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_AC, "Power", 0xF8, 0x01, 0x00, {}, "power_ac", "power_ac_scale", "{:.2f}", None, None, Average() ],
    [Unit.FREQUENCY, "Frequency", 0xF3, 0x1F, 0x00, { "Custom": "1;Hz" }, "frequency", "frequency_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_APPARENT, "Power (Apparent)", 0xF3, 0x1F, 0x00, { "Custom": "1;VA" }, "power_apparent", "power_apparent_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_REACTIVE, "Power (Reactive)", 0xF3, 0x1F, 0x00, { "Custom": "1;VAr" }, "power_reactive", "power_reactive_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_FACTOR, "Power Factor", 0xF3, 0x06, 0x00, {}, "power_factor", "power_factor_scale", "{:.2f}", None, None, Average() ],
    [Unit.ENERGY_TOTAL, "Total Energy", 0xF3, 0x1D, 0x04, {}, "energy_total", "energy_total_scale", "{};{}", Unit.POWER_AC, None, None ],
    [Unit.CURRENT_DC, "DC Current", 0xF3, 0x17, 0x00, {}, "current_dc", "current_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.VOLTAGE_DC, "DC Voltage", 0xF3, 0x08, 0x00, {}, "voltage_dc", "voltage_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.POWER_DC, "DC Power", 0xF8, 0x01, 0x00, {}, "power_dc", "power_dc_scale", "{:.2f}", None, None, Average() ],
    [Unit.TEMPERATURE, "Temperature", 0xF3, 0x05, 0x00, {}, "temperature", "temperature_scale", "{:.2f}", None, None, Maximum() ]
]
#
# The BasePlugin is the actual Domoticz plugin.
# This is where the fun starts :-)
#
class BasePlugin:
def __init__(self):
# The _LOOKUP_TABLE will point to one of the tables above, depending on the type of inverter.
self._LOOKUP_TABLE = None
# This is the solaredge_modbus Inverter object that will be used to communicate with the inverter.
self.inverter = None
# Default heartbeat is 10 seconds; therefore 30 samples in 5 minutes.
self.max_samples = 30
# Whether the plugin should add missing devices.
# If set to True, a deleted device will be added on the next restart of Domoticz.
self.add_devices = False
# When there is an issue contacting the inverter, the plugin will retry after a certain retry delay.
# The actual time after which the plugin will try again is stored in the retry after variable.
        # According to the documentation, the inverter may need up to 2 minutes to "reset".
self.retrydelay = | |
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
):
self.workflow = workflow
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
assume_shared_fs=False,
max_status_checks_per_second=10,
disable_envvar_declarations=True,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Kubernetes jobs.")
def get_job_exec_prefix(self, job):
return "cp -rf /source/. ."
    def register_secret(self):
        """Create a per-run Kubernetes Secret with source files and env vars.

        The workflow's source files (skipping anything outside the working
        directory, or larger than 1MB before or after base64 encoding) and
        the values of the declared environment variables are packed into a
        single Opaque Secret named after ``self.run_namespace``.

        Raises
        ------
        WorkflowError
            If the total decoded payload exceeds the 1MB Secret size limit.
        """
        import kubernetes.client
        secret = kubernetes.client.V1Secret()
        secret.metadata = kubernetes.client.V1ObjectMeta()
        # The secret name is the random uuid generated for this run.
        secret.metadata.name = self.run_namespace
        secret.type = "Opaque"
        secret.data = {}
        for i, f in enumerate(self.workflow.get_sources()):
            if f.startswith(".."):
                logger.warning(
                    "Ignoring source file {}. Only files relative "
                    "to the working directory are allowed.".format(f)
                )
                continue
            # The kubernetes API can't create secret files larger than 1MB.
            source_file_size = os.path.getsize(f)
            max_file_size = 1048576
            if source_file_size > max_file_size:
                logger.warning(
                    "Skipping the source file {f}. Its size {source_file_size} exceeds "
                    "the maximum file size (1MB) that can be passed "
                    "from host to kubernetes.".format(
                        f=f, source_file_size=source_file_size
                    )
                )
                continue
            with open(f, "br") as content:
                # Secret keys are "f<index>"; the mapping back to the real
                # path is kept in self.secret_files for the volume mounts.
                key = "f{}".format(i)
                # Some files are smaller than 1MB on disk but grow beyond it
                # after base64 encoding; exclude those as well, otherwise the
                # Kubernetes API will reject the Secret.
                encoded_contents = base64.b64encode(content.read()).decode()
                encoded_size = len(encoded_contents)
                if encoded_size > 1048576:
                    logger.warning(
                        "Skipping the source file {f} for secret key {key}. "
                        "Its base64 encoded size {encoded_size} exceeds "
                        "the maximum file size (1MB) that can be passed "
                        "from host to kubernetes.".format(
                            f=f,
                            source_file_size=source_file_size,
                            key=key,
                            encoded_size=encoded_size,
                        )
                    )
                    continue
                self.secret_files[key] = f
                secret.data[key] = encoded_contents
        # Environment variables: store each value under its lowercased name;
        # variables missing from the host environment are silently skipped.
        for e in self.envvars:
            try:
                key = e.lower()
                secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
                self.secret_envvars[key] = e
            except KeyError:
                continue
        # Test if the total decoded size of the Secret exceeds the 1MB limit.
        config_map_size = sum(
            [len(base64.b64decode(v)) for k, v in secret.data.items()]
        )
        if config_map_size > 1048576:
            logger.warning(
                "The total size of the included files and other Kubernetes secrets "
                "is {}, exceeding the 1MB limit.\n".format(config_map_size)
            )
            logger.warning(
                "The following are the largest files. Consider removing some of them "
                "(you need remove at least {} bytes):".format(config_map_size - 1048576)
            )
            # Report the five largest source files to help the user trim.
            entry_sizes = {
                self.secret_files[k]: len(base64.b64decode(v))
                for k, v in secret.data.items()
                if k in self.secret_files
            }
            for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
                logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
            raise WorkflowError("ConfigMap too large")
        self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
    # In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
    def shutdown(self):
        """Remove the per-run secret, then run the base class shutdown."""
        self.unregister_secret()
        super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job_exec(job)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir")
]
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="source", mount_path="/source")
]
node_selector = {}
if "machine_type" in job.resources.keys():
# Kubernetes labels a node by its instance type using this node_label.
node_selector["node.kubernetes.io/instance-type"] = job.resources[
"machine_type"
]
body.spec = kubernetes.client.V1PodSpec(
containers=[container], node_selector=node_selector
)
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in priviledged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error doesn't mean anything wrong with the k8s cluster, and users can safely
# ignore it.
    def _reauthenticate_and_retry(self, func=None):
        """Reload the kube config after a 401 and re-run ``func`` if given.

        Recreates the API clients and re-registers the per-run secret; a
        409 Conflict from re-registration is harmless (the secret already
        exists) and is only logged.
        """
        import kubernetes
        # Unauthorized.
        # Reload config in order to ensure token is
        # refreshed. Then try again.
        logger.info("Trying to reauthenticate")
        kubernetes.config.load_kube_config()
        # NOTE(review): presumably issued to force a credential/token refresh
        # through the kubectl auth provider - confirm whether this is needed.
        subprocess.run(["kubectl", "get", "nodes"])
        self.kubeapi = kubernetes.client.CoreV1Api()
        self.batchapi = kubernetes.client.BatchV1Api()
        try:
            self.register_secret()
        except kubernetes.client.rest.ApiException as e:
            if e.status == 409 and e.reason == "Conflict":
                # The secret still exists from before reauthentication.
                logger.warning("409 conflict ApiException when registering secrets")
                logger.warning(e)
            else:
                raise WorkflowError(
                    e,
                    "This is likely a bug in "
                    "https://github.com/kubernetes-client/python.",
                )
        if func:
            return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
"Request time out! "
"check your connection to Kubernetes master"
"Workflow will pause for 5 minutes to allow any update operations to complete"
)
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8 cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
| |
},
{
"id": 167,
"atrr": {
"color": "verde",
"size": "xxl",
"brand": "nike",
"precio": 1540
}
},
{
"id": 168,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "blue",
"precio": 4444
}
},
{
"id": 169,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "polo",
"precio": 15000
}
},
{
"id": 170,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "polo",
"precio": 14000
}
},
{
"id": 171,
"atrr": {
"color": "amarillo",
"size": "l",
"brand": "blue",
"precio": 1540
}
},
{
"id": 172,
"atrr": {
"color": "verde",
"size": "s",
"brand": "nike",
"precio": 4558
}
},
{
"id": 173,
"atrr": {
"color": "naranja",
"size": "m",
"brand": "nike",
"precio": 14000
}
},
{
"id": 174,
"atrr": {
"color": "naranja",
"size": "l",
"brand": "zara",
"precio": 4447
}
},
{
"id": 175,
"atrr": {
"color": "azul",
"size": "s",
"brand": "zara",
"precio": 4444
}
},
{
"id": 176,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "polo",
"precio": 9500
}
},
{
"id": 177,
"atrr": {
"color": "morado",
"size": "s",
"brand": "blue",
"precio": 8889
}
},
{
"id": 178,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "adidas",
"precio": 4558
}
},
{
"id": 179,
"atrr": {
"color": "verde",
"size": "xxl",
"brand": "adidas",
"precio": 8889
}
},
{
"id": 180,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "adidas",
"precio": 4569
}
},
{
"id": 181,
"atrr": {
"color": "verde",
"size": "xl",
"brand": "adidas",
"precio": 10000
}
},
{
"id": 182,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "zara",
"precio": 4444
}
},
{
"id": 183,
"atrr": {
"color": "azul",
"size": "l",
"brand": "nike",
"precio": 4789
}
},
{
"id": 184,
"atrr": {
"color": "verde",
"size": "m",
"brand": "polo",
"precio": 2333
}
},
{
"id": 185,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "polo",
"precio": 4789
}
},
{
"id": 186,
"atrr": {
"color": "amarillo",
"size": "xl",
"brand": "blue",
"precio": 4558
}
},
{
"id": 187,
"atrr": {
"color": "naranja",
"size": "xxl",
"brand": "zara",
"precio": 4447
}
},
{
"id": 188,
"atrr": {
"color": "azul",
"size": "m",
"brand": "zara",
"precio": 9500
}
},
{
"id": 189,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "polo",
"precio": 4569
}
},
{
"id": 190,
"atrr": {
"color": "amarillo",
"size": "xl",
"brand": "blue",
"precio": 4447
}
},
{
"id": 191,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "nike",
"precio": 11000
}
},
{
"id": 192,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "nike",
"precio": 4447
}
},
{
"id": 193,
"atrr": {
"color": "amarillo",
"size": "xxl",
"brand": "polo",
"precio": 15000
}
},
{
"id": 194,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "polo",
"precio": 9500
}
},
{
"id": 195,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "blue",
"precio": 4789
}
},
{
"id": 196,
"atrr": {
"color": "naranja",
"size": "l",
"brand": "zara",
"precio": 4444
}
},
{
"id": 197,
"atrr": {
"color": "morado",
"size": "xl",
"brand": "zara",
"precio": 4447
}
},
{
"id": 198,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "blue",
"precio": 4447
}
},
{
"id": 199,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "adidas",
"precio": 4789
}
},
{
"id": 200,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "polo",
"precio": 8889
}
},
{
"id": 201,
"atrr": {
"color": "naranja",
"size": "xl",
"brand": "blue",
"precio": 8889
}
},
{
"id": 202,
"atrr": {
"color": "azul",
"size": "m",
"brand": "zara",
"precio": 4569
}
},
{
"id": 203,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "zara",
"precio": 4569
}
},
{
"id": 204,
"atrr": {
"color": "amarillo",
"size": "m",
"brand": "nike",
"precio": 1540
}
},
{
"id": 205,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "nike",
"precio": 2333
}
},
{
"id": 206,
"atrr": {
"color": "naranja",
"size": "xxl",
"brand": "adidas",
"precio": 4789
}
},
{
"id": 207,
"atrr": {
"color": "azul",
"size": "l",
"brand": "zara",
"precio": 10000
}
},
{
"id": 208,
"atrr": {
"color": "rojo",
"size": "l",
"brand": "zara",
"precio": 1540
}
},
{
"id": 209,
"atrr": {
"color": "rojo",
"size": "s",
"brand": "zara",
"precio": 14000
}
},
{
"id": 210,
"atrr": {
"color": "morado",
"size": "xl",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 211,
"atrr": {
"color": "amarillo",
"size": "s",
"brand": "zara",
"precio": 4558
}
},
{
"id": 212,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "adidas",
"precio": 4789
}
},
{
"id": 213,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "zara",
"precio": 4444
}
},
{
"id": 214,
"atrr": {
"color": "rojo",
"size": "xxl",
"brand": "nike",
"precio": 1540
}
},
{
"id": 215,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "polo",
"precio": 15000
}
},
{
"id": 216,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 217,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "polo",
"precio": 11000
}
},
{
"id": 218,
"atrr": {
"color": "amarillo",
"size": "m",
"brand": "zara",
"precio": 4558
}
},
{
"id": 219,
"atrr": {
"color": "amarillo",
"size": "xxl",
"brand": "nike",
"precio": 4558
}
},
{
"id": 220,
"atrr": {
"color": "rojo",
"size": "xxl",
"brand": "blue",
"precio": 1540
}
},
{
"id": 221,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "blue",
"precio": 2333
}
},
{
"id": 222,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "nike",
"precio": 2333
}
},
{
"id": 223,
"atrr": {
"color": "verde",
"size": "xl",
"brand": "polo",
"precio": 4558
}
},
{
"id": 224,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "blue",
"precio": 14000
}
},
{
"id": 225,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "nike",
"precio": 4444
}
},
{
"id": 226,
"atrr": {
"color": "verde",
"size": "l",
"brand": "polo",
"precio": 4447
}
},
{
"id": 227,
"atrr": {
"color": "verde",
"size": "l",
"brand": "zara",
"precio": 11000
}
},
{
"id": 228,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "blue",
"precio": 1540
}
},
{
"id": 229,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "adidas",
"precio": 1540
}
},
{
"id": 230,
"atrr": {
"color": "azul",
"size": "m",
"brand": "nike",
"precio": 8889
}
},
{
"id": 231,
"atrr": {
"color": "naranja",
"size": "xxl",
"brand": "blue",
"precio": 4789
}
},
{
"id": 232,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "nike",
"precio": 4447
}
},
{
"id": 233,
"atrr": {
"color": "amarillo",
"size": "s",
"brand": "zara",
"precio": 4447
}
},
{
"id": 234,
"atrr": {
"color": "naranja",
"size": "m",
"brand": "adidas",
"precio": 1540
}
},
{
"id": 235,
"atrr": {
"color": "azul",
"size": "s",
"brand": "nike",
"precio": 15000
}
},
{
"id": 236,
"atrr": {
"color": "naranja",
"size": "l",
"brand": "zara",
"precio": 4569
}
},
{
"id": 237,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "adidas",
"precio": 4569
}
},
{
"id": 238,
"atrr": {
"color": "naranja",
"size": "xl",
"brand": "zara",
"precio": 14000
}
},
{
"id": 239,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "zara",
"precio": 14000
}
},
{
"id": 240,
"atrr": {
"color": "azul",
"size": "s",
"brand": "zara",
"precio": 4558
}
},
{
"id": 241,
"atrr": {
"color": "naranja",
"size": "s",
"brand": "adidas",
"precio": 4569
}
},
{
"id": 242,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "adidas",
"precio": 1540
}
},
{
"id": 243,
"atrr": {
"color": "amarillo",
"size": "s",
"brand": "polo",
"precio": 14000
}
},
{
"id": 244,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "adidas",
"precio": 1540
}
},
{
"id": 245,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "polo",
"precio": 8889
}
},
{
"id": 246,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "polo",
"precio": 8889
}
},
{
"id": 247,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "blue",
"precio": 4789
}
},
{
"id": 248,
"atrr": {
"color": "azul",
"size": "l",
"brand": "polo",
"precio": 4789
}
},
{
"id": 249,
"atrr": {
"color": "naranja",
"size": "xxl",
"brand": "zara",
"precio": 4789
}
},
{
"id": 250,
"atrr": {
"color": "rojo",
"size": "l",
"brand": "adidas",
"precio": 4789
}
},
{
"id": 251,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "adidas",
"precio": 4444
}
},
{
"id": 252,
"atrr": | |
"Finished minimization procedure for experiment {exp_key}.".format(
exp_key=exp_key
)
)
logger.debug("Terminating mongod process.")
mongod_process.terminate()
# cleanup processes, threads and files
_cleanup_processes_files()
return trials
@_cleanup_decorator
def _fmin_parallel(
    queue: multiprocessing.Queue,
    fn: Callable,
    exp_key: str,
    space: dict,
    algo: Callable = tpe.suggest,
    max_evals: int = 100,
    fmin_timer: float = None,
    show_progressbar: bool = False,
    mongo_port_address: str = "localhost:1234/scvi_db",
):
    """Launches a ``hyperopt`` minimization procedure.

    Runs ``fmin`` in a daemon thread against a MongoDB-backed trials
    store and puts the resulting ``MongoTrials`` object on ``queue``.
    If ``fmin_timer`` is set, waits at most that many seconds for the
    thread; otherwise waits until ``fmin`` returns.
    """
    logger.debug("Instantiating trials object.")
    # instantiate Trials object backed by the shared MongoDB instance
    trials = MongoTrials(
        as_mongo_str(os.path.join(mongo_port_address, "jobs")), exp_key=exp_key
    )
    # run hyperoptimization in another fork to enable the use of fmin_timer
    fmin_kwargs = {
        "fn": fn,
        "space": space,
        "algo": algo,
        "max_evals": max_evals,
        "trials": trials,
        "show_progressbar": show_progressbar,
    }
    fmin_thread = threading.Thread(target=fmin, kwargs=fmin_kwargs)
    logger.debug("Calling fmin.")
    # set fmin thread as daemon so it stops when the main process terminates
    fmin_thread.daemon = True
    fmin_thread.start()
    # track the thread globally so the cleanup machinery can join it
    started_threads.append(fmin_thread)
    if fmin_timer:
        # CONSISTENCY FIX: use the module logger instead of the root
        # logging module, as everywhere else in this function.
        logger.debug(
            "Timer set, fmin will run for at most {timer}".format(timer=fmin_timer)
        )
        start_time = time.monotonic()
        run_time = 0
        # poll until the timer expires or fmin finishes on its own
        while run_time < fmin_timer and fmin_thread.is_alive():
            time.sleep(10)
            run_time = time.monotonic() - start_time
    else:
        logger.debug("No timer, waiting for fmin")
        # simplified from a while True / break loop: poll until fmin returns
        while fmin_thread.is_alive():
            time.sleep(10)
    logger.debug("fmin returned or timer ran out.")
    # queue.put uses pickle so remove attribute containing thread.lock
    if hasattr(trials, "handle"):
        logger.debug("Deleting Trial handle for pickling.")
        del trials.handle
    logger.debug("Putting Trials in Queue.")
    queue.put(trials)
def _wait_for_process_or_thread(
    process: Union[multiprocessing.Process, threading.Thread], event: threading.Event
):
    """Waits for a process to finish - breaks and sets ``event`` when it does.

    Can be terminated by setting ``event`` from outside or by setting the
    global ``cleanup_event`` of this module.
    """
    logger.debug("Started waiting for {name}.".format(name=process.name))
    # poll until the watched process/thread dies or a stop condition arrives
    while process.is_alive():
        if event.is_set():
            logger.debug(
                "Waiting event for {name} set from outside. "
                "Terminating waiter.".format(name=process.name)
            )
            return
        if cleanup_event.is_set():
            logger.debug(
                "Waiting thread for {name} cleaned up.".format(name=process.name)
            )
            event.set()
            return
        time.sleep(5)
    # watched target died on its own: signal it via the event
    logger.debug("{name} died. Terminating waiter.".format(name=process.name))
    event.set()
@_cleanup_decorator
def launch_workers(
    stop_watchdog_event: threading.Event,
    exp_key: str,
    n_cpu_workers: int = None,
    gpu_ids: List[int] = None,
    n_workers_per_gpu: int = 1,
    reserve_timeout: float = 30.0,
    workdir: str = ".",
    mongo_port_address: str = "localhost:1234/scvi_db",
    multiple_hosts: bool = False,
):
    """Launches the local workers which are going to run the jobs required by the minimization process.

    Terminates when the worker_watchdog call finishes.
    Specifically, first ``n_workers_per_gpu`` workers are launched per GPU in ``gpu_ids`` in their own spawned process.
    Then, ``n_cpu_workers`` CPU workers are launched, also in their own spawned process.
    The use of spawned processes (each have their own python interpreter) is mandatory for compatibility with CUDA.
    See https://pytorch.org/docs/stable/notes/multiprocessing.html for more information.

    :param stop_watchdog_event: When set, this event stops the watchdog Thread
        which checks that local workers are still running.
    :param exp_key: This key is used by hyperopt as a suffix to the part of the MongoDb
        which corresponds to the current experiment. In particular, it has to be passed to ``MongoWorker``.
    :param n_cpu_workers: Number of cpu workers to launch. If None, and no GPUs are found,
        defaults to ``os.cpu_count() - 1``. Else, defaults to 0.
    :param gpu_ids: Ids of the GPUs to use. If None defaults to all GPUs found by ``torch``.
        Note that considered gpu ids are int from ``0`` to ``torch.cuda.device_count()``.
    :param n_workers_per_gpu: Number of workers to launch per gpu found by ``torch``.
    :param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
        before throwing a ``ReserveTimeout`` Exception.
    :param workdir: Working directory for the workers.
    :param mongo_port_address: Address to the running MongoDb service.
    :param multiple_hosts: ``True`` if launching workers from multiple hosts.
    """
    # prepare parallel logging
    _logging_queue = spawn_ctx.Queue()
    listener = QueueListener(_logging_queue, DispatchHandler())
    listener.start()
    started_processes.append(listener)
    if gpu_ids is None:
        n_gpus = torch.cuda.device_count()
        logger.debug(
            "gpu_ids is None, defaulting to all {n_gpus} GPUs found by torch.".format(
                n_gpus=n_gpus
            )
        )
        gpu_ids = list(range(n_gpus))
    else:
        # BUGFIX: n_gpus was previously undefined when gpu_ids was passed
        # explicitly, causing a NameError in the checks below
        n_gpus = len(gpu_ids)
    if n_gpus and n_cpu_workers is None:
        n_cpu_workers = 0
        # was logging.debug: use the module logger for consistency
        logger.debug(
            "Some GPU.s found and n_cpu_wokers is None, defaulting to n_cpu_workers = 0"
        )
    if not n_gpus and n_cpu_workers is None:
        n_cpu_workers = os.cpu_count() - 1
        logger.debug(
            "No GPUs found and n_cpu_wokers is None, defaulting to n_cpu_workers = "
            "{n_cpu_workers} (os.cpu_count() - 1)".format(
                n_cpu_workers=n_cpu_workers
            )
        )
    if not gpu_ids and not n_cpu_workers and not multiple_hosts:
        raise ValueError("No hardware (cpu/gpu) selected/found.")
    # log progress with queue and progress_listener
    progress_queue = spawn_ctx.Queue()
    prog_listener_kwargs = {
        "progress_queue": progress_queue,
        "logging_queue": _logging_queue,
    }
    prog_listener = spawn_ctx.Process(
        target=progress_listener, kwargs=prog_listener_kwargs, name="Progress listener"
    )
    prog_listener.start()
    started_processes.append(prog_listener)
    running_workers = []
    # launch gpu workers
    logger.info(
        "Starting {n_workers_per_gpu} worker.s for each of the {n_gpus} gpu.s set for use/"
        "found.".format(n_workers_per_gpu=n_workers_per_gpu, n_gpus=len(gpu_ids))
    )
    for gpu_id in gpu_ids:
        for sub_id in range(n_workers_per_gpu):
            worker_kwargs = {
                "progress_queue": progress_queue,
                "logging_queue": _logging_queue,
                "exp_key": exp_key,
                "workdir": workdir,
                "gpu": True,
                "hw_id": str(gpu_id),
                "reserve_timeout": reserve_timeout,
                "mongo_port_address": mongo_port_address,
            }
            p = spawn_ctx.Process(
                target=hyperopt_worker,
                kwargs=worker_kwargs,
                name="Worker GPU " + str(gpu_id) + ":" + str(sub_id),
            )
            p.start()
            running_workers.append(p)
    # launch cpu workers
    # TODO: add cpu affinity?
    logger.info(
        "Starting {n_cpu_workers} cpu worker.s".format(n_cpu_workers=n_cpu_workers)
    )
    for cpu_id in range(n_cpu_workers):
        worker_kwargs = {
            "progress_queue": progress_queue,
            "logging_queue": _logging_queue,
            "exp_key": exp_key,
            "workdir": workdir,
            "gpu": False,
            "hw_id": str(cpu_id),
            "reserve_timeout": reserve_timeout,
            "mongo_port_address": mongo_port_address,
        }
        p = spawn_ctx.Process(
            target=hyperopt_worker,
            kwargs=worker_kwargs,
            name="Worker CPU " + str(cpu_id),
        )
        # FIXME won't terminate if parent is killed (SIGKILL)
        p.start()
        running_workers.append(p)
    started_processes.extend(running_workers)
    # wait or return if all workers have died
    workers_watchdog(running_workers=running_workers, stop_event=stop_watchdog_event)
    logger.debug("Worker watchdog finished, terminating workers and closing listener.")
    for worker in running_workers:
        if worker.is_alive():
            worker.terminate()
    listener.stop()
    prog_listener.terminate()
@_cleanup_decorator
def progress_listener(progress_queue, logging_queue):
    """Listens to workers when they finish a job and logs progress.

    Workers put in the progress_queue when they finish a job
    and when they do this function sends a log to the progress logger.
    """
    # forward every log record emitted in this process to the logging queue
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    handler = QueueHandler(logging_queue)
    handler.setLevel(logging.DEBUG)
    root.addHandler(handler)
    logger.debug("Listener listening...")
    progress_logger = logging.getLogger("progress_logger")
    jobs_done = 0
    while True:
        # blocks until a worker signals one finished job
        progress_queue.get()
        jobs_done += 1
        logger.info("{i} job.s done".format(i=jobs_done))
        # update progress bar through ProgressHandler
        progress_logger.info(None)
        if cleanup_event.is_set():
            break
def hyperopt_worker(
    progress_queue: multiprocessing.Queue,
    logging_queue: multiprocessing.Queue,
    exp_key: str,
    workdir: str = ".",
    gpu: bool = True,
    hw_id: str = None,
    poll_interval: float = 1.0,
    reserve_timeout: float = 30.0,
    mongo_port_address: str = "localhost:1234/scvi_db",
):
    """Launches a ``hyperopt`` ``MongoWorker`` which runs jobs until ``ReserveTimeout`` is raised.

    :param progress_queue: Queue in which to put None when a job is done.
    :param logging_queue: Queue to send logs to using a ``QueueHandler``.
    :param exp_key: This key is used by hyperopt as a suffix to the part of the MongoDb
        which corresponds to the current experiment. In particular, it has to be passed to ``MongoWorker``.
    :param workdir: Working directory for the worker.
    :param gpu: If ``True`` means a GPU is to be used.
    :param hw_id: Id of the GPU to use. Set via env variable ``CUDA_VISIBLE_DEVICES``.
    :param poll_interval: Time to wait between attempts to reserve a job.
    :param reserve_timeout: Amount of time, in seconds, a worker tries to reserve a job for
        before throwing a ``ReserveTimeout`` Exception.
    :param mongo_port_address: Address to the running MongoDb service.
    """
    # forward every log record emitted in this spawned process to the queue
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    handler = QueueHandler(logging_queue)
    handler.setLevel(logging.DEBUG)
    root.addHandler(handler)
    logger.debug("Worker working...")
    # restrict visible devices to the assigned GPU (empty string -> CPU only)
    os.environ["CUDA_VISIBLE_DEVICES"] = hw_id if gpu else ""
    # FIXME is this still necessary?
    sys.path.append(".")
    job_source = MongoJobs.new_from_connection_str(
        os.path.join(as_mongo_str(mongo_port_address), "jobs")
    )
    worker = MongoWorker(
        job_source, float(poll_interval), workdir=workdir, exp_key=exp_key
    )
    while True:
        # FIXME we don't protect ourselves from memory leaks, bad cleanup, etc.
        try:
            worker.run_one(reserve_timeout=float(reserve_timeout))
        except ReserveTimeout:
            logger.debug(
                "Caught ReserveTimeout. "
                "Exiting after failing to reserve job for {time} seconds.".format(
                    time=reserve_timeout
                )
            )
            break
        else:
            # signal one completed job to the progress listener
            progress_queue.put(None)
def workers_watchdog(
running_workers: List[multiprocessing.Process], stop_event: threading.Event()
):
"""Checks that workers in running_workers are stil running.
If none are running anymore, inform user and finish.
"""
while True:
one_alive = False
for worker in running_workers:
one_alive = one_alive or worker.is_alive()
# if all workers are dead, inform user
if not one_alive:
logger.debug(
"All workers have died, check stdout/stderr for error tracebacks."
)
break
if stop_event.is_set():
logger.debug("Stopping Event set, stopping worker watchdog.")
| |
Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Jensen-Shannon Divergence", every=1,
params={"title": "Jensen-Shannon Divergence on test data per Epoch",
"legend": ["Inputs", "Normalized"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Per Dataset Mean Hausdorff Distance", every=1,
params={"title": "Per Dataset Mean Hausdorff Distance",
"legend": list(dataset_configs.keys())}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Dice score per class per epoch", every=1,
params={"title": "Dice score on test patches per class per epoch",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed MRBrainS image", every=1,
params={
"title": "Dice score per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented iSEG After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented iSEG After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented MRBrainS After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented MRBrainS After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Conv1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Conv1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Layer1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer2 FM", PlotType.IMAGES_PLOT,
params={"nrow": 12, "opts": {"store_history": True,
"title": "Layer2 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer3 FM", PlotType.IMAGES_PLOT,
params={"nrow": 16, "opts": {"store_history": True,
"title": "Layer3 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Per-Dataset Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Images Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=5), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
Checkpoint(save_folder, monitor_fn=lambda model_trainer: model_trainer.valid_loss, delta=0.01,
mode=MonitorMode.MIN), Event.ON_EPOCH_END) \
.with_event_handler(PlotAvgGradientPerLayer(visdom_logger, every=25), Event.ON_TRAIN_BATCH_END)
return trainer
elif self._trainer == TrainerType.ResNet_new_loss:
trainer = ResNetTrainerNewLoss(training_config, model_trainers, dataloaders[0], dataloaders[1],
dataloaders[2],
reconstruction_datasets, normalized_reconstructor, input_reconstructor,
segmentation_reconstructor, augmented_input_reconstructor,
augmented_normalized_reconstructor,
gt_reconstructor,
run_config, dataset_configs, save_folder) \
.with_event_handler(PrintTrainingStatus(every=25), Event.ON_BATCH_END) \
.with_event_handler(PrintMonitorsTable(every=25), Event.ON_BATCH_END) \
.with_event_handler(PlotMonitors(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(PlotLR(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Generated Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Validation Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Segmented Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Segmentation Ground Truth Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Label Map Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Inputs Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, | |
from builtins import isinstance
from copy import copy
from math import ceil
from typing import Union, Tuple
from hwt.code import Switch, Concat
from hwt.hdl.constants import INTF_DIRECTION
from hwt.hdl.frameTmpl import FrameTmpl
from hwt.hdl.transTmpl import TransTmpl
from hwt.hdl.typeShortcuts import vec
from hwt.hdl.types.array import HArray
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.hdlType import HdlType
from hwt.hdl.types.struct import HStruct, HStructField
from hwt.hdl.types.structUtils import field_path_get_type, HdlType_select
from hwt.interfaces.intf_map import IntfMap_get_by_field_path, IntfMap, \
walkStructIntfAndIntfMap, HTypeFromIntfMap
from hwt.interfaces.std import BramPort_withoutClk, RegCntrl, Signal, VldSynced
from hwt.interfaces.structIntf import StructIntf
from hwt.interfaces.unionIntf import UnionSink, UnionSource
from hwt.interfaces.utils import addClkRstn
from hwt.math import log2ceil, inRange
from hwt.synthesizer.hObjList import HObjList
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.typePath import TypePath
from hwt.synthesizer.unit import Unit
from hwtLib.abstract.addressStepTranslation import AddressStepTranslation
from ipCorePackager.constants import DIRECTION
def TransTmpl_get_min_addr(t: TransTmpl):
    """Return the lowest bit address reachable in transaction template *t*.

    Leaf templates yield their own ``bitAddr``; for a list of children the
    first child that resolves to an address wins; a single nested template
    is searched recursively. Returns None when no address can be resolved.
    """
    children = t.children
    if not children:
        # leaf template: its own start address is the minimum
        return t.bitAddr
    if isinstance(children, list):
        # first child with a resolvable address, else None
        return next(
            (addr for addr in map(TransTmpl_get_min_addr, children)
             if addr is not None),
            None,
        )
    # single nested template
    return TransTmpl_get_min_addr(children)
def TransTmpl_get_max_addr(t: TransTmpl):
    """Return the highest bit address reachable in transaction template *t*.

    For array templates (``itemCnt`` set) the children addresses are offset
    by the start of the last array item. Leaf templates yield their own
    ``bitAddrEnd``. Returns None when no address can be resolved.
    """
    if t.itemCnt is None:
        offset = 0
    else:
        # offset of the last item of the array
        item_width = t.bit_length() // t.itemCnt
        offset = t.bitAddr + (t.itemCnt - 1) * item_width
    children = t.children
    if not children:
        return t.bitAddrEnd
    if isinstance(children, list):
        # last child with a resolvable address, shifted by the array offset
        for child in reversed(children):
            child_max = TransTmpl_get_max_addr(child)
            if child_max is not None:
                return offset + child_max
        return None
    return offset + TransTmpl_get_max_addr(children)
class BusEndpoint(Unit):
"""
Abstract unit
Delegate request from bus to fields of structure
(fields are represented by various interfaces)
write has higher priority
:note: implementation is usually address decoder
.. aafig::
+------+ +----------+ +---------+
| bus +--->| +---->| field0 |
| |<---+ |<----+ |
+------+ | | +---------+
| |
| endpoint | +---------+
| +---->| field1 |
| |<----+ |
| | +---------+
| |
| | +---------+
| +---->| field2 |
| |<----+ |
+----------+ +---------+
"""
def __init__(self, structTemplate, intfCls=None, shouldEnterFn=None):
"""
:param structTemplate: instance of HStruct which describes
address space of this endpoint
:param intfCls: class of bus interface which should be used
:param shouldEnterFn: function(root_t, structFieldPath) return (shouldEnter, shouldUse)
where shouldEnter is flag that means iterator over this interface
should look inside of this actual object
and shouldUse flag means that this field should be used
(to create interface)
"""
assert intfCls is not None, "intfCls has to be specified"
self._intfCls = intfCls
self.STRUCT_TEMPLATE = structTemplate
if shouldEnterFn is None:
self.shouldEnterFn = self._defaultShouldEnterFn
else:
self.shouldEnterFn = shouldEnterFn
Unit.__init__(self)
@staticmethod
def _defaultShouldEnterFn(root: HdlType, field_path: Tuple[Union[str, int]]):
"""
Default method which resolves how the parts of input data type
should be represented on interface level.
"""
t = field_path_get_type(root, field_path)
isNonPrimitiveArray = isinstance(t, HArray) and\
not isinstance(t.element_t, Bits)
shouldEnter = isinstance(t, HStruct) or isNonPrimitiveArray
shouldUse = not shouldEnter
return shouldEnter, shouldUse
    def _getWordAddrStep(self) -> int:
        """
        :return: how many address units is one word on bus (e.g. 32b AXI -> 4)
        """
        # abstract: concrete bus endpoints (AXI, Avalon, ...) must override
        raise NotImplementedError(
            "Should be overridden in concrete implementation, this is abstract class")
    def _getAddrStep(self) -> int:
        """
        :return: how many bits does 1 address unit addresses, (e.g. AXI -> 8b, index to uint32_t[N] -> 32)
        """
        # abstract: concrete bus endpoints (AXI, Avalon, ...) must override
        raise NotImplementedError(
            "Should be overridden in concrete implementation, this is abstract class")
    def _config(self):
        # inherit HDL parameters (DATA_WIDTH, ADDR_WIDTH, ...) from the bus interface class
        self._intfCls._config(self)
    def _declr(self):
        """Declare IO: clock/reset, the slave bus and one interface per decoded field."""
        addClkRstn(self)
        # bus interface shares this unit's parameters (DATA_WIDTH, ...)
        with self._paramsShared():
            self.bus = self._intfCls()
        # ._m() marks the decoded field interfaces as master (driven by this unit)
        self.decoded = StructIntf(
            self.STRUCT_TEMPLATE, tuple(),
            instantiateFieldFn=self._mkFieldInterface)._m()
@staticmethod
def intf_for_Bits(t):
if t.const:
t = copy(t)
t.const = False
p = Signal(dtype=t, masterDir=DIRECTION.IN)
else:
p = RegCntrl()
p.DATA_WIDTH = t.bit_length()
return p
def _mkFieldInterface(self, structIntf: StructIntf, field: HStructField):
    """
    Instantiate field interface for fields in structure template of this endpoint

    :return: interface for specified field
    """
    t = field.dtype
    path = structIntf._field_path / field.name
    shouldEnter, shouldUse = self.shouldEnterFn(self.STRUCT_TEMPLATE, path)
    if shouldUse:
        # leaf: represent this field directly with a single interface
        if isinstance(t, Bits):
            p = BusEndpoint.intf_for_Bits(t)
        elif isinstance(t, HArray):
            # array of primitive elements -> memory style port
            p = BramPort_withoutClk()
            assert isinstance(t.element_t, Bits), t.element_t
            p.DATA_WIDTH = t.element_t.bit_length()
            # NOTE(review): log2ceil(t.size - 1) — confirm this yields
            # enough address bits for all t.size items (off-by-one risk).
            p.ADDR_WIDTH = log2ceil(t.size - 1)
        else:
            raise NotImplementedError(t)
    elif shouldEnter:
        # composite: build child interfaces recursively
        if isinstance(t, HArray):
            e_t = t.element_t
            if isinstance(e_t, Bits):
                # array of scalars unrolled into one interface per item
                p = HObjList()
                for i_i in range(int(t.size)):
                    i = BusEndpoint.intf_for_Bits(e_t)
                    structIntf._fieldsToInterfaces[path / i_i] = i
                    p.append(i)
            elif isinstance(e_t, HStruct):
                p = HObjList(
                    StructIntf(t.element_t,
                               path / i,
                               instantiateFieldFn=self._mkFieldInterface)
                    for i in range(int(t.size))
                )
                # children share the field->interface registry of the parent
                for i in p:
                    i._fieldsToInterfaces = structIntf._fieldsToInterfaces
            else:
                raise NotImplementedError()
        elif isinstance(t, HStruct):
            p = StructIntf(t, path,
                           instantiateFieldFn=self._mkFieldInterface)
            p._fieldsToInterfaces = structIntf._fieldsToInterfaces
        else:
            raise TypeError(t)
    # NOTE(review): if shouldEnterFn returns (False, False), `p` is unbound
    # and this raises NameError — presumably never happens for valid templates.
    return p
def getPort(self, transTmpl: TransTmpl):
    """Look up the interface that was created for the field addressed
    by *transTmpl* (registered during _declr)."""
    return self.decoded._fieldsToInterfaces[tuple(transTmpl.getFieldPath())]
def isInMyAddrRange(self, addr_sig):
    """Return an expression testing that *addr_sig* falls into this
    endpoint's address range (bounds computed in _parseTemplate).

    NOTE(review): assumes inRange() implements the usual half-open
    [_ADDR_MIN, _ADDR_MAX) check — confirm in its definition.
    """
    return inRange(addr_sig, self._ADDR_MIN, self._ADDR_MAX)
def _parseTemplate(self):
    """
    Resolve the address layout of STRUCT_TEMPLATE: address range bounds,
    the list of bram-port mapped fields and the per-word layout of the
    directly (register-like) mapped fields.
    """
    self.WORD_ADDR_STEP = self._getWordAddrStep()
    self.ADDR_STEP = self._getAddrStep()
    AW = int(self.ADDR_WIDTH)
    SUGGESTED_AW = self._suggestedAddrWidth()
    assert SUGGESTED_AW <= AW, ("Address width too small", SUGGESTED_AW, AW)
    tmpl = TransTmpl(self.STRUCT_TEMPLATE)
    self._ADDR_MIN = TransTmpl_get_min_addr(tmpl) // self.ADDR_STEP
    self._ADDR_MAX = ceil(TransTmpl_get_max_addr(tmpl) / self.ADDR_STEP)
    # resolve addresses for bram port mapped fields
    self._bramPortMapped = []

    def shouldEnterFn(trans_tmpl: TransTmpl):
        # descend into composite interfaces, harvest bram ports, skip the rest
        p = trans_tmpl.getFieldPath()
        intf = self.decoded._fieldsToInterfaces[p]
        if isinstance(intf, (StructIntf, UnionSink, UnionSource, HObjList)):
            shouldEnter = True
            shouldUse = False
        elif isinstance(intf, BramPort_withoutClk):
            shouldEnter = False
            shouldUse = True
        else:
            shouldEnter = False
            shouldUse = False
        return shouldEnter, shouldUse

    for ((base, end), t) in tmpl.walkFlatten(shouldEnterFn=shouldEnterFn):
        self._bramPortMapped.append(((base, end), t))
    # resolve exact addresses for directly mapped field parts
    directly_mapped_fields = {}
    for p, out in self.decoded._fieldsToInterfaces.items():
        if not isinstance(out, (RegCntrl, Signal)):
            continue
        # build a nested path dict (shape expected by HdlType_select)
        a = directly_mapped_fields
        for _p in p:
            if isinstance(_p, int) and _p != 0:
                # we need spec only for first array item
                break
            a = a.setdefault(_p, {})
    dmw = self._directly_mapped_words = []
    if directly_mapped_fields:
        DW = self.DATA_WIDTH
        # narrow the template to the directly mapped fields only and
        # slice it into bus-word sized frames
        directly_mapped_t = HdlType_select(
            self.STRUCT_TEMPLATE,
            directly_mapped_fields)
        tmpl = TransTmpl(directly_mapped_t)
        frames = list(FrameTmpl.framesFromTransTmpl(
            tmpl, DW, maxPaddingWords=0,
            trimPaddingWordsOnStart=True,
            trimPaddingWordsOnEnd=True,))
        for f in frames:
            # word indices inside a frame are frame-relative; rebase them
            f_word_offset = f.startBitAddr // DW
            for (w_i, items) in f.walkWords(showPadding=True):
                dmw.append((w_i + f_word_offset, items))
def _suggestedAddrWidth(self):
"""
Based on struct template resolve how many bits for
address is needed
"""
bitSize = self.STRUCT_TEMPLATE.bit_length()
wordAddrStep = self._getWordAddrStep()
addrStep = self._getAddrStep()
# align to word size
if bitSize % wordAddrStep != 0:
bitSize += wordAddrStep - (bitSize % wordAddrStep)
maxAddr = (bitSize // addrStep) - 1
return maxAddr.bit_length()
def propagateAddr(self, src_addr_sig: RtlSignal,
                  src_addr_step: int,
                  dst_addr_sig: RtlSignal,
                  dst_addr_step: int,
                  transTmpl: TransTmpl):
    """
    :param src_addr_sig: input signal with address
    :param src_addr_step: how many bits is addressing one unit of src_addr_sig
    :param dst_addr_sig: output signal for address
    :param dst_addr_step: how many bits is addressing one unit of dst_addr_sig
    :param transTmpl: TransTmpl which has meta-informations
        about this address space transition
    :return: tuple (address-in-range expression, assignment of the
        translated address onto dst_addr_sig)
    """
    IN_W = src_addr_sig._dtype.bit_length()
    # _prefix = transTmpl.getMyAddrPrefix(src_addr_step)
    assert dst_addr_step % src_addr_step == 0
    if not isinstance(transTmpl.dtype, HArray):
        raise TypeError(transTmpl.dtype)
    assert transTmpl.bitAddr % dst_addr_step == 0, (
        f"Has to be addressable by address with this step ({transTmpl})")
    # if the field's base address is a multiple of its size, the range
    # check reduces to comparing the upper address bits with a constant
    addrIsAligned = transTmpl.bitAddr % transTmpl.bit_length() == 0
    bitsForAlignment = AddressStepTranslation(src_addr_step, dst_addr_step).align_bits
    # bits needed to index inside this field, in dst address units
    bitsOfSubAddr = (
        (transTmpl.bitAddrEnd - transTmpl.bitAddr - 1)
        // dst_addr_step
    ).bit_length()
    if addrIsAligned:
        bitsOfAddr = bitsOfSubAddr + bitsForAlignment
        bitsOfPrefix = IN_W - bitsOfAddr
        prefix = (transTmpl.bitAddr // src_addr_step) >> bitsOfAddr
        if bitsOfPrefix == 0:
            # field spans the whole address space: always in range
            addrIsInRange = True
        else:
            # compare the top bitsOfPrefix bits against the constant prefix
            addrIsInRange = src_addr_sig[:(IN_W - bitsOfPrefix)]._eq(prefix)
        addr_tmp = src_addr_sig
    else:
        # unaligned: explicit range compare and rebase to field start
        _addr = transTmpl.bitAddr // src_addr_step
        _addrEnd = transTmpl.bitAddrEnd // src_addr_step
        addrIsInRange = inRange(src_addr_sig, _addr, _addrEnd)
        addr_tmp = self._sig(dst_addr_sig._name +
                             "_addr_tmp", Bits(self.ADDR_WIDTH))
        addr_tmp(src_addr_sig - _addr)
    addr_h = bitsOfSubAddr + bitsForAlignment
    # drop the alignment bits, forward only the sub-address
    connectedAddr = dst_addr_sig(
        addr_tmp[addr_h:bitsForAlignment]
    )
    return (addrIsInRange, connectedAddr)
def connect_directly_mapped_read(self, ar_addr: RtlSignal,
                                 r_data: RtlSignal, default_r_data_drive):
    """
    Connect the RegCntrl.din interfaces to a bus

    Builds a Switch over the read address: each directly mapped word gets
    a case that drives r_data with the concatenation of its field parts
    (padding filled with don't-care).

    :return: the Switch statement (with *default_r_data_drive* as Default,
        if provided)
    """
    DW = int(self.DATA_WIDTH)
    ADDR_STEP = self._getAddrStep()
    directlyMappedWords = []
    for (w_i, items) in self._directly_mapped_words:
        w_data = []
        last_end = w_i * DW
        for tpart in items:
            # parts must tile the word contiguously, no gaps
            assert last_end == tpart.startOfPart, (last_end, tpart.startOfPart)
            if tpart.tmpl is None:
                # padding
                din = vec(None, tpart.bit_length())
            else:
                din = self.getPort(tpart.tmpl)
                if isinstance(din, RegCntrl):
                    din = din.din
                if din._dtype.bit_length() > 1:
                    # take only the bits of the field that belong to this part
                    fr = tpart.getFieldBitRange()
                    din = din[fr[0]:fr[1]]
            w_data.append(din)
            last_end = tpart.endOfPart
        end_of_word = (w_i + 1) * DW
        assert last_end == end_of_word, (last_end, end_of_word)
        # parts were collected LSB-first; reversed for Concat
        # (presumably MSB-first — matches the DW-width assertion below)
        word_val = Concat(*reversed(w_data))
        assert word_val._dtype.bit_length() == DW, (items, word_val)
        directlyMappedWords.append((w_i * (DW // ADDR_STEP), word_val))
    mux = Switch(ar_addr).add_cases(
        [(word_i, r_data(val))
         for (word_i, val) in directlyMappedWords]
    )
    if default_r_data_drive:
        mux.Default(
            default_r_data_drive
        )
    return mux
def connect_directly_mapped_write(self, aw_addr: RtlSignal,
w_data: RtlSignal, en: RtlSignal):
"""
Connect the RegCntrl.dout interfaces to a bus
"""
DW = int(self.DATA_WIDTH)
addrWidth = int(self.ADDR_WIDTH)
ADDR_STEP = self._getAddrStep()
for w_i, items in self._directly_mapped_words:
for tpart in items:
if tpart.tmpl is None:
# padding
continue
out = self.getPort(tpart.tmpl)
if not isinstance(out, RegCntrl):
continue
else:
| |
"""
Holds all combat effects logic
Inflict is to cause an affliction and add a status effect
Apply is to do something right away
All apply_* functions take these inputs and give these outputs:
apply_*(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
:param self: The player the effect is applied for
:param target: The opposing player
:param rules: The rules dictionary to edit
:param left: Whether the player is on the left for the sake of the rules dict
:return: Updated Players and ruleset
All inflict_* functions take these inputs and give these outputs:
inflict_*(value: int, player: Player) -> Player:
:param value: How much damage to do to target
:param player: The character being damaged
:return: Updated Player
"""
from random import randrange
from typing import Tuple, Any
Player = Any
EffectReturn = Tuple[Player, Player, dict]
def inflict_damage(value: int, player: Player) -> Player:
    """
    Subtract *value* hit points from *player* and return it.
    """
    player.hit_points = player.hit_points - value
    return player
def inflict_percent_damage(value: int, player: Player) -> Player:
    """
    Deal damage equal to *value* percent of the player's max HP
    (rounded to the nearest whole point).
    """
    loss = int(round((value / 100.0) * player.max_hit_points))
    player.hit_points = player.hit_points - loss
    return player
def inflict_heal(value: int, player: Player) -> Player:
    """
    Restore *value* hit points to *player* and return it.

    NOTE(review): healing is not capped at max_hit_points here —
    confirm overheal is intended.
    """
    player.hit_points = player.hit_points + value
    return player
def apply_enhancement_sickness(
    self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
    """
    While enhancement sick the player cannot use an enhancement this turn.
    """
    # only the player's flag is cleared; the ruleset is untouched
    self.enhanced = False
    return self, target, rules
# Enhanced effect of Dreamer's Moving Sidewalk - prone
def inflict_prone(value: int, player: Player) -> Player:
    """
    Knock the target prone for *value* turns:
    next turn their block loses to area.
    """
    effect = ["prone", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Dreamer's Moving Sidewalk - prone
def apply_prone(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
    """
    Apply the effects of prone to the player:
    their block loses to area.
    """
    if left:
        block = rules["block"]
        # block no longer beats area
        if "area" in block["beats"]:
            block["beats"].remove("area")
        # block now loses to area
        if "area" not in block["loses"]:
            block["loses"].append("area")
    else:
        area = rules["area"]
        # area no longer loses to block
        if "block" in area["loses"]:
            area["loses"].remove("block")
        # area now beats block
        if "block" not in area["beats"]:
            area["beats"].append("block")
    return self, target, rules
# Enhanced effect of Dreamer's Fold Earth - disorient
def inflict_disorient(value: int, player: Player) -> Player:
    """
    Disorient the target for *value* turns by appending the status effect:
    next turn their dodge loses to block.
    """
    effect = ["disorient", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Dreamer's Fold Earth - disorient
def apply_disorient(
    self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
    """
    Apply the effects of disorient to the target:
    their dodge loses to block.
    """
    if left:
        dodge = rules["dodge"]
        # dodge no longer beats block
        if "block" in dodge["beats"]:
            dodge["beats"].remove("block")
        # dodge now loses to block
        if "block" not in dodge["loses"]:
            dodge["loses"].append("block")
    else:
        block = rules["block"]
        # block no longer loses to dodge
        if "dodge" in block["loses"]:
            block["loses"].remove("dodge")
        # block now beats dodge
        if "dodge" not in block["beats"]:
            block["beats"].append("dodge")
    return self, target, rules
# Enhanced effect of Chosen's Extreme Speed - haste
def inflict_haste(value: int, player: Player) -> Player:
    """
    Haste the target for *value* turns:
    next turn their attack beats an opposing attack (no clash).
    """
    effect = ["haste", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Chosen's Extreme Speed - haste
def apply_haste(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
    """
    Apply the effects of haste:
    the hasted side's attack beats an opposing attack instead of clashing.
    """
    attack = rules["attack"]
    if left:
        # attack no longer loses to attack
        if "attack" in attack["loses"]:
            attack["loses"].remove("attack")
        # attack now beats attack
        if "attack" not in attack["beats"]:
            attack["beats"].append("attack")
    else:
        # mirrored bookkeeping for the right-hand player
        if "attack" in attack["beats"]:
            attack["beats"].remove("attack")
        if "attack" not in attack["loses"]:
            attack["loses"].append("attack")
    return self, target, rules
# Enhanced effect of Chemist's Poison Dart
def inflict_poison(value: int, player: Player) -> Player:
    """
    Poison the target so it takes damage for *value* rounds
    (tracked via the status-effects list).
    """
    effect = ["poison", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Chemist's Poison Dart
def apply_poison(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
    """
    Apply the effects of poison: the poisoned player takes 10% max HP damage.

    NOTE(review): the damage lands on *self* (the status carrier), not on
    *target* — confirm that this matches the effect-resolution caller.
    """
    return inflict_percent_damage(value=10, player=self), target, rules
# Enhanced effect of Cloistered's High Ground
def inflict_counter_attack(value: int, player: Player) -> Player:
    """
    Gain the high ground for *value* turns:
    next turn your area beats attack.
    """
    effect = ["counter_attack", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Cloistered's High Ground
def apply_counter_attack(
    self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
    """
    Apply the effects of counter_attack:
    area beats attack.
    """
    if left:
        area = rules["area"]
        # area no longer loses to attack
        if "attack" in area["loses"]:
            area["loses"].remove("attack")
        # area now beats attack
        if "attack" not in area["beats"]:
            area["beats"].append("attack")
    else:
        attack = rules["attack"]
        # attack no longer beats area
        if "area" in attack["beats"]:
            attack["beats"].remove("area")
        # attack now loses to area
        if "area" not in attack["loses"]:
            attack["loses"].append("area")
    return self, target, rules
# Enhanced effect of Cloistered's Broad Deflection
def inflict_counter_disrupt(value: int, player: Player) -> Player:
    """
    Expand your defense for *value* turns:
    next turn your block beats disrupt.
    """
    effect = ["counter_disrupt", value]
    player.status_effects.append(effect)
    return player
# Enhanced effect of Cloistered's Broad Deflection
def apply_counter_disrupt(
    self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
    """
    Apply the effects of counter_disrupt:
    block beats disrupt.
    """
    if left:
        block = rules["block"]
        # block no longer loses to disrupt
        if "disrupt" in block["loses"]:
            block["loses"].remove("disrupt")
        # block now beats disrupt
        if "disrupt" not in block["beats"]:
            block["beats"].append("disrupt")
    else:
        disrupt = rules["disrupt"]
        # disrupt no longer beats block
        if "block" in disrupt["beats"]:
            disrupt["beats"].remove("block")
        # disrupt now loses to block
        if "block" not in disrupt["loses"]:
            disrupt["loses"].append("block")
    return self, target, rules
# Enhanced effect of Creator's Conjure Weaponry / Armory Shopping
def inflict_random_gun(value: int, player: Player) -> Player:
    """
    A gun materializes in your hands. Next turn one random effect:

    0) Pistol - Attack is now dodge
    1) Rifle - 1.5x damage
    2) Shotgun - Attack always clashes
    3) Rocket Launcher - Attack is now area
    """
    weapons = ("pistol", "rifle", "shotgun", "rocket_launcher")
    player.status_effects.append([weapons[randrange(len(weapons))], value])
    return player
# Enhanced effect of Creator's Conjure Weaponry / Armory Shopping
def apply_pistol(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
    """
    Apply the effects of pistol: attack is now dodge.

    Left player: "attack" takes over the current "dodge" rule.
    Right player: every "attack" entry in the other rules is rewritten to
    follow wherever "dodge" appears.
    """
    if left:
        # BUGFIX: copy the dodge rule instead of aliasing it.
        # The original `rules["attack"] = rules["dodge"]` made both keys
        # share one dict (and its lists), so any later effect mutating
        # one rule silently corrupted the other.
        rules["attack"] = {
            "beats": list(rules["dodge"]["beats"]),
            "loses": list(rules["dodge"]["loses"]),
        }
    else:
        for left_key in rules:
            for right_key in rules[left_key]:
                # If right has attack, remove it
                if "attack" in rules[left_key][right_key]:
                    rules[left_key][right_key].remove("attack")
                # If right has dodge, add attack
                if "dodge" in rules[left_key][right_key]:
                    rules[left_key][right_key].append("attack")
    return self, target, rules
# Enhanced effect of Creator's Conjure Weaponry / Armory Shopping
def apply_rifle(self: Player, target: Player, rules: dict, left: bool) -> EffectReturn:
    """
    Apply the effects of rifle: deal a flat 50 damage to the target.

    NOTE(review): inflict_random_gun documents rifle as "1.5x damage",
    the original docstring said "0.5x" — confirm which scaling the flat
    50 is meant to represent.
    """
    return self, inflict_damage(50, target), rules
# Enhanced effect of Creator's Conjure Weaponry / Armory Shopping
def apply_shotgun(
self: Player, target: Player, rules: dict, left: bool
) -> EffectReturn:
"""
Apply the effects of shotgun:
attack always clashes
"""
# "attack": {"beats": [], "loses": []}
if left:
rules["attack"] = {"beats": [], "loses": []}
# "area": {"beats": ["disrupt", "dodge"], "loses": ["block"]},
# "attack": {"beats": ["disrupt", "area"], "loses": ["block", "dodge"]},
# "block": {"beats": ["area"], "loses": ["disrupt", "dodge"]},
# "disrupt": {"beats": ["block", "dodge"], "loses": ["area"]},
# "dodge": {"beats": ["block"], "loses": ["area", "disrupt"]}}
else:
for left_key in rules:
for right_key in rules[left_key]:
# If beats | |
invalid type')
def __setitem__(self, idx, value):
    """Set element *idx* (negative indices allowed) via the Go-side setter."""
    if idx < 0:
        idx += len(self)
    if idx < len(self):
        _tsubasa.Slice_int_set(self.handle, idx, value)
        return
    raise IndexError('slice index out of range')
def __iadd__(self, value):
    """In-place += : append every element of the given sequence."""
    if not isinstance(value, _collections_abc.Iterable):
        raise TypeError('Slice_int.__iadd__ takes a sequence as argument')
    for elt in value:
        self.append(elt)
    return self
def __iter__(self):
    """Reset the iteration cursor and return self as the iterator.

    NOTE(review): one shared cursor per instance — nested iterations over
    the same object interfere; confirm callers never nest them.
    """
    self.index = 0
    return self
def __next__(self):
    """Return the next element from the Go-side slice or raise StopIteration."""
    if self.index < len(self):
        rv = _tsubasa.Slice_int_elem(self.handle, self.index)
        self.index = self.index + 1
        return rv
    raise StopIteration
def append(self, value):
    """Grow the Go-side slice by one element."""
    _tsubasa.Slice_int_append(self.handle, value)
def copy(self, src):
    """ copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
    mx = min(len(self), len(src))
    for i in range(mx):
        self[i] = src[i]
# Python type for slice []int16
class Slice_int16(GoClass):
    """Python proxy for the Go slice type []int16.

    Wraps a Go-side object identified by ``self.handle``; all element
    access is delegated to the generated ``_tsubasa`` extension module.
    """
    def __init__(self, *args, **kwargs):
        """
        handle=A Go-side object is always initialized with an explicit handle=arg
        otherwise parameter is a python list that we copy from
        """
        # shared cursor used by __iter__/__next__ (one iteration at a time)
        self.index = 0
        if len(kwargs) == 1 and 'handle' in kwargs:
            # adopt an existing Go-side slice; bump its refcount
            self.handle = kwargs['handle']
            _tsubasa.IncRef(self.handle)
        elif len(args) == 1 and isinstance(args[0], GoClass):
            # share the handle of another wrapper object
            self.handle = args[0].handle
            _tsubasa.IncRef(self.handle)
        else:
            # fresh Go-side slice, optionally filled from a Python sequence
            self.handle = _tsubasa.Slice_int16_CTor()
            _tsubasa.IncRef(self.handle)
            if len(args) > 0:
                if not isinstance(args[0], _collections_abc.Iterable):
                    raise TypeError('Slice_int16.__init__ takes a sequence as argument')
                for elt in args[0]:
                    self.append(elt)
    def __del__(self):
        # release our reference on the Go-side object
        _tsubasa.DecRef(self.handle)
    def __str__(self):
        # NOTE(review): for len >= 120 the opening '[' is never closed —
        # quirk of the generated code, kept as-is.
        s = 'go.Slice_int16 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
        if len(self) < 120:
            s += ', '.join(map(str, self)) + ']'
        return s
    def __repr__(self):
        return 'go.Slice_int16([' + ', '.join(map(str, self)) + '])'
    def __len__(self):
        return _tsubasa.Slice_int16_len(self.handle)
    def __getitem__(self, key):
        # unit-step slices map to a Go-side subslice (no element copy);
        # other steps fall back to a plain Python list of elements
        if isinstance(key, slice):
            if key.step == None or key.step == 1:
                st = key.start
                ed = key.stop
                if st == None:
                    st = 0
                if ed == None:
                    ed = _tsubasa.Slice_int16_len(self.handle)
                return Slice_int16(handle=_tsubasa.Slice_int16_subslice(self.handle, st, ed))
            return [self[ii] for ii in range(*key.indices(len(self)))]
        elif isinstance(key, int):
            if key < 0:
                key += len(self)
            if key < 0 or key >= len(self):
                raise IndexError('slice index out of range')
            return _tsubasa.Slice_int16_elem(self.handle, key)
        else:
            raise TypeError('slice index invalid type')
    def __setitem__(self, idx, value):
        # negative indices count from the end, like a Python list
        if idx < 0:
            idx += len(self)
        if idx < len(self):
            _tsubasa.Slice_int16_set(self.handle, idx, value)
            return
        raise IndexError('slice index out of range')
    def __iadd__(self, value):
        # += appends each element of the given sequence
        if not isinstance(value, _collections_abc.Iterable):
            raise TypeError('Slice_int16.__iadd__ takes a sequence as argument')
        for elt in value:
            self.append(elt)
        return self
    def __iter__(self):
        # NOTE(review): single shared cursor — nested iterations interfere
        self.index = 0
        return self
    def __next__(self):
        if self.index < len(self):
            rv = _tsubasa.Slice_int16_elem(self.handle, self.index)
            self.index = self.index + 1
            return rv
        raise StopIteration
    def append(self, value):
        # grow the Go-side slice by one element
        _tsubasa.Slice_int16_append(self.handle, value)
    def copy(self, src):
        """ copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
        mx = min(len(self), len(src))
        for i in range(mx):
            self[i] = src[i]
# Python type for slice []int32
class Slice_int32(GoClass):
    """Python proxy for the Go slice type []int32.

    Wraps a Go-side object identified by ``self.handle``; all element
    access is delegated to the generated ``_tsubasa`` extension module.
    """
    def __init__(self, *args, **kwargs):
        """
        handle=A Go-side object is always initialized with an explicit handle=arg
        otherwise parameter is a python list that we copy from
        """
        # shared cursor used by __iter__/__next__ (one iteration at a time)
        self.index = 0
        if len(kwargs) == 1 and 'handle' in kwargs:
            # adopt an existing Go-side slice; bump its refcount
            self.handle = kwargs['handle']
            _tsubasa.IncRef(self.handle)
        elif len(args) == 1 and isinstance(args[0], GoClass):
            # share the handle of another wrapper object
            self.handle = args[0].handle
            _tsubasa.IncRef(self.handle)
        else:
            # fresh Go-side slice, optionally filled from a Python sequence
            self.handle = _tsubasa.Slice_int32_CTor()
            _tsubasa.IncRef(self.handle)
            if len(args) > 0:
                if not isinstance(args[0], _collections_abc.Iterable):
                    raise TypeError('Slice_int32.__init__ takes a sequence as argument')
                for elt in args[0]:
                    self.append(elt)
    def __del__(self):
        # release our reference on the Go-side object
        _tsubasa.DecRef(self.handle)
    def __str__(self):
        # NOTE(review): for len >= 120 the opening '[' is never closed —
        # quirk of the generated code, kept as-is.
        s = 'go.Slice_int32 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
        if len(self) < 120:
            s += ', '.join(map(str, self)) + ']'
        return s
    def __repr__(self):
        return 'go.Slice_int32([' + ', '.join(map(str, self)) + '])'
    def __len__(self):
        return _tsubasa.Slice_int32_len(self.handle)
    def __getitem__(self, key):
        # unit-step slices map to a Go-side subslice (no element copy);
        # other steps fall back to a plain Python list of elements
        if isinstance(key, slice):
            if key.step == None or key.step == 1:
                st = key.start
                ed = key.stop
                if st == None:
                    st = 0
                if ed == None:
                    ed = _tsubasa.Slice_int32_len(self.handle)
                return Slice_int32(handle=_tsubasa.Slice_int32_subslice(self.handle, st, ed))
            return [self[ii] for ii in range(*key.indices(len(self)))]
        elif isinstance(key, int):
            if key < 0:
                key += len(self)
            if key < 0 or key >= len(self):
                raise IndexError('slice index out of range')
            return _tsubasa.Slice_int32_elem(self.handle, key)
        else:
            raise TypeError('slice index invalid type')
    def __setitem__(self, idx, value):
        # negative indices count from the end, like a Python list
        if idx < 0:
            idx += len(self)
        if idx < len(self):
            _tsubasa.Slice_int32_set(self.handle, idx, value)
            return
        raise IndexError('slice index out of range')
    def __iadd__(self, value):
        # += appends each element of the given sequence
        if not isinstance(value, _collections_abc.Iterable):
            raise TypeError('Slice_int32.__iadd__ takes a sequence as argument')
        for elt in value:
            self.append(elt)
        return self
    def __iter__(self):
        # NOTE(review): single shared cursor — nested iterations interfere
        self.index = 0
        return self
    def __next__(self):
        if self.index < len(self):
            rv = _tsubasa.Slice_int32_elem(self.handle, self.index)
            self.index = self.index + 1
            return rv
        raise StopIteration
    def append(self, value):
        # grow the Go-side slice by one element
        _tsubasa.Slice_int32_append(self.handle, value)
    def copy(self, src):
        """ copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
        mx = min(len(self), len(src))
        for i in range(mx):
            self[i] = src[i]
# Python type for slice []int64
class Slice_int64(GoClass):
    """Python proxy for the Go slice type []int64.

    Wraps a Go-side object identified by ``self.handle``; all element
    access is delegated to the generated ``_tsubasa`` extension module.
    """
    def __init__(self, *args, **kwargs):
        """
        handle=A Go-side object is always initialized with an explicit handle=arg
        otherwise parameter is a python list that we copy from
        """
        # shared cursor used by __iter__/__next__ (one iteration at a time)
        self.index = 0
        if len(kwargs) == 1 and 'handle' in kwargs:
            # adopt an existing Go-side slice; bump its refcount
            self.handle = kwargs['handle']
            _tsubasa.IncRef(self.handle)
        elif len(args) == 1 and isinstance(args[0], GoClass):
            # share the handle of another wrapper object
            self.handle = args[0].handle
            _tsubasa.IncRef(self.handle)
        else:
            # fresh Go-side slice, optionally filled from a Python sequence
            self.handle = _tsubasa.Slice_int64_CTor()
            _tsubasa.IncRef(self.handle)
            if len(args) > 0:
                if not isinstance(args[0], _collections_abc.Iterable):
                    raise TypeError('Slice_int64.__init__ takes a sequence as argument')
                for elt in args[0]:
                    self.append(elt)
    def __del__(self):
        # release our reference on the Go-side object
        _tsubasa.DecRef(self.handle)
    def __str__(self):
        # NOTE(review): for len >= 120 the opening '[' is never closed —
        # quirk of the generated code, kept as-is.
        s = 'go.Slice_int64 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
        if len(self) < 120:
            s += ', '.join(map(str, self)) + ']'
        return s
    def __repr__(self):
        return 'go.Slice_int64([' + ', '.join(map(str, self)) + '])'
    def __len__(self):
        return _tsubasa.Slice_int64_len(self.handle)
    def __getitem__(self, key):
        # unit-step slices map to a Go-side subslice (no element copy);
        # other steps fall back to a plain Python list of elements
        if isinstance(key, slice):
            if key.step == None or key.step == 1:
                st = key.start
                ed = key.stop
                if st == None:
                    st = 0
                if ed == None:
                    ed = _tsubasa.Slice_int64_len(self.handle)
                return Slice_int64(handle=_tsubasa.Slice_int64_subslice(self.handle, st, ed))
            return [self[ii] for ii in range(*key.indices(len(self)))]
        elif isinstance(key, int):
            if key < 0:
                key += len(self)
            if key < 0 or key >= len(self):
                raise IndexError('slice index out of range')
            return _tsubasa.Slice_int64_elem(self.handle, key)
        else:
            raise TypeError('slice index invalid type')
    def __setitem__(self, idx, value):
        # negative indices count from the end, like a Python list
        if idx < 0:
            idx += len(self)
        if idx < len(self):
            _tsubasa.Slice_int64_set(self.handle, idx, value)
            return
        raise IndexError('slice index out of range')
    def __iadd__(self, value):
        # += appends each element of the given sequence
        if not isinstance(value, _collections_abc.Iterable):
            raise TypeError('Slice_int64.__iadd__ takes a sequence as argument')
        for elt in value:
            self.append(elt)
        return self
    def __iter__(self):
        # NOTE(review): single shared cursor — nested iterations interfere
        self.index = 0
        return self
    def __next__(self):
        if self.index < len(self):
            rv = _tsubasa.Slice_int64_elem(self.handle, self.index)
            self.index = self.index + 1
            return rv
        raise StopIteration
    def append(self, value):
        # grow the Go-side slice by one element
        _tsubasa.Slice_int64_append(self.handle, value)
    def copy(self, src):
        """ copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
        mx = min(len(self), len(src))
        for i in range(mx):
            self[i] = src[i]
# Python type for slice []int8
class Slice_int8(GoClass):
    """Python proxy for the Go slice type []int8.

    Wraps a Go-side object identified by ``self.handle``; all element
    access is delegated to the generated ``_tsubasa`` extension module.
    """
    def __init__(self, *args, **kwargs):
        """
        handle=A Go-side object is always initialized with an explicit handle=arg
        otherwise parameter is a python list that we copy from
        """
        # shared cursor used by __iter__/__next__ (one iteration at a time)
        self.index = 0
        if len(kwargs) == 1 and 'handle' in kwargs:
            # adopt an existing Go-side slice; bump its refcount
            self.handle = kwargs['handle']
            _tsubasa.IncRef(self.handle)
        elif len(args) == 1 and isinstance(args[0], GoClass):
            # share the handle of another wrapper object
            self.handle = args[0].handle
            _tsubasa.IncRef(self.handle)
        else:
            # fresh Go-side slice, optionally filled from a Python sequence
            self.handle = _tsubasa.Slice_int8_CTor()
            _tsubasa.IncRef(self.handle)
            if len(args) > 0:
                if not isinstance(args[0], _collections_abc.Iterable):
                    raise TypeError('Slice_int8.__init__ takes a sequence as argument')
                for elt in args[0]:
                    self.append(elt)
    def __del__(self):
        # release our reference on the Go-side object
        _tsubasa.DecRef(self.handle)
    def __str__(self):
        # NOTE(review): for len >= 120 the opening '[' is never closed —
        # quirk of the generated code, kept as-is.
        s = 'go.Slice_int8 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
        if len(self) < 120:
            s += ', '.join(map(str, self)) + ']'
        return s
    def __repr__(self):
        return 'go.Slice_int8([' + ', '.join(map(str, self)) + '])'
    def __len__(self):
        return _tsubasa.Slice_int8_len(self.handle)
    def __getitem__(self, key):
        # unit-step slices map to a Go-side subslice (no element copy);
        # other steps fall back to a plain Python list of elements
        if isinstance(key, slice):
            if key.step == None or key.step == 1:
                st = key.start
                ed = key.stop
                if st == None:
                    st = 0
                if ed == None:
                    ed = _tsubasa.Slice_int8_len(self.handle)
                return Slice_int8(handle=_tsubasa.Slice_int8_subslice(self.handle, st, ed))
            return [self[ii] for ii in range(*key.indices(len(self)))]
        elif isinstance(key, int):
            if key < 0:
                key += len(self)
            if key < 0 or key >= len(self):
                raise IndexError('slice index out of range')
            return _tsubasa.Slice_int8_elem(self.handle, key)
        else:
            raise TypeError('slice index invalid type')
    def __setitem__(self, idx, value):
        # negative indices count from the end, like a Python list
        if idx < 0:
            idx += len(self)
        if idx < len(self):
            _tsubasa.Slice_int8_set(self.handle, idx, value)
            return
        raise IndexError('slice index out of range')
    def __iadd__(self, value):
        # += appends each element of the given sequence
        if not isinstance(value, _collections_abc.Iterable):
            raise TypeError('Slice_int8.__iadd__ takes a sequence as argument')
        for elt in value:
            self.append(elt)
        return self
    def __iter__(self):
        # NOTE(review): single shared cursor — nested iterations interfere
        self.index = 0
        return self
    def __next__(self):
        if self.index < len(self):
            rv = _tsubasa.Slice_int8_elem(self.handle, self.index)
            self.index = self.index + 1
            return rv
        raise StopIteration
    def append(self, value):
        # grow the Go-side slice by one element
        _tsubasa.Slice_int8_append(self.handle, value)
    def copy(self, src):
        """ copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
        mx = min(len(self), len(src))
        for i in range(mx):
            self[i] = src[i]
# Python type for slice []rune
class Slice_rune(GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_tsubasa.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], GoClass):
self.handle = args[0].handle
_tsubasa.IncRef(self.handle)
else:
self.handle = _tsubasa.Slice_rune_CTor()
_tsubasa.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], _collections_abc.Iterable):
raise TypeError('Slice_rune.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_tsubasa.DecRef(self.handle)
def __str__(self):
s = 'go.Slice_rune len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'go.Slice_rune([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _tsubasa.Slice_rune_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
if key.step == None or key.step == 1:
st = key.start
ed = key.stop
if st == None:
st = 0
if ed == None:
ed = _tsubasa.Slice_rune_len(self.handle)
return Slice_rune(handle=_tsubasa.Slice_rune_subslice(self.handle, st, ed))
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return _tsubasa.Slice_rune_elem(self.handle, key)
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_tsubasa.Slice_rune_set(self.handle, idx, value)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, _collections_abc.Iterable):
raise TypeError('Slice_rune.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _tsubasa.Slice_rune_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_tsubasa.Slice_rune_append(self.handle, value)
def copy(self, src):
""" copy emulates the | |
# -*- coding: utf-8 -*-
import functools
import os
import sys
import time
from operator import itemgetter
import warnings
from threading import Lock
import asyncio
import sqlalchemy
from sqlalchemy import event, inspect, orm
from sqlalchemy.engine.url import make_url
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.session import Session as SessionBase
from .model import Model, DefaultMeta
__version__ = '0.1.0'
def itervalues(d):
    """Py2-compat helper: return an iterator over the values of mapping *d*."""
    vals = d.values()
    return iter(vals)
def _make_table(db):
def _make_table(*args, **kwargs):
if len(args) > 1 and isinstance(args[1], db.Column):
args = (args[0], db.metadata) + args[1:]
info = kwargs.pop('info', None) or {}
info.setdefault('bind_key', None)
kwargs['info'] = info
return sqlalchemy.Table(*args, **kwargs)
return _make_table
def _set_default_query_class(d, cls):
if 'query_class' not in d:
d['query_class'] = cls
def _wrap_with_default_query_class(fn, cls):
    """Wrap *fn* so every call defaults 'query_class' (and a tuple
    backref's options) to *cls*."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        _set_default_query_class(kwargs, cls)
        if "backref" in kwargs:
            backref = kwargs['backref']
            if isinstance(backref, str):
                # normalized tuple is local only; mirrors upstream behavior
                backref = (backref, {})
            _set_default_query_class(backref[1], cls)
        return fn(*args, **kwargs)
    return wrapped
def _include_sqlalchemy(obj, cls):
    """Mirror the public names of ``sqlalchemy``/``sqlalchemy.orm`` onto *obj*
    and install the query-class-aware wrappers."""
    for module in (sqlalchemy, sqlalchemy.orm):
        for name in module.__all__:
            if not hasattr(obj, name):
                setattr(obj, name, getattr(module, name))
    # Note: obj.Table does not attempt to be a SQLAlchemy Table class.
    obj.Table = _make_table(obj)
    for attr in ('relationship', 'relation', 'dynamic_loader'):
        setattr(obj, attr,
                _wrap_with_default_query_class(getattr(obj, attr), cls))
    obj.event = event
class SignallingSession(SessionBase):
    """Session subclass that resolves per-table binds through the owning
    database object, so models with a ``bind_key`` reach the right engine."""

    def __init__(self, db, autocommit=False, autoflush=True, **options):
        #: The application that this session belongs to.
        self.app = app = db.get_app()
        bind = options.pop('bind', None) or db.engine
        binds = options.pop('binds', db.get_binds(app))
        SessionBase.__init__(
            self,
            autocommit=autocommit,
            autoflush=autoflush,
            bind=bind,
            binds=binds,
            **options
        )

    def get_bind(self, mapper=None, clause=None):
        """Return the engine for *mapper*, honoring its table's ``bind_key``."""
        # mapper is None when the caller just wants a plain connection.
        if mapper is not None:
            table_info = getattr(mapper.mapped_table, 'info', {})
            bind_key = table_info.get('bind_key')
            if bind_key is not None:
                state = get_state(self.app)
                return state.db.get_engine(self.app, bind=bind_key)
        return SessionBase.get_bind(self, mapper, clause)
class BaseQuery(orm.Query):
    """Default query class; extension point for app-specific query helpers."""
    pass
class _QueryProperty(object):
    """Descriptor that exposes ``Model.query`` bound to the current session.

    Returns ``None`` for classes that are not mapped.
    """

    def __init__(self, sa):
        self.sa = sa

    def __get__(self, obj, type):
        try:
            mapper = orm.class_mapper(type)
            if not mapper:
                return None
            return type.query_class(mapper, session=self.sa.session())
        except UnmappedClassError:
            return None
class _EngineConnector(object):
    """Lazily creates, and caches, the engine for one (app, bind) pair."""

    def __init__(self, sa, app, bind=None):
        self._sa = sa
        self._app = app
        self._engine = None
        self._connected_for = None
        self._bind = bind
        self._lock = Lock()

    def get_uri(self):
        """Return the database URI configured for this connector's bind."""
        if self._bind is None:
            return self._app.config.get('SQLALCHEMY_DATABASE_URI')
        binds = self._app.config.get('SQLALCHEMY_BINDS') or ()
        assert self._bind in binds, \
            'Bind %r is not specified. Set it in the SQLALCHEMY_BINDS ' \
            'configuration variable' % self._bind
        return binds[self._bind]

    def get_engine(self):
        """Return the cached engine, rebuilding it when uri/echo changed."""
        with self._lock:
            uri = self.get_uri()
            echo = self._app.config.get('SQLALCHEMY_ECHO')
            cache_key = (uri, echo)
            if cache_key == self._connected_for:
                return self._engine
            options = {'convert_unicode': True}
            self._sa.apply_pool_defaults(self._app, options)
            info = make_url(uri)
            self._sa.apply_driver_hacks(self._app, info, options)
            if echo:
                options['echo'] = echo
            engine = sqlalchemy.create_engine(info, **options)
            self._engine = engine
            self._connected_for = cache_key
            return engine
def get_state(app):
    """Return the ``_SQLAlchemyState`` previously registered on *app*."""
    extensions = app.extensions
    assert 'sqlalchemy' in extensions, \
        'The sqlalchemy extension was not registered to the current ' \
        'application. Please make sure to call init_app() first.'
    return extensions['sqlalchemy']
class _SQLAlchemyState(object):
    """Remembers configuration for the (db, app) tuple."""
    def __init__(self, db):
        # Owning SQLAlchemy instance.
        self.db = db
        # Cache of engine connectors (populated lazily by the SQLAlchemy
        # instance; presumably keyed by bind name — the writer is outside
        # this view).
        self.connectors = {}
class SQLAlchemy(object):
    """Container holding the scoped session, declarative ``Model`` base, and
    engine plumbing for an async web application (registers a Sanic-style
    ``app.middleware('response')`` hook in ``init_app``)."""
    #: Default query class used by :attr:`Model.query` and other queries.
    #: Customize this by passing ``query_class`` to :func:`SQLAlchemy`.
    #: Defaults to :class:`BaseQuery`.
    Query = None
    def __init__(self, app=None, use_native_unicode=True, session_options=None,
                 metadata=None, query_class=BaseQuery, model_class=Model):
        """Build the db object: session factory, declarative base, mirrors.

        :param app: optional application; when given, ``init_app`` runs now.
        :param use_native_unicode: engine-level unicode flag (consumed by
            ``apply_driver_hacks``).
        :param session_options: dict of options for the scoped session.
        :param metadata: optional ``MetaData`` for the declarative base.
        :param query_class: query class installed as :attr:`Query`.
        :param model_class: base class for the declarative ``Model``.
        """
        self.use_native_unicode = use_native_unicode
        self.Query = query_class
        self.session = self.create_scoped_session(session_options)
        self.Model = self.make_declarative_base(model_class, metadata)
        self._engine_lock = Lock()
        self.app = app
        # Mirror sqlalchemy / sqlalchemy.orm public names onto this instance.
        _include_sqlalchemy(self, query_class)
        if app is not None:
            self.init_app(app)
    @property
    def metadata(self):
        """The metadata associated with ``db.Model``.

        Convenience proxy for ``self.Model.metadata``.
        """
        return self.Model.metadata
def create_scoped_session(self, options=None):
"""Create a :class:`~sqlalchemy.orm.scoping.scoped_session`
on the factory from :meth:`create_session`.
An extra key ``'scopefunc'`` can be set on the ``options`` dict to
specify a custom scope function.
:param options: dict of keyword arguments passed to session class in
``create_session``
"""
if options is None:
options = {}
scopefunc = options.pop('scopefunc', asyncio.Task.current_task)
options['query_cls'] = options['query_cls'] if 'query_cls' in options else self.Query
return orm.scoped_session(
self.create_session(options), scopefunc=scopefunc
)
def create_session(self, options):
"""Create the session factory used by :meth:`create_scoped_session`.
The factory **must** return an object that SQLAlchemy recognizes as a session,
or registering session events may raise an exception.
Valid factories include a :class:`~sqlalchemy.orm.session.Session`
class or a :class:`~sqlalchemy.orm.session.sessionmaker`.
The default implementation creates a ``sessionmaker`` for :class:`SignallingSession`.
:param options: dict of keyword arguments passed to session class
"""
#return orm.sessionmaker(class_=SessionBase, db=self, **options)
return orm.sessionmaker(class_=SignallingSession, db=self, **options)
def make_declarative_base(self, model, metadata=None):
"""Creates the declarative base that all models will inherit from.
:param model: base model class (or a tuple of base classes) to pass
to :func:`~sqlalchemy.ext.declarative.declarative_base`. Or a class
returned from ``declarative_base``, in which case a new base class
is not created.
:param: metadata: :class:`~sqlalchemy.MetaData` instance to use, or
none to use SQLAlchemy's default.
``model`` can be an existing declarative base in order to support
complex customization such as changing the metaclass.
"""
if not isinstance(model, DeclarativeMeta):
model = declarative_base(
cls=model,
name='Model',
metadata=metadata,
metaclass=DefaultMeta
)
# if user passed in a declarative base and a metaclass for some reason,
# make sure the base uses the metaclass
if metadata is not None and model.metadata is not metadata:
model.metadata = metadata
if not getattr(model, 'query_class', None):
model.query_class = self.Query
model.query = _QueryProperty(self)
return model
def init_app(self, app):
"""This callback can be used to initialize an application for the
use with this database setup. Never use a database in the context
of an application not initialized that way or connections will
leak.
"""
if (
'SQLALCHEMY_DATABASE_URI' not in app.config and
'SQLALCHEMY_BINDS' not in app.config
):
warnings.warn(
'Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. '
'Defaulting SQLALCHEMY_DATABASE_URI to "sqlite:///:memory:".'
)
app.config['SQLALCHEMY_DATABASE_URI'] = app.config.get('SQLALCHEMY_DATABASE_URI') or 'sqlite:///:memory:'
app.config['SQLALCHEMY_BINDS'] = app.config.get('SQLALCHEMY_BINDS') or None
app.config['SQLALCHEMY_NATIVE_UNICODE'] = app.config.get('SQLALCHEMY_NATIVE_UNICODE') or None
app.config['SQLALCHEMY_ECHO'] = app.config.get('SQLALCHEMY_ECHO') or False
app.config['SQLALCHEMY_RECORD_QUERIES'] = app.config.get('SQLALCHEMY_RECORD_QUERIES') or None
app.config['SQLALCHEMY_POOL_SIZE'] = app.config.get('SQLALCHEMY_POOL_SIZE') or None
app.config['SQLALCHEMY_POOL_TIMEOUT'] = app.config.get('SQLALCHEMY_POOL_TIMEOUT') or None
app.config['SQLALCHEMY_POOL_RECYCLE'] = app.config.get('SQLALCHEMY_POOL_RECYCLE') or None
app.config['SQLALCHEMY_MAX_OVERFLOW'] = app.config.get('SQLALCHEMY_MAX_OVERFLOW') or None
app.config['SQLALCHEMY_COMMIT_ON_RESPONSE'] = app.config.get('SQLALCHEMY_COMMIT_ON_RESPONSE') or False
self.app = app
if (not hasattr(app, 'extensions')) or (app.extensions is None):
app.extensions = {}
app.extensions['sqlalchemy'] = _SQLAlchemyState(self)
@app.middleware('response')
async def shutdown_session(request, response):
try:
if app.config['SQLALCHEMY_COMMIT_ON_RESPONSE']:
self.session.commit()
except:
self.session.rollback()
raise
finally:
#self.session.close()
self.session.remove()
def apply_pool_defaults(self, app, options):
def _setdefault(optionkey, configkey):
value = app.config.get(configkey)
if value is not None:
options[optionkey] = value
_setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')
_setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')
_setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')
_setdefault('max_overflow', 'SQLALCHEMY_MAX_OVERFLOW')
def apply_driver_hacks(self, app, info, options):
"""This method is called before engine creation and used to inject
driver specific hacks into the options. The `options` parameter is
a dictionary of keyword arguments that will then be used to call
the :func:`sqlalchemy.create_engine` function.
The default implementation provides some saner defaults for things
like pool sizes for MySQL and sqlite. Also it injects the setting of
`SQLALCHEMY_NATIVE_UNICODE`.
"""
if info.drivername.startswith('mysql'):
info.query.setdefault('charset', 'utf8')
if info.drivername != 'mysql+gaerdbms':
options.setdefault('pool_size', 10)
options.setdefault('pool_recycle', 7200)
elif info.drivername == 'sqlite':
pool_size = options.get('pool_size')
detected_in_memory = False
if info.database in (None, '', ':memory:'):
detected_in_memory = True
from sqlalchemy.pool import StaticPool
options['poolclass'] = StaticPool
if 'connect_args' not in options:
options['connect_args'] = {}
options['connect_args']['check_same_thread'] = False
# we go to memory and the pool size was explicitly set
# to 0 which is fail. Let the user know that
if pool_size == 0:
raise RuntimeError('SQLite in memory database with an '
'empty queue not possible due to data '
'loss.')
# if pool size is None or explicitly set to 0 we assume the
# user did not want a queue for this sqlite connection and
# hook in the null pool.
elif not pool_size:
from sqlalchemy.pool import NullPool
options['poolclass'] = NullPool
# if it's not an in memory database we make the path absolute.
#if not detected_in_memory:
# info.database = os.path.join(app.root_path, info.database)
unu = app.config['SQLALCHEMY_NATIVE_UNICODE']
if unu is None:
unu = self.use_native_unicode
if not unu:
options['use_native_unicode'] = False
    @property
    def engine(self):
        """Gives access to the engine. If the database configuration is bound
        to a specific application (initialized with an application) this will
        always return a database connection. If however the current application
        is used this might raise a :exc:`RuntimeError` if no application is
        active at the moment.
        """
        # Delegates to get_engine() (defined later in this class).
        return self.get_engine()
def make_connector(self, app=None, bind=None):
"""Creates the connector for a given state and bind."""
| |
2*m.b23*m.b163 + 2*m.b23*m.b165 +
2*m.b23*m.b166 + 2*m.b23*m.b168 + 2*m.b23*m.b170 + 2*m.b23*m.b171 - 2*m.b23*m.b173 - 2*m.b23*
m.b176 + 2*m.b23*m.b177 + 2*m.b23*m.b178 - 2*m.b23*m.b181 - 2*m.b23*m.b182 - 2*m.b24*m.b95 - 5
*m.b24 - 2*m.b24*m.b96 - 2*m.b24*m.b97 - 2*m.b24*m.b98 + 2*m.b24*m.b99 + 2*m.b24*m.b100 - 2*
m.b24*m.b103 - 2*m.b24*m.b104 + 2*m.b24*m.b105 + 2*m.b24*m.b106 - 2*m.b24*m.b109 - 2*m.b24*
m.b110 + 2*m.b24*m.b113 + 2*m.b24*m.b114 - 2*m.b24*m.b117 - 2*m.b24*m.b118 + 2*m.b24*m.b119 -
2*m.b24*m.b121 + 2*m.b24*m.b122 - 2*m.b24*m.b124 + 2*m.b24*m.b126 + 2*m.b24*m.b129 + 2*m.b24*
m.b132 + 2*m.b24*m.b133 + 2*m.b24*m.b136 + 2*m.b24*m.b137 + 2*m.b24*m.b138 + 2*m.b24*m.b141 +
2*m.b24*m.b142 + 2*m.b24*m.b145 + 2*m.b24*m.b146 - 2*m.b24*m.b148 - 2*m.b24*m.b150 - 2*m.b24*
m.b151 - 2*m.b24*m.b154 + 2*m.b24*m.b162 + 2*m.b24*m.b163 + 2*m.b24*m.b166 + 2*m.b24*m.b167 -
2*m.b24*m.b168 - 2*m.b24*m.b169 - 2*m.b24*m.b172 - 2*m.b24*m.b173 + 2*m.b24*m.b175 + 2*m.b24*
m.b178 + 2*m.b24*m.b179 - 2*m.b24*m.b182 + 2*m.b25*m.b92 - 10*m.b25 + 2*m.b25*m.b94 - 2*m.b25*
m.b96 - 2*m.b25*m.b98 + 2*m.b25*m.b99 + 2*m.b25*m.b100 + 2*m.b25*m.b101 + 2*m.b25*m.b102 - 2*
m.b25*m.b104 + 2*m.b25*m.b107 - 2*m.b25*m.b110 + 2*m.b25*m.b111 + 2*m.b25*m.b112 + 2*m.b25*
m.b113 + 2*m.b25*m.b114 - 2*m.b25*m.b116 - 2*m.b25*m.b117 - 2*m.b25*m.b118 + 2*m.b25*m.b119 +
2*m.b25*m.b120 - 2*m.b25*m.b121 - 2*m.b25*m.b124 + 2*m.b25*m.b125 + 2*m.b25*m.b126 - 2*m.b25*
m.b127 + 2*m.b25*m.b129 + 2*m.b25*m.b130 + 2*m.b25*m.b133 + 2*m.b25*m.b135 + 2*m.b25*m.b136 +
2*m.b25*m.b138 + 2*m.b25*m.b139 + 2*m.b25*m.b142 + 2*m.b25*m.b144 + 2*m.b25*m.b145 - 2*m.b25*
m.b148 - 2*m.b25*m.b150 - 2*m.b25*m.b151 - 2*m.b25*m.b154 - 2*m.b25*m.b155 - 2*m.b25*m.b157 -
2*m.b25*m.b158 - 2*m.b25*m.b161 + 2*m.b25*m.b163 + 2*m.b25*m.b165 + 2*m.b25*m.b166 - 2*m.b25*
m.b173 + 2*m.b25*m.b174 + 2*m.b25*m.b175 - 2*m.b25*m.b176 + 2*m.b25*m.b177 + 2*m.b25*m.b178 -
2*m.b25*m.b181 - 2*m.b25*m.b182 + 2*m.b26*m.b94 + 23*m.b26 - 2*m.b26*m.b95 - 2*m.b26*m.b96 - 2
*m.b26*m.b98 - 2*m.b26*m.b100 - 2*m.b26*m.b104 - 2*m.b26*m.b106 - 2*m.b26*m.b108 - 2*m.b26*
m.b109 - 2*m.b26*m.b110 - 2*m.b26*m.b112 - 2*m.b26*m.b113 - 2*m.b26*m.b114 - 2*m.b26*m.b115 -
2*m.b26*m.b116 + 2*m.b26*m.b128 - 5*m.b128 + 2*m.b26*m.b129 + 2*m.b26*m.b131 - 3*m.b131 + 2*
m.b26*m.b132 + 2*m.b26*m.b133 + 2*m.b26*m.b135 - 2*m.b26*m.b143 + 14*m.b143 - 2*m.b26*m.b145
- 2*m.b26*m.b146 - 2*m.b26*m.b151 - 2*m.b26*m.b153 - 2*m.b26*m.b154 - 2*m.b26*m.b164 + 14*
m.b164 - 2*m.b26*m.b166 - 2*m.b26*m.b167 - 2*m.b26*m.b169 - 2*m.b26*m.b171 - 2*m.b26*m.b172 -
2*m.b26*m.b173 - 2*m.b26*m.b175 - 2*m.b26*m.b176 + 2*m.b26*m.b177 - 2*m.b26*m.b180 - 2*m.b26*
m.b181 - 2*m.b27*m.b92 + 11*m.b27 - 2*m.b27*m.b93 + 2*m.b27*m.b94 - 2*m.b27*m.b95 - 2*m.b27*
m.b96 - 2*m.b27*m.b98 - 2*m.b27*m.b100 - 2*m.b27*m.b101 - 2*m.b27*m.b102 - 2*m.b27*m.b104 - 2*
m.b27*m.b106 - 2*m.b27*m.b108 - 2*m.b27*m.b109 - 2*m.b27*m.b110 - 2*m.b27*m.b112 - 2*m.b27*
m.b113 - 2*m.b27*m.b114 + 2*m.b27*m.b118 + 2*m.b27*m.b119 + 2*m.b27*m.b121 + 2*m.b27*m.b122 +
2*m.b27*m.b123 + 2*m.b27*m.b125 + 2*m.b27*m.b128 + 2*m.b27*m.b129 + 2*m.b27*m.b131 + 2*m.b27*
m.b132 + 2*m.b27*m.b133 + 2*m.b27*m.b135 - 2*m.b27*m.b139 - 2*m.b27*m.b143 - 2*m.b27*m.b145 -
2*m.b27*m.b147 - 2*m.b27*m.b151 - 2*m.b27*m.b153 + 2*m.b27*m.b155 + 2*m.b27*m.b156 + 2*m.b27*
m.b157 + 2*m.b27*m.b159 - 2*m.b27*m.b164 - 2*m.b27*m.b166 - 2*m.b27*m.b169 - 2*m.b27*m.b171 -
2*m.b27*m.b173 - 2*m.b27*m.b175 + 2*m.b27*m.b177 - 2*m.b27*m.b180 - 2*m.b28*m.b92 + 24*m.b28
- 2*m.b28*m.b93 - 2*m.b28*m.b94 - 2*m.b28*m.b95 - 2*m.b28*m.b96 + 2*m.b28*m.b98 - 2*m.b28*
m.b99 - 2*m.b28*m.b101 - 2*m.b28*m.b102 - 2*m.b28*m.b103 - 2*m.b28*m.b105 - 2*m.b28*m.b107 - 2
*m.b28*m.b109 - 2*m.b28*m.b110 - 2*m.b28*m.b111 - 2*m.b28*m.b113 - 2*m.b28*m.b114 - 2*m.b28*
m.b115 - 2*m.b28*m.b116 + 2*m.b28*m.b118 + 2*m.b28*m.b119 + 2*m.b28*m.b121 + 2*m.b28*m.b122 +
2*m.b28*m.b123 + 2*m.b28*m.b125 + 2*m.b28*m.b128 + 2*m.b28*m.b129 + 2*m.b28*m.b131 + 2*m.b28*
m.b132 + 2*m.b28*m.b133 + 2*m.b28*m.b135 - 2*m.b28*m.b138 - 2*m.b28*m.b139 - 2*m.b28*m.b140 +
3*m.b140 - 2*m.b28*m.b141 - 2*m.b28*m.b142 - 2*m.b28*m.b143 - 2*m.b28*m.b144 - 2*m.b28*m.b145
- 2*m.b28*m.b146 - 2*m.b28*m.b147 - 2*m.b28*m.b151 - 2*m.b28*m.b153 - 2*m.b28*m.b154 + 2*
m.b28*m.b155 + 2*m.b28*m.b156 + 2*m.b28*m.b157 + 2*m.b28*m.b159 - 2*m.b28*m.b164 - 2*m.b28*
m.b166 - 2*m.b28*m.b167 - 2*m.b28*m.b169 - 2*m.b28*m.b171 - 2*m.b28*m.b172 - 2*m.b28*m.b173 -
2*m.b28*m.b175 - 2*m.b28*m.b176 + 2*m.b28*m.b177 - 2*m.b28*m.b180 - 2*m.b28*m.b181 - 2*m.b29*
m.b92 + 5*m.b29 - 2*m.b29*m.b93 + 2*m.b29*m.b94 - 2*m.b29*m.b95 - 2*m.b29*m.b96 - 2*m.b29*
m.b98 - 2*m.b29*m.b100 - 2*m.b29*m.b104 - 2*m.b29*m.b106 - 2*m.b29*m.b108 - 2*m.b29*m.b112 - 2
*m.b29*m.b113 - 2*m.b29*m.b114 + 2*m.b29*m.b118 + 2*m.b29*m.b119 + 2*m.b29*m.b121 + 2*m.b29*
m.b122 + 2*m.b29*m.b123 + 2*m.b29*m.b125 + 2*m.b29*m.b128 + 2*m.b29*m.b129 + 2*m.b29*m.b131 +
2*m.b29*m.b132 + 2*m.b29*m.b133 + 2*m.b29*m.b135 - 2*m.b29*m.b145 - 2*m.b29*m.b153 - 2*m.b29*
m.b166 - 2*m.b29*m.b171 - 2*m.b29*m.b175 - 2*m.b29*m.b180 - 2*m.b30*m.b94 + 10*m.b30 + 2*m.b30
*m.b98 - 2*m.b30*m.b99 - 2*m.b30*m.b101 - 2*m.b30*m.b102 + 2*m.b30*m.b104 - 2*m.b30*m.b105 + 2
*m.b30*m.b108 - 2*m.b30*m.b109 - 2*m.b30*m.b110 - 2*m.b30*m.b111 - 2*m.b30*m.b138 - 2*m.b30*
m.b139 - 2*m.b30*m.b141 - 2*m.b30*m.b143 - 2*m.b30*m.b144 - 2*m.b30*m.b147 + 2*m.b30*m.b148 +
2*m.b30*m.b150 - 2*m.b30*m.b151 + 2*m.b30*m.b155 + 2*m.b30*m.b156 + 2*m.b30*m.b157 + 2*m.b30*
m.b159 - 2*m.b30*m.b162 - 2*m.b30*m.b164 - 2*m.b30*m.b165 + 2*m.b30*m.b168 - 2*m.b30*m.b169 -
2*m.b30*m.b173 - 2*m.b30*m.b174 + 2*m.b30*m.b177 + 2*m.b31*m.b94 + 16*m.b31 - 2*m.b31*m.b95 -
2*m.b31*m.b96 - 2*m.b31*m.b98 - 2*m.b31*m.b100 - 2*m.b31*m.b104 - 2*m.b31*m.b106 - 2*m.b31*
m.b108 - 2*m.b31*m.b109 - 2*m.b31*m.b110 + 2*m.b31*m.b111 - 2*m.b31*m.b113 - 2*m.b31*m.b114 -
2*m.b31*m.b115 - 2*m.b31*m.b116 + 2*m.b31*m.b128 + 2*m.b31*m.b129 + 2*m.b31*m.b131 + 2*m.b31*
m.b132 + 2*m.b31*m.b133 + 2*m.b31*m.b135 - 2*m.b31*m.b143 + 2*m.b31*m.b144 - 2*m.b31*m.b145 -
2*m.b31*m.b146 - 2*m.b31*m.b151 + 2*m.b31*m.b152 - 2*m.b31*m.b153 - 2*m.b31*m.b154 - 2*m.b31*
m.b164 + 2*m.b31*m.b165 - 2*m.b31*m.b166 - 2*m.b31*m.b167 - 2*m.b31*m.b169 + 2*m.b31*m.b170 -
2*m.b31*m.b171 - 2*m.b31*m.b172 - 2*m.b31*m.b173 + 2*m.b31*m.b174 - 2*m.b31*m.b175 - 2*m.b31*
m.b176 + 2*m.b31*m.b177 - 2*m.b31*m.b180 - 2*m.b31*m.b181 - 2*m.b32*m.b92 + 20*m.b32 - 2*m.b32
*m.b93 - 2*m.b32*m.b94 + 2*m.b32*m.b98 - 2*m.b32*m.b99 - 2*m.b32*m.b101 - 2*m.b32*m.b102 - 2*
m.b32*m.b103 + 2*m.b32*m.b106 + 2*m.b32*m.b108 - 2*m.b32*m.b109 - 2*m.b32*m.b110 - 2*m.b32*
m.b111 - 2*m.b32*m.b113 - 2*m.b32*m.b114 - 2*m.b32*m.b115 - 2*m.b32*m.b116 + 2*m.b32*m.b118 +
2*m.b32*m.b119 + 2*m.b32*m.b121 + 2*m.b32*m.b122 + 2*m.b32*m.b123 + 2*m.b32*m.b125 - 2*m.b32*
m.b138 - 2*m.b32*m.b139 - 2*m.b32*m.b140 - 2*m.b32*m.b143 - 2*m.b32*m.b144 - 2*m.b32*m.b145 -
2*m.b32*m.b146 - 2*m.b32*m.b147 + 2*m.b32*m.b149 + 2*m.b32*m.b150 - 2*m.b32*m.b151 - 2*m.b32*
m.b153 - 2*m.b32*m.b154 + 2*m.b32*m.b155 + 2*m.b32*m.b156 + 2*m.b32*m.b157 + 2*m.b32*m.b159 +
2*m.b32*m.b162 + 2*m.b32*m.b163 - 2*m.b32*m.b164 - 2*m.b32*m.b166 - 2*m.b32*m.b167 - 2*m.b32*
m.b169 - 2*m.b32*m.b170 - 2*m.b32*m.b171 - 2*m.b32*m.b172 - 2*m.b32*m.b173 - 2*m.b32*m.b174 -
2*m.b32*m.b175 - 2*m.b32*m.b176 + 2*m.b32*m.b177 - 2*m.b32*m.b180 - 2*m.b32*m.b181 - 2*m.b33*
m.b95 + 17*m.b33 - 2*m.b33*m.b96 - 2*m.b33*m.b101 - 2*m.b33*m.b102 + 2*m.b33*m.b103 + 2*m.b33*
m.b104 - 2*m.b33*m.b109 - 2*m.b33*m.b110 - 2*m.b33*m.b113 - 2*m.b33*m.b114 - 2*m.b33*m.b115 -
2*m.b33*m.b116 + 2*m.b33*m.b128 + 2*m.b33*m.b129 + 2*m.b33*m.b131 + 2*m.b33*m.b132 + 2*m.b33*
m.b133 + 2*m.b33*m.b135 - 2*m.b33*m.b139 + 2*m.b33*m.b140 - 2*m.b33*m.b143 - 2*m.b33*m.b145 -
2*m.b33*m.b146 - 2*m.b33*m.b147 + 2*m.b33*m.b148 - 2*m.b33*m.b151 - 2*m.b33*m.b153 - 2*m.b33*
m.b154 + 2*m.b33*m.b155 + 2*m.b33*m.b156 + 2*m.b33*m.b157 + 2*m.b33*m.b159 - 2*m.b33*m.b162 -
2*m.b33*m.b163 - 2*m.b33*m.b164 - 2*m.b33*m.b165 - 2*m.b33*m.b166 - 2*m.b33*m.b167 - 2*m.b33*
m.b169 - 2*m.b33*m.b171 - 2*m.b33*m.b172 - 2*m.b33*m.b173 - 2*m.b33*m.b175 - 2*m.b33*m.b176 +
2*m.b33*m.b177 - 2*m.b33*m.b180 - 2*m.b33*m.b181 - 2*m.b34*m.b92 + 21*m.b34 - 2*m.b34*m.b93 -
2*m.b34*m.b94 - 2*m.b34*m.b95 - 2*m.b34*m.b96 - 2*m.b34*m.b97 - 2*m.b34*m.b99 - 2*m.b34*m.b101
- 2*m.b34*m.b102 - 2*m.b34*m.b103 - 2*m.b34*m.b105 - 2*m.b34*m.b107 - 2*m.b34*m.b109 - 2*
m.b34*m.b110 - 2*m.b34*m.b111 - 2*m.b34*m.b113 - 2*m.b34*m.b114 - 2*m.b34*m.b115 - 2*m.b34*
m.b116 + 2*m.b34*m.b118 + 2*m.b34*m.b119 + 2*m.b34*m.b121 + 2*m.b34*m.b122 + 2*m.b34*m.b123 +
2*m.b34*m.b125 + 2*m.b34*m.b128 + 2*m.b34*m.b129 + 2*m.b34*m.b131 + 2*m.b34*m.b132 + 2*m.b34*
m.b133 + 2*m.b34*m.b135 - 2*m.b34*m.b139 - 2*m.b34*m.b143 - 2*m.b34*m.b145 - 2*m.b34*m.b146 -
2*m.b34*m.b147 - 2*m.b34*m.b151 - 2*m.b34*m.b153 - 2*m.b34*m.b154 + 2*m.b34*m.b155 + 2*m.b34*
m.b156 + 2*m.b34*m.b157 + 2*m.b34*m.b159 - 2*m.b34*m.b164 - 2*m.b34*m.b166 - 2*m.b34*m.b167 -
2*m.b34*m.b169 - 2*m.b34*m.b171 - 2*m.b34*m.b172 - 2*m.b34*m.b173 - 2*m.b34*m.b175 - 2*m.b34*
m.b176 + 2*m.b34*m.b177 - 2*m.b34*m.b180 - 2*m.b34*m.b181 - 2*m.b35*m.b92 + 28*m.b35 - 2*m.b35
*m.b93 - 2*m.b35*m.b94 + 2*m.b35*m.b98 - 2*m.b35*m.b99 - 2*m.b35*m.b103 - 2*m.b35*m.b105 - 2*
m.b35*m.b107 - 2*m.b35*m.b109 - 2*m.b35*m.b110 - 2*m.b35*m.b111 - 2*m.b35*m.b113 - 2*m.b35*
m.b114 - 2*m.b35*m.b115 - 2*m.b35*m.b116 + 2*m.b35*m.b118 + 2*m.b35*m.b119 + 2*m.b35*m.b121 +
2*m.b35*m.b122 + 2*m.b35*m.b123 + 2*m.b35*m.b125 - 2*m.b35*m.b138 - 2*m.b35*m.b140 - 2*m.b35*
m.b141 - 2*m.b35*m.b142 - 2*m.b35*m.b143 - 2*m.b35*m.b144 - 2*m.b35*m.b145 - 2*m.b35*m.b146 -
2*m.b35*m.b151 - 2*m.b35*m.b153 - 2*m.b35*m.b154 - 2*m.b35*m.b164 - 2*m.b35*m.b166 - 2*m.b35*
m.b167 - 2*m.b35*m.b169 - 2*m.b35*m.b171 - 2*m.b35*m.b172 - 2*m.b35*m.b173 - 2*m.b35*m.b175 -
2*m.b35*m.b176 + 2*m.b35*m.b177 - 2*m.b35*m.b180 - 2*m.b35*m.b181 - 2*m.b36*m.b92 - 3*m.b36 -
2*m.b36*m.b93 - 2*m.b36*m.b95 - | |
<filename>packages/gst-editing-services/gst_conan/build/__init__.py
from collections import OrderedDict as odict
from conans import ConanFile
import fnmatch
import os
import re
import shutil
import sys
import traceback
from .PkgConfigFile import PkgConfigFile
from .. import base
from .. import configuration
def conanBuildTypes() -> list:
    '''
    The conan build types that this project allows.
    :return: A new list of the allowed build-type names.
    '''
    allowed = ["Debug", "Release"]
    return allowed
def copyFiles(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
    '''
    Copies files matching a wildcard pattern from the source folder. This function exists because I had problems
    using `self.copy` inside the conanfile.
    :param pattern: The wildcard filename pattern. See the python function `fnmatch.fnmatch` for information on wildcards.
    :param srcFolder: The source folder (i.e. where the files are copied from)
    :param destFolder: The destination folder (where files are copied to).
    :param keepPath: If true, the relative path underneath `srcFolder` will be preserved for the copy into `destFolder`.
    If false, the files are copied directly under destFolder.
    :return: The list of copied files relative to the `destFolder`.
    '''
    copied = []
    for relPath in findFiles(pattern, srcFolder):
        srcFile = os.path.join(srcFolder, relPath)
        destRel = relPath if keepPath else os.path.basename(relPath)
        copied.append(destRel)
        destFile = os.path.join(destFolder, destRel)
        parentFolder = os.path.dirname(destFile)
        if parentFolder:
            os.makedirs(parentFolder, exist_ok=True)
        # follow_symlinks=False preserves symlinks as symlinks.
        shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return copied
def copyOneFile(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> str:
    '''
    The same as `copyFiles` except it throws unless exactly one file was copied.
    '''
    copied = copyFiles(pattern, srcFolder, destFolder, keepPath)
    if not copied:
        raise Exception(f"Failed to find {pattern} within folder: {srcFolder}")
    if len(copied) > 1:
        raise Exception(f"Found multiple {pattern} within folder: {srcFolder}")
    return copied[0]
def copyOneSharedObjectFileGroup(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
    '''
    The same as `copySharedObjectFileGroups` except there must be exactly one group, otherwise it throws.
    '''
    groups = findSharedObjectGroups(pattern, srcFolder)
    if len(groups) == 0:
        raise Exception(f"Failed to find {pattern} within folder: {srcFolder}")
    if len(groups) > 1:
        raise Exception(f"Found multiple {pattern} groups within folder: {srcFolder}")
    output = []
    # Exactly one group remains; copy each member (files and symlinks).
    for file in groups[0]:
        srcFile = os.path.join(srcFolder, file)
        destRel = file if keepPath else os.path.basename(file)
        output.append(destRel)
        destFile = os.path.join(destFolder, destRel)
        parentFolder = os.path.dirname(destFile)
        if parentFolder:
            os.makedirs(parentFolder, exist_ok=True)
        shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return output
def copySharedObjectFileGroups(pattern:str, srcFolder:str, destFolder:str, keepPath:bool=True) -> list:
    '''
    Each *.so file can have multiple companions with a version number appended after the `.so` suffix. The companions
    can be files or links to files. For example:
        libwhatever.so
        libwhatever.so.0 [symlink --> libwhatever.so.0.1234.0]
        libwhatever.so.0.1234.0
        libwhatever.so.1 [symlink --> libwhatever.so.1.4321.0]
        libwhatever.so.1.4321.0
    This method finds any filename whose prefix (before the `.so`) match the given pattern. For each pattern, the `*.so`
    file is copied with all of it's companions.
    :param pattern: The wildcard filename pattern for the prefix of the file (before the `.so`). This should not include
    the `.so` or anything that comes after. See the python function `fnmatch.fnmatch` for information on wildcards.
    :param srcFolder: The source folder (i.e. where the files are copied from)
    :param destFolder: The destination folder (where files are copied to).
    :param keepPath: If true, the relative path underneath `srcFolder` will be preserved for the copy into `destFolder`.
    If false, the files are copied directly under destFolder.
    :return: The list of copied files relative to the `destFolder`.
    '''
    output = []
    groups = findSharedObjectGroups(pattern, srcFolder)
    for group in groups:
        # Bug fix: iterate the members of the current group (`group`), not the
        # list of groups itself (`groups`), matching the sibling function
        # `copyOneSharedObjectFileGroup`.
        for file in group:
            srcFile = os.path.join(srcFolder, file)
            if keepPath:
                destFile = file
            else:
                destFile = os.path.basename(file)
            output.append(destFile)
            destFile = os.path.join(destFolder, destFile)
            parentFolder = os.path.dirname(destFile)
            if parentFolder != None and len(parentFolder) > 0:
                os.makedirs(parentFolder, exist_ok=True)
            shutil.copy2(srcFile, destFile, follow_symlinks=False)
    return output
def copytree(srcFolder:str, destFolder:str, includeSubfolders:bool=True, onlyNewerSources:bool=True, ignoreFolders:set=None):
    '''
    Copy elements under `srcFolder` to under `destFolder`.
    :param srcFolder: The source folder.
    :param destFolder: The destination folder.
    :param includeSubfolders: If true, subfolders are also copied (with full contents).
    :param onlyNewerSources: If true, a file is only copied if it does not exist at the destination location, or if the
    file at the destination location is older than the file to be copied.
    :param ignoreFolders: Optional set of folder names to skip (applies at every depth).
    :return: None
    '''
    if not os.path.isdir(srcFolder):
        raise Exception("The source folder is not valid.")
    os.makedirs(destFolder, exist_ok=True)
    for item in os.listdir(srcFolder):
        src = os.path.join(srcFolder, item)
        dest = os.path.join(destFolder, item)
        if os.path.isdir(src):
            if includeSubfolders and (ignoreFolders is None or item not in ignoreFolders):
                # Bug fix: forward all options into the recursion so the
                # ignore-list and overwrite rules apply below the top level
                # (previously they were silently reset to the defaults).
                copytree(src, dest, includeSubfolders, onlyNewerSources, ignoreFolders)
        else:
            doCopy = True
            if onlyNewerSources and os.path.isfile(dest):
                doCopy = os.path.getmtime(src) > os.path.getmtime(dest)
            if doCopy:
                shutil.copy2(src, dest)
def doConanPackage(conanfile:ConanFile, packageInfo:configuration.PackageInfo, buildOutputFolder:str) -> None:
    '''
    This is typically called from the conanfile during the `package` function. This method executes most of the
    logic around copying build output, but it does not copy header files. The caller must do that.
    :param conanfile: The conanfile at the time when the `package` function is being called.
    :param packageInfo: The package information.
    :param buildOutputFolder: The folder where build output can be found.
    :return: Nothing.
    '''
    try:
        # Linux-style extensions; Windows support is not implemented yet
        # (see repository history for the removed per-OS branch).
        extExe = ""
        extLib = ".a"
        extSo = ".so"

        # Copy executables to 'bin' folder
        for exe in packageInfo.executables:
            copyOneFile(f"{exe}{extExe}",
                        srcFolder=buildOutputFolder,
                        destFolder=os.path.join(conanfile.package_folder, "bin"),
                        keepPath=False)

        # Copy static libs to 'lib' folder
        for lib in packageInfo.staticlibs:
            copyOneFile(f"{lib}{extLib}",
                        srcFolder=buildOutputFolder,
                        destFolder=os.path.join(conanfile.package_folder, "lib"),
                        keepPath=False)

        # Copy plugins to 'plugins' folder
        if packageInfo.plugins:
            for pluginName, pluginInfo in packageInfo.plugins.items():
                if pluginInfo.get("optional"):
                    # Optional plugins are gated by a conan option named after
                    # the plugin; getattr replaces the previous eval() call.
                    doPlugin = getattr(conanfile.options, pluginName)
                else:
                    doPlugin = True
                if doPlugin:
                    lib = pluginInfo.get("lib")
                    if not lib:
                        lib = f"libgst{pluginName}"
                    destFolder = os.path.join(conanfile.package_folder, "plugins")
                    try:
                        if conanfile.settings.os == "Linux":
                            copyOneSharedObjectFileGroup(lib, buildOutputFolder, destFolder, keepPath=False)
                        else:
                            copyOneFile(f"{lib}{extSo}", buildOutputFolder, destFolder, keepPath=False)
                    except Exception:
                        conanfile.output.error(f"Failed to find the file {lib}{extSo}.")
                        conanfile.output.error(f"You may need to install some packages on your machine.")
                        conanfile.output.error(f"Look for machine setup instructions: https://github.com/Panopto/gst-conan")
                        # Chain the actual exception instance (exc_info()[1]),
                        # not the exception class (previously exc_info()[0]).
                        innerException = sys.exc_info()[1]
                        raise Exception(f"Failed to find the file {lib}{extSo}.") from innerException

        # Start a list of sharedlibs to be copied.
        if packageInfo.sharedlibs:
            sharedlibs = packageInfo.sharedlibs.copy()
        else:
            sharedlibs = []

        # Run through pkg-config files
        if packageInfo.pkgconfigs:
            srcPcFolder = os.path.join(buildOutputFolder, "pkgconfig")
            destGirFolder = os.path.join(conanfile.package_folder, "data", "gir-1.0")
            destPcFolder = os.path.join(conanfile.package_folder, "pc-installed")
            destTypelibFolder = os.path.join(conanfile.package_folder, "lib", "girepository-1.0")
            # exist_ok guards against a partially populated package folder.
            os.makedirs(destPcFolder, exist_ok=True)
            for pcName, pcInfo in packageInfo.pkgconfigs.items():
                lib = pcInfo.get("lib")
                if lib != None:
                    sharedlibs.append(lib)
                gir = pcInfo.get("gir")
                if gir != None:
                    copyOneFile(f"{gir}.gir", buildOutputFolder, destGirFolder, keepPath=False)
                    copyOneFile(f"{gir}.typelib", buildOutputFolder, destTypelibFolder, keepPath=False)
                # Copy the original pkg-config file
                shutil.copy2(src=os.path.join( srcPcFolder, f"{pcName}.pc"),
                             dst=os.path.join(destPcFolder, f"{pcName}.pc"))
                # Load the pkg-config file, rewrite its variables to point
                # inside the package folder, and save it where conan's cmake
                # generator expects *.pc files to be.
                pcFile = PkgConfigFile()
                pcFile.load(os.path.join(srcPcFolder, f"{pcName}.pc"))
                pcFile.variables["prefix"] = conanfile.package_folder
                pcFile.variables["exec_prefix"] = "${prefix}"
                pcFile.variables["libdir"] = "${prefix}/lib"
                pcFile.variables["includedir"] = "${prefix}/include"
                if pcFile.variables.get("pluginsdir"):
                    pcFile.variables["pluginsdir"] = "${prefix}/plugins"
                if pcFile.variables.get("toolsdir"):
                    pcFile.variables["toolsdir"] = "${prefix}/bin"
                if pcFile.variables.get("datarootdir"):
                    pcFile.variables["datarootdir"] = "${prefix}/data"
                if pcFile.variables.get("datadir"):
                    pcFile.variables["datadir"] = "${prefix}/data"
                if pcFile.variables.get("girdir"):
                    pcFile.variables["girdir"] = "${prefix}/data/gir-1.0"
                if pcFile.variables.get("typelibdir"):
                    pcFile.variables["typelibdir"] = "${libdir}/girepository-1.0"
                pcFile.save(os.path.join(conanfile.package_folder, f"{pcName}.pc"))

        # Copy shared libs to 'lib' folder
        for lib in sharedlibs:
            if conanfile.settings.os == "Linux":
                copyOneSharedObjectFileGroup(lib,
                                             srcFolder=buildOutputFolder,
                                             destFolder=os.path.join(conanfile.package_folder, "lib"),
                                             keepPath=False)
            else:
                copyOneFile(f"{lib}{extSo}",
                            srcFolder=buildOutputFolder,
                            destFolder=os.path.join(conanfile.package_folder, "lib"),
                            keepPath=False)
    except BaseException:
        # Log the full traceback through conan's output, then re-raise.
        conanfile.output.error(traceback.format_exc())
        raise
def doConanPackageInfo(conanfile:ConanFile, packageInfo:configuration.PackageInfo) -> None:
    '''
    This is typically called from the conanfile during the `package_info` function. This method executes
    all of the logic around attaching user_info and cpp_info to the conan package.
    :param conanfile: The conanfile at the time when the `package_info` function is being called.
    :param packageInfo: The package information.
    :return: Nothing.
    '''
    try:
        conanfile.cpp_info.bindirs = ["bin"]
        conanfile.cpp_info.includedirs = ["include"]
        conanfile.cpp_info.libdirs = ["lib"]
        # Linux-style extensions; Windows support is not implemented yet.
        extSo = ".so"
        extLib = ".a"
        conanfile.cpp_info.libs = []
        # Guard each collection against None, consistent with doConanPackage
        # (previously a None pkgconfigs/sharedlibs/staticlibs would crash here).
        if packageInfo.pkgconfigs:
            for pcInfo in packageInfo.pkgconfigs.values():
                lib = pcInfo.get("lib")
                if lib != None:
                    conanfile.cpp_info.libs.append(f"{lib}{extSo}")
        if packageInfo.sharedlibs:
            for lib in packageInfo.sharedlibs:
                conanfile.cpp_info.libs.append(f"{lib}{extSo}")
        if packageInfo.staticlibs:
            for lib in packageInfo.staticlibs:
                conanfile.cpp_info.libs.append(f"{lib}{extLib}")
        if packageInfo.plugins and len(packageInfo.plugins) > 0:
            conanfile.user_info.plugins = os.path.join(conanfile.cpp_info.rootpath, "plugins")
    except BaseException:
        # Log the full traceback through conan's output, then re-raise.
        conanfile.output.error(traceback.format_exc())
        raise
def findFiles(pattern:str, folder:str, recursive:bool=True, prefix=None) -> list:
'''
Find | |
* 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
+ (R55 / (1 + w * 1j * t_values[54]))
+ (R56 / (1 + w * 1j * t_values[55]))
+ (R57 / (1 + w * 1j * t_values[56]))
)
def KK_RC58_fit(params, w, t_values):
    """
    Kramers-Kronig test circuit: a series resistance Rs followed by 58
    parallel RC elements, evaluated at angular frequency w.

    Parameters
    ----------
    params : mapping with keys "Rs" and "R1".."R58"
    w : angular frequency (scalar or array)
    t_values : sequence of 58 time constants, one per RC element
    """
    impedance = params["Rs"]
    # Accumulate the RC contributions in the same left-to-right order as the
    # original explicit expansion: R_k / (1 + j*w*tau_k) for k = 1..58.
    for k in range(58):
        impedance = impedance + params["R{:d}".format(k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC59_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
R56 = params["R56"]
R57 = params["R57"]
R58 = params["R58"]
R59 = params["R59"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * | |
in 1.5
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
warnings.warn(_warn_str.format(fun='plot_day_summary2'), mplDeprecation)
return plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize,
colorup, colordown)
def plot_day_summary2_ochl(ax, opens, closes, highs, lows, ticksize=4,
                           colorup='k', colordown='r',
                           ):
    """Draw one vertical line per day ranging from low to high, with the
    open marked by a left tick and the close by a right tick.

    Thin wrapper around `plot_day_summary2_ohlc` accepting the legacy
    open-close-high-low argument order.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open

    Returns
    -------
    ret : list
        a list of lines added to the axes
    """
    # Reorder OCHL -> OHLC and delegate.
    return plot_day_summary2_ohlc(ax, opens, highs, lows, closes,
                                  ticksize=ticksize, colorup=colorup,
                                  colordown=colordown)
def plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize=4,
                           colorup='k', colordown='r',
                           ):
    """Represent the time, open, high, low, close as a vertical line
    ranging from low to high. The left tick is the open and the right
    tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    closes : sequence
        sequence of closing values
    ticksize : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open

    Returns
    -------
    ret : list
        a list of lines added to the axes
    """
    # note this code assumes if any value open, high, low, close is
    # missing they all are missing
    # A value of -1 marks a missing day; such entries are filtered out of
    # every collection built below.
    rangeSegments = [((i, low), (i, high)) for i, low, high in
                     zip(xrange(len(lows)), lows, highs) if low != -1]
    # the ticks will be from ticksize to 0 in points at the origin and
    # we'll translate these to the i, close location
    openSegments = [((-ticksize, 0), (0, 0))]
    # the ticks will be from 0 to ticksize in points at the origin and
    # we'll translate these to the i, close location
    closeSegments = [((0, 0), (ticksize, 0))]
    # Per-day (x, y) offsets at which the single tick segment is replicated.
    offsetsOpen = [(i, open) for i, open in
                   zip(xrange(len(opens)), opens) if open != -1]
    offsetsClose = [(i, close) for i, close in
                    zip(xrange(len(closes)), closes) if close != -1]
    # Points -> pixels (72 points per inch); scaling y by 0 keeps the ticks
    # horizontal regardless of the data transform.
    scale = ax.figure.dpi * (1.0 / 72.0)
    tickTransform = Affine2D().scale(scale, 0.0)
    # Normalize both colors to RGBA with full opacity.
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, 1
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, 1
    colord = {True: colorup,
              False: colordown,
              }
    colors = [colord[open < close] for open, close in
              zip(opens, closes) if open != -1 and close != -1]
    # All per-day collections must stay in lockstep, one entry per valid day.
    assert(len(rangeSegments) == len(offsetsOpen))
    assert(len(offsetsOpen) == len(offsetsClose))
    assert(len(offsetsClose) == len(colors))
    useAA = 0,  # use tuple here
    lw = 1,  # and here
    rangeCollection = LineCollection(rangeSegments,
                                     colors=colors,
                                     linewidths=lw,
                                     antialiaseds=useAA,
                                     )
    openCollection = LineCollection(openSegments,
                                    colors=colors,
                                    antialiaseds=useAA,
                                    linewidths=lw,
                                    offsets=offsetsOpen,
                                    transOffset=ax.transData,
                                    )
    openCollection.set_transform(tickTransform)
    closeCollection = LineCollection(closeSegments,
                                     colors=colors,
                                     antialiaseds=useAA,
                                     linewidths=lw,
                                     offsets=offsetsClose,
                                     transOffset=ax.transData,
                                     )
    closeCollection.set_transform(tickTransform)
    # Grow the data limits to the full extent of the drawn data, then rescale.
    minpy, maxx = (0, len(rangeSegments))
    miny = min([low for low in lows if low != -1])
    maxy = max([high for high in highs if high != -1])
    corners = (minpy, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()
    # add these last
    ax.add_collection(rangeCollection)
    ax.add_collection(openCollection)
    ax.add_collection(closeCollection)
    return rangeCollection, openCollection, closeCollection
def candlestick2_ochl(ax, opens, closes, highs, lows, width=4,
                      colorup='k', colordown='r',
                      alpha=0.75,
                      ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    Preserves the original open-close-high-low argument order.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    width : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (lineCollection, barCollection)
    """
    # BUG FIX: the delegate call previously passed (ax, opens, highs, closes,
    # lows), swapping `lows` and `closes`; candlestick2_ohlc expects the
    # open-high-low-close order (compare the candlestick2 wrapper below).
    # Also return the collections so the documented return value is honored.
    return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
                             colorup=colorup, colordown=colordown,
                             alpha=alpha)
def candlestick2(ax, opens, closes, highs, lows, width=4,
                 colorup='k', colordown='r',
                 alpha=0.75,
                 ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    This function has been deprecated in 1.4 in favor of
    `candlestick2_ochl`, which maintains the original argument order,
    or `candlestick2_ohlc`, which uses the open-high-low-close order.
    This function will be removed in 1.5

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    closes : sequence
        sequence of closing values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    width : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (lineCollection, barCollection)
    """
    warnings.warn(_warn_str.format(fun='candlestick2'),
                  mplDeprecation)
    # FIX: return the collections (previously discarded) so the documented
    # "Returns" contract is honored, matching plot_day_summary2 above.
    return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
                             colorup=colorup, colordown=colordown,
                             alpha=alpha)
def candlestick2_ohlc(ax, opens, highs, lows, closes, width=4,
                      colorup='k', colordown='r',
                      alpha=0.75,
                      ):
    """Represent the open, close as a bar line and high low range as a
    vertical line.

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    opens : sequence
        sequence of opening values
    highs : sequence
        sequence of high values
    lows : sequence
        sequence of low values
    closes : sequence
        sequence of closing values
    width : int
        size of open and close ticks in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open
    alpha : float
        bar transparency

    Returns
    -------
    ret : tuple
        (rangeCollection, barCollection)
    """
    # note this code assumes if any value open, low, high, close is
    # missing they all are missing
    # Half the bar width, in index units: each candle body spans i +/- delta.
    delta = width / 2.
    # Quadrilateral corners of each candle body (open-to-close box);
    # a value of -1 marks a missing day and is skipped.
    barVerts = [((i - delta, open),
                 (i - delta, close),
                 (i + delta, close),
                 (i + delta, open))
                for i, open, close in zip(xrange(len(opens)), opens, closes)
                if open != -1 and close != -1]
    # One vertical low-high segment per valid day.
    rangeSegments = [((i, low), (i, high))
                     for i, low, high in zip(xrange(len(lows)), lows, highs)
                     if low != -1]
    # Normalize both fill colors to RGBA with the requested transparency.
    r, g, b = colorConverter.to_rgb(colorup)
    colorup = r, g, b, alpha
    r, g, b = colorConverter.to_rgb(colordown)
    colordown = r, g, b, alpha
    colord = {True: colorup,
              False: colordown,
              }
    colors = [colord[open < close]
              for open, close in zip(opens, closes)
              if open != -1 and close != -1]
    # Bodies and range lines must stay in lockstep, one entry per valid day.
    assert(len(barVerts) == len(rangeSegments))
    useAA = 0,  # use tuple here
    lw = 0.5,  # and here
    rangeCollection = LineCollection(rangeSegments,
                                     colors=((0, 0, 0, 1), ),
                                     linewidths=lw,
                                     antialiaseds = useAA,
                                     )
    barCollection = PolyCollection(barVerts,
                                   facecolors=colors,
                                   edgecolors=((0, 0, 0, 1), ),
                                   antialiaseds=useAA,
                                   linewidths=lw,
                                   )
    # Grow the data limits to the full extent of the drawn data, then rescale.
    minx, maxx = 0, len(rangeSegments)
    miny = min([low for low in lows if low != -1])
    maxy = max([high for high in highs if high != -1])
    corners = (minx, miny), (maxx, maxy)
    ax.update_datalim(corners)
    ax.autoscale_view()
    # add these last
    ax.add_collection(barCollection)
    ax.add_collection(rangeCollection)
    return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value | |
# A script to help doing the deliveries.
# Now using the Casava directory structure
# The user is asked to provide a project ID, a run name, and an UPPMAX project
import sys
import os
import glob
import re
import grp
from datetime import datetime
import argparse
import stat
from subprocess import check_call, CalledProcessError
from scilifelab.utils.misc import filtered_walk, query_yes_no, touch_file
from scilifelab.utils.timestamp import utc_time
def fixProjName(pname):
    """Normalize a project name: upper-case the first character and every
    character that directly follows a period, leaving the rest untouched
    (e.g. "j.doe_11_01" -> "J.Doe_11_01").
    """
    fixed = [pname[0].upper()]
    upper_next = False
    for ch in pname[1:]:
        if ch == ".":
            fixed.append(ch)
            upper_next = True
        else:
            fixed.append(ch.upper() if upper_next else ch)
            upper_next = False
    return "".join(fixed)
def is_fastq(fname):
    """Return True if *fname* ends with one of the recognized fastq
    extensions (plain and gzipped, CASAVA and bcbb conventions).
    """
    fastq_endings = (".fastq.gz",
                     ".fastq",
                     "_fastq.txt.gz",
                     "_fastq.txt",
                     ".fastq..gz",
                     "_fastq.txt..gz")
    return fname.endswith(fastq_endings)
def create_final_name(fname, date, fc_id, sample_name):
    """Create the final name of the delivered file.

    The lane, read number and trailing extension are parsed out of *fname*
    (CASAVA naming first, bcbb naming as a fallback) and recombined with the
    supplied date, flowcell id and sample name.  Raises ValueError when the
    file name matches neither convention.
    """
    # CASAVA convention: <sample>_<index>_L<lane>_R<read>_<part>.fastq[...]
    casava = re.match(r'(\S+?)_(?:[ACGTN\-]+|NoIndex|Undetermined)_L0*(\d+)_R(\d)_\d+\.fastq(.*)', fname)
    if casava is not None:
        lane, read, ext = casava.group(2), casava.group(3), casava.group(4)
    else:
        # bcbb convention: <lane>_<date>_<fc>_<index>[_nophix]_<read>_fastq.txt[...]
        bcbb = re.match(r'(\d+)_(\d+)_([^_]+)_(\d+)_(?:nophix_)?(\d+)_fastq.txt(.*)', fname)
        if bcbb is None:
            raise ValueError("Could not parse file name {:s} correctly!".format(fname))
        lane, read, ext = bcbb.group(1), bcbb.group(5), bcbb.group(6)
    stem = "_".join([lane,
                     date,
                     fc_id,
                     sample_name,
                     read])
    # Collapse the double dots produced by the ".fastq..gz"-style inputs.
    return "{:s}.fastq{:s}".format(stem, ext.replace('..', '.'))
def get_file_copy_list(proj_base_dir, dest_proj_path, fcid, deliver_all_fcs, deliver_nophix, skip_list):
    """Walk the project directory and collect [source, dest_dir, dest_name]
    entries for every fastq file that should be delivered.
    """
    include = None if deliver_all_fcs else [fcid]
    to_copy = []
    for fqfile in filtered_walk(proj_base_dir,
                                is_fastq,
                                include_dirs=include,
                                exclude_dirs=skip_list):
        # The relative path encodes <sample>/<run>/..., and the run name
        # encodes <date>_<flowcell id>.
        sample_name, run_name, _ = os.path.relpath(fqfile, proj_base_dir).split(os.sep, 2)
        date, fc_id = run_name.split('_')
        # Skip if we deliver from nophix and the parent dir is not nophix (or vice versa)
        parent = os.path.basename(os.path.dirname(fqfile))
        if deliver_nophix:
            if parent != "nophix":
                continue
        elif parent != run_name:
            continue
        # Skip if a compressed version of the current file exists
        if os.path.exists("{:s}.gz".format(fqfile)):
            print("WARNING: Both compressed and non-compressed versions of {:s} exists! " \
                  "Is compression/decompression in progress? Will deliver compressed version " \
                  "but you should make sure that the delivered files are complete!".format(fqfile))
            continue
        print("DEBUG: source_delivery_path = {:s}".format(os.path.dirname(fqfile)))
        fname = os.path.basename(fqfile)
        print(fname)
        to_copy.append([fqfile,
                        os.path.join(dest_proj_path, sample_name, run_name),
                        create_final_name(fname, date, fc_id, sample_name)])
    return to_copy
def rsync_files(to_copy, logfile, group, dry):
    """Copy each [src_file, dst_dir, dst_name] entry in *to_copy* with rsync,
    creating destination directories as needed, logging each step to
    *logfile* and relaxing file permissions to ug+rw.  In dry-run mode only
    the intended copies are printed.
    """
    # Iterate over the files to copy and create directories and copy files as necessary
    successful = 0
    # NOTE(review): uid/gid are computed here (gid optionally resolved from
    # the *group* name) but never used in this function — verify whether an
    # os.chown call is missing.
    uid = os.getuid()
    gid = os.getgid()
    if group is not None and len(group) > 0:
        gid = grp.getgrnam(group).gr_gid
    for src_file, dst_dir, dst_name in to_copy:
        dst_file = os.path.join(dst_dir, dst_name)
        print "Will copy (rsync) ", src_file, "to ", dst_file
        if not dry:
            # Create the destination directory if necessary
            logfile.write("[{:s}] - Creating run-level delivery directory: {:s} " \
                          "(or leaving it in place if already present)\n".format(utc_time(),
                                                                                 dst_dir))
            if os.path.exists(dst_dir):
                print("Directory {:s} already exists!".format(dst_dir))
            else:
                try:
                    # Create directory hierarchy with ug+rwX permissions
                    os.makedirs(dst_dir, 0770)
                except:
                    print("Could not create run-level delivery directory!")
                    clean_exit(1,logfile,dry)
            # Rsync the file across
            # -a preserves attributes, -c compares by checksum rather than
            # by timestamp/size.
            command_to_execute = ['rsync',
                                  '-ac',
                                  src_file,
                                  dst_file]
            logfile.write("[{:s}] - Executing command: {:s}\n".format(utc_time(), " ".join(command_to_execute)))
            logfile.flush()
            try:
                check_call(command_to_execute)
            except CalledProcessError, e:
                # Log the failure before propagating it to the caller.
                logfile.write("[{:s}] - rsync exited with exit code {:d}\n".format(utc_time(), e.returncode))
                raise e
            logfile.write("[{:s}] - rsync exited with exit code 0\n".format(utc_time()))
            successful += 1
            # Running progress report, one line per completed file.
            print("{:d} of {:d} files copied successfully".format(successful,len(to_copy)))
            # Modify the permissions to ug+rw
            os.chmod(dst_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
def main():
    """Parse the command line and run a delivery: locate the project under
    the Casava directory, collect the fastq files to deliver, rsync them
    into the UPPMAX project's INBOX and flag the INBOX for a permission fix.
    """
    parser = argparse.ArgumentParser(description="A script to help doing the deliveries, now using the Casava directory structure. " \
                                     "The user is asked to provide a project ID, a run name, and an UPPMAX project")
    parser.add_argument('-c', '--casava-path', action="store", dest="caspath", default='/proj/a2010002/nobackup/illumina/',
                        help="Specify a path to a Casava directory manually")
    parser.add_argument('-l', '--log-path', action="store", dest="logpath", default='/proj/a2010002/private/delivery_logs',
                        help="Specify a path to a log file")
    parser.add_argument('-i', '--interactive', action="store_true", dest="interactive", default=False,
                        help="Interactively select samples to be delivered")
    parser.add_argument('-d', '--dry-run', action="store_true", dest="dry", default=False,
                        help="Dry run: nothing will be done")
    parser.add_argument('-a', '--deliver-all-fcs', action="store_true", dest="deliver_all_fcs", default=False,
                        help="rsync samples from all flow cells. Default is to only deliver from specified flowcell")
    parser.add_argument('-p', '--nophix', action="store_true", dest="deliver_nophix", default=False,
                        help="Deliver fastq files from nophix subdirectory. Default is to deliver from run directory")
    parser.add_argument('-g', '--group', action="store", dest="group", default="uppmax",
                        help="Group membership to set on copied files")
    parser.add_argument('project_name', action='store', help="Project name to deliver, e.g. J.Doe_10_01")
    parser.add_argument('flowcell_id', action='store', help="Flowcell id to deliver, e.g. 120824_BD1915ACXX")
    parser.add_argument('uppmax_id', action='store', help="UPPMAX project id to deliver to, e.g. b2012001")
    args = parser.parse_args()
    # NOTE: "Deprication" is a typo but part of the emitted runtime text;
    # left unchanged here.
    print("""\n****** Deprication ******\nPlease note that this script is deprecated and the functionality has been replaced by 'pm deliver raw-data'\n""")
    # Bail out early (exit code 0) if the project is not under the Casava path.
    if not args.project_name in os.listdir(args.caspath):
        print("Could not find project. Check directory listing:")
        for f in os.listdir(args.caspath):
            print(f)
        clean_exit(0,None,args.dry)
    # Collapse long flowcell ids of the form <date>_..._<id> to <date>_<id>.
    fcid = args.flowcell_id
    fcid_comp = fcid.split('_')
    if len(fcid_comp) > 2:
        fcid = fcid_comp[0] + '_' + fcid_comp[-1]
        print("FCID format too long, trying {:s}".format(fcid))
    # Timestamped log file; in dry-run mode log to stdout instead.
    dt = datetime.now()
    time_str = "_".join([str(dt.year),
                         str(dt.month),
                         str(dt.day),
                         str(dt.hour),
                         str(dt.minute),
                         str(dt.second)])
    logfilename = os.path.join(os.path.normpath(args.logpath),"{:s}.log".format(time_str))
    if not args.dry:
        logfile = open(logfilename, "w")
    else:
        logfile = sys.stdout
    logfile.write("[{:s}] - Project to move files for:\n{:s}\n".format(utc_time(), args.project_name))
    logfile.flush()
    proj_base_dir = os.path.join(args.caspath, args.project_name)
    # In interactive mode, let the user opt out of individual sample dirs.
    skip_list = []
    if args.interactive:
        for sample_dir in os.listdir(proj_base_dir):
            if not os.path.isdir(os.path.join(proj_base_dir,sample_dir)):
                continue
            if not query_yes_no("Deliver sample {:s}?".format(sample_dir), default="no"):
                skip_list.append(sample_dir)
    created_proj_dir_name = fixProjName(args.project_name)
    del_path_top = '/proj/' + args.uppmax_id + "/INBOX/" + created_proj_dir_name
    to_copy = get_file_copy_list(proj_base_dir,
                                 del_path_top,
                                 fcid,
                                 args.deliver_all_fcs,
                                 args.deliver_nophix,
                                 skip_list)
    # Prompt user if any of the files are non-compressed
    # (one confirmation covers the whole delivery; declining aborts).
    for fqfile, _, _ in to_copy:
        if os.path.splitext(fqfile)[1] == ".gz":
            continue
        print("WARNING: The file {:s}, which you are about to deliver, does not seem to be compressed. " \
              "It is recommended that you compress files prior to delivery.".format(fqfile))
        if query_yes_no("Do you wish to continue delivering " \
                        "uncompressed fastq files?", default="yes"):
            break
        clean_exit(1,logfile,args.dry)
    rsync_files(to_copy,
                logfile,
                args.group,
                args.dry)
    # Touch the flag for the Uppmax cronjob to fix the INBOX permissions
    touch_file(os.path.join("/sw","uppmax","var","inboxfix","schedule",args.uppmax_id))
    clean_exit(0,logfile,args.dry)
def clean_exit(exitcode, logfile, dry=False):
    """Close the logfile (unless running dry or no logfile was opened) and
    terminate the process with *exitcode*.
    """
    if logfile is not None and not dry:
        logfile.close()
    sys.exit(exitcode)
# Run the delivery when executed as a script; the unittest definitions
# below are only picked up by a test runner.
if __name__ == "__main__":
    main()
########## Tests ###########
import unittest
import shutil
import tempfile
import random
import uuid
class TestDataDelivery(unittest.TestCase):
def test_fixProjName(self):
"""Fix project name
"""
test_pnames = [("j.doe_11_01","J.Doe_11_01"),
("j.Doe_11_01","J.Doe_11_01"),
("J.doe_11_01","J.Doe_11_01"),
("J.Doe_11_01","J.Doe_11_01"),
("doe_11_01","Doe_11_01"),
("j.d.doe_11_01","J.D.Doe_11_01"),]
for test_pname, exp_pname in test_pnames:
obs_pname = fixProjName(test_pname)
self.assertEqual(obs_pname,
exp_pname,
"Did not get the expected fix ({:s}) for project name {:s} (got {:s})".format(exp_pname,test_pname,obs_pname))
def test_is_fastq(self):
"""Determine if a file name corresponds to a fastq file
"""
test_fnames = [("foo.fastq",True),
("foo.fastq.gz",True),
("foo_fastq.txt",True),
("foo_fastq.txt.gz",True),
("foo.fastq.bar",False),
("foo.txt",False),]
for test_fname, exp_result in test_fnames:
obs_result = is_fastq(test_fname)
self.assertEqual(obs_result,
exp_result,
"Did not get expected result ({:s}) for file name {:s}".format(str(exp_result),test_fname))
    def _create_test_files(self, root):
        """Create ten temporary source files under *root* and pair each with
        a randomly nested destination directory and a random destination
        name, in the [src_file, dst_dir, dst_name] format expected by
        rsync_files().
        """
        to_copy = []
        for n in xrange(10):
            fd, sfile = tempfile.mkstemp(suffix=".tmp", prefix="rsync_test_", dir=root)
            # Close the descriptor immediately; only the path is needed.
            os.close(fd)
            # Generate destination file hierarchies
            # 1-5 levels of uuid-named subdirectories under root.
            ddir = root
            for l in xrange(random.randint(1,5)):
                ddir = os.path.join(ddir,str(uuid.uuid4()))
            to_copy.append([sfile,ddir,"{:s}.tmp".format(str(uuid.uuid4()))])
        return to_copy
    def test_rsync_files(self):
        """Test the rsync functionality
        """
        root = tempfile.mkdtemp(prefix="rsync_test_")
        # Create some files to move
        to_copy = self._create_test_files(root)
        # Run rsync
        # Silence the prints from rsync_files by pointing stdout at /dev/null
        # for the duration of the call; the redirected stdout doubles as the
        # logfile argument.
        with open(os.devnull, 'w') as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy,sys.stdout,None,False)
            sys.stdout = old_stdout
        # Verify the copy process
        for src, ddir, dname in to_copy:
            # rsync copies — the source must still exist afterwards.
            self.assertTrue(os.path.exists(src),
                            "The rsync process have removed source file")
            self.assertTrue(os.path.exists(ddir) and os.path.isdir(ddir),
                            "The expected destination directory was not created")
            dfile = os.path.join(ddir,dname)
            self.assertTrue(os.path.exists(dfile) and os.path.isfile(dfile),
                            "The expected destination file was not created")
            # rsync_files chmods each delivered file to ug+rw.
            exp_stat = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
            obs_stat = stat.S_IMODE(os.stat(dfile).st_mode)
            self.assertEqual(obs_stat,
                             exp_stat,
                             "The mode of the created file is not as expected")
        shutil.rmtree(root)
def test_rsync_set_group(self):
"""Test setting the group membership on rsync'd files
"""
root = tempfile.mkdtemp(prefix="rsync_test_set_group_")
avail_groups = os.getgroups()
exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]
# Create some files to move
to_copy = self._create_test_files(root)
# Run rsync
with open(os.devnull, 'w') as f:
old_stdout = sys.stdout
sys.stdout = f
rsync_files(to_copy,sys.stdout,exp_group,False)
sys.stdout = old_stdout
# Verify the copy process set the correct group on created directories
for ddir in set([d[1] for d in to_copy]):
gid = | |
"""
response = yield {
"method": "DOM.copyTo",
"params": filter_none(
{
"nodeId": int(nodeId),
"targetNodeId": int(targetNodeId),
"insertBeforeNodeId": int(insertBeforeNodeId)
if insertBeforeNodeId
else None,
}
),
}
return NodeId(response["nodeId"])
def describe_node(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
    depth: Optional[int] = None,
    pierce: Optional[bool] = None,
) -> Generator[dict, dict, Node]:
    """Describe the node given its id, without requiring the domain to be
    enabled.  Does not start tracking any objects; usable for automation.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.
    depth: Optional[int]
        The maximum depth at which children should be retrieved, defaults to 1. Use -1 for the
        entire subtree or provide an integer larger than 0.
    pierce: Optional[bool]
        Whether or not iframes and shadow roots should be traversed when returning the subtree
        (default is false).

    Returns
    -------
    node: Node
        Node description.
    """
    params = {
        "nodeId": int(nodeId) if nodeId else None,
        "backendNodeId": int(backendNodeId) if backendNodeId else None,
        "objectId": str(objectId) if objectId else None,
        "depth": depth,
        "pierce": pierce,
    }
    response = yield {"method": "DOM.describeNode", "params": filter_none(params)}
    return Node.from_json(response["node"])
def scroll_into_view_if_needed(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
    rect: Optional[Rect] = None,
) -> dict:
    """Build the `DOM.scrollIntoViewIfNeeded` command, which scrolls the
    specified rect of the given node into view if not already visible.

    Note: exactly one between nodeId, backendNodeId and objectId should be
    passed to identify the node.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.
    rect: Optional[Rect]
        The rect to be scrolled into view, relative to the node's border box, in CSS pixels.
        When omitted, center of the node will be used, similar to Element.scrollIntoView.

    **Experimental**
    """
    params = {
        "nodeId": int(nodeId) if nodeId else None,
        "backendNodeId": int(backendNodeId) if backendNodeId else None,
        "objectId": str(objectId) if objectId else None,
        "rect": rect.to_json() if rect else None,
    }
    return {"method": "DOM.scrollIntoViewIfNeeded", "params": filter_none(params)}
def disable() -> dict:
    """Build the `DOM.disable` command, which disables the DOM agent for
    the given page.
    """
    command = {"method": "DOM.disable", "params": {}}
    return command
def discard_search_results(searchId: str) -> dict:
    """Build the `DOM.discardSearchResults` command.

    Discards search results from the session with the given id;
    `getSearchResults` should no longer be called for that search.

    Parameters
    ----------
    searchId: str
        Unique search session identifier.

    **Experimental**
    """
    params = {"searchId": searchId}
    return {"method": "DOM.discardSearchResults", "params": params}
def enable() -> dict:
    """Build the `DOM.enable` command, which enables the DOM agent for the
    given page.
    """
    command = {"method": "DOM.enable", "params": {}}
    return command
def focus(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
) -> dict:
    """Build the `DOM.focus` command, which focuses the given element.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.
    """
    params = {
        "nodeId": int(nodeId) if nodeId else None,
        "backendNodeId": int(backendNodeId) if backendNodeId else None,
        "objectId": str(objectId) if objectId else None,
    }
    return {"method": "DOM.focus", "params": filter_none(params)}
def get_attributes(nodeId: NodeId) -> Generator[dict, dict, list[str]]:
    """Return attributes for the specified node.

    Parameters
    ----------
    nodeId: NodeId
        Id of the node to retrieve attibutes for.

    Returns
    -------
    attributes: list[str]
        An interleaved array of node attribute names and values.
    """
    request = {
        "method": "DOM.getAttributes",
        "params": {"nodeId": int(nodeId)},
    }
    response = yield request
    return response["attributes"]
def get_box_model(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
) -> Generator[dict, dict, BoxModel]:
    """Return boxes for the given node.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.

    Returns
    -------
    model: BoxModel
        Box model for the node.
    """
    params = {
        "nodeId": int(nodeId) if nodeId else None,
        "backendNodeId": int(backendNodeId) if backendNodeId else None,
        "objectId": str(objectId) if objectId else None,
    }
    response = yield {"method": "DOM.getBoxModel", "params": filter_none(params)}
    return BoxModel.from_json(response["model"])
def get_content_quads(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
) -> Generator[dict, dict, list[Quad]]:
    """Return quads that describe node position on the page. This method
    might return multiple quads for inline nodes.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.

    Returns
    -------
    quads: list[Quad]
        Quads that describe node layout relative to viewport.

    **Experimental**
    """
    params = {
        "nodeId": int(nodeId) if nodeId else None,
        "backendNodeId": int(backendNodeId) if backendNodeId else None,
        "objectId": str(objectId) if objectId else None,
    }
    response = yield {"method": "DOM.getContentQuads", "params": filter_none(params)}
    return list(map(Quad, response["quads"]))
def get_document(
    depth: Optional[int] = None, pierce: Optional[bool] = None
) -> Generator[dict, dict, Node]:
    """Returns the root DOM node (and optionally the subtree) to the caller.

    Parameters
    ----------
    depth: Optional[int]
        Maximum depth at which children should be retrieved (defaults to 1).
        Use -1 for the entire subtree, or an integer larger than 0.
    pierce: Optional[bool]
        Whether iframes and shadow roots should be traversed when returning
        the subtree (default is false).

    Returns
    -------
    root: Node
        Resulting node.
    """
    request = {
        "method": "DOM.getDocument",
        "params": filter_none({"depth": depth, "pierce": pierce}),
    }
    response = yield request
    return Node.from_json(response["root"])
@deprecated(version=1.3)
def get_flattened_document(
    depth: Optional[int] = None, pierce: Optional[bool] = None
) -> Generator[dict, dict, list[Node]]:
    """Returns the root DOM node (and optionally the subtree) to the caller.

    Deprecated, as it is not designed to work well with the rest of the DOM
    agent. Use DOMSnapshot.captureSnapshot instead.

    Parameters
    ----------
    depth: Optional[int]
        Maximum depth at which children should be retrieved (defaults to 1).
        Use -1 for the entire subtree, or an integer larger than 0.
    pierce: Optional[bool]
        Whether iframes and shadow roots should be traversed when returning
        the subtree (default is false).

    Returns
    -------
    nodes: list[Node]
        Resulting node.
    """
    request = {
        "method": "DOM.getFlattenedDocument",
        "params": filter_none({"depth": depth, "pierce": pierce}),
    }
    response = yield request
    return [Node.from_json(item) for item in response["nodes"]]
def get_nodes_for_subtree_by_style(
    nodeId: NodeId,
    computedStyles: list[CSSComputedStyleProperty],
    pierce: Optional[bool] = None,
) -> Generator[dict, dict, list[NodeId]]:
    """Finds nodes with a given computed style in a subtree.

    Parameters
    ----------
    nodeId: NodeId
        Node ID pointing to the root of a subtree.
    computedStyles: list[CSSComputedStyleProperty]
        The style to filter nodes by (a node is included if any of the
        properties matches).
    pierce: Optional[bool]
        Whether iframes and shadow roots in the same target should be
        traversed when returning the results (default is false).

    Returns
    -------
    nodeIds: list[NodeId]
        Resulting nodes.

    **Experimental**
    """
    request_params = {
        "nodeId": int(nodeId),
        "computedStyles": [prop.to_json() for prop in computedStyles],
        "pierce": pierce,
    }
    response = yield {
        "method": "DOM.getNodesForSubtreeByStyle",
        "params": filter_none(request_params),
    }
    return [NodeId(raw_id) for raw_id in response["nodeIds"]]
def get_node_for_location(
    x: int,
    y: int,
    includeUserAgentShadowDOM: Optional[bool] = None,
    ignorePointerEventsNone: Optional[bool] = None,
) -> Generator[dict, dict, dict]:
    """Returns node id at given location. Depending on whether DOM domain is
    enabled, nodeId is either returned or not.

    Parameters
    ----------
    x: int
        X coordinate.
    y: int
        Y coordinate.
    includeUserAgentShadowDOM: Optional[bool]
        False to skip to the nearest non-UA shadow root ancestor (default: false).
    ignorePointerEventsNone: Optional[bool]
        Whether to ignore pointer-events: none on elements and hit test them.

    Returns
    -------
    backendNodeId: BackendNodeId
        Resulting node.
    frameId: page.FrameId
        Frame this node belongs to.
    nodeId: Optional[NodeId]
        Id of the node at given coordinates, only when enabled and requested document.
    """
    request_params = {
        "x": x,
        "y": y,
        "includeUserAgentShadowDOM": includeUserAgentShadowDOM,
        "ignorePointerEventsNone": ignorePointerEventsNone,
    }
    response = yield {
        "method": "DOM.getNodeForLocation",
        "params": filter_none(request_params),
    }
    result = {
        "backendNodeId": BackendNodeId(response["backendNodeId"]),
        "frameId": page.FrameId(response["frameId"]),
        "nodeId": None,
    }
    # nodeId is only present when the DOM domain is enabled on the target.
    if "nodeId" in response:
        result["nodeId"] = NodeId(response["nodeId"])
    return result
def get_outer_html(
    nodeId: Optional[NodeId] = None,
    backendNodeId: Optional[BackendNodeId] = None,
    objectId: Optional[runtime.RemoteObjectId] = None,
) -> Generator[dict, dict, str]:
    """Returns node's HTML markup.

    Parameters
    ----------
    nodeId: Optional[NodeId]
        Identifier of the node.
    backendNodeId: Optional[BackendNodeId]
        Identifier of the backend node.
    objectId: Optional[runtime.RemoteObjectId]
        JavaScript object id of the node wrapper.

    Returns
    -------
    outerHTML: str
        Outer HTML markup.
    """
    # Compare against None explicitly: a falsy-but-valid identifier (e.g. 0)
    # must not be silently dropped from the request parameters.
    response = yield {
        "method": "DOM.getOuterHTML",
        "params": filter_none(
            {
                "nodeId": int(nodeId) if nodeId is not None else None,
                "backendNodeId": int(backendNodeId) if backendNodeId is not None else None,
                "objectId": str(objectId) if objectId is not None else None,
            }
        ),
    }
    return response["outerHTML"]
def get_relayout_boundary(nodeId: NodeId) -> Generator[dict, dict, NodeId]:
"""Returns the id of the nearest ancestor that is a relayout boundary.
Parameters
----------
nodeId: NodeId
Id of the node.
| |
<gh_stars>0
#! /usr/bin/python
#
# Python ctypes bindings for VLC
# Copyright (C) 2009 the VideoLAN team
# $Id: $
#
# Authors: <NAME> <olivier.aubert at liris.cnrs.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
#
"""This module provides bindings for the
U{libvlc<http://wiki.videolan.org/ExternalAPI>}.
You can find documentation at U{http://www.advene.org/download/python-ctypes/}.
Basically, the most important class is L{Instance}, which is used to
create a libvlc Instance. From this instance, you can then create
L{MediaPlayer} and L{MediaListPlayer} instances.
"""
import logging
import ctypes
import sys
build_date="Mon Sep 20 15:42:06 2010"
# Used for win32 and MacOS X
detected_plugin_path=None
# Locate and load the libvlc shared library for the current platform.
# NOTE(review): this is Python 2 era detection ('linux2', _winreg) -- confirm
# against the target interpreter before reuse.
if sys.platform == 'linux2':
    # Prefer the unversioned .so (dev package); fall back to the soname.
    try:
        dll=ctypes.CDLL('libvlc.so')
    except OSError:
        dll=ctypes.CDLL('libvlc.so.5')
elif sys.platform == 'win32':
    import ctypes.util
    import os
    detected_plugin_path=None
    path=ctypes.util.find_library('libvlc.dll')
    if path is None:
        # Try to use registry settings
        import _winreg
        detected_plugin_path_found = None
        subkey, name = 'Software\\VideoLAN\\VLC','InstallDir'
        # Look for the VLC install dir in HKLM first, then HKCU.
        for hkey in _winreg.HKEY_LOCAL_MACHINE, _winreg.HKEY_CURRENT_USER:
            try:
                reg = _winreg.OpenKey(hkey, subkey)
                detected_plugin_path_found, type_id = _winreg.QueryValueEx(reg, name)
                _winreg.CloseKey(reg)
                break
            except _winreg.error:
                # Key absent in this hive; try the next one.
                pass
        if detected_plugin_path_found:
            detected_plugin_path = detected_plugin_path_found
        else:
            # Try a standard location.
            p='c:\\Program Files\\VideoLAN\\VLC\\libvlc.dll'
            if os.path.exists(p):
                detected_plugin_path=os.path.dirname(p)
        # chdir into the install dir so the relative DLL name resolves.
        os.chdir(detected_plugin_path)
        # If chdir failed, this will not work and raise an exception
        path='libvlc.dll'
    else:
        detected_plugin_path=os.path.dirname(path)
    dll=ctypes.CDLL(path)
elif sys.platform == 'darwin':
    # FIXME: should find a means to configure path
    d='/Applications/VLC.app'
    import os
    if os.path.exists(d):
        # Load from the application bundle and remember its plugin dir.
        dll=ctypes.CDLL(d+'/Contents/MacOS/lib/libvlc.dylib')
        detected_plugin_path=d+'/Contents/MacOS/modules'
    else:
        # Hope some default path is set...
        dll=ctypes.CDLL('libvlc.dylib')
#
# Generated enum types.
#
class EventType(ctypes.c_ulong):
    """Event type codes emitted by libvlc event managers."""
    _names = {
        0: 'MediaMetaChanged',
        1: 'MediaSubItemAdded',
        2: 'MediaDurationChanged',
        3: 'MediaParsedChanged',
        4: 'MediaFreed',
        5: 'MediaStateChanged',
        0x100: 'MediaPlayerMediaChanged',
        257: 'MediaPlayerNothingSpecial',
        258: 'MediaPlayerOpening',
        259: 'MediaPlayerBuffering',
        260: 'MediaPlayerPlaying',
        261: 'MediaPlayerPaused',
        262: 'MediaPlayerStopped',
        263: 'MediaPlayerForward',
        264: 'MediaPlayerBackward',
        265: 'MediaPlayerEndReached',
        266: 'MediaPlayerEncounteredError',
        267: 'MediaPlayerTimeChanged',
        268: 'MediaPlayerPositionChanged',
        269: 'MediaPlayerSeekableChanged',
        270: 'MediaPlayerPausableChanged',
        271: 'MediaPlayerTitleChanged',
        272: 'MediaPlayerSnapshotTaken',
        273: 'MediaPlayerLengthChanged',
        0x200: 'MediaListItemAdded',
        513: 'MediaListWillAddItem',
        514: 'MediaListItemDeleted',
        515: 'MediaListWillDeleteItem',
        0x300: 'MediaListViewItemAdded',
        769: 'MediaListViewWillAddItem',
        770: 'MediaListViewItemDeleted',
        771: 'MediaListViewWillDeleteItem',
        0x400: 'MediaListPlayerPlayed',
        1025: 'MediaListPlayerNextItemSet',
        1026: 'MediaListPlayerStopped',
        0x500: 'MediaDiscovererStarted',
        1281: 'MediaDiscovererEnded',
        0x600: 'VlmMediaAdded',
        1537: 'VlmMediaRemoved',
        1538: 'VlmMediaChanged',
        1539: 'VlmMediaInstanceStarted',
        1540: 'VlmMediaInstanceStopped',
        1541: 'VlmMediaInstanceStatusInit',
        1542: 'VlmMediaInstanceStatusOpening',
        1543: 'VlmMediaInstanceStatusPlaying',
        1544: 'VlmMediaInstanceStatusPause',
        1545: 'VlmMediaInstanceStatusEnd',
        1546: 'VlmMediaInstanceStatusError',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (EventType.MediaFreed, ...).
for _value, _name in EventType._names.items():
    setattr(EventType, _name, EventType(_value))
del _value, _name
class Meta(ctypes.c_ulong):
    """Meta data types (title, artist, genre, ...)."""
    _names = {
        0: 'Title',
        1: 'Artist',
        2: 'Genre',
        3: 'Copyright',
        4: 'Album',
        5: 'TrackNumber',
        6: 'Description',
        7: 'Rating',
        8: 'Date',
        9: 'Setting',
        10: 'URL',
        11: 'Language',
        12: 'NowPlaying',
        13: 'Publisher',
        14: 'EncodedBy',
        15: 'ArtworkURL',
        16: 'TrackID',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (Meta.Title, Meta.Artist, ...).
for _value, _name in Meta._names.items():
    setattr(Meta, _name, Meta(_value))
del _value, _name
class State(ctypes.c_ulong):
    """Player/input state.

    Note the order of libvlc_state_t enum must match exactly the order of
    \\see mediacontrol_PlayerStatus, \\see input_state_e enums,
    and VideoLAN.LibVLC.State (at bindings/cil/src/media.cs).
    Expected states by web plugins are:
    IDLE/CLOSE=0, OPENING=1, BUFFERING=2, PLAYING=3, PAUSED=4,
    STOPPING=5, ENDED=6, ERROR=7
    """
    _names = {
        0: 'NothingSpecial',
        1: 'Opening',
        2: 'Buffering',
        3: 'Playing',
        4: 'Paused',
        5: 'Stopped',
        6: 'Ended',
        7: 'Error',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (State.Playing, ...).
for _value, _name in State._names.items():
    setattr(State, _name, State(_value))
del _value, _name
class TrackType(ctypes.c_ulong):
    """Kind of an elementary stream (audio, video, text)."""
    _names = {
        -1: 'unknown',
        0: 'audio',
        1: 'video',
        2: 'text',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (TrackType.audio, ...).
for _value, _name in TrackType._names.items():
    setattr(TrackType, _name, TrackType(_value))
del _value, _name
class PlaybackMode(ctypes.c_ulong):
    """Playback modes for a playlist."""
    _names = {
        0: 'default',
        1: 'loop',
        2: 'repeat',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (PlaybackMode.loop, ...).
for _value, _name in PlaybackMode._names.items():
    setattr(PlaybackMode, _name, PlaybackMode(_value))
del _value, _name
class VideoMarqueeOption(ctypes.c_ulong):
    """Marquee (text overlay) option identifiers."""
    _names = {
        0: 'Enable',
        1: 'Text',
        2: 'Color',
        3: 'Opacity',
        4: 'Position',
        5: 'Refresh',
        6: 'Size',
        7: 'Timeout',
        8: 'marquee_X',
        9: 'marquee_Y',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (VideoMarqueeOption.Text, ...).
for _value, _name in VideoMarqueeOption._names.items():
    setattr(VideoMarqueeOption, _name, VideoMarqueeOption(_value))
del _value, _name
class VideoLogoOption(ctypes.c_ulong):
    """Option values for libvlc_video_{get,set}_logo_{int,string}."""
    _names = {
        0: 'enable',
        1: 'file',
        2: 'logo_x',
        3: 'logo_y',
        4: 'delay',
        5: 'repeat',
        6: 'opacity',
        7: 'position',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (VideoLogoOption.file, ...).
for _value, _name in VideoLogoOption._names.items():
    setattr(VideoLogoOption, _name, VideoLogoOption(_value))
del _value, _name
class VideoAdjustOption(ctypes.c_ulong):
    """Option values for libvlc_video_{get,set}_adjust_{int,float,bool}."""
    _names = {
        0: 'Enable',
        1: 'Contrast',
        2: 'Brightness',
        3: 'Hue',
        4: 'Saturation',
        5: 'Gamma',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (VideoAdjustOption.Hue, ...).
for _value, _name in VideoAdjustOption._names.items():
    setattr(VideoAdjustOption, _name, VideoAdjustOption(_value))
del _value, _name
class AudioOutputDeviceTypes(ctypes.c_ulong):
    """Audio device types (channel layouts)."""
    _names = {
        -1: 'Error',
        1: 'Mono',
        2: 'Stereo',
        4: '_2F2R',
        5: '_3F2R',
        6: '_5_1',
        7: '_6_1',
        8: '_7_1',
        10: 'SPDIF',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (AudioOutputDeviceTypes.Mono, ...).
for _value, _name in AudioOutputDeviceTypes._names.items():
    setattr(AudioOutputDeviceTypes, _name, AudioOutputDeviceTypes(_value))
del _value, _name
class AudioOutputChannel(ctypes.c_ulong):
    """Audio channel selection modes."""
    _names = {
        -1: 'Error',
        1: 'Stereo',
        2: 'RStereo',
        3: 'Left',
        4: 'Right',
        5: 'Dolbys',
    }
    def __repr__(self):
        parts = (self.__class__.__module__, self.__class__.__name__, self._names[self.value])
        return ".".join(parts)
    def __eq__(self, other):
        return ( (isinstance(other, ctypes.c_ulong) and self.value == other.value)
             or (isinstance(other, (int, long)) and self.value == other ) )
    def __ne__(self, other):
        return not (self == other)
# Expose each named value as a class attribute (AudioOutputChannel.Left, ...).
for _value, _name in AudioOutputChannel._names.items():
    setattr(AudioOutputChannel, _name, AudioOutputChannel(_value))
del _value, _name
#
# End of generated enum types.
#
class ListPOINTER(object):
    '''Argument marshaller that behaves like a POINTER but additionally
    accepts a Python list or tuple of ctype elements.
    '''
    def __init__(self, etype):
        self.etype = etype
    def from_param(self, param):
        # Non-sequence arguments are left to ctypes' default handling
        # (implicitly returns None, matching the original fall-through).
        if not isinstance(param, (list, tuple)):
            return None
        array_type = self.etype * len(param)
        return array_type(*param)
class LibVLCException(Exception):
    """Python exception raised by libvlc methods."""
# From libvlc_structures.h
class MediaStats(ctypes.Structure):
    """Playback statistics structure (mirrors libvlc's media stats)."""
    _fields_= [
        ('read_bytes', ctypes.c_int ),
        ('input_bitrate', ctypes.c_float),
        ('demux_read_bytes', ctypes.c_int ),
        ('demux_bitrate', ctypes.c_float),
        ('demux_corrupted', ctypes.c_int ),
        ('demux_discontinuity', ctypes.c_int ),
        ('decoded_video', ctypes.c_int ),
        ('decoded_audio', ctypes.c_int ),
        ('displayed_pictures', ctypes.c_int ),
        ('lost_pictures', ctypes.c_int ),
        ('played_abuffers', ctypes.c_int ),
        ('lost_abuffers', ctypes.c_int ),
        ('sent_packets', ctypes.c_int ),
        ('sent_bytes', ctypes.c_int ),
        ('send_bitrate', ctypes.c_float),
    ]
    def __str__(self):
        # _fields_ holds (name, ctype) pairs: unpack the name before getattr.
        # The previous code passed the whole tuple to getattr(), which raised
        # TypeError as soon as str() was called on an instance.
        body = "\n".join("%s:\t%s" % (name, getattr(self, name))
                         for name, _ftype in self._fields_)
        return "MediaStats\n%s" % body
class MediaTrackInfo(ctypes.Structure):
    """Per-track media information (codec, type, geometry or rate)."""
    _fields_= [
        ('codec' , ctypes.c_uint32),
        ('id' , ctypes.c_int),
        ('type' , TrackType),
        ('profile' , ctypes.c_int),
        ('level' , ctypes.c_int),
        ('channels_or_height', ctypes.c_uint),
        ('rate_or_width' , ctypes.c_uint),
    ]
    def __str__(self):
        # _fields_ holds (name, ctype) pairs: unpack the name before getattr.
        # The previous code passed the whole tuple to getattr(), which raised
        # TypeError as soon as str() was called on an instance.
        body = "\n".join("%s:\t%s" % (name, getattr(self, name))
                         for name, _ftype in self._fields_)
        return "MediaTrackInfo \n%s" % body
class PlaylistItem(ctypes.Structure):
    """A single playlist entry: numeric id, media URI and display name."""
    _fields_= [
        ('id', ctypes.c_int),
        ('uri', ctypes.c_char_p),
        ('name', ctypes.c_char_p),
    ]
    def __str__(self):
        # The original format string contained '(%uri)': '%u' was parsed as an
        # unsigned-int conversion and fed a string, so str() raised TypeError.
        return "PlaylistItem #%d %s (%s)" % (self.id, self.name, self.uri)
class LogMessage(ctypes.Structure):
    """A log message record delivered by the libvlc logging API."""
    _fields_= [
        ('size', ctypes.c_uint),
        ('severity', ctypes.c_int),
        ('type', ctypes.c_char_p),
        ('name', ctypes.c_char_p),
        ('header', ctypes.c_char_p),
        ('message', ctypes.c_char_p),
    ]
    def __init__(self):
        super(LogMessage, self).__init__()
        # The size field must be pre-filled by the caller before use.
        self.size=ctypes.sizeof(self)
    def __str__(self):
        return "vlc.LogMessage(%d:%s): %s" % (self.severity, self.type, self.message)
class AudioOutput(ctypes.Structure):
    """One node of the C linked list of available audio output modules."""
    def __str__(self):
        return "vlc.AudioOutput(%s:%s)" % (self.name, self.description)
# _fields_ is assigned after the class statement because the 'next' field
# is a self-referential pointer to AudioOutput.
AudioOutput._fields_= [
    ('name', ctypes.c_char_p),
    ('description', ctypes.c_char_p),
    ('next', ctypes.POINTER(AudioOutput)),
]
class TrackDescription(ctypes.Structure):
    """One node of the C linked list describing a media track."""
    def __str__(self):
        return "vlc.TrackDescription(%d:%s)" % (self.id, self.name)
# _fields_ is assigned after the class statement because the 'next' field
# is a self-referential pointer to TrackDescription.
TrackDescription._fields_= [
    ('id', ctypes.c_int),
    ('name', ctypes.c_char_p),
    ('next', ctypes.POINTER(TrackDescription)),
]
def track_description_list(head):
    """Flatten a TrackDescription linked list into [(id, name), ...] pairs,
    then release the C-side list.
    """
    result = []
    node = head
    while node:
        contents = node.contents
        result.append((contents.id, contents.name))
        node = contents.next
    if head:
        # Free the native list once its content has been copied out.
        libvlc_track_description_release(head)
    return result
### End of header.py ###
class EventManager(object):
def __new__(cls, pointer=None):
'''Internal method used for instanciating wrappers from ctypes.
'''
if pointer is None:
raise Exception("Internal method. Surely this class cannot be | |
int_tx['value'] = csv_tx['value']
int_tx['input'] = csv_tx['input']
int_tx['output'] = csv_tx['output']
int_tx['traceType'] = csv_tx['trace_type']
int_tx['callType'] = csv_tx['call_type']
int_tx['rewardType'] = csv_tx['reward_type']
int_tx['gas'] = csv_tx['gas']
int_tx['gasUsed'] = csv_tx['gas_used']
tx_hash = int_tx_asoc[csv_tx['block_number'] + '-' + csv_tx['transaction_index']]
int_tx['transactionHash'] = tx_hash
int_tx['timestamp'] = transactions[tx_hash]['timestamp']
int_tx['error'] = csv_tx['error']
self._highest_internal_tx += 1
internal_txs[self._highest_internal_tx] = int_tx
transactions[tx_hash]['internalTxIndex'] += 1
str_index = str(transactions[tx_hash]['internalTxIndex'])
txs_write_dict[tx_hash + '-tit-' + str_index] = (
self._highest_internal_tx)
if int_tx['from'] not in addresses and int_tx['from'] != '':
addresses[int_tx['from']] = {'code': '0x',
'mined': [],
'newInputTxs': [],
'newOutputTxs': [],
'newInputTokens': [],
'newOutputTokens': [],
'newIntInputTxs': [],
'newIntOutputTxs': [(self._highest_internal_tx,
int_tx['value'],
int_tx['timestamp'])]}
elif int_tx['from'] != '':
addresses[int_tx['from']]['newIntOutputTxs'].append(
(self._highest_internal_tx, int_tx['value'], int_tx['timestamp']))
if int_tx['to'] not in addresses and int_tx['to'] != '':
addresses[int_tx['to']] = {'code': '0x',
'mined': [],
'newInputTxs': [],
'newOutputTxs': [],
'newInputTokens': [],
'newOutputTokens': [],
'newIntInputTxs': [(self._highest_internal_tx,
int_tx['value'],
int_tx['timestamp'])],
'newIntOutputTxs': []}
elif int_tx['to'] != '':
addresses[int_tx['to']]['newIntInputTxs'].append(
(self._highest_internal_tx, int_tx['value'], int_tx['timestamp']))
internal_txs[self._highest_internal_tx] = int_tx
return (addresses, transactions, txs_write_dict, internal_txs)
def fill_addresses(self, addresses: Dict, transactions: Dict,
                   tokens: Dict, token_txs: List) -> Tuple[Dict, Dict, Dict, Dict]:
    """
    Fill addresses with transaction information.

    Args:
        addresses: Currently processed addresses.
        transactions: Currently processed transactions.
        tokens: Currently processed tokens.
        token_txs: Currently processed token transactions.

    Returns:
        Tuple of (addresses encoded for DB write, address-to-tx association
        dictionary, updated tokens, filtered token transactions).
    """
    LOG.info('Filling addresses.')
    # Resolve token metadata and drop token txs whose token cannot be found.
    updated_tokens, filtered_token_txs = self.expand_tokens(tokens, token_txs)
    addresses, updated_tokens = self.fill_addresses_tokens(addresses,
                                                           updated_tokens,
                                                           filtered_token_txs)
    addresses_encode = {}
    addresses_write_dict = {}
    for addr_hash, addr_dict in addresses.items():
        existing_data = db_get_wrapper(self.db, b'address-' + addr_hash.encode())
        # Address already present in the DB: continue numbering its per-category
        # tx lists from the stored indexes so new entries append after old ones.
        if existing_data is not None:
            existing_address = coder.decode_address(existing_data)
            last_input_tx_index = int(existing_address['inputTxIndex'])
            last_output_tx_index = int(existing_address['outputTxIndex'])
            last_input_token_tx_index = int(existing_address['inputTokenTxIndex'])
            last_output_token_tx_index = int(existing_address['outputTokenTxIndex'])
            last_input_int_tx_index = int(existing_address['inputIntTxIndex'])
            last_output_int_tx_index = int(existing_address['outputIntTxIndex'])
            last_mined_block_index = int(existing_address['minedIndex'])
        else:
            # Brand new address: all per-category counters start at zero.
            last_input_tx_index = 0
            last_output_tx_index = 0
            last_input_token_tx_index = 0
            last_output_token_tx_index = 0
            last_input_int_tx_index = 0
            last_output_int_tx_index = 0
            last_mined_block_index = 0
        address_encode = {}
        if existing_data is not None:
            # Keep the stored token-contract flag and mirror it into the token
            # record when this address is itself a tracked token.
            address_encode['tokenContract'] = existing_address['tokenContract']
            if addr_hash in updated_tokens:
                updated_tokens[addr_hash]['type'] = existing_address['tokenContract']
        else:
            if 'tokenContract' in addr_dict:
                address_encode['tokenContract'] = addr_dict['tokenContract']
                if addr_hash in updated_tokens:
                    updated_tokens[addr_hash]['type'] = addr_dict['tokenContract']
            else:
                address_encode['tokenContract'] = 'False'
        # Balance is computed elsewhere; stored as the literal string 'null'.
        address_encode['balance'] = 'null'
        if existing_data is not None:
            address_encode['code'] = existing_address['code']
        else:
            address_encode['code'] = addr_dict['code']
        # Register new input/output txs and mined blocks under sequentially
        # numbered keys ('<addr>-i-N', '<addr>-o-N', '<addr>-b-N').
        for input_tx in addr_dict['newInputTxs']:
            last_input_tx_index += 1
            addresses_write_dict[addr_hash + '-i-' + str(last_input_tx_index)] = (
                str(input_tx[0]) + '-' + str(input_tx[1]) + '-' + str(input_tx[2]))
        for output_tx in addr_dict['newOutputTxs']:
            last_output_tx_index += 1
            addresses_write_dict[addr_hash + '-o-' + str(last_output_tx_index)] = (
                str(output_tx[0]) + '-' + str(output_tx[1]) + '-' + str(output_tx[2]))
        for mined_hash in addr_dict['mined']:
            last_mined_block_index += 1
            addresses_write_dict[addr_hash + '-b-' + str(last_mined_block_index)] = mined_hash
        address_encode['inputTxIndex'] = last_input_tx_index
        address_encode['outputTxIndex'] = last_output_tx_index
        address_encode['inputTokenTxIndex'] = last_input_token_tx_index
        address_encode['outputTokenTxIndex'] = last_output_token_tx_index
        address_encode['inputIntTxIndex'] = last_input_int_tx_index
        address_encode['outputIntTxIndex'] = last_output_int_tx_index
        address_encode['minedIndex'] = last_mined_block_index
        addresses_encode[addr_hash] = address_encode
    # Also add token information to the addresses.
    addresses_encode, updated_tokens, addresses_write_dict = self.fill_addrs_token_txs(
        addresses, addresses_encode, updated_tokens, addresses_write_dict)
    # Also add internal transactions to addresses
    addresses_encode, addresses_write_dict = self.fill_addrs_int_txs(
        addresses, addresses_encode, addresses_write_dict)
    return (addresses_encode, addresses_write_dict, updated_tokens, filtered_token_txs)
def fill_addrs_int_txs(self, addresses: Dict, addresses_encode: Dict,
                       addresses_write_dict: Dict) -> Tuple[Dict, Dict]:
    """
    Fills address information with internal transactions.

    Args:
        addresses: Currently processed addresses.
        addresses_encode: Addresses partially prepared for DB write.
        addresses_write_dict: Dictionary containing info connecting addresses with their txs.

    Returns:
        Updated encoded addresses and the updated association dictionary.
        (The return annotation previously claimed a 3-tuple; the method
        returns two dictionaries.)
    """
    for addr_hash, addr_dict in addresses.items():
        last_input_int_tx_index = addresses_encode[addr_hash]['inputIntTxIndex']
        last_output_int_tx_index = addresses_encode[addr_hash]['outputIntTxIndex']
        # Register new internal txs under sequentially numbered keys
        # ('<addr>-ii-N' for inputs, '<addr>-io-N' for outputs); the value
        # encodes 'index-value-timestamp'.
        for input_tx in addr_dict['newIntInputTxs']:
            last_input_int_tx_index += 1
            addresses_write_dict[addr_hash + '-ii-' + str(last_input_int_tx_index)] = (
                str(input_tx[0]) + '-' + str(input_tx[1]) + '-' + str(input_tx[2]))
        for output_tx in addr_dict['newIntOutputTxs']:
            last_output_int_tx_index += 1
            addresses_write_dict[addr_hash + '-io-' + str(last_output_int_tx_index)] = (
                str(output_tx[0]) + '-' + str(output_tx[1]) + '-' + str(output_tx[2]))
        addresses_encode[addr_hash]['inputIntTxIndex'] = last_input_int_tx_index
        addresses_encode[addr_hash]['outputIntTxIndex'] = last_output_int_tx_index
    return (addresses_encode, addresses_write_dict)
def fill_addresses_tokens(self, addresses: Dict, tokens: Dict,
                          token_txs: Dict) -> Tuple[Dict, Dict]:
    """
    Fill addresses and tokens with token transactions.

    Args:
        addresses: Addresses containing workable data.
        tokens: Tokens whose transactions were found.
        token_txs: Mapping of token-tx index to token transaction.

    Returns:
        Addresses enriched with token transaction data, and updated tokens.
    """
    def _blank_entry():
        # Skeleton record for an address encountered for the first time.
        return {'code': '0x',
                'mined': [],
                'newInputTxs': [],
                'newOutputTxs': [],
                'newInputTokens': [],
                'newOutputTokens': [],
                'newIntInputTxs': [],
                'newIntOutputTxs': []}

    for token_tx_index, token_tx in token_txs.items():
        entry = (token_tx_index, token_tx['timestamp'])
        sender = token_tx['addressFrom']
        if sender != '':
            if sender not in addresses:
                addresses[sender] = _blank_entry()
            addresses[sender]['newOutputTokens'].append(entry)
        receiver = token_tx['addressTo']
        if receiver != '':
            if receiver not in addresses:
                addresses[receiver] = _blank_entry()
            addresses[receiver]['newInputTokens'].append(entry)
        tokens[token_tx['tokenAddress']]['transactions'].append(entry)
    return (addresses, tokens)
def expand_tokens(self, tokens: Dict, token_txs: List) -> Tuple[Dict, Dict]:
    """
    Find all relevant tokens from DB and reject not-found transactions.

    Args:
        tokens: Tokens gathered so far (in this batch).
        token_txs: Token transactions to get other token info.

    Returns:
        Updated token list and the accepted transactions keyed by a
        monotonically increasing token-tx index.
    """
    known_tokens = {}
    accepted_txs = {}
    for token_tx in token_txs:
        token_addr = token_tx['tokenAddress']
        raw = db_get_wrapper(self.db, b'token-' + token_addr.encode())
        if raw is not None:
            # Token known in the DB: use the stored record with a fresh
            # transaction list.
            decoded = coder.decode_token(raw)
            decoded['transactions'] = []
            known_tokens[token_addr] = decoded
        elif token_addr in tokens:
            # Token discovered earlier in this batch.
            known_tokens[token_addr] = tokens[token_addr]
        else:
            # Unknown token: drop the transaction entirely.
            continue
        self._highest_token_tx += 1
        accepted_txs[self._highest_token_tx] = token_tx
    # Carry over batch tokens that had no transactions of their own.
    for token_addr, token in tokens.items():
        known_tokens.setdefault(token_addr, token)
    return (known_tokens, accepted_txs)
def fill_addrs_token_txs(self, addresses: Dict, addresses_encode: Dict,
                         tokens: Dict, addresses_write_dict: Dict) -> Tuple[Dict, Dict, Dict]:
    """
    Fills address information with token transactions.

    For every address, the new input/output token transactions are appended
    to the write dictionary under sequentially numbered '<addr>-ti-<i>' /
    '<addr>-to-<i>' keys; token transactions are stored likewise under
    '<token>-tt-<i>' keys, and the per-address/per-token counters advance.

    Args:
        addresses: Currently processed addresses.
        addresses_encode: Addresses partially prepared for DB write.
        tokens: All relevant tokens.
        addresses_write_dict: Dictionary containing info connecting addresses with their txs.

    Returns:
        Updated addresses, tokens, and associating dictionary.
    """
    for addr_hash, addr_info in addresses.items():
        encoded = addresses_encode[addr_hash]
        in_index = encoded['inputTokenTxIndex']
        out_index = encoded['outputTokenTxIndex']
        for tx_id, tx_time in addr_info['newInputTokens']:
            in_index += 1
            key = addr_hash + '-ti-' + str(in_index)
            addresses_write_dict[key] = str(tx_id) + '-' + str(tx_time)
        for tx_id, tx_time in addr_info['newOutputTokens']:
            out_index += 1
            key = addr_hash + '-to-' + str(out_index)
            addresses_write_dict[key] = str(tx_id) + '-' + str(tx_time)
        encoded['inputTokenTxIndex'] = in_index
        encoded['outputTokenTxIndex'] = out_index
    for token_addr, token_info in tokens.items():
        tt_index = token_info['txIndex']
        for tx_id, tx_time in token_info['transactions']:
            tt_index += 1
            key = token_addr + '-tt-' + str(tt_index)
            addresses_write_dict[key] = str(tx_id) + '-' + str(tx_time)
        token_info['txIndex'] = tt_index
    return (addresses_encode, tokens, addresses_write_dict)
def update_bulk_db(self, blocks: Dict, transactions: Dict, addresses: Dict,
                   tokens: Dict, addresses_write_dict: Dict, token_txs: Dict,
                   address_code_asoc: Dict, internal_txs: Dict, txs_write_dict: Dict) -> None:
    """
    Updates the database with bulk data.

    All records are accumulated into a single rocksdb WriteBatch and
    written while holding the DB lock.

    Args:
        blocks: Dictionary containing blocks.
        transactions: Dictionary containing transactions.
        addresses: Dictionary containing addresses.
        tokens: Dictionary containing tokens.
        addresses_write_dict: Data connecting addresses to their blocks/txs.
        token_txs: Dictionary containing token transactions.
        address_code_asoc: Contract codes of addresses (saved separately due to lot of data).
        internal_txs: Internal transactions to be written to DB.
        txs_write_dict: Associations between internal txs and txs.
    """
    self.db_lock.acquire()
    # Release the lock even if encoding or the DB write raises; otherwise a
    # single failure would leave the lock held and block every later
    # reader/writer.  (Replaces leftover debug print() calls with nothing —
    # LOG already records the write.)
    try:
        LOG.info('Writing to database.')
        wb = rocksdb.WriteBatch()
        for block_hash, block_dict in blocks.items():
            if 'transactionIndexRange' not in block_dict:
                block_dict['transactionIndexRange'] = ''
            block_value = coder.encode_block(block_dict)
            # Blocks are look-up-able by number, by hash, and by timestamp.
            wb.put(b'block-' + str(block_dict['number']).encode(), block_value)
            wb.put(b'hash-block-' + str(block_dict['hash']).encode(),
                   str(block_dict['number']).encode())
            wb.put(b'timestamp-block-' + str(block_dict['timestamp']).encode(),
                   str(block_dict['number']).encode())
        for tx_hash, tx_dict in transactions.items():
            if 'logs' not in tx_dict:
                tx_dict['logs'] = ''
            tx_value = coder.encode_transaction(tx_dict)
            wb.put(b'transaction-' + tx_hash.encode(), tx_value)
        for addr_hash, addr_dict in addresses.items():
            address_value = coder.encode_address(addr_dict)
            wb.put(b'address-' + str(addr_hash).encode(), address_value)
        for addr_hash, token_dict in tokens.items():
            token_value = coder.encode_token(token_dict)
            wb.put(b'token-' + str(addr_hash).encode(), token_value)
        for token_tx_index, token_tx_dict in token_txs.items():
            token_tx_value = coder.encode_token_tx(token_tx_dict)
            wb.put(b'token-tx-' + str(token_tx_index).encode(), token_tx_value)
        for addr_key, addr_data in addresses_write_dict.items():
            wb.put(b'associated-data-' + str(addr_key).encode(), str(addr_data).encode())
        for code_key, code_data in address_code_asoc.items():
            wb.put(code_key.encode(), code_data.encode())
        for tx_key, tx_data in txs_write_dict.items():
            wb.put(b'associated-data-' + tx_key.encode(), str(tx_data).encode())
        for internal_tx_index, internal_tx_dict in internal_txs.items():
            internal_tx_value = coder.encode_internal_tx(internal_tx_dict)
            wb.put(b'internal-tx-' + str(internal_tx_index).encode(), internal_tx_value)
        self.db.write(wb)
    finally:
        self.db_lock.release()
def update_database(db_location: str,
interface: str,
confirmations: int,
bulk_size: int,
process_traces: bool,
datapath: str,
gather_tokens: bool,
max_workers: int,
db_lock: Any,
db: Any = None) -> None:
"""
Updates database with new entries.
Args:
db_location: Where the DB is located.
interface: Path to the Geth blockchain node.
confirmations: How many confirmations a block has to have.
bulk_size: How many blocks to be included in bulk DB update.
process_traces: Whether to get addresses from traces.
datapath: Path for temporary file created in DB creation.
gather_tokens: Whether to also gather token information.
max_workers: Maximum workers in Ethereum ETL.
db_lock: Mutex that prevents simultanious DB write and read (to prevent read errors).
db: Database instance.
"""
db_updater = DatabaseUpdater(db, interface, confirmations, bulk_size, db_lock, process_traces,
datapath, gather_tokens, max_workers)
# sync occurs multiple times as present will change before sync | |
# encoding: utf-8
"""
Tests of fairgraph.electrophysiology module, using a mock Http client
which returns data loaded from the files in the test_data directory.
"""
from fairgraph.base import KGQuery, KGProxy, as_list, Distribution
from fairgraph.commons import BrainRegion, CellType, QuantitativeValue
from fairgraph.core import use_namespace as use_core_namespace
from fairgraph.electrophysiology import (
Trace, MultiChannelMultiTrialRecording, PatchedCell, Slice, BrainSlicingActivity,
PatchedSlice, PatchedCellCollection, PatchClampActivity, PatchClampExperiment,
QualifiedTraceGeneration, QualifiedMultiTraceGeneration,
IntraCellularSharpElectrodeExperiment, IntraCellularSharpElectrodeRecordedCell,
IntraCellularSharpElectrodeRecordedCellCollection,
IntraCellularSharpElectrodeRecordedSlice, IntraCellularSharpElectrodeRecording,
ElectrodeImplantationActivity, ExtracellularElectrodeExperiment, ImplantedBrainTissue,
ExtracellularElectrodeExperiment,ECoGExperiment, CulturingActivity, Device, Task,
Sensor, ElectrodeArrayExperiment, ElectrodePlacementActivity, EEGExperiment, CellCulture,
list_kg_classes, use_namespace as use_electrophysiology_namespace)
from fairgraph.minds import Dataset
from .utils import kg_client, MockKGObject, test_data_lookup, BaseTestKG
from pyxus.resources.entity import Instance
import pytest
# Map KG API request paths to local JSON fixture files, so the mock HTTP
# client serves stored responses (see module docstring) instead of querying
# the live Knowledge Graph.  '/v0/data/...' paths are nexus list/detail
# endpoints; '/query/.../fg*/instances' paths are KG stored-query endpoints.
test_data_lookup.update({
    "/v0/data/neuralactivity/experiment/brainslicing/v0.1.0/": "test/test_data/nexus/electrophysiology/brainslicing_list_0_10.json",
    "/v0/data/neuralactivity/experiment/brainslicingactivity/v0.1.0/": "test/test_data/nexus/electrophysiology/brainslicingactivity_list_0_10.json",
    "/v0/data/neuralactivity/experiment/intracellularsharpelectrodeexperiment/v0.1.0/": "test/test_data/nexus/electrophysiology/intracellularsharpelectrodeexperiment_list_0_10.json",
    "/v0/data/neuralactivity/experiment/intrasharprecordedcell/v0.1.0/": "test/test_data/nexus/electrophysiology/intracellularsharpelectroderecordedcell_list_0_10.json",
    "/v0/data/neuralactivity/experiment/intrasharprecordedcellcollection/v0.1.0/": "test/test_data/nexus/electrophysiology/intracellularsharpelectroderecordedcellcollection_list_0_10.json",
    "/v0/data/neuralactivity/experiment/intrasharprecordedslice/v0.1.0/": "test/test_data/nexus/electrophysiology/intracellularsharpelectroderecordedslice_list_0_10.json",
    "/v0/data/neuralactivity/experiment/intrasharpelectrode/v0.1.0/": "test/test_data/nexus/electrophysiology/intracellularsharpelectroderecording_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multichannelmultitrialrecording/v0.1.0/": "test/test_data/nexus/electrophysiology/multichannelmultitrialrecording_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multitrace/v0.1.0/": "test/test_data/nexus/electrophysiology/multitrace_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multitrace/v0.1.1/": "test/test_data/nexus/electrophysiology/multitrace_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multitrace/v0.2.0/": "test/test_data/nexus/electrophysiology/multitrace_list_0_10.json",
    "/v0/data/neuralactivity/experiment/multitracegeneration/v0.1.0/": "test/test_data/nexus/electrophysiology/multitracegeneration_list_0_10.json",
    "/v0/data/neuralactivity/experiment/patchclampactivity/v0.1.0/": "test/test_data/nexus/electrophysiology/patchclampactivity_list_0_10.json",
    "/v0/data/neuralactivity/experiment/patchclampexperiment/v0.1.0/": "test/test_data/nexus/electrophysiology/patchclampexperiment_list_0_10.json",
    "/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/": "test/test_data/nexus/electrophysiology/patchedcell_list_0_50.json",
    "/v0/data/neuralactivity/experiment/patchedcellcollection/v0.1.0/": "test/test_data/nexus/electrophysiology/patchedcellcollection_list_0_10.json",
    "/v0/data/neuralactivity/experiment/patchedslice/v0.1.0/": "test/test_data/nexus/electrophysiology/patchedslice_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multitracegeneration/v0.1.0/": "test/test_data/nexus/electrophysiology/qualifiedmultitracegeneration_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/multitracegeneration/v0.2.3/": "test/test_data/nexus/electrophysiology/qualifiedmultitracegeneration_list_0_10.json",
    "/v0/data/neuralactivity/experiment/qualifiedtracegeneration/v0.1.0/": "test/test_data/nexus/electrophysiology/qualifiedtracegeneration_list_0_10.json",
    "/v0/data/neuralactivity/core/slice/v0.1.0/": "test/test_data/nexus/electrophysiology/slice_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/stimulusexperiment/v0.1.0/": "test/test_data/nexus/electrophysiology/patchclampexperiment_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/stimulusexperiment/v0.2.1/": "test/test_data/nexus/electrophysiology/intracellularsharpelectrodeexperiment_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/stimulusexperiment/v0.3.0/": "test/test_data/nexus/electrophysiology/patchclampexperiment_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/trace/v0.1.0/": "test/test_data/nexus/electrophysiology/trace_list_0_10.json",
    "/v0/data/neuralactivity/electrophysiology/tracegeneration/v0.1.0/": "test/test_data/nexus/electrophysiology/tracegeneration_list_0_10.json",
    "/v0/data/neuralactivity/experiment/wholecellpatchclamp/v0.1.0/": "test/test_data/nexus/electrophysiology/wholecellpatchclamp_list_0_10.json",
    "/v0/data/neuralactivity/experiment/wholecellpatchclamp/v0.3.0/": "test/test_data/nexus/electrophysiology/wholecellpatchclamp_list_0_10.json",
    # Single-instance nexus fixture used by the from_uri/from_uuid tests.
    "/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/5ab24291-8dca-4a45-a484-8a8c28d396e2": "test/test_data/nexus/electrophysiology/patchedcell_example.json",
    # KG stored-query fixtures (simple and resolved variants).
    "/query/neuralactivity/experiment/patchedcell/v0.1.0/fgModified/instances": "test/test_data/kgquery/electrophysiology/patchedcell_list_simple_0_10.json",
    "/query/neuralactivity/experiment/patchedcell/v0.1.0/fgResolvedModified/instances": "test/test_data/kgquery/electrophysiology/patchedcell_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/brainslicing/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/brainslicingactivity_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.2.1/fgResolved/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectrodeexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedcell/v0.1.0/fgResolvedModified/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedcell_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedcellcollection/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedcellcollection_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedslice/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedslice_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharpelectrode/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.1.1/fgResolved/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.2.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/wholecellpatchclamp/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchclampactivity_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/wholecellpatchclamp/v0.3.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchclampactivity_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchclampexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.3.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchclampexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/patchedcellcollection/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchedcellcollection_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/patchedslice/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/patchedslice_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitracegeneration/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/qualifiedmultitracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitracegeneration/v0.2.3/fgResolved/instances": "test/test_data/kgquery/electrophysiology/qualifiedmultitracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/tracegeneration/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/qualifiedtracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/core/slice/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/slice_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/trace/v0.1.0/fgResolved/instances": "test/test_data/kgquery/electrophysiology/trace_list_resolved_0_10.json",
    # NOTE(review): the fgSimple/fgModified queries below reuse the
    # *resolved* fixture files — presumably intentional, verify if the
    # simple-vs-resolved distinction ever matters for these tests.
    "/query/neuralactivity/experiment/brainslicing/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/brainslicingactivity_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.2.1/fgSimple/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectrodeexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedcell/v0.1.0/fgModified/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedcell_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedcellcollection/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedcellcollection_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharprecordedslice/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecordedslice_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/intrasharpelectrode/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/intracellularsharpelectroderecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.1.1/fgSimple/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitrace/v0.2.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/multichannelmultitrialrecording_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/wholecellpatchclamp/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchclampactivity_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/wholecellpatchclamp/v0.3.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchclampactivity_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchclampexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/stimulusexperiment/v0.3.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchclampexperiment_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/patchedcellcollection/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchedcellcollection_list_resolved_0_10.json",
    "/query/neuralactivity/experiment/patchedslice/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/patchedslice_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitracegeneration/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/qualifiedmultitracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/multitracegeneration/v0.2.3/fgSimple/instances": "test/test_data/kgquery/electrophysiology/qualifiedmultitracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/tracegeneration/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/qualifiedtracegeneration_list_resolved_0_10.json",
    "/query/neuralactivity/core/slice/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/slice_list_resolved_0_10.json",
    "/query/neuralactivity/electrophysiology/trace/v0.1.0/fgSimple/instances": "test/test_data/kgquery/electrophysiology/trace_list_resolved_0_10.json",
})
# All entities under test live in the "neuralactivity" namespace.
use_core_namespace("neuralactivity")
use_electrophysiology_namespace("neuralactivity")
class TestPatchedCell(BaseTestKG):
    """Tests for PatchedCell against the mock nexus and KG-query APIs."""
    class_under_test = PatchedCell
    def test_list_nexus(self, kg_client):
        # The nexus fixture (patchedcell_list_0_50.json) contains 30 cells.
        cells = PatchedCell.list(kg_client, api="nexus", size=50)
        assert len(cells) == 30
        assert cells[0].brain_location == BrainRegion("hippocampus CA1")
        # Linked objects come back lazily, as KGQuery placeholders.
        assert isinstance(cells[0].collection, KGQuery)
        assert cells[0].cell_type == CellType("hippocampus CA1 pyramidal cell")
        assert isinstance(cells[0].experiments, KGQuery)
        # Optional electrophysiology metadata is absent for the first cell.
        assert cells[0].pipette_id is None
        assert cells[0].seal_resistance is None
        assert cells[0].pipette_resistance is None
        assert cells[0].liquid_junction_potential is None
        assert cells[0].labeling_compound is None
        assert cells[0].reversal_potential_cl == QuantitativeValue(-16.0, unit_text="mV")
    def test_list_with_filter(self, kg_client):
        # Filtering by brain region narrows the 30 fixture cells down to 26.
        cells = PatchedCell.list(kg_client, api="nexus", brain_region=BrainRegion("hippocampus CA1"), size=50)
        assert len(cells) == 26
    def test_list_kgquery_simple(self, kg_client):
        cls = self.class_under_test
        objects = cls.list(kg_client, api="query", size=10, resolved=False)
        assert len(objects) == 10, len(objects)
    def test_list_kgquery_resolved(self, kg_client):
        cls = self.class_under_test
        objects = cls.list(kg_client, api="query", size=10, resolved=True)
        assert len(objects) == 10, len(objects)
    def test_get_from_uri_nexus(self, kg_client):
        uri = "https://nexus.humanbrainproject.org/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/5ab24291-8dca-4a45-a484-8a8c28d396e2"
        cell = PatchedCell.from_uri(uri, kg_client, api="nexus")
        assert isinstance(cell, PatchedCell)
        assert cell.id == uri
        # This example cell spans multiple cerebellar regions.
        assert cell.brain_location == [BrainRegion('lobule 5 of the cerebellar vermis'),
                                       BrainRegion('lobule 6 of the cerebellar vermis'),
                                       BrainRegion('lobule 7 of the cerebellar vermis'),
                                       BrainRegion('lobule 8 of the cerebellar vermis')]
        assert isinstance(cell.collection, KGQuery)
        assert isinstance(cell.experiments, KGQuery)
    def test_get_from_uri_kgquery_simple(self, kg_client):  # TODO: UPDATE STORED QUERY
        uri = "https://nexus.humanbrainproject.org/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/b813a2f7-5e87-4827-81cd-0008da041648"
        cell = PatchedCell.from_uri(uri, kg_client, api="query", resolved=False)
        assert isinstance(cell, PatchedCell)
        # NOTE: the KG-query fixture uses different region labels
        # ("5th cerebellar lobule") than the nexus fixture above.
        assert cell.brain_location == [BrainRegion('5th cerebellar lobule'),
                                       BrainRegion('6th cerebellar lobule'),
                                       BrainRegion('7th cerebellar lobule'),
                                       BrainRegion('8th cerebellar lobule')]
        assert isinstance(cell.collection, KGQuery)
        assert isinstance(cell.experiments, KGQuery)
    def test_get_from_uri_kgquery_resolved(self, kg_client):
        uri = "https://nexus.humanbrainproject.org/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/b813a2f7-5e87-4827-81cd-0008da041648"
        cell = PatchedCell.from_uri(uri, kg_client, api="query", resolved=True)
        assert isinstance(cell, PatchedCell)
        assert cell.id == uri
        assert cell.brain_location == [BrainRegion('5th cerebellar lobule'),
                                       BrainRegion('6th cerebellar lobule'),
                                       BrainRegion('7th cerebellar lobule'),
                                       BrainRegion('8th cerebellar lobule')]
        assert isinstance(cell.collection, KGQuery)
        assert isinstance(cell.experiments, KGQuery)
    def test_get_from_uuid(self, kg_client):
        # from_uuid must fetch the same object as from_uri with the full URI.
        uri = "https://nexus.humanbrainproject.org/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/5ab24291-8dca-4a45-a484-8a8c28d396e2"
        a = PatchedCell.from_uri(uri, kg_client, api="nexus")
        b = PatchedCell.from_uuid("5ab24291-8dca-4a45-a484-8a8c28d396e2", kg_client, api="nexus")
        assert a == b
        assert a.id == uri
    def test_get_from_uri_with_cache(self, kg_client):
        # The client cache should prevent repeated HTTP requests for the
        # same URI, and be bypassable via use_cache=False.
        assert len(kg_client.cache) == 0
        assert kg_client._nexus_client._http_client.request_count == 0
        uri = "https://nexus.humanbrainproject.org/v0/data/neuralactivity/experiment/patchedcell/v0.1.0/5ab24291-8dca-4a45-a484-8a8c28d396e2"
        # 1st call
        cell1 = PatchedCell.from_uri(uri, kg_client, api="nexus")
        assert len(kg_client.cache) == 1
        assert kg_client._nexus_client._http_client.request_count == 1
        assert uri in kg_client.cache
        # 2nd call
        cell2 = PatchedCell.from_uri(uri, kg_client, api="nexus")
        assert kg_client._nexus_client._http_client.request_count == 1  # should be unchanged if cache was used
        # 3rd call, without cache
        cell3 = PatchedCell.from_uri(uri, kg_client, use_cache=False, api="nexus")
        assert kg_client._nexus_client._http_client.request_count == 2
        assert cell1.id == cell2.id == cell3.id == uri
    def test_by_name_nexus(self, kg_client):
        cell = PatchedCell.by_name("sub2epsp.samp1", kg_client, api="nexus")
        assert cell.uuid == "5ab24291-8dca-4a45-a484-8a8c28d396e2"
    def test_by_name_nexus_not_found(self, kg_client):
        # An unknown name returns None rather than raising.
        cell = PatchedCell.by_name("qwertyuiop", kg_client, api="nexus")
        assert cell is None
    def test_by_name_kgquery(self, kg_client):
        cell = PatchedCell.by_name("GF1.samp1", kg_client, api="query")
        assert cell.uuid == "b813a2f7-5e87-4827-81cd-0008da041648"
    def test_round_trip(self, kg_client):
        # A PatchedCell built in memory must survive serialization to a KG
        # instance and deserialization back, field by field.
        cell1 = PatchedCell("example001",
                            brain_location=BrainRegion("primary auditory cortex"),
                            collection=None,
                            cell_type=CellType("pyramidal cell"),
                            experiments=None,
                            pipette_id=31,
                            seal_resistance=QuantitativeValue(1.2, "GΩ"),
                            pipette_resistance=QuantitativeValue(1.5, "MΩ"),
                            liquid_junction_potential=QuantitativeValue(5.0, "mV"),
                            labeling_compound="0.1% biocytin ",
                            reversal_potential_cl=QuantitativeValue(-65, "mV"))
        instance = Instance(PatchedCell.path, cell1._build_data(kg_client), Instance.path)
        instance.data["@id"] = "http://fake_uuid_93f9cd9a9b"
        instance.data["@type"] = PatchedCell.type
        cell2 = PatchedCell.from_kg_instance(instance, kg_client)
        for field in ("name", "brain_location", "cell_type",
                      "pipette_id", "seal_resistance", "pipette_resistance",
                      "liquid_junction_potential", "labeling_compound",
                      "reversal_potential_cl"):
            assert getattr(cell1, field) == getattr(cell2, field)
    def test_repr(self):
        # The `unicode` builtin only exists under Python 2; when it is
        # missing (Python 3) check the repr, otherwise skip.
        try:
            unicode
        except NameError:
            cell = PatchedCell("example001",
                               brain_location=BrainRegion("primary auditory cortex"),
                               collection=None,
                               cell_type=CellType("pyramidal cell"),
                               experiments=None,
                               pipette_id=31,
                               seal_resistance=QuantitativeValue(1.2, "GΩ"),
                               pipette_resistance=QuantitativeValue(1.5, "MΩ"),
                               liquid_junction_potential=None,
                               labeling_compound="0.1% biocytin ",
                               reversal_potential_cl=None)
            # Fields set to None (liquid_junction_potential,
            # reversal_potential_cl) do not appear in the repr.
            expected_repr = ("PatchedCell(name='example001', "
                             "brain_location=BrainRegion('primary auditory cortex', 'http://purl.obolibrary.org/obo/UBERON_0034751'), "
                             "cell_type=CellType('pyramidal cell', 'http://purl.obolibrary.org/obo/CL_0000598'), "
                             "pipette_id=31, seal_resistance=QuantitativeValue(1.2 'GΩ'), "
                             "pipette_resistance=QuantitativeValue(1.5 'MΩ'), "
                             "labeling_compound='0.1% biocytin ', id=None)")
            assert repr(cell) == expected_repr
        else:
            pytest.skip(
                "The remaining lifespan of Python 2 is too short to fix unicode representation errors")
class TestTrace(BaseTestKG):
    """Listing and round-trip tests for the Trace class."""
    class_under_test = Trace
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
    def test_round_trip(self, kg_client):
        # A Trace built in memory must survive serialization to a KG
        # instance and deserialization back.
        original = Trace("example001",
                         data_location=Distribution("http://example.com/example.csv",
                                                    content_type="text/tab-separated-values"),
                         generated_by=MockKGObject(id="http://fake_uuid_abc123", type=PatchClampExperiment.type),
                         generation_metadata=MockKGObject(id="http://fake_uuid_def456", type=QualifiedTraceGeneration.type),
                         channel=42,
                         data_unit="mV",
                         time_step=QuantitativeValue(0.1, "ms"),
                         part_of=MockKGObject(id="http://fake_uuid_ghi789", type=Dataset.type))
        instance = Instance(Trace.path, original._build_data(kg_client), Instance.path)
        instance.data["@id"] = "http://fake_uuid_6a5d6ecf87"
        instance.data["@type"] = Trace.type
        restored = Trace.from_kg_instance(instance, kg_client)
        # Simple fields must round-trip by value.
        for field in ("name", "data_location", "channel", "data_unit", "time_step"):
            assert getattr(original, field) == getattr(restored, field)
        # Linked objects come back as KGProxy placeholders with matching
        # id and type.
        for field in ("generated_by", "generation_metadata", "part_of"):
            before = getattr(original, field)
            after = getattr(restored, field)
            assert isinstance(after, KGProxy)
            assert before.id == after.id
            assert before.type == after.type
class TestMultiChannelMultiTrialRecording(BaseTestKG):
    """Listing tests for MultiChannelMultiTrialRecording."""
    class_under_test = MultiChannelMultiTrialRecording
    def test_list_nexus(self, kg_client):
        # The nexus fixture contains only four recordings.
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 4, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestSlice(BaseTestKG):
    """Listing tests for Slice."""
    class_under_test = Slice
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestBrainSlicingActivity(BaseTestKG):
    """Listing tests for BrainSlicingActivity."""
    class_under_test = BrainSlicingActivity
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestPatchedSlice(BaseTestKG):
    """Listing tests for PatchedSlice."""
    class_under_test = PatchedSlice
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestPatchedCellCollection(BaseTestKG):
    """Listing tests for PatchedCellCollection (no simple-query variant)."""
    class_under_test = PatchedCellCollection
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestPatchClampActivity(BaseTestKG):
    """Listing tests for PatchClampActivity."""
    class_under_test = PatchClampActivity
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestPatchClampExperiment(BaseTestKG):
    """Listing tests for PatchClampExperiment."""
    class_under_test = PatchClampExperiment
    def test_list_nexus(self, kg_client):
        listed = self.class_under_test.list(kg_client, api="nexus", size=10)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_simple(self, kg_client):
        cls = self.class_under_test
        # Some fixture examples have empty names/stimuli, so relax
        # validation for those fields before listing.
        cls.set_strict_mode(False, field_name="name")
        cls.set_strict_mode(False, field_name="stimulus")
        listed = cls.list(kg_client, api="query", size=10, resolved=False)
        assert len(listed) == 10, len(listed)
    def test_list_kgquery_resolved(self, kg_client):
        cls = self.class_under_test
        # Same relaxation as in the simple-query variant above.
        cls.set_strict_mode(False, field_name="name")
        cls.set_strict_mode(False, field_name="stimulus")
        listed = cls.list(kg_client, api="query", size=10, resolved=True)
        assert len(listed) == 10, len(listed)
class TestQualifiedTraceGeneration(BaseTestKG):
class_under_test = QualifiedTraceGeneration
def test_list_nexus(self, kg_client):
cls = self.class_under_test
objects = cls.list(kg_client, api="nexus", size=10)
assert len(objects) == 10, len(objects)
def test_list_kgquery_simple(self, kg_client):
cls = self.class_under_test
objects = cls.list(kg_client, api="query", size=10, resolved=False)
assert len(objects) == 10, len(objects)
def test_list_kgquery_resolved(self, kg_client):
cls = | |
None
if 'job' in local_var_params:
body_params = local_var_params['job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyAuth', 'JWTAuth'] # noqa: E501
return self.api_client.call_api(
'/projects/{owner}/{name}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreatedContent', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def download_job_artifact(self, owner, name, job_id, **kwargs): # noqa: E501
"""Download an artifact from the job folder # noqa: E501
Get a download link for an artifact in a job folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_job_artifact(owner, name, job_id, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param job_id: (required)
:type job_id: str
:param path: The path to an file within a project folder
:type path: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: object
"""
kwargs['_return_http_data_only'] = True
return self.download_job_artifact_with_http_info(owner, name, job_id, **kwargs) # noqa: E501
    def download_job_artifact_with_http_info(self, owner, name, job_id, **kwargs):  # noqa: E501
        """Download an artifact from the job folder  # noqa: E501

        Get a download link for an artifact in a job folder  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.download_job_artifact_with_http_info(owner, name, job_id, async_req=True)
        >>> result = thread.get()
        :param owner: (required)
        :type owner: str
        :param name: (required)
        :type name: str
        :param job_id: (required)
        :type job_id: str
        :param path: The path to an file within a project folder
        :type path: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
                                       and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(object, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshots self, the positional args and 'kwargs' so that
        # required and optional parameters can be processed uniformly below.
        local_var_params = locals()
        all_params = [
            'owner',
            'name',
            'job_id',
            'path'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject unexpected keyword arguments, then fold accepted ones into
        # the parameter map.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method download_job_artifact" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                        local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `download_job_artifact`")  # noqa: E501
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                        local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `download_job_artifact`")  # noqa: E501
        # verify the required parameter 'job_id' is set
        if self.api_client.client_side_validation and ('job_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['job_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `job_id` when calling `download_job_artifact`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the {owner}, {name} and {job_id} path template.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501
        if 'job_id' in local_var_params:
            path_params['job_id'] = local_var_params['job_id']  # noqa: E501
        # Optional 'path' query parameter selects the artifact to download.
        query_params = []
        if 'path' in local_var_params and local_var_params['path'] is not None:  # noqa: E501
            query_params.append(('path', local_var_params['path']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['APIKeyAuth', 'JWTAuth']  # noqa: E501
        return self.api_client.call_api(
            '/projects/{owner}/{name}/jobs/{job_id}/artifacts/download', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='object',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def get_job(self, owner, name, job_id, **kwargs): # noqa: E501
"""Get a Job # noqa: E501
Retrieve a job. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_job(owner, name, job_id, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param job_id: (required)
:type job_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CloudJob
"""
kwargs['_return_http_data_only'] = True
return self.get_job_with_http_info(owner, name, job_id, **kwargs) # noqa: E501
def get_job_with_http_info(self, owner, name, job_id, **kwargs): # noqa: E501
"""Get a Job # noqa: E501
Retrieve a job. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_job_with_http_info(owner, name, job_id, async_req=True)
>>> result = thread.get()
:param owner: (required)
:type owner: str
:param name: (required)
:type name: str
:param job_id: (required)
:type job_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(CloudJob, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'job_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_job`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_job`") # noqa: E501
# verify the required parameter 'job_id' is set
if self.api_client.client_side_validation and ('job_id' not in local_var_params or # noqa: E501
local_var_params['job_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `job_id` when calling `get_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Licensed under a MIT style license - see LICENSE.rst
"""Download BOSS data files from a remote server.
The remote module is responsible for downloading data files into a local filesystem
using a directory layout that mirrors the remote data source. Most scripts will
create a single :class:`Manager` object using the default constructor for this
purpose::
import bossdata
mirror = bossdata.remote.Manager()
This mirror object is normally configured by the `$BOSS_DATA_URL` and `$BOSS_LOCAL_ROOT`
environment variables; no other module uses these variables except through
a :class:`Manager` object. These parameters can also be set by :class:`Manager` constructor
arguments. When neither the environment variables nor the constructor arguments are set,
a default data URL appropriate for the most recent public data release (DR14) is used, and
a temporary directory is created and used for the local root.
:class:`Manager` objects have no knowledge of how
data files are organized or named: use the :mod:`bossdata.path` module to
build the paths of frequently used data files. See :doc:`/usage` for recommendations
on using the :mod:`bossdata.path` and :mod:`bossdata.remote` modules together.
"""
from __future__ import division, print_function
import os
import stat
import os.path
import math
import tempfile
import requests
from progressbar import ProgressBar, Percentage, Bar, FileTransferSpeed
class Manager(object):
"""Manage downloads of BOSS data via HTTP.
The default mapping from remote to local filenames is to mirror the remote file hierarchy
on the local disk. The normal mode of operation is to establish the local root for the
mirror using the BOSS_LOCAL_ROOT environment variable. When the constructor is called
with no arguments, it will raise a ValueError if either BOSS_DATA_URL or BOSS_LOCAL_ROOT
is not set.
Args:
data_url(str): Base URL of all BOSS data files. A trailing / on the URL is optional. If
this arg is None, then the value of the BOSS_DATA_URL environment variable we be
used instead.
local_root(str): Local path to use as the root of the locally mirrored file
hierarchy. If this arg is None, then the value of the BOSS_LOCAL_ROOT environment
variable, if any, will be used instead. If a value is provided, it should identify
an existing writeable directory.
Raises:
ValueError: No such directory local_root or missing data_url.
"""
    def __init__(self, data_url=None, local_root=None, verbose=True):
        # Constructor args take precedence over the corresponding
        # environment variables ($BOSS_DATA_URL, $BOSS_LOCAL_ROOT).
        self.data_url = os.getenv('BOSS_DATA_URL') if data_url is None else data_url
        if self.data_url is None:
            # Fall back to the class-level default URL.
            self.data_url = Manager.default_data_url
            if verbose:
                print('Using the default "{}" since $BOSS_DATA_URL is not set.'.format(
                    self.data_url))
        # Look for a local filesystem URL. The triple slash here is because the
        # hostname component is omitted so it defaults to localhost.
        # See https://en.wikipedia.org/wiki/File_URI_scheme
        if self.data_url.startswith('file:///'):
            self.data_is_remote = False
            # Truncate the leading 'file://' to start with a single leading '/'.
            self.data_url = self.data_url[7:]
        else:
            self.data_is_remote = True
            # Do we have a plain URL or a URL%username%password triplet?
            # (the separator used by the code below is '%', not ':')
            try:
                self.data_url, username, password = self.data_url.split('%')
                self.authorization = username, password
            except ValueError:
                # Wrong number of '%' separators: plain URL, no authentication.
                self.authorization = None
        self.data_url = self.data_url.rstrip('/')
        self.local_root = os.getenv('BOSS_LOCAL_ROOT') if local_root is None else local_root
        if self.local_root is None:
            # Create a temporary directory to use.
            self.local_root = tempfile.mkdtemp(suffix='_bossdata')
            print('Using a temporary directory for locally mirrored data.',
                  'Set $BOSS_LOCAL_ROOT to specify a permanent location.')
        if not os.path.isdir(self.local_root):
            raise ValueError('Cannot use non-existent path "{}" as local root.'.format(
                self.local_root))
default_data_url = 'http://dr14.sdss.org'
"""Default to use when $BOSS_DATA_URL is not set.
See :doc:`/scripts` and :doc:`/usage` for details.
"""
    def download(self, remote_path, local_path, chunk_size=4096, progress_min_size=10):
        """Download a single BOSS data file.

        Downloads are streamed so that the memory requirements are independent of the
        file size. During the download, the file is written to its final location but
        with '.downloading' appended to the file name. This means that any download
        that is interrupted or fails will normally not lead to an incomplete file
        being returned by a subsequent call to :meth:`get`. Instead, the file will
        be re-downloaded. There is no facility for resuming a previous partial download.
        After a successful download, the file is renamed to its final location and
        has its permission bits set to read only (to prevent accidental modifications
        of files that are supposed to exactly mirror the remote file system).

        Args:
            remote_path(str): The full path to the remote file relative to the remote
                server root, which should normally be obtained using :class:`bossdata.path`
                methods.
            local_path(str): The (absolute or relative) path of the local file to write.
            chunk_size(int): Size of data chunks to use for the streaming download. Larger
                sizes will potentially download faster but also require more memory.
            progress_min_size(int): Display a text progress bar for any downloads whose size
                in Mb exceeds this value. No progress bar will ever be shown if this
                value is None.

        Returns:
            str: Absolute local path of the downloaded file.

        Raises:
            ValueError: local_path directory does not exist.
            RuntimeError: HTTP request returned an error status.
        """
        if not local_path:
            raise ValueError('Missing required argument local_path.')
        local_path = os.path.abspath(local_path)
        # Check that the local path points to an existing directory.
        if not os.path.isdir(os.path.dirname(local_path)):
            raise ValueError('local_path directory does not exist: {}.'.format(
                os.path.dirname(local_path)))
        # Prepare the HTTP request. For details on the timeout parameter see
        # http://docs.python-requests.org/en/latest/user/advanced/#timeouts
        url = self.data_url + '/' + remote_path.lstrip('/')
        try:
            request = requests.get(url, stream=True, auth=self.authorization,
                                   timeout=(3.05, 27))
            if request.status_code != requests.codes.ok:
                if request.status_code == requests.codes.NOT_FOUND:
                    raise RuntimeError('There is no remote file {}.'.format(remote_path))
                else:
                    raise RuntimeError('HTTP request returned error code {} for {}.'.format(
                        request.status_code, url))
        except requests.exceptions.RequestException as e:
            raise RuntimeError('HTTP request failed for {}: {}.'.format(url, str(e)))
        # Check that there is enough free space, if possible. When the server
        # sends no content-length header, both this check and the progress bar
        # are skipped.
        progress_bar = None
        file_size = request.headers.get('content-length', None)
        if file_size is not None:
            file_size = int(file_size)
            parent_path = os.path.dirname(local_path)
            file_stat = os.statvfs(parent_path)
            free_space = file_stat.f_bavail * file_stat.f_frsize
            Mb = 1 << 20
            # Require ~1Mb of headroom beyond the reported file size.
            if file_size + 1 * Mb > free_space:
                raise RuntimeError('File size ({:.1f}Mb) exceeds free space for {}.'.format(
                    file_size / (1.0 * Mb), local_path))
            if progress_min_size is not None and file_size > progress_min_size * Mb:
                label = os.path.basename(local_path)
                progress_bar = ProgressBar(
                    widgets=[label, ' ', Percentage(), Bar(), ' ', FileTransferSpeed()],
                    maxval=math.ceil(file_size / chunk_size)).start()
        # Stream the request response binary content into a temporary file.
        progress = 0
        try:
            with open(local_path + '.downloading', 'wb') as f:
                for chunk in request.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    if progress_bar:
                        progress += 1
                        progress_bar.update(progress)
            # Make the temporary file read only by anyone.
            os.chmod(local_path + '.downloading', stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH)
            # Move the temporary file to its permanent location.
            os.rename(local_path + '.downloading', local_path)
        except requests.exceptions.RequestException as e:
            raise RuntimeError('HTTP streaming failed for {}: {}.'.format(url, str(e)))
        except IOError as e:
            raise RuntimeError('Streaming IO error for {}: {}.'.format(url, str(e)))
        if progress_bar:
            progress_bar.finish()
        return local_path
def local_path(self, remote_path, suffix=None, new_suffix=None):
"""Get the local path corresponding to a remote path.
Does not check that the file or its parent directory exists. Use :meth:`get` to
ensure that the file exists, downloading it if necessary.
Args:
remote_path(str): The full path to the remote file relative to the remote
server root, which should normally be obtained using :class:`bossdata.path`
methods.
suffix(str): The expected suffix of the returned local path. A
RuntimeError is raised when the local path does not have this
suffix according to :meth:`str.endswith`, unless this parameter is None.
new_suffix(str): Replace suffix with this value.
No change is performed when this parameter is None, and ``suffix`` must
also be set with this parameter is not None.
Returns:
str: Absolute local path of the local file that mirrors the remote file,
with a possible suffix replacement.
Raises:
ValueError: The ``new_suffix`` parameter is set but ``suffix`` is None.
RuntimeError: The local path does not have the expected suffix.
"""
if new_suffix is not None and suffix is None:
raise ValueError('Must specify a suffix when specifying new_suffix.')
# Check for the expected suffix if one is provided.
if suffix is not None and not remote_path.endswith(suffix):
raise RuntimeError(
'Path {} does not end with "{}".'.format(remote_path, suffix))
# Replace the suffix if requested.
if new_suffix is not None:
# We cannot use str.replace() here in case suffix appears more than once.
remote_path = remote_path[:-len(suffix)] + new_suffix
if self.data_is_remote or new_suffix is not | |
<reponame>Jmahaja1/genieparser
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.junos.show_ldp import (ShowLDPSession,
ShowLdpNeighbor,
ShowLdpSessionIpaddressDetail,
ShowLdpDatabaseSessionIpaddress,
ShowLDPInterface,ShowLDPInterfaceDetail,
ShowLDPOverview)
# =================================
# Unit test for 'show ldp session'
# =================================
class TestShowLDPSession(unittest.TestCase):
    '''unit test for "show ldp session'''
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        'ldp-session-information': {
            'ldp-session': [{
                'ldp-neighbor-address': '10.34.2.250',
                'ldp-session-state': 'Operational',
                'ldp-connection-state': 'Open',
                'ldp-remaining-time': '26',
                'ldp-session-adv-mode': 'DU'
            }]
        }
    }
    # Captured CLI output for 'show ldp session'.
    golden_output = {
        'execute.return_value':
        '''
        Address State Connection Hold time Adv. Mode
        10.34.2.250 Operational Open 26 DU
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLDPSession(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLDPSession(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
# ===============================================
# Unit test for 'show ldp interface {interface}'
# ===============================================
class TestShowLDPInterface(unittest.TestCase):
    '''unit test for "show ldp interface {interface}'''
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-interface-information": {
            "ldp-interface": {
                "interface-name": "ge-0/0/0.0",
                "ldp-interface-local-address": "10.169.14.157",
                "ldp-label-space-id": "10.169.14.240:0",
                "ldp-neighbor-count": "1",
                "ldp-next-hello": "3"
            }
        }
    }
    # Captured CLI output for 'show ldp interface ge-0/0/0.0'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp interface ge-0/0/0.0
        Interface Address Label space ID Nbr Next
        count hello
        ge-0/0/0.0 10.169.14.157 10.169.14.240:0 1 3
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLDPInterface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(interface='ge-0/0/0.0')
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLDPInterface(device=self.device)
        parsed_output = obj.parse(interface='ge-0/0/0.0')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =====================================================
# Unit test for 'show ldp interface {interface} detail'
# =====================================================
class TestShowLDPInterfaceDetail(unittest.TestCase):
    '''unit test for "show ldp interface {interface} detail'''
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-interface-information": {
            "ldp-interface": {
                "interface-name": "ge-0/0/0.0",
                "ldp-interface-local-address": "10.169.14.157",
                "ldp-label-space-id": "10.169.14.240:0",
                "ldp-neighbor-count": "1",
                "ldp-next-hello": "1",
                "ldp-transport-address": "10.169.14.240",
                "ldp-hello-interval": "5",
                "ldp-holdtime": "15",
            }
        }
    }
    # Captured CLI output for 'show ldp interface ge-0/0/0.0 detail'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp interface ge-0/0/0.0 detail
        Interface Address Label space ID Nbr Next
        count hello
        ge-0/0/0.0 10.169.14.157 10.169.14.240:0 1 1
        Hello interval: 5, Hold time: 15, Transport address: 10.169.14.240
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLDPInterfaceDetail(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(interface='ge-0/0/0.0')
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLDPInterfaceDetail(device=self.device)
        parsed_output = obj.parse(interface='ge-0/0/0.0')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp neighbor'
# =================================
class TestShowLdpNeighborBasic(unittest.TestCase):
    """Unit tests for the 'show ldp neighbor' parser (ShowLdpNeighbor).

    NOTE(review): this class was originally named ``TestShowLDPSession``,
    duplicating the class defined earlier in this file. A later ``class``
    statement rebinds the name, so the earlier class's tests were silently
    never collected; renamed so every test class is discovered. The content
    also duplicates ``TestShowLdpNeighbor`` below and could be consolidated.
    """
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        'ldp-neighbor-information':
            {'ldp-neighbor': [
                {'interface-name': 'ge-0/0/0.0',
                 'ldp-label-space-id': '10.34.2.250:0',
                 'ldp-neighbor-address': '10.169.14.158',
                 'ldp-remaining-time': '14'
                 }
            ]
            }
    }
    # Captured CLI output for 'show ldp neighbor'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp neighbor
        Address Interface Label space ID Hold time
        10.169.14.158 ge-0/0/0.0 10.34.2.250:0 14
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLdpNeighbor(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLdpNeighbor(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp database session ipaddress'
# =================================
class TestShowLdpDatabaseSessionIpaddressBasic(unittest.TestCase):
    """Unit tests for the 'show ldp database session <ipaddress>' parser.

    NOTE(review): this class was originally named ``TestShowLDPSession``,
    duplicating the class defined earlier in this file. A later ``class``
    statement rebinds the name, so the earlier class's tests were silently
    never collected; renamed so every test class is discovered. The content
    also duplicates ``TestShowLdpDatabaseSessionIpaddress`` below and could
    be consolidated.
    """
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-database-information": {
            "ldp-database": [
                {
                    "ldp-binding": [
                        {
                            "ldp-label": "3",
                            "ldp-prefix": "10.34.2.250/32"
                        },
                        {
                            "ldp-label": "16",
                            "ldp-prefix": "10.169.14.240/32"
                        }
                    ],
                    "ldp-database-type": "Input label database",
                    "ldp-label-received": "2",
                    "ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
                },
                {
                    "ldp-binding": [
                        {
                            "ldp-label": "16",
                            "ldp-prefix": "10.34.2.250/32"
                        },
                        {
                            "ldp-label": "3",
                            "ldp-prefix": "10.169.14.240/32"
                        }
                    ],
                    "ldp-database-type": "Output label database",
                    "ldp-label-advertised": "2",
                    "ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
                }
            ]
        }
    }
    # Captured CLI output for 'show ldp database 10.34.2.250'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp database 10.34.2.250
        Input label database, 10.169.14.240:0--10.34.2.250:0
        Labels received: 2
        Label Prefix
        3 10.34.2.250/32
        16 10.169.14.240/32
        Output label database, 10.169.14.240:0--10.34.2.250:0
        Labels advertised: 2
        Label Prefix
        16 10.34.2.250/32
        3 10.169.14.240/32
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
        parsed_output = obj.parse(ipaddress='10.34.2.250')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp neighbor'
# =================================
class TestShowLdpNeighbor(unittest.TestCase):
    '''unit test for "show ldp neighbor '''
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        'ldp-neighbor-information':
            {'ldp-neighbor': [
                {'interface-name': 'ge-0/0/0.0',
                 'ldp-label-space-id': '10.34.2.250:0',
                 'ldp-neighbor-address': '10.169.14.158',
                 'ldp-remaining-time': '14'
                 }
            ]
            }
    }
    # Captured CLI output for 'show ldp neighbor'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp neighbor
        Address Interface Label space ID Hold time
        10.169.14.158 ge-0/0/0.0 10.34.2.250:0 14
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLdpNeighbor(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLdpNeighbor(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp database session ipaddress'
# =================================
class TestShowLdpDatabaseSessionIpaddress(unittest.TestCase):
    '''unit test for "show ldp database session ipaddress'''
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-database-information": {
            "ldp-database": [
                {
                    "ldp-binding": [
                        {
                            "ldp-label": "3",
                            "ldp-prefix": "10.34.2.250/32"
                        },
                        {
                            "ldp-label": "16",
                            "ldp-prefix": "10.169.14.240/32"
                        }
                    ],
                    "ldp-database-type": "Input label database",
                    "ldp-label-received": "2",
                    "ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
                },
                {
                    "ldp-binding": [
                        {
                            "ldp-label": "16",
                            "ldp-prefix": "10.34.2.250/32"
                        },
                        {
                            "ldp-label": "3",
                            "ldp-prefix": "10.169.14.240/32"
                        }
                    ],
                    "ldp-database-type": "Output label database",
                    "ldp-label-advertised": "2",
                    "ldp-session-id": "10.169.14.240:0--10.34.2.250:0"
                }
            ]
        }
    }
    # Captured CLI output for 'show ldp database 10.34.2.250'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp database 10.34.2.250
        Input label database, 10.169.14.240:0--10.34.2.250:0
        Labels received: 2
        Label Prefix
        3 10.34.2.250/32
        16 10.169.14.240/32
        Output label database, 10.169.14.240:0--10.34.2.250:0
        Labels advertised: 2
        Label Prefix
        16 10.34.2.250/32
        3 10.169.14.240/32
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        # NOTE(review): parse() is called without ipaddress here while
        # test_golden passes one — presumably the argument is optional in the
        # parser signature; confirm against ShowLdpDatabaseSessionIpaddress.
        self.device = Mock(**self.empty_output)
        obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLdpDatabaseSessionIpaddress(device=self.device)
        parsed_output = obj.parse(ipaddress='10.34.2.250')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# ===============================================
# Unit test for 'show ldp interface {interface}'
# ===============================================
class TestShowLDPInterface2(unittest.TestCase):
    """Unit tests for 'show ldp interface {interface}' with a second data set.

    NOTE(review): this class was originally also named ``TestShowLDPInterface``.
    A later ``class`` statement rebinds the name, so the earlier class (with
    *different* fixture data) was silently never collected; renamed so both
    data sets are exercised.
    """
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-interface-information": {
            "ldp-interface": {
                "interface-name": "ge-0/0/0.0",
                "ldp-interface-local-address": "10.1.2.2",
                "ldp-label-space-id": "10.204.14.100:0",
                "ldp-neighbor-count": "1",
                "ldp-next-hello": "3"
            }
        }
    }
    # Captured CLI output for 'show ldp interface ge-0/0/0.0'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp interface ge-0/0/0.0
        Interface Address Label space ID Nbr Next
        count hello
        ge-0/0/0.0 10.1.2.2 10.204.14.100:0 1 3
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLDPInterface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(interface='ge-0/0/0.0')
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLDPInterface(device=self.device)
        parsed_output = obj.parse(interface='ge-0/0/0.0')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =====================================================
# Unit test for 'show ldp interface {interface} detail'
# =====================================================
class TestShowLDPInterfaceDetail2(unittest.TestCase):
    """Unit tests for 'show ldp interface {interface} detail', second data set.

    NOTE(review): this class was originally also named
    ``TestShowLDPInterfaceDetail``. A later ``class`` statement rebinds the
    name, so the earlier class (with *different* fixture data) was silently
    never collected; renamed so both data sets are exercised.
    """
    # Mocked pyATS device shared by the tests in this class.
    device = Device(name='aDevice')
    maxDiff = None
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Structure the parser should produce from golden_output below.
    golden_parsed_output = {
        "ldp-interface-information": {
            "ldp-interface": {
                "interface-name": "ge-0/0/0.0",
                "ldp-interface-local-address": "10.1.2.2",
                "ldp-label-space-id": "10.204.14.100:0",
                "ldp-neighbor-count": "1",
                "ldp-next-hello": "1",
                "ldp-transport-address": "10.204.14.100",
                "ldp-hello-interval": "5",
                "ldp-holdtime": "15",
            }
        }
    }
    # Captured CLI output for 'show ldp interface ge-0/0/0.0 detail'.
    golden_output = {
        'execute.return_value':
        '''
        show ldp interface ge-0/0/0.0 detail
        Interface Address Label space ID Nbr Next
        count hello
        ge-0/0/0.0 10.1.2.2 10.204.14.100:0 1 1
        Hello interval: 5, Hold time: 15, Transport address: 10.204.14.100
        '''
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowLDPInterfaceDetail(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(interface='ge-0/0/0.0')
    def test_golden(self):
        """Golden device output must parse into golden_parsed_output."""
        self.device = Mock(**self.golden_output)
        obj = ShowLDPInterfaceDetail(device=self.device)
        parsed_output = obj.parse(interface='ge-0/0/0.0')
        self.assertEqual(parsed_output, self.golden_parsed_output)
# =================================
# Unit test for 'show ldp overview'
# =================================
class TestShowLDPOverview(unittest.TestCase):
'''unit test for "show ldp overview'''
device = Device(name='aDevice')
maxDiff = None
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
show ldp overview
Instance: master
Reference count: 2
Router ID: 10.204.14.100
LDP inet: enabled
Transport preference: IPv4
Message id: 4
Configuration sequence: 1
Deaggregate: disabled
Explicit null: disabled
IPv6 tunneling: disabled
Strict targeted hellos: disabled
Loopback if added: no
Route preference: 9
Unicast transit LSP chaining: disabled
P2MP transit LSP chaining: disabled
Transit LSP statistics based on route statistics: disabled
LDP route acknowledgement: enabled
BGP export: enabled
LDP mtu discovery: disabled
LDP SR Mapping Client: disabled
Capabilities enabled: none
Egress FEC capabilities enabled: entropy-label-capability
Downstream unsolicited Sessions:
Operational: 1
Retention: liberal
Control: ordered
Auto targeted sessions:
Auto targeted: disabled
Dynamic tunnel session count: 0
P2MP:
Recursive route: disabled
No rsvp tunneling: disabled
Timers:
Keepalive interval: 10, Keepalive timeout: 30
Link hello interval: 5, Link hello hold time: 15
Targeted hello interval: 15, Targeted hello hold time: 45
Label withdraw delay: 60, Make before break timeout: 30
Make before break switchover delay: 3
Link protection timeout: 120
Graceful restart:
Restart: disabled, Helper: enabled, Restart in process: false
Reconnect time: 60000, Max neighbor reconnect time: 120000
Recovery time: 160000, Max neighbor recovery time: 240000
Traffic Engineering:
Bgp igp: disabled
Both ribs: disabled
Mpls forwarding: disabled
IGP:
Tracking igp metric: disabled
Sync session up delay: 10
Session protection:
Session protection: disabled
Session protection timeout: 0
Interface addresses advertising:
10.1.2.2
LDP Job:
Read job time quantum: 1000, | |
# Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from google.auth.transport import requests
from google.oauth2 import service_account
_BASE_URL = "https://healthcare.googleapis.com/v1beta1"
# [START healthcare_get_session]
def get_session(service_account_json):
    """
    Build an authorized Requests Session from a service account
    credentials JSON file; the session is used for all Healthcare API
    requests in this module.
    """
    # Load the credentials from disk and restrict them to the
    # cloud-platform scope before wrapping them in a session.
    creds = service_account.Credentials.from_service_account_file(
        service_account_json
    ).with_scopes(["https://www.googleapis.com/auth/cloud-platform"])
    return requests.AuthorizedSession(creds)
# [END healthcare_get_session]
# [START healthcare_create_resource]
def create_patient(
    service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id
):
    """Creates a new Patient resource in a FHIR store.

    Posts a fixed sample Patient body and prints the server-assigned ID.
    """
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    patient_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}/fhir/Patient"
    )
    patient = {
        "name": [{"use": "official", "family": "Smith", "given": ["Darcy"]}],
        "gender": "female",
        "birthDate": "1970-01-01",
        "resourceType": "Patient",
    }
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.post(
        patient_url,
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        json=patient,
    )
    response.raise_for_status()
    resource = response.json()
    print("Created Patient resource with ID {}".format(resource["id"]))
    return response
# [END healthcare_create_resource]
# [START healthcare_create_encounter]
def create_encounter(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    patient_id,
):
    """Creates an Encounter resource in a FHIR store, linked to a Patient."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    encounter_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}/fhir/Encounter"
    )
    encounter = {
        "status": "finished",
        "class": {
            "system": "http://hl7.org/fhir/v3/ActCode",
            "code": "IMP",
            "display": "inpatient encounter",
        },
        "reason": [
            {
                "text": "The patient had an abnormal heart rate. She was"
                " concerned about this."
            }
        ],
        # Reference the Patient this encounter belongs to.
        "subject": {"reference": f"Patient/{patient_id}"},
        "resourceType": "Encounter",
    }
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.post(
        encounter_url,
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        json=encounter,
    )
    response.raise_for_status()
    resource = response.json()
    print("Created Encounter resource with ID {}".format(resource["id"]))
    return response
# [END healthcare_create_encounter]
# [START healthcare_create_observation]
def create_observation(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    patient_id,
    encounter_id,
):
    """
    Creates an Observation resource in a FHIR store, linked to a Patient
    and an Encounter.
    """
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    observation_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}/fhir/Observation"
    )
    observation = {
        "resourceType": "Observation",
        "identifier": [{"system": "my-code-system", "value": "ABC-12345"}],
        "status": "final",
        "subject": {"reference": f"Patient/{patient_id}"},
        "effectiveDateTime": "2019-01-01T00:00:00+00:00",
        "valueQuantity": {"value": 80, "unit": "bpm"},
        "context": {"reference": f"Encounter/{encounter_id}"},
    }
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.post(
        observation_url,
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        json=observation,
    )
    response.raise_for_status()
    resource = response.json()
    print("Created Observation resource with ID {}".format(resource["id"]))
    return response
# [END healthcare_create_observation]
# [START healthcare_delete_resource]
def delete_resource(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
):
    """
    Deletes a FHIR resource.

    Note: even when the resource does not exist, the server returns a
    200 OK HTTP status code. To check that the resource was successfully
    deleted, search for or get the resource and see if it exists.
    """
    url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
    resource_path = "{}/datasets/{}/fhirStores/{}/fhir/{}/{}".format(
        url, dataset_id, fhir_store_id, resource_type, resource_id
    )
    # Make an authenticated API request
    session = get_session(service_account_json)
    response = session.delete(resource_path)
    # Fix: the HTTP status was previously ignored and success printed
    # unconditionally; surface transport/auth errors (4xx/5xx) like the
    # other helpers in this module do.
    response.raise_for_status()
    print("Deleted {} resource with ID {}.".format(resource_type, resource_id))
    return response
# [END healthcare_delete_resource]
# [START healthcare_get_resource]
def get_resource(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
):
    """Gets a single FHIR resource and pretty-prints it."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    target = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}"
        f"/fhir/{resource_type}/{resource_id}"
    )
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.get(
        target, headers={"Content-Type": "application/fhir+json;charset=utf-8"}
    )
    response.raise_for_status()
    resource = response.json()
    print("Got {} resource:".format(resource["resourceType"]))
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_get_resource]
# [START healthcare_list_resource_history]
def list_resource_history(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
):
    """Gets the version history of a FHIR resource and pretty-prints it."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    history_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}"
        f"/fhir/{resource_type}/{resource_id}/_history"
    )
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.get(
        history_url, headers={"Content-Type": "application/fhir+json;charset=utf-8"}
    )
    response.raise_for_status()
    resource = response.json()
    print(
        "History for {} resource:".format(
            resource["entry"][0]["resource"]["resourceType"]
        )
    )
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_list_resource_history]
# [START healthcare_get_resource_history]
def get_resource_history(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
    version_id,
):
    """Gets one specific historical version of a FHIR resource."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    version_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}"
        f"/fhir/{resource_type}/{resource_id}/_history/{version_id}"
    )
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.get(
        version_url, headers={"Content-Type": "application/fhir+json;charset=utf-8"}
    )
    response.raise_for_status()
    resource = response.json()
    print("Got history for {} resource:".format(resource_type))
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_get_resource_history]
# [START healthcare_export_fhir_resources]
def export_resources(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    gcs_destination,
):
    """Exports the resources in a FHIR store to Cloud Storage."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    store_url = f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}"
    export_body = {"gcsDestination": {"uriPrefix": gcs_destination}}
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.post(
        f"{store_url}:export",
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        json=export_body,
    )
    response.raise_for_status()
    resource = response.json()
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_export_fhir_resources]
# [START healthcare_import_fhir_resources]
def import_resources(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    gcs_source,
):
    """Imports resources into a FHIR store from Cloud Storage.

    Import errors are written to ``gcs_source + "_errors"``.
    (The previous docstring said "Exports", a copy-paste error.)
    """
    url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
    resource_path = "{}/datasets/{}/fhirStores/{}".format(
        url, dataset_id, fhir_store_id
    )
    # Make an authenticated API request
    session = get_session(service_account_json)
    headers = {"Content-Type": "application/fhir+json;charset=utf-8"}
    body = {
        "gcsSource": {"uriPrefix": gcs_source},
        "gcsErrorDestination": {"uriPrefix": gcs_source + "_errors"},
    }
    response = session.post(resource_path + ":import", headers=headers, json=body)
    response.raise_for_status()
    resource = response.json()
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_import_fhir_resources]
# [START healthcare_delete_resource_purge]
def delete_resource_purge(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
):
    """Deletes all versions of a resource (excluding the current version)."""
    url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
    resource_path = "{}/datasets/{}/fhirStores/{}/fhir/{}/{}".format(
        url, dataset_id, fhir_store_id, resource_type, resource_id
    )
    resource_path += "/$purge"
    # Make an authenticated API request
    session = get_session(service_account_json)
    headers = {"Content-Type": "application/fhir+json;charset=utf-8"}
    response = session.delete(resource_path, headers=headers)
    # raise_for_status() raises for any status >= 400, so reaching this
    # point guarantees success; the previous `if response.status_code < 400`
    # guard around the print was dead code.
    response.raise_for_status()
    print(
        "Deleted versions of {} resource "
        "(excluding current version).".format(resource_type)
    )
    return response
# [END healthcare_delete_resource_purge]
# [START healthcare_update_resource]
def update_resource(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    resource_type,
    resource_id,
):
    """Replaces the entire contents of an existing FHIR resource."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    target = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}"
        f"/fhir/{resource_type}/{resource_id}"
    )
    # Replacement body: mark the resource active, keeping its type and ID.
    replacement = {"resourceType": resource_type, "active": True, "id": resource_id}
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.put(
        target,
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        json=replacement,
    )
    response.raise_for_status()
    resource = response.json()
    print("Updated {} resource:".format(resource["resourceType"]))
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_update_resource]
# [START healthcare_conditional_update_resource]
def conditional_update_resource(
    service_account_json,
    base_url,
    project_id,
    cloud_region,
    dataset_id,
    fhir_store_id,
    patient_id,
    encounter_id,
):
    """
    Replaces the contents of any resource matched by the search criteria
    passed as query parameters.
    """
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    # The search query in this request updates all Observations
    # using the Observation's identifier (ABC-12345 in my-code-system)
    # so that their 'status' is 'cancelled'.
    observation_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}/fhir/Observation"
    )
    replacement = {
        "effectiveDateTime": "2019-01-01T00:00:00+00:00",
        "resourceType": "Observation",
        "context": {"reference": f"Encounter/{encounter_id}"},
        "identifier": [{"system": "my-code-system", "value": "ABC-12345"}],
        "status": "cancelled",
        "subject": {"reference": f"Patient/{patient_id}"},
        "valueQuantity": {"unit": "bpm", "value": 80},
    }
    # The search criteria travel as a query-string parameter.
    criteria = {"identifier": "my-code-system|ABC-12345"}
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.put(
        observation_url,
        headers={"Content-Type": "application/fhir+json;charset=utf-8"},
        params=criteria,
        json=replacement,
    )
    response.raise_for_status()
    resource = response.json()
    print(
        "Conditionally updated Observations with the identifier "
        "'my-code-system|ABC-12345' to have a 'status' of "
        "'cancelled'."
    )
    print(json.dumps(resource, indent=2))
    return resource
# [END healthcare_conditional_update_resource]
# [START healthcare_conditional_delete_resource]
def conditional_delete_resource(
    service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id
):
    """Deletes every FHIR resource matched by a search query."""
    location = f"{base_url}/projects/{project_id}/locations/{cloud_region}"
    # The search query in this request deletes all Observations
    # with a status of 'cancelled'.
    observation_url = (
        f"{location}/datasets/{dataset_id}/fhirStores/{fhir_store_id}/fhir/Observation"
    )
    # The search criteria travel as a query-string parameter.
    criteria = {"status": "cancelled"}
    # Issue the request through an authorized session.
    session = get_session(service_account_json)
    response = session.delete(observation_url, params=criteria)
    print(response.url)
    # A 404 simply means nothing matched; treat only other errors as fatal.
    if response.status_code != 404:
        response.raise_for_status()
    print("Conditionally deleted all Observations with status='cancelled'.")
    return response
# [END healthcare_conditional_delete_resource]
# [START healthcare_patch_resource]
def patch_resource(
service_account_json,
base_url,
project_id,
cloud_region,
dataset_id,
fhir_store_id,
resource_type,
resource_id,
):
"""Updates part of an existing resource.."""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
resource_path = | |
if not self.__bool__():
self.a, self.b, self.c, self.d, self.e = 0, 1, 1, 0, 1
elif int(self) == self:
self.a, self.b, self.c, self.d, self.e = int(self), 1, 1, 0, 1
elif int(self.b) != self.b or int(self.c) != self.c or int(self.d) != self.d or int(self.e) != self.e:
self.a, self.b, self.c, self.d, self.e = self.todec(), 1, 1, 0, 1
else:
if self.b < 0:
self.a, self.b, self.d = -self.a, -self.b, -self.d
if self.c == self.e == 1:
self.a, self.c, self.d, self.e = self.a + self.d, 1, 0, 1
elif self.e > self.c:
self.a, self.c, self.d, self.e = self.d, self.e, self.a, self.c
if abs(self.b) > 10000:
self.a, self.b, self.c, self.d, self.e = self.todec(), 1, 1, 0, 1
def limit_denominator(self, fractuple, max_denominator=None):
"""Closest Fraction to self with denominator at most max_denominator."""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if not max_denominator:
max_denominator = 10**(getcontext().prec - 2)
if len(fractuple) != 2:
raise ValueError(
"fractuple should be (a,b) represents the fraction a/b")
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
ngcd = gcd(fractuple[0], fractuple[1])
fractuple = (fractuple[0] // ngcd, fractuple[1] // ngcd)
if fractuple[1] <= max_denominator:
return fractuple
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = fractuple
while True:
a = n // d
q2 = q0 + a * q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0 + a * p1, q2
n, d = d, n - a * d
k = (max_denominator - q0) // q1
bound1 = (p0 + k * p1, q0 + k * q1)
bound2 = (p1, q1)
if abs(bound2[0] / bound2[1] - fractuple[0] / fractuple[1]
) <= abs(bound1[0] / bound1[1] - fractuple[0] / fractuple[1]):
return bound2
else:
return bound1
class cfrac(Calculator):
    """Exact complex number: ``real + imag*i`` where both components are
    ``frac`` fractions.

    Accepts a Python ``complex``, another ``cfrac``, or any value that
    ``frac`` can convert; an explicit (truthy) ``imag`` argument overrides
    the imaginary part of the first argument.
    """

    def __init__(self, real=0, imag=None):
        if isinstance(real, complex):
            if imag:
                self.real = frac(real.real)
                self.imag = frac(imag)
            else:
                self.real = frac(real.real)
                self.imag = frac(real.imag)
        elif isinstance(real, cfrac):
            if imag:
                self.real = frac(real.real)
                self.imag = frac(imag)
            else:
                self.real = frac(real.real)
                self.imag = frac(real.imag)
        else:
            if imag:
                self.real = frac(real)
                self.imag = frac(imag)
            else:
                self.real = frac(real)
                self.imag = frac(0)

    def __float__(self):
        # Float conversion drops the imaginary part (matches __int__).
        return float(self.real)

    def todec(self):
        return self.real.todec()

    def __int__(self):
        return int(self.real)

    def __complex__(self):
        return complex(float(self.real), float(self.imag))

    def __round__(self, n=0):
        # Rounds the real component only, via the Decimal alias D.
        return round(D(self.real), n)

    def __str__(self):
        if not self.imag:
            return str(self.real)
        elif self.imag > 0:
            return str(self.real) + "+" + str(self.imag) + "i"
        else:
            # A negative imag already carries its own '-' sign.
            return str(self.real) + str(self.imag) + "i"

    def pretty(self, mathdisp=True, before='', after=''):
        """Render a multi-line "math display" form (or a flat form when
        mathdisp is False) by aligning the imaginary part next to the
        real part's rendering."""
        # TODO: use before after
        # sq(i
        if mathdisp:
            # sr/si: the lines of each component's own pretty rendering.
            sr, si = self.real.pretty(
                True, before).split(), self.imag.pretty(True).split()
            wafter = ' ' * len(after)
            if self.imag:
                rstr = ["", "", "", "", ""]
                # Place the real part's lines and remember which row is
                # its visual baseline.
                if len(sr) == 1:
                    rstr[2] = sr[0]
                    base = 2
                elif len(sr) == 2:
                    rstr[1], rstr[2] = sr[0], sr[1]
                    base = 2
                elif len(sr) == 3:
                    rstr[2], rstr[3], rstr[4] = sr[0], sr[1], sr[2]
                    base = 3
                elif len(sr) == 4:
                    rstr = sr
                    base = 3
                maxlen = len(max(rstr, key=len))
                # Append the imaginary part, padded past the widest row.
                if len(si) == 1:
                    if self.imag > 0:
                        rstr[
                            base] += " " * (maxlen - len(rstr[base]) + 1) + "+" + si[0] + "i" + after
                    else:
                        rstr[base] += " " * \
                            (maxlen - len(rstr[base]) + 1) + \
                            si[0] + "i" + after
                elif len(si) == 2:
                    rstr[base - 1] += " " * \
                        (maxlen - len(rstr[base - 1]) + 1) + si[0] + wafter
                    rstr[base] += " " * \
                        (maxlen - len(rstr[base]) + 1) + si[1] + "i" + after
                elif len(si) == 3:
                    rstr[base - 1] += " " * \
                        (maxlen - len(rstr[base - 1]) + 1) + si[0] + wafter
                    rstr[base] += " " * \
                        (maxlen - len(rstr[base]) + 1) + si[1] + "i" + after
                    rstr[base + 1] += " " * \
                        (maxlen - len(rstr[base + 1]) + 1) + si[2] + wafter
                elif len(si) == 4:
                    rstr[base - 2] += " " * \
                        (maxlen - len(rstr[base - 2]) + 1) + si[0] + wafter
                    rstr[base - 1] += " " * \
                        (maxlen - len(rstr[base - 1]) + 1) + si[1] + wafter
                    rstr[base] += " " * \
                        (maxlen - len(rstr[base]) + 1) + si[2] + "i" + after
                    # NOTE(review): pads with len(rstr[base]) where the
                    # sibling branches use the row being extended
                    # (rstr[base + 1]) — possible alignment bug; left
                    # unchanged pending confirmation of intended output.
                    rstr[base + 1] += " " * \
                        (maxlen - len(rstr[base]) + 1) + si[3] + wafter
                return "\n".join(rstr).strip()
            else:
                return self.real.pretty(True, before, after)
        else:
            if not self.imag:
                return self.real.pretty(False, before, after)
            elif self.imag > 0:
                return before + \
                    self.real.pretty(
                        False) + "+" + self.imag.pretty(False) + "i" + after
            else:
                return before + \
                    self.real.pretty(
                        False) + self.imag.pretty(False) + "i" + after

    def __repr__(self):
        return "cfrac(%s,%s)" % (repr(self.real), repr(self.imag))

    def __eq__(self, other):
        try:
            return self.__complex__() == complex(other)
        except:
            # Incomparable values are simply unequal.
            return False

    def __ne__(self, other):
        try:
            return self.__complex__() != complex(other)
        except:
            return True

    def __hash__(self):
        return hash(self.__complex__())

    def __bool__(self):
        # Fix: a complex value is nonzero when EITHER component is
        # nonzero.  The previous `and` made values with exactly one
        # nonzero component falsy, which also broke __rtruediv__'s
        # division-by-zero guard (e.g. 1 / cfrac(1, 0) raised MathERROR).
        return self.real != 0 or self.imag != 0

    def __add__(self, other):
        y = cfrac(other)
        return cfrac(self.real + y.real, self.imag + y.imag)

    def __sub__(self, other):
        y = cfrac(other)
        return cfrac(self.real - y.real, self.imag - y.imag)

    def __mul__(self, other):
        y = cfrac(other)
        return cfrac(self.real * y.real - self.imag * y.imag,
                     self.real * y.imag + self.imag * y.real)

    def __truediv__(self, other):
        # Standard complex division: multiply by the conjugate over |y|^2.
        y = cfrac(other)
        div = y.real**2 + y.imag**2
        return cfrac((self.real * y.real + self.imag * y.imag) /
                     div, (self.imag * y.real - self.real * y.imag) / div)

    def __pow__(self, other):
        sign = lambda x: x and (1, -1)[x < 0]
        if self == other == 0:
            raise MathERROR
        if int(other) == other:
            # Integral exponent: repeated multiplication, then invert for
            # negative exponents.
            result = cfrac(1)
            for i in range(abs(other)):
                result *= self
            if other < 0:
                result = 1 / result
            return result
        elif other == .5:
            # Principal square root via the half-angle identities.
            return cfrac(((self.__abs__() + self.real) / 2)**.5,
                         sign(self.imag) * ((self.__abs__() - self.real) / 2)**.5)
        else:
            return pow(self, other)

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        # other - self == -(self - other)
        return self.__sub__(other).__neg__()

    def __rmul__(self, other):
        return self.__mul__(other)

    def __rtruediv__(self, other):
        if not self:
            raise MathERROR
        y = cfrac(other)
        div = self.real**2 + self.imag**2
        return cfrac((y.real * self.real + y.imag * self.imag) /
                     div, (y.imag * self.real - y.real * self.imag) / div)

    def __rpow__(self, other):
        return cfrac(other).__pow__(self)

    def __neg__(self):
        return cfrac(-self.real, -self.imag)

    def __pos__(self):
        return self

    def conjugate(self):
        return cfrac(self.real, -self.imag)

    def copy(self):
        return cfrac(self)

    def __abs__(self):
        return (self.real**2 + self.imag**2)**.5

    def phase(self):
        # Not implemented yet.
        pass

    def polar(self):
        # Not implemented yet.
        pass

    def isreal(self):
        return self.imag == 0
class dcomplex(Calculator):
"""Complex numbers use Decimal."""
def __init__(self, real=0, imag=None):
if isinstance(real, complex):
if imag:
self.real = Decimal(real.real)
self.imag = Decimal(imag)
else:
self.real = Decimal(real.real)
self.imag = Decimal(real.imag)
elif isinstance(real, cfrac):
if imag:
self.real = self._ntodec(real.real)
self.imag = self._ntodec(imag)
else:
self.real = self._ntodec(real.real)
self.imag = self._ntodec(real.imag)
else:
if imag:
self.real = self._ntodec(real)
self.imag = self._ntodec(imag)
else:
self.real = self._ntodec(real)
self.imag = Decimal(0)
def | |
preferred_lifetime: The preferred lifetime value for this roaming
host object.
pxe_lease_time: The PXE lease time value for this roaming host
object. Some hosts use PXE (Preboot Execution Environment) to
boot remotely from a server. To better manage your IP resources,
set a different lease time for PXE boot requests. You can
configure the DHCP server to allocate an IP address with a
shorter lease time to hosts that send PXE boot requests, so IP
addresses are not leased longer than necessary.A 32-bit unsigned
integer that represents the duration, in seconds, for which the
update is cached. Zero indicates that the update is not cached.
template: If set on creation, the roaming host will be created
according to the values specified in the named template.
use_bootfile: Use flag for: bootfile
use_bootserver: Use flag for: bootserver
use_ddns_domainname: Use flag for: ddns_domainname
use_deny_bootp: Use flag for: deny_bootp
use_enable_ddns: Use flag for: enable_ddns
use_ignore_dhcp_option_list_request: Use flag for:
ignore_dhcp_option_list_request
use_ipv6_ddns_domainname: Use flag for: ipv6_ddns_domainname
use_ipv6_domain_name: Use flag for: ipv6_domain_name
use_ipv6_domain_name_servers: Use flag for: ipv6_domain_name_servers
use_ipv6_enable_ddns: Use flag for: ipv6_enable_ddns
use_ipv6_options: Use flag for: ipv6_options
use_nextserver: Use flag for: nextserver
use_options: Use flag for: options
use_preferred_lifetime: Use flag for: preferred_lifetime
use_pxe_lease_time: Use flag for: pxe_lease_time
use_valid_lifetime: Use flag for: valid_lifetime
valid_lifetime: The valid lifetime value for this roaming host
object.
"""
_infoblox_type = 'roaminghost'
_fields = ['address_type', 'bootfile', 'bootserver',
'client_identifier_prepend_zero', 'comment', 'ddns_domainname',
'ddns_hostname', 'deny_bootp', 'dhcp_client_identifier',
'disable', 'enable_ddns', 'enable_pxe_lease_time', 'extattrs',
'force_roaming_hostname', 'ignore_dhcp_option_list_request',
'ipv6_client_hostname', 'ipv6_ddns_domainname',
'ipv6_ddns_hostname', 'ipv6_domain_name',
'ipv6_domain_name_servers', 'ipv6_duid', 'ipv6_enable_ddns',
'ipv6_force_roaming_hostname', 'ipv6_match_option',
'ipv6_options', 'ipv6_template', 'mac', 'match_client', 'name',
'network_view', 'nextserver', 'options', 'preferred_lifetime',
'pxe_lease_time', 'template', 'use_bootfile', 'use_bootserver',
'use_ddns_domainname', 'use_deny_bootp', 'use_enable_ddns',
'use_ignore_dhcp_option_list_request',
'use_ipv6_ddns_domainname', 'use_ipv6_domain_name',
'use_ipv6_domain_name_servers', 'use_ipv6_enable_ddns',
'use_ipv6_options', 'use_nextserver', 'use_options',
'use_preferred_lifetime', 'use_pxe_lease_time',
'use_valid_lifetime', 'valid_lifetime']
_search_for_update_fields = ['address_type', 'name', 'network_view']
_updateable_search_fields = ['address_type', 'comment',
'dhcp_client_identifier', 'ipv6_duid',
'ipv6_match_option', 'mac', 'match_client',
'name', 'network_view']
_all_searchable_fields = ['address_type', 'comment',
'dhcp_client_identifier', 'ipv6_duid',
'ipv6_match_option', 'mac', 'match_client',
'name', 'network_view']
_return_fields = ['address_type', 'extattrs', 'name', 'network_view']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'ipv6_options': Dhcpoption.from_dict,
'options': Dhcpoption.from_dict,
}
class Ruleset(InfobloxObject):
""" Ruleset: DNS Ruleset object.
Corresponds to WAPI object 'ruleset'
Represents a Ruleset object, which is a collection of rules that is
used to match domain names.
Fields:
comment: Descriptive comment about the Ruleset object.
disabled: The flag that indicates if the Ruleset object is disabled.
name: The name of this Ruleset object.
nxdomain_rules: The list of Rules assigned to this Ruleset object.
Rules can be set only when the Ruleset type is set to
"NXDOMAIN".
type: The type of this Ruleset object.
"""
_infoblox_type = 'ruleset'
_fields = ['comment', 'disabled', 'name', 'nxdomain_rules', 'type']
_search_for_update_fields = ['disabled', 'name', 'type']
_updateable_search_fields = ['comment', 'disabled', 'name', 'type']
_all_searchable_fields = ['comment', 'disabled', 'name', 'type']
_return_fields = ['comment', 'disabled', 'name', 'type']
_remap = {}
_shadow_fields = ['_ref']
_custom_field_processing = {
'nxdomain_rules': Nxdomainrule.from_dict,
}
class SamlAuthservice(InfobloxObject):
""" SamlAuthservice: SAML authentication service object.
Corresponds to WAPI object 'saml:authservice'
This object represents SAML authentication service.
Fields:
comment: The descriptive comment for the SAML authentication
service.
idp: The SAML Identity Provider to use for authentication.
name: The name of the SAML authentication service.
session_timeout: The session timeout in seconds.
"""
_infoblox_type = 'saml:authservice'
_fields = ['comment', 'idp', 'name', 'session_timeout']
_search_for_update_fields = ['name']
_updateable_search_fields = ['comment', 'name']
_all_searchable_fields = ['comment', 'name']
_return_fields = ['name']
_remap = {}
_shadow_fields = ['_ref']
class Scavengingtask(InfobloxObject):
""" Scavengingtask: DNS scavenging task object.
Corresponds to WAPI object 'scavengingtask'
The DNS scavenging task object provides information on scavenging
process state.
Fields:
action: The scavenging action.
associated_object: The reference to the object associated with the
scavenging task.
end_time: The scavenging process end time.
processed_records: The number of processed during scavenging
resource records.
reclaimable_records: The number of resource records that are allowed
to be reclaimed during the scavenging process.
reclaimed_records: The number of reclaimed during the scavenging
process resource records.
start_time: The scavenging process start time.
status: The scavenging process status. This is a read-only
attribute.
"""
_infoblox_type = 'scavengingtask'
_fields = ['action', 'associated_object', 'end_time', 'processed_records',
'reclaimable_records', 'reclaimed_records', 'start_time',
'status']
_search_for_update_fields = ['action', 'associated_object', 'status']
_updateable_search_fields = []
_all_searchable_fields = ['action', 'associated_object', 'status']
_return_fields = ['action', 'associated_object', 'status']
_remap = {}
_shadow_fields = ['_ref']
class Scheduledtask(InfobloxObject):
    """ Scheduledtask: Scheduled Task object.

    Corresponds to WAPI object 'scheduledtask'.

    This object represents a scheduled task.

    Fields:
        approval_status: The approval status of the task.
        approver: The approver of the task.
        approver_comment: The comment specified by the approver of the task.
        automatic_restart: Indicates whether there will be an automatic
            restart when the appliance executes the task.
        changed_objects: A list of objects that are affected by the task.
        dependent_tasks: If this scheduled task has dependent tasks, their
            references will be returned in this field.
        execute_now: If this field is set to True the specified task will be
            executed immediately.
        execution_details: Messages generated by the execution of the
            scheduled task after its completion.
        execution_details_type: The type of details generated by the
            execution of the scheduled task after its completion.
        execution_status: The execution status of the task.
        execution_time: The time when the appliance executed the task.
        is_network_insight_task: Indicates whether this is a Network Insight
            scheduled task.
        member: The member where this task was created.
        predecessor_task: If this scheduled task has a predecessor task set,
            its reference will be returned in this field.
        re_execute_task: If set to True, if the scheduled task is a Network
            Insight task and it failed, a new task will be cloned from this
            task and re-executed.
        scheduled_time: The time when the task is scheduled to occur.
        submit_time: The time when the task was submitted.
        submitter: The submitter of the task.
        submitter_comment: The comment specified by the submitter of the
            task.
        task_id: The task ID.
        task_type: The task type.
        ticket_number: The task ticket number.
    """
    _infoblox_type = 'scheduledtask'
    # All WAPI fields exposed by this object.
    _fields = ['approval_status', 'approver', 'approver_comment',
               'automatic_restart', 'changed_objects', 'dependent_tasks',
               'execute_now', 'execution_details', 'execution_details_type',
               'execution_status', 'execution_time', 'is_network_insight_task',
               'member', 'predecessor_task', 're_execute_task',
               'scheduled_time', 'submit_time', 'submitter',
               'submitter_comment', 'task_id', 'task_type', 'ticket_number']
    # Fields used to locate an existing object before updating it.
    _search_for_update_fields = ['approval_status', 'execution_status',
                                 'task_id']
    _updateable_search_fields = ['approval_status', 'scheduled_time']
    _all_searchable_fields = ['approval_status', 'approver',
                              'execution_status', 'execution_time', 'member',
                              'scheduled_time', 'submit_time', 'submitter',
                              'task_id']
    # Fields requested from WAPI by default.
    _return_fields = ['approval_status', 'execution_status', 'task_id']
    _remap = {}
    _shadow_fields = ['_ref']
    # Converters applied during deserialization: raw WAPI dicts become
    # typed sub-objects.
    _custom_field_processing = {
        'changed_objects': Changedobject.from_dict,
    }
class Search(InfobloxObject):
    """ Search: Search object.

    Corresponds to WAPI object 'search'.

    The search object is used to perform global searches for multiple
    object types in the database. This object contains only search
    parameters and returns objects that match the search criteria. The
    returned objects are base objects for the respective object types.

    Search is the only allowed operation for search objects.

    NOTE: Only one of the following can be used each time: 'address',
    'mac_address', 'duid' or 'fqdn'.

    Fields:
    """
    _infoblox_type = 'search'
    # This object carries no declared fields; search parameters are passed
    # directly to WAPI at query time.
    _fields = []
    _search_for_update_fields = []
    _updateable_search_fields = []
    _all_searchable_fields = []
    _return_fields = []
    _remap = {}
    _shadow_fields = ['_ref']
class ASharedRecordBase(InfobloxObject):
    """Version-dispatching base for DNS shared address records.

    Provides the hooks that select the concrete class for IPv4 vs IPv6
    shared records.
    """

    @classmethod
    def get_v4_class(cls):
        # IPv4 variant: shared A record.
        return ASharedRecord

    @classmethod
    def get_v6_class(cls):
        # IPv6 variant: shared AAAA record.
        return AAAASharedRecord
class ASharedRecord(ASharedRecordBase):
""" ASharedRecord: DNS Shared A record object.
Corresponds to WAPI object 'sharedrecord:a'
A shared A (address) record is similar to a regular A record. It
maps a domain name to an IPv4 address. The difference is that a
shared A record should be added to a shared record group. If the
shared record group is associated with other zones, the shared A
record is shared among these zones.
Fields:
comment: Comment for this shared record; maximum 256 characters.
disable: Determines if this shared record is disabled or not. False
means that the record is enabled.
dns_name: The name for this shared record in punycode format.
extattrs: Extensible attributes associated with the object.For valid
values for extensible attributes, see the following information.
ipv4addr: The IPv4 Address of the shared record.
name: Name for this shared record. This value can be in unicode
format.
shared_record_group: The name of the shared record group in which
the record resides.
ttl: The Time To Live (TTL) value for this shared record. A 32-bit
unsigned integer that represents the duration, in seconds, for
which the shared record is | |
# gh_stars: 1-10  (repository-scraper metadata artifact, not executable code)
#!/usr/bin/env python
import sys
import logging
import argparse
import pandas as pd
from cobindability.BED import compare_bed,peakwise_ovcoef,cooccur_peak,srog_peak
#from cobindability.ovpmi import cal_pmi
from cobindability.bw import bigwig_corr
from cobindability.ovstat import ov_stats
from cobindability import version
from cobindability.ovbootstrap import bootstrap_coef,bootstrap_npmi
from cobindability.coefcal import ov_coef, ov_jaccard, ov_ss, ov_sd, pmi_value, npmi_value
__author__ = "<NAME>"
__copyright__ = "Copyleft"
__credits__ = []
__license__ = "GPL"
__version__ = version.version
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def main():
pd.set_option('display.float_format', lambda x: '%.4f' % x)
general_help = "**cobind: collocation analyses of genomic regions**"
bed_help = "Genomic regions in BED, BED-like or bigBed format. The BED-like format includes:\
'bed3', 'bed4', 'bed6', 'bed12', 'bedgraph', 'narrowpeak', 'broadpeak', 'gappedpeak'.\
BED and BED-like format can be plain text, compressed (.gz, .z, .bz, .bz2, .bzip2) \
or remote (http://, https://, ftp://) files. Do not compress BigBed foramt.\
BigBed file can also be a remote file."
# sub commands and help.
commands = {
'overlap' : "Calculate the overlapping coefficient (O) between two sets of genomic regions. O = |A and B| / (|A|*|B|)**0.5",
'jaccard' : "Calculate the Jaccard similarity coefficient (J) between two sets of genomic regions. J = |A and B| / |A or B|",
'dice' : "Calculate the Sørensen–Dice coefficient (SD) between two sets of genomic regions. SD = 2*|A and B| / (|A| + |B|)",
'simpson' : "Calculate the Szymkiewicz–Simpson coefficient (SS) between two sets of genomic regions. SS = |A and B| / min(|A|, |B|)",
'pmi' : "Calculate the pointwise mutual information (PMI) between two sets of genomic regions. PMI = log(p(|A and B|)) - log(p(|A|)) - log(p(|B|))",
'npmi' : "Calculate the normalized pointwise mutual information (NPMI) between two sets of genomic regions. NPMI = log(p(|A|)*p(|B|)) / log(p(|A and B|)) - 1",
'cooccur' : "Evaluate if two sets of genomic regions are significantly overlapped in given background regions.",
'covary' : "Calculate the covariance (Pearson, Spearman and Kendall coefficients) of binding intensities between two sets of genomic regions.",
'srog' : "Report the code of Spatial Relation Of Genomic (SROG) regions. SROG codes include 'disjoint','touch','equal','overlap','contain','within'.",
'stat' : "Wrapper function. Report basic statistics of genomic regions, and calculate overlapping measurements, including \"O\", \"J\", \"SD\", \"SS\", \"PMI\", \"NPMI\", without bootstrap resampling or generating peakwise measurements.",
}
#create parse
parser = argparse.ArgumentParser(description=general_help, epilog='', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--version', action='version', version='%s %s' % ('cobind', __version__))
# create sub-parser
sub_parsers = parser.add_subparsers(help='Sub-command description:')
parser_overlap = sub_parsers.add_parser('overlap', help=commands['overlap'])
parser_jaccard = sub_parsers.add_parser('jaccard', help=commands['jaccard'])
parser_dice = sub_parsers.add_parser('dice', help=commands['dice'])
parser_simpson = sub_parsers.add_parser('simpson', help=commands['simpson'])
parser_pmi = sub_parsers.add_parser('pmi', help=commands['pmi'])
parser_npmi = sub_parsers.add_parser('npmi', help=commands['npmi'])
parser_cooccur = sub_parsers.add_parser('cooccur', help=commands['cooccur'])
parser_covary = sub_parsers.add_parser('covary', help=commands['covary'])
parser_srog = sub_parsers.add_parser('srog', help=commands['srog'])
parser_stat = sub_parsers.add_parser('stat', help=commands['stat'])
# create the parser for the "overlap" sub-command
parser_overlap.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_overlap.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_overlap.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_overlap.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_overlap.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_overlap.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_overlap.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "jaccard" sub-command
parser_jaccard.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_jaccard.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_jaccard.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_jaccard.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_jaccard.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_jaccard.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_jaccard.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "dice" sub-command
parser_dice.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_dice.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_dice.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_dice.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_dice.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_dice.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_dice.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "simpson" sub-command
parser_simpson.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_simpson.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_simpson.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_simpson.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_simpson.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_simpson.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_simpson.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "pmi" sub-command
parser_pmi.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_pmi.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_pmi.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_pmi.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_pmi.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_pmi.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_pmi.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "npmi" sub-command
parser_npmi.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_npmi.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_npmi.add_argument('-n', '--ndraws', type=int, dest="iter", default = 20, help="Times of resampling to estimate confidence intervals. Set to '0' to turn off resampling.(default: %(default)d)")
parser_npmi.add_argument('-f', '--fraction', type=int, dest="subsample", default = 0.75, help="Resampling fraction. (default: %(default).2f)")
parser_npmi.add_argument('-b', '--background', type=int, dest="bgsize", default = 1.4e9, help="The size of the cis-regulatory genomic regions. This is about 1.4Gb For the human genome. (default: %(default)d)")
parser_npmi.add_argument("-o", "--save", action="store_true", help="If set, will save peak-wise coefficients to files (\"input_A_peakwise_scores.tsv\" and \"input_B_peakwise_scores.tsv\").")
parser_npmi.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "cooccur" sub-command
parser_cooccur.add_argument("bed1", type=str, metavar ="input_A.bed",help=bed_help)
parser_cooccur.add_argument("bed2", type=str, metavar ="input_B.bed",help=bed_help)
parser_cooccur.add_argument("bed3", type=str, metavar ="background.bed",help="Genomic regions as the background (e.g., all promoters, all enhancers).")
parser_cooccur.add_argument("output", type=str, metavar ="output.tsv",help="For each genomic region in the \"background.bed\" file, add another column indicating if this region is \"input_A specific (i.e., A+B-)\", \"input_B specific (i.e., A-B+)\", \"co-occur (i.e., A+B+)\" or \"neither (i.e, A-B-)\". ")
parser_cooccur.add_argument('--ncut', type=int, dest="n_cut", default = 1, help="The minimum overlap size. (default: %(default)d)")
parser_cooccur.add_argument('--pcut', type=float, dest="p_cut", default = 0.0, help="The minimum overlap percentage. (default: %(default)f)")
parser_cooccur.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "covary" sub-command
parser_covary.add_argument("bed1", type=str, metavar="input_A.bed",help=bed_help)
parser_covary.add_argument("bw1", type=str, metavar="input_A.bw",help="Input bigWig file matched to 'input_A.bed'. BigWig file can be local or remote. Note: the chromosome IDs must be consistent between BED and bigWig files.")
parser_covary.add_argument("bed2", type=str, metavar="input_B.bed",help=bed_help)
parser_covary.add_argument("bw2", type=str, metavar="input_B.bw",help="Input bigWig file matched to 'input_B.bed'. BigWig file can be local or remote. Note: the chromosome IDs must be consistent between BED and bigWig files.")
parser_covary.add_argument("output", type=str,metavar="output_prefix", help="Prefix of output files. Three files will be generated: \"output_prefix_bedA_unique.tsv\" (input_A.bed specific regions and their bigWig scores), \"output_prefix_bedB_unique.tsv\" (input_B.bed specific regions and their bigWig scores), and \"output_prefix_common.tsv\"(input_A.bed and input_B.bed overlapped regions and their bigWig scores).")
parser_covary.add_argument("--na", type=str, dest="na_label", default = 'nan', help="Symbols used to represent the missing values. (default: %(default)s)")
parser_covary.add_argument('--type', type=str, dest="score_type", choices=['mean', 'min', 'max'], default = 'mean', help="Summary statistic score type ('min','mean' or 'max') of a genomic region. (default: %(default)s)")
parser_covary.add_argument('--topx', type=float, dest="top_X", default = 1.0, help="Fraction (if 0 < top_X <= 1) or number (if top_X > 1) of genomic regions used to calculate Pearson, Spearman, Kendall's correlations. If TOP_X == 1 (i.e., 100%%), all the genomic regions will be used to calculate correlations. (default: %(default)s)")
parser_covary.add_argument('--min_sig', type=float, dest="min_signal", default = 0, help="Genomic region with summary statistic score <= this will be removed. (default: %(default)s)")
parser_covary.add_argument("--exact", dest="exact", action="store_true", help="If set, calculate the \"exact\" summary statistic score rather than \"zoom-level\" score for each genomic region.")
parser_covary.add_argument("--keepna", dest="keepna", action="store_true", help="If set, a genomic region will be kept even it does not have summary statistical score in either of the two bigWig files. This flag only affects the output TSV files.")
parser_covary.add_argument("-d", "--debug",action="store_true", help="Print detailed information for debugging.")
# create the parser for the "srog" sub-command
parser_srog.add_argument("bed1", type=str, metavar ="input_A.bed",help="Genomic regions in BED, BED-like or bigBed format. If 'name' (the 4th column) is not provided, the default name is \"chrom:start-end\". If strand (the 6th column) is not provided, the default strand is \"+\".")
parser_srog.add_argument("bed2", type=str, metavar ="input_B.bed",help="Genomic regions in BED, BED-like or bigBed format. | |
# h1/api/provider_project_agent_api.py  (gh_stars: 0 — scraper metadata artifact)
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.agent import Agent
from h1.model.agent_credential import AgentCredential
from h1.model.enabled_service import EnabledService
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.metric import Metric
from h1.model.point import Point
from h1.model.provider_agent_resource import ProviderAgentResource
from h1.model.provider_agent_resource_event import ProviderAgentResourceEvent
from h1.model.provider_project_agent_create import ProviderProjectAgentCreate
from h1.model.provider_project_agent_credential_patch import ProviderProjectAgentCredentialPatch
from h1.model.provider_project_agent_transfer import ProviderProjectAgentTransfer
from h1.model.provider_project_agent_update import ProviderProjectAgentUpdate
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
class ProviderProjectAgentApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when the caller does
        # not supply one; the same client is reused for every endpoint.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # Closure implementing the "create agent" operation; wrapped in an
        # _Endpoint below so it is exposed as
        # ``api.provider_project_agent_create``.
        def __provider_project_agent_create(
            self,
            project_id,
            location_id,
            provider_project_agent_create,
            **kwargs
        ):
            """Create provider/agent  # noqa: E501

            Create agent  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.provider_project_agent_create(project_id, location_id, provider_project_agent_create, async_req=True)
            >>> result = thread.get()

            Args:
                project_id (str): Project Id
                location_id (str): Location Id
                provider_project_agent_create (ProviderProjectAgentCreate):

            Keyword Args:
                x_idempotency_key (str): Idempotency key. [optional]
                x_dry_run (str): Dry run. [optional]
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                Agent
                If the method is called asynchronously, returns the request
                thread.
            """
            # Normalise the optional transport flags to their defaults before
            # delegating to the shared request machinery.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            # Positional parameters are folded into kwargs for the endpoint.
            kwargs['project_id'] = \
                project_id
            kwargs['location_id'] = \
                location_id
            kwargs['provider_project_agent_create'] = \
                provider_project_agent_create
            return self.call_with_http_info(**kwargs)

        # Endpoint metadata consumed by ApiClient: routing, parameter
        # placement/typing and (de)serialization settings.
        self.provider_project_agent_create = _Endpoint(
            settings={
                'response_type': (Agent,),
                'auth': [
                    'BearerAuth'
                ],
                'endpoint_path': '/provider/{locationId}/project/{projectId}/agent',
                'operation_id': 'provider_project_agent_create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'project_id',
                    'location_id',
                    'provider_project_agent_create',
                    'x_idempotency_key',
                    'x_dry_run',
                ],
                'required': [
                    'project_id',
                    'location_id',
                    'provider_project_agent_create',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'project_id':
                        (str,),
                    'location_id':
                        (str,),
                    'provider_project_agent_create':
                        (ProviderProjectAgentCreate,),
                    'x_idempotency_key':
                        (str,),
                    'x_dry_run':
                        (str,),
                },
                # Python name -> wire name mapping.
                'attribute_map': {
                    'project_id': 'projectId',
                    'location_id': 'locationId',
                    'x_idempotency_key': 'x-idempotency-key',
                    'x_dry_run': 'x-dry-run',
                },
                # Where each parameter travels in the HTTP request.
                'location_map': {
                    'project_id': 'path',
                    'location_id': 'path',
                    'provider_project_agent_create': 'body',
                    'x_idempotency_key': 'header',
                    'x_dry_run': 'header',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__provider_project_agent_create
        )
        # Closure implementing the "create agent credential" operation;
        # exposed as ``api.provider_project_agent_credential_create``.
        def __provider_project_agent_credential_create(
            self,
            project_id,
            location_id,
            agent_id,
            agent_credential,
            **kwargs
        ):
            """Create provider/agent.credential  # noqa: E501

            Create provider/agent.credential  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.provider_project_agent_credential_create(project_id, location_id, agent_id, agent_credential, async_req=True)
            >>> result = thread.get()

            Args:
                project_id (str): Project Id
                location_id (str): Location Id
                agent_id (str): Agent Id
                agent_credential (AgentCredential):

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                AgentCredential
                If the method is called asynchronously, returns the request
                thread.
            """
            # Normalise the optional transport flags to their defaults before
            # delegating to the shared request machinery.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            # Positional parameters are folded into kwargs for the endpoint.
            kwargs['project_id'] = \
                project_id
            kwargs['location_id'] = \
                location_id
            kwargs['agent_id'] = \
                agent_id
            kwargs['agent_credential'] = \
                agent_credential
            return self.call_with_http_info(**kwargs)

        # Endpoint metadata consumed by ApiClient: routing, parameter
        # placement/typing and (de)serialization settings.
        self.provider_project_agent_credential_create = _Endpoint(
            settings={
                'response_type': (AgentCredential,),
                'auth': [
                    'BearerAuth'
                ],
                'endpoint_path': '/provider/{locationId}/project/{projectId}/agent/{agentId}/credential',
                'operation_id': 'provider_project_agent_credential_create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'project_id',
                    'location_id',
                    'agent_id',
                    'agent_credential',
                ],
                'required': [
                    'project_id',
                    'location_id',
                    'agent_id',
                    'agent_credential',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'project_id':
                        (str,),
                    'location_id':
                        (str,),
                    'agent_id':
                        (str,),
                    'agent_credential':
                        (AgentCredential,),
                },
                # Python name -> wire name mapping.
                'attribute_map': {
                    'project_id': 'projectId',
                    'location_id': 'locationId',
                    'agent_id': 'agentId',
                },
                # Where each parameter travels in the HTTP request.
                'location_map': {
                    'project_id': 'path',
                    'location_id': 'path',
                    'agent_id': 'path',
                    'agent_credential': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__provider_project_agent_credential_create
        )
        # Closure implementing the "delete agent credential" operation;
        # exposed as ``api.provider_project_agent_credential_delete``.
        def __provider_project_agent_credential_delete(
            self,
            project_id,
            location_id,
            agent_id,
            credential_id,
            **kwargs
        ):
            """Delete provider/agent.credential  # noqa: E501

            Delete provider/agent.credential  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.provider_project_agent_credential_delete(project_id, location_id, agent_id, credential_id, async_req=True)
            >>> result = thread.get()

            Args:
                project_id (str): Project Id
                location_id (str): Location Id
                agent_id (str): Agent Id
                credential_id (str): credentialId

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                Agent
                If the method is called asynchronously, returns the request
                thread.
            """
            # Normalise the optional transport flags to their defaults before
            # delegating to the shared request machinery.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            # Positional parameters are folded into kwargs for the endpoint.
            kwargs['project_id'] = \
                project_id
            kwargs['location_id'] = \
                location_id
            kwargs['agent_id'] = \
                agent_id
            kwargs['credential_id'] = \
                credential_id
            return self.call_with_http_info(**kwargs)

        # Endpoint metadata consumed by ApiClient: routing, parameter
        # placement/typing and (de)serialization settings.
        self.provider_project_agent_credential_delete = _Endpoint(
            settings={
                'response_type': (Agent,),
                'auth': [
                    'BearerAuth'
                ],
                'endpoint_path': '/provider/{locationId}/project/{projectId}/agent/{agentId}/credential/{credentialId}',
                'operation_id': 'provider_project_agent_credential_delete',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'project_id',
                    'location_id',
                    'agent_id',
                    'credential_id',
                ],
                'required': [
                    'project_id',
                    'location_id',
                    'agent_id',
                    'credential_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'project_id':
                        (str,),
                    'location_id':
                        (str,),
                    'agent_id':
                        (str,),
                    'credential_id':
                        (str,),
                },
                # Python name -> wire name mapping.
                'attribute_map': {
                    'project_id': 'projectId',
                    'location_id': 'locationId',
                    'agent_id': 'agentId',
                    'credential_id': 'credentialId',
                },
                # All parameters are path segments; DELETE carries no body.
                'location_map': {
                    'project_id': 'path',
                    'location_id': 'path',
                    'agent_id': 'path',
                    'credential_id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__provider_project_agent_credential_delete
        )
def __provider_project_agent_credential_get(
self,
project_id,
location_id,
agent_id,
credential_id,
**kwargs
):
"""Get provider/agent.credential # noqa: E501
Get provider/agent.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.provider_project_agent_credential_get(project_id, location_id, agent_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
agent_id (str): Agent Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): response data without head status
code | |
negotiation are set on the request which will be responded to with the
guarded resource.
"""
renders = []
portal = self.createPortal(
lambda: SillyRealm(
anonymousAvatarFactory=lambda: InspectfulPage(renders)))
channel = self.createGuard(portal)
request = channel.makeFakeRequest(
'%s/?foo=1&bar=2' % self.getGuardPath()).followAllRedirects()
self.assertEquals(request.written.getvalue(), '')
self.assertEqual(
renders, [({'foo': ['1'], 'bar': ['2']},
None,
None,
'GET',
{'host': 'fake.com'})])
def test_loginRestoresRequestParameters(self):
    """
    After login has succeeded, the GET arguments, POST arguments, body,
    method, and headers from the request which triggered the login are set
    on the request which will be responded to with the guarded resource.
    """
    renders = []
    portal = self.createPortal(
        lambda: SillyRealm(
            authenticatedAvatarFactory=lambda: InspectfulPage(renders)))
    portal.registerChecker(
        InMemoryUsernamePasswordDatabaseDontUse(test='test'),
        IUsernamePassword)
    channel = self.createGuard(portal)
    # Negotiate a session.
    request = channel.makeFakeRequest(self.getGuardPath())
    request = request.followAllRedirects()
    # Pretend to be a resource before login which requires login and saves
    # request state.
    request.session.args = {'foo': ['1'], 'bar': ['2']}
    request.session.fields = None
    request.session.method = 'GET'
    request.session.content = None
    request.session.received_headers = {'host': 'fake.com',
                                        'extra': 'bar'}
    # Perform the login.  The password must match the 'test' credentials
    # registered with the checker above for the login to succeed.
    request = channel.makeFakeRequest(
        self.getGuardPath() + '/__login__?username=test&password=test')
    request = request.followAllRedirects()
    self.assertEqual(request.written.getvalue(), '')
    self.assertEqual(
        renders, [({'foo': ['1'], 'bar': ['2']},
                   None,
                   None,
                   'GET',
                   {'host': 'fake.com', 'extra': 'bar'})])
def test_oldRequestParametersIgnored(self):
    """
    The request parameters from the initial session negotiation request are
    I{not} set on the login request.
    """
    renders = []
    portal = self.createPortal(
        lambda: SillyRealm(
            authenticatedAvatarFactory=lambda: InspectfulPage(renders)))
    portal.registerChecker(
        InMemoryUsernamePasswordDatabaseDontUse(test='test'),
        IUsernamePassword)
    channel = self.createGuard(portal)
    # Negotiate a session using a request with some parameters.
    request = channel.makeFakeRequest(
        self.getGuardPath() + "?foo=bar&bar=baz")
    request = request.followAllRedirects()
    # Perform the login; the password must match the checker registered
    # above, and the same value must appear in the expected render args.
    request = channel.makeFakeRequest(
        self.getGuardPath() + '/__login__?username=test&password=test')
    request = request.followAllRedirects()
    self.assertEqual(request.written.getvalue(), '')
    # Only the login request's own parameters are visible to the avatar.
    self.assertEqual(
        renders, [({'username': ['test'], 'password': ['test']},
                   None,
                   '',
                   'GET',
                   {'host': 'fake.com'})])
def testNoSlash(self):
    """URL-based sessions do not fail even if there is no slash after the session key."""
    p = self.createPortal()
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/' % self.getGuardPath(), requestClass=FakeHTTPRequest_noCookies).followAllRedirects()
    # We should have the final resource, which is an anonymous resource
    self.assertEqual(req.written.getvalue(), "No")
    # The redirected path embeds the session key in the URL.
    self.assertTrue(req.path.startswith('%s/%s' % (self.getGuardPath(), guard.SESSION_KEY)))
    self.assertTrue(req.path.endswith('/'))
    # Re-request the same URL with the trailing slash stripped.
    req = chan.makeFakeRequest(req.path[:-1], requestClass=FakeHTTPRequest_noCookies).followAllRedirects()
    # it should work just as well as with the slash
    # (not actually the same page, but SillyPage always says the same thing here)
    self.assertEqual(req.written.getvalue(), "No")
def testTrailingSlashMatters_noCookies(self):
    """
    With URL-based (cookieless) sessions, a path with and without a
    trailing slash after the session key names two different resources.
    """
    class TrailingSlashPage(rend.Page):
        # Accumulate each traversed segment into self.original so the
        # rendered output reveals the exact path that was resolved.
        def locateChild(self, context, segments):
            return self.__class__('%s/%s' % (self.original, segments[0])), segments[1:]
    class TrailingSlashAvatar(TrailingSlashPage):
        def renderHTTP(self, context):
            return 'Authenticated %s' % self.original
    class TrailingSlashAnonymous(TrailingSlashPage):
        def renderHTTP(self, ctx):
            return 'Anonymous %s' % self.original
    class TrailingSlashRealm:
        implements(IRealm)
        def __init__(self, path):
            self.path = path
        def requestAvatar(self, avatarId, mind, *interfaces):
            if avatarId == ANONYMOUS:
                return inevow.IResource, TrailingSlashAnonymous(self.path), lambda: None
            else:
                return inevow.IResource, TrailingSlashAvatar(self.path), lambda: None
    p = self.createPortal(realmFactory=lambda : TrailingSlashRealm(self.getGuardPath()))
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/' % self.getGuardPath(), requestClass=FakeHTTPRequest_noCookies).followAllRedirects()
    # We should have the final resource, which is an anonymous resource
    self.assertEqual(req.written.getvalue(), "Anonymous %s/" % self.getGuardPath())
    # now try requesting just the guard path
    self.assertTrue(req.path.startswith('%s/%s' % (self.getGuardPath(), guard.SESSION_KEY)))
    self.assertTrue(req.path.endswith('/'))
    req = chan.makeFakeRequest(req.path[:-1], requestClass=FakeHTTPRequest_noCookies).followAllRedirects()
    # it should no longer have the trailing slash
    self.assertEqual(req.written.getvalue(), "Anonymous %s" % self.getGuardPath())
def testTrailingSlashMatters_withCookies(self):
    """
    With cookie-based sessions, a path with and without a trailing slash
    still names two different resources.
    """
    # omitting the trailing slash when not using session keys can
    # only be done when the guard is not the root resource
    if not self.guardPath:
        return
    class TrailingSlashPage(rend.Page):
        # Accumulate each traversed segment into self.original so the
        # rendered output reveals the exact path that was resolved.
        def locateChild(self, context, segments):
            return self.__class__('%s/%s' % (self.original, segments[0])), segments[1:]
    class TrailingSlashAvatar(TrailingSlashPage):
        def renderHTTP(self, context):
            return 'Authenticated %s' % self.original
    class TrailingSlashAnonymous(TrailingSlashPage):
        def renderHTTP(self, ctx):
            return 'Anonymous %s' % self.original
    class TrailingSlashRealm:
        implements(IRealm)
        def __init__(self, path):
            self.path = path
        def requestAvatar(self, avatarId, mind, *interfaces):
            if avatarId == ANONYMOUS:
                return inevow.IResource, TrailingSlashAnonymous(self.path), lambda: None
            else:
                return inevow.IResource, TrailingSlashAvatar(self.path), lambda: None
    p = self.createPortal(realmFactory=lambda : TrailingSlashRealm(self.getGuardPath()))
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/' % self.getGuardPath()).followAllRedirects()
    # We should have the final resource, which is an anonymous resource
    self.assertEqual(req.written.getvalue(), "Anonymous %s/" % self.getGuardPath())
    req = chan.makeFakeRequest('%s' % self.getGuardPath()).followAllRedirects()
    # We should have the final resource, which is an anonymous resource
    self.assertEqual(req.written.getvalue(), "Anonymous %s" % self.getGuardPath())
def testPlainTextCookie(self):
    """Cookies from non-SSL sites have no secure attribute."""
    p = self.createPortal()
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath())
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    secure = kw.get('secure', None)
    self.assertFalse(secure)
def testPlainTextCookie_evenWithSecureCookies(self):
    """Cookies from non-SSL sites have no secure attribute, even if secureCookie is true."""
    p = self.createPortal()
    chan = self.createGuard(p)
    gu = getGuard(chan)
    # The point of this test is that secureCookies is ON; the original
    # set it to False, which merely duplicated testPlainTextCookie.
    gu.secureCookies = True
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath())
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    secure = kw.get('secure', None)
    self.assertFalse(secure)
def testSecureCookie_secureCookies(self):
    """Cookies from SSL sites have secure=True."""
    p = self.createPortal()
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath(),
                               requestClass=FakeHTTPRequest_forceSSL)
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    secure = kw.get('secure', None)
    self.assertTrue(secure)
def testSecureCookie_noSecureCookies(self):
    """Cookies from SSL sites do not have secure=True if secureCookies is false."""
    p = self.createPortal()
    chan = self.createGuard(p)
    gu = getGuard(chan)
    gu.secureCookies = False
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath(),
                               requestClass=FakeHTTPRequest_forceSSL)
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    secure = kw.get('secure', None)
    self.assertFalse(secure)
def testPersistentCookie_persistentCookies(self):
    """Cookies from sites are saved to disk because SessionWrapper.persistentCookies=True."""
    p = self.createPortal()
    chan = self.createGuard(p)
    gu = getGuard(chan)
    gu.persistentCookies = True
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath(),
                               requestClass=FakeHTTPRequest)
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    expires = kw.get('expires', None)
    # A persistent cookie must carry an expiry time.
    self.assertIsNot(expires, None)
def testPersistentCookie_noPersistentCookies(self):
    """Cookies from sites are not saved to disk because SessionWrapper.persistentCookies=False."""
    p = self.createPortal()
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath(),
                               requestClass=FakeHTTPRequest)
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    expires = kw.get('expires', None)
    # A session-only cookie carries no expiry time.
    self.assertIs(expires, None)
def testCookiePath(self):
    """Cookies get the correct path setting."""
    p = self.createPortal()
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/xxx/yyy/' % self.getGuardPath())
    self.assertEqual(len(req._cookieCache.values()), 1, "Bad number of cookies in response.")
    cookie, a, kw = req._cookieCache.values()[0]
    path = kw.get('path', None)
    wanted = self.getGuardPath()
    # A guard mounted at the root has an empty guard path but the cookie
    # must still be scoped to '/'.
    if wanted == '':
        wanted = '/'
    self.assertEqual(path, wanted)
def test_defaultCookieDomain(self):
    """
    No domain restriction is set on a cookie by default.
    """
    channel = self.createGuard(self.createPortal())
    request = channel.makeFakeRequest('%s/abc' % (self.getGuardPath(),))
    # Exactly one cookie is issued; inspect its keyword arguments.
    _cookie, _args, kwargs = request._cookieCache.values()[0]
    self.assertEqual(kwargs['domain'], None)
def test_specifiedCookieDomain(self):
    """
    The domain restriction defined by
    L{SessionWrapper.cookieDomainForRequest} is set on the cookie.
    """
    seenRequests = []
    class SpecialSessionWrapper(guard.SessionWrapper):
        # Record every request we are consulted about, then pin the
        # cookie to a fixed domain.
        def cookieDomainForRequest(self, request):
            seenRequests.append(request)
            return 'example.com'
    self.wrapperFactory = SpecialSessionWrapper
    channel = self.createGuard(self.createPortal())
    request = channel.makeFakeRequest('%s/abc' % (self.getGuardPath(),))
    _cookie, _args, kwargs = request._cookieCache.values()[0]
    self.assertEqual(kwargs['domain'], 'example.com')
    self.assertEqual(seenRequests, [request])
def testLoginExtraPath(self):
    """Path segments after __login__ are restored after a successful login."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    # Password must match the 'test' credentials registered above.
    req = chan.makeFakeRequest('%s/__login__/sub/path?username=test&password=test' % self.getGuardPath()).followAllRedirects()
    self.assertEqual(req.written.getvalue(), "Yes")
    self.assertEqual(req.path, '%s/sub/path' % self.getGuardPath())
def testLoginExtraPath_withSlash(self):
    """A trailing slash on the extra login path is preserved after login."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    # Password must match the 'test' credentials registered above.
    req = chan.makeFakeRequest('%s/__login__/sub/path/?username=test&password=test' % self.getGuardPath()).followAllRedirects()
    self.assertEqual(req.written.getvalue(), "Yes")
    self.assertEqual(req.path, '%s/sub/path/' % self.getGuardPath())
def testLogoutExtraPath(self):
    """Path segments after __logout__ are restored after logging out."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    # Log in first (password matches the checker registered above).
    req = chan.makeFakeRequest('%s/__login__?username=test&password=test' % self.getGuardPath()).followAllRedirects()
    self.assertEqual(req.written.getvalue(), "Yes")
    # Log out
    req2 = chan.makeFakeRequest("%s/__logout__/sub/path" % self.getGuardPath()).followRedirect()
    self.assertEqual(req2.written.getvalue(), "No")
    self.assertEqual(req2.path, '%s/sub/path' % self.getGuardPath())
def testLogoutExtraPath_withSlash(self):
    """A trailing slash on the extra logout path is preserved after logout."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    # Log in first (password matches the checker registered above).
    req = chan.makeFakeRequest('%s/__login__?username=test&password=test' % self.getGuardPath()).followAllRedirects()
    self.assertEqual(req.written.getvalue(), "Yes")
    # Log out
    req2 = chan.makeFakeRequest("%s/__logout__/sub/path/" % self.getGuardPath()).followRedirect()
    self.assertEqual(req2.written.getvalue(), "No")
    self.assertEqual(req2.path, '%s/sub/path/' % self.getGuardPath())
def testGetLoggedInRoot_getLogin(self):
    """A GET login lands on the avatar produced by GetLoggedInRealm."""
    p = self.createPortal(realmFactory=GetLoggedInRealm)
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    # Password must match the 'test' credentials registered above.
    req = chan.makeFakeRequest('%s/__login__?username=test&password=test' % self.getGuardPath()).followAllRedirects()
    self.assertEqual(req.written.getvalue(), "GetLoggedInAvatar")
def testGetLoggedInRoot_httpAuthLogin(self):
    """Repeated HTTP-auth requests reuse a single session."""
    p = self.createPortal(realmFactory=GetLoggedInRealm)
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'), IUsernamePassword)
    chan = self.createGuard(p)
    for x in range(4):
        req = chan.makeFakeRequest('%s/' % self.getGuardPath(), "test", "test")
        self.assertEqual(req.written.getvalue(), "GetLoggedInAvatar")
    # All four authenticated requests share one session.
    self.assertEqual(len(self.sessions), 1)
def testErrorPage_httpAuth(self):
    """Failed HTTP Auth results in a 403 error."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'),
                      IUsernamePassword)
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s' % self.getGuardPath(),
                               "test", "invalid-password")
    # No redirect is issued; the failure is reported in place.
    self.assertEqual(req.headers.get('location', None), None)
    self.assertEqual(req.code, 403)
    self.assertEqual(req.written.getvalue(),
                     '<html><head><title>Forbidden</title></head>'
                     +'<body><h1>Forbidden</h1>Request was forbidden.'
                     +'</body></html>')
    self.assertEqual(req.path, self.getGuardPath())
def testErrorPage_httpAuth_deep(self):
    """Failed HTTP Auth on a nested path also results in a 403 error."""
    p = self.createPortal()
    p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'),
                      IUsernamePassword)
    chan = self.createGuard(p)
    req = chan.makeFakeRequest('%s/quux/thud' % self.getGuardPath(),
                               "test", "invalid-password")
    # No redirect is issued; the failure is reported in place.
    self.assertEqual(req.headers.get('location', None), None)
    self.assertEqual(req.code, 403)
    self.assertEqual(req.written.getvalue(),
                     '<html><head><title>Forbidden</title></head>'
                     +'<body><h1>Forbidden</h1>Request was forbidden.'
                     +'</body></html>')
    # The originally requested deep path is preserved.
    self.assertEqual(req.path, '%s/quux/thud' % self.getGuardPath())
def testErrorPage_getLogin(self):
"""Failed normal login results in anonymous view of the same page."""
p = self.createPortal()
p.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(test='test'),
IUsernamePassword)
chan = self.createGuard(p)
req = chan.makeFakeRequest(
'%s/__login__?username=test&password=<PASSWORD>'
% self.getGuardPath()).followAllRedirects()
self.assertEquals(req.written.getvalue(), 'No')
wanted = self.getGuardPath()
if wanted == | |
0.53, 0.2))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.52, 0.59, 0.74))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(17)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(20)
gt(-128.0,17.5)
fd(19)
color((0.09, 0.22, 0.5))
fd(2)
color((0.29, 0.34, 0.39))
fd(1)
color((0.98, 0.74, 0.0))
fd(17)
color((0.11, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.11, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.35, 0.37, 0.35))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.49, 0.45, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(5)
color((0.36, 0.38, 0.35))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.15, 0.26, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(3)
color((0.92, 0.93, 0.95))
fd(1)
color((0.09, 0.22, 0.5))
fd(8)
color((0.19, 0.28, 0.44))
fd(1)
color((0.92, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(31)
color((0.8, 0.64, 0.1))
fd(1)
color((0.17, 0.27, 0.45))
fd(1)
color((0.09, 0.22, 0.5))
fd(7)
color((0.13, 0.26, 0.52))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.19, 0.28, 0.44))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(6)
color((0.97, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(21)
color((0.37, 0.38, 0.34))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.12, 0.25, 0.52))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.11, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(17)
color((0.3, 0.35, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(19)
gt(-128.0,16.5)
fd(18)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(17)
color((0.4, 0.4, 0.33))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.2, 0.31, 0.56))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.22, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.31, 0.35, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(7)
color((0.51, 0.47, 0.26))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.16, 0.26, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.33, 0.42, 0.63))
fd(1)
color((0.09, 0.22, 0.5))
fd(7)
color((0.79, 0.63, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(38)
color((0.98, 0.73, 0.0))
fd(1)
color((0.67, 0.56, 0.17))
fd(1)
color((0.09, 0.22, 0.5))
fd(7)
color((0.16, 0.26, 0.46))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(3)
color((0.15, 0.27, 0.53))
fd(1)
color((0.17, 0.29, 0.54))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(22)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.19, 0.31, 0.55))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.37, 0.38, 0.34))
fd(1)
color((0.98, 0.74, 0.0))
fd(17)
color((0.09, 0.22, 0.5))
fd(2)
color((0.09, 0.23, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(18)
gt(-128.0,15.5)
fd(18)
color((0.09, 0.22, 0.5))
fd(2)
color((0.82, 0.64, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.92, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.99, 0.99, 1.0))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.65, 0.7, 0.81))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(20)
color((0.3, 0.34, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(9)
color((0.61, 0.52, 0.21))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.16, 0.26, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(6)
color((0.1, 0.23, 0.49))
fd(1)
color((0.8, 0.63, 0.1))
fd(1)
color((0.98, 0.74, 0.0))
fd(45)
color((0.65, 0.55, 0.18))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.17, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(2)
color((0.33, 0.43, 0.64))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.92, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(21)
color((0.09, 0.22, 0.5))
fd(3)
color((0.62, 0.67, 0.79))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.99, 0.99, 1.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.92, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.83, 0.65, 0.09))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(18)
gt(-128.0,14.5)
fd(17)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(17)
color((0.09, 0.22, 0.5))
fd(2)
color((0.63, 0.68, 0.8))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.97, 0.97, 0.98))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(20)
color((0.19, 0.28, 0.44))
fd(1)
color((0.09, 0.22, 0.5))
fd(11)
color((0.77, 0.62, 0.12))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.13, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.56, 0.49, 0.24))
fd(1)
color((0.98, 0.74, 0.0))
fd(51)
color((0.37, 0.38, 0.34))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(3)
color((0.1, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.44, 0.52, 0.69))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(21)
color((0.09, 0.22, 0.5))
fd(3)
color((0.96, 0.97, 0.98))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.64, 0.69, 0.8))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(17)
color((0.09, 0.22, 0.5))
fd(3)
color((0.0, 0.0, 0.0))
fd(17)
gt(-128.0,13.5)
fd(17)
color((0.09, 0.22, 0.5))
fd(2)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.2, 0.29, 0.44))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.87, 0.67, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.28, 0.33, 0.39))
fd(1)
color((0.09, 0.22, 0.5))
fd(13)
color((0.83, 0.65, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.16, 0.26, 0.46))
fd(1)
color((0.89, 0.69, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(72)
color((0.14, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(6)
color((0.57, 0.63, 0.76))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(21)
color((0.89, 0.68, 0.05))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.19, 0.28, 0.44))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(17)
gt(-128.0,12.5)
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.29, 0.34, 0.39))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.86, 0.67, 0.07))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.16, 0.28, 0.54))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.4, 0.4, 0.32))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.41, 0.41, 0.32))
fd(1)
color((0.09, 0.22, 0.5))
fd(15)
color((0.84, 0.65, 0.08))
fd(1)
color((0.98, 0.74, 0.0))
fd(91)
color((0.16, 0.27, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(8)
color((0.53, 0.6, 0.74))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.45, 0.43, 0.3))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.15, 0.27, 0.53))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.85, 0.66, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.3, 0.35, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(16)
gt(-128.0,11.5)
fd(15)
color((0.09, 0.21, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(17)
color((0.09, 0.22, 0.5))
fd(2)
color((0.85, 0.87, 0.92))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.75, 0.78, 0.86))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(19)
color((0.51, 0.47, 0.26))
fd(1)
color((0.09, 0.22, 0.5))
fd(17)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(89)
color((0.13, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(10)
color((0.45, 0.53, 0.7))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.72, 0.76, 0.85))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.88, 0.9, 0.93))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(17)
color((0.09, 0.22, 0.5))
fd(2)
color((0.08, 0.22, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(15)
gt(-128.0,10.5)
fd(15)
color((0.09, 0.22, 0.5))
fd(2)
color((0.58, 0.51, 0.22))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.15, 0.26, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.15, 0.27, 0.53))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(19)
color((0.8, 0.64, 0.1))
fd(1)
color((0.09, 0.22, 0.5))
fd(19)
color((0.96, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(87)
color((0.29, 0.34, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(12)
color((0.4, 0.49, 0.67))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(21)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.17, 0.29, 0.54))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.15, 0.26, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.6, 0.51, 0.22))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(15)
gt(-128.0,9.5)
fd(14)
color((0.09, 0.22, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.88, 0.68, 0.06))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(18)
color((0.91, 0.69, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(21)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(85)
color((0.38, 0.39, 0.34))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(14)
color((0.21, 0.33, 0.57))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.8, 0.63, 0.11))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.87, 0.67, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.09, 0.22, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(14)
gt(-128.0,8.5)
fd(14)
color((0.09, 0.22, 0.5))
fd(2)
color((0.82, 0.64, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.85, 0.88, 0.92))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.5, 0.58, 0.73))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.13, 0.24, 0.48))
fd(1)
color((0.98, 0.74, 0.0))
fd(18)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(23)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(83)
color((0.47, 0.44, 0.28))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(16)
color((0.12, 0.25, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.15, 0.26, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.49, 0.56, 0.72))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.87, 0.89, 0.93))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.83, 0.65, 0.09))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(14)
gt(-128.0,7.5)
fd(13)
color((0.09, 0.23, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.35, 0.37, 0.35))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.11, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(25)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(81)
color((0.92, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.93, 0.95, 0.96))
fd(1)
color((1.0, 1.0, 1.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(3)
color((0.47, 0.44, 0.28))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.11, 0.24, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.34, 0.36, 0.36))
fd(1)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.08, 0.22, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(13)
gt(-128.0,6.5)
fd(13)
color((0.09, 0.22, 0.5))
fd(2)
color((0.84, 0.65, 0.08))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.23, 0.5))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.91, 0.69, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(18)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(26)
color((0.98, 0.74, 0.0))
fd(83)
color((0.12, 0.24, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.47, 0.55, 0.71))
fd(1)
color((1.0, 1.0, 1.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(3)
color((0.86, 0.67, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.93, 0.71, 0.03))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.09, 0.23, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.84, 0.65, 0.08))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(13)
gt(-128.0,5.5)
fd(12)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.36, 0.45, 0.65))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.79, 0.82, 0.88))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.15, 0.26, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(18)
color((0.52, 0.47, 0.26))
fd(1)
color((0.09, 0.22, 0.5))
fd(27)
color((0.88, 0.68, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(83)
color((0.53, 0.48, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.11, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(15)
color((0.97, 0.97, 0.98))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(20)
color((0.16, 0.27, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.76, 0.8, 0.87))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.37, 0.47, 0.65))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(3)
color((0.0, 0.0, 0.0))
fd(12)
gt(-128.0,4.5)
fd(12)
color((0.09, 0.22, 0.5))
fd(2)
color((0.74, 0.6, 0.13))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.92, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(18)
color((0.93, 0.71, 0.03))
fd(1)
color((0.09, 0.22, 0.5))
fd(27)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(36)
color((0.74, 0.6, 0.14))
fd(1)
color((0.46, 0.44, 0.29))
fd(1)
color((0.3, 0.35, 0.38))
fd(1)
color((0.22, 0.3, 0.42))
fd(1)
color((0.17, 0.27, 0.45))
fd(1)
color((0.14, 0.25, 0.47))
fd(1)
color((0.13, 0.24, 0.48))
fd(1)
color((0.14, 0.25, 0.47))
fd(1)
color((0.17, 0.27, 0.45))
fd(1)
color((0.24, 0.31, 0.42))
fd(1)
color((0.31, 0.35, 0.38))
fd(1)
color((0.5, 0.46, 0.27))
fd(1)
color((0.77, 0.61, 0.12))
fd(1)
color((0.98, 0.74, 0.0))
fd(36)
color((0.93, 0.71, 0.03))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((1.0, 1.0, 1.0))
fd(15)
color((0.68, 0.73, 0.82))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(20)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.91, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.75, 0.6, 0.13))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(12)
gt(-128.0,3.5)
fd(11)
color((0.09, 0.22, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.75, 0.79, 0.86))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.32, 0.42, 0.62))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.63, 0.53, 0.2))
fd(1)
color((0.98, 0.74, 0.0))
fd(17)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(26)
color((0.17, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(32)
color((0.72, 0.58, 0.15))
fd(1)
color((0.2, 0.29, 0.44))
fd(1)
color((0.09, 0.22, 0.5))
fd(21)
color((0.29, 0.34, 0.38))
fd(1)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(32)
color((0.09, 0.22, 0.5))
fd(4)
color((0.99, 0.99, 1.0))
fd(1)
color((1.0, 1.0, 1.0))
fd(14)
color((0.21, 0.33, 0.56))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.31, 0.35, 0.38))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.65, 0.55, 0.18))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.3, 0.4, 0.62))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.76, 0.8, 0.87))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.09, 0.22, 0.49))
fd(1)
color((0.0, 0.0, 0.0))
fd(11)
gt(-128.0,2.5)
fd(11)
color((0.09, 0.22, 0.5))
fd(2)
color((0.68, 0.56, 0.17))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.86, 0.67, 0.07))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(18)
color((0.09, 0.22, 0.5))
fd(26)
color((0.27, 0.33, 0.4))
fd(1)
color((0.98, 0.74, 0.0))
fd(29)
color((0.93, 0.71, 0.03))
fd(1)
color((0.09, 0.22, 0.5))
fd(29)
color((0.11, 0.24, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(31)
color((0.09, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.98, 0.99))
fd(1)
color((1.0, 1.0, 1.0))
fd(14)
color((0.09, 0.22, 0.5))
fd(3)
color((0.91, 0.69, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(2)
color((0.85, 0.67, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.69, 0.57, 0.16))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(11)
gt(-128.0,1.5)
fd(11)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.93, 0.94, 0.96))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.11, 0.24, 0.51))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.96, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(17)
color((0.67, 0.55, 0.18))
fd(1)
color((0.09, 0.22, 0.5))
fd(25)
color((0.65, 0.55, 0.19))
fd(1)
color((0.98, 0.74, 0.0))
fd(29)
color((0.11, 0.24, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(7)
color((0.11, 0.24, 0.51))
fd(1)
color((0.53, 0.6, 0.74))
fd(1)
color((0.82, 0.84, 0.9))
fd(1)
color((0.98, 0.98, 0.99))
fd(1)
color((1.0, 1.0, 1.0))
fd(11)
color((0.97, 0.98, 0.98))
fd(1)
color((0.79, 0.82, 0.89))
fd(1)
color((0.49, 0.56, 0.72))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.14, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(33)
color((0.33, 0.36, 0.36))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.9, 0.91, 0.94))
fd(1)
color((1.0, 1.0, 1.0))
fd(13)
color((0.98, 0.98, 0.98))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(19)
color((0.96, 0.73, 0.01))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.11, 0.24, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.94, 0.95, 0.96))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(11)
gt(-128.0,0.5)
fd(10)
color((0.09, 0.22, 0.5))
fd(2)
color((0.26, 0.32, 0.4))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.79, 0.63, 0.11))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((1.0, 1.0, 1.0))
fd(3)
color((0.93, 0.94, 0.96))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.15, 0.26, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(18)
color((0.09, 0.22, 0.5))
fd(25)
color((0.74, 0.6, 0.13))
fd(1)
color((0.98, 0.74, 0.0))
fd(31)
color((0.13, 0.24, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(21)
color((0.09, 0.22, 0.5))
fd(3)
color((0.13, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(35)
color((0.52, 0.47, 0.26))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.89, 0.91, 0.94))
fd(1)
color((1.0, 1.0, 1.0))
fd(13)
color((0.56, 0.63, 0.76))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(19)
color((0.15, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.93, 0.94, 0.96))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(2)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(15)
color((0.27, 0.33, 0.4))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.0, 0.0, 0.0))
fd(10)
gt(-128.0,-0.5)
fd(10)
color((0.09, 0.22, 0.5))
fd(2)
color((0.98, 0.74, 0.0))
fd(16)
color((0.09, 0.22, 0.5))
fd(2)
color((0.96, 0.96, 0.98))
fd(1)
color((1.0, 1.0, 1.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(3)
color((0.98, 0.74, 0.0))
fd(18)
color((0.09, 0.22, 0.5))
fd(25)
color((0.9, 0.69, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(33)
color((0.09, 0.22, 0.5))
fd(4)
color((1.0, 1.0, 1.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(37)
color((0.69, 0.57, 0.16))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.99, 0.99, 0.99))
fd(1)
color((1.0, 1.0, 1.0))
fd(13)
color((0.09, 0.22, 0.5))
fd(3)
color((0.78, 0.62, 0.11))
fd(1)
color((0.98, 0.74, 0.0))
fd(19)
color((0.09, 0.22, 0.5))
fd(3)
color((1.0, 1.0, 1.0))
fd(3)
color((0.96, | |
key_path = os.path.join(directory, 'skein.pem')
cert_bytes = self._get_bytes('cert')
key_bytes = self._get_bytes('key')
lock_path = os.path.join(directory, 'skein.lock')
with lock_file(lock_path):
for path, name in [(cert_path, 'skein.crt'), (key_path, 'skein.pem')]:
if os.path.exists(path):
if force:
os.unlink(path)
else:
msg = ("%r file already exists, use `%s` to overwrite" %
(name, '--force' if context.is_cli else 'force'))
raise context.FileExistsError(msg)
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
for path, data in [(cert_path, cert_bytes), (key_path, key_bytes)]:
with os.fdopen(os.open(path, flags, 0o600), 'wb') as fil:
fil.write(data)
return Security(cert_file=cert_path, key_file=key_path)
@classmethod
def new_credentials(cls):
    """Create a new Security object with a new certificate/key pair."""
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID

    # 2048-bit RSA private key, serialized as unencrypted PKCS8 PEM.
    private_key = rsa.generate_private_key(public_exponent=65537,
                                           key_size=2048,
                                           backend=default_backend())
    key_bytes = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption())

    # Self-signed certificate: subject and issuer share the same name.
    common_name = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, u'skein-internal')])
    issued_at = datetime.utcnow()
    certificate = (x509.CertificateBuilder()
                   .subject_name(common_name)
                   .issuer_name(common_name)
                   .public_key(private_key.public_key())
                   .serial_number(x509.random_serial_number())
                   .not_valid_before(issued_at)
                   # Valid for one year from creation.
                   .not_valid_after(issued_at + timedelta(days=365))
                   .sign(private_key, hashes.SHA256(), default_backend()))
    cert_bytes = certificate.public_bytes(serialization.Encoding.PEM)
    return cls(cert_bytes=cert_bytes, key_bytes=key_bytes)
class ApplicationState(Enum):
    """Enum of application states.

    Attributes
    ----------
    NEW : ApplicationState
        Application was just created.
    NEW_SAVING : ApplicationState
        Application is being saved.
    SUBMITTED : ApplicationState
        Application has been submitted.
    ACCEPTED : ApplicationState
        Application has been accepted by the scheduler.
    RUNNING : ApplicationState
        Application is currently running.
    FINISHED : ApplicationState
        Application finished successfully.
    FAILED : ApplicationState
        Application failed.
    KILLED : ApplicationState
        Application was terminated by a user or admin.
    """
    # Member names; presumably consumed by the project's custom Enum base
    # (stdlib enum has no ``_values`` convention) -- NOTE(review): confirm.
    _values = ('NEW',
               'NEW_SAVING',
               'SUBMITTED',
               'ACCEPTED',
               'RUNNING',
               'FINISHED',
               'FAILED',
               'KILLED')
class FinalStatus(Enum):
    """Enum of application final statuses.

    Attributes
    ----------
    SUCCEEDED : FinalStatus
        Application finished successfully.
    KILLED : FinalStatus
        Application was terminated by a user or admin.
    FAILED : FinalStatus
        Application failed.
    UNDEFINED : FinalStatus
        Application has not yet finished.
    """
    # Member names; presumably consumed by the project's custom Enum base
    # (stdlib enum has no ``_values`` convention) -- NOTE(review): confirm.
    _values = ('SUCCEEDED',
               'KILLED',
               'FAILED',
               'UNDEFINED')
class Resources(Specification):
    """Resource requests per container.

    Parameters
    ----------
    memory : str or int
        The amount of memory to request. Can be either a string with units
        (e.g. ``"5 GiB"``), or numeric. If numeric, specifies the amount of
        memory in *MiB*. Note that the units are in mebibytes (MiB) NOT
        megabytes (MB) - the former being binary based (1024 MiB in a GiB), the
        latter being decimal based (1000 MB in a GB).

        Requests smaller than the minimum allocation will receive the minimum
        allocation (1024 MiB by default). Requests larger than the maximum
        allocation will error on application submission.
    vcores : int
        The number of virtual cores to request. Depending on your system
        configuration one virtual core may map to a single actual core, or a
        fraction of a core. Requests larger than the maximum allocation will
        error on application submission.
    gpus : int, optional
        The number of gpus to request. Requires Hadoop >= 3.1, sets
        resource requirements for ``yarn.io/gpu``. Default is 0.
    fpgas : int, optional
        The number of fpgas to request. Requires Hadoop >= 3.1, sets
        resource requirements for ``yarn.io/fpga``. Default is 0.
    """
    __slots__ = ('_memory', 'vcores', 'gpus', 'fpgas')
    _params = ('memory', 'vcores', 'gpus', 'fpgas')
    _protobuf_cls = _proto.Resources

    def __init__(self, memory=required, vcores=required, gpus=0, fpgas=0):
        self._assign_required('memory', memory)
        self._assign_required('vcores', vcores)
        self.gpus = gpus
        self.fpgas = fpgas
        self._validate()

    @property
    def memory(self):
        return self._memory

    @memory.setter
    def memory(self, value):
        # Accepts "5 GiB"-style strings or raw MiB integers.
        self._memory = parse_memory(value)

    def __repr__(self):
        # gpus/fpgas are not shown here; presumably kept short on purpose.
        return 'Resources<memory=%d, vcores=%d>' % (self.memory, self.vcores)

    def _validate(self, is_request=False):
        # Requests must ask for at least 1 unit; stored specs may hold 0.
        # Fix: the local was named ``min``, shadowing the builtin.
        lower = 1 if is_request else 0
        self._check_is_bounded_int('vcores', min=lower)
        self._check_is_bounded_int('memory', min=lower)
        self._check_is_bounded_int('gpus', min=0)
        self._check_is_bounded_int('fpgas', min=0)
class FileVisibility(Enum):
    """Enum of possible file visibilities.

    Determines how the file can be shared between containers.

    Attributes
    ----------
    APPLICATION : FileVisibility
        Shared only among containers of the same application on the node.
    PUBLIC : FileVisibility
        Shared by all users on the node.
    PRIVATE : FileVisibility
        Shared among all applications of the same user on the node.
    """
    # Member names; presumably consumed by the project's custom Enum base.
    _values = ('APPLICATION', 'PUBLIC', 'PRIVATE')
class FileType(Enum):
    """Enum of possible file types to distribute with the application.

    Attributes
    ----------
    FILE : FileType
        Regular file
    ARCHIVE : FileType
        A ``.zip``, ``.tar.gz``, or ``.tgz`` file to be automatically
        unarchived in the containers.
    """
    # Member names; presumably consumed by the project's custom Enum base.
    _values = ('FILE', 'ARCHIVE')
class File(Specification):
    """A file/archive to distribute with the service.

    Parameters
    ----------
    source : str
        The path to the file/archive. If no scheme is specified, path is
        assumed to be on the local filesystem (``file://`` scheme).
    type : FileType or str, optional
        The type of file to distribute. Archive's are automatically extracted
        by yarn into a directory with the same name as their destination.
        By default the type is inferred from the file extension.
    visibility : FileVisibility or str, optional
        The resource visibility, default is ``FileVisibility.APPLICATION``
    size : int, optional
        The resource size in bytes. If not provided will be determined by the
        file system.
    timestamp : int, optional
        The time the resource was last modified. If not provided will be
        determined by the file system.
    """
    __slots__ = ('_source', '_type', '_visibility', 'size', 'timestamp')
    _params = ('source', 'type', 'visibility', 'size', 'timestamp')
    _protobuf_cls = _proto.File

    def __init__(self, source=required, type='infer',
                 visibility=FileVisibility.APPLICATION, size=0, timestamp=0):
        self._assign_required('source', source)
        self.type = type
        self.visibility = visibility
        self.size = size
        self.timestamp = timestamp
        self._validate()

    def __repr__(self):
        return 'File<source=%r, type=%r>' % (self.source, self.type)

    def _validate(self):
        # Type/bounds checks are provided by the Specification base class.
        self._check_is_type('source', str)
        self._check_is_type('type', FileType)
        self._check_is_type('visibility', FileVisibility)
        self._check_is_bounded_int('size')
        self._check_is_bounded_int('timestamp')

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, val):
        # Normalize every assigned path to an absolute URL form.
        if not isinstance(val, str):
            raise context.TypeError("'source' must be a string")
        self._source = self._normpath(val)

    @property
    def type(self):
        # 'infer' resolves lazily from the source's file extension, so that
        # later changes to ``source`` are reflected here.
        if self._type == 'infer':
            return (FileType.ARCHIVE
                    if any(map(self._source.endswith, ('.zip', '.tar.gz', '.tgz')))
                    else FileType.FILE)
        return self._type

    @type.setter
    def type(self, val):
        # Keep the 'infer' sentinel as-is; everything else is coerced.
        self._type = val if val == 'infer' else FileType(val)

    @property
    def visibility(self):
        return self._visibility

    @visibility.setter
    def visibility(self, val):
        self._visibility = FileVisibility(val)

    @staticmethod
    def _normpath(path, origin=None):
        # Scheme-less paths become absolute ``file://`` URLs. Relative paths
        # are resolved against *origin* when given, else the current cwd.
        url = urlparse(path)
        if not url.scheme:
            if not os.path.isabs(url.path):
                if origin is not None:
                    path = os.path.normpath(os.path.join(origin, url.path))
                else:
                    path = os.path.abspath(url.path)
            else:
                path = url.path
            return 'file://%s%s' % (url.netloc, path)
        return path

    @implements(ProtobufMessage.to_protobuf)
    def to_protobuf(self):
        # Validate before serializing; source is split into URL components.
        self._validate()
        url = urlparse(self.source)
        urlmsg = _proto.Url(scheme=url.scheme,
                            host=url.hostname,
                            port=url.port,
                            file=url.path)
        return _proto.File(source=urlmsg,
                           type=str(self.type),
                           visibility=str(self.visibility),
                           size=self.size,
                           timestamp=self.timestamp)

    @classmethod
    def from_dict(cls, obj, **kwargs):
        """Create an instance from a dict.

        Keys in the dict should match parameter names"""
        _origin = _pop_origin(kwargs)
        # A bare string is shorthand for {'source': <string>}.
        if isinstance(obj, str):
            obj = {'source': obj}
        cls._check_keys(obj)
        if _origin:
            if 'source' not in obj:
                raise context.TypeError("parameter 'source' is required but "
                                        "wasn't provided")
            # Copy before mutating so the caller's dict is untouched.
            obj = dict(obj)
            obj['source'] = cls._normpath(obj['source'], _origin)
        return cls(**obj)

    @classmethod
    @implements(Specification.from_protobuf)
    def from_protobuf(cls, obj):
        if not isinstance(obj, cls._protobuf_cls):
            raise TypeError("Expected message of type "
                            "%r" % cls._protobuf_cls.__name__)
        # Reassemble the URL string from its protobuf components.
        url = obj.source
        netloc = '%s:%d' % (url.host, url.port) if url.host else ''
        source = '%s://%s%s' % (url.scheme, netloc, url.file)
        return cls(source=source,
                   type=_proto.File.Type.Name(obj.type),
                   visibility=_proto.File.Visibility.Name(obj.visibility),
                   size=obj.size,
                   timestamp=obj.timestamp)
class Service(Specification):
"""Description of a Skein service.
Parameters
----------
resources : Resources
Describes the resources needed to run the service.
script : str
A bash script to run the service.
instances : int, optional
The number of instances to create on startup. Default is 1.
files : dict, optional
Describes any files needed to run the service. A mapping of destination
relative paths to ``File`` or ``str`` objects describing the sources
for these paths. If a ``str``, the file type is inferred from the
extension.
env : dict, optional
A mapping of environment variables needed to run the service.
depends : set, optional
A set of service names that this service depends on. The service will
only be started after all its dependencies have been started.
max_restarts : int, optional
The maximum number of restarts to allow for this service. Containers
are only restarted on failure, and the cap is set for all containers in
the service, not per container. Set to -1 to allow infinite restarts.
Default is 0.
allow_failures : bool, optional
If False (default), the whole application will shutdown if the number
of failures for this service exceeds ``max_restarts``. Set to True to
keep the application running even if this service exceeds its failure
limit.
node_label : str, optional
The node label expression to use when requesting containers for this
service. If not set, defaults to the application-level ``node_label``
(if set).
nodes : list, optional
A list of node host names to restrict containers for this | |
import logging
import math
import os
import pickle
import random
from enum import Enum
from pathlib import Path
from queue import Queue
import numpy as np
import torch
from Utils.data_utils import change_win_to_unix_path_if_needed
class FileSetIterator:
    """ An iterator for samples stored in a set of files.

    The FileSetIterator provides an iterator over the samples stored in a set of files.
    These files are usually HDF5-files.

    Args:
        files (list of str): A list of paths to the files to be loaded
        load_data (function): A function that can load a list of samples given a filename
            MUST return the following format:
            [(data_1, label_1), ... , (data_n, label_n)]
        worker_id (int): The id of this worker for multiprocessing environments
    """
    def __init__(self, files, load_data, worker_id=0):
        self.files = list(files)  # Copy the file list because we remove the items in this list
        self.load_data = load_data
        self.sample_queue = Queue()
        self.worker_id = worker_id

    def _assert_instance_correctness(self, instance):
        """Check the structure returned by load_data; pad 2-tuples with an empty aux dict."""
        err = '''The data loader seems to return instances in the wrong format.
                The required format is [(data_1, label1, [aux_1]), ... ,
                (data_n, label_n, [aux_n])] or None.'''
        assert isinstance(instance, list), err
        for idx, i in enumerate(instance):
            assert isinstance(i, tuple), err
            if len(i) == 2:
                # No aux dict supplied - add an empty one.
                instance[idx] += (dict(),)
            assert len(instance[idx]) == 3, err

    def _transform_to_tensor(self, i, num):
        """Convert one (data, label, aux) sample to tensors and enqueue it.

        ``num`` is currently unused; kept for interface stability.
        """
        _data = torch.FloatTensor(i[0])
        # The following branching is necessary to have 0, 1 Binary Labels in
        # Tensors, since FloatTensor(0) = FloatTensor([])
        if type(i[1]) is np.ndarray and len(i[1]) > 1:
            _label = torch.FloatTensor(i[1])
        elif i[1] == 0:
            _label = torch.FloatTensor([0.])
        elif i[1] == 1:
            _label = torch.FloatTensor([1.])
        else:
            # Fix: previously any other label fell through and the unbound
            # ``_label`` raised a confusing UnboundLocalError.
            raise ValueError(
                "Unexpected label value %r; expected 0, 1, or an ndarray "
                "of length > 1" % (i[1],))
        _aux = i[2]
        self.sample_queue.put((_data, _label, _aux))

    def _load_file(self):
        """Load files until one yields samples; return False when exhausted."""
        while True:
            if len(self.files) == 0:
                return False
            fn = self.files.pop(0)
            instance = self.load_data(fn)
            if instance is None:
                # File contained no usable samples; try the next one.
                continue
            else:
                self._assert_instance_correctness(instance)
                # Add filepath to every sample
                for i in instance:
                    i[2]["sourcefile"] = str(fn)
                # These lines were probably responsible for continued crashing of trainings
                # Maybe since the index was added, the filenames could not be compressed anymore (torch-internally)
                # More ram was used. Need to find a fix for this since the index information is very valuable in
                # evaluation mode.
                # for i, sample in enumerate(instance):
                #     sample[2]["sourcefile"] = str(fn)
                #     sample[2]["n"] = i
                for num, i in enumerate(instance):
                    self._transform_to_tensor(i, num)
                break
        return True

    def get_remaining_files(self):
        """ Get the list of remaining files

        Returns:
            A list of remaining files.
        """
        return self.files

    def __iter__(self):
        return self

    def __next__(self):
        """ Get the next sample.

        This will either return a sample from the internal queue or load the next file
        from the fileset.
        When the queue is exhausted and no more files are available, it will raise a
        StopIteration.

        Raises:
            StopIteration: If no more samples are available
        """
        if self.sample_queue.empty():
            if not self._load_file():
                raise StopIteration
        return self.sample_queue.get()
class CachingMode(Enum):
    """
    Enum to specify if and how caching is done.

    Nothing: No caching is done
    FileList: Only the list of files is cached.
    """
    Nothing = 1
    FileList = 2
class FileSetIterable(torch.utils.data.IterableDataset):
    """ An Iterable meant to be used with the torch DataLoader.

    Args:
        files: A list of (typically HDF5 files) to load
        load_data (function): A function that can load a list of samples given a filename
            MUST return the following format:
            [(data_1, label_1), ... , (data_n, label_n)]
    """
    def __init__(self, files, load_data):
        self.load_data = load_data
        self.files = files

    def __iter__(self):
        """ Creates an iterator that loads a subset of the file set.

        If torch indicates a multi-worker scenario, we split the files evenly along workers.
        If some files contain significantly less samples than other files, this will lead
        to an uneven split of workload.

        If torch is not using multiprocessing, a single Iterator will be used to
        load all files.

        Returns:
            A FileSetIterator for a subset of files.
        """
        worker_info = torch.utils.data.get_worker_info()
        worker_id = 0
        if worker_info is None:  # single-process data loading, return the full iterator
            worker_paths = self.files
        else:  # in a worker process
            # split workload: each worker gets a contiguous slice of the file list
            per_worker = int(math.ceil(len(self.files) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            if worker_id == 0:
                # Log the split size only once, from the first worker.
                logger = logging.getLogger(__name__)
                logger.debug(f"Each worker will process up to {per_worker} files.")
            iter_start = worker_id * per_worker
            iter_end = iter_start + per_worker
            worker_paths = self.files[iter_start:iter_end]
        return FileSetIterator(worker_paths, self.load_data, worker_id=worker_id)
class FileDiscovery:
    """ A helper class to gather files from a set of base paths

    This class can be used to discover sample files in a set of directories.

    Args:
        gather_data (function): A callable that gathers files given a single root directory.
            data_gather.get_filelist_within_folder is usually used for this.
        cache_path (str): A directory to use for caching file lists, if required.
    """
    def __init__(self, gather_data, cache_path=None, cache_mode=CachingMode.FileList):
        self.gather_data = gather_data
        self.filelist_cache_path = None
        if cache_path is not None and cache_mode in [CachingMode.FileList]:
            cache_dir = Path(cache_path).joinpath("filelists")
            cache_dir.mkdir(parents=True, exist_ok=True)
            self.filelist_cache_path = cache_dir

    def discover(self, data_paths):
        """ Get a list of files for the given set of paths.

        Args:
            data_paths (list of str): The set of paths to load

        Returns:
            A list of files that were found
        """
        discovered = []
        for root in data_paths:
            if self.filelist_cache_path is None:
                # Caching disabled: gather directly.
                discovered.extend(self.gather_data(root))
                continue
            cachefile = self.filelist_cache_path.joinpath(Path(root).stem)
            if os.path.isfile(cachefile):
                # Cache hit: reuse the pickled file list.
                with open(cachefile, "rb") as f:
                    discovered.extend(pickle.load(f))
            else:
                # Cache miss: gather and store for next time.
                found = self.gather_data(root)
                with open(cachefile, "wb") as f:
                    pickle.dump(found, f)
                discovered.extend(found)
        return discovered
class SubSetGenerator:
""" This class is responsible for creating and loading test and validation splits.
Given a set of filenames, it will load a subset of samples and return unused files.
Args:
load_data (function): A function that can load a list of samples given a filename
MUST return the following format:
[(data_1, label_1), ... , (data_n, label_n)]
subset_name (str): The name of this subset
num_samples (int): The number of samples in this subset
load_path (Path): A path for loading existing splits
save_path (Path): A path for saving the used splits
"""
def __init__(self,
             load_data,
             subset_name: str,
             num_samples: int,
             load_path=None,
             save_path=None,
             data_root=None,
             dont_care_num_samples=False):
    """Store configuration and derive the load/save file locations for this split."""
    self.logger = logging.getLogger(__name__)
    self.load_data = load_data
    self.num_samples = num_samples
    self.save_dir = save_path
    self.load_dir = load_path
    self.dont_care_num_samples = dont_care_num_samples
    self.subset_name = subset_name
    self.samples = None
    self.used_filenames = None

    split_filename = f"{subset_name}.p"
    # Where an existing split would be loaded from, if a load_path was given.
    self.load_file = Path(load_path) / split_filename if load_path is not None else None

    # Where the used split will be stored; requires an existing directory.
    self.save_file = None
    if save_path is not None:
        save_path = Path(save_path)
        if save_path.is_dir():
            self.save_file = save_path / split_filename
        else:
            self.logger.warning(f"save_path {save_path} is not a directory, the {subset_name} split wont be saved!")

    # Optional root used to resolve relative paths from stored splits.
    self.data_root = Path(data_root) if data_root is not None else None
def _list_difference(self, a, b):
    """Return the elements of *a* not contained in *b*, preserving order."""
    exclude = set(b)
    return [item for item in a if item not in exclude]
def _load_sub_set_from_files(self, file_paths):
    """Consume ``self.num_samples`` samples from *file_paths*.

    Returns a tuple ``(subset, remaining_files)`` where ``remaining_files``
    are the files the iterator did not need to open.

    Raises:
        ValueError: if the files run out before ``num_samples`` samples were
            collected and ``dont_care_num_samples`` is False.
    """
    self.logger.debug(f"Loading samples for {self.subset_name}")
    sample_iterator = FileSetIterator(file_paths, self.load_data)
    subset = []
    for i in range(self.num_samples):
        try:
            subset.append(next(sample_iterator))
        except StopIteration:
            if self.dont_care_num_samples:
                # Accept a short subset instead of failing.
                self.logger.debug(f"DONT CARE Not enough samples to create subset {self.subset_name}:"
                                  f"Could create {len(subset)} of {self.num_samples} samples")
                break
            else:
                raise ValueError(f"Not enough samples to create subset {self.subset_name}")
    return subset, sample_iterator.get_remaining_files()
def prepare_subset(self, file_paths):
""" Prepare the subset from the given file paths.
This will either load a subset of the files in file_path or load a stored split.
Args:
file_paths (list of Path): A list of paths to files to load.
Returns:
A list of file paths that can still be used
"""
if self.load_file is not None and self.load_file.is_file():
with open(self.load_file, 'rb') as f:
self.logger.info(f"Loading {self.subset_name} from stored file {self.load_file}")
self.used_filenames = [Path(fn) for fn in pickle.load(f)]
all_abs = all(p.is_absolute() for p in self.used_filenames)
# If we got a data_root and have non relative paths, apply the data_root
# This assumes that we never get mixed relative and absolute paths which should be reasonable
if self.data_root is not None and not all_abs:
self.used_filenames = [self.data_root / p for p in self.used_filenames]
elif not all_abs:
raise ValueError("Got relative paths in stored split but data_root was not set!")
if os.name == 'nt':
# If the paths were already saved as Windows paths, as in the tests, do nothing
# Explicitly not using type() and WindowsPath here, since | |
(self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TDdlExecRequest')
if self.protocol_version is not None:
oprot.writeFieldBegin('protocol_version', TType.I32, 1)
oprot.writeI32(self.protocol_version)
oprot.writeFieldEnd()
if self.ddl_type is not None:
oprot.writeFieldBegin('ddl_type', TType.I32, 2)
oprot.writeI32(self.ddl_type)
oprot.writeFieldEnd()
if self.alter_table_params is not None:
oprot.writeFieldBegin('alter_table_params', TType.STRUCT, 3)
self.alter_table_params.write(oprot)
oprot.writeFieldEnd()
if self.alter_view_params is not None:
oprot.writeFieldBegin('alter_view_params', TType.STRUCT, 4)
self.alter_view_params.write(oprot)
oprot.writeFieldEnd()
if self.create_db_params is not None:
oprot.writeFieldBegin('create_db_params', TType.STRUCT, 5)
self.create_db_params.write(oprot)
oprot.writeFieldEnd()
if self.create_table_params is not None:
oprot.writeFieldBegin('create_table_params', TType.STRUCT, 6)
self.create_table_params.write(oprot)
oprot.writeFieldEnd()
if self.create_table_like_params is not None:
oprot.writeFieldBegin('create_table_like_params', TType.STRUCT, 7)
self.create_table_like_params.write(oprot)
oprot.writeFieldEnd()
if self.create_view_params is not None:
oprot.writeFieldBegin('create_view_params', TType.STRUCT, 8)
self.create_view_params.write(oprot)
oprot.writeFieldEnd()
if self.create_fn_params is not None:
oprot.writeFieldBegin('create_fn_params', TType.STRUCT, 9)
self.create_fn_params.write(oprot)
oprot.writeFieldEnd()
if self.drop_db_params is not None:
oprot.writeFieldBegin('drop_db_params', TType.STRUCT, 10)
self.drop_db_params.write(oprot)
oprot.writeFieldEnd()
if self.drop_table_or_view_params is not None:
oprot.writeFieldBegin('drop_table_or_view_params', TType.STRUCT, 11)
self.drop_table_or_view_params.write(oprot)
oprot.writeFieldEnd()
if self.drop_fn_params is not None:
oprot.writeFieldBegin('drop_fn_params', TType.STRUCT, 12)
self.drop_fn_params.write(oprot)
oprot.writeFieldEnd()
if self.compute_stats_params is not None:
oprot.writeFieldBegin('compute_stats_params', TType.STRUCT, 13)
self.compute_stats_params.write(oprot)
oprot.writeFieldEnd()
if self.create_data_source_params is not None:
oprot.writeFieldBegin('create_data_source_params', TType.STRUCT, 14)
self.create_data_source_params.write(oprot)
oprot.writeFieldEnd()
if self.drop_data_source_params is not None:
oprot.writeFieldBegin('drop_data_source_params', TType.STRUCT, 15)
self.drop_data_source_params.write(oprot)
oprot.writeFieldEnd()
if self.drop_stats_params is not None:
oprot.writeFieldBegin('drop_stats_params', TType.STRUCT, 16)
self.drop_stats_params.write(oprot)
oprot.writeFieldEnd()
if self.header is not None:
oprot.writeFieldBegin('header', TType.STRUCT, 17)
self.header.write(oprot)
oprot.writeFieldEnd()
if self.create_drop_role_params is not None:
oprot.writeFieldBegin('create_drop_role_params', TType.STRUCT, 18)
self.create_drop_role_params.write(oprot)
oprot.writeFieldEnd()
if self.grant_revoke_role_params is not None:
oprot.writeFieldBegin('grant_revoke_role_params', TType.STRUCT, 19)
self.grant_revoke_role_params.write(oprot)
oprot.writeFieldEnd()
if self.grant_revoke_priv_params is not None:
oprot.writeFieldBegin('grant_revoke_priv_params', TType.STRUCT, 20)
self.grant_revoke_priv_params.write(oprot)
oprot.writeFieldEnd()
if self.truncate_params is not None:
oprot.writeFieldBegin('truncate_params', TType.STRUCT, 21)
self.truncate_params.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
    """Raise a TProtocolException if any required field is unset."""
    required_fields = (('protocol_version', self.protocol_version),
                       ('ddl_type', self.ddl_type))
    for field_name, value in required_fields:
        if value is None:
            raise TProtocol.TProtocolException(message='Required field %s is unset!' % field_name)
    return
def __repr__(self):
    # Debug representation listing all attribute name/value pairs.
    # Fix: ``dict.iteritems`` exists only on Python 2 and crashes on
    # Python 3; ``dict.items`` behaves identically here on both.
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
    # Structural equality: same class and identical attribute dict.
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    # Defined as the strict complement of __eq__.
    equal = (self == other)
    return not equal
class TDdlExecResponse:
    """
    Attributes:
     - result
     - new_table_created
     - result_set
    """

    # Thrift-generated field metadata: (id, type, name, type-args, default).
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'result', (TCatalogUpdateResult, TCatalogUpdateResult.thrift_spec), None, ),  # 1
        (2, TType.BOOL, 'new_table_created', None, None, ),  # 2
        (3, TType.STRUCT, 'result_set', (Results.ttypes.TResultSet, Results.ttypes.TResultSet.thrift_spec), None, ),  # 3
    )

    def __init__(self, result=None, new_table_created=None, result_set=None,):
        self.result = result
        self.new_table_created = new_table_created
        self.result_set = result_set

    def read(self, iprot):
        """Deserialize this struct from *iprot* (Thrift-generated)."""
        # Fast path: C-accelerated binary decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decoding; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.result = TCatalogUpdateResult()
                    self.result.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BOOL:
                    self.new_table_created = iprot.readBool();
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.result_set = Results.ttypes.TResultSet()
                    self.result_set.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; unset (None) fields are omitted."""
        # Fast path: C-accelerated binary encoding when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TDdlExecResponse')
        if self.result is not None:
            oprot.writeFieldBegin('result', TType.STRUCT, 1)
            self.result.write(oprot)
            oprot.writeFieldEnd()
        if self.new_table_created is not None:
            oprot.writeFieldBegin('new_table_created', TType.BOOL, 2)
            oprot.writeBool(self.new_table_created)
            oprot.writeFieldEnd()
        if self.result_set is not None:
            oprot.writeFieldBegin('result_set', TType.STRUCT, 3)
            self.result_set.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise a TProtocolException if a required field is unset."""
        if self.result is None:
            raise TProtocol.TProtocolException(message='Required field result is unset!')
        return

    def __repr__(self):
        # NOTE(review): iteritems is Python 2 only; this module appears to be
        # Py2-era generated code (xrange is used elsewhere).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TUpdateCatalogRequest:
    """
    Attributes:
     - protocol_version
     - header
     - target_table
     - db_name
     - created_partitions
    """

    # Thrift-generated field metadata: (id, type, name, type-args, default).
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'protocol_version', None, 0, ),  # 1
        (2, TType.STRUCT, 'header', (TCatalogServiceRequestHeader, TCatalogServiceRequestHeader.thrift_spec), None, ),  # 2
        (3, TType.STRING, 'target_table', None, None, ),  # 3
        (4, TType.STRING, 'db_name', None, None, ),  # 4
        (5, TType.SET, 'created_partitions', (TType.STRING,None), None, ),  # 5
    )

    def __init__(self, protocol_version=thrift_spec[1][4], header=None, target_table=None, db_name=None, created_partitions=None,):
        # protocol_version defaults to the spec's declared default (0).
        self.protocol_version = protocol_version
        self.header = header
        self.target_table = target_table
        self.db_name = db_name
        self.created_partitions = created_partitions

    def read(self, iprot):
        """Deserialize this struct from *iprot* (Thrift-generated)."""
        # Fast path: C-accelerated binary decoding when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decoding; unknown fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.protocol_version = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.header = TCatalogServiceRequestHeader()
                    self.header.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.target_table = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.db_name = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.SET:
                    # Read the string-set of created partition names.
                    self.created_partitions = set()
                    (_etype17, _size14) = iprot.readSetBegin()
                    for _i18 in xrange(_size14):
                        _elem19 = iprot.readString();
                        self.created_partitions.add(_elem19)
                    iprot.readSetEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; unset (None) fields are omitted."""
        # Fast path: C-accelerated binary encoding when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TUpdateCatalogRequest')
        if self.protocol_version is not None:
            oprot.writeFieldBegin('protocol_version', TType.I32, 1)
            oprot.writeI32(self.protocol_version)
            oprot.writeFieldEnd()
        if self.header is not None:
            oprot.writeFieldBegin('header', TType.STRUCT, 2)
            self.header.write(oprot)
            oprot.writeFieldEnd()
        if self.target_table is not None:
            oprot.writeFieldBegin('target_table', TType.STRING, 3)
            oprot.writeString(self.target_table)
            oprot.writeFieldEnd()
        if self.db_name is not None:
            oprot.writeFieldBegin('db_name', TType.STRING, 4)
            oprot.writeString(self.db_name)
            oprot.writeFieldEnd()
        if self.created_partitions is not None:
            oprot.writeFieldBegin('created_partitions', TType.SET, 5)
            oprot.writeSetBegin(TType.STRING, len(self.created_partitions))
            for iter20 in self.created_partitions:
                oprot.writeString(iter20)
            oprot.writeSetEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise a TProtocolException if any required field is unset."""
        if self.protocol_version is None:
            raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
        if self.target_table is None:
            raise TProtocol.TProtocolException(message='Required field target_table is unset!')
        if self.db_name is None:
            raise TProtocol.TProtocolException(message='Required field db_name is unset!')
        if self.created_partitions is None:
            raise TProtocol.TProtocolException(message='Required field created_partitions is unset!')
        return

    def __repr__(self):
        # NOTE(review): iteritems is Python 2 only; this module appears to be
        # Py2-era generated code (xrange is used above).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TUpdateCatalogResponse:
  """
  Catalog-service response wrapping a TCatalogUpdateResult.

  Attributes:
   - result
  """

  # Thrift field metadata: (field-id, wire-type, name, type-args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'result', (TCatalogUpdateResult, TCatalogUpdateResult.thrift_spec), None, ), # 1
  )

  def __init__(self, result=None,):
    self.result = result

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C fastbinary fast path when the
    accelerated binary protocol is in use, else decodes field by field."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.result = TCatalogUpdateResult()
          self.result.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; a None result is omitted from the wire."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TUpdateCatalogResponse')
    if self.result is not None:
      oprot.writeFieldBegin('result', TType.STRUCT, 1)
      self.result.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if the required ``result`` field is unset."""
    if self.result is None:
      raise TProtocol.TProtocolException(message='Required field result is unset!')
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TResetMetadataRequest:
"""
Attributes:
- protocol_version
- header
- is_refresh
- table_name
- partition_spec
- db_name
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 0, ), # 1
(2, TType.BOOL, 'is_refresh', None, None, ), # 2
(3, TType.STRUCT, 'table_name', (CatalogObjects.ttypes.TTableName, CatalogObjects.ttypes.TTableName.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'header', (TCatalogServiceRequestHeader, TCatalogServiceRequestHeader.thrift_spec), None, ), # 4
(5, TType.LIST, 'partition_spec', (TType.STRUCT,(CatalogObjects.ttypes.TPartitionKeyValue, CatalogObjects.ttypes.TPartitionKeyValue.thrift_spec)), None, ), # 5
(6, TType.STRING, 'db_name', None, None, ), # 6
)
def __init__(self, protocol_version=thrift_spec[1][4], header=None, is_refresh=None, table_name=None, partition_spec=None, db_name=None,):
self.protocol_version = protocol_version
self.header = header
self.is_refresh = is_refresh
self.table_name = table_name
self.partition_spec = partition_spec
self.db_name = db_name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.protocol_version = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.header = TCatalogServiceRequestHeader()
self.header.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.is_refresh = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.table_name = CatalogObjects.ttypes.TTableName()
self.table_name.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.partition_spec = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in xrange(_size21):
_elem26 = CatalogObjects.ttypes.TPartitionKeyValue()
_elem26.read(iprot)
self.partition_spec.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == | |
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import yfinance as yf
from pandas_datareader import data as web
import datetime as dt
from empyrical import*
import quantstats as qs
from darts.models import*
from darts import TimeSeries
from darts.utils.missing_values import fill_missing_values
from darts.metrics import mape
import yahoo_fin.stock_info as si
from yahoofinancials import YahooFinancials
from pypfopt import EfficientFrontier, risk_models, expected_returns, HRPOpt, objective_functions
import logging
import warnings
from warnings import filterwarnings
from IPython.display import display
import copy
# ------------------------------------------------------------------------------------------
today = dt.date.today()
# ------------------------------------------------------------------------------------------
class Engine:
    """Backtest configuration: asset universe, weights, benchmark, date
    range and, optionally, an optimizer or rebalancing schedule.

    Parameters
    ----------
    start_date : first day of the backtest.
    portfolio : list of ticker symbols.
    weights : list of portfolio weights; defaults to equal weighting.
    rebalance : rebalancing period forwarded to ``make_rebalance`` (or None).
    benchmark : list holding the benchmark ticker; defaults to ['SPY'].
    end_date : last day of the backtest; defaults to today's date.
    optimizer : "EF", "MV", "HRP", or None to keep ``weights`` as given.
    max_vol : volatility cap used by the "MV" optimizer.
    """

    def __init__(self, start_date, portfolio, weights=None, rebalance=None,
                 benchmark=None, end_date=None, optimizer=None, max_vol=0.15):
        self.start_date = start_date
        # BUGFIX: the previous defaults (``end_date=today`` and
        # ``benchmark=['SPY']``) were evaluated once at import time, so a
        # long-running process kept a stale date and all instances shared
        # one benchmark list.  Resolve both per call instead.
        self.end_date = dt.date.today() if end_date is None else end_date
        self.benchmark = ['SPY'] if benchmark is None else benchmark
        self.portfolio = portfolio
        self.weights = weights
        self.optimizer = optimizer
        self.max_vol = max_vol
        self.rebalance = rebalance
        if self.weights is None:  # PEP 8: identity test, not ``== None``
            self.weights = [1.0 / len(self.portfolio)] * len(self.portfolio)
        if self.optimizer == "EF":
            self.weights = efficient_frontier(self, perf="False")
        if self.optimizer == "MV":
            self.weights = mean_var(self, vol_max=max_vol, perf="False")
        if self.optimizer == "HRP":
            self.weights = hrp(self, perf="False")
        if self.rebalance is not None:
            self.rebalance = make_rebalance(self.start_date, self.end_date,
                                            self.optimizer, self.portfolio,
                                            self.rebalance)
#-------------------------------------------------------------------------------------------
def get_returns(stocks, wts, start_date, end_date=today):
    """Daily returns between *start_date* and *end_date*.

    For several tickers, returns the weighted portfolio return series
    (first NaN row dropped); for a single ticker, returns a one-column
    DataFrame of raw percentage changes (leading NaN row kept).
    """
    prices = web.DataReader(stocks, data_source='yahoo',
                            start=start_date, end=end_date)['Adj Close']
    if len(stocks) > 1:
        daily = prices.pct_change()[1:]
        return (daily * wts).sum(axis=1)
    return pd.DataFrame(prices).pct_change()
# ------------------------------------------------------------------------------------------
def get_pricing(stocks, start_date, end_date=today, pricing="Adj Close", wts=1):
    """Fetch the *pricing* column (e.g. 'Adj Close') for *stocks* from
    Yahoo Finance over [start_date, end_date].

    Note: the original single- and multi-ticker branches were identical,
    so a single fetch covers both cases; *wts* is accepted but unused.
    """
    return web.DataReader(stocks, data_source='yahoo',
                          start=start_date, end=end_date)[pricing]
# ------------------------------------------------------------------------------------------
def get_data(stocks, period="max", trading_year_days=252):
    """OHLCV history for *stocks*, trimmed to the last *period* of data.

    Parameters
    ----------
    stocks : list of ticker symbols.
    period : one of '1mo', '3mo', '6mo', '1y', '2y', '5y', '10y', '20y',
        'max'; raises KeyError for anything else.
    trading_year_days : trading days per year used to size the window.

    Returns a DataFrame with columns Open/High/Low/Close/Volume.

    BUGFIX: the old implementation rebuilt the period-to-rows dict once per
    ticker, and its 'max' entry issued a network call
    (``yf.Ticker(...).history``) for EVERY ticker and EVERY period, even
    though only the last ticker's value for the requested period was used.
    The lookup is now lazy: the network call happens only for
    ``period == 'max'`` and only for the last ticker, preserving the
    original result.
    """
    if period == "max":
        # Same value the original loop ended with: the last ticker's
        # full-history row count.
        n_rows = len(yf.Ticker(stocks[-1]).history(period=period)['Close'].pct_change())
    else:
        n_rows = {
            '1mo': math.ceil(trading_year_days / 12),
            '3mo': math.ceil(trading_year_days / 4),
            '6mo': math.ceil(trading_year_days / 2),
            '1y': trading_year_days,
            '2y': 2 * trading_year_days,
            '5y': 5 * trading_year_days,
            '10y': 10 * trading_year_days,
            '20y': 20 * trading_year_days,
        }[period]
    df = web.DataReader(stocks, data_source='yahoo', start="1980-01-01", end=today)
    df = pd.DataFrame(df).tail(n_rows)
    df = df.drop(['Adj Close'], axis=1)
    return df[["Open", "High", "Low", "Close", "Volume"]]
# ------------------------------------------------------------------------------------------
#reformat
def creturns(stocks,wts=1, period="max", benchmark= None, plot=True, pricing="Adj Close", trading_year_days=252, end_date = today):
    """Cumulative returns for a portfolio (or single ticker), optionally
    against a benchmark.

    Plots the curve when ``plot`` is True (and implicitly returns None);
    otherwise returns the cumulative-returns DataFrame.

    NOTE(review): the loop below rebuilds ``years`` for every ticker and
    its 'max' entry triggers a network call per ticker even when
    ``period != 'max'``; only the last iteration's dict is used.
    """
    p = {"period": period}
    for stock in stocks:
        # Map each period keyword to a number of trading-day rows to keep.
        years = {
            '1mo' : math.ceil(trading_year_days/12),
            '3mo' : math.ceil(trading_year_days/4),
            '6mo' : math.ceil(trading_year_days/2),
            '1y': trading_year_days,
            '2y' : 2*trading_year_days,
            '5y' : 5*trading_year_days,
            '10y' : 10*trading_year_days,
            '20y' : 20*trading_year_days,
            'max' : len(yf.Ticker(stock).history(**p)['Close'].pct_change())
        }
    if len(stocks) > 1:
        # Multi-asset: weighted portfolio cumulative returns.
        df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
        if benchmark != None:
            df2 = web.DataReader(benchmark, data_source='yahoo', start = "1980-01-01", end= end_date)[pricing]
            df = pd.DataFrame(df)
            df = df.tail(years[period])
            df2 = df2.tail(years[period])
            return_df2 = df2.pct_change()[1:]
            ret_data = df.pct_change()[1:]
            # Compound daily returns into a growth index (base 1.0).
            ret_data = (ret_data + 1).cumprod()
            port_ret = (ret_data * wts).sum(axis = 1)
            return_df2 = (return_df2 + 1).cumprod()
            ret_data['Portfolio'] = port_ret
            ret_data['Benchmark'] = return_df2
            ret_data = pd.DataFrame(ret_data)
        else:
            df = pd.DataFrame(df)
            df = df.tail(years[period])
            ret_data = df.pct_change()[1:]
            ret_data = (ret_data + 1).cumprod()
            port_ret = (ret_data * wts).sum(axis = 1)
            ret_data['Portfolio'] = port_ret
            ret_data = pd.DataFrame(ret_data)
        if plot==True:
            ret_data.plot(figsize=(20,10))
            plt.xlabel('Date')
            plt.ylabel('Returns')
            # NOTE(review): missing space between the period and the label.
            plt.title(period + 'Portfolio Cumulative Returns')
        else:
            return ret_data
    else:
        # Single ticker: cumulative returns of that instrument.
        # NOTE(review): this branch ignores ``end_date`` and uses ``today``.
        df = web.DataReader(stocks, data_source='yahoo', start = "1980-01-01", end= today)[pricing]
        if benchmark != None:
            df2 = web.DataReader(benchmark, data_source='yahoo', start = "1980-01-01", end= today)[pricing]
            # NOTE(review): computed here and immediately recomputed below.
            return_df2 = df2.pct_change()[1:]
            df = pd.DataFrame(df)
            df = df.tail(years[period])
            df2 = df2.tail(years[period])
            return_df2 = df2.pct_change()[1:]
            returns = df.pct_change()
            returns = (returns + 1).cumprod()
            return_df2 = (return_df2 + 1).cumprod()
            returns["benchmark"] = return_df2
            returns = pd.DataFrame(returns)
        else:
            df = pd.DataFrame(df)
            df = df.tail(years[period])
            returns = df.pct_change()
            returns = (returns + 1).cumprod()
            returns = pd.DataFrame(returns)
        if plot==True:
            returns.plot(figsize=(20,10))
            plt.axvline(x=1)
            plt.xlabel('Date')
            plt.ylabel('Returns')
            plt.title(stocks[0] +' Cumulative Returns (Period : '+ period+')')
        else:
            return returns
# ------------------------------------------------------------------------------------------
def information_ratio(returns, benchmark_returns, days=252):
    """Annualized information ratio of *returns* versus *benchmark_returns*.

    The active return (portfolio minus benchmark) is annualized with
    ``sqrt(days)``; the ratio is its mean over that tracking error.
    """
    active = returns - benchmark_returns
    tracking_error = active.std() * np.sqrt(days)
    return active.mean() / tracking_error
def graph_allocation(my_portfolio):
    """Show a pie chart of the portfolio's weights, labelled by ticker."""
    figure, axis = plt.subplots()
    axis.pie(my_portfolio.weights,
             labels=my_portfolio.portfolio,
             autopct='%1.1f%%',
             shadow=False)
    axis.axis('equal')  # equal aspect ratio keeps the pie circular
    plt.title("Portfolio's allocation")
    plt.show()
#------------------------------------------------------------------------------------------------------------------------------------------------------
#initialize a variable that we be set to false
def empyrial(my_portfolio, rf=0.0, sigma_value=1, confidence_value=0.95, rebalance=False):
    """Print a table of backtest metrics for *my_portfolio* and draw the
    standard charts (returns, heatmap, drawdowns, rolling stats).

    Parameters
    ----------
    my_portfolio : Engine-like object with portfolio/weights/dates and,
        when ``rebalance`` is True, a rebalance schedule DataFrame.
    rf : risk-free rate passed to Sharpe/alpha-beta calculations.
    sigma_value, confidence_value : parameters for daily value-at-risk.
    rebalance : when True, stitch returns together per rebalance window.
    """
    # standard empyrial output (single static-weight window)
    if rebalance == False:
        # standard returns calculation over the whole backtest range
        returns = get_returns(my_portfolio.portfolio, my_portfolio.weights, start_date=my_portfolio.start_date,end_date=my_portfolio.end_date)
    # when we want to do the rebalancing
    if rebalance == True:
        print("")
        print('rebalance hit')
        # the dataframe with the rebalance dates (columns) and weights
        rebalance_schedule = my_portfolio.rebalance
        # build the list of window boundaries, starting at the backtest start
        dates = [my_portfolio.start_date]
        # then append the rebalancing dates
        dates = dates + rebalance_schedule.columns.to_list()
        # this will hold the stitched return series
        # NOTE(review): ``pd.Series()`` without a dtype warns on modern
        # pandas, and ``Series.append`` is removed in pandas >= 2.0.
        returns = pd.Series()
        # walk consecutive date pairs (window start, window end)
        for i in range(len(dates) - 1):
            # weights that apply until the next rebalance date
            weights = rebalance_schedule[str(dates[i+1])]
            # returns for this window under those weights
            add_returns = get_returns(my_portfolio.portfolio, weights, start_date = dates[i], end_date = dates[i+1])
            # then append those returns
            returns = returns.append(add_returns)
    # benchmark return series over the same range (weight 1 on one ticker)
    benchmark = get_returns(my_portfolio.benchmark, wts=[1], start_date=my_portfolio.start_date,end_date=my_portfolio.end_date)
    # --- headline metrics, each formatted to a display string ---
    CAGR = cagr(returns, period=DAILY, annualization=None)
    CAGR = round(CAGR,2)
    CAGR = CAGR.tolist()
    CAGR = str(round(CAGR*100,2)) + '%'
    CUM = cum_returns(returns, starting_value=0, out=None)*100
    CUM = CUM.iloc[-1]
    CUM = CUM.tolist()
    CUM = str(round(CUM,2)) + '%'
    VOL = qs.stats.volatility(returns, annualize=True, trading_year_days=252)
    VOL = VOL.tolist()
    VOL = str(round(VOL*100,2))+' %'
    SR = sharpe_ratio(returns, risk_free=rf, period=DAILY)
    SR = np.round(SR, decimals=2)
    SR = str(SR)
    CR = qs.stats.calmar(returns)
    CR = CR.tolist()
    CR = str(round(CR,2))
    STABILITY = stability_of_timeseries(returns)
    STABILITY = round(STABILITY,2)
    STABILITY = str(STABILITY)
    MD = max_drawdown(returns, out=None)
    MD = str(round(MD,2))+' %'
    '''OR = omega_ratio(returns, risk_free=0.0, required_return=0.0)
    OR = round(OR,2)
    OR = str(OR)
    print(OR)'''
    SOR = sortino_ratio(returns, required_return=0, period=DAILY)
    SOR = round(SOR,2)
    SOR = str(SOR)
    SK = qs.stats.skew(returns)
    SK = round(SK,2)
    SK = SK.tolist()
    SK = str(SK)
    KU = qs.stats.kurtosis(returns)
    KU = round(KU,2)
    KU = KU.tolist()
    KU = str(KU)
    TA = tail_ratio(returns)
    TA = round(TA,2)
    TA = str(TA)
    CSR = qs.stats.common_sense_ratio(returns)
    CSR = round(CSR,2)
    CSR = CSR.tolist()
    CSR = str(CSR)
    VAR = qs.stats.value_at_risk(returns, sigma=sigma_value, confidence=confidence_value)
    VAR = np.round(VAR, decimals=2)
    VAR = str(VAR*100)+' %'
    # alpha/beta versus the benchmark (same call evaluated twice)
    AL = alpha_beta(returns, benchmark, risk_free=rf)
    AL = AL[0]
    AL = round(AL,2)
    BTA = alpha_beta(returns, benchmark, risk_free=rf)
    BTA = BTA[1]
    BTA = round(BTA,2)
    # fraction of days with a positive return
    def condition(x):
        return x > 0
    win = sum(condition(x) for x in returns)
    total = len(returns)
    win_ratio = win/total
    win_ratio = win_ratio*100
    win_ratio = round(win_ratio,2)
    IR = information_ratio(returns, benchmark.iloc[:,0])
    IR = round(IR,2)
    # assemble and display the metrics table
    data = {'':['Annual return', 'Cumulative return', 'Annual volatility','Winning day ratio', 'Sharpe ratio','Calmar ratio', 'Information ratio', 'Stability', 'Max Drawdown','Sortino ratio','Skew', 'Kurtosis', 'Tail Ratio', 'Common sense ratio', 'Daily value at risk',
            'Alpha', 'Beta'
            ],
        'Backtest':[CAGR, CUM, VOL,f'{win_ratio}%', SR, CR, IR, STABILITY, MD, SOR, SK, KU, TA, CSR, VAR, AL, BTA]}
    # Create DataFrame
    df = pd.DataFrame(data)
    df.set_index('', inplace=True)
    df.style.set_properties(**{'background-color': 'white',
                            'color': 'black',
                            'border-color':'black'})
    display(df)
    if rebalance == True:
        # NOTE(review): bare ``df`` is a no-op expression statement.
        df
    y = []
    for x in returns:
        y.append(x)
    arr = np.array(y)
    # NOTE(review): the next two lines are also no-op expression statements.
    arr
    returns.index
    # colour daily bars by sign: blue for gains, grey for losses
    my_color = np.where(arr>=0, 'blue', 'grey')
    plt.figure(figsize=(30,8))
    plt.vlines(x=returns.index, ymin=0, ymax=arr, color=my_color, alpha=0.4)
    plt.title('Returns')
    return qs.plots.returns(returns,benchmark, cumulative=True), qs.plots.monthly_heatmap(returns), qs.plots.drawdown(returns), qs.plots.drawdowns_periods(returns), qs.plots.rolling_volatility(returns), qs.plots.rolling_sharpe(returns), qs.plots.rolling_beta(returns, benchmark)
#---------------------------------------------------------------------------------------------------------------------------------------------------------------
def oracle(my_portfolio, prediction_days=None, based_on='Adj Close'):
logger = logging.getLogger()
warnings.simplefilter(action='ignore', category=FutureWarning)
filterwarnings('ignore')
logging.disable(logging.INFO)
mape_df = pd.DataFrame()
mape_df = mape_df.append({'Exponential smoothing' : 0, 'Prophet' : 0, 'Auto-ARIMA' : 0, 'Theta(2)':0, 'ARIMA' : 0, 'FFT' : 0, 'FourTheta' : 0, 'NaiveDrift':0, 'NaiveMean' : 0, 'NaiveSeasonal':0 },
ignore_index = True)
final_df = pd.DataFrame()
final_df = final_df.append({'Exponential smoothing' : 0, 'Prophet' : 0, 'Auto-ARIMA' : 0, 'Theta(2)':0, 'ARIMA' : 0, 'FFT' : 0, 'FourTheta' : 0, 'NaiveDrift':0, 'NaiveMean' : 0, 'NaiveSeasonal':0 },
ignore_index = True)
for asset in my_portfolio.portfolio:
result = pd.DataFrame()
df = web.DataReader(asset, data_source='yahoo', start = my_portfolio.start_date, end= my_portfolio.end_date)
df = pd.DataFrame(df)
df.reset_index(level=0, inplace=True)
def eval_model(model):
model.fit(train)
forecast = model.predict(len(val))
result[model] = [mape(val, forecast)]
prediction = pd.DataFrame()
def predict(model):
model.fit(train)
forecast = model.predict(len(val))
pred = model.predict(prediction_days)
b = [str(pred[-1])]
b = [words for segments in b for words in segments.split()]
b = float(b[2])
prediction[model] = [str(round(((b-start_value)/start_value)*100,3))+' %']
series = TimeSeries.from_dataframe(df, 'Date', based_on, freq='D')
series = fill_missing_values(series)
if prediction_days==None:
x = 1
while x/(len(series)+x) < 0.3:
x+=1
prediction_days = x
train_index = round(len(df.index)*0.7)
train_date = df.loc[[train_index]]['Date'].values
date = str(train_date[0])[:10]
date = date.replace('-', '')
timestamp = date+'000000'
train, val = series.split_before(pd.Timestamp(timestamp))
eval_model(ExponentialSmoothing())
eval_model(Prophet())
eval_model(AutoARIMA())
eval_model(Theta())
eval_model(ARIMA())
eval_model(FFT())
eval_model(FourTheta())
eval_model(NaiveDrift())
eval_model(NaiveMean())
eval_model(NaiveSeasonal())
result.columns = ['Exponential smoothing','Prophet', 'Auto-ARIMA', 'Theta(2)', 'ARIMA', 'FFT','FourTheta','NaiveDrift','NaiveMean', 'NaiveSeasonal']
result.index = [asset]
mape_df = pd.concat([result, mape_df])
| |
'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-d8', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-d9', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-d0', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-d15', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': 'dayc2', 'status': 'ok'},
{'date': 'dayc3', 'status': 'ok'},
{'date': 'dayc4', 'status': 'ok'},
{'date': 'dayc5', 'status': 'warning'},
{'date': 'dayc6', 'status': 'fail'},
{'date': 'dayc7', 'status': 'ok'},
{'date': 'dayc8', 'status': 'ok'},
{'date': 'dayc9', 'status': 'warning'},
{'date': 'dayc0', 'status': 'ok'},
{'date': 'dayc11', 'status': 'ok'},
{'date': 'dayc12', 'status': 'noexec'},
{'date': 'dayd1', 'status': 'warning'},
{'date': 'dayd2', 'status': 'ok'},
{'date': 'dayd3', 'status': 'ok'},
{'date': 'dayd4', 'status': 'ok'},
{'date': 'dayd5', 'status': 'warning'},
{'date': 'dayd6', 'status': 'fail'},
{'date': 'dayd7', 'status': 'ok'},
{'date': 'dayd8', 'status': 'ok'},
{'date': 'dayd9', 'status': 'warning'},
{'date': 'dayd0', 'status': 'ok'},
{'date': 'dayd11', 'status': 'ok'},
{'date': 'dayd12', 'status': 'noexec'}
]},
{'name': 'name-d16', 'data': [
{'date': 'day1', 'status': 'warning'},
{'date': 'day2', 'status': 'ok'},
{'date': 'day3', 'status': 'ok'},
{'date': 'day4', 'status': 'ok'},
{'date': 'day5', 'status': 'warning'},
{'date': 'day6', 'status': 'fail'},
{'date': 'day7', 'status': 'ok'},
{'date': 'day8', 'status': 'ok'},
{'date': 'day9', 'status': 'warning'},
{'date': 'day0', 'status': 'ok'},
{'date': 'day11', 'status': 'ok'},
{'date': 'day12', 'status': 'noexec'},
{'date': 'daya1', 'status': 'warning'},
{'date': 'daya2', 'status': 'ok'},
{'date': 'daya3', 'status': 'ok'},
{'date': 'daya4', 'status': 'ok'},
{'date': 'daya5', 'status': 'warning'},
{'date': 'daya6', 'status': 'fail'},
{'date': 'daya7', 'status': 'ok'},
{'date': 'daya8', 'status': 'ok'},
{'date': 'daya9', 'status': 'warning'},
{'date': 'daya0', 'status': 'ok'},
{'date': 'daya11', 'status': 'ok'},
{'date': 'daya12', 'status': 'noexec'},
{'date': 'dayb1', 'status': 'warning'},
{'date': 'dayb2', 'status': 'ok'},
{'date': 'dayb3', 'status': 'ok'},
{'date': 'dayb4', 'status': 'ok'},
{'date': 'dayb5', 'status': 'warning'},
{'date': 'dayb6', 'status': 'fail'},
{'date': 'dayb7', 'status': 'ok'},
{'date': 'dayb8', 'status': 'ok'},
{'date': 'dayb9', 'status': 'warning'},
{'date': 'dayb0', 'status': 'ok'},
{'date': 'dayb11', 'status': 'ok'},
{'date': 'dayb12', 'status': 'noexec'},
{'date': 'dayc1', 'status': 'warning'},
{'date': | |
horizontal rules between rows in report mode."""
# --- UltimateListCtrl window styles -----------------------------------------
# Most styles mirror the corresponding wx.LC_* constants; the plain hex
# values are ULC-specific extensions with no wx equivalent.
ULC_ICON = wx.LC_ICON
ULC_SMALL_ICON = wx.LC_SMALL_ICON
ULC_LIST = wx.LC_LIST
ULC_REPORT = wx.LC_REPORT
ULC_TILE = 0x10000
ULC_ALIGN_TOP = wx.LC_ALIGN_TOP
ULC_ALIGN_LEFT = wx.LC_ALIGN_LEFT
ULC_AUTOARRANGE = wx.LC_AUTOARRANGE
ULC_VIRTUAL = wx.LC_VIRTUAL
ULC_EDIT_LABELS = wx.LC_EDIT_LABELS
ULC_NO_HEADER = wx.LC_NO_HEADER
ULC_NO_SORT_HEADER = wx.LC_NO_SORT_HEADER
ULC_SINGLE_SEL = wx.LC_SINGLE_SEL
ULC_SORT_ASCENDING = wx.LC_SORT_ASCENDING
ULC_SORT_DESCENDING = wx.LC_SORT_DESCENDING
ULC_NO_HIGHLIGHT = 0x20000
ULC_STICKY_HIGHLIGHT = 0x40000
ULC_STICKY_NOSELEVENT = 0x80000
ULC_SEND_LEFTCLICK = 0x100000
ULC_HAS_VARIABLE_ROW_HEIGHT = 0x200000
ULC_AUTO_CHECK_CHILD = 0x400000 # only meaningful for checkboxes
ULC_AUTO_TOGGLE_CHILD = 0x800000 # only meaningful for checkboxes
ULC_AUTO_CHECK_PARENT = 0x1000000 # only meaningful for checkboxes
ULC_SHOW_TOOLTIPS = 0x2000000 # shows tooltips on items with ellipsis (...)
ULC_HOT_TRACKING = 0x4000000 # enable hot tracking on mouse motion
ULC_BORDER_SELECT = 0x8000000 # changes border colour when an item is selected, instead of highlighting the item
ULC_TRACK_SELECT = 0x10000000 # Enables hot-track selection in a list control. Hot track selection means that an item
                              # is automatically selected when the cursor remains over the item for a certain period
                              # of time. The delay is retrieved on Windows using the win32api call
                              # win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERTIME), and is defaulted to 400ms
                              # on other platforms. This style applies to all styles of UltimateListCtrl.
ULC_HEADER_IN_ALL_VIEWS = 0x20000000 # Show column headers in all view modes
ULC_NO_FULL_ROW_SELECT = 0x40000000 # When an item is selected, only the item in the first column is highlighted
ULC_FOOTER = 0x80000000 # Show a footer too (only when header is present)
ULC_USER_ROW_HEIGHT = 0x100000000 # Allows to set a custom row height (one value for all the items, only in report mode).
# Convenience masks built from the style bits above.
ULC_MASK_TYPE = ULC_ICON | ULC_SMALL_ICON | ULC_LIST | ULC_REPORT | ULC_TILE
ULC_MASK_ALIGN = ULC_ALIGN_TOP | ULC_ALIGN_LEFT
ULC_MASK_SORT = ULC_SORT_ASCENDING | ULC_SORT_DESCENDING
# for compatibility only
ULC_USER_TEXT = ULC_VIRTUAL
# Omitted because
# (a) too much detail
# (b) not enough style flags
# (c) not implemented anyhow in the generic version
#
# ULC_NO_SCROLL
# ULC_NO_LABEL_WRAP
# ULC_OWNERDRAW_FIXED
# ULC_SHOW_SEL_ALWAYS
# Mask flags to tell app/GUI what fields of UltimateListItem are valid
ULC_MASK_STATE = wx.LIST_MASK_STATE
ULC_MASK_TEXT = wx.LIST_MASK_TEXT
ULC_MASK_IMAGE = wx.LIST_MASK_IMAGE
ULC_MASK_DATA = wx.LIST_MASK_DATA
ULC_SET_ITEM = wx.LIST_SET_ITEM
ULC_MASK_WIDTH = wx.LIST_MASK_WIDTH
ULC_MASK_FORMAT = wx.LIST_MASK_FORMAT
# ULC-specific mask bits (no wx equivalent)
ULC_MASK_FONTCOLOUR = 0x0080
ULC_MASK_FONT = 0x0100
ULC_MASK_BACKCOLOUR = 0x0200
ULC_MASK_KIND = 0x0400
ULC_MASK_ENABLE = 0x0800
ULC_MASK_CHECK = 0x1000
ULC_MASK_HYPERTEXT = 0x2000
ULC_MASK_WINDOW = 0x4000
ULC_MASK_PYDATA = 0x8000
ULC_MASK_SHOWN = 0x10000
ULC_MASK_RENDERER = 0x20000
ULC_MASK_OVERFLOW = 0x40000
ULC_MASK_FOOTER_TEXT = 0x80000
ULC_MASK_FOOTER_IMAGE = 0x100000
ULC_MASK_FOOTER_FORMAT = 0x200000
ULC_MASK_FOOTER_FONT = 0x400000
ULC_MASK_FOOTER_CHECK = 0x800000
ULC_MASK_FOOTER_KIND = 0x1000000
ULC_MASK_TOOLTIP = 0x2000000
# State flags for indicating the state of an item
ULC_STATE_DONTCARE = wx.LIST_STATE_DONTCARE
ULC_STATE_DROPHILITED = wx.LIST_STATE_DROPHILITED # MSW only
ULC_STATE_FOCUSED = wx.LIST_STATE_FOCUSED
ULC_STATE_SELECTED = wx.LIST_STATE_SELECTED
ULC_STATE_CUT = wx.LIST_STATE_CUT # MSW only
ULC_STATE_DISABLED = wx.LIST_STATE_DISABLED # OS2 only
ULC_STATE_FILTERED = wx.LIST_STATE_FILTERED # OS2 only
ULC_STATE_INUSE = wx.LIST_STATE_INUSE # OS2 only
ULC_STATE_PICKED = wx.LIST_STATE_PICKED # OS2 only
ULC_STATE_SOURCE = wx.LIST_STATE_SOURCE # OS2 only
# Hit test flags, used in HitTest
ULC_HITTEST_ABOVE = wx.LIST_HITTEST_ABOVE # Above the client area.
ULC_HITTEST_BELOW = wx.LIST_HITTEST_BELOW # Below the client area.
ULC_HITTEST_NOWHERE = wx.LIST_HITTEST_NOWHERE # In the client area but below the last item.
ULC_HITTEST_ONITEMICON = wx.LIST_HITTEST_ONITEMICON # On the bitmap associated with an item.
ULC_HITTEST_ONITEMLABEL = wx.LIST_HITTEST_ONITEMLABEL # On the label (string) associated with an item.
ULC_HITTEST_ONITEMRIGHT = wx.LIST_HITTEST_ONITEMRIGHT # In the area to the right of an item.
ULC_HITTEST_ONITEMSTATEICON = wx.LIST_HITTEST_ONITEMSTATEICON # On the state icon for a tree view item that is in a user-defined state.
ULC_HITTEST_TOLEFT = wx.LIST_HITTEST_TOLEFT # To the left of the client area.
ULC_HITTEST_TORIGHT = wx.LIST_HITTEST_TORIGHT # To the right of the client area.
ULC_HITTEST_ONITEMCHECK = 0x1000 # On the checkbox (if any)
# Composite flag: any part of the item proper (icon, label, state icon, checkbox).
ULC_HITTEST_ONITEM = ULC_HITTEST_ONITEMICON | ULC_HITTEST_ONITEMLABEL | ULC_HITTEST_ONITEMSTATEICON | ULC_HITTEST_ONITEMCHECK
# Flags for GetNextItem (MSW only except ULC_NEXT_ALL)
ULC_NEXT_ABOVE = wx.LIST_NEXT_ABOVE # Searches for an item above the specified item
ULC_NEXT_ALL = wx.LIST_NEXT_ALL # Searches for subsequent item by index
ULC_NEXT_BELOW = wx.LIST_NEXT_BELOW # Searches for an item below the specified item
ULC_NEXT_LEFT = wx.LIST_NEXT_LEFT # Searches for an item to the left of the specified item
ULC_NEXT_RIGHT = wx.LIST_NEXT_RIGHT # Searches for an item to the right of the specified item
# Alignment flags for Arrange (MSW only except ULC_ALIGN_LEFT)
ULC_ALIGN_DEFAULT = wx.LIST_ALIGN_DEFAULT
ULC_ALIGN_SNAP_TO_GRID = wx.LIST_ALIGN_SNAP_TO_GRID
# Column format (MSW only except ULC_FORMAT_LEFT)
ULC_FORMAT_LEFT = wx.LIST_FORMAT_LEFT
ULC_FORMAT_RIGHT = wx.LIST_FORMAT_RIGHT
ULC_FORMAT_CENTRE = wx.LIST_FORMAT_CENTRE
ULC_FORMAT_CENTER = ULC_FORMAT_CENTRE
# Autosize values for SetColumnWidth
ULC_AUTOSIZE = wx.LIST_AUTOSIZE
ULC_AUTOSIZE_USEHEADER = wx.LIST_AUTOSIZE_USEHEADER # partly supported by generic version
ULC_AUTOSIZE_FILL = -3
# Flag values for GetItemRect
ULC_RECT_BOUNDS = wx.LIST_RECT_BOUNDS
ULC_RECT_ICON = wx.LIST_RECT_ICON
ULC_RECT_LABEL = wx.LIST_RECT_LABEL
# Flag values for FindItem (MSW only)
ULC_FIND_UP = wx.LIST_FIND_UP
ULC_FIND_DOWN = wx.LIST_FIND_DOWN
ULC_FIND_LEFT = wx.LIST_FIND_LEFT
ULC_FIND_RIGHT = wx.LIST_FIND_RIGHT
# Items/subitems rect
ULC_GETSUBITEMRECT_WHOLEITEM = wx.LIST_GETSUBITEMRECT_WHOLEITEM
# ----------------------------------------------------------------------------
# UltimateListCtrl event macros
# ----------------------------------------------------------------------------
# Event types re-used verbatim from wx
wxEVT_COMMAND_LIST_BEGIN_DRAG = wx.wxEVT_COMMAND_LIST_BEGIN_DRAG
wxEVT_COMMAND_LIST_BEGIN_RDRAG = wx.wxEVT_COMMAND_LIST_BEGIN_RDRAG
wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT = wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT
wxEVT_COMMAND_LIST_END_LABEL_EDIT = wx.wxEVT_COMMAND_LIST_END_LABEL_EDIT
wxEVT_COMMAND_LIST_DELETE_ITEM = wx.wxEVT_COMMAND_LIST_DELETE_ITEM
wxEVT_COMMAND_LIST_DELETE_ALL_ITEMS = wx.wxEVT_COMMAND_LIST_DELETE_ALL_ITEMS
wxEVT_COMMAND_LIST_ITEM_SELECTED = wx.wxEVT_COMMAND_LIST_ITEM_SELECTED
wxEVT_COMMAND_LIST_ITEM_DESELECTED = wx.wxEVT_COMMAND_LIST_ITEM_DESELECTED
wxEVT_COMMAND_LIST_KEY_DOWN = wx.wxEVT_COMMAND_LIST_KEY_DOWN
wxEVT_COMMAND_LIST_INSERT_ITEM = wx.wxEVT_COMMAND_LIST_INSERT_ITEM
wxEVT_COMMAND_LIST_COL_CLICK = wx.wxEVT_COMMAND_LIST_COL_CLICK
wxEVT_COMMAND_LIST_ITEM_RIGHT_CLICK = wx.wxEVT_COMMAND_LIST_ITEM_RIGHT_CLICK
wxEVT_COMMAND_LIST_ITEM_MIDDLE_CLICK = wx.wxEVT_COMMAND_LIST_ITEM_MIDDLE_CLICK
wxEVT_COMMAND_LIST_ITEM_ACTIVATED = wx.wxEVT_COMMAND_LIST_ITEM_ACTIVATED
wxEVT_COMMAND_LIST_CACHE_HINT = wx.wxEVT_COMMAND_LIST_CACHE_HINT
wxEVT_COMMAND_LIST_COL_RIGHT_CLICK = wx.wxEVT_COMMAND_LIST_COL_RIGHT_CLICK
wxEVT_COMMAND_LIST_COL_BEGIN_DRAG = wx.wxEVT_COMMAND_LIST_COL_BEGIN_DRAG
wxEVT_COMMAND_LIST_COL_DRAGGING = wx.wxEVT_COMMAND_LIST_COL_DRAGGING
wxEVT_COMMAND_LIST_COL_END_DRAG = wx.wxEVT_COMMAND_LIST_COL_END_DRAG
wxEVT_COMMAND_LIST_ITEM_FOCUSED = wx.wxEVT_COMMAND_LIST_ITEM_FOCUSED
# New event types introduced by UltimateListCtrl (footers, checkboxes,
# hyperlinks, item drag-end, column checking)
wxEVT_COMMAND_LIST_FOOTER_CLICK = wx.NewEventType()
wxEVT_COMMAND_LIST_FOOTER_RIGHT_CLICK = wx.NewEventType()
wxEVT_COMMAND_LIST_FOOTER_CHECKING = wx.NewEventType()
wxEVT_COMMAND_LIST_FOOTER_CHECKED = wx.NewEventType()
wxEVT_COMMAND_LIST_ITEM_LEFT_CLICK = wx.NewEventType()
wxEVT_COMMAND_LIST_ITEM_CHECKING = wx.NewEventType()
wxEVT_COMMAND_LIST_ITEM_CHECKED = wx.NewEventType()
wxEVT_COMMAND_LIST_ITEM_HYPERLINK = wx.NewEventType()
wxEVT_COMMAND_LIST_END_DRAG = wx.NewEventType()
wxEVT_COMMAND_LIST_COL_CHECKING = wx.NewEventType()
wxEVT_COMMAND_LIST_COL_CHECKED = wx.NewEventType()
# Event binders re-used from wx
EVT_LIST_BEGIN_DRAG = wx.EVT_LIST_BEGIN_DRAG
EVT_LIST_BEGIN_RDRAG = wx.EVT_LIST_BEGIN_RDRAG
EVT_LIST_BEGIN_LABEL_EDIT = wx.EVT_LIST_BEGIN_LABEL_EDIT
EVT_LIST_END_LABEL_EDIT = wx.EVT_LIST_END_LABEL_EDIT
EVT_LIST_DELETE_ITEM = wx.EVT_LIST_DELETE_ITEM
EVT_LIST_DELETE_ALL_ITEMS = wx.EVT_LIST_DELETE_ALL_ITEMS
EVT_LIST_KEY_DOWN = wx.EVT_LIST_KEY_DOWN
EVT_LIST_INSERT_ITEM = wx.EVT_LIST_INSERT_ITEM
EVT_LIST_COL_CLICK = wx.EVT_LIST_COL_CLICK
EVT_LIST_COL_RIGHT_CLICK = wx.EVT_LIST_COL_RIGHT_CLICK
EVT_LIST_COL_BEGIN_DRAG = wx.EVT_LIST_COL_BEGIN_DRAG
EVT_LIST_COL_END_DRAG = wx.EVT_LIST_COL_END_DRAG
EVT_LIST_COL_DRAGGING = wx.EVT_LIST_COL_DRAGGING
EVT_LIST_ITEM_SELECTED = wx.EVT_LIST_ITEM_SELECTED
EVT_LIST_ITEM_DESELECTED = wx.EVT_LIST_ITEM_DESELECTED
EVT_LIST_ITEM_RIGHT_CLICK = wx.EVT_LIST_ITEM_RIGHT_CLICK
EVT_LIST_ITEM_MIDDLE_CLICK = wx.EVT_LIST_ITEM_MIDDLE_CLICK
EVT_LIST_ITEM_ACTIVATED = wx.EVT_LIST_ITEM_ACTIVATED
EVT_LIST_ITEM_FOCUSED = wx.EVT_LIST_ITEM_FOCUSED
EVT_LIST_CACHE_HINT = wx.EVT_LIST_CACHE_HINT
# Binders for the new ULC event types above
EVT_LIST_ITEM_LEFT_CLICK = wx.PyEventBinder(wxEVT_COMMAND_LIST_ITEM_LEFT_CLICK, 1)
EVT_LIST_ITEM_CHECKING = wx.PyEventBinder(wxEVT_COMMAND_LIST_ITEM_CHECKING, 1)
EVT_LIST_ITEM_CHECKED = wx.PyEventBinder(wxEVT_COMMAND_LIST_ITEM_CHECKED, 1)
EVT_LIST_ITEM_HYPERLINK = wx.PyEventBinder(wxEVT_COMMAND_LIST_ITEM_HYPERLINK, 1)
EVT_LIST_END_DRAG = wx.PyEventBinder(wxEVT_COMMAND_LIST_END_DRAG, 1)
EVT_LIST_COL_CHECKING = wx.PyEventBinder(wxEVT_COMMAND_LIST_COL_CHECKING, 1)
EVT_LIST_COL_CHECKED = wx.PyEventBinder(wxEVT_COMMAND_LIST_COL_CHECKED, 1)
EVT_LIST_FOOTER_CLICK = wx.PyEventBinder(wxEVT_COMMAND_LIST_FOOTER_CLICK, 1)
EVT_LIST_FOOTER_RIGHT_CLICK = wx.PyEventBinder(wxEVT_COMMAND_LIST_FOOTER_RIGHT_CLICK, 1)
EVT_LIST_FOOTER_CHECKING = wx.PyEventBinder(wxEVT_COMMAND_LIST_FOOTER_CHECKING, 1)
EVT_LIST_FOOTER_CHECKED = wx.PyEventBinder(wxEVT_COMMAND_LIST_FOOTER_CHECKED, 1)
# NOTE: If using the wxExtListBox visual attributes works everywhere then this can
# be removed, as well as the #else case below.
_USE_VISATTR = 0
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# scrolling step, in pixels
SCROLL_UNIT_X = 15
SCROLL_UNIT_Y = 15
# the spacing between the lines (in report mode)
LINE_SPACING = 0
# extra margins around the text label
EXTRA_WIDTH = 4
EXTRA_HEIGHT = 4
if wx.Platform == "__WXGTK__":
    EXTRA_HEIGHT = 6
# margin between the window and the items
EXTRA_BORDER_X = 2
EXTRA_BORDER_Y = 2
# offset for the header window
HEADER_OFFSET_X = 1
HEADER_OFFSET_Y = 1
# margin between rows of icons in [small] icon view
MARGIN_BETWEEN_ROWS = 6
# when autosizing the columns, add some slack
AUTOSIZE_COL_MARGIN = 10
# default and minimal widths for the header columns
WIDTH_COL_DEFAULT = 80
WIDTH_COL_MIN = 10
# the space between the image and the text in the report mode
IMAGE_MARGIN_IN_REPORT_MODE = 5
# the space between the image and the text in the report mode in header
HEADER_IMAGE_MARGIN_IN_REPORT_MODE = 2
# and the width of the icon, if any
MARGIN_BETWEEN_TEXT_AND_ICON = 2
# Background Image Style
_StyleTile = 0
_StyleStretch = 1
# Windows Vista Colours
_rgbSelectOuter = wx.Colour(170, 200, 245)
_rgbSelectInner = wx.Colour(230, 250, 250)
_rgbSelectTop = wx.Colour(210, 240, 250)
_rgbSelectBottom = wx.Colour(185, 215, 250)
_rgbNoFocusTop = wx.Colour(250, 250, 250)
_rgbNoFocusBottom = wx.Colour(235, 235, 235)
_rgbNoFocusOuter = wx.Colour(220, 220, 220)
_rgbNoFocusInner = wx.Colour(245, 245, 245)
# Mouse hover time for track selection, in milliseconds; on Windows the
# system setting is used when the win32 extensions are available.
HOVER_TIME = 400
if wx.Platform == "__WXMSW__":
    try:
        import win32gui, win32con
        HOVER_TIME = win32gui.SystemParametersInfo(win32con.SPI_GETMOUSEHOVERTIME)
    except ImportError:
        pass
# For PyImageList
IL_FIXED_SIZE = 0
IL_VARIABLE_SIZE = 1
# Python integers, to make long types to work with CreateListItem
INTEGER_TYPES = [types.IntType, types.LongType]
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
# Utility method
def to_list(input):
    """
    Converts the input data into a Python list.

    :param `input`: can be an integer (`int` or `long`) or a Python list (in
     which case `input` is returned unchanged).

    :raise `Exception`: if `input` is neither an integer nor a list.
    """
    if isinstance(input, types.ListType):
        return input
    # Use INTEGER_TYPES so that long values are accepted as well as ints,
    # consistent with CreateListItem (previously only IntType was checked,
    # so a long raised the exception below).
    elif type(input) in INTEGER_TYPES:
        return [input]
    else:
        raise Exception("Invalid parameter passed to `to_list`: only integers and list are accepted.")
def CheckVariableRowHeight(listCtrl, text):
    """
    Checks whether `text` contains multiline strings and whether the
    `listCtrl` window style is compatible with multiline text items.

    :param `listCtrl`: an instance of :class:`UltimateListCtrl`;
    :param `text`: the text to analyze.
    """
    # Multiline items are fine when variable row heights are enabled.
    if listCtrl.HasAGWFlag(ULC_HAS_VARIABLE_ROW_HEIGHT):
        return
    if "\n" in text:
        raise Exception("Multiline text items are not allowed without the ULC_HAS_VARIABLE_ROW_HEIGHT style.")
def CreateListItem(itemOrId, col):
    """
    Creates a new instance of :class:`UltimateListItem`.

    :param `itemOrId`: can be an instance of :class:`UltimateListItem` or an integer;
    :param `col`: the item column.
    """
    # An existing item is passed through untouched.
    if type(itemOrId) not in INTEGER_TYPES:
        return itemOrId
    # Otherwise build a fresh item for the given id/column.
    newItem = UltimateListItem()
    newItem._itemId = itemOrId
    newItem._col = col
    return newItem
# ----------------------------------------------------------------------------
def MakeDisabledBitmap(original):
    """
    Creates a disabled-looking bitmap starting from the input one.

    :param `original`: an instance of :class:`Bitmap` to be greyed-out.
    """
    grey_image = original.ConvertToImage().ConvertToGreyscale()
    return wx.BitmapFromImage(grey_image)
# ----------------------------------------------------------------------------
#----------------------------------------------------------------------
def GetdragcursorData():
    """Return the drag and drop cursor image as a decompressed stream of characters."""
    # zlib-compressed image bytes embedded in the source (img2py-style);
    # the decompressed data is turned into a wx.Image by
    # GetdragcursorImage(). NOTE(review): the underlying image format is
    # not visible here -- presumably PNG; confirm before re-encoding.
    return zlib.decompress(
"x\xda\xeb\x0c\xf0s\xe7\xe5\x92\xe2b``\xe0\xf5\xf4p\t\x02\xd2\xa2@,\xcf\xc1\
\x06$9z\xda>\x00)\xce\x02\x8f\xc8b\x06\x06na\x10fd\x985G\x02(\xd8W\xe2\x1aQ\
\xe2\x9c\x9f\x9b\x9b\x9aW\xc2\x90\xec\x11\xe4\xab\x90\x9cQ\x9a\x97\x9d\x93\
\x9a\xa7`l\xa4\x90\x99\x9e\x97_\x94\x9a\xc2\xeb\x18\xec\xec\xe9i\xa5\xa0\xa7\
W\xa5\xaa\x07\x01P:7\x1eH\xe4\xe8\xe9\xd9\x808\x11\xbc\x1e\xae\x11V\n\x06@`\
\xeehd\n\xa2-\x0c,\x8cA\xb4\x9b\t\x94o\xe2b\x08\xa2\xcd\\L\xdd@\xb4\xab\x85\
\x993\x886v\xb6p\x02\xd1\x86N\xa6\x16\x12\xf7~\xdf\x05\xbal\xa9\xa7\x8bcH\
\xc5\x9c3W9\xb9\x1a\x14\x04X/\xec\xfc\xbft\xed\x02\xa5\xf4\xc2m\xfa*<N\x17??\
\x0frqy\x9c\xd3\xb2f5\xaf\x89\x8f9Gk\xbc\x08\xa7\xbf\x06\x97\x98\x06S\xd8E\
\xbd\x9cE\xb2\x15\x9da\x89\xe2k\x0f\x9c\xb6|\x1a\xea\x14X\x1d6G\x83E\xe7\x9c\
\x1dO\xa8\xde\xb6\x84l\x15\x9eS\xcf\xc2tf\x15\xde\xf7\xb5\xb2]\xf0\x96+\xf5@\
D\x90\x1d\xef19_\xf5\xde5y\xb6+\xa7\xdeZ\xfbA\x9bu\x9f`\xffD\xafYn\xf6\x9eW\
\xeb>\xb6\x7f\x98\\U\xcb\xf5\xd5\xcb\x9a'\xe7\xf4\xd7\x0b\xba\x9e\xdb\x17E\
\xfdf\x97Z\xcb\xcc\xc0\xf0\xff?3\xc3\x92\xabN\x8arB\xc7\x8f\x03\x1d\xcc\xe0\
\xe9\xea\xe7\xb2\xce)\xa1\t\x00B7|\x00" )
def GetdragcursorBitmap():
    """Return the drag and drop cursor image as a :class:`Bitmap`."""
    image = GetdragcursorImage()
    return wx.BitmapFromImage(image)
def GetdragcursorImage():
    """Return the drag and drop cursor image as a :class:`Image`."""
    data = GetdragcursorData()
    return wx.ImageFromStream(cStringIO.StringIO(data))
#-----------------------------------------------------------------------------
# PyImageList
#-----------------------------------------------------------------------------
class PyImageList(object):
"""
A :class:`PyImageList` contains a list of images. Images can have masks for
transparent drawing, and can be made from a variety of sources including
bitmaps and icons.
:class:`PyImageList` is used in conjunction with :class:`UltimateListCtrl`.
| |
# -*- coding: utf-8 -*-
# Copyright 2018 Cereproc Ltd. (author: <NAME>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import logging
import math
import os
import sys
from lxml import etree
from . import txp
from . import vocoder
from . import gen
from . import pylib
class TangleVoice:
NumStates = 5 # number of duration states
# should not ever need to be changed
_phone_pos_fuzz = 0.1
_state_pos_fuzz = 0.2
""" Wrapper for pyIdlak to be used for TTS """
def __init__(self, voice_dir = None, loglvl = logging.WARN):
logging.basicConfig(level = loglvl)
self.log = logging.getLogger('tangle')
self._voicedir = None
self._lng = ''
self._acc = ''
self._spk = ''
self._region = ''
self._fshift = 0.005
self._voice_thresh = 0.8
if not voice_dir is None:
self.load_voice(voice_dir)
    def load_voice(self, voice_dir):
        """ Loads a voice from a directory

            Reads <voice_dir>/voice.conf (simple "field=value" lines, '#'
            comments and blank lines ignored), stores the language /
            accent / speaker / region metadata, converts the known numeric
            fields, and finally loads the text-processing, generation,
            MLPG and vocoder components.

            :param voice_dir: path to the voice directory.
            :raise IOError: if the directory or its configuration cannot
                be read.
        """
        self._voicedir = os.path.abspath(str(voice_dir))
        self.log.info("Loading voice from {0}".format(self._voicedir))
        if not os.path.isdir(self._voicedir):
            raise IOError("Cannot find voice directory")
        voice_conf = {}
        _ignore_fields = ['tpdbvar']
        with open(os.path.join(self._voicedir, 'voice.conf')) as voice_conf_file:
            for line in voice_conf_file:
                line = line.strip()
                if not len(line) or line.startswith('#'):
                    continue
                try:
                    # split only on the first '=' so values may contain '='
                    field, val = line.split('=', 1)
                    if field not in _ignore_fields:
                        voice_conf[field] = val
                except ValueError:
                    raise IOError("cannot load voice configuration")
        # Mandatory metadata fields: missing ones are logged but not fatal.
        try:
            self._lng = voice_conf['lng']
            del voice_conf['lng']
        except KeyError:
            self.log.warn("voice configuration missing langage (lng)")
        try:
            self._acc = voice_conf['acc']
            del voice_conf['acc']
        except KeyError:
            self.log.warn("voice configuration missing accent (acc)")
        try:
            self._spk = voice_conf['spk']
            del voice_conf['spk']
        except KeyError:
            self.log.warn("voice configuration missing speaker (spk)")
        if 'region' in voice_conf:
            self._region = voice_conf['region']
            del voice_conf['region']
        # make sure all types are ok as we go
        def __load_fields(fields, fieldtype, fieldtypename):
            # Convert each named field with fieldtype(), storing it as the
            # corresponding '_'-prefixed attribute; conversion failures are
            # logged and the raw value is dropped either way.
            for field in fields:
                if field in voice_conf:
                    try:
                        val = fieldtype(voice_conf[field])
                        setattr(self, '_' + field, val)
                    except ValueError:
                        self.log.error(
                            "voice configuration cannot convert '{0}' to {1}".format(
                                field, fieldtypename))
                    finally:
                        del voice_conf[field]
        int_fields = ['srate', 'delta_order', 'mcep_order', 'bndap_order', 'fftlen']
        __load_fields(int_fields, int, 'integer')
        float_fields = ['voice_thresh', 'alpha', 'fshift']
        __load_fields(float_fields, float, 'float')
        # anything left over is unrecognised
        for k in voice_conf:
            self.log.warn("unknown voice configuration field '{0}'".format(k))
        self._load_txp()
        self._load_gen()
        self._load_mlpg()
        self._load_vocoder()
def speak(self, text, wav_filename = None):
""" Simple interface for speaking text, returns the waveform
if wav_filename is a file name, then
"""
cex = self.process_text(text)
durfeatures = self.cex_to_dnn_features(cex)
state_durations = self.generate_state_durations(durfeatures)
pitchfeatures = self.combine_durations_and_features(
state_durations, durfeatures)
pitch = self.generate_pitch(pitchfeatures)
acousticdnnfeatures = self.combine_pitch_and_features(
pitch, pitchfeatures)
acousticfeatures = self.generate_acoustic_features(acousticdnnfeatures)
waveform = self.vocode_acoustic_features(acousticfeatures, pitch,
wav_filename)
return waveform
def process_text(self, text, normalise=True, cex=True):
""" Process the input text
If normalise is True then the full normaliser is run.
If cex is True then context features are also run
Returns a txp XML document object
"""
self.log.debug("Processing input text")
text = str(text)
xmlparser = etree.XMLParser(encoding = 'utf8')
try:
etree.fromstring(text, parser = xmlparser)
except etree.XMLSyntaxError:
self.log.debug("Input is not valid xml, adding parent tags")
text = '<parent>' + text + '</parent>'
try:
etree.fromstring(text, parser = xmlparser)
except etree.XMLSyntaxError as e:
self.log.critical('Cannot parse input')
raise(e)
doc = txp.XMLDoc(text)
self.Tokeniser.process(doc)
self.PosTag.process(doc)
if normalise:
self.Normalise.process(doc)
self.Pauses.process(doc)
self.Phrasing.process(doc)
self.Pronounce.process(doc)
self.PostLex.process(doc)
self.Syllabify.process(doc)
if cex:
self.ContextExtraction.process(doc)
return doc
def cex_to_dnn_features(self, doc):
""" Converts a txp XML to dnn features """
self.log.debug("Converting processed text to DNN input features")
if not type(doc) == txp.XMLDoc:
raise ValueError("doc must be a txp XMLDoc")
features = gen.cex_to_feat(doc, self._cexfreqtable)
return features
def generate_state_durations(self, dnnfeatures, apply_postproc = True):
""" Takes the dnnfeatures and generates state durations in frames
The state durations are predicted in frames then go through some
post-processing. The return is an n x m matrix in the form of a
list of lists of doubles where n is number of phones and m is the
number of states
The post processing can be disabled to just produce the results of
the duration DNN prediction.
"""
self.log.debug("Generating state durations")
durations = collections.OrderedDict()
for spurtid, spurtfeatures in dnnfeatures.items():
self.log.debug('generating duration for ' + spurtid)
statedfeatures = self._add_state_feature(spurtfeatures)
durmatrix = self._durmodel.forward(statedfeatures)
if apply_postproc:
durations[spurtid] = self._post_duration_processing(durmatrix)
else:
durations[spurtid] = durmatrix
return durations
    def combine_durations_and_features(self, durations, dnnfeatures):
        """ Combines the state durations and full context features
            to form inputs for the pitch modelling

            Durations are in frames
            A fuzzy factor is introduced to positions

            :param durations: OrderedDict of spurtid -> per-phone state
                duration rows, as returned by generate_state_durations.
            :param dnnfeatures: OrderedDict of spurtid -> per-phone feature
                rows, as returned by cex_to_dnn_features.
            :return: OrderedDict of spurtid -> one feature row per frame,
                each row extended with state/phone position features.
        """
        self.log.debug("Combining predicted state durations with DNN features")
        combinedfeatures = collections.OrderedDict()
        for spurtid, spurtdurs in durations.items():
            combinedfeatures[spurtid] = []
            for phoneidx, statedurs in enumerate(spurtdurs):
                phnfeatures = dnnfeatures[spurtid][phoneidx]
                phndur = sum(statedurs)
                for stateidx, statedur in enumerate(statedurs):
                    for statepos in range(statedur):
                        # frame position within the whole phone
                        phnpos = sum(statedurs[:stateidx]) + statepos
                        fuzzy_statepos = self._fuzzy_position(
                            self._state_pos_fuzz, statepos, statedur)
                        fuzzy_phnpos = self._fuzzy_position(
                            self._phone_pos_fuzz, phnpos, phndur)
                        spos = [stateidx, statedur, fuzzy_statepos]
                        ppos = [phndur, fuzzy_phnpos]
                        combinedfeatures[spurtid].append(list(phnfeatures + spos + ppos))
                        # NOTE(review): phnpos is recomputed at the top of
                        # every iteration, so this increment appears to be
                        # dead code -- confirm before removing.
                        phnpos += 1
        return combinedfeatures
    def generate_pitch(self, dnnfeatures, mlpg = True, extract = True,
                       save_pdf_directory = ""):
        """ Predict the pitch features

            if mlpg is True then mlpg is applied
            if save_pdf_directory is set then the means and variances will
            be saved into that directory (used for debugging)
            if extract is True then only the first two columns are returned
            which are the voicing confidence and F0 respectively
            (note: extract is only consulted when mlpg is False; the MLPG
            branch always returns the MLPG output)
        """
        self.log.debug("Generating pitch values")
        pitch = collections.OrderedDict()
        for spurtid, spurtfeatures in dnnfeatures.items():
            self.log.debug('generating pitch for ' + spurtid)
            pitchmatrix = self._pitchmodel.forward(spurtfeatures)
            if mlpg:
                self.log.debug('applying MLPG to pitch')
                # only write pdf debug files when the target directory exists
                if os.path.isdir(save_pdf_directory):
                    pdffile = os.path.join(save_pdf_directory, spurtid + '.pitch.pdf')
                else:
                    pdffile = False
                pitch[spurtid] = self._apply_mlpg(pitchmatrix, 'logf0', pdffile)
            else:
                if extract:
                    # keep only the voicing confidence and F0 columns
                    for idx, row in enumerate(pitchmatrix):
                        pitchmatrix[idx] = row[:2]
                pitch[spurtid] = pitchmatrix
        return pitch
def combine_pitch_and_features(self, pitch, dnnfeatures):
""" Insert the pitch as the first two columns of the DNN features """
self.log.debug("Combining predicted pitch with DNN features")
combinedfeatures = collections.OrderedDict()
for spurtid in pitch.keys():
combinedfeatures[spurtid] = []
spurt_pitch = pitch[spurtid]
spurt_feats = dnnfeatures[spurtid]
for row_pitch, row_feats in zip(spurt_pitch, spurt_feats):
combinedfeatures[spurtid].append(row_pitch + row_feats)
return combinedfeatures
    def generate_acoustic_features(self, dnnfeatures, mlpg = True, extract = True,
                                   save_pdf_directory = ""):
        """ Predict the acoustic features

            if mlpg is True then mlpg is applied
            otherwise if extract is True then split out the MCEPs and Bndaps
            if save_pdf_directory is set then the means and variances will
            be saved into that directory (used for debugging)
        """
        self.log.debug("Generating acoustic features")
        acoustic = collections.OrderedDict()
        for spurtid, spurtfeatures in dnnfeatures.items():
            self.log.debug('generating acoustic features for ' + spurtid)
            acf = self._acousticmodel.forward(spurtfeatures)
            if not (mlpg or extract):
                # raw network output requested; no regrouping needed
                acoustic[spurtid] = acf
                continue
            # order in the matrix is mcep, bndap, mcep_d, bndap_d, mcep_dd, bndap_dd
            mcep_start = 0
            mcep_end = mcep_start + self.mcep_order + 1
            bndap_start = mcep_end
            bndap_end = bndap_start + self.bndap_order
            mcep_d_start = bndap_end
            mcep_d_end = mcep_d_start + self.mcep_order + 1
            bndap_d_start = mcep_d_end
            bndap_d_end = bndap_d_start + self.bndap_order
            mcep_dd_start = bndap_d_end
            mcep_dd_end = mcep_dd_start + self.mcep_order + 1
            bndap_dd_start = mcep_dd_end
            bndap_dd_end = bndap_dd_start + self.bndap_order
            mceps = []
            bndaps = []
            # regroup each frame row as static + delta + delta-delta per stream
            for row in acf:
                mceps.append(row[mcep_start:mcep_end] + row[mcep_d_start:mcep_d_end] + row[mcep_dd_start:mcep_dd_end])
                bndaps.append(row[bndap_start:bndap_end] + row[bndap_d_start:bndap_d_end] + row[bndap_dd_start:bndap_dd_end])
            if mlpg:
                self.log.debug('applying MLPG')
                # only write pdf debug files when the target directory exists
                if os.path.isdir(save_pdf_directory):
                    mcep_pdf_file = os.path.join(save_pdf_directory,
                                                 spurtid + '.mcep.pdf')
                    bndap_pdf_file = os.path.join(save_pdf_directory,
                                                  spurtid + '.bndap.pdf')
                else:
                    mcep_pdf_file = False
                    bndap_pdf_file = False
                mceps = self._apply_mlpg(mceps, 'mcep', mcep_pdf_file)
                bndaps = self._apply_mlpg(bndaps, 'bndap', bndap_pdf_file)
            else:
                # no MLPG: keep only the static coefficients
                mceps = [mrow[:self.mcep_order+1] for mrow in mceps]
                bndaps = [brow[:self.bndap_order] for brow in bndaps]
            # convert bndaps to decibels to be inline with other tools (predicted as log value)
            for fidx in range(len(bndaps)):
                for bidx, bval in enumerate(bndaps[fidx]):
                    if bval >= -.5:
                        bndaps[fidx][bidx] = 0.
                    else:
                        bndaps[fidx][bidx] = 20. * (bval + .5) / math.log(10)
            acoustic[spurtid] = {'mcep' : mceps, 'bndap': bndaps}
        return acoustic
def vocode_acoustic_features(self, acoutic_features, pitch,
mixed_excitation = True,
save_residual_directory = False,
wav_filename = None):
""" Vocode the acoustic features using MLSA
if mixed_excitation is set to False, then the residual is
generated without mixed excitation
if save_residual_directory is set then | |
# LP.py
# -*- coding: utf-8 -*-
"""
This is an implementation of a classical linear program solver based on the
two-phase simplex algorithm. We assume in the simplex algorithm that the box
restrictions are given as 0 <= x < +inf.
@author: Ina
"""
import numpy as np
from Exceptions import InfeasibleProblem, NoPivotException
class LP:
"""
An instance of LP offers the following:
- Fields:
c (np.array): Vector of coefficients.
A_eq (np.matrix): The matrix of equality constraints (row-wise)
b_eq (np.array): The vector of equality constraint values
A_geq (np.matrix): The matrix of inequality constraints (row-wise
greater or equal)
b_geq (np.array): The vector of inequality constraint values
solved (bool): Whether the Problem has been solved or not
solution (dict {"p": (np.matrix), "d": (np.matrix)} | np.matrix):
Has a value if solved. It is a dictionary iff the problem is
unbounded. All np.matrices occuring in this variable are
column-vectors, i.e. of shape (x,1).
optimal_value (numeric): Value of the objective function in the
optimum. Obviously solved needs to be true in order for this
to have a meaningful value.
multipliers (np.matrix): The optimal simplex multipliers of this
problem. In order for this to have a meaningful value
solved needs to be true and calculate_multipliers needs to be
true. It is a column vector, i.e. of dimension (x,1).
DEBUG (static, final, bool): If true we print the tableau and other
information to current steps in the algorithm.
- Functions:
get_solution:
- Arguments:
self
- Result:
If the Problem is unsolved (solved = False) yet it is
getting solved (solve() is getting called).
In either case it returns the dictionary
{"x": (dict {"p": (np.matrix), "d": (np.matrix)} |
np.matrix), "bounded": (bool)}
where x is the solution or a ray of unbounded decrease
and bounded is a bool indicating whether x is a solution
or a ray.
- Calls and Exceptions:
solve()
solve:
- Arguments:
self
- Result:
Uses 2-Phase Simplex Algorithm to solve the given linear
program. Sets the fields solution, optimal_value,
multipliers if possible or chosen to.
- Exceptions:
InfeasibleProblem: If the Problem to solve is infeasible
"""
DEBUG = False
def __init__(self, c, A_eq = None, b_eq = None,
A_geq = None, b_geq = None, calculate_multipliers = False, eps
= 1e-8):
self.c = c
self.A_eq = A_eq if A_eq is not None else np.matrix([])
self.b_eq = b_eq if b_eq is not None else np.array([])
self.A_geq = A_geq if A_geq is not None else np.matrix([])
self.b_geq = b_geq if b_geq is not None else np.array([])
self.solved = False
self.calculate_multipliers = calculate_multipliers
self.eps = eps
if LP.DEBUG:
print("min", "\t", c, "*", "x")
print("wrt", "\t", "A_eq * x = b_eq")
print("\t", "A_geq * x >= b_geq, where")
print("A_eq = ")
print(A_eq)
print("b_eq = ")
print(b_eq)
print("A_geq = ")
print(A_geq)
print("b_geq = ")
print(b_geq)
def get_solution(self):
if not self.solved:
self.solve()
return {"x": self.solution, "bounded": not type(self.solution) is dict}
def solve(self):
iterations = 0
# local function
def base_vector(i, dim):
v = np.zeros(dim)
v[i] = 1
return np.matrix(v).T
b_eq, b_geq = np.copy(self.b_eq), np.copy(self.b_geq)
A_eq, A_geq = np.copy(self.A_eq), np.copy(self.A_geq)
c = np.copy(self.c)
# if A_eq or A_geq is empty, set m_1 resp. m_2 to 0 (default row count
# is 1)
m_1, m_2 = b_eq.size, b_geq.size # number of respective constraints
m = m_1 + m_2 # number of all constraints
n = c.size # number of variables before inserting slack or artificial variables
# generate tableau
if A_eq.shape[1] != A_geq.shape[1]:
if A_eq.shape[1] == 0:
tableau = A_geq
elif A_geq.shape[1] == 0:
tableau = A_eq
else:
raise Exception("Invalid Argument: A_eq and A_geq must be of same column size or one needs to be empty!")
else: # A_eq.shape[1] == A_geq.shape[1]:
tableau = np.concatenate((
A_eq,
A_geq
), axis=0)
swapped = [] # list of indices of equality constraints whose rhs values
# were negative; these are used for calculating the signs of the multipliers
a = 0 # number of artificial variables inserted for geq constraints
# insert artificial variables if there are no geq constraints
if A_geq.shape[1] == 0:
for i in [i for i in range(0, m_1) if b_eq[i] < 0]:
tableau[i,:] = -tableau[i,:]
b_eq[i] = -b_eq[i]
swapped.append(i)
tableau = np.concatenate((
tableau,
np.identity(m_1),
np.matrix(b_eq).T
),axis=1) # done. Go to phase 1.
B = list(range(n, n + m_1)) # current basis variable indices
A = B.copy() # Set of artificial variable indices
# insert slack variables for geq constraints where necessary
else: # A_geq.shape[1] >= 1: (therefore m_2 >= 1)
if m_1 == 0:
tableau = np.concatenate((
tableau,
- np.identity(m_2)
), axis = 1)
else:
tableau = np.concatenate((
tableau,
np.concatenate((
np.zeros(shape = (m_1,m_2)),
- np.identity(m_2)
), axis=0)
), axis=1)
for i in [i for i in range(0, m_1) if b_eq[i] < 0]:
tableau[i,:] = -tableau[i,:]
b_eq[i] = -b_eq[i]
swapped.append(i)
# insert artificial variables for eq constraints
tableau = np.concatenate((
tableau,
np.concatenate((
np.identity(m_1),
np.zeros((m_2, m_1))
), axis=0)
), axis=1)
B = list(range(n + m_2, n + m_2 + m_1))
A = list(range(n + m_2, n + m_2 + m_1))
# insert artificial variables for geq constraints where necessary
for i in range(0,m_2):
if b_geq[i] > 0:
tableau = np.concatenate((
tableau,
base_vector(m_1 + i, m_1 + m_2)
), axis=1)
B.append(n + m + a)
A.append(n + m + a)
a = a + 1
else: # b_geq[i] < 0
tableau[m_1 + i,:] = -tableau[m_1 + i,:]
b_geq[i] = -b_geq[i]
swapped.append(i)
B.append(n + i) # slack variable is now a standard basis
# vector, therefore can be put into basis
tableau = np.concatenate((
tableau,
np.concatenate((
np.matrix(b_eq).T,
np.matrix(b_geq).T
), axis=0)
), axis=1)
'''
Tableau is a matrix of the form:
n m_2 m_1 a 1
+-----------+----------+----------+--------+------+
+/- | | | | | |
+/- | A_eq | 0 | I | 0 | b_eq |
+/- | | | | | |
+-----------+----------+----------+--------+------+
+/- | | | | | |
+/- | A_geq | -I | 0 | (e_i) | b_geq|
+/- | | | | | |
+-----------+----------+----------+--------+------+
---------A---------
- - - - - - ---------B---------
'''
# Preparation phase 1
initial_b = tableau[:,-1]
c_curr = np.matrix(np.zeros(n + m + a))
c_curr[:,A] = np.repeat(1, m_1 + a)
z = - c_curr[:,B].dot(tableau[:, n + m + a])
N = [i for i in range(0, n + m + a) if i not in B]
# c_N - c_B*B^-1*N
c_curr[:,N] = - c_curr[:,B].dot(tableau[:,N])
c_curr[:,B] = np.zeros(m)
if LP.DEBUG:
print(tableau.shape[1])
print(c.shape)
print(m+a+1)
tableau = np.concatenate((# append actual objective function to tableau
tableau,
np.concatenate((
np.matrix(c),
np.zeros((1, m + a + 1))
), axis=1)
), axis = 0)
tableau = np.concatenate((# append auxiliary objective function to tabl
tableau,
np.concatenate((
c_curr,
np.matrix(z)
), axis=1)
), axis=0)
initial_B = B.copy()
if LP.DEBUG:
print({"B": B, "N": N, "A": A, "m_1": m_1, "m_2": m_2, "n": n, "a": a})
print("Tableau:")
print(tableau)
'''
local function phase
Parameters:
B (list): basis variable indices
N (list): nonbasis variable indices
A (list): artificial variable indices
c_curr (np.matrix of shape (1,*)): current objective function coeff
tableau (np.matrix): tableau as defined above
n (numeric): column-count of first block
m (numeric): column-count of second block
rows (numeric): rows for pivot search
a (numeric): column-count of third block
phase (numeric): phase 1 or 2
'''
def phase(B, N, A, c_curr, tableau, n, m, rows, a, phase, iterations):
if LP.DEBUG:
print("Entering phase", phase, "with parameters:\n",
{"n": n, "m": m, "rows": rows, "a": a})
bland = False
degenerated = 0
# while there is a negative coefficient in c_curr
while np.shape(c_curr[:,N][c_curr[:,N] < - self.eps])[1] > 0:
iterations = iterations + 1
z = tableau[rows + 2 - phase, n + m + a] # for checking cycle
if LP.DEBUG:
print("Initial Tableau:")
print(tableau)
if bland:
index = np.argmax(c_curr[:,N] < - self.eps)
i = N[index]
| |
rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_2_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_2_2_5"))
self.gb_pb_buy_volume_row_minus_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_minus_2_5.setGeometry(QtCore.QRect(180, 180, 31, 23))
self.gb_pb_buy_volume_row_minus_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_2_5"))
self.gb_pb_b5_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b5_5.setGeometry(QtCore.QRect(10, 240, 51, 20))
self.gb_pb_b5_5.setFlat(True)
self.gb_pb_b5_5.setObjectName(_fromUtf8("gb_pb_b5_5"))
self.gb_pb_b5_price_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b5_price_5.setGeometry(QtCore.QRect(60, 240, 51, 20))
self.gb_pb_b5_price_5.setStyleSheet(_fromUtf8(""))
self.gb_pb_b5_price_5.setFlat(True)
self.gb_pb_b5_price_5.setObjectName(_fromUtf8("gb_pb_b5_price_5"))
self.gb_pb_b4_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b4_1_5.setGeometry(QtCore.QRect(10, 220, 51, 20))
self.gb_pb_b4_1_5.setFlat(True)
self.gb_pb_b4_1_5.setObjectName(_fromUtf8("gb_pb_b4_1_5"))
self.gb_pb_buy_volume_row_minus_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_minus_1_5.setGeometry(QtCore.QRect(180, 160, 31, 23))
self.gb_pb_buy_volume_row_minus_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_1_5"))
self.gb_pb_b3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b3_5.setGeometry(QtCore.QRect(10, 200, 51, 20))
self.gb_pb_b3_5.setFlat(True)
self.gb_pb_b3_5.setObjectName(_fromUtf8("gb_pb_b3_5"))
self.gb_pb_b2_volume_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b2_volume_5.setGeometry(QtCore.QRect(110, 180, 71, 20))
self.gb_pb_b2_volume_5.setFlat(True)
self.gb_pb_b2_volume_5.setObjectName(_fromUtf8("gb_pb_b2_volume_5"))
self.gb_pb_buy_volume_1_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_1_3_5.setGeometry(QtCore.QRect(350, 160, 41, 23))
self.gb_pb_buy_volume_1_3_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_3_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_1_3_5"))
self.gb_pb_buy_volume_row_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_3_5.setGeometry(QtCore.QRect(210, 200, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_3_5.setFont(font)
self.gb_pb_buy_volume_row_3_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_3_5.setFlat(True)
self.gb_pb_buy_volume_row_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_3_5"))
self.gb_pb_buy_volume_5_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_5_1_5.setGeometry(QtCore.QRect(270, 240, 41, 23))
self.gb_pb_buy_volume_5_1_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_1_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_5_1_5"))
self.gb_pb_buy_volume_4_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_4_1_5.setGeometry(QtCore.QRect(270, 220, 41, 23))
self.gb_pb_buy_volume_4_1_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_1_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_4_1_5"))
self.gb_pb_buy_volume_row_4_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_4_5.setGeometry(QtCore.QRect(210, 220, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_4_5.setFont(font)
self.gb_pb_buy_volume_row_4_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_4_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_4_5.setFlat(True)
self.gb_pb_buy_volume_row_4_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_4_5"))
self.gb_pb_buy_volume_3_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_3_1_5.setGeometry(QtCore.QRect(270, 200, 41, 23))
self.gb_pb_buy_volume_3_1_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_3_1_5"))
self.gb_pb_b1_price_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b1_price_5.setGeometry(QtCore.QRect(60, 160, 51, 20))
self.gb_pb_b1_price_5.setFlat(True)
self.gb_pb_b1_price_5.setObjectName(_fromUtf8("gb_pb_b1_price_5"))
self.gb_pb_buy_volume_3_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_3_2_5.setGeometry(QtCore.QRect(310, 200, 41, 23))
self.gb_pb_buy_volume_3_2_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_3_2_5"))
self.gb_pb_b3_volume_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b3_volume_5.setGeometry(QtCore.QRect(110, 200, 71, 20))
self.gb_pb_b3_volume_5.setFlat(True)
self.gb_pb_b3_volume_5.setObjectName(_fromUtf8("gb_pb_b3_volume_5"))
self.gb_pb_buy_volume_row_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_2_5.setGeometry(QtCore.QRect(210, 180, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_2_5.setFont(font)
self.gb_pb_buy_volume_row_2_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_2_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_2_5.setFlat(True)
self.gb_pb_buy_volume_row_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_2_5"))
self.gb_pb_b2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b2_5.setGeometry(QtCore.QRect(10, 180, 51, 20))
self.gb_pb_b2_5.setFlat(True)
self.gb_pb_b2_5.setObjectName(_fromUtf8("gb_pb_b2_5"))
self.gb_pb_buy_volume_3_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_3_3_5.setGeometry(QtCore.QRect(350, 200, 41, 23))
self.gb_pb_buy_volume_3_3_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_3_3_5"))
self.gb_pb_b2_price_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b2_price_5.setGeometry(QtCore.QRect(60, 180, 51, 20))
self.gb_pb_b2_price_5.setFlat(True)
self.gb_pb_b2_price_5.setObjectName(_fromUtf8("gb_pb_b2_price_5"))
self.gb_pb_buy_volume_row_minus_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_minus_3_5.setGeometry(QtCore.QRect(180, 200, 31, 23))
self.gb_pb_buy_volume_row_minus_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_3_5"))
self.gb_pb_b3_price_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b3_price_5.setGeometry(QtCore.QRect(60, 200, 51, 20))
self.gb_pb_b3_price_5.setFlat(True)
self.gb_pb_b3_price_5.setObjectName(_fromUtf8("gb_pb_b3_price_5"))
self.gb_pb_b4_volume_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b4_volume_5.setGeometry(QtCore.QRect(110, 220, 71, 20))
self.gb_pb_b4_volume_5.setFlat(True)
self.gb_pb_b4_volume_5.setObjectName(_fromUtf8("gb_pb_b4_volume_5"))
self.gb_pb_buy_volume_1_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_1_1_5.setGeometry(QtCore.QRect(270, 160, 41, 23))
self.gb_pb_buy_volume_1_1_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_1_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_1_1_5"))
self.gb_pb_buy_volume_row_minus_5_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_minus_5_5.setGeometry(QtCore.QRect(180, 240, 31, 23))
self.gb_pb_buy_volume_row_minus_5_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_5_5"))
self.gb_pb_buy_volume_5_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_5_3_5.setGeometry(QtCore.QRect(350, 240, 41, 23))
self.gb_pb_buy_volume_5_3_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_3_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_5_3_5"))
self.gb_pb_buy_volume_2_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_2_1_5.setGeometry(QtCore.QRect(270, 180, 41, 23))
self.gb_pb_buy_volume_2_1_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_1_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_2_1_5"))
self.gb_pb_buy_volume_1_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_1_2_5.setGeometry(QtCore.QRect(310, 160, 41, 23))
self.gb_pb_buy_volume_1_2_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_2_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_1_2_5"))
self.gb_pb_buy_volume_row_5_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_5_5.setGeometry(QtCore.QRect(210, 240, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_5_5.setFont(font)
self.gb_pb_buy_volume_row_5_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_5_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_5_5.setFlat(True)
self.gb_pb_buy_volume_row_5_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_5_5"))
self.gb_pb_buy_volume_4_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_4_3_5.setGeometry(QtCore.QRect(350, 220, 41, 23))
self.gb_pb_buy_volume_4_3_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_3_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_4_3_5"))
self.gb_pb_b4_price_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b4_price_5.setGeometry(QtCore.QRect(60, 220, 51, 20))
self.gb_pb_b4_price_5.setStyleSheet(_fromUtf8("\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_b4_price_5.setFlat(True)
self.gb_pb_b4_price_5.setObjectName(_fromUtf8("gb_pb_b4_price_5"))
self.gb_pb_b5_volume_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b5_volume_5.setGeometry(QtCore.QRect(110, 240, 71, 20))
self.gb_pb_b5_volume_5.setFlat(True)
self.gb_pb_b5_volume_5.setObjectName(_fromUtf8("gb_pb_b5_volume_5"))
self.gb_pb_buy_volume_4_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_4_2_5.setGeometry(QtCore.QRect(310, 220, 41, 23))
self.gb_pb_buy_volume_4_2_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_2_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_4_2_5"))
self.gb_pb_b1_volume_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b1_volume_5.setGeometry(QtCore.QRect(110, 160, 71, 20))
self.gb_pb_b1_volume_5.setFlat(True)
self.gb_pb_b1_volume_5.setObjectName(_fromUtf8("gb_pb_b1_volume_5"))
self.gb_pb_buy_volume_2_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_2_3_5.setGeometry(QtCore.QRect(350, 180, 41, 23))
self.gb_pb_buy_volume_2_3_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_3_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_2_3_5"))
self.gb_pb_buy_volume_row_minus_4_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_minus_4_5.setGeometry(QtCore.QRect(180, 220, 31, 23))
self.gb_pb_buy_volume_row_minus_4_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_4_5"))
self.gb_pb_buy_volume_row_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_row_1_5.setGeometry(QtCore.QRect(210, 160, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_1_5.setFont(font)
self.gb_pb_buy_volume_row_1_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_1_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_1_5.setFlat(True)
self.gb_pb_buy_volume_row_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_row_1_5"))
self.gb_pb_b1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_b1_5.setGeometry(QtCore.QRect(10, 160, 51, 20))
self.gb_pb_b1_5.setFlat(True)
self.gb_pb_b1_5.setObjectName(_fromUtf8("gb_pb_b1_5"))
self.gb_pb_buy_volume_5_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_5_2_5.setGeometry(QtCore.QRect(310, 240, 41, 23))
self.gb_pb_buy_volume_5_2_5.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_2_5.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_5_2_5"))
self.gb_pb_buy_total_money_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_total_money_5.setGeometry(QtCore.QRect(100, 260, 81, 23))
self.gb_pb_buy_total_money_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_total_money_5.setFlat(True)
self.gb_pb_buy_total_money_5.setObjectName(_fromUtf8("gb_pb_buy_total_money_5"))
self.gb_pb_buy_volume_total_minus_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_total_minus_5.setGeometry(QtCore.QRect(240, 260, 31, 23))
self.gb_pb_buy_volume_total_minus_5.setObjectName(_fromUtf8("gb_pb_buy_volume_total_minus_5"))
self.gb_pb_buy_volume_column_3_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_column_3_5.setGeometry(QtCore.QRect(350, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_3_5.setFont(font)
self.gb_pb_buy_volume_column_3_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_3_5.setFlat(True)
self.gb_pb_buy_volume_column_3_5.setObjectName(_fromUtf8("gb_pb_buy_volume_column_3_5"))
self.gb_pb_buy_volume_column_2_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_column_2_5.setGeometry(QtCore.QRect(310, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_2_5.setFont(font)
self.gb_pb_buy_volume_column_2_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_2_5.setFlat(True)
self.gb_pb_buy_volume_column_2_5.setObjectName(_fromUtf8("gb_pb_buy_volume_column_2_5"))
self.gb_pb_buy_volume_total_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_total_5.setGeometry(QtCore.QRect(210, 260, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_total_5.setFont(font)
self.gb_pb_buy_volume_total_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_total_5.setFlat(True)
self.gb_pb_buy_volume_total_5.setObjectName(_fromUtf8("gb_pb_buy_volume_total_5"))
self.gb_pb_buy_volume_column_1_5 = QtGui.QPushButton(self.gb_ETFOrder_5)
self.gb_pb_buy_volume_column_1_5.setGeometry(QtCore.QRect(270, 260, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_column_1_5.setFont(font)
self.gb_pb_buy_volume_column_1_5.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_column_1_5.setFlat(True)
self.gb_pb_buy_volume_column_1_5.setObjectName(_fromUtf8("gb_pb_buy_volume_column_1_5"))
self.gb_ETFOrder_6 = QtGui.QGroupBox(self.dockWidgetContents)
self.gb_ETFOrder_6.setGeometry(QtCore.QRect(830, 310, 401, 291))
self.gb_ETFOrder_6.setStyleSheet(_fromUtf8("QGroupBox {\n"
" border: 2px solid rgb(29, 233, 255)\n"
"};\n"
""))
self.gb_ETFOrder_6.setTitle(_fromUtf8(""))
self.gb_ETFOrder_6.setObjectName(_fromUtf8("gb_ETFOrder_6"))
self.gb_cb_etfCode_6 = QtGui.QComboBox(self.gb_ETFOrder_6)
self.gb_cb_etfCode_6.setGeometry(QtCore.QRect(10, 10, 81, 22))
self.gb_cb_etfCode_6.setObjectName(_fromUtf8("gb_cb_etfCode_6"))
self.gb_pb_sell_total_money_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_total_money_6.setGeometry(QtCore.QRect(100, 10, 81, 23))
self.gb_pb_sell_total_money_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_total_money_6.setFlat(True)
self.gb_pb_sell_total_money_6.setObjectName(_fromUtf8("gb_pb_sell_total_money_6"))
self.gb_pb_sell_volume_total_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_total_6.setGeometry(QtCore.QRect(210, 10, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_total_6.setFont(font)
self.gb_pb_sell_volume_total_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_total_6.setFlat(True)
self.gb_pb_sell_volume_total_6.setObjectName(_fromUtf8("gb_pb_sell_volume_total_6"))
self.gb_pb_sell_volume_total_minus_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_total_minus_6.setGeometry(QtCore.QRect(240, 10, 31, 23))
self.gb_pb_sell_volume_total_minus_6.setObjectName(_fromUtf8("gb_pb_sell_volume_total_minus_6"))
self.gb_pb_sell_volume_column_1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_column_1_6.setGeometry(QtCore.QRect(270, 10, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_column_1_6.setFont(font)
self.gb_pb_sell_volume_column_1_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_column_1_6.setFlat(True)
self.gb_pb_sell_volume_column_1_6.setObjectName(_fromUtf8("gb_pb_sell_volume_column_1_6"))
self.gb_pb_sell_volume_column_2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_column_2_6.setGeometry(QtCore.QRect(310, 10, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_column_2_6.setFont(font)
self.gb_pb_sell_volume_column_2_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_column_2_6.setFlat(True)
self.gb_pb_sell_volume_column_2_6.setObjectName(_fromUtf8("gb_pb_sell_volume_column_2_6"))
self.gb_pb_sell_volume_column_3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_column_3_6.setGeometry(QtCore.QRect(350, 10, 41, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_column_3_6.setFont(font)
self.gb_pb_sell_volume_column_3_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_column_3_6.setFlat(True)
self.gb_pb_sell_volume_column_3_6.setObjectName(_fromUtf8("gb_pb_sell_volume_column_3_6"))
self.gb_pb_s3_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s3_volume_6.setGeometry(QtCore.QRect(110, 80, 71, 20))
self.gb_pb_s3_volume_6.setFlat(True)
self.gb_pb_s3_volume_6.setObjectName(_fromUtf8("gb_pb_s3_volume_6"))
self.gb_pb_s2_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s2_6.setGeometry(QtCore.QRect(10, 100, 51, 20))
self.gb_pb_s2_6.setFlat(True)
self.gb_pb_s2_6.setObjectName(_fromUtf8("gb_pb_s2_6"))
self.gb_pb_s4_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s4_price_6.setGeometry(QtCore.QRect(60, 60, 51, 20))
self.gb_pb_s4_price_6.setFlat(True)
self.gb_pb_s4_price_6.setObjectName(_fromUtf8("gb_pb_s4_price_6"))
self.gb_pb_s4_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s4_6.setGeometry(QtCore.QRect(10, 60, 51, 20))
self.gb_pb_s4_6.setFlat(True)
self.gb_pb_s4_6.setObjectName(_fromUtf8("gb_pb_s4_6"))
self.gb_pb_s3_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s3_price_6.setGeometry(QtCore.QRect(60, 80, 51, 20))
self.gb_pb_s3_price_6.setFlat(True)
self.gb_pb_s3_price_6.setObjectName(_fromUtf8("gb_pb_s3_price_6"))
self.gb_pb_s5_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s5_volume_6.setGeometry(QtCore.QRect(110, 40, 71, 20))
self.gb_pb_s5_volume_6.setFlat(True)
self.gb_pb_s5_volume_6.setObjectName(_fromUtf8("gb_pb_s5_volume_6"))
self.gb_pb_s2_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s2_volume_6.setGeometry(QtCore.QRect(110, 100, 71, 20))
self.gb_pb_s2_volume_6.setFlat(True)
self.gb_pb_s2_volume_6.setObjectName(_fromUtf8("gb_pb_s2_volume_6"))
self.gb_pb_s5_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s5_price_6.setGeometry(QtCore.QRect(60, 40, 51, 20))
self.gb_pb_s5_price_6.setFlat(True)
self.gb_pb_s5_price_6.setObjectName(_fromUtf8("gb_pb_s5_price_6"))
self.gb_pb_s4_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s4_volume_6.setGeometry(QtCore.QRect(110, 60, 71, 20))
self.gb_pb_s4_volume_6.setFlat(True)
self.gb_pb_s4_volume_6.setObjectName(_fromUtf8("gb_pb_s4_volume_6"))
self.gb_pb_s2_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s2_price_6.setGeometry(QtCore.QRect(60, 100, 51, 20))
self.gb_pb_s2_price_6.setStyleSheet(_fromUtf8("\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_s2_price_6.setFlat(True)
self.gb_pb_s2_price_6.setObjectName(_fromUtf8("gb_pb_s2_price_6"))
self.gb_pb_s1_volume_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s1_volume_6.setGeometry(QtCore.QRect(110, 120, 71, 20))
self.gb_pb_s1_volume_6.setFlat(True)
self.gb_pb_s1_volume_6.setObjectName(_fromUtf8("gb_pb_s1_volume_6"))
self.gb_pb_s3_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s3_6.setGeometry(QtCore.QRect(10, 80, 51, 20))
self.gb_pb_s3_6.setFlat(True)
self.gb_pb_s3_6.setObjectName(_fromUtf8("gb_pb_s3_6"))
self.gb_pb_s1_price_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s1_price_6.setGeometry(QtCore.QRect(60, 120, 51, 20))
self.gb_pb_s1_price_6.setStyleSheet(_fromUtf8(""))
self.gb_pb_s1_price_6.setFlat(True)
self.gb_pb_s1_price_6.setObjectName(_fromUtf8("gb_pb_s1_price_6"))
self.gb_pb_s5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s5_6.setGeometry(QtCore.QRect(10, 40, 51, 20))
self.gb_pb_s5_6.setFlat(True)
self.gb_pb_s5_6.setObjectName(_fromUtf8("gb_pb_s5_6"))
self.gb_pb_s1_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_s1_6.setGeometry(QtCore.QRect(10, 120, 51, 20))
self.gb_pb_s1_6.setFlat(True)
self.gb_pb_s1_6.setObjectName(_fromUtf8("gb_pb_s1_6"))
self.gb_pb_sell_volume_row_5_6 = QtGui.QPushButton(self.gb_ETFOrder_6)
self.gb_pb_sell_volume_row_5_6.setGeometry(QtCore.QRect(210, 40, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_5_6.setFont(font)
self.gb_pb_sell_volume_row_5_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, | |
first_record:
msg = ", ".join(data)
datafile.write(", %s" % msg)
else:
first_record = False
msg = ", ".join(data)
datafile.write(msg)
offset += chunk_size
# Close the JSON for this table section
datafile.write("]}")
db.session.execute("DELETE FROM %s WHERE time < :time AND instrument_id = :id" % (table,),
dict(time=cutoff_time.isoformat(), id=instrument_id))
db.session.commit()
# Close the list of tables
datafile.write("]\n")
return "Finish"
def new_event(msg, msg_struct):
    """Register a new event code.

    Records the instrument data reference described by ``msg_struct`` via
    ``save_instrument_data_reference``, then resolves and returns the event
    code for the message via ``get_event_code``.

    Parameters
    ----------
    msg: JSON
        Raw message payload; passed through to ``get_event_code``.
    msg_struct: dict
        Decoded version of ``msg``; passed to both helpers.

    Returns
    -------
    Whatever ``get_event_code`` returns for this message (presumably the
    resolved event code response -- confirm against that helper).
    """
    save_instrument_data_reference(msg_struct)
    return get_event_code(msg, msg_struct)
@app.route("/eventmanager/event", methods=['POST'])
def event():
    """Handle an incoming event POST.

    The request body is a JSON packet containing an 'event_code'.  Special
    event codes are dispatched to dedicated handler functions; any other code
    falls through to ``save_misc_event``.  Each handler receives the raw
    message and its decoded dictionary form, and its return value becomes the
    HTTP response.

    Returns
    -------
    The return value of whichever handler function was dispatched to.
    """
    raw_packet = request.data
    packet = dict(json.loads(raw_packet))
    code = packet['event_code']
    # Dispatch table must be built here (not at module level): the handler
    # functions are defined further down in the module.
    handlers = {
        utility.EVENT_CODE_REQUEST: new_event,
        utility.SITE_ID_REQUEST: get_site_id,
        utility.INSTRUMENT_ID_REQUEST: get_instrument_id,
        utility.PULSE_CAPTURE: save_pulse_capture,
        utility.INSTRUMENT_LOG: save_instrument_log,
        utility.PROSENSING_PAF: save_special_event,
        utility.IRIS_BITE: save_special_event,
    }
    # save_misc_event is the default when the event_code is not special-cased.
    handler = handlers.get(code, save_misc_event)
    return handler(raw_packet, packet)
def save_misc_event(msg, msg_struct):
    """Handle a MISC event message.

    If the event's value casts cleanly to float it is saved as an
    ``EventWithValue`` row, otherwise as an ``EventWithText`` row.  The value
    is also mirrored into Redis.  If the 'is_central' flag is not set, the
    packet is forwarded on to the 'cf_url' (both specified in *config.yml*).

    Parameters
    ----------
    msg: JSON object
        JSON object representing the message.
    msg_struct: dict
        Dictionary representation of the message; must contain 'event_code'
        and a 'data' dict with 'time', 'value' and 'instrument_id'.

    Returns
    -------
    ("", 200) tuple -- an empty HTTP 200 response.
    """
    msg_event_code = msg_struct['event_code']
    timestamp = msg_struct['data']['time']
    raw_value = msg_struct['data']['value']
    instrument_id = msg_struct['data']['instrument_id']
    # Keep the try narrow: only the cast decides value-vs-text.  The original
    # wrapped the whole transaction, so a ValueError raised *after* the value
    # commit (e.g. by the Redis helper) fell into the text branch and saved
    # the same event twice.
    try:
        typed_value = float(raw_value)
    except ValueError:
        db_event = EventWithText()
        db_event.text = raw_value
        typed_value = raw_value
        log_message = "Saved Text Event"
    else:
        db_event = EventWithValue()
        db_event.value = typed_value
        log_message = "Saved Value Event"
    db_event.event_code_id = msg_event_code
    db_event.time = timestamp
    db_event.instrument_id = instrument_id
    db.session.add(db_event)
    db.session.commit()
    # Add the entry to the Redis database.
    attribute_name = redint.get_attribute_by_event_code(msg_event_code)
    redint.add_values_for_attribute(db_event.instrument_id, attribute_name,
                                    dateutil.parser.parse(timestamp), typed_value)
    EM_LOGGER.info(log_message)
    # If application is at a site instead of the central facility, passes data
    # on to be saved at central facility.
    if not is_central:
        payload = json.loads(msg)
        requests.post(cf_url, json=payload, headers=headers, verify=cert_verify)
    return "", 200
def save_special_event(msg, msg_struct):
    """Insert a special event (ProSensing PAF / IRIS BITE) into its table.

    Maps every entry of ``msg_struct['data']['values']`` onto a database
    column of the table selected by the event code, inserts one row, and
    mirrors the values into Redis.  If the 'is_central' flag is not set, it
    then forwards the packet on to the 'cf_url' (both specified in
    *config.yml*).

    Parameters
    ----------
    msg: JSON
        JSON message structure, expected format:
        {event_code: *code*, data: {time: *ISO DateTime*, site_id: *Integer*,
        instrument_id: *Integer*,
        values: *Dictionary of database column names mapped to their values*}}
    msg_struct: dictionary
        Decoded version of msg, converted to python dictionary.

    Raises
    ------
    ValueError
        If a key in 'values' is not a plain identifier.  Column names cannot
        be bound as SQL parameters, so this guard prevents SQL injection
        through the payload's keys.
    """
    msg_event_code = msg_struct['event_code']
    timestamp = msg_struct['data']['time']
    # Select the destination table from the event code.
    if msg_event_code == utility.PROSENSING_PAF:
        table_name = "prosensing_paf"
    elif msg_event_code == utility.IRIS_BITE:
        table_name = "iris_bite"
    else:
        table_name = "prosensing_paf"  # A default that should never be reached
    # All values are passed as named bind parameters instead of being
    # interpolated into the SQL text (the previous version built the query by
    # raw string formatting, which allowed SQL injection from the packet).
    columns = ["time", "site_id", "instrument_id"]
    placeholders = ["time", "site_id", "instrument_id"]
    params = {"time": timestamp,
              "site_id": msg_struct['data']['site_id'],
              "instrument_id": msg_struct['data']['instrument_id']}
    redis_attributes = []
    redis_values = []
    for key, value in msg_struct['data']['values'].iteritems():
        # Only plain identifiers (letters, digits, underscores) may become
        # column names -- they cannot be parameterized.
        if not key or not key.replace("_", "").isalnum():
            raise ValueError("Invalid column name in special event: %r" % (key,))
        redis_attributes.append(key)
        columns.append(key)
        pname = "val_%s" % key
        placeholders.append(pname)
        str_value = str(value)
        # Converts inf and -inf to SQL NULL (matches the previous behaviour,
        # which also NULLed any text containing "inf" or "Inf").
        if "inf" in str_value or "Inf" in str_value:
            params[pname] = None
            # Add value to list of Redis values to save
            redis_values.append("NULL")
        else:
            try:
                float(value)
            except ValueError:
                # Non-numeric: store as text, stripping trailing NUL padding.
                params[pname] = value.rstrip('\x00')
            else:
                # Numeric: preserve int vs float typing, mirroring the typing
                # the value had as an unquoted SQL literal in the old query.
                try:
                    params[pname] = int(str_value)
                except ValueError:
                    params[pname] = float(str_value)
            # Add value to list of Redis values to save
            redis_values.append(value)
    sql_query = "INSERT INTO %s(%s) VALUES (%s)" % (
        table_name,
        ", ".join(columns),
        ", ".join(":%s" % p for p in placeholders))
    db.session.execute(sql_query, params)
    db.session.commit()
    # Save values to Redis
    redint.add_value_set_for_table_attributes(msg_struct["data"]["instrument_id"],
                                              redis_attributes,
                                              dateutil.parser.parse(timestamp),
                                              redis_values, table_name)
    if not is_central:
        payload = json.loads(msg)
        requests.post(cf_url, json=payload, headers=headers, verify=cert_verify)
    return "OK"
def save_instrument_log(msg, msg_struct):
    """Insert the log entry described by 'msg_struct' into the database
    'instrument_logs' table, mapping each value onto its database column.

    Parameters
    ----------
    msg: JSON
        JSON message structure, expected format:
        {event_code: *code*, data: {time: *ISO DateTime*, author_id: *Integer*, instrument_id: *Integer*,
        status: *Integer Status Code*, contents: *Log Message*, supporting_images: *Image*}}
    msg_struct: dictionary
        Decoded version of msg, converted to python dictionary.
    """
    data = msg_struct['data']
    entry = InstrumentLog()
    # Every column takes its value from the identically named message key.
    for column in ('time', 'instrument_id', 'author_id', 'status',
                   'contents', 'supporting_images'):
        setattr(entry, column, data[column])
    db.session.add(entry)
    db.session.commit()
    return "OK"
def save_pulse_capture(msg, msg_struct):
    """
    Insert the pulse capture described by 'msg_struct' into the database
    'pulse_captures' table, mapping each value onto its database column.
    When the 'is_central' flag is not set, the packet is then forwarded on
    to the 'cf_url' (both specified in *config.yml*).

    Parameters
    ----------
    msg: JSON
        JSON message structure, expected format: \
        {event_code: *code*, data: {time: *ISO DateTime*, site_id: *Integer*, instrument_id: *Integer*, \
        values: *Array of Floats*}}
    msg_struct: dictionary
        Decoded version of msg, converted to python dictionary.
    """
    data = msg_struct['data']
    pulse = PulseCapture()
    pulse.time = data['time']
    pulse.instrument_id = data['instrument_id']
    pulse.data = data['values']
    db.session.add(pulse)
    db.session.commit()
    # Non-central installs (sites) forward the packet to the central facility.
    if not is_central:
        requests.post(cf_url, json=json.loads(msg), headers=headers,
                      verify=cert_verify)
    return "OK"
def get_instrument_id(msg, msg_struct):
    """Searches the database for any instruments where the instrument abbreviation matches
    'msg_struct['data']['name']' at site 'msg_struct['data']['site_id']'. If the 'is_central' flag is
    set and there is no instrument, returns a -1 to indicate nothing was found, but if it was found,
    returns the instrument's information to be saved. If the 'is_central' flag is not set, it then
    forwards the packet on to the 'cf_url' (both specified in *config.yml*) and returns whatever the
    central facility determines the instrument id is, saving the returned instrument.

    Parameters
    ----------
    msg: JSON
        JSON message structure, expected format: {Event_Code: *code*, Data: *instrument abbreviation*}
    msg_struct: dictionary
        Decoded version of msg, converted to python dictionary.

    Returns
    -------
    The instrument id or information determined by the function, as a JSON string:
    {"event_code": *integer event code*, "data": {"instrument_id": *integer instrument id*, "site_id":
    *integer site id instrument is at*, "name_short": *string instrument abbreviation*, "name_long":
    *string full instrument name*, "type": *string type of instrument*, "vendor": *string instrument's vendor*,
    "description": *string description of instrument* }}.
    If no instrument was found, the instrument id is passed as -1.
    """
    # json.dumps (rather than %-interpolation into a JSON template) guarantees the
    # response stays valid JSON even when a field contains quotes or backslashes.
    def _instrument_reply(instrument_id, site_id, name_short, name_long,
                          inst_type, vendor, description):
        return json.dumps({"event_code": utility.INSTRUMENT_ID_REQUEST,
                           "data": {"instrument_id": instrument_id,
                                    "site_id": site_id,
                                    "name_short": name_short,
                                    "name_long": name_long,
                                    "type": inst_type,
                                    "vendor": vendor,
                                    "description": description}})

    db_instrument = db.session.query(Instrument).filter(Instrument.name_short == msg_struct['data']['name']).\
        filter(Instrument.site_id == msg_struct['data']['site_id']).first()
    # If there is an instrument with a matching name, returns all info to a site or just the id to an agent.
    if db_instrument:
        EM_LOGGER.info("Found Existing Instrument")
        return _instrument_reply(db_instrument.id, db_instrument.site_id,
                                 db_instrument.name_short, db_instrument.name_long,
                                 db_instrument.type, db_instrument.vendor,
                                 db_instrument.description)
    # If it does not exist at the central facility, returns an error indicator
    if is_central:
        EM_LOGGER.error("Instrument could not be found at central facility")
        return json.dumps({"data": {"instrument_id": -1}})
    # If it does not exist at a site, requests the instrument information from the central facility
    payload = json.loads(msg)
    response = requests.post(cf_url, json=payload, headers=headers, verify=cert_verify)
    cf_msg = dict(json.loads(response.content))
    cf_data = cf_msg['data']
    # TODO: handle a bad/error return from the central facility here (e.g. instrument_id == -1).
    new_instrument = Instrument()
    new_instrument.id = cf_data['instrument_id']
    new_instrument.site_id = cf_data['site_id']
    new_instrument.name_short = cf_data['name_short']
    new_instrument.name_long = cf_data['name_long']
    new_instrument.type = cf_data['type']
    new_instrument.vendor = cf_data['vendor']
    new_instrument.description = cf_data['description']
    db.session.add(new_instrument)
    db.session.commit()
    utility.reset_db_keys()
    EM_LOGGER.info("Saved New Instrument")
    return _instrument_reply(cf_data['instrument_id'], cf_data['site_id'],
                             cf_data['name_short'], cf_data['name_long'],
                             cf_data['type'], cf_data['vendor'],
                             cf_data['description'])
def get_site_id(msg, msg_struct):
"""Searches the database for any sites where the site abbreviation matches 'msg_struct['site']'. If the
'is_central' flag is set and | |
<reponame>jasonplato/High_SimulationPlatform
from __future__ import division, print_function, absolute_import
import numpy as np
from highway_env import utils
from highway_env.envs.abstract import AbstractEnv
from highway_env.road.lane import LineType, StraightLane, SineLane, CircularLane
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.control import ControlledVehicle, MDPVehicle, CarSim, FreeControl
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.vehicle.dynamics import RedLight
import pygame
import random
import math
from highway_env.envs.graphics import EnvViewer
def rad(deg):
    """Convert an angle given in degrees to radians."""
    return np.pi * deg / 180
class CrossroadEnv(AbstractEnv):
    """
    A crossroad (intersection) driving environment.

    NOTE(review): the original docstring described a highway merge; the road
    network built in make_roads below is a set of four-way crossroads —
    confirm intended description. The ego-vehicle is rewarded for maintaining
    a high velocity and avoiding collisions, with an altruistic penalty when
    controlled vehicles on the last lane are slowed down (see _reward).
    """

    COLLISION_REWARD = -1            # applied when the ego vehicle has crashed
    RIGHT_LANE_REWARD = 0.1          # scaled by the ego vehicle's lane index
    HIGH_VELOCITY_REWARD = 0.2       # scaled by the ego vehicle's velocity index
    MERGING_VELOCITY_REWARD = -0.5   # altruistic penalty for slowing other controlled vehicles
    LANE_CHANGE_REWARD = -0.05       # penalty for lane-change actions (0 and 2)

    # Default environment configuration; can be overridden via configure().
    DEFAULT_CONFIG = {"other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
                      "incoming_vehicle_destination": None,
                      "other_vehicles_destination": None}
def __init__(self):
super(CrossroadEnv, self).__init__()
self.config = self.DEFAULT_CONFIG.copy()
self.steps = 0
self.traffic_lights = {}
self.have_traffic_lights = False
self.entrance = ["swwe", "swse", "wmwe", "wmee", "nwwe", "nwne", "nene", "neee", "emwe", "emee", "sese", "seee"]
self.end = ["swwx", "swsx", "wmwx", "nwwx", "nwnx", "nenx",
"neex",
"emex",
"seex", "sesx"]
EnvViewer.SCREEN_HEIGHT = 700
EnvViewer.SCREEN_WIDTH = 700
self.SIMULATION_FREQUENCY = 50
    def configure(self, config):
        """Merge the given mapping into the environment configuration."""
        self.config.update(config)
    def _observation(self):
        """Return the observation as produced by the base environment."""
        return super(CrossroadEnv, self)._observation()
def _reward(self, action):
"""
The vehicle is rewarded for driving with high velocity on lanes to the right and avoiding collisions, but
an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low velocity.
:param action: the action performed
:return: the reward of the state-action transition
"""
action_reward = {0: self.LANE_CHANGE_REWARD,
1: 0,
2: self.LANE_CHANGE_REWARD,
3: 0,
4: 0}
reward = self.COLLISION_REWARD * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index / (len(self.road.lanes) - 2) \
+ self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1)
# Altruistic penalty
for vehicle in self.road.vehicles:
if vehicle.lane_index == len(self.road.lanes) - 1 and isinstance(vehicle, ControlledVehicle):
reward += self.MERGING_VELOCITY_REWARD * \
(vehicle.target_velocity - vehicle.velocity) / vehicle.target_velocity
return reward + action_reward[action]
    def _is_terminal(self):
        """
        The episode is over when a collision occurs.

        NOTE(review): an earlier docstring also mentioned "when the access
        ramp has been passed", but only the crash condition is checked here.
        """
        return self.vehicle.crashed
    def reset(self):
        """Rebuild the road network and vehicles, then return the initial
        observation."""
        self.make_roads()
        self.make_vehicles()
        return self._observation()
def make_roads(self):
net = RoadNetwork()
n, c, s = LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED
"""
crossroad of southwest
sw:southwest
w:west n:north e:east s:south
e:entrance x:exit
r:right of the ego_car l:left of the ego_car
swne swnx
inter4
swner swnel swnxl swnxr
swwe swwxr sweer swee
swwx inter1 swnxl sweel inter3 swex
swnel swexl
swner swexr
swsxr swsxl swsel swser
inter2
swsx swse
"""
net.add_lane("swwe", "intersw1", StraightLane(np.array([0, 0]), np.array([100, 0]), line_types=[c, s]))
net.add_lane("intersw1", "swwel",
StraightLane(np.array([100, 0]), np.array([150, 0]), line_types=[c, c], forbidden=True))
net.add_lane("swwe", "intersw1", StraightLane(np.array([0, 4]), np.array([100, 4]), line_types=[s, c]))
net.add_lane("intersw1", "swwer",
StraightLane(np.array([100, 4]), np.array([150, 4]), line_types=[c, c], forbidden=True))
net.add_lane("inter_sw_1", "swwx", StraightLane(np.array([100, -8]), np.array([0, -8]), line_types=[s, c]))
net.add_lane("swwxr", "inter_sw_1", StraightLane(np.array([150, -8]), np.array([100, -8]), line_types=[s, c]))
net.add_lane("inter_sw_1", "swwx", StraightLane(np.array([100, -4]), np.array([0, -4]), line_types=[c, s]))
net.add_lane("swwxl", "inter_sw_1", StraightLane(np.array([150, -4]), np.array([100, -4]), line_types=[c, s]))
net.add_lane("swse", "intersw2", StraightLane(np.array([167, 158]), np.array([167, 58]), line_types=[c, s]))
net.add_lane("intersw2", "swsel",
StraightLane(np.array([167, 58]), np.array([167, 8]), line_types=[c, c], forbidden=True))
net.add_lane("swse", "intersw2", StraightLane(np.array([171, 158]), np.array([171, 58]), line_types=[s, c]))
net.add_lane("intersw2", "swser",
StraightLane(np.array([171, 58]), np.array([171, 8]), line_types=[c, c], forbidden=True))
net.add_lane("inter_sw_2", "swsx", StraightLane(np.array([159, 58]), np.array([159, 158]), line_types=[s, c]))
net.add_lane("swsxr", "inter_sw_2", StraightLane(np.array([159, 8]), np.array([159, 58]), line_types=[s, c]))
net.add_lane("inter_sw_2", "swsx", StraightLane(np.array([163, 58]), np.array([163, 158]), line_types=[c, s]))
net.add_lane("swsxl", "inter_sw_2", StraightLane(np.array([163, 8]), np.array([163, 58]), line_types=[c, s]))
# net.add_lane("swee", "intersw3", StraightLane(np.array([328, -8]), np.array([228, -8]), line_types=[s, c]))
net.add_lane("intersw3", "sweer",
StraightLane(np.array([228, -8]), np.array([178, -8]), line_types=[c, c], forbidden=True))
# net.add_lane("swee", "intersw3", StraightLane(np.array([328, -4]), np.array([228, -4]), line_types=[c, s]))
net.add_lane("intersw3", "sweel",
StraightLane(np.array([228, -4]), np.array([178, -4]), line_types=[c, c], forbidden=True))
# net.add_lane("intersw_3", "swex", StraightLane(np.array([228, 0]), np.array([328, 0]), line_types=[c, s]))
net.add_lane("swexl", "inter_sw_3", StraightLane(np.array([178, 0]), np.array([228, 0]), line_types=[c, s]))
# net.add_lane("intersw_3", "swex", StraightLane(np.array([228, 4]), np.array([328, 4]), line_types=[s, c]))
net.add_lane("swexr", "inter_sw_3", StraightLane(np.array([178, 4]), np.array([228, 4]), line_types=[s, c]))
net.add_lane("intersw4", "swner",
StraightLane(np.array([159, -62]), np.array([159, -12]), line_types=[c, c], forbidden=True))
net.add_lane("intersw4", "swnel",
StraightLane(np.array([163, -62]), np.array([163, -12]), line_types=[c, c], forbidden=True))
net.add_lane("swnxl", "inter_sw_4", StraightLane(np.array([167, -12]), np.array([167, -62]), line_types=[c, s]))
net.add_lane("swnxr", "inter_sw_4", StraightLane(np.array([171, -12]), np.array([171, -62]), line_types=[s, c]))
# bellow: fulfill the turning lanes for vehicles to turn
# center = [152, 10]
# radii = [6, 10]
# alpha = math.degrees(math.asin(math.sqrt(97) / radii[0] / 2))
net.add_lane("swwer", "swsxr",
StraightLane(np.array([150, 4]), np.array([159, 8]), line_types=[n, n], forbidden=True))
# center = [152, -13]
net.add_lane("swner", "swwxr",
StraightLane(np.array([159, -12]), np.array([150, -8]), line_types=[n, n], forbidden=True))
net.add_lane("swnel", "swsxl",
StraightLane(np.array([163, -12]), np.array([163, 8]), line_types=[n, n], forbidden=True))
net.add_lane("swnel", "swexl",
StraightLane(np.array([163, -12]), np.array([178, 0]), line_types=[n, n], forbidden=True))
# center = [178, -13]
net.add_lane("sweer", "swnxr",
StraightLane(np.array([178, -8]), np.array([171, -12]), line_types=[n, n], forbidden=True))
net.add_lane("sweel", "swsxl",
StraightLane(np.array([178, -4]), np.array([163, 8]), line_types=[n, n], forbidden=True))
net.add_lane("sweel", "swwxl",
StraightLane(np.array([178, -4]), np.array([150, -4]), line_types=[n, n], forbidden=True))
# center = [178, 10]
net.add_lane("swser", "swexr",
StraightLane(np.array([171, 8]), np.array([178, 4]), line_types=[n, n], forbidden=True))
net.add_lane("swsel", "swwxl",
StraightLane(np.array([167, 8]), np.array([150, -4]), line_types=[n, n], forbidden=True))
net.add_lane("swsel", "swnxl",
StraightLane(np.array([167, 8]), np.array([167, -12]), line_types=[n, n], forbidden=True))
net.add_lane("swwel", "swnxl",
StraightLane(np.array([150, 0]), np.array([167, -12]), line_types=[n, n], forbidden=True))
net.add_lane("swwel", "swexl",
StraightLane(np.array([150, 0]), np.array([178, 0]), line_types=[n, n], forbidden=True))
"""
straight road of west
m:middle
"""
net.add_lane("inter_sw_4", "interwm2",
StraightLane(np.array([167, -62]), np.array([167, -162]), line_types=[c, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([66, -37]), np.array([66, -42]), line_types=[n, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([66, -42]), np.array([62, -49]), line_types=[n, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([62, -62]), np.array([66, -67]), line_types=[n, c]))
# net.add_lane("intersw_mergein", "interwm_mergeout",
# StraightLane(np.array([62, -47]), np.array([62, -55]), line_types=[c, c]))
net.add_lane("inter_wm_2", "intersw4",
StraightLane(np.array([163, -162]), np.array([163, -62]), line_types=[c, c]))
# net.add_lane("interwm_mergeout", "interwm2l",
# StraightLane(np.array([62, -55]), np.array([62, -62]), line_types=[c, s]))
# net.add_lane("interwm_mergeout", "interwm2r",
# StraightLane(np.array([62, -55]), np.array([66, -62]), line_types=[n, c]))
net.add_lane("interwm2", "wmsel",
StraightLane(np.array([167, -162]), np.array([167, -212]), line_types=[c, c], forbidden=True))
net.add_lane("interwm2", "wmser",
StraightLane(np.array([171, -162]), np.array([171, -212]), line_types=[c, c], forbidden=True))
net.add_lane("wmsxr", "inter_wm_2",
StraightLane(np.array([159, -212]), np.array([159, -162]), line_types=[s, c]))
net.add_lane("wmsxl", "inter_wm_2",
StraightLane(np.array([163, -212]), np.array([163, -162]), line_types=[c, s]))
net.add_lane("wmwe", "interwm1", StraightLane(np.array([50, -216]), np.array([100, -216]), line_types=[s, c]))
net.add_lane("wmwe", "interwm1", StraightLane(np.array([50, -220]), np.array([100, -220]), line_types=[c, s]))
net.add_lane("interwm1", "wmwel",
StraightLane(np.array([100, -216]), np.array([150, -216]), line_types=[c, c], forbidden=True))
net.add_lane("interwm1", "wmwer",
StraightLane(np.array([100, -220]), np.array([150, -220]), line_types=[c, c], forbidden=True))
net.add_lane("inter_wm_1", "wmwx", StraightLane(np.array([100, -224]), np.array([50, -224]), line_types=[c, s]))
net.add_lane("inter_wm_1", "wmwx", StraightLane(np.array([100, -228]), np.array([50, -228]), line_types=[s, c]))
net.add_lane("wmwxl", "inter_wm_1",
StraightLane(np.array([150, -224]), np.array([100, -224]), line_types=[c, s]))
net.add_lane("wmwxr", "inter_wm_1",
StraightLane(np.array([150, -228]), np.array([100, -228]), line_types=[s, c]))
net.add_lane("wmee", "interwm3",
StraightLane(np.array([271, -228]), np.array([221, -228]), line_types=[c, s]))
net.add_lane("wmee", "interwm3",
StraightLane(np.array([271, -224]), np.array([221, -224]), line_types=[s, c]))
net.add_lane("interwm3", "wmeer",
StraightLane(np.array([221, -228]), np.array([179, -228]), line_types=[c, c], forbidden=True))
net.add_lane("interwm3", "wmeel",
StraightLane(np.array([221, -224]), np.array([179, -224]), line_types=[c, c], forbidden=True))
net.add_lane("wmexr", "inter_wm_3",
StraightLane(np.array([179, -216]), np.array([221, -216]), line_types=[s, c]))
net.add_lane("wmexl", "inter_wm_3",
StraightLane(np.array([179, -220]), np.array([221, -220]), line_types=[c, s]))
net.add_lane("inter_wm_3", "wmex",
StraightLane(np.array([221, -216]), np.array([271, -216]), line_types=[s, c]))
net.add_lane("inter_wm_3", "wmex",
StraightLane(np.array([221, -220]), np.array([271, -220]), line_types=[c, s]))
net.add_lane("interwm4", "wmner",
StraightLane(np.array([159, -274]), np.array([159, -232]), line_types=[c, c], forbidden=True))
net.add_lane("interwm4", "wmnel",
StraightLane(np.array([163, -274]), np.array([163, -232]), line_types=[c, c], forbidden=True))
net.add_lane("wmnxr", "inter_wm_4",
StraightLane(np.array([171, -232]), np.array([171, -274]), line_types=[s, c]))
net.add_lane("wmnxl", "inter_wm_4",
StraightLane(np.array([167, -232]), np.array([167, -274]), line_types=[c, s]))
# bellow: fulfill the turning lanes for vehicles to turn
# center = [152, -210]
# radii = [6, 10]
# alpha = math.degrees(math.asin(math.sqrt(97) / radii[0] / 2))
net.add_lane("wmwer", "wmsxr",
StraightLane(np.array([150, -220]), np.array([159, -212]), line_types=[n, n], forbidden=True))
net.add_lane("wmwel", "wmnxl",
StraightLane(np.array([150, -216]), np.array([167, -232]), line_types=[n, n], forbidden=True))
net.add_lane("wmwel", "wmexl",
StraightLane(np.array([150, -216]), np.array([179, -220]), line_types=[n, n], forbidden=True))
# center = [152, -233]
net.add_lane("wmner", "wmwxr",
StraightLane(np.array([159, -232]), np.array([150, -228]), line_types=[n, n], forbidden=True))
net.add_lane("wmnel", "wmexl",
StraightLane(np.array([163, -232]), np.array([179, -220]), line_types=[n, n], forbidden=True))
net.add_lane("wmnel", "wmsxl",
StraightLane(np.array([163, -232]), np.array([163, -212]), line_types=[n, n], forbidden=True))
# center = [178, -233]
net.add_lane("wmeer", "wmnxr",
StraightLane(np.array([179, -228]), np.array([171, -232]), line_types=[n, n], forbidden=True))
net.add_lane("wmeel", "wmsxl",
StraightLane(np.array([179, -224]), np.array([163, -212]), line_types=[n, n], forbidden=True))
net.add_lane("wmeel", "wmwxl",
StraightLane(np.array([179, -224]), np.array([150, -224]), line_types=[n, n], forbidden=True))
# center = [178, -210]
net.add_lane("wmser", "wmexr",
StraightLane(np.array([171, -212]), np.array([179, -216]), line_types=[n, n], forbidden=True))
net.add_lane("wmsel", "wmnxl",
StraightLane(np.array([167, -212]), np.array([167, -232]), line_types=[n, n], forbidden=True))
net.add_lane("wmsel", "wmwxl",
StraightLane(np.array([167, -212]), np.array([150, -224]), line_types=[n, n], forbidden=True))
"""
straight road of west
m:middle
"""
net.add_lane("inter_wm_4", "internw2",
StraightLane(np.array([167, -274]), np.array([167, -374]), line_types=[c, c]))
net.add_lane("inter_nw_2", "interwm4",
StraightLane(np.array([163, -374]), np.array([163, -274]), line_types=[c, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([66, -136]), np.array([62, -146]), line_types=[n, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([62, -161]), np.array([66, -165]), line_types=[n, c]))
net.add_lane("internw2", "nwsel",
StraightLane(np.array([167, -374]), np.array([167, -424]), line_types=[c, c], forbidden=True))
net.add_lane("internw2", "nwser",
StraightLane(np.array([171, -374]), np.array([171, -424]), line_types=[c, c], forbidden=True))
net.add_lane("nwsxr", "inter_nw_2",
| |
<reponame>SimplyVC/panic
import copy
import json
import logging
import unittest
from datetime import datetime
from datetime import timedelta
from unittest import mock
from unittest.mock import call
import pika
import pika.exceptions
from freezegun import freeze_time
from parameterized import parameterized
from src.alerter.alerters.contract.chainlink import ChainlinkContractAlerter
from src.alerter.alerters.github import GithubAlerter
from src.alerter.alerters.node.chainlink import ChainlinkNodeAlerter
from src.alerter.alerters.node.evm import EVMNodeAlerter
from src.alerter.alerters.system import SystemAlerter
from src.data_store.mongo.mongo_api import MongoApi
from src.data_store.redis.redis_api import RedisApi
from src.data_store.redis.store_keys import Keys
from src.data_store.stores.alert import AlertStore
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils import env
from src.utils.constants.rabbitmq import (STORE_EXCHANGE, HEALTH_CHECK_EXCHANGE,
ALERT_STORE_INPUT_QUEUE_NAME,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY,
ALERT_STORE_INPUT_ROUTING_KEY, TOPIC)
from src.utils.exceptions import (PANICException,
MessageWasNotDeliveredException)
from test.utils.utils import (connect_to_rabbit,
disconnect_from_rabbit,
delete_exchange_if_exists,
delete_queue_if_exists)
class TestAlertStore(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.connection_check_time_interval = timedelta(seconds=0)
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.test_rabbit_manager = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.mongo_ip = env.DB_IP
self.mongo_db = env.DB_NAME
self.mongo_port = env.DB_PORT
self.mongo = MongoApi(logger=self.dummy_logger.getChild(
MongoApi.__name__), db_name=self.mongo_db, host=self.mongo_ip,
port=self.mongo_port)
self.redis_db = env.REDIS_DB
self.redis_host = env.REDIS_IP
self.redis_port = env.REDIS_PORT
self.redis_namespace = env.UNIQUE_ALERTER_IDENTIFIER
self.redis = RedisApi(self.dummy_logger, self.redis_db,
self.redis_host, self.redis_port, '',
self.redis_namespace,
self.connection_check_time_interval)
self.test_store_name = 'store name'
self.test_store = AlertStore(self.test_store_name,
self.dummy_logger,
self.rabbitmq)
self.heartbeat_routing_key = HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY
self.test_queue_name = 'test queue'
connect_to_rabbit(self.rabbitmq)
self.rabbitmq.exchange_declare(HEALTH_CHECK_EXCHANGE, TOPIC, False,
True, False, False)
self.rabbitmq.exchange_declare(STORE_EXCHANGE, TOPIC, False,
True, False, False)
self.rabbitmq.queue_declare(ALERT_STORE_INPUT_QUEUE_NAME, False, True,
False, False)
self.rabbitmq.queue_bind(ALERT_STORE_INPUT_QUEUE_NAME, STORE_EXCHANGE,
ALERT_STORE_INPUT_ROUTING_KEY)
connect_to_rabbit(self.test_rabbit_manager)
self.test_rabbit_manager.queue_declare(self.test_queue_name, False,
True, False, False)
self.test_rabbit_manager.queue_bind(self.test_queue_name,
HEALTH_CHECK_EXCHANGE,
self.heartbeat_routing_key)
self.test_data_str = 'test data'
self.test_exception = PANICException('test_exception', 1)
self.info = 'INFO'
self.warning = 'WARNING'
self.critical = 'CRITICAL'
self.internal = 'INTERNAL'
self.parent_id = 'test_parent_id'
self.parent_id2 = 'test_parent_id2'
self.alert_id = 'test_alert_id'
self.origin_id = 'test_origin_id'
self.alert_name = 'test_alert'
self.metric = 'system_is_down'
self.severity = 'warning'
self.message = 'alert message'
self.value = 'alert_code_1'
self.alert_id_2 = 'test_alert_id_2'
self.origin_id_2 = 'test_origin_id_2'
self.alert_name_2 = 'test_alert_2'
self.metric_2 = 'system_cpu_usage'
self.severity_2 = 'critical'
self.message_2 = 'alert message 2'
self.value_2 = 'alert_code_2'
self.alert_id_3 = 'test_alert_id_3'
self.origin_id_3 = 'test_origin_id_3'
self.alert_name_3 = 'test_alert_3'
self.metric_3 = 'system_storage_usage'
self.severity_3 = 'info'
self.message_3 = 'alert message 3'
self.value_3 = 'alert_code_3'
self.alert_id_4 = 'test_alert_id_4'
self.origin_id_4 = 'test_origin_id_4'
self.alert_name_4 = 'test_alert_4'
self.metric_4 = 'node_is_down'
self.severity_4 = 'info'
self.message_4 = 'alert message 4'
self.value_4 = 'alert_code_4'
self.alert_id_5 = 'test_alert_id_5'
self.origin_id_5 = 'test_origin_id_5'
self.alert_name_5 = 'test_alert_5'
self.metric_5 = 'price_feed_not_observed'
self.severity_5 = 'info'
self.message_5 = 'alert message 5'
self.value_5 = 'alert_code_5'
self.alert_id_6 = 'test_alert_id_6'
self.origin_id_6 = 'test_origin_id_6'
self.alert_name_6 = 'test_alert_6'
self.metric_6 = 'evm_node_is_down'
self.severity_6 = 'info'
self.message_6 = 'alert message 6'
self.value_6 = 'alert_code_6'
self.last_monitored = datetime(2012, 1, 1).timestamp()
self.none = None
# We do not want to reset `github_release` for Github metrics as we
# will lose the pending upgrades
self.github_alert_metrics = ['cannot_access_github']
# Normal alerts
self.alert_data_1 = {
'parent_id': self.parent_id,
'origin_id': self.origin_id,
'alert_code': {
'name': self.alert_name,
'code': self.value,
},
'severity': self.severity,
'metric': self.metric,
'message': self.message,
'timestamp': self.last_monitored,
}
self.alert_data_2 = {
'parent_id': self.parent_id2,
'origin_id': self.origin_id_2,
'alert_code': {
'name': self.alert_name_2,
'code': self.value_2,
},
'severity': self.severity_2,
'metric': self.metric_2,
'message': self.message_2,
'timestamp': self.last_monitored,
}
self.alert_data_3 = {
'parent_id': self.parent_id,
'origin_id': self.origin_id_3,
'alert_code': {
'name': self.alert_name_3,
'code': self.value_3,
},
'severity': self.severity_3,
'metric': self.metric_3,
'message': self.message_3,
'timestamp': self.last_monitored,
}
self.alert_data_4 = {
'parent_id': self.parent_id,
'origin_id': self.origin_id_4,
'alert_code': {
'name': self.alert_name_4,
'code': self.value_4,
},
'severity': self.severity_4,
'metric': self.metric_4,
'message': self.message_4,
'timestamp': self.last_monitored,
}
self.alert_data_4_1 = copy.deepcopy(self.alert_data_4)
self.alert_data_4_1['parent_id'] = self.parent_id2
self.alert_data_5 = {
'parent_id': self.parent_id,
'origin_id': self.origin_id_5,
'alert_code': {
'name': self.alert_name_5,
'code': self.value_5,
},
'severity': self.severity_5,
'metric': self.metric_5,
'message': self.message_5,
'timestamp': self.last_monitored,
'alert_data': {'contract_proxy_address':
'0x5DcB78343780E1B1e578ae0590dc1e868792a435'}
}
self.alert_data_5_1 = copy.deepcopy(self.alert_data_5)
self.alert_data_5_1['parent_id'] = self.parent_id2
self.alert_data_5_1['alert_data']['contract_proxy_address'] = \
'0xA5F7146D3cbB5a50Da36b8AC3857C48Ed3BF3bd9'
self.alert_data_6 = {
'parent_id': self.parent_id,
'origin_id': self.origin_id_6,
'alert_code': {
'name': self.alert_name_6,
'code': self.value_6,
},
'severity': self.severity_6,
'metric': self.metric_6,
'message': self.message_6,
'timestamp': self.last_monitored,
}
self.alert_data_6_1 = copy.deepcopy(self.alert_data_6)
self.alert_data_6_1['parent_id'] = self.parent_id2
# Bad data
self.alert_data_key_error = {
"result": {
"data": {},
"data2": {}
}
}
self.alert_data_unexpected = {
"unexpected": {}
}
# Alerts copied for GITHUB metric values, these are used to test
# Metric deletion on startup
self.alert_data_github_1 = copy.deepcopy(self.alert_data_1)
self.alert_data_github_1['metric'] = 'github_release'
self.alert_data_github_2 = copy.deepcopy(self.alert_data_1)
self.alert_data_github_2['metric'] = 'cannot_access_github'
self.alert_data_github_3 = copy.deepcopy(self.alert_data_2)
self.alert_data_github_3['metric'] = 'github_release'
"""
Internal alerts on component reset which are used to clear metrics from
REDIS.
"""
self.alert_internal_system_chain_1 = {
'parent_id': self.parent_id,
'origin_id': SystemAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric,
'message': self.message,
'timestamp': self.last_monitored,
}
self.alert_internal_system_all_chains = {
'parent_id': None,
'origin_id': SystemAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_2,
'message': self.message_2,
'timestamp': self.last_monitored,
}
self.alert_internal_chainlink_1 = {
'parent_id': self.parent_id,
'origin_id': ChainlinkNodeAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric,
'message': self.message,
'timestamp': self.last_monitored,
}
self.alert_internal_chainlink_all_chains = {
'parent_id': None,
'origin_id': ChainlinkNodeAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_2,
'message': self.message_2,
'timestamp': self.last_monitored,
}
self.alert_internal_chainlink_contract_1 = {
'parent_id': self.parent_id,
'origin_id': ChainlinkContractAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_5,
'message': self.message_5,
'timestamp': self.last_monitored,
}
self.alert_internal_chainlink_contract_all_chains = {
'parent_id': None,
'origin_id': ChainlinkContractAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_5,
'message': self.message_5,
'timestamp': self.last_monitored,
}
self.alert_internal_evm_node_1 = {
'parent_id': self.parent_id,
'origin_id': EVMNodeAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_6,
'message': self.message_6,
'timestamp': self.last_monitored,
}
self.alert_internal_evm_node_all_chains = {
'parent_id': None,
'origin_id': EVMNodeAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric_6,
'message': self.message_6,
'timestamp': self.last_monitored,
}
self.alert_internal_github_chain_1 = {
'parent_id': self.parent_id,
'origin_id': GithubAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric,
'message': self.message,
'timestamp': self.last_monitored,
}
self.alert_internal_github_all_chains = {
'parent_id': None,
'origin_id': GithubAlerter.__name__,
'alert_code': {
'name': 'internal_alert_1',
'code': 'internal_alert_1',
},
'severity': self.internal,
'metric': self.metric,
'message': self.message_2,
'timestamp': self.last_monitored,
}
    def tearDown(self) -> None:
        """Delete Rabbit queues/exchanges, wipe Redis and Mongo test data,
        and drop all fixture references so each test starts clean."""
        # Remove the store's queue/exchanges from the broker.
        connect_to_rabbit(self.rabbitmq)
        delete_queue_if_exists(self.rabbitmq, ALERT_STORE_INPUT_QUEUE_NAME)
        delete_exchange_if_exists(self.rabbitmq, STORE_EXCHANGE)
        delete_exchange_if_exists(self.rabbitmq, HEALTH_CHECK_EXCHANGE)
        disconnect_from_rabbit(self.rabbitmq)
        # Remove the observer test queue via its own connection.
        connect_to_rabbit(self.test_rabbit_manager)
        delete_queue_if_exists(self.test_rabbit_manager, self.test_queue_name)
        disconnect_from_rabbit(self.test_rabbit_manager)
        self.dummy_logger = None
        self.connection_check_time_interval = None
        self.rabbitmq = None
        self.test_rabbit_manager = None
        # Clear Redis before dropping the reference.
        self.redis.delete_all_unsafe()
        self.redis = None
        self.test_store._redis = None
        # Drop the Mongo collection used by the fixtures.
        self.mongo.drop_collection(self.parent_id)
        self.mongo = None
        self.test_store._mongo = None
        self.test_store = None
    # Simple accessor checks: each verifies that a property (or the str
    # conversion) of the store reflects the values supplied in setUp.
    def test__str__returns_name_correctly(self) -> None:
        self.assertEqual(self.test_store_name, str(self.test_store))
    def test_name_property_returns_name_correctly(self) -> None:
        self.assertEqual(self.test_store_name, self.test_store.name)
    def test_mongo_ip_property_returns_mongo_ip_correctly(self) -> None:
        self.assertEqual(self.mongo_ip, self.test_store.mongo_ip)
    def test_mongo_db_property_returns_mongo_db_correctly(self) -> None:
        self.assertEqual(self.mongo_db, self.test_store.mongo_db)
    def test_mongo_port_property_returns_mongo_port_correctly(self) -> None:
        self.assertEqual(self.mongo_port, self.test_store.mongo_port)
    # Type-only comparison: the store constructs its own MongoApi instance.
    def test_mongo_property_returns_mongo(self) -> None:
        self.assertEqual(type(self.mongo), type(self.test_store.mongo))
    def test_redis_property_returns_redis_correctly(self) -> None:
        self.assertEqual(type(self.redis), type(self.test_store.redis))
    def test_initialise_rabbitmq_initialises_everything_as_expected(
            self) -> None:
        """_initialise_rabbitmq must connect, enable delivery confirmation,
        declare both exchanges, and declare/bind the input queue."""
        try:
            # To make sure that the exchanges have not already been declared
            self.rabbitmq.connect()
            self.rabbitmq.queue_delete(ALERT_STORE_INPUT_QUEUE_NAME)
            self.test_rabbit_manager.queue_delete(self.test_queue_name)
            self.rabbitmq.exchange_delete(HEALTH_CHECK_EXCHANGE)
            self.rabbitmq.exchange_delete(STORE_EXCHANGE)
            self.rabbitmq.disconnect()
            self.test_store._initialise_rabbitmq()
            # Perform checks that the connection has been opened, marked as
            # open, and that the delivery confirmation variable is set.
            self.assertTrue(self.test_store.rabbitmq.is_connected)
            self.assertTrue(self.test_store.rabbitmq.connection.is_open)
            self.assertTrue(
                self.test_store.rabbitmq.channel._delivery_confirmation)
            # Check whether the producing exchanges have been created by
            # using passive=True. If this check fails an exception is raised
            # automatically.
            self.test_store.rabbitmq.exchange_declare(
                STORE_EXCHANGE, passive=True)
            self.test_store.rabbitmq.exchange_declare(
                HEALTH_CHECK_EXCHANGE, passive=True)
            # Check whether the exchange has been created by sending messages
            # to it. If this fails an exception is raised, hence the test fails.
            self.test_store.rabbitmq.basic_publish_confirm(
                exchange=HEALTH_CHECK_EXCHANGE,
                routing_key=self.heartbeat_routing_key, body=self.test_data_str,
                is_body_dict=False,
                properties=pika.BasicProperties(delivery_mode=2),
                mandatory=False)
            # Check whether the exchange has been created by sending messages
            # to it. If this fails an exception is raised, hence the test fails.
            self.test_store.rabbitmq.basic_publish_confirm(
                exchange=STORE_EXCHANGE,
                routing_key=ALERT_STORE_INPUT_ROUTING_KEY,
                body=self.test_data_str, is_body_dict=False,
                properties=pika.BasicProperties(delivery_mode=2),
                mandatory=False)
            # Re-declare queue to get the number of messages; exactly one of
            # the two publishes above should have been routed to it.
            res = self.test_store.rabbitmq.queue_declare(
                ALERT_STORE_INPUT_QUEUE_NAME, False, True, False, False)
            self.assertEqual(1, res.method.message_count)
        except Exception as e:
            self.fail("Test failed: {}".format(e))
    @parameterized.expand([
        ("KeyError", "self.alert_data_key_error "),
    ])
    @mock.patch("src.data_store.stores.store.RabbitMQApi.basic_ack",
                autospec=True)
    @mock.patch("src.data_store.stores.store.Store._send_heartbeat",
                autospec=True)
    def test_process_data_with_bad_data_does_raises_exceptions(
            self, mock_error, mock_bad_data, mock_send_hb, mock_ack) -> None:
        """Bad alert data must be acked but no heartbeat sent, and the mongo
        stage must raise the parameterized exception.

        ``mock_error``/``mock_bad_data`` come from ``parameterized.expand``
        as strings that are eval'd below; ``mock_send_hb``/``mock_ack`` are
        injected by the ``mock.patch`` decorators (innermost patch first).
        """
        mock_ack.return_value = None
        try:
            self.test_store._initialise_rabbitmq()
            blocking_channel = self.test_store.rabbitmq.channel
            method_chains = pika.spec.Basic.Deliver(
                routing_key=ALERT_STORE_INPUT_ROUTING_KEY)
            properties = pika.spec.BasicProperties()
            # Unexpected data must not crash _process_data itself.
            self.test_store._process_data(
                blocking_channel,
                method_chains,
                properties,
                json.dumps(self.alert_data_unexpected)
            )
            # NOTE(review): eval() here only ever sees the literals declared
            # in the parameterized list above, so it is not an injection risk.
            self.assertRaises(eval(mock_error),
                              self.test_store._process_mongo_store,
                              eval(mock_bad_data))
            mock_ack.assert_called_once()
            mock_send_hb.assert_not_called()
        except Exception as e:
            self.fail("Test failed: {}".format(e))
    @freeze_time("2012-01-01")
    @mock.patch("src.data_store.stores.store.RabbitMQApi.basic_ack",
                autospec=True)
    @mock.patch("src.data_store.stores.alert.AlertStore._process_redis_store",
                autospec=True)
    @mock.patch("src.data_store.stores.alert.AlertStore._process_mongo_store",
                autospec=True)
    def test_process_data_sends_heartbeat_correctly(self,
                                                    mock_process_mongo_store,
                                                    mock_process_redis_store,
                                                    mock_basic_ack) -> None:
        """On successful processing a heartbeat with the (frozen) current
        timestamp must be published to the health-check exchange, and both
        storage stages must have been invoked once."""
        mock_basic_ack.return_value = None
        try:
            self.test_rabbit_manager.connect()
            self.test_store._initialise_rabbitmq()
            # Start from an empty observation queue bound to the heartbeat
            # routing key.
            self.test_rabbit_manager.queue_delete(self.test_queue_name)
            res = self.test_rabbit_manager.queue_declare(
                queue=self.test_queue_name, durable=True, exclusive=False,
                auto_delete=False, passive=False
            )
            self.assertEqual(0, res.method.message_count)
            self.test_rabbit_manager.queue_bind(
                queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
                routing_key=self.heartbeat_routing_key)
            blocking_channel = self.test_store.rabbitmq.channel
            method_chains = pika.spec.Basic.Deliver(
                routing_key=ALERT_STORE_INPUT_ROUTING_KEY)
            properties = pika.spec.BasicProperties()
            self.test_store._process_data(
                blocking_channel,
                method_chains,
                properties,
                json.dumps(self.alert_data_1)
            )
            # Exactly one heartbeat should now be waiting in the queue.
            res = self.test_rabbit_manager.queue_declare(
                queue=self.test_queue_name, durable=True, exclusive=False,
                auto_delete=False, passive=True
            )
            self.assertEqual(1, res.method.message_count)
            # freeze_time pins the timestamp to 2012-01-01.
            heartbeat_test = {
                'component_name': self.test_store_name,
                'is_alive': True,
                'timestamp': datetime(2012, 1, 1).timestamp()
            }
            _, _, body = self.test_rabbit_manager.basic_get(
                self.test_queue_name)
            self.assertEqual(heartbeat_test, json.loads(body))
            mock_process_mongo_store.assert_called_once()
            mock_process_redis_store.assert_called_once()
        except Exception as e:
            self.fail("Test failed: {}".format(e))
@mock.patch("src.data_store.stores.store.RabbitMQApi.basic_ack",
autospec=True)
def test_process_data_doesnt_send_heartbeat_on_processing_error(
self, mock_basic_ack) -> None:
mock_basic_ack.return_value = None
try:
self.test_rabbit_manager.connect()
self.test_store._initialise_rabbitmq()
self.test_rabbit_manager.queue_delete(self.test_queue_name)
res = self.test_rabbit_manager.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_rabbit_manager.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=self.heartbeat_routing_key)
blocking_channel = self.test_store.rabbitmq.channel
method_chains | |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku recurrent core."""
import abc
import types
from typing import Any, NamedTuple, Optional, Sequence, Tuple, Union
from haiku._src import base
from haiku._src import basic
from haiku._src import conv
from haiku._src import initializers
from haiku._src import module
from haiku._src import stateful
import jax
import jax.nn
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
# This builds a minimal stand-in for the public `hk` namespace from the
# internal submodules so this file does not import the full haiku package
# (presumably to avoid a circular import — see the note above).
hk = types.ModuleType("haiku")
hk.initializers = initializers
hk.Linear = basic.Linear
hk.ConvND = conv.ConvND
hk.get_parameter = base.get_parameter
hk.Module = module.Module
hk.scan = stateful.scan
inside_transform = base.inside_transform
# Drop the raw submodule names so all further references go through `hk`.
del base, basic, conv, initializers, module
class RNNCore(hk.Module):
  """Base class for RNN cores.

  This class defines the basic functionality that every core should
  implement: :meth:`initial_state`, used to construct an example of the
  core state; and :meth:`__call__` which applies the core parameterized
  by a previous state to an input.

  Cores may be used with :func:`dynamic_unroll` and :func:`static_unroll` to
  iteratively construct an output sequence from the given input sequence.
  """

  @abc.abstractmethod
  def __call__(self, inputs, prev_state) -> Tuple[Any, Any]:
    """Run one step of the RNN.

    Args:
      inputs: An arbitrarily nested structure.
      prev_state: Previous core state.

    Returns:
      A tuple with two elements ``output, next_state``. ``output`` is an
      arbitrarily nested structure. ``next_state`` is the next core state, this
      must be the same shape as ``prev_state``.
    """

  @abc.abstractmethod
  def initial_state(self, batch_size: Optional[int]):
    """Constructs an initial state for this core.

    Args:
      batch_size: Optional int or an integral scalar tensor representing
        batch size. If None, the core may either fail or (experimentally)
        return an initial state without a batch dimension.

    Returns:
      Arbitrarily nested initial state for this core.
    """
def static_unroll(core, input_sequence, initial_state, time_major=True):
  """Performs a static unroll of an RNN.

  An *unroll* corresponds to calling the core on each element of the
  input sequence in a loop, carrying the state through::

      state = initial_state
      for t in range(len(input_sequence)):
        outputs, state = core(input_sequence[t], state)

  A *static* unroll replaces a loop with its body repeated multiple
  times when executed inside :func:`jax.jit`::

      state = initial_state
      outputs0, state = core(input_sequence[0], state)
      outputs1, state = core(input_sequence[1], state)
      outputs2, state = core(input_sequence[2], state)
      ...

  See :func:`dynamic_unroll` for a loop-preserving unroll function.

  Args:
    core: An :class:`RNNCore` to unroll.
    input_sequence: An arbitrarily nested structure of tensors of shape
      ``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
      where ``T`` is the number of time steps.
    initial_state: An initial state of the given core.
    time_major: If True, inputs are expected time-major, otherwise they are
      expected batch-major.

  Returns:
    A tuple with two elements:
      * **output_sequence** - An arbitrarily nested structure of tensors
        of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
      * **final_state** - Core state at time step ``T``.
  """
  output_sequence = []
  time_axis = 0 if time_major else 1
  # ``jax.tree_leaves``/``jax.tree_map``/``jax.tree_multimap`` are deprecated
  # aliases (``tree_multimap`` has been removed from recent JAX releases), so
  # use the supported ``jax.tree_util`` namespace instead.
  num_steps = jax.tree_util.tree_leaves(input_sequence)[0].shape[time_axis]
  state = initial_state
  for t in range(num_steps):
    # ``t`` is bound as a lambda default to avoid the late-binding closure
    # pitfall inside the loop.
    if time_major:
      inputs = jax.tree_util.tree_map(lambda x, _t=t: x[_t], input_sequence)
    else:
      inputs = jax.tree_util.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
    outputs, state = core(inputs, state)
    output_sequence.append(outputs)
  # Stack outputs along the time axis. ``tree_map`` over multiple trees
  # replaces the removed ``tree_multimap``.
  output_sequence = jax.tree_util.tree_map(
      lambda *args: jnp.stack(args, axis=time_axis),
      *output_sequence)
  return output_sequence, state
def _swap_batch_time(inputs):
"""Swaps batch and time axes, assumed to be the first two axes."""
return jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), inputs)
def dynamic_unroll(core,
                   input_sequence,
                   initial_state,
                   time_major=True,
                   reverse=False,
                   return_all_states=False):
  """Performs a dynamic unroll of an RNN.

  An *unroll* corresponds to calling the core on each element of the
  input sequence in a loop, carrying the state through. A *dynamic* unroll
  preserves the loop structure when executed inside :func:`jax.jit`. See
  :func:`static_unroll` for an unroll function which replaces a loop with
  its body repeated multiple times.

  Args:
    core: An :class:`RNNCore` to unroll.
    input_sequence: An arbitrarily nested structure of tensors of shape
      ``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
      where ``T`` is the number of time steps.
    initial_state: An initial state of the given core.
    time_major: If True, inputs are expected time-major, otherwise they are
      expected batch-major.
    reverse: If True, inputs are scanned in the reversed order. Equivalent to
      reversing the time dimension in both inputs and outputs. See
      https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html for
      more details.
    return_all_states: If True, all intermediate states are returned rather
      than only the last one in time.

  Returns:
    A tuple with two elements:
      * **output_sequence** - An arbitrarily nested structure of tensors
        of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
      * **state_sequence** - If return_all_states is True, returns the
        sequence of core states. Otherwise, core state at time step ``T``.
  """
  # Inside a Haiku transform we must use the Haiku-aware scan; otherwise the
  # plain jax.lax.scan suffices.
  scan = hk.scan if inside_transform() else jax.lax.scan

  def scan_f(carry, x):
    # Adapt core's (inputs, state) -> (outputs, state) signature to the
    # (carry, x) -> (carry, y) contract that scan expects.
    step_output, next_carry = core(x, carry)
    per_step = (step_output, next_carry) if return_all_states else step_output
    return next_carry, per_step

  # TODO(hamzamerzic): Remove axis swapping once scan supports time axis arg.
  if not time_major:
    input_sequence = _swap_batch_time(input_sequence)
  final_state, stacked = scan(
      scan_f, initial_state, input_sequence, reverse=reverse)
  if return_all_states:
    output_sequence, state_sequence = stacked
    if not time_major:
      output_sequence = _swap_batch_time(output_sequence)
      state_sequence = _swap_batch_time(state_sequence)
    return output_sequence, state_sequence
  output_sequence = stacked
  if not time_major:
    output_sequence = _swap_batch_time(output_sequence)
  return output_sequence, final_state
def add_batch(nest, batch_size: Optional[int]):
  """Adds a batch dimension at axis 0 to the leaves of a nested structure.

  Args:
    nest: An arbitrarily nested structure of tensors without a leading batch
      dimension.
    batch_size: Size of the new leading dimension; must not be ``None``
      (``broadcast_to`` would fail on a ``(None,) + shape`` target).

  Returns:
    The same structure with every leaf broadcast to ``(batch_size,) + shape``.
  """
  broadcast = lambda x: jnp.broadcast_to(x, (batch_size,) + x.shape)
  # ``jax.tree_map`` is a deprecated alias; use ``jax.tree_util.tree_map``.
  return jax.tree_util.tree_map(broadcast, nest)
class VanillaRNN(RNNCore):
  r"""Basic fully-connected RNN core.

  Given :math:`x_t` and the previous hidden state :math:`h_{t-1}` the
  core computes

  .. math::

     h_t = \operatorname{ReLU}(w_i x_t + b_i + w_h h_{t-1} + b_h)

  The output is equal to the new state, :math:`h_t`.
  """

  def __init__(
      self,
      hidden_size: int,
      double_bias: bool = True,
      name: Optional[str] = None
  ):
    """Constructs a vanilla RNN core.

    Args:
      hidden_size: Hidden layer size.
      double_bias: Whether to use a bias in the two linear layers. This changes
        nothing to the learning performance of the cell. However, doubling will
        create two sets of bias parameters rather than one.
      name: Name of the module.
    """
    super().__init__(name=name)
    self.hidden_size = hidden_size
    self.double_bias = double_bias

  def __call__(self, inputs, prev_state):
    # NOTE(review): keep the two Linear constructions in this order —
    # Haiku presumably derives parameter names from construction order
    # (confirm before reordering).
    in_to_hid = hk.Linear(self.hidden_size)
    # TODO(b/173771088): Consider changing default to double_bias=False.
    hid_to_hid = hk.Linear(self.hidden_size, with_bias=self.double_bias)
    pre_activation = in_to_hid(inputs) + hid_to_hid(prev_state)
    new_hidden = jax.nn.relu(pre_activation)
    # For a vanilla RNN the output and the next state are the same tensor.
    return new_hidden, new_hidden

  def initial_state(self, batch_size: Optional[int]):
    unbatched = jnp.zeros([self.hidden_size])
    if batch_size is None:
      return unbatched
    return add_batch(unbatched, batch_size)
class LSTMState(NamedTuple):
  """An LSTM core state consists of hidden and cell vectors.

  Attributes:
    hidden: Hidden state (also the per-step output of the core).
    cell: Cell state.
  """
  # Both fields typically share the same trailing shape — confirm against the
  # core that produced them.
  hidden: jnp.ndarray
  cell: jnp.ndarray
class LSTM(RNNCore):
r"""Long short-term memory (LSTM) RNN core.
The implementation is based on :cite:`zaremba2014recurrent`. Given
:math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})` the core
computes
.. math::
\begin{array}{ll}
i_t = \sigma(W_{ii} x_t + W_{hi} h_{t-1} + b_i) \\
f_t = \sigma(W_{if} x_t + W_{hf} h_{t-1} + b_f) \\
g_t = \tanh(W_{ig} x_t + W_{hg} h_{t-1} + b_g) \\
o_t = \sigma(W_{io} x_t + W_{ho} h_{t-1} + b_o) \\
c_t = f_t c_{t-1} + i_t g_t \\
h_t = o_t \tanh(c_t)
\end{array}
where :math:`i_t`, :math:`f_t`, :math:`o_t` are input, forget and
output gate activations, and :math:`g_t` is a vector of cell updates.
The output is equal to the new hidden, :math:`h_t`.
Notes:
Forget gate initialization:
Following :cite:`jozefowicz2015empirical` we add 1.0 to :math:`b_f`
after initialization in order to reduce the scale of forgetting in
the beginning of the training.
"""
def __init__(self, hidden_size: int, name: Optional[str] = None):
"""Constructs an LSTM.
Args:
hidden_size: Hidden layer size.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
def __call__(
self,
inputs: jnp.ndarray,
prev_state: LSTMState,
) -> Tuple[jnp.ndarray, LSTMState]:
if len(inputs.shape) > 2 or not inputs.shape:
raise ValueError("LSTM input must be rank-1 or rank-2.")
x_and_h = jnp.concatenate([inputs, prev_state.hidden], axis=-1)
gated = hk.Linear(4 * self.hidden_size)(x_and_h)
# TODO(slebedev): | |
utils._eval_generic_conditions(
rule, self.course, self.participation,
now_datetime, self.flow_id, self.login_exam_ticket))
    def test_if_signed_in_with_matching_exam_ticket(self):
        """The condition only passes when a ticket exists and its exam's
        flow_id matches the flow being evaluated."""
        rule = utils.FlowSessionRuleBase()
        rule.if_signed_in_with_matching_exam_ticket = True
        now_datetime = now() - timedelta(days=2)
        # login_exam_ticket is None -> condition fails
        self.assertFalse(
            utils._eval_generic_conditions(
                rule, self.course, self.participation,
                now_datetime, self.flow_id, None))
        # flow_id not match -> condition fails
        self.flow_id = "bar"
        self.login_exam_ticket.exam = mock.MagicMock()
        self.login_exam_ticket.exam.flow_id = "foo"
        self.assertFalse(
            utils._eval_generic_conditions(
                rule, self.course, self.participation,
                now_datetime, self.flow_id, self.login_exam_ticket))
        # flow_id matched -> condition passes
        self.flow_id = "foo"
        self.login_exam_ticket.exam = mock.MagicMock()
        self.login_exam_ticket.exam.flow_id = "foo"
        self.assertTrue(
            utils._eval_generic_conditions(
                rule, self.course, self.participation,
                now_datetime, self.flow_id, self.login_exam_ticket))
class EvalGenericSessionConditionsTest(unittest.TestCase):
    """Tests for course.utils._eval_generic_session_conditions."""

    def setUp(self):
        self.session = mock.MagicMock()
        patcher = mock.patch("course.utils.parse_date_spec")
        self.mock_parse_date_spec = patcher.start()
        # Any date spec resolves to one day in the future.
        self.mock_parse_date_spec.return_value = now() + timedelta(days=1)
        self.addCleanup(patcher.stop)

    def test_true(self):
        # A rule with no session conditions passes trivially.
        rule = utils.FlowSessionRuleBase()
        result = utils._eval_generic_session_conditions(
            rule, self.session, now())
        self.assertTrue(result)

    def test_if_has_tag(self):
        rule = utils.FlowSessionRuleBase()
        rule.if_has_tag = "foo"
        now_datetime = now()
        # A non-matching access rules tag fails the condition.
        self.session.access_rules_tag = "bar"
        result = utils._eval_generic_session_conditions(
            rule, self.session, now_datetime)
        self.assertFalse(result)
        # A matching tag passes.
        self.session.access_rules_tag = "foo"
        result = utils._eval_generic_session_conditions(
            rule, self.session, now_datetime)
        self.assertTrue(result)

    def test_if_started_before(self):
        rule = utils.FlowSessionRuleBase()
        rule.if_started_before = mock.MagicMock()
        now_datetime = now()
        # parse_date_spec is patched to now()+1day, so a session started
        # now is before the cutoff.
        self.session.start_time = now()
        result = utils._eval_generic_session_conditions(
            rule, self.session, now_datetime)
        self.assertTrue(result)
        # A session started after the cutoff fails.
        self.session.start_time = now() + timedelta(days=2)
        result = utils._eval_generic_session_conditions(
            rule, self.session, now_datetime)
        self.assertFalse(result)
class EvalParticipationTagsConditionsTest(CoursesTestMixinBase, TestCase):
    """Tests for course.utils._eval_participation_tags_conditions."""

    @classmethod
    def setUpTestData(cls):  # noqa
        course = factories.CourseFactory()
        # participation1: no tags; participation2: tag1+tag2;
        # participation3: tag1+tag2+tag3.
        cls.participation1 = factories.ParticipationFactory(course=course)
        tag1, tag2, tag3 = [
            factories.ParticipationTagFactory(course=course, name=tag_name)
            for tag_name in ("tag1", "tag2", "tag3")]
        cls.participation2 = factories.ParticipationFactory(course=course)
        cls.participation2.tags.set([tag1, tag2])
        cls.participation3 = factories.ParticipationFactory(course=course)
        cls.participation3.tags.set([tag1, tag2, tag3])

    def test_no_participation(self):
        # A tag condition can never pass without a participation.
        rule = utils.FlowSessionRuleBase()
        rule.if_has_participation_tags_any = ["tag1"]
        self.assertFalse(
            utils._eval_participation_tags_conditions(rule, None))

    def test_true(self):
        # Without tag conditions the check passes for anyone (even None).
        rule = utils.FlowSessionRuleBase()
        for participation in (None, self.participation1,
                              self.participation2, self.participation3):
            self.assertTrue(
                utils._eval_participation_tags_conditions(
                    rule, participation))

    def test_if_has_participation_tags_any(self):
        rule = utils.FlowSessionRuleBase()
        rule.if_has_participation_tags_any = ["tag1", "tag3"]
        self.assertFalse(
            utils._eval_participation_tags_conditions(
                rule, self.participation1))
        for participation in (self.participation2, self.participation3):
            self.assertTrue(
                utils._eval_participation_tags_conditions(
                    rule, participation))
        # An unknown tag matches nobody.
        rule.if_has_participation_tags_any = ["foo"]
        self.assertFalse(
            utils._eval_participation_tags_conditions(
                rule, self.participation3))

    def test_if_has_participation_tags_all(self):
        rule = utils.FlowSessionRuleBase()
        rule.if_has_participation_tags_all = ["tag1", "tag3"]
        # Only participation3 carries both tag1 and tag3.
        for participation in (self.participation1, self.participation2):
            self.assertFalse(
                utils._eval_participation_tags_conditions(
                    rule, participation))
        self.assertTrue(
            utils._eval_participation_tags_conditions(
                rule, self.participation3))
class GetFlowRulesTest(SingleCourseTestMixin, TestCase):
    """Tests for course.utils.get_flow_rules."""
    # test utils.get_flow_rules
    flow_id = QUIZ_FLOW_ID
    def test_no_rules(self):
        # empty rules: the flow description carries no rules block at all,
        # so the provided defaults must be returned regardless of
        # consider_exceptions.
        flow_desc = self.get_hacked_flow_desc(del_rules=True)
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now(),
            consider_exceptions=False,
            default_rules_desc=default_rules_desc
        )
        self.assertListEqual(result, default_rules_desc)
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now(),
            consider_exceptions=True,
            default_rules_desc=default_rules_desc
        )
        self.assertListEqual(result, default_rules_desc)
    def test_rules_with_given_kind(self):
        # use real rules
        flow_desc = self.get_hacked_flow_desc()
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        for kind in dict(constants.FLOW_RULE_KIND_CHOICES).keys():
            with self.subTest(missing_kind=kind):
                result = utils.get_flow_rules(
                    flow_desc, kind,
                    self.student_participation,
                    self.flow_id,
                    now(),
                    consider_exceptions=False,
                    default_rules_desc=default_rules_desc
                )
                # there are existing rules for each of these kinds, so the
                # defaults must not be used
                self.assertNotEqual(result, default_rules_desc)
    def test_rules_with_no_given_kind(self):
        flow_desc_dict = self.get_hacked_flow_desc(as_dict=True)
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        for kind in dict(constants.FLOW_RULE_KIND_CHOICES).keys():
            flow_desc_dict_copy = deepcopy(flow_desc_dict)
            rules_dict = struct_to_dict(flow_desc_dict_copy["rules"])
            # delete this kind from flow_desc so the defaults must be used
            rules_dict.pop(kind)
            flow_desc_dict_copy["rules"] = dict_to_struct(rules_dict)
            flow_desc = dict_to_struct(flow_desc_dict_copy)
            assert not hasattr(flow_desc.rules, kind)
            with self.subTest(missing_kind=kind):
                result = utils.get_flow_rules(
                    flow_desc, kind,
                    self.student_participation,
                    self.flow_id,
                    now(),
                    consider_exceptions=False,
                    default_rules_desc=default_rules_desc
                )
                self.assertListEqual(result, default_rules_desc)
    def test_not_consider_exist_exceptions(self):
        # use real rules
        flow_desc = self.get_hacked_flow_desc()
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        # An exception exists but must be ignored with
        # consider_exceptions=False.
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            kind=constants.flow_rule_kind.start,
            rule={
                "if_after": "end_week 1"
            }
        )
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now(),
            consider_exceptions=False,  # NOT consider
            default_rules_desc=default_rules_desc
        )
        exist_start_rule = flow_desc.rules.start
        self.assertNotEqual(result, default_rules_desc)
        self.assertEqual(exist_start_rule, result)
    def test_consider_exist_exceptions_is_default_to_true(self):
        # use real rules
        flow_desc = self.get_hacked_flow_desc()
        exist_start_rule = flow_desc.rules.start
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        # creating 1 rule exception without expiration
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            kind=constants.flow_rule_kind.start,
            creation_time=now() - timedelta(days=1),
            rule={
                "if_after": "end_week 1"
            }
        )
        # consider_exceptions not specified: the exception must be included,
        # i.e. the default for consider_exceptions is True
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now(),
            default_rules_desc=default_rules_desc
        )
        self.assertNotEqual(result, default_rules_desc)
        self.assertEqual(len(result), len(exist_start_rule) + 1)
    def test_consider_exist_exceptions(self):
        # use real rules
        flow_desc = self.get_hacked_flow_desc()
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        # creating 2 rule exceptions without expiration
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            kind=constants.flow_rule_kind.start,
            creation_time=now() - timedelta(days=1),
            rule={
                "if_after": "end_week 1"
            }
        )
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            kind=constants.flow_rule_kind.start,
            rule={
                "if_before": "end_week 2"
            },
            creation_time=now() - timedelta(minutes=3),
        )
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now(),
            consider_exceptions=True,
            default_rules_desc=default_rules_desc
        )
        exist_start_rule = flow_desc.rules.start
        self.assertNotEqual(result, default_rules_desc)
        # exceptions are prepended before the base rules
        self.assertEqual(len(result), len(exist_start_rule) + 2)
        self.assertEqual(exist_start_rule, result[2:])
        # last created ordered first
        self.assertEqual(result[0].if_before, "end_week 2")
    def test_consider_exist_exceptions_rule_expiration(self):
        # use real rules
        flow_desc = self.get_hacked_flow_desc()
        exist_start_rule = flow_desc.rules.start
        default_rules_desc = [mock.MagicMock(), mock.MagicMock()]
        # creating 2 rule exceptions, one expiring 12 hours ago, one
        # expiring in 12 hours
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            expiration=now() - timedelta(hours=12),
            kind=constants.flow_rule_kind.start,
            creation_time=now() - timedelta(days=1),
            rule={
                "if_after": "end_week 1"
            }
        )
        factories.FlowRuleExceptionFactory(
            flow_id=self.flow_id,
            participation=self.student_participation,
            expiration=now() + timedelta(hours=12),
            kind=constants.flow_rule_kind.start,
            rule={
                "if_before": "end_week 2"
            },
            creation_time=now() - timedelta(minutes=3),
        )
        # {{{ all exceptions not due
        now_datetime = now() - timedelta(days=3)
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now_datetime,
            consider_exceptions=True,
            default_rules_desc=default_rules_desc
        )
        self.assertEqual(len(result), len(exist_start_rule) + 2)
        self.assertEqual(exist_start_rule, result[2:])
        # last created ordered first
        self.assertEqual(result[0].if_before, "end_week 2")
        # }}}
        # {{{ one exception expired
        now_datetime = now()
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now_datetime,
            consider_exceptions=True,
            default_rules_desc=default_rules_desc
        )
        self.assertEqual(len(result), len(exist_start_rule) + 1)
        self.assertEqual(exist_start_rule, result[1:])
        # last created ordered first
        self.assertEqual(result[0].if_before, "end_week 2")
        # }}}
        # {{{ all exceptions expired: only the base rules remain
        now_datetime = now() + timedelta(days=5)
        result = utils.get_flow_rules(
            flow_desc, constants.flow_rule_kind.start,
            self.student_participation,
            self.flow_id,
            now_datetime,
            consider_exceptions=True,
            default_rules_desc=default_rules_desc
        )
        self.assertEqual(len(result), len(exist_start_rule))
        self.assertEqual(exist_start_rule, result)
        # }}}
# Canned datetimes consumed by parse_date_spec_get_rule_test_side_effect
# below: one opaque mock, plus fixed past/current/future times.
my_mock_event_time = mock.MagicMock()
my_test_event_1_time = now() - timedelta(days=2)
my_test_event_2_time = now()
my_test_event_3_time = now() + timedelta(days=1)
def parse_date_spec_get_rule_test_side_effect(
        course, datespec, vctx=None, location=None):
    """mock.patch side effect for course.utils.parse_date_spec.

    Returns a canned time for the well-known test datespecs and defers to
    the real parse_date_spec for everything else.
    """
    # A tuple of pairs (rather than a dict) so that datespec values which
    # are not hashable still compare cleanly with ==.
    canned_times = (
        ("my_mock_event_time", my_mock_event_time),
        ("my_test_event 1", my_test_event_1_time),
        ("my_test_event 2", my_test_event_2_time),
        ("my_test_event 3", my_test_event_3_time),
    )
    for known_spec, canned in canned_times:
        if datespec == known_spec:
            return canned
    return parse_date_spec(course, datespec, vctx, location)
class GetSessionRuleMixin(object):
    """Shared fixture for get_session_*_rule tests.

    Subclasses supply ``call_func``, ``rule_klass``, ``fallback_rule``,
    ``default_kwargs`` and ``get_result``; setUp patches out the rule
    sources and condition evaluators so each test can steer them directly.
    """
    flow_id = QUIZ_FLOW_ID
    @property
    def call_func(self):
        # the course.utils function under test; must be overridden
        raise NotImplementedError()
    def get_updated_kwargs(self, **extra_kwargs):
        # Deep-copy so individual tests cannot mutate the shared defaults.
        kwargs = deepcopy(self.default_kwargs)
        kwargs.update(extra_kwargs)
        return kwargs
    @property
    def default_kwargs(self):
        # baseline keyword arguments for call_func; must be overridden
        raise NotImplementedError()
    @property
    def fallback_rule(self):
        # rule expected when no configured rule matches; must be overridden
        raise NotImplementedError()
    def get_result(self, **extra_kwargs):
        raise NotImplementedError()
    def assertRuleEqual(self, rule, expected_rule):  # noqa
        # Compare rule structs field by field; expected_rule may also be
        # given as a plain dict.
        self.assertIsInstance(rule, self.rule_klass)
        rule_dict = struct_to_dict(rule)
        if isinstance(expected_rule, dict):
            expected_rule_dict = expected_rule
        else:
            self.assertIsInstance(expected_rule, self.rule_klass)
            expected_rule_dict = struct_to_dict(expected_rule)
        self.assertDictEqual(rule_dict, expected_rule_dict)
    def setUp(self):
        super(GetSessionRuleMixin, self).setUp()
        # Patch the rule source ...
        fake_get_flow_rules = mock.patch("course.utils.get_flow_rules")
        self.mock_get_flow_rules = fake_get_flow_rules.start()
        self.addCleanup(fake_get_flow_rules.stop)
        # ... and the three condition evaluators used while matching rules.
        fake_eval_generic_conditions = mock.patch(
            "course.utils._eval_generic_conditions")
        self.mock_eval_generic_conditions = fake_eval_generic_conditions.start()
        self.addCleanup(fake_eval_generic_conditions.stop)
        fake_eval_participation_tags_conditions = mock.patch(
            "course.utils._eval_participation_tags_conditions")
        self.mock_eval_participation_tags_conditions = (
            fake_eval_participation_tags_conditions.start())
        self.addCleanup(fake_eval_participation_tags_conditions.stop)
        fake_eval_generic_session_conditions = mock.patch(
            "course.utils._eval_generic_session_conditions")
        self.mock_eval_generic_session_conditions = (
            fake_eval_generic_session_conditions.start())
        self.addCleanup(fake_eval_generic_session_conditions.stop)
        # Every participation is treated as a plain student.
        fake_get_participation_role_identifiers = mock.patch(
            "course.enrollment.get_participation_role_identifiers")
        self.mock_get_participation_role_identifiers = (
            fake_get_participation_role_identifiers.start())
        self.mock_get_participation_role_identifiers.return_value = ["student"]
        self.addCleanup(fake_get_participation_role_identifiers.stop)
        # Date specs resolve through the canned side effect defined above.
        fake_parse_date_spec = mock.patch("course.utils.parse_date_spec")
        self.mock_parse_date_spec = fake_parse_date_spec.start()
        self.mock_parse_date_spec.side_effect = (
            parse_date_spec_get_rule_test_side_effect)
        self.addCleanup(fake_parse_date_spec.stop)
class GetSessionStartRuleTest(GetSessionRuleMixin, SingleCourseTestMixin, TestCase):
    """Tests for course.utils.get_session_start_rule."""
    # test utils.get_session_start_rule
    call_func = utils.get_session_start_rule
    rule_klass = utils.FlowSessionStartRule
    # Deny-everything rule expected whenever no configured rule matches.
    fallback_rule = utils.FlowSessionStartRule(
        may_list_existing_sessions=False,
        may_start_new_session=False)
@property
def default_kwargs(self):
return {
"course": self.course,
"participation": self.student_participation,
"flow_id": self.flow_id,
"flow_desc": mock.MagicMock(),
"now_datetime": now(),
"facilities": None,
"for_rollover": False,
"login_exam_ticket": None,
}
def get_result(self, **extra_kwargs):
kwargs = self.get_updated_kwargs(**extra_kwargs)
return utils.get_session_start_rule(**kwargs)
def get_default_rule(self, **kwargs):
defaults = {
"tag_session": None,
"may_start_new_session": True,
"may_list_existing_sessions": True,
"default_expiration_mode": None,
}
defaults.update(kwargs)
return utils.FlowSessionStartRule(**defaults)
    def test_no_rules(self):
        """With no rules available the deny-everything fallback is returned."""
        self.mock_get_flow_rules.return_value = []
        result = self.get_result()
        self.assertRuleEqual(self.fallback_rule, result)
        # make sure get_flow_rules is called with expected default_rules_desc
        self.assertEqual(self.mock_get_flow_rules.call_count, 1)
        self.assertIn("default_rules_desc", self.mock_get_flow_rules.call_args[1])
        default_rules_desc = (
            self.mock_get_flow_rules.call_args[1]["default_rules_desc"])
        self.assertTrue(default_rules_desc[0].may_start_new_session)
        self.assertFalse(default_rules_desc[0].may_list_existing_sessions)
    def test_not_passing_eval_generic_conditions(self):
        """If the generic conditions fail, the fallback rule is returned."""
        self.mock_get_flow_rules.return_value = [mock.MagicMock()]
        self.mock_eval_generic_conditions.return_value = False
        fake_login_exam_ticket = mock.MagicMock()
        result = self.get_result(login_exam_ticket=fake_login_exam_ticket)
        self.assertRuleEqual(self.fallback_rule, result)
        # make sure _eval_generic_conditions is called with expected
        # login_exam_ticket
        self.assertEqual(self.mock_eval_generic_conditions.call_count, 1)
        self.assertIn("login_exam_ticket",
                      self.mock_eval_generic_conditions.call_args[1])
        self.assertEqual(
            self.mock_eval_generic_conditions.call_args[1]["login_exam_ticket"],
            fake_login_exam_ticket
        )
def test_not_passing_eval_participation_tags_conditions(self):
    """Failing the participation-tag conditions yields the fallback rule."""
    self.mock_get_flow_rules.return_value = [mock.MagicMock()]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = False
    self.assertRuleEqual(self.fallback_rule, self.get_result())
def test_not_passing_not_for_rollover_and_if_in_facility(self):
    """A rule restricted to facility "f1" does not match facility "f2"."""
    facility_rule = dict_to_struct({"if_in_facility": "f1"})
    self.mock_get_flow_rules.return_value = [facility_rule]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    outcome = self.get_result(facilities=frozenset(["f2"]))
    self.assertRuleEqual(self.fallback_rule, outcome)
def test_not_passing_not_for_rollover_and_if_has_in_progress_session(self):
    """Rule with a non-matching if_has_in_progress_session value falls back."""
    # a session for this participation/flow, using factory defaults
    factories.FlowSessionFactory(
        participation=self.student_participation, flow_id=self.flow_id)
    # NOTE(review): the value 2 presumably fails to equal the session's
    # boolean in-progress state, so the rule is rejected -- confirm against
    # get_session_start_rule's handling of if_has_in_progress_session.
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({"if_in_facility": "f1",
                        "if_has_in_progress_session": 2})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    result = self.get_result(facilities=frozenset(["f1", "f2"]))
    self.assertRuleEqual(self.fallback_rule, result)
def test_not_passing_not_for_rollover_and_if_has_session_tagged(self):
    """Rule requiring a tagged session falls back when no session has the tag."""
    # an in-progress session exists, but it carries no access_rules_tag
    factories.FlowSessionFactory(
        participation=self.student_participation, flow_id=self.flow_id,
        in_progress=True)
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({"if_has_in_progress_session": 1,
                        "if_has_session_tagged": "atag1"})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    result = self.get_result()
    self.assertRuleEqual(self.fallback_rule, result)
def test_not_passing_not_for_rollover_and_if_has_fewer_sessions_than(self):
    """Rule fails when the session count is not below the configured limit."""
    # one tagged session already exists, so "fewer than 1" cannot hold
    factories.FlowSessionFactory(
        participation=self.student_participation, flow_id=self.flow_id,
        access_rules_tag="atag1"
    )
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({"if_has_session_tagged": "atag1",
                        "if_has_fewer_sessions_than": 1})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    result = self.get_result()
    self.assertRuleEqual(self.fallback_rule, result)
def test_not_passing_not_for_rollover_and_if_has_fewer_tagged_sessions_than(self):  # noqa
    """Rule fails when the tagged-session count is not below the limit."""
    # two sessions tagged "atag1": passes "fewer than 3" overall but not
    # "fewer than 1" tagged sessions
    factories.FlowSessionFactory.create_batch(size=2,
        participation=self.student_participation, flow_id=self.flow_id,
        access_rules_tag="atag1")
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({"if_has_fewer_sessions_than": 3,
                        "if_has_fewer_tagged_sessions_than": 1})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    result = self.get_result()
    self.assertRuleEqual(self.fallback_rule, result)
def test_passing_not_for_rollover_and_if_has_fewer_tagged_sessions_than(self):  # noqa
    """Rule passes when the tagged-session count is below the limit."""
    factories.FlowSessionFactory.create_batch(
        size=2, participation=self.student_participation,
        flow_id=self.flow_id, access_rules_tag="atag1")
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({"if_has_fewer_tagged_sessions_than": 3})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    expected = {
        "tag_session": None,
        "may_start_new_session": True,
        "may_list_existing_sessions": True,
        "default_expiration_mode": None,
    }
    self.assertRuleEqual(self.get_result(), expected)
def test_passing_not_for_rollover(self):
    """An unconditional (empty) rule matches; defaulted fields are returned."""
    factories.FlowSessionFactory.create_batch(size=2,
        participation=self.student_participation, flow_id=self.flow_id)
    # an empty rule imposes no conditions at all
    self.mock_get_flow_rules.return_value = [
        dict_to_struct({})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    result = self.get_result()
    self.assertRuleEqual(
        result,
        {"tag_session": None,
         "may_start_new_session": True,
         "may_list_existing_sessions": True,
         "default_expiration_mode": None}
    )
def test_passing_for_rollover(self):
    """With for_rollover=True an empty rule still matches with defaults."""
    factories.FlowSessionFactory.create_batch(
        size=2, participation=self.student_participation,
        flow_id=self.flow_id)
    self.mock_get_flow_rules.return_value = [dict_to_struct({})]
    self.mock_eval_generic_conditions.return_value = True
    self.mock_eval_participation_tags_conditions.return_value = True
    expected_rule = {
        "tag_session": None,
        "may_start_new_session": True,
        "may_list_existing_sessions": True,
        "default_expiration_mode": None,
    }
    self.assertRuleEqual(self.get_result(for_rollover=True), expected_rule)
def test_get_expected_rule(self):
tag_session = mock.MagicMock()
default_expiration_mode = mock.MagicMock()
may_start_new_session = mock.MagicMock()
may_list_existing_sessions = mock.MagicMock()
self.mock_get_flow_rules.return_value = [
dict_to_struct(
{"tag_session": tag_session,
"default_expiration_mode": default_expiration_mode,
"may_start_new_session": may_start_new_session,
"may_list_existing_sessions": may_list_existing_sessions
| |
= True
timerT = False
while True:
#get wake word
local_request = wake_word(command= 'request')
if voice == True:
text = record_audio()
text = get_text(text)
else:
text = input(f'{local_request.capitalize()}: ')
local_request = wake_word(command= 'request')
text = f'{local_request} {text}'
text = str(text.lower())
text_split = text.split()
response = ''
if wake_word(text= text) == True:
#check just for wake word
if local_request == text:
audio_file = 'resources/sounds/accept.wav'
subprocess.call(['afplay', audio_file])
text = record_audio()
text = str(text.lower())
text_split = text.split()
else:
text = text.replace(f'{local_request} ', '')
#check for blank input for text mode
if text.strip() == '' and voice == False:
continue
#beginning of functions
#check for greetings
response = greeting(text)
#check for math questions
if response == '':
response = get_math(text)
#stop
if ('quit' == text or 'exit' == text or 'stop' == text):
response = 'goodbye'
if voice == True:
assistant_response(response)
else:
print(response.capitalize())
exit()
#function list
if (('what can you do' == text or 'what are your abilities' == text
or 'what are your functions' == text) and response == ''):
response = get_functions('partial')
if voice == True:
assistant_response(response)
assistant_response('would you like the full list')
text = record_audio()
else:
print(response.capitalize())
print('would you like the full list'.capitalize())
text = input('Function: ')
text = str(text.lower())
if 'yes' in text:
full_response = get_functions('full')
for i in range(len(full_response)):
print(full_response[i])
continue
elif text == '' and voice == True:
response = ''
elif text == '' and voice == False:
continue
else:
response = 'okay'
#check for mode change
if 'change mode' == text and response == '':
if voice == True:
voice = False
response = 'text mode activated'
else:
voice = True
response = 'voice mode activated'
#change wake word
if 'change wake word' in text and response == '':
if voice == True:
assistant_response('what is your new wake word')
text = record_audio()
else:
print('What is your new wake word')
text = input('Wake word: ')
text = str(text.lower())
response = wake_word(text= text, command= 'change')
#reset wake word
if 'reset wake word' in text and response == '':
response = wake_word(text= '', command= 'change')
#check for date
if (('what is the date' in text or 'what\'s the date' in text) and
response == ''):
response = get_date()
#check for the time
if (('what is the time' in text or 'what\'s the time' in text) and
response == ''):
response = get_time()
#check spelling
if 'how do you spell' in text and voice == True and response == '':
response = get_spelling(text)
try:
#system commands
#system report
if (('system report' == text or 'system status' == text)
and response == ''):
response = system('system report')
#unmute
if ('unmute' == text or 'stop mute' == text) and response == '':
response = system('unmute')
#mute
if 'mute' == text and response == '':
response = system('mute')
#change volume
if 'volume' in text and response == '':
increase = ['volume up', 'increase volume',
'increase the volume']
decrease = ['volume down', 'decrease volume',
'decrease the volume']
for i in range(len(increase)):
if increase[i] in text:
response = system('increase volume')
for i in range(len(decrease)):
if decrease[i] in text:
response = system('decrease volume')
#sleep
if 'sleep' == text and response == '':
response = system('sleep')
exit()
except subprocess.CalledProcessError:
response = ''
#set a timer
if (('set a timer' in text or 'start a timer' in text
or 'start timer' in text) and response == ''):
try:
if timerT.is_alive():
response = 'timer is already set'
else:
if voice == True:
assistant_response('timer for how long')
text = record_audio()
else:
print('Timer for how long')
text = input('Timer: ')
text = str(text.lower())
if text == '' and voice == True:
response = ''
elif text == '' and voice == False:
continue
else:
interval = get_timer(text)
if type(interval) == float:
timerT = threading.Timer(interval= interval,
function= timer_sound)
timerT.start()
response = 'timer set'
else:
response = ''
except AttributeError:
if voice == True:
assistant_response('timer for how long')
text = record_audio()
else:
print('Timer for how long')
text = input('Timer: ')
text = str(text.lower())
if text == '' and voice == True:
response = ''
elif text == '' and voice == False:
continue
else:
interval = get_timer(text)
if type(interval) == float:
timerT = threading.Timer(interval= interval,
function= timer_sound)
timerT.start()
response = 'timer set'
else:
response = ''
#stop a timer
if (('stop the timer' in text or 'cancel the timer' in text
or 'cancel timer' in text) and response == ''):
try:
if timerT.is_alive():
timerT.cancel()
response = 'timer stopped'
else:
response = 'timer is not set'
except AttributeError:
response = 'timer is not set'
#check wikipedia
if 'search wikipedia' in text and response == '':
info = get_wiki(text)
try:
response = wikipedia.summary(info, sentences= 2)
except ValueError:
pass #to little options
except wikipedia.DisambiguationError:
pass #to many options
except wikipedia.PageError:
pass #no results found
#make an iNote
if 'make a note' in text and response == '':
if voice == True:
assistant_response('what is your note')
text = record_audio()
else:
print('What is your note')
text = input('Note: ')
if text == '' and voice == True:
response = ''
elif text == '' and voice == False:
continue
else:
response = get_inote(text)
#make a text note
if 'make a text note' in text and response == '':
if voice == True:
assistant_response('what is your note')
text = record_audio()
else:
print('What is your note')
text = input('Note: ')
if text == '' and voice == True:
response = ''
elif text == '' and voice == False:
continue
else:
response = get_note(text)
#youtube
if 'open youtube' in text and response == '':
response = get_site('open youtube')
if 'search youtube' in text and response == '':
for i in range(len(text_split)):
if (text_split[i] == 'search' and text_split[i+1] == 'youtube'
and text_split[i+2] == 'for'):
terms = text_split[i+3:]
terms = '+'.join(terms)
response = get_site(text= 'search youtube', terms= terms)
#search engines
if 'search for' in text and '.' in text and response == '':
for i in range(len(text_split)):
if text_split[i] == 'search' and text_split[i+1] == 'for':
terms = text_split[i+2:]
terms = '+'.join(terms)
response = get_site(text= 'search browser', terms= terms)
#duckduckgo (default)
if (('open the internet' in text or 'open duckduckgo' in text)
and response == ''):
response = get_site('open duckduckgo')
if (('search for' in text or 'search the internet for' in text
or 'search duckduckgo for' in text) and response == ''):
for i in range(len(text_split)):
if text_split[i] == 'search' and text_split[i+1] == 'for':
terms = text_split[i+2:]
terms = '+'.join(terms)
response = get_site(text= 'search duckduckgo', terms= terms)
#google
if 'open google' in text and response == '':
response = get_site('open google')
if 'search google for' in text and response == '':
for i in range(len(text_split)):
if (text_split[i] == 'search' and text_split[i+1] == 'google'
and text_split[i+2] == 'for'):
terms = text_split[i+3:]
terms = '+'.join(terms)
response = get_site(text= 'search google', terms= terms)
#bing
if 'open bing' in text and response == '':
response = get_site('open bing')
if 'search bing for' in text and response == '':
for i in range(len(text_split)):
if (text_split[i] == 'search' and text_split[i+1] == 'bing'
and text_split[i+2] == 'for'):
terms = text_split[i+3:]
terms = '+'.join(terms)
response = get_site(text= 'search bing', terms= terms)
#open application
if 'open' in text and response == '':
for i in range(len(text_split)):
if text_split[i] == 'open':
app = text_split[i+1:]
app = ' '.join(app)
response = get_application(app= app, state= 'open')
#open news application
if (('what\'s the' in text and 'news' in text
or 'what is the' in text and 'news' in text) and response == ''):
get_application(app= 'news', state= 'open')
response = 'opening news'
#close application
if 'close' in text and response == '':
for i in range(len(text_split)):
if text_split[i] == 'close':
app = text_split[i+1:]
app = ' '.join(app)
response = get_application(app= app, state= 'close')
#response
if (response == '' or response == None) and voice == True:
audio_file = 'resources/sounds/unsure.wav'
subprocess.call(['afplay', audio_file])
elif (response == '' or response | |
#!/usr/bin/env python3
from nistats import regression
from nistats import reporting
from nistats.design_matrix import make_first_level_design_matrix
import nibabel as nb
import numpy as np
import os, pandas, sys, pdb, argparse, copy, scipy, jinja2
from os.path import join as pjoin
from nilearn import plotting
from nilearn.signal import butterworth
from nilearn.input_data import NiftiMasker
import matplotlib
import pylab as plt
import seaborn as sns
from nilearn._utils.niimg import load_niimg
from niworkflows.nipype.algorithms import confounds as nac
# Command-line interface; mirrors the signature of denoise() defined below.
# NOTE(review): parse_args() runs at import time, so importing this module
# as a library requires a compatible sys.argv -- confirm intended usage.
parser = argparse.ArgumentParser(description='Function for performing nuisance regression. Saves resulting output '
                                             'nifti file, information about nuisance regressors and motion (html '
                                             'report), and outputs nibabel object containing clean data')
parser.add_argument('img_file', help='4d nifti img: file path or nibabel object loaded into memory')
parser.add_argument('tsv_file', help='tsv file containing nuisance regressors to be removed')
parser.add_argument('out_path', help='output directory for saving new data file')
parser.add_argument('--col_names',
                    help='which columns of TSV file to include as nuisance regressors. defaults to ALL columns.',
                    nargs="+")
parser.add_argument('--hp_filter', help='frequency cut-off for high pass filter (removing low frequencies). Recommend '
                                        '.009 Hz')
parser.add_argument('--lp_filter', help='frequency cut-off for low pass filter (removing high frequencies). Recommend '
                                        '.1 Hz for non-task data')
parser.add_argument('--out_figure_path',
                    help='output directory for saving figures. Defaults to location of out_path + _figures')
args = parser.parse_args()

# Unpack the parsed arguments into module-level names used by the script.
img_file = args.img_file
tsv_file = args.tsv_file
out_path = args.out_path
col_names = args.col_names
hp_filter = args.hp_filter
lp_filter = args.lp_filter
out_figure_path = args.out_figure_path
def denoise(img_file, tsv_file, out_path, col_names=False, hp_filter=False, lp_filter=False, out_figure_path=False):
nii_ext = '.nii.gz'
FD_thr = [.5]
sc_range = np.arange(-1, 3)
constant = 'constant'
# read in files
img = load_niimg(img_file)
# get file info
img_name = os.path.basename(img.get_filename())
file_base = img_name[0:img_name.find('.')]
save_img_file = pjoin(out_path, file_base + \
'_NR' + nii_ext)
data = img.get_data()
df_orig = pandas.read_csv(tsv_file, '\t', na_values='n/a')
df = copy.deepcopy(df_orig)
Ntrs = df.as_matrix().shape[0]
print('# of TRs: ' + str(Ntrs))
assert (Ntrs == data.shape[len(data.shape) - 1])
# select columns to use as nuisance regressors
if col_names:
df = df[col_names]
str_append = ' [SELECTED regressors in CSV]'
else:
col_names = df.columns.tolist()
str_append = ' [ALL regressors in CSV]'
# fill in missing nuisance values with mean for that variable
for col in df.columns:
if sum(df[col].isnull()) > 0:
print('Filling in ' + str(sum(df[col].isnull())) + ' NaN value for ' + col)
df[col] = df[col].fillna(np.mean(df[col]))
print('# of Confound Regressors: ' + str(len(df.columns)) + str_append)
# implement HP filter in regression
TR = img.header.get_zooms()[-1]
frame_times = np.arange(Ntrs) * TR
if hp_filter:
hp_filter = float(hp_filter)
assert (hp_filter > 0)
period_cutoff = 1. / hp_filter
df = make_first_level_design_matrix(frame_times, period_cut=period_cutoff, add_regs=df.as_matrix(),
add_reg_names=df.columns.tolist())
# fn adds intercept into dm
hp_cols = [col for col in df.columns if 'drift' in col]
print('# of High-pass Filter Regressors: ' + str(len(hp_cols)))
else:
# add in intercept column into data frame
df[constant] = 1
print('No High-pass Filter Applied')
dm = df.as_matrix()
# prep data
data = np.reshape(data, (-1, Ntrs))
data_mean = np.mean(data, axis=1)
Nvox = len(data_mean)
# setup and run regression
model = regression.OLSModel(dm)
results = model.fit(data.T)
if not hp_filter:
results_orig_resid = copy.deepcopy(results.resid) # save for rsquared computation
# apply low-pass filter
if lp_filter:
# input to butterworth fn is time x voxels
low_pass = float(lp_filter)
Fs = 1. / TR
if low_pass >= Fs / 2:
raise ValueError('Low pass filter cutoff if too close to the Nyquist frequency (%s)' % (Fs / 2))
temp_img_file = pjoin(out_path, file_base + \
'_temp' + nii_ext)
temp_img = nb.Nifti1Image(np.reshape(results.resid.T + np.reshape(data_mean, (Nvox, 1)), img.shape).astype('float32'),
img.affine, header=img.header)
temp_img.to_filename(temp_img_file)
results.resid = butterworth(results.resid, sampling_rate=Fs, low_pass=low_pass, high_pass=None)
print('Low-pass Filter Applied: < ' + str(low_pass) + ' Hz')
# add mean back into data
clean_data = results.resid.T + np.reshape(data_mean, (Nvox, 1)) # add mean back into residuals
# save out new data file
print('Saving output file...')
clean_data = np.reshape(clean_data, img.shape).astype('float32')
new_img = nb.Nifti1Image(clean_data, img.affine, header=img.header)
new_img.to_filename(save_img_file)
######### generate Rsquared map for confounds only
if hp_filter:
# first remove low-frequency information from data
hp_cols.append(constant)
model_first = regression.OLSModel(df[hp_cols].as_matrix())
results_first = model_first.fit(data.T)
results_first_resid = copy.deepcopy(results_first.resid)
del results_first, model_first
# compute sst - borrowed from matlab
sst = np.square(np.linalg.norm(results_first_resid -
np.mean(results_first_resid, axis=0), axis=0))
# now regress out 'true' confounds to estimate their Rsquared
nr_cols = [col for col in df.columns if 'drift' not in col]
model_second = regression.OLSModel(df[nr_cols].as_matrix())
results_second = model_second.fit(results_first_resid)
# compute sse - borrowed from matlab
sse = np.square(np.linalg.norm(results_second.resid, axis=0))
del results_second, model_second, results_first_resid
elif not hp_filter:
# compute sst - borrowed from matlab
sst = np.square(np.linalg.norm(data.T -
np.mean(data.T, axis=0), axis=0))
# compute sse - borrowed from matlab
sse = np.square(np.linalg.norm(results_orig_resid, axis=0))
del results_orig_resid
# compute rsquared of nuisance regressors
zero_idx = scipy.logical_and(sst == 0, sse == 0)
sse[zero_idx] = 1
sst[zero_idx] = 1 # would be NaNs - become rsquared = 0
rsquare = 1 - np.true_divide(sse, sst)
rsquare[np.isnan(rsquare)] = 0
######### Visualizing DM & outputs
fontsize = 12
fontsize_title = 14
def_img_size = 8
if not out_figure_path:
out_figure_path = save_img_file[0:save_img_file.find('.')] + '_figures'
if not os.path.isdir(out_figure_path):
os.mkdir(out_figure_path)
png_append = '_' + img_name[0:img_name.find('.')] + '.png'
print('Output directory: ' + out_figure_path)
# DM corr matrix
cm = df[df.columns[0:-1]].corr()
curr_sz = copy.deepcopy(def_img_size)
if cm.shape[0] > def_img_size:
curr_sz = curr_sz + ((cm.shape[0] - curr_sz) * .3)
mtx_scale = curr_sz * 100
mask = np.zeros_like(cm, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
fig, ax = plt.subplots(figsize=(curr_sz, curr_sz))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(cm, mask=mask, cmap=cmap, center=0, vmax=cm[cm < 1].max().max(), vmin=cm[cm < 1].min().min(),
square=True, linewidths=.5, cbar_kws={"shrink": .6})
ax.set_xticklabels(ax.get_xticklabels(), rotation=60, ha='right', fontsize=fontsize)
ax.set_yticklabels(cm.columns.tolist(), rotation=-30, va='bottom', fontsize=fontsize)
ax.set_title('Nuisance Corr. Matrix', fontsize=fontsize_title)
plt.tight_layout()
file_corr_matrix = 'Corr_matrix_regressors' + png_append
fig.savefig(pjoin(out_figure_path, file_corr_matrix))
plt.close(fig)
del fig, ax
# DM of Nuisance Regressors (all)
tr_label = 'TR (Volume #)'
fig, ax = plt.subplots(figsize=(curr_sz - 4.1, def_img_size))
x_scale_html = ((curr_sz - 4.1) / def_img_size) * 890
reporting.plot_design_matrix(df, ax=ax)
ax.set_title('Nuisance Design Matrix', fontsize=fontsize_title)
ax.set_xticklabels(ax.get_xticklabels(), rotation=60, ha='right', fontsize=fontsize)
ax.set_yticklabels(ax.get_yticklabels(), fontsize=fontsize)
ax.set_ylabel(tr_label, fontsize=fontsize)
plt.tight_layout()
file_design_matrix = 'Design_matrix' + png_append
fig.savefig(pjoin(out_figure_path, file_design_matrix))
plt.close(fig)
del fig, ax
# FD timeseries plot
FD = 'FD'
poss_names = ['FramewiseDisplacement', FD, 'framewisedisplacement', 'fd']
fd_idx = [df_orig.columns.__contains__(i) for i in poss_names]
if np.sum(fd_idx) > 0:
FD_name = poss_names[fd_idx == True]
if sum(df_orig[FD_name].isnull()) > 0:
df_orig[FD_name] = df_orig[FD_name].fillna(np.mean(df_orig[FD_name]))
y = df_orig[FD_name].as_matrix()
Nremove = []
sc_idx = []
for thr_idx, thr in enumerate(FD_thr):
idx = y >= thr
sc_idx.append(copy.deepcopy(idx))
for iidx in np.where(idx)[0]:
for buffer in sc_range:
curr_idx = iidx + buffer
if curr_idx >= 0 and curr_idx <= len(idx):
sc_idx[thr_idx][curr_idx] = True
Nremove.append(np.sum(sc_idx[thr_idx]))
Nplots = len(FD_thr)
sns.set(font_scale=1.5)
sns.set_style('ticks')
fig, axes = plt.subplots(Nplots, 1, figsize=(def_img_size * 1.5, def_img_size / 2), squeeze=False)
sns.despine()
bound = .4
fd_mean = np.mean(y)
for curr in np.arange(0, Nplots):
axes[curr, 0].plot(y)
axes[curr, 0].plot((-bound, Ntrs + bound), FD_thr[curr] * np.ones((1, 2))[0], '--', color='black')
axes[curr, 0].scatter(np.arange(0, Ntrs), y, s=20)
if Nremove[curr] > 0:
info = scipy.ndimage.measurements.label(sc_idx[curr])
for cluster in np.arange(1, info[1] + 1):
temp = np.where(info[0] == cluster)[0]
axes[curr, 0].axvspan(temp.min() - bound, temp.max() + bound, alpha=.5, color='red')
axes[curr, 0].set_ylabel('Framewise Disp. (' + FD + ')')
axes[curr, 0].set_title(FD + ': ' + str(100 * Nremove[curr] / Ntrs)[0:4]
+ '% of scan (' + str(Nremove[curr]) + ' volumes) would be scrubbed (FD thr.= ' +
str(FD_thr[curr]) + ')')
plt.text(Ntrs + 1, FD_thr[curr] - .01, FD + ' = ' + str(FD_thr[curr]), fontsize=fontsize)
plt.text(Ntrs, fd_mean - .01, 'avg = ' + str(fd_mean), fontsize=fontsize)
axes[curr, 0].set_xlim((-bound, Ntrs + 8))
plt.tight_layout()
axes[curr, 0].set_xlabel(tr_label)
file_fd_plot = FD + '_timeseries' + png_append
fig.savefig(pjoin(out_figure_path, file_fd_plot))
plt.close(fig)
del fig, axes
print(FD + ' timeseries plot saved')
else:
print(FD + ' not found: ' + FD + ' timeseries not plotted')
file_fd_plot = None
# Carpet and DVARS plots - before & after nuisance regression
# need to create mask file to input to DVARS function
mask_file = pjoin(out_figure_path, 'mask_temp.nii.gz')
nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
nifti_masker.fit(img)
nifti_masker.mask_img_.to_filename(mask_file)
# create 2 or 3 carpet plots, depending on if LP filter is also applied
Ncarpet = 2
total_sz = int(16)
carpet_scale = 840
y_labels = ['Input (voxels)', 'Output \'cleaned\'']
imgs = [img, new_img]
img_files = [img_file, save_img_file]
color = ['red', 'salmon']
labels = ['input', 'cleaned']
if lp_filter:
Ncarpet = 3
total_sz = int(20)
carpet_scale = carpet_scale * (9/8)
y_labels = ['Input', 'Clean Pre-LP', 'Clean LP']
imgs.insert(1, temp_img)
img_files.insert(1, temp_img_file)
color.insert(1, 'firebrick')
labels.insert(1, 'clean pre-LP')
labels[-1] = 'clean LP'
dvars = []
print('Computing dvars...')
for in_file in img_files:
temp = nac.compute_dvars(in_file=in_file, in_mask=mask_file)[1]
dvars.append(np.hstack((temp.mean(), temp)))
del | |
import ida_funcs
import ida_bytes
import idc
import ida_search
import idaapi
import ida_xref
import ida_segment
import ida_offset
import ida_kernwin
from idaapi import BADADDR
from ida_search import SEARCH_DOWN, SEARCH_UP
import ida_auto
import IDAPatternSearch_utils.ida_common as ida_common
import sys
import os
import pathlib
import struct
import ast
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
PATH_TO_FUNCTION_PATTERNS = str(pathlib.Path(
__file__).parent.absolute()) + "\\function_patterns\\"
# Each dictionary entry should include: (pattern_file_name, use_pre_patterns, use_post_patterns, arch_name, t_register_value=None)
_SEARCH_PARAMETERS = {
'THUMB_LE_LOOSENED': ("THUMB_LE_LOOSENED.xml", False, True, "THUMB", 1),
'THUMB_LE': ("THUMB_LE.xml", False, True, "THUMB", 1),
'THUMB_BE_LOOSENED': ("THUMB_BE_LOOSENED.xml", False, True, "THUMB", 1),
'THUMB_BE': ("THUMB_BE.xml", False, True, "THUMB", 1),
'ARM_32_LE': ("ARM_32_LE.xml", False, True, "ARM_32", 0),
'ARM_32_BE': ("ARM_32_BE.xml", False, True, "ARM_32", 0),
'AARCH_64_LE': ("AARCH_64_LE.xml", False, True, "AARCH64"),
'PPC_BE': ("PPC_BE.xml", False, True, "PowerPC"),
'V850': ("V850.xml", False, True, "V850")
}
# Alt + G is the IDA hotkey for the "change segment register value" dialog,
# the manual equivalent of what _change_t_register does programmatically.
def _change_t_register(ea, val):
    """Set the ARM T (Thumb) segment register at ea; val == -1 is a no-op."""
    if val != -1:
        idaapi.split_sreg_range(ea, idaapi.str2reg("T"), val, idaapi.SR_user)
def _parse_ghidra_pattern_item(pattern_item, element_length):
'''
This function parses a given Ghidra pattern item (pattern_item) which is one item from a Ghidra pattern.
The function also uses (element_length) to determine if it is:
* Hex item (starting with 0x which was already omitted before calling this function)
which in this case (element_length) == 4
* Bitfield item
which in this case (element_length) == 1
The '.' characther in both item types represents a wildcard Bit/Byte depends on the item type (Hex/Bitfield).
This function returns two values: (image, mask)
* image - represents the item image to search by the pattern.
* mask - represents the item mask that can be used to mask out the matched bytes and check against the image
'''
cur_image = 0
cur_mask = 0
for element in pattern_item:
if element == '.':
# Wildcard element
# mask should be zeros (in bits), image should be zero (lets say 0)
cur_mask = cur_mask << element_length
cur_image = cur_image << element_length
else:
# mask should be 1's (in bits), image is same as half_byte
cur_mask = cur_mask << element_length
if element_length == 1:
# bit element
cur_mask += 1
else:
# half byte element
cur_mask += 0xf
cur_image = cur_image << element_length
if element_length == 1:
# bit element
cur_image += int(element, 2)
else:
# half byte element
cur_image += int(element, 16)
return cur_image, cur_mask
def _convert_ghidra_pattern_to_image_and_mask(ghidra_pattern):
    '''
    Turn a whitespace-separated Ghidra pattern string into search bytes.

    Each token is either a hex item ("0x" prefix, 1/2/4 bytes wide) or an
    8-character bitfield item; both may contain '.' wildcards.  Tokens are
    parsed individually and concatenated.

    Returns a dict with byte-string values:
    * "image" - the concrete bytes to search for.
    * "mask"  - the bits of a candidate match that must be compared.

    On a token of unrecognized length an error is printed and parsing
    stops, leaving the remaining tokens out of the result.
    '''
    tokens = ghidra_pattern.split()
    image_parts = [b''] * len(tokens)
    mask_parts = [b''] * len(tokens)
    for idx, token in enumerate(tokens):
        if token.startswith('0x'):
            # Hex token: strip the prefix, two characters per byte.
            body = token[2:]
            if len(body) not in (2, 4, 8):
                print("[-] Unrecognized length while parsing: 0x" +
                      str(body))
                break
            token_image, token_mask = _parse_ghidra_pattern_item(body, 4)
            byte_width = len(body) // 2
        else:
            # Bitfield token: exactly 8 bit characters -> one byte.
            if len(token) != 8:
                print("[-] Unrecognized length while parsing: " +
                      str(token))
                break
            token_image, token_mask = _parse_ghidra_pattern_item(token, 1)
            byte_width = 1
        image_parts[idx] = token_image.to_bytes(byte_width, byteorder='big')
        mask_parts[idx] = token_mask.to_bytes(byte_width, byteorder='big')
    return {"image": b''.join(image_parts), "mask": b''.join(mask_parts)}
def _parse_ghidra_xml_pattern_list(filepath):
'''
Parses an Ghidra pattern XML file resides in (filepath).
This file contains pattern pairs where each pair consits of pre-patterns and post-patterns.
Pre-patterns should occur before a function start definition (It is usually a function end in the pattern file).
Post-patterns should occur in a place where a function start should be defined.
Returns a dictionary contains two keys:
* pre_patterns
* post_patterns
The values are lists where each item is a Ghidra pattern string.
Note that only one node for each element type (except data) exists in most cases,
but the parsing process treats the elements as lists for a more general case.
'''
import xml.dom.minidom
parsed_pre_patterns = []
parsed_post_patterns = []
doc = xml.dom.minidom.parse(filepath)
patternpairs_list = doc.getElementsByTagName("patternpairs")
for patternpairs_node in patternpairs_list:
# Parse pre patterns
prepatterns_list = patternpairs_node.getElementsByTagName(
'prepatterns')
for prepatterns_node in prepatterns_list:
data_list = prepatterns_node.getElementsByTagName('data')
for data_node in data_list:
data_value = data_node.firstChild.nodeValue
parsed_pre_patterns.append(data_value)
# Parse post patterns
postpatterns_list = patternpairs_node.getElementsByTagName(
'postpatterns')
for postpatterns_node in postpatterns_list:
data_list = postpatterns_node.getElementsByTagName('data')
for data_node in data_list:
data_value = data_node.firstChild.nodeValue
parsed_post_patterns.append(data_value)
return {"pre_patterns": parsed_pre_patterns, "post_patterns": parsed_post_patterns}
def _relax_ghidra_mask(mask):
'''
In order to use Ghidra patterns in IDA, we need to make the patterns more premissive
because IDA only allows byte wildcards, while Ghidra patterns involving bits.
This function returns the premissive mask given a strict (mask) byte string.
IDA bin_search mask should be composed of \x00 or \x01 bytes,
where \x01 means to perform the comparison and \x00 means not tp perform.
If the strict mask contains zero bit in one of the bytes, this byte should be \x00 in the relaxed mask.
Else, the byte contains only one bits, and therefore this byte should be \x01 in the relaxed mask.
'''
mask_list = list(mask)
relaxed_mask = [b'\x00' if b != 0xff else b'\x01' for b in mask_list]
return b''.join(relaxed_mask)
def parse_and_search(pattern_file_name, use_pre_patterns, use_post_patterns, arch_name, t_register_value=None, **kwargs):
    '''
    Find function prologues based on patterns for given architecture and pattern file.

    :param pattern_file_name: XML pattern file inside PATH_TO_FUNCTION_PATTERNS.
    :param use_pre_patterns: search using the file's pre-patterns.
    :param use_post_patterns: search using the file's post-patterns.
    :param arch_name: architecture label, used only in log messages.
    :param t_register_value: value for the ARM T register (None to skip setting it).
    :param kwargs: address-range options forwarded to _prologue_pattern_search.
    '''
    # [None] is the "this pattern list is unused" placeholder understood by
    # _prologue_pattern_search.
    pre_patterns = [None]
    post_patterns = [None]
    pattern_list = _parse_ghidra_xml_pattern_list(
        os.path.join(PATH_TO_FUNCTION_PATTERNS, pattern_file_name))
    if use_post_patterns:
        print("[+] Searching using post-patterns ({})".format(arch_name))
        post_patterns = pattern_list['post_patterns']
    if use_pre_patterns:
        print("[+] Searching using pre-patterns ({})".format(arch_name))
        pre_patterns = pattern_list['pre_patterns']
    _prologue_pattern_search(
        pre_patterns, post_patterns, t_register_value, arch_name, **kwargs)
def _prologue_pattern_search(pre_pattern_list, post_pattern_list, t_register_value, arch_name, **kwargs):
    """
    Scan the address ranges selected by **kwargs for function prologues.

    Every (pre_pattern, post_pattern) combination from the two lists is tried
    over each range.  A list that should not participate contains the single
    element None (see parse_and_search): pre-patterns describe bytes before a
    function (filler / previous function end), post-patterns describe the
    function start itself.  Each match is handed to
    _find_next_pattern_bytes_and_define_function, which may define a function
    in IDA, setting the T register from t_register_value.  Finally prints the
    number of functions defined for arch_name.
    """
    total_defined = 0
    for addr_range in ida_common.parse_address_ranges(**kwargs):
        range_start, range_end = addr_range[0], addr_range[1]
        for pre_pattern in pre_pattern_list:
            for post_pattern in post_pattern_list:
                cursor = range_start  # rewind for each pattern combination
                while cursor < range_end:
                    match_ea, defined = _find_next_pattern_bytes_and_define_function(
                        cursor, pre_pattern, post_pattern, t_register_value, range_end)
                    if match_ea == BADADDR:
                        # No further matches in this range.
                        break
                    total_defined += defined
                    cursor = match_ea + 1  # resume just past the last match
    print("[+] Total number of {} functions defined: {}".format(arch_name, total_defined))
def _find_next_pattern_bytes_and_define_function(start_ea, pre_pattern, post_pattern, t_register_value, end_ea=BADADDR):
"""
Find bytes starting in range (start_ea,end_ea) based on the given (post_pattern) or/and (pre_pattern).
There are 3 different cases for the function operation:
* When only post_pattern exists, the function will be defined at the matched bytes location.
* When only pre_pattern exists, the function will be defined after the matched bytes location.
* When both pre_pattern and post_pattern exists, the function will be defined after the pre-pattern matched bytes location.
When there is a match, first sets the T register with the given value (t_register_value).
Then, tries to define a function in IDA.
Returns two values:
1. The matched address (in case not found: BADADDR).
2. An integer that indicates if as a result a function was defined successfully (1 -> Success, 0 | |
<gh_stars>1-10
# encoding: utf8
# Copyright (c) 2020 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
NestedText: A Human Readable and Writable Data Format
"""
__version__ = "0.0.4"
__all__ = (
"load",
"loads",
"dump",
"dumps",
"DuplicateFieldBehaviour",
"NestedtextError",
"NestedtextType",
)
import collections
import enum
import io
import json
import re
from typing import Any, Dict, Iterable, Iterator, List, NoReturn, Optional, Tuple, Union
# Recursive alias for any NestedText value: a string, a list of values, or a
# dict with string keys.  The forward reference must spell the alias name
# exactly — the original "NestedTextType" (capital T) was a typo that could
# never resolve.
NestedtextType = Union[str, List["NestedtextType"], Dict[str, "NestedtextType"]]
NestedtextContainerType = Union[List[NestedtextType], Dict[str, NestedtextType]]
class NestedtextError(Exception):
    """Error raised for problems in a NestedText document.

    Optional line and column numbers are appended to the message and also
    stored as the ``lineno`` / ``colno`` attributes.
    """

    def __init__(
        self, message: str, lineno: Optional[int] = None, colno: Optional[int] = None
    ):
        location = ""
        if lineno is not None:
            location += f": {lineno}"
        if colno is not None:
            location += f":{colno}"
        super().__init__(message + location)
        self.lineno = lineno
        self.colno = colno
def _report(message, line, *args, colno=None, **kwargs) -> NoReturn:
    # Raise a NestedtextError located at the offending line (and optionally
    # column).  Extra *args/**kwargs are accepted but ignored.
    raise NestedtextError(message, line.lineno, colno)
def _indentation_error(line, depth) -> NoReturn:
    # Convenience wrapper: report bad indentation, using the expected depth
    # as the column number.
    _report("invalid indentation", line, colno=depth)
# ------------------------------------------------------------------------------
# Parsing logic
# ------------------------------------------------------------------------------
class DuplicateFieldBehaviour(str, enum.Enum):
    """Policy for handling duplicate keys when parsing a NestedText object.

    USE_FIRST keeps the first value seen, USE_LAST keeps the last, and ERROR
    reports a duplicate-key error (see _Parser._read_object).
    """

    USE_FIRST = "use_first"
    USE_LAST = "use_last"
    ERROR = "error"

    def __repr__(self):
        return str(self)
class _LineType(enum.Enum):
BLANK = enum.auto()
COMMENT = enum.auto()
STRING = enum.auto()
LIST_ITEM = enum.auto()
OBJECT_ITEM = enum.auto()
OBJECT_KEY = enum.auto()
INLINE_CONTAINER = enum.auto()
def __repr__(self):
return str(self)
def is_ignorable(self) -> bool:
return self in [self.BLANK, self.COMMENT]
class _Line(collections.namedtuple("_Line", "text, lineno, kind, depth, value")):
    """A successfully classified line.

    Fields: ``text`` is the raw line (including line endings), ``lineno`` is
    1-based, ``kind`` is the _LineType classification, ``depth`` is the
    indentation in spaces, and ``value`` is the parsed payload whose shape
    depends on ``kind`` (None, a string, or a (key, value) tuple for object
    items).
    """

    def __new__(
        cls,
        text: str,
        lineno: int,
        kind: _LineType,
        depth: int,
        value: Union[None, str, Tuple[str, Optional[str]]],
    ):
        return super().__new__(cls, text, lineno, kind, depth, value)
class _InvalidLineType(enum.Enum):
    """Reason a raw line was rejected by _LinesIter._read_line."""

    # Leading whitespace contained something other than spaces (e.g. tabs).
    NON_SPACE_INDENT = enum.auto()
    # The line matched no known NestedText line form.
    UNRECOGNISED = enum.auto()

    def __repr__(self):
        return str(self)
class _InvalidLine(collections.namedtuple("_InvalidLine", "text, lineno, kind, colno")):
    """A line that could not be classified.

    Carries the raw text, its 1-based line number, the _InvalidLineType
    reason, and the column at which the problem was detected.
    """

    def __new__(
        cls,
        text: str,
        lineno: int,
        kind: _InvalidLineType,
        colno: int,
    ):
        return super().__new__(cls, text, lineno, kind, colno)
class _LinesIter(Iterable[_Line]):
    """Iterator over classified content lines with one line of lookahead.

    Blank and comment lines are skipped silently; an invalid line raises a
    NestedtextError (via _report) as soon as it is reached.
    """

    def __init__(self, lines: Iterable[str]):
        self._generator = self._read_lines(lines)
        # One-line lookahead buffer: the next content line, or None at EOF.
        self._next_line: Optional[_Line] = self._advance_to_next_content_line()

    def __iter__(self):
        return self

    def __next__(self) -> _Line:
        if self._next_line is None:
            raise StopIteration
        this_line = self._next_line
        self._next_line = self._advance_to_next_content_line()
        return this_line

    def _read_lines(self, lines: Iterable[str]) -> Iterator[Union[_Line, _InvalidLine]]:
        # Classify every raw line, numbering from 1.
        for idx, line in enumerate(lines):
            yield self._read_line(line, idx + 1)

    def _read_line(self, line: str, lineno: int) -> Union[_Line, _InvalidLine]:
        """Classify one raw line into a _Line or an _InvalidLine."""
        if not line.strip():
            return _Line(line, lineno, _LineType.BLANK, 0, None)

        text = line.rstrip("\r\n")

        # Comments can have any leading whitespace.
        if text.lstrip()[0] == "#":
            return _Line(line, lineno, _LineType.COMMENT, 0, text.lstrip()[1:])

        # Depth counts leading spaces only.
        stripped = text.lstrip(" ")
        depth = len(text) - len(stripped)
        # Otherwise check leading whitespace consists only of spaces.
        if len(stripped.lstrip()) < len(stripped):
            return _InvalidLine(line, lineno, _InvalidLineType.NON_SPACE_INDENT, depth)

        def _read_content_line() -> Optional[Tuple[_LineType, Any]]:
            # Now handle normal content lines!
            if stripped == "-" or stripped.startswith("- "):
                # A bare "-" yields None (value may follow on deeper lines).
                return _LineType.LIST_ITEM, stripped[2:] or None
            elif stripped == ">" or stripped.startswith("> "):
                # Include end-of-line characters.
                value = re.sub(r"> ?", "", line.lstrip(" "), count=1)
                return _LineType.STRING, value
            elif stripped == ":" or stripped.startswith(": "):
                # Include end-of-line characters.
                value = re.sub(r": ?", "", line.lstrip(" "), count=1)
                return _LineType.OBJECT_KEY, value
            elif stripped[0] in "[{":
                return _LineType.INLINE_CONTAINER, stripped
            # Object item?  Payload is a (key, value-or-None) tuple.
            match = re.fullmatch(r"(?P<key>.+?)\s*:(?: (?P<value>.*))?", stripped)
            if match:
                return _LineType.OBJECT_ITEM, tuple(match.groups())
            return None

        result = _read_content_line()
        if result:
            return _Line(line, lineno, result[0], depth, result[1])
        else:
            return _InvalidLine(line, lineno, _InvalidLineType.UNRECOGNISED, depth)

    def _advance_to_next_content_line(self) -> Optional[_Line]:
        """Advance the generator the next useful line and return it."""
        while True:
            next_line = next(self._generator, None)
            if isinstance(next_line, _InvalidLine):
                # Raises NestedtextError; parsing stops here.
                _report("invalid line", next_line, colno=next_line.colno)
            if next_line is None or not next_line.kind.is_ignorable():
                break
        return next_line

    def peek_next(self) -> Optional[_Line]:
        # Return the lookahead line without consuming it (None at EOF).
        return self._next_line
class _Parser:
    """Recursive-descent parser converting NestedText lines into Python data.

    Works on a :class:`_LinesIter`, dispatching on the kind of the peeked
    line and recursing with the indentation depth at which the current value
    started.
    """

    def __init__(self, *, on_dup=DuplicateFieldBehaviour.ERROR):
        # Policy applied when an object contains the same key twice.
        self.on_dup = on_dup

    def parse(self, lines: Iterable[str]):
        """Parse an iterable of raw lines; an empty document yields None."""
        lines = _LinesIter(lines)
        if lines.peek_next() is None:
            return None
        return self._read_value(lines, 0)

    def _read_value(self, lines: _LinesIter, depth: int) -> Union[str, List, Dict]:
        """Read one value of any kind starting at indentation ``depth``."""
        if lines.peek_next().kind is _LineType.STRING:
            return self._read_string(lines, depth)
        elif lines.peek_next().kind is _LineType.LIST_ITEM:
            return self._read_list(lines, depth)
        elif lines.peek_next().kind in [_LineType.OBJECT_ITEM, _LineType.OBJECT_KEY]:
            return self._read_object(lines, depth)
        elif lines.peek_next().kind is _LineType.INLINE_CONTAINER:
            return self._read_inline_container(lines, depth)
        _report("unrecognized line", next(lines))

    def _read_string(self, lines: _LinesIter, depth: int) -> str:
        """Read a multiline string ('>' lines); the final line ending is
        stripped, embedded ones are preserved."""
        data = []
        while (
            lines.peek_next()
            and lines.peek_next().kind is _LineType.STRING
            and lines.peek_next().depth >= depth
        ):
            line = next(lines)
            data.append(line.value)
            if line.depth != depth:
                _indentation_error(line, depth)
        data[-1] = data[-1].rstrip("\r\n")
        return "".join(data)

    def _read_list(self, lines: _LinesIter, depth: int) -> List[NestedtextType]:
        """Read a list ('-' lines); item values may continue on deeper lines."""
        data = []
        while lines.peek_next() and lines.peek_next().depth >= depth:
            line = next(lines)
            if line.depth != depth:
                _indentation_error(line, depth)
            if line.kind is not _LineType.LIST_ITEM:
                _report("expected list item", line, colno=depth)
            if line.value:
                data.append(line.value)
            else:
                # Value may simply be empty, or it may be on next line, in which
                # case it must be indented.
                if lines.peek_next() is None:
                    value = ""
                else:
                    depth_of_next = lines.peek_next().depth
                    if depth_of_next > depth:
                        value = self._read_value(lines, depth_of_next)
                    else:
                        value = ""
                data.append(value)
        return data

    def _read_object(self, lines: _LinesIter, depth: int) -> Dict[str, NestedtextType]:
        """Read an object; handles inline items, multiline keys, duplicate
        keys (per self.on_dup) and values continued on deeper lines."""
        data = {}
        while lines.peek_next() and lines.peek_next().depth >= depth:
            line = lines.peek_next()
            if line.depth != depth:
                _indentation_error(line, depth)
            if line.kind is _LineType.OBJECT_ITEM:
                next(lines)  # Advance the iterator
                key, value = line.value
            elif line.kind is _LineType.OBJECT_KEY:
                key = self._read_object_key(lines, depth)
                value = None
            else:
                _report("expected object item", line, colno=depth)
            # A missing value may appear on the following (deeper) lines; a
            # multiline key *requires* one.
            if not value:
                if lines.peek_next() is None:
                    if line.kind is _LineType.OBJECT_KEY:
                        raise NestedtextError(
                            "expected value after multiline object key"
                        )
                    value = ""
                else:
                    depth_of_next = lines.peek_next().depth
                    if depth_of_next > depth:
                        value = self._read_value(lines, depth_of_next)
                    elif line.kind is _LineType.OBJECT_KEY:
                        raise NestedtextError(
                            "expected value after multiline object key"
                        )
                    else:
                        value = ""
            if key in data:
                # Found duplicate key.
                if self.on_dup == DuplicateFieldBehaviour.USE_FIRST:
                    continue
                elif self.on_dup == DuplicateFieldBehaviour.USE_LAST:
                    pass
                elif self.on_dup == DuplicateFieldBehaviour.ERROR:
                    _report("duplicate key", line, colno=depth)
            data[key] = value
        return data

    def _read_object_key(self, lines: _LinesIter, depth: int) -> str:
        """Read a multiline object key (':' lines) at exactly ``depth``."""
        data = []
        while (
            lines.peek_next()
            and lines.peek_next().kind is _LineType.OBJECT_KEY
            and lines.peek_next().depth == depth
        ):
            line = next(lines)
            data.append(line.value)
        data[-1] = data[-1].rstrip("\r\n")
        return "".join(data)

    def _read_inline_container(
        self, lines: _LinesIter, depth: int
    ) -> NestedtextContainerType:
        """Read a one-line [...] / {...} container by rewriting it as JSON."""
        line = next(lines)
        assert line.kind is _LineType.INLINE_CONTAINER
        line_text = line.value
        # Convert into valid JSON!
        # Escape quotes and tabs.  BUG FIX: str.replace() returns a new
        # string; the original code discarded these results, so quotes and
        # tabs were never actually escaped before the quoting below.
        line_text = line_text.replace('"', '\\"')
        line_text = line_text.replace("\t", "\\t")
        # Quote list items.
        line_text = re.sub(
            r"([\[,])\s*(?P<value>[^\[\]\{\}]+?)\s*(?=[,\]])", r'\1"\2"', line_text
        )
        # Quote dict keys.
        line_text = re.sub(
            r"([\{,])\s*(?P<key>[^\[\]\{\}:,]+?)\s*(?=:)", r'\1"\2"', line_text
        )
        # Quote dict values.
        line_text = re.sub(
            r"([\{,][^\[\]]+?):\s*(?P<value>[^\[\]\{\}:]+?)\s*(?=[,\}])",
            r'\1:"\2"',
            line_text,
        )
        try:
            return json.loads(line_text)
        except json.JSONDecodeError:
            _report("Invalid inline list/object", line)
def loads(
    content: str, *, on_dup=DuplicateFieldBehaviour.ERROR
) -> Optional[NestedtextType]:
    """Deserialize a NestedText document given as a string.

    Equivalent to wrapping *content* in a StringIO and calling :func:`load`.
    """
    stream = io.StringIO(content)
    return load(stream, on_dup=on_dup)
def load(
    stream: Iterable, *, on_dup=DuplicateFieldBehaviour.ERROR
) -> Optional[NestedtextType]:
    """Deserialize a NestedText document read from an iterable of lines
    (e.g. an open text file)."""
    parser = _Parser(on_dup=on_dup)
    return parser.parse(stream)
# ------------------------------------------------------------------------------
# Dumping logic
# ------------------------------------------------------------------------------
class _Dumper:
    def __init__(self, sort_keys: bool, indent: int):
        # sort_keys: presumably consumed by _dump_object (not visible in this
        # chunk) — TODO confirm.
        self.sort_keys = sort_keys
        # Number of spaces added per nesting level (see _dump_list).
        self.indent_size = indent
def dump(self, obj, writer):
if isinstance(obj, str):
self._dump_multiline_str(obj, writer, 0)
elif isinstance(obj, list):
self._dump_list(obj, writer, 0)
elif isinstance(obj, dict):
self._dump_object(obj, writer, 0)
else:
raise NestedtextError(
"Unsupported type to dump {!r}".format(type(obj).__name__)
)
def _dump_multiline_str(self, string: str, writer, indent: int):
lines = string.splitlines(keepends=True)
for line in lines:
writer.write(" " * indent)
writer.write("> " if line.strip("\r\n") else ">")
writer.write(line)
if string == "" or string[-1] in "\r\n":
writer.write(" " * indent)
writer.write(">")
def _dump_list(self, values: list, writer, indent: int):
if len(values) == 0:
writer.write(" " * indent)
writer.write("[]\n")
return
for value in values:
writer.write(" " * indent)
writer.write("-")
if isinstance(value, str):
if "\r" in value or "\n" in value:
writer.write("\n")
self._dump_multiline_str(value, writer, indent + self.indent_size)
elif value:
writer.write(" ")
writer.write(value)
writer.write("\n")
elif isinstance(value, list):
writer.write("\n")
| |
+ p + "_val |-> s_eventually(!" + p + "_val || "+handshakes[p]+"));\n")
# Assert liveness!
prop.write("// Assert that every request has a response and that every reponse has a request\n")
prop.write("as__" + name_tid + "_eventual_response: assert property (|" + name_tid + "_sampled |-> s_eventually(" + q + "_val")
if size != "'0": prop.write(" && (" + q_trans_id + " == " + symb_name + ") ));\n")
else: prop.write("));\n")
prop.write("as__" + name_tid + "_was_a_request: assert property (" + name_tid + "_response |-> "+name_tid+"_set || "+name_tid+"_sampled);\n\n")
if "p_data" in entry and "q_data" in entry:
p_trans_id = p + "_" + entry["p_id"]
q_trans_id = q + "_" + entry["q_id"]
name_tid = name + "_" + entry["p_id"]
p_data = p + "_" + entry["p_data"]
q_data = q + "_" + entry["q_data"]
symb_name = "symb_" + p_trans_id
data_integrity(prop, name_tid, p, q, p_trans_id, q_trans_id, p_data, q_data, symb_name, size)
def gen_iface_sampled (prop, name, p, q, p_trans_id, q_trans_id, symb_name, entry, count,size):
    """Emit the SystemVerilog sampling model for one interface transaction.

    Writes to ``prop`` (a writable file object): a ``<name>_sampled`` counter
    register, ``_set``/``_response`` wires (gated on the transaction id when
    the id width ``size`` is not "'0"), a cover for the counter, overflow
    assume/assert when ``count > 0``, and an optional "active" assertion
    driven by ``entry``.  Uses module globals clk_sig, get_reset and
    assign_wires.
    """
    prop.write("reg ["+str(count)+":0] " + name + "_sampled;\n")
    prop.write("wire " + name + "_set = " + p + "_hsk")
    if size != "'0": prop.write(" && " + p_trans_id + " == " + symb_name + ";\n")
    else: prop.write(";\n")
    prop.write("wire " + name + "_response = " + q + "_hsk")
    if size != "'0": prop.write(" && " + q_trans_id + " == " + symb_name + ";\n\n")
    else: prop.write(";\n\n")
    # Counter: +1 on request handshake, -1 on response handshake.
    prop.write("always_ff @(posedge " + clk_sig + ") begin\n")
    prop.write("\tif(" + get_reset() + ") begin\n")
    prop.write("\t\t" + name + "_sampled <= '0;\n")
    prop.write("\tend else if (" + name + "_set || "+ name + "_response ) begin\n")
    prop.write("\t\t"+name+"_sampled <= "+name+"_sampled + "+name+"_set - "+name+"_response;\n")
    prop.write("\tend\n")
    prop.write("end\n")
    # do not create sampled cover if it's never going to be sampled
    # NOTE(review): p_val is built from q, not p — looks intentional (request
    # and response tied to the same wire), but confirm.
    p_rdy = p+"_rdy";
    p_val = q+"_val"
    if not ((p_rdy in assign_wires) and (p_val in assign_wires) and (assign_wires[p_rdy] == assign_wires[p_val]) ):
        prop.write("co__" + name + "_sampled: cover property (|" + name + "_sampled);\n")
    if count > 0: # When not unique assume that this sampling structure would not overflow
        prop.write("if (ASSERT_INPUTS) begin\n")
        prop.write("\tas__" + name + "_sample_no_overflow: assert property ("+name+"_sampled != '1 || !"+name+"_set);\n")
        prop.write("end else begin\n")
        prop.write("\tam__" + name + "_sample_no_overflow: assume property ("+name+"_sampled != '1 || !"+name+"_set);\n")
        prop.write("end\n\n")
    if "active" in entry:
        prop.write("as__" + name + "_active: assert property (" + name + "_sampled > 0 |-> "+entry["active"]+");\n\n")
    else:
        prop.write("\n")
def data_integrity(prop, name, p, q, p_trans_id, q_trans_id, p_data, q_data, symb_name, size):
    """Emit the SystemVerilog data-integrity model for one transaction.

    A ``<name>_data_model`` register captures ``p_data`` on each sampled
    request; assertions then require at most one outstanding request and that
    the response data matches the captured value.  Several parameters (p, q,
    p_trans_id, q_trans_id, symb_name, size) are kept for signature
    consistency with the other generators but are unused here.
    """
    width = signals[p_data]["size"]
    model = f"{name}_data_model"
    prop.write(f"\n// Modeling data integrity for {name}\n")
    prop.write(f"reg [{width}:0] {model};\n")
    prop.write(f"always_ff @(posedge {clk_sig}) begin\n")
    prop.write(f"\tif({get_reset()}) begin\n")
    prop.write(f"\t\t{model} <= '0;\n")
    prop.write(f"\tend else if ({name}_set) begin\n")
    prop.write(f"\t\t{model} <= {p_data};\n")
    prop.write("\tend\n")
    prop.write("end\n\n")
    prop.write(f"as__{name}_data_unique: assert property (|{name}_sampled |-> !{name}_set);\n")
    prop.write(f"as__{name}_data_integrity: assert property (|{name}_sampled && {name}_response ")
    prop.write(f"|-> ({q_data} == {model}));\n\n")
def gen_out(prop, name, entry):
    """Emit the SystemVerilog outstanding-request model for one interface.

    ``entry`` describes the request/response pair: keys "p", "q" (signal
    prefixes), "size" (transaction id width, "'0" meaning no id), "p_id" /
    "q_id" (id field names), optional "p_data"/"q_data" (data field names,
    enabling data tracking) and optional "active".  Writes to ``prop``: a
    per-id outstanding-request register (plus a data register when data is
    tracked), its update logic, and a generate block with assert properties
    (when ASSERT_INPUTS) or assume/cover properties otherwise.  Uses module
    globals signals, clk_sig, get_reset and handshakes.
    """
    p = entry["p"]
    q = entry["q"]
    size = entry["size"]
    p_trans_id = p + "_" + entry["p_id"]
    q_trans_id = q + "_" + entry["q_id"]
    p_data = None
    # q_data / size_p are only bound when data tracking is enabled; all later
    # uses are guarded by "if p_data".
    if "p_data" in entry and "q_data" in entry:
        p_data = p + "_" + entry["p_data"]
        q_data = q + "_" + entry["q_data"]
        size_p = signals[p_data]["size"]
    symb_name = "symb_" + p_trans_id
    # One flag per possible transaction id (a single flag when there is none).
    if (size == "'0"):
        power_size = "1"
    else:
        power_size = "2**("+ size + "+1)"
    prop.write("// Modeling outstanding request for " + name + "\n")
    prop.write("reg [" + power_size + "-1:0] " + name + "_outstanding_req_r;\n")
    if p_data:
        prop.write("reg [" + power_size + "-1:0]["+size_p+":0] " + name + "_outstanding_req_data_r;\n")
    prop.write("\n")
    # Sequential logic: set the flag (and capture data) on request handshake,
    # clear it on response handshake.
    prop.write("always_ff @(posedge " + clk_sig + ") begin\n")
    prop.write("\tif(" + get_reset() + ") begin\n")
    prop.write("\t\t" + name + "_outstanding_req_r <= '0;\n")
    prop.write("\tend else begin\n")
    prop.write("\t\tif (" + p + "_hsk) begin\n")
    if size == "'0":
        prop.write("\t\t\t" + name + "_outstanding_req_r <= 1'b1;\n")
    else:
        prop.write("\t\t\t" + name + "_outstanding_req_r[" + p_trans_id + "] <= 1'b1;\n")
    if p_data:
        if size == "'0":
            prop.write("\t\t\t" + name + "_outstanding_req_data_r <= "+p_data+";\n")
        else:
            prop.write("\t\t\t" + name + "_outstanding_req_data_r[" + p_trans_id + "] <= "+p_data+";\n")
    prop.write("\t\tend\n")
    prop.write("\t\tif (" + q + "_hsk) begin\n")
    if size == "'0":
        prop.write("\t\t\t" + name + "_outstanding_req_r <= 1'b0;\n")
    else:
        prop.write("\t\t\t" + name + "_outstanding_req_r[" + q_trans_id + "] <= 1'b0;\n")
    prop.write("\t\tend\n")
    prop.write("\tend\n")
    prop.write("end\n")
    prop.write("\n")
    if "active" in entry:
        prop.write("as__" + name + "_active: assert property (|" + name + "_outstanding_req_r |-> "+entry["active"]+");\n\n")
    else:
        prop.write("\n")
    prop.write("generate\n")
    # First Assertion (if macro defined)
    prop.write("if (ASSERT_INPUTS) begin : " + name+ "_gen\n")
    if size == "'0":
        prop.write("\tas__" + name + "1: assert property (!" + name + "_outstanding_req_r |-> !(" + q + "_hsk));\n")
    else:
        prop.write("\tas__" + name + "1: assert property (!" + name + "_outstanding_req_r[" + symb_name + "] ")
        prop.write("|-> !(" + q + "_hsk && (" + q_trans_id + " == " + symb_name + ")));\n")
    # Second assertion
    if size == "'0":
        prop.write("\tas__" + name + "2: assert property (" + name + "_outstanding_req_r |-> s_eventually(" + q + "_hsk")
    else:
        prop.write("\tas__" + name + "2: assert property (" + name + "_outstanding_req_r[" + symb_name + "] ")
        prop.write("|-> s_eventually(" + q + "_hsk && (" + q_trans_id + " == " + symb_name + ")")
    if p_data:
        if size == "'0":
            prop.write("&&\n\t (" + q_data + " == " + name + "_outstanding_req_data_r) ));\n")
        else:
            prop.write("&&\n\t (" + q_data + " == " + name + "_outstanding_req_data_r[" + symb_name + "]) ));\n")
    else:
        prop.write("));\n")
    prop.write("end else begin : " + name+ "_else_gen\n")
    # Fairness assumption only when the request channel is not tied to its
    # own handshake wire.
    if p != handshakes[p]:
        prop.write("\tam__" + name + "_fairness: assume property (" + p + "_val |-> s_eventually(" + p + "_rdy));\n")
    prop.write("\tfor ( j = 0; j < " + power_size + "; j = j + 1) begin : " + name+ "_for_gen\n")
    prop.write("\t\tco__" + name + ": cover property (" + name + "_outstanding_req_r[j]);\n")
    # First Assertion
    prop.write("\t\tam__" + name + "1: assume property (!" + name + "_outstanding_req_r[j] ")
    if size == "'0":
        prop.write("|-> !(" + q + "_val));\n")
    else:
        prop.write("|-> !(" + q + "_val && (" + q_trans_id + " == j)));\n")
    # Second Assertion
    prop.write("\t\tam__" + name + "2: assume property (" + name + "_outstanding_req_r[j] ")
    if size == "'0":
        prop.write("|-> s_eventually(" + q + "_val")
    else:
        prop.write("|-> s_eventually(" + q + "_val && (" + q_trans_id + " == j)")
    if p_data:
        prop.write("&&\n\t (" + q_data + " == " + name + "_outstanding_req_data_r[j]) ));\n")
    else:
        prop.write("));\n")
    prop.write("\tend\n")
    prop.write("end\n")
    prop.write("endgenerate\n")
    prop.write("\n")
def gen_unique(prop, name, entry, p_key, q_key):
p = entry["p"]
q = entry["q"]
p_trans_id = p + "_" + p_key[0]
q_trans_id = q + "_" + q_key[0]
size = p_key[1]
symb_name = "symb_" + p_trans_id
power_size = "2**("+ size + "+1)"
prop.write("// Max 1 outstanding request for " + name + "\n")
prop.write("reg [" + power_size + "-1:0] " + name + "_unique_outstanding_req_r;\n")
prop.write("wire " + name + "_equal = " + p_trans_id + " == " + q_trans_id + ";\n")
prop.write("\n")
prop.write("always_ff @(posedge " + clk_sig + ") begin\n")
prop.write("\tif(" + get_reset() + ") begin\n")
prop.write("\t\t" + name + "_unique_outstanding_req_r <= '0;\n")
prop.write("\tend else begin\n")
prop.write("\t\tif (" + p + "_hsk) begin\n")
prop.write("\t\t\t" + name + "_unique_outstanding_req_r[" + p_trans_id + "] <= 1'b1;\n")
prop.write("\t\tend\n")
prop.write("\t\tif (" + q + "_hsk) begin\n")
prop.write("\t\t\t" + name + "_unique_outstanding_req_r[" + q_trans_id + "] <= 1'b0;\n")
prop.write("\t\tend\n")
prop.write("\tend\n")
prop.write("end\n")
prop.write("\n")
prop.write("generate\n")
prop.write("if (ASSERT_INPUTS) begin : " + name+ "_gen\n")
prop.write("\tas__" + name + "_unique: assert property (" + name + "_unique_outstanding_req_r[" + symb_name + "] ")
prop.write("|-> !(" + p + "_hsk && (" + p_trans_id + " == " + symb_name + ")));\n")
prop.write("end else begin : " + | |
<reponame>thbom001/improver
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Neighbour finding for the Improver site specific process chain."""
import warnings
from typing import Any, Dict, List, Optional, Tuple
import cartopy.crs as ccrs
import numpy as np
from cartopy.crs import CRS
from iris.cube import Cube
from numpy import ndarray
from scipy.spatial import cKDTree
from improver import BasePlugin
from improver.metadata.utilities import create_coordinate_hash
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
class NeighbourSelection(BasePlugin):
"""
For the selection of a grid point near an arbitrary coordinate, where the
selection may be the nearest point, or a point that fulfils other
imposed constraints.
Constraints available for determining the neighbours are:
1. land_constraint which requires the selected point to be on land.
2. minimum_dz which minimises the vertical displacement between the
given coordinate (when an altitude is provided) and the grid point
where its altitude is provided by the relevant model or high resolution
orography. Note that spot coordinates provided without an altitude are
given the altitude of the nearest grid point taken from the orography
cube.
3. A combination of the above, where the land constraint is primary and out
of available land points, the one with the minimal vertical displacement
is chosen.
"""
    def __init__(
        self,
        land_constraint: bool = False,
        minimum_dz: bool = False,
        search_radius: float = 1.0e4,
        site_coordinate_system: CRS = ccrs.PlateCarree(),
        site_x_coordinate: str = "longitude",
        site_y_coordinate: str = "latitude",
        node_limit: int = 36,
        unique_site_id_key: Optional[str] = None,
    ) -> None:
        """
        Args:
            land_constraint:
                If True the selected neighbouring grid point must be on land,
                where this is determined using a land_mask.
            minimum_dz:
                If True the selected neighbouring grid point must be chosen to
                minimise the vertical displacement compared to the site
                altitude.
            search_radius:
                The radius in metres from a spot site within which to search
                for a grid point neighbour.
            site_coordinate_system:
                The coordinate system of the sitelist coordinates that will be
                provided. This defaults to be a latitude/longitude grid, a
                PlateCarree projection.
            site_x_coordinate:
                The key that identifies site x coordinates in the provided site
                dictionary. Defaults to longitude.
            site_y_coordinate:
                The key that identifies site y coordinates in the provided site
                dictionary. Defaults to latitude.
            node_limit:
                The upper limit for the number of nearest neighbours to return
                when querying the tree for a selection of neighbours from which
                one matching the minimum_dz constraint will be picked.
            unique_site_id_key:
                Key in the provided site list that corresponds to a unique numerical
                ID for every site (up to 8 digits). If this optional key is provided
                such an identifier must exist for every site. This key will also be
                used to name the resulting unique ID coordinate on the constructed
                cube. Values in this coordinate will be recorded as strings, with
                all numbers padded to 8-digits, e.g. "00012345".
        """
        self.minimum_dz = minimum_dz
        self.land_constraint = land_constraint
        self.search_radius = search_radius
        self.site_coordinate_system = site_coordinate_system
        self.site_x_coordinate = site_x_coordinate
        self.site_y_coordinate = site_y_coordinate
        # Key under which a site's altitude is read from the site dictionary
        # (presumably; usage is outside this chunk — TODO confirm).
        self.site_altitude = "altitude"
        self.node_limit = node_limit
        self.unique_site_id_key = unique_site_id_key
        # Default; read by check_sites_are_within_domain.  Presumably flipped
        # to True elsewhere when the model grid is geographic — TODO confirm.
        self.global_coordinate_system = False
def __repr__(self) -> str:
"""Represent the configured plugin instance as a string."""
return (
"<NeighbourSelection: land_constraint: {}, "
"minimum_dz: {}, search_radius: {}, site_coordinate_system"
": {}, site_x_coordinate:{}, site_y_coordinate: {}, "
"node_limit: {}>"
).format(
self.land_constraint,
self.minimum_dz,
self.search_radius,
self.site_coordinate_system.__class__,
self.site_x_coordinate,
self.site_y_coordinate,
self.node_limit,
)
def neighbour_finding_method_name(self) -> str:
"""
Create a name to describe the neighbour method based on the constraints
provided.
Returns:
A string that describes the neighbour finding method employed.
This is essentially a concatenation of the options.
"""
method_name = "{}{}{}".format(
"nearest",
"_land" if self.land_constraint else "",
"_minimum_dz" if self.minimum_dz else "",
)
return method_name
def _transform_sites_coordinate_system(
self, x_points: ndarray, y_points: ndarray, target_crs: CRS
) -> ndarray:
"""
Function to convert coordinate pairs that specify spot sites into the
coordinate system of the model from which data will be extracted. Note
that the cartopy functionality returns a z-coordinate which we do not
want in this case, as such only the first two columns are returned.
Args:
x_points:
An array of x coordinates to be transformed in conjunction
with the corresponding y coordinates.
y_points:
An array of y coordinates to be transformed in conjunction
with the corresponding x coordinates.
target_crs:
Coordinate system to which the site coordinates should be
transformed. This should be the coordinate system of the model
from which data will be spot extracted.
Returns:
An array containing the x and y coordinates of the spot sites
in the target coordinate system, shaped as (n_sites, 2). The
z coordinate column is excluded from the return.
"""
return target_crs.transform_points(
self.site_coordinate_system, x_points, y_points
)[:, 0:2]
def check_sites_are_within_domain(
self,
sites: List[Dict[str, Any]],
site_coords: ndarray,
site_x_coords: ndarray,
site_y_coords: ndarray,
cube: Cube,
) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""
A function to remove sites from consideration if they fall outside the
domain of the provided model cube. A warning is raised and the details
of each rejected site are printed.
Args:
sites:
A list of dictionaries defining the spot sites for which
neighbours are to be found. e.g.:
[{'altitude': 11.0, 'latitude': 57.867000579833984,
'longitude': -5.632999897003174, 'wmo_id': 3034}]
site_coords:
An array of shape (n_sites, 2) that contains the spot site
coordinates in the coordinate system of the model cube.
site_x_coords:
The x coordinates of the spot sites in their original
coordinate system, from which invalid sites must be removed.
site_y_coords:
The y coordinates of the spot sites in their original
coordinate system, from which invalid sites must be removed.
cube:
A cube that is representative of the model/grid from which spot
data will be extracted.
Returns:
- The sites modified to filter out the sites falling outside
the grid domain of the cube.
- The site_coords modified to filter out the sites falling
outside the grid domain of the cube.
- The x_coords modified to filter out the sites falling
outside the grid domain of the cube.
- The y_coords modified to filter out the sites falling
outside the grid domain of the cube.
"""
# Get the grid domain limits
x_min = cube.coord(axis="x").bounds.min()
x_max = cube.coord(axis="x").bounds.max()
y_min = cube.coord(axis="y").bounds.min()
y_max = cube.coord(axis="y").bounds.max()
if self.global_coordinate_system:
domain_valid = np.where(
(site_coords[:, 1] >= y_min) & (site_coords[:, 1] <= y_max)
)
domain_invalid = np.where(
(site_coords[:, 1] < y_min) | (site_coords[:, 1] > y_max)
)
else:
domain_valid = np.where(
(site_coords[:, 0] >= x_min)
& (site_coords[:, 0] <= x_max)
& (site_coords[:, 1] >= y_min)
& (site_coords[:, 1] <= y_max)
)
domain_invalid = np.where(
(site_coords[:, 0] < x_min)
| (site_coords[:, 0] > x_max)
| (site_coords[:, 1] < y_min)
| (site_coords[:, 1] > y_max)
)
num_invalid = len(domain_invalid[0])
if num_invalid > 0:
msg = (
"{} spot sites fall outside the grid domain | |
#!/usr/bin/env python
''' ---------------- About the script ----------------
Assignment 5: CNNs on cultural image data
This script builds a deep learning model using convolutional neural networks which classify Impressionism paintings by their respective artists. It uses LeNet architecture for CNN.
Preprocessing of the data involves resizing the images, getting the images and labels into an array for the model. As an output, this script produces a visualization showing loss/accuracy of the model during training and the classification report.
Arguments:
-trd, --train_data: Directory of training data
-vald, --val_data: Directory of validation data
-optim, --optimizer: Method to update the weight parameters to minimize the loss function. Choose between SGD and Adam.
-lr, --learning_rate: The amount that the weights are updated during training. Default = 0.01
-ep, --epochs: Defines how many times the learning algorithm will work through the entire training dataset. Default = 50
Example:
with default values:
$ python cnn-artists.py -trd ../data/Impressionist_Classifier_data/training -vald ../data/Impressionist_Classifier_data/validation -optim SGD
with optional arguments:
$ python cnn-artists.py -trd ../data/Impressionist_Classifier_data/training -vald ../data/Impressionist_Classifier_data/validation - optim Adam -lr 0.002 -ep 100
'''
"""---------------- Importing libraries ----------------
"""
# data tools
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import glob
import cv2
import pandas as pd
sys.path.append(os.path.join(".."))
# Import pathlib
from pathlib import Path
# sklearn tools
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
# tf tools
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D,
MaxPooling2D,
Activation,
Flatten,
Dense,
Dropout)
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras import backend as K
from tensorflow.keras.layers import LeakyReLU
from tensorflow.python.keras.preprocessing.image_dataset import image_dataset_from_directory
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils import plot_model
# Command-line interface
import argparse
"""---------------- Main script ----------------
"""
def _build_lenet():
    """Build the LeNet-style CNN used for the artist classification.

    Architecture: INPUT => CONV => ReLU => MAXPOOL => CONV => ReLU =>
    MAXPOOL => FC => ReLU => Dropout => FC (softmax over 10 classes).

    Returns
    -------
    keras Sequential model (uncompiled)
    """
    model = Sequential()
    # first set of CONV => RELU => POOL
    model.add(Conv2D(64, (3, 3),
                     padding="same",
                     input_shape=(256, 256, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2),
                           strides=(2, 2)))
    # second set of CONV => RELU => POOL
    model.add(Conv2D(128, (5, 5),
                     padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2),
                           strides=(2, 2)))
    # FC => RELU with dropout for regularization
    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    # softmax classifier over the 10 artist classes
    model.add(Dense(10))
    model.add(Activation("softmax"))
    return model
def main():
    """Train a LeNet-style CNN to classify Impressionist paintings by artist.

    Parses command-line arguments, loads and preprocesses the training and
    validation images, builds and trains the model, then saves the model
    architecture diagram, the training-history plot and a classification
    report under ../output.
    """
    """------ Argparse parameters ------
    """
    # Instantiating the ArgumentParser object as parser
    parser = argparse.ArgumentParser(description = "[INFO] Classify Impressionists paintings and print out performance accuracy report")
    # Adding optional (with defaults) and required arguments
    parser.add_argument("-trd", "--train_data", required=True, help = "Directory of training data")
    parser.add_argument("-vald", "--val_data", required=True, help = "Directory of validation data")
    # BUG FIX: the default used to be the SGD *class object*, which never
    # compares equal to the string "SGD" below, so omitting -optim sent the
    # script into the "Not a valid optimizer" branch and it later crashed
    # with a NameError on H. The default is now the string "SGD".
    parser.add_argument("-optim", "--optimizer", required = False, default = "SGD", help = "Method to update the weight parameters to minimize the loss function. Choose between SGD and Adam.")
    parser.add_argument("-lr", "--learning_rate", required = False, default = 0.01, type = float, help = "The amount that the weights are updated during training. Default = 0.01")
    parser.add_argument("-ep", "--epochs", required=False, default = 50, help = "Defines how many times the learning algorithm will work through the entire training dataset. Default = 50")
    # Parsing the arguments
    args = vars(parser.parse_args())
    # Saving parameters as variables
    trd = args["train_data"]      # training data dir
    vald = args["val_data"]       # validation data dir
    optim = args["optimizer"]     # optimizer name: "SGD" or "Adam"
    lr = args["learning_rate"]    # learning rate
    ep = int(args["epochs"])      # epochs
    # Validate the optimizer up front so a bad value fails immediately
    # instead of after the (slow) image loading and preprocessing.
    if optim not in ("SGD", "Adam"):
        print("Not a valid optimizer. Choose between 'SGD' and 'Adam'.")
        return
    """------ Loading data and preprocessing ------
    """
    # getting training and validation data
    print("[INFO] loading and preprocessing training and validation data ...")
    train = get_data(os.path.join(trd))
    val = get_data(os.path.join(vald))
    # Create output folder, if it doesn't exist already, for saving the
    # classification report, performance graph and model architecture
    if not os.path.exists("../output"):
        os.makedirs("../output")
    """------ Preparing training and validations sets ------
    """
    # empty lists for training and validation images and labels
    x_train = []
    y_train = []
    x_val = []
    y_val = []
    # split the (image, label) pairs into parallel feature/label lists
    for feature, label in train:
        x_train.append(feature)
        y_train.append(label)
    for feature, label in val:
        x_val.append(feature)
        y_val.append(label)
    # normalizing the data (rescaling RGB channel values from 0-255 to 0-1)
    x_train = np.array(x_train) / 255
    x_val = np.array(x_val) / 255
    # integers to one-hot vectors; the binarizer is fitted on the training
    # labels only and *reused* for validation so both splits share the same
    # class ordering (previously fit_transform was called on both)
    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)
    y_val = lb.transform(y_val)
    """------ Defining and training LeNet CNN model ------
    """
    model = _build_lenet()
    # ploting and saving model's architecture
    plot_model(model, to_file='../output/Model´s_architecture.png',
               show_shapes=True,
               show_dtype=True,
               show_layer_names=True)
    # Printing that model's architecture graph has been saved
    print(f"\n[INFO] Model´s architecture graph has been saved")
    """------ Optimizer choice ------
    """
    # optim was validated above, so exactly one of these two applies;
    # the compile/train sequence is shared instead of duplicated per branch
    if optim == "SGD":
        opt = SGD(lr=lr)
    else:
        opt = Adam(lr=lr)
    # compile model
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    print(model.summary())
    # train model
    print("[INFO] training LeNet CNN model ...")
    H = model.fit(x_train, y_train,
                  validation_data=(x_val, y_val),
                  batch_size=32,
                  epochs=ep,
                  verbose=1)
    """------ LeNet CNN model output ------
    """
    # ploting and saving model's performance graph
    plot_history(H, ep)
    # Printing that performance graph has been saved
    print(f"\n[INFO] Model´s performance graph has been saved")
    # Extracting the labels
    # NOTE(review): assumes os.listdir returns the category folders in the
    # same order here as inside get_data, so that target_names line up with
    # the class indices — confirm (listdir order is not guaranteed).
    labels = os.listdir(os.path.join(trd))
    # Classification report
    predictions = model.predict(x_val, batch_size=32)
    print(classification_report(y_val.argmax(axis=1),
                                predictions.argmax(axis=1),
                                target_names=labels))
    # defining full filepath to save .csv file
    outfile = os.path.join("../", "output", "Impressionist_classifier_report.csv")
    # turning report into dataframe and saving as .csv
    report = pd.DataFrame(classification_report(y_val.argmax(axis=1), predictions.argmax(axis=1), target_names=labels, output_dict = True)).transpose()
    report.to_csv(outfile)
    print(f"\n[INFO] Classification report has been saved")
    print("\nScript was executed successfully! Have a nice day")
"""---------------- Functions ----------------
"""
# this function was developed for use in class and has been adapted for this project
def plot_history(H, epochs):
    """Plot and save the model's training/validation loss and accuracy.

    Parameters
    ----------
    H: keras History object (anything exposing a ``history`` dict)
        training history with 'loss', 'val_loss', 'accuracy', 'val_accuracy'
    epochs: int
        nominal number of epochs; kept for backward compatibility, but the
        actual number of recorded epochs is derived from the history itself
        so the plot also works if training stopped early
    """
    # use the real number of recorded epochs — guards against a mismatch
    # between the requested epoch count and what was actually run
    n_epochs = len(H.history["loss"])
    plt.style.use("fivethirtyeight")
    plt.figure()
    plt.plot(np.arange(0, n_epochs), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, n_epochs), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, n_epochs), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, n_epochs), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.tight_layout()
    plt.savefig('../output/LeNet_CNN_model´s_performance.png')
# this function was taken from Gautam (2020) and adapted for this project
def get_data(data_dir):
    """Load labelled images from ``data_dir`` for training or validation.

    Expects one sub-folder per class (artist). Every image is read in RGB,
    resized to 256x256 and paired with its class index.

    Parameters
    ----------
    data_dir: str
        directory containing one sub-folder of images per label

    Returns
    -------
    np.ndarray (dtype=object)
        array of [image_array, class_index] pairs
    """
    # Extracting the labels
    # NOTE(review): os.listdir order is not guaranteed, so class indices are
    # only consistent between the training and validation calls if both
    # directories happen to list identically — consider sorting here and
    # wherever the labels are re-read for report names. TODO confirm.
    labels = os.listdir(os.path.join(data_dir))
    # defining desirable image size (width == height, in pixels)
    img_size = 256
    # an empty list to store data
    data = []
    # enumerate yields the class index directly, replacing the original
    # O(n) labels.index(label) lookup on every iteration
    for class_num, label in enumerate(labels):
        # folder holding this label's images
        path = os.path.join(data_dir, label)
        # a loop through each image file in the folder
        for img in os.listdir(path):
            try:
                # cv2 reads BGR; [..., ::-1] flips the channel axis to RGB
                img_arr = cv2.imread(os.path.join(path, img))[..., ::-1]
                # reshaping the image to the preferred size
                resized_arr = cv2.resize(img_arr, (img_size, img_size))
                # appending the resized image and label index
                data.append([resized_arr, class_num])
            except Exception as e:
                # unreadable/corrupt files are reported and skipped;
                # cv2.imread returns None for them, which raises a
                # TypeError on the slice above
                print(e)
    # return a numpy array (object dtype: rows hold [image, int] pairs)
    return np.array(data, dtype=object)
| |
grid
self.ciGui.config(menu=self.menubar)
#instructive labels
self.instructions = Text(self.gridframe, bd=4, width=20, font=('Helvetica', 12), wrap=WORD)
f = open('instructions.txt', 'r')
instruct = f.read()
f.close()
self.instructions.insert('1.0', instruct)
self.i_scroll = Scrollbar(self.gridframe)
self.instructions.config(yscrollcommand=self.i_scroll.set)
self.i_scroll.config(command=self.instructions.yview)
self.instructions.grid(row=14, column=0, rowspan=20, columnspan=2, padx=10)
self.i_scroll.grid(row=14, column=0, rowspan=20, columnspan=2, sticky=E+N+S, padx=10)
#Sets some sizing stuff
for i in range(0, 10):
self.ciGui.columnconfigure(i, weight=1, minsize=10)
for i in range(0, 30):
self.ciGui.rowconfigure(i, weight=1, minsize=10)
for i in range(7, 11):
self.ciGui.rowconfigure(i, weight=1, minsize=20)
self.ciGui.rowconfigure(18, weight=1, minsize=25)
#mainloop
self.newClientDisplay()
self.ciGui.mainloop()
#DISPLAY SCREENS
    def newClientDisplay(self):
        """This function will clear all irrelevant widgets, and
        grid all widgets necessary for the new client screen.
        """
        #clear widgets
        self.clearEntries()
        #grid widgets
        # household section separator and title
        self.addhhsep.grid(row=23,column=3,columnspan=40,sticky=EW, pady=10)
        self.addhhtitle.grid(row=23,column=3,columnspan=12, sticky=W, pady=10)
        # family-size label/entry and the button that spawns member entry rows
        self.famNum.grid(row=24, column=4)
        self.entNum.grid(row=24, column=3)
        self.newMembersB.grid(row=24, column=5)
        # save/cancel buttons for the new client record
        self.newClientSave.grid(row=40,column=3, columnspan=2)
        self.cancelNewB.grid(row=40, column=5, columnspan=2)
        # prepare the first-visit fields, but hide the visit save/cancel
        # buttons until they are actually needed
        self.newvisitf()
        self.saveVisit.grid_forget()
        self.cancelVisit.grid_forget()
        return
    def updateClientDisplay(self):
        """This function will clear all irrelevant widgets and
        grid all widgets necessary for the updating-client screen.
        """
        #clear widgets
        self.clearEntries()
        #grid widgets
        # family member list with its scrollbar and add/remove/view buttons
        self.family_listbox.grid(row=24, column=3, rowspan=3, columnspan=2, sticky=W)
        self.fam_scroll.grid(row=24, column=4, rowspan=3, columnspan=1, sticky=E+N+S)
        self.addmemb.grid(row=24,column=5,sticky=E+N+S)
        self.removmemb.grid(row=25,column=5,sticky=E+N+S)
        self.viewmemb.grid(row=26,column=5,sticky=E+N+S)
        # household section title and separator
        self.housetitle.grid(row=23,column=3,columnspan=12, sticky=W)
        self.houseSep.grid(row=23,column=3,columnspan=40,sticky=EW)
        # save/cancel buttons for the edited record
        self.saveB.grid(row=28, column=3, columnspan=2)
        self.cancelB.grid(row=28, column=5, columnspan=2)
        return
#DISPLAY FOR SELECTED CLIENTS
def displayInfo(self, *args):
"""This function displays the information for a client that
has been selected in the client_listbox.
"""
try:
self.cursel = int(self.id_list[self.client_listbox.curselection()[0]])
info = select_client(self.cursel)
self.info = info
self.updateClientDisplay()
self.displayHouseholdMem(info)
self.displayVisitInfo(info)
self.displayClientInfo(info)
self.displayHouseholdInfo(info)
except IndexError:
pass
return
    def displayNewInfo(self, client_id):
        """This function displays the information for a specified
        client whose id is client_id.

        Fetches the client's record, caches it on self.info, switches to
        the update-client screen and renders every information panel.
        """
        cursel = client_id
        # full record bundle (keys used downstream: "visitor", "household",
        # "member_list", "visit_list", "agegroup_dict")
        info = select_client(cursel)
        self.info = info
        self.updateClientDisplay()
        self.displayHouseholdMem(info)
        self.displayVisitInfo(info)
        self.displayClientInfo(info)
        self.displayHouseholdInfo(info)
        return
#DISPLAY INFORMATION FUNCTIONS
def displayClientInfo(self, info, *args):
"""This function displays the client information.
"""
#retrieve info from dictionary
visitor = info["visitor"]
#set variables
self.fnv.set(visitor.firstname)
self.lnv.set(visitor.lastname)
month = self.month_int[visitor.dob.month]
self.mv.set(month)
self.dv.set(visitor.dob.day)
self.yv.set(visitor.dob.year)
self.phv.set(visitor.phone)
#parse and set datejoined
joined = str(visitor.dateJoined.month) + "/" +\
str(visitor.dateJoined.day) + "/" +\
str(visitor.dateJoined.year)
self.datejoinv.set(joined)
#set age
ad=str(age(visitor.dob))
a="Age: "
ad=str(a+ad)
self.agev.set(ad)
return
def displayHouseholdInfo(self, info, *args):
"""This function displays the household information for
a client.
"""
#retrieve info from dictionary
house = info["household"]
#set variables
self.adv.set(house.street)
self.apv.set(house.apt)
self.ctyv.set(house.city)
self.stav.set(house.state)
self.zpv.set(house.zip)
#check dateVerified, and set variables accordingly
if house.dateVerified != None:
month = house.dateVerified.month
self.mvv.set(self.month_int[month])
self.dvv.set(house.dateVerified.day)
self.yvv.set(house.dateVerified.year)
#parse and set label variables for all members
ad=str(info["agegroup_dict"]["adults"])
a="Adults: "
ad=str(a+ad)
self.adl.set(ad)
ch=str(info["agegroup_dict"]["children"])
c="Children: "
ch=c+ch
self.chil.set(ch)
sn=str(info["agegroup_dict"]["seniors"])
s="Seniors: "
sn=s+sn
self.sen.set(sn)
infa=str(info["agegroup_dict"]["infants"])
i="Infants: "
infa=i+infa
self.inf.set(infa)
tl = str(info["agegroup_dict"]["total"])
t="Total: "
tl = t+tl
self.tot.set(tl)
#grid family member labels
self.dispad.grid(row=22,column=3,sticky=W, pady=10)
self.dischil.grid(row=22,column=4,sticky=W)
self.dissen.grid(row=22,column=5,sticky=W)
self.disinf.grid(row=22,column=6,sticky=W)
self.distot.grid(row=22,column=7,sticky=W)
return
    def displayVisitInfo(self, info, *args):
        """This function display the visit information for a client.

        Clears previously shown visits, fills the visit listbox with one
        entry per visit date, renders the first visit, and caches the
        parsed visit fields in self.visitDict for displayVisit().
        """
        self.clearVisits()
        self.visitDict = {}
        visitor = info["visitor"]
        # default the visitor field to the client's full name
        name = str(visitor.firstname)+ " " +str(visitor.lastname)
        self.visv.set(name)
        #visit info
        visits = info["visit_list"]
        if len(visits) == 0:
            pass
        else:
            # parallel lists, one slot per visit, sharing the same index
            vdatelabs = []
            vnlabs = []
            vvisitors = []
            vvols = []
            vids = []
            for v in visits:
                # visit date rendered as m/d/yyyy
                d=str(v.date.month)+'/'+str(v.date.day)+'/'+str(v.date.year)
                n=v.notes
                vi=v.visitor
                vol=v.volunteer
                vid=v.visitID
                vdatelabs.append(d)
                vnlabs.append(n)
                vvisitors.append(vi)
                vvols.append(vol)
                vids.append(vid)
            #set variables to display first visit
            self.visv.set(vvisitors[0])
            self.volv.set(vvols[0])
            self.notv.set(vnlabs[0])
            # notes widget is kept read-only; enable it just long enough
            # to insert the text
            self.notescv.config(state='normal')
            self.notescv.insert('1.0', vnlabs[0])
            self.notescv.config(state='disabled')
            #save lists in dictionary
            self.visitDict['dates'] = vdatelabs
            self.visitDict['notes'] = vnlabs
            self.visitDict['visitors'] = vvisitors
            self.visitDict['volunteers'] = vvols
            self.visitDict['ids'] = vids
            # populate the listbox and pre-select the first visit
            for i in range(0, len(vdatelabs)):
                self.visit_listbox.insert(i, vdatelabs[i])
            self.visit_listbox.selection_set(0)
    def displayVisit(self, *args):
        """This function will display the data for a visit when
        a visit date is selected.

        Bound to the visit listbox; *args absorbs the Tkinter event.
        Reads the visit fields cached in self.visitDict by
        displayVisitInfo().
        """
        try:
            # notes widget is read-only; enable it while rewriting its text
            self.notescv.config(state='normal')
            self.notescv.delete('1.0', END)
            # index of the selected visit date (IndexError when nothing
            # is selected — handled below)
            datev = int(self.visit_listbox.curselection()[0])
            self.selectedVisit = datev
            n = self.visitDict['notes']
            vi = self.visitDict['visitors']
            vol = self.visitDict['volunteers']
            self.visv.set(vi[datev])
            self.volv.set(vol[datev])
            self.notv.set(n[datev])
            notes = str(self.notv.get())
            self.notescv.insert('1.0', notes)
            self.notescv.config(state='disabled')
        except IndexError:
            # no selection — leave the display unchanged
            pass
def displayHouseholdMem(self, info, *args):
"""This function displays the household information for a client.
"""
self.family_listbox.delete(0,END)
a=[]
del self.mem_list[:]
for member in info["member_list"]:
self.mem_list.append(member.id)
s=str(age(member.dob))
q='Age: '
s=q+s
x=(member.firstname, member.lastname,s)
a.append(x)
for i in range(len(a)):
self.family_listbox.insert(i,a[i])
#DISPLAY EXTRA ENTRY BOXES FOR ADDITIONAL FAMILY MEMBERS
#BUG: WHEN Add Member IS PRESSED MORE THAN ONCE, EXTRA
#BOXES HANG AROUND, AND ARE NEVER CLEARED
    def familyEntryBoxes(self, *args):
        """This function generates entry boxes for adding new family members.
        The entry boxes are saved in list form and added to the dictionary
        memDict.

        The number of rows comes from the family-size entry (self.q);
        non-numeric input silently aborts.
        """
        #clears any boxes already displayed
        self.clearFamily()
        try:
            n = int(self.q.get())
        except ValueError:
            # family-size entry is empty or not a number — nothing to build
            return
        #add instructive labels to grid
        self.famfn.grid(row=25,column=3)
        self.famln.grid(row=25,column=4)
        self.famdob.grid(row=25,column=5)
        self.famphone.grid(row=25,column=8)
        #create lists
        fnames = []
        lnames = []
        mm = []
        dd = []
        yy = []
        phnum = []
        #create entry boxes, grid them, and append them to a list
        # one row of widgets per family member, starting at grid row 26
        for i in range(0, n):
            fname = Entry(self.gridframe)
            fname.grid(row=26+i, column=3)
            fnames.append(fname)
            lname = Entry(self.gridframe)
            lname.grid(row=26+i, column=4)
            lnames.append(lname)
            month = ttk.Combobox(self.gridframe, width=12, state='readonly',
                                 values=self.month_li)
            #month.bind('<<ComboboxSelected>>', self.monthbox_select)
            month.grid(row=26+i, column=5)
            mm.append(month)
            # NOTE(review): from_=0, to=0 leaves the day spinbox pinned at 0;
            # presumably the commented-out month binding was meant to set the
            # real range — confirm intended behavior
            day = Spinbox(self.gridframe, from_=0, to=0, width=5)
            day.grid(row=26+i, column=6)
            dd.append(day)
            year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
            year.grid(row=26+i, column=7)
            yy.append(year)
            phone = Entry(self.gridframe)
            phone.grid(row=26+i, column=8)
            phnum.append(phone)
        #add all lists to dictionary
        self.memDict["first"] = fnames
        self.memDict["last"] = lnames
        self.memDict["mm"] = mm
        self.memDict["dd"] = dd
        self.memDict["yy"] = yy
        self.memDict["phone"] = phnum
    def addMemberEntryBoxes(self, *args):
        """This function generates entry boxes for adding new family members.
        The entry boxes are saved in list form and added to the dictionary
        memDict.

        Builds a single row of entry widgets (one new member). Each widget
        is stored in memDict as a one-element list, matching the list-based
        format produced by familyEntryBoxes.
        """
        # NOTE(review): self.addmemberON is never set to True here (the
        # assignment at the bottom is commented out), so this guard is
        # currently always False — confirm whether repeated presses should
        # be blocked
        if self.addmemberON == True:
            pass
        else:
            #add instructive labels to grid
            self.famfn.grid(row=24,column=6) #, sticky=NE)
            self.famln.grid(row=24,column=8) #, sticky=NE)
            self.famdob.grid(row=25,column=6)
            self.famphone.grid(row=26,column=6)
            #create entry boxes, grid them, and append them to a list
            #first name
            self.fname = Entry(self.gridframe)
            self.fname.grid(row=24, column=7, sticky=W)
            self.memDict["first"]=[self.fname]
            #last name
            self.lname = Entry(self.gridframe)
            self.lname.grid(row=24, column=9, sticky=W)
            self.memDict["last"]=[self.lname]
            #dob: month
            self.month = ttk.Combobox(self.gridframe, width=12, state='readonly',
                                      values=self.month_li)
            #self.month.bind('<<ComboboxSelected>>', self.monthbox_select)
            self.month.grid(row=25, column=7, sticky=W)
            self.memDict["mm"]=[self.month]
            #dob: day
            self.day = Spinbox(self.gridframe, from_=0, to=0, width=5)
            self.day.grid(row=25, column=8, sticky=W)
            self.memDict["dd"]=[self.day]
            #dob: year
            self.year = Spinbox(self.gridframe, from_=1900, to=2500, width=7)
            self.year.grid(row=25, column=9, sticky=W)
            self.memDict["yy"]=[self.year]
            #phone
            self.phone = Entry(self.gridframe)
            self.phone.grid(row=26, column=7, sticky=W)
            self.memDict["phone"]=[self.phone]
            #self.addmemberON = True
#CLEAR WIDGETS FUNCTIONS
    def clearVisits(self):
        """This function clears the entry boxes/visit notes
        used for visits.

        Blanks the visit variables and notes widget, removes every
        visit-related widget from the grid, then restores the baseline
        visit layout (listbox, scrollbar and new/edit/delete buttons).
        """
        self.visit_listbox.delete(0, END)
        self.visv.set("")
        self.volv.set("")
        self.notv.set("")
        # notes widget is read-only; enable it just long enough to clear it
        self.notescv.config(state='normal')
        self.notescv.delete('1.0', END)
        self.notescv.config(state='disabled')
        # every widget that may be on screen in any visit sub-mode
        visitob = [self.visit_listbox, self.visit_scroll, self.visitdate,
                   self.newVisit, self.editVisit, self.deleteVisit,
                   self.saveVisit, self.saveVisitE, self.cancelVisit]
        for ob in visitob:
            ob.grid_forget()
        # re-grid the default visit widgets
        self.visit_listbox.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=W)
        self.visit_scroll.grid(row=8, column=3, rowspan=4, columnspan=1, sticky=E+N+S)
        self.newVisit.grid(row=8, column=8, sticky=W)
        self.editVisit.grid(row=9, column=8, sticky=W)
        self.deleteVisit.grid(row=10, column=8, sticky=W)
def clearFamily(self):
#forgets additional family members
self.family_listbox.delete(0, END)
try:
mfname = self.memDict["first"]
mlname = self.memDict["last"]
mm = self.memDict["mm"]
dd = self.memDict["dd"]
yy = self.memDict["yy"]
phnum = self.memDict["phone"]
easylist = [mfname, mlname, mm, dd,
yy, phnum]
for i in range(0, 6):
for j in range(0, len(easylist[i])):
easylist[i][j].grid_forget()
for i in range(0, 6):
easylist[i] = []
self.memDict = {}
except KeyError:
pass
def clearEntries(self):
"""This function clears the entry boxes that will never be
removed from the display.
"""
allvaries = [self.fnv, self.lnv, self.phv, self.mv, self.dv, self.yv,
self.adv, self.apv, self.q, self.agev,
self.notv, self.volv, self.visv, self.adl, self.chil,
self.sen, self.inf, self.tot, self.datejoinv, self.mvv,
self.dvv, self.yvv]
#Clears the entryboxes
for i in range(0, len(allvaries)):
allvaries[i].set("")
#sets defaulted entries
today = datetime.now()
todaystr = str(today.month)+'/'+str(today.day)+\
'/'+str(today.year)
#self.visdatev.set(todaystr)
self.datejoinv.set(todaystr)
self.ctyv.set("Troy")
self.stav.set("NY")
self.zpv.set(12180)
#new client stuff
allforgets = [self.family_listbox,
self.fam_scroll, self.addmemb, self.removmemb,
self.viewmemb, self.housetitle, self.houseSep, self.saveB,
self.cancelB, self.dispad, self.dischil, self.dissen,
self.disinf, self.distot, self.addhhsep, self.addhhtitle,
self.famNum, self.entNum, self.newMembersB,
self.newClientSave, self.cancelNewB, self.famname,
self.famfn, self.famln, self.famdob, self.famphone,
self.fammon, self.famday, self.famyear]
for i in range(0, len(allforgets)):
allforgets[i].forget()
allforgets[i].grid_forget()
#forgets additional family members
#self.family_listbox.delete(0, END)
self.clearFamily()
#forgets previous visit notes
self.clearVisits()
self.visitDict = {}
def monthbox_select(self, *args):
"""This function is called when a month is selected from the
month combobox. It will look up the month in the month_day_dict,
and assign the right number of days to the "dob" spinbox.
"""
month = self.mv.get()
days = self.month_day_dict[month]
self.dob.config(from_=1, to=days)
return
#visit buttons
def newvisitf(self):
"""This function will clear unnecessary widgets, add an entrybox
for the date, and prepopulate the date, volunteer, and visitor fields.
"""
#clear Notes, Vol, & Visitor
self.visit_listbox.grid_forget()
self.visit_scroll.grid_forget()
self.newVisit.grid_forget()
self.editVisit.grid_forget()
self.deleteVisit.grid_forget()
#set date of visit to today
today = datetime.now()
tstr = | |
in the LSTM layer
'lr': float
Learning rate
'dropout': float
Dropout in the LSTM layer
'optimizer': class
Keras optimizer function
'losses': func
Keras loss function
'activation': func
Keras activation in each layer
'last_activation': str
Keras activation in the output layer
prop_grid: float
proportion of the grid combinations to sample
tuning_output_file: str
path of the csv file where the grid search results will be stored
shuffle_epoch: Boolean
whether to shuffle the data after every epoch. Default: False
shuffle_grid: Boolean
whether to shuffle the parameter grid or respect the same order of parameters. Default: True
provided in `params'
num_threads: int
maximum number of processes to use - it should be >= 1. Default: 1
seed : int or None
random seed to initialise the pseudorandom number generator (for selecting the parameter
combinations to cover). Use it if you want to have replicable results. Default: None
verbose: int (0, 1, or 2)
verbosity mode. 0 = silent, 1 = one line per parameter combination, 2 = detailed. Default: 1
Returns
-------
None
save csv files
"""
### check verbose and convert to train()'s verbose
if verbose in (0, 1):
verbose_t = 0
elif verbose == 2:
verbose_t = 2
else:
raise ValueError("incorrect verbose value: choose an integer bewteen 0 and 2")
### Select the appropriate model generator based on the type of data
# Training data
if ((not isinstance(data_train, pd.DataFrame)) and
(not isinstance(data_train, IndexedFile)) and
(not isinstance(data_train, str))):
raise ValueError("data_train should be either a path to an event file, a dataframe or an indexed text file")
# Validation data
if ((not isinstance(data_train, pd.DataFrame)) and
(not isinstance(data_train, IndexedFile)) and
(not isinstance(data_train, str))):
raise ValueError("data_valid should be either a path to an event file, a dataframe or an indexed text file")
# Extract the dimensions of the pretrained embeddings
pretrain_embed_dim = {}
embed_inputs = params['embedding_input']
for i, e in enumerate(embed_inputs):
if embed_inputs[i] and embed_inputs[i] != 'learn':
pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})
# Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
### Remove impossible combinations
ind_to_remove = []
for i,d in enumerate(grid_full):
# In the case of no hidden layer, no need to set the 'activation' parameter - only 'last_activation' is used
if grid_full[i]['hidden_layers'] == 0:
grid_full[i]['activation'] = None
# In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
# it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
# embeddings to be learned from scratch
if not grid_full[i]['embedding_input']:
grid_full[i]['embedding_dim'] = None
elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
ind_to_remove.append(i)
elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]
# In the case of embeddings, it is essential to set 'max_len' (max_len cannot be None),
# so remove all cases where embeddings are used max_len is not given
if grid_full[i]['embedding_input'] and not grid_full[i]['max_len']:
ind_to_remove.append(i)
# First remove the detected impossible combinations (e.g. 'embedding_input = 'learn', embedding_dim = None')
for ii in sorted(ind_to_remove, reverse = True):
del grid_full[ii]
# Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]
# shuffle the list of params
if shuffle_grid:
random.Random(seed).shuffle(grid_full)
# Select the combinations to use
N_comb = round(prop_grid * len(grid_full))
grid_select = grid_full[:N_comb]
# Create a list of lists which stores all parameter combinations that are covered so far in the grid search
param_comb_sofar = []
### Write to the csv file that encodes the results
with open(tuning_output_file, mode = 'w', newline = '\n') as o:
csv_writer = csv.writer(o, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
heading = list(params.keys())
heading.extend(['loss', 'acc', 'precision', 'recall', 'f1score',
'val_loss', 'val_acc', 'val_precision', 'val_recall', 'val_f1score'])
csv_writer.writerow(heading)
### Run the experiments
for i, param_comb in enumerate(grid_select):
start = time.time()
# Message at the start of each iteration
if verbose != 0:
_ = sys.stdout.write('\n********************************** Iteration %d out of %d **********************************\n\n' % ((i+1), len(grid_select)))
_ = sys.stdout.write('*** Parameter combination \n\n')
_ = sys.stdout.write('%s\n' % (param_comb))
sys.stdout.flush()
# this will contain the values that will be recorded in each row.
# We start by copying the parameter values
row_values = list(param_comb.values())
# Check if the current parameter combination has already been processed in the grid search
if row_values in param_comb_sofar:
if verbose != 0:
_ = sys.stdout.write('\nThis parameter combination was skipped because it has already been processed\n')
sys.stdout.flush()
else:
# Fit the model given the current param combination
hist, model = train_FNN(data_train = data_train,
data_valid = data_valid,
cue_index = cue_index,
outcome_index = outcome_index,
shuffle_epoch = shuffle_epoch,
num_threads = num_threads,
verbose = verbose_t,
metrics = ['accuracy', 'precision', 'recall', 'f1score'],
params = param_comb)
# Get index of epochs and extract embedding name from the 'param_comb' dictionary
for ind, (k, v) in enumerate(param_comb.items()):
if k == 'epochs':
i_epochs = ind
elif (k == 'embedding_input') and isinstance(v, str) and (v != 'learn'):
# Extract the name of embedding from the path
row_values[ind] = ntpath.basename(v)[:-4]
elif (k == 'embedding_input') and not v:
row_values[ind] = 'onehot'
elif (k == 'embedding_dim') and not v:
row_values[ind] = 0
elif (k == 'activation') and not v:
row_values[ind] = 'none'
### Export the results to a csv file
for j in range(param_comb['epochs']):
# Copy the parameter values to current param combination variables
row_values_j = row_values.copy()
# correct the epoch num
row_values_j[i_epochs] = j+1
# Add the derived combination to the list of all parameter combinations
param_comb_sofar.append(row_values_j.copy())
# Add the performance scores
# training
loss_j = hist['loss'][j]
acc_j = hist['accuracy'][j]
precision_j = hist['precision'][j]
recall_j = hist['recall'][j]
f1score_j = hist['f1score'][j]
# validation
val_loss_j = hist['val_loss'][j]
val_acc_j = hist['val_accuracy'][j]
val_precision_j = hist['val_precision'][j]
val_recall_j = hist['val_recall'][j]
val_f1score_j = hist['val_f1score'][j]
row_values_j.extend([loss_j, acc_j, precision_j, recall_j, f1score_j,
val_loss_j, val_acc_j, val_precision_j, val_recall_j, val_f1score_j])
# Write the row
csv_writer.writerow(row_values_j)
o.flush()
if verbose == 1:
sys.stdout.write('\nIteration completed in %.0fs\n' % ((time.time() - start)))
sys.stdout.flush()
# Clear memory
del model, hist
gc.collect()
K.clear_session()
if verbose != 0:
_ = sys.stdout.write('\n********************************************************************************************\n')
sys.stdout.flush()
########
# LSTM
########
class generator_textfile_LSTM(keras.utils.all_utils.Sequence):
""" Class that generates batches of data ready for training an LSTM model. The data is expected to
be event style (cue and ouctomes seperated by underscores)
Attributes
----------
data: class
indexed text file
batch_size: int
number of examples in each batch
num_cues: int
number of allowed cues
num_outcomes: int
number of allowed outcomes
cue_index: dict
mapping from cues to indices
outcome_index: dict
mapping from outcomes to indices
max_len: int
Consider only 'max_len' first tokens in a sequence
vector_encoding: str
Whether to use one-hot encoding (='onehot') or embedding (='embedding'). Default: 'onehot'
shuffle_epoch: Boolean
whether to shuffle the data after every epoch
Returns
-------
class object
generator for keras. It inherites from keras.utils.all_utils.Sequence
"""
    def __init__(self, data, batch_size, num_cues, num_outcomes,
                 cue_index, outcome_index, max_len,
                 vector_encoding = 'onehot', shuffle_epoch = False):
        """Store the generator configuration and initialise the batch order
        (on_epoch_end builds the index array and shuffles it if requested).
        """
        self.data = data                          # indexed event file
        self.batch_size = batch_size              # examples per batch
        self.num_cues = num_cues                  # number of allowed cues
        self.num_outcomes = num_outcomes          # number of allowed outcomes
        self.cue_index = cue_index                # cue -> index mapping
        self.outcome_index = outcome_index        # outcome -> index mapping
        self.max_len = max_len                    # max tokens per sequence
        self.vector_encoding = vector_encoding    # 'onehot' or 'embedding'
        self.shuffle_epoch = shuffle_epoch        # reshuffle after each epoch
        self.on_epoch_end()
    def __len__(self):
        'Denotes the number of batches per epoch'
        # floor division: a trailing partial batch is dropped
        return int(np.floor(len(self.data) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indices of the batch
indexes_batch = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X, Y = self.__data_generation(indexes_batch)
return X, Y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.data))
if self.shuffle_epoch == True:
np.random.shuffle(self.indexes)
def __data_generation(self, indexes_batch):
'Generates data containing batch_size samples'
if self.vector_encoding == 'onehot': # One-hot encoding
seq_to_vec = seq_to_onehot_2darray
else: # Embedding
seq_to_vec = seq_to_integers_1darray
X_arrays = []
Y_arrays = []
for raw_event in self.data[indexes_batch]:
# extract the cues and outcomes sequences
cue_seq, outcome_seq = raw_event.strip().split('\t')
cues_onehot = seq_to_vec(cue_seq, self.cue_index, self.num_cues, self.max_len)
outcomes_onehot = seq_to_onehot_1darray(outcome_seq, self.outcome_index, self.num_outcomes)
X_arrays.append(cues_onehot)
Y_arrays.append(outcomes_onehot)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import quantipy as qp
from collections import OrderedDict
from quantipy.core.tools.qp_decorators import *
import copy as org_copy
import warnings
import re
from quantipy.core.tools.view.logic import (
has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection, get_logic_index)
def meta_editor(self, dataset_func):
    """
    Decorator for inherited DataSet methods.

    Wraps ``dataset_func`` so that the edit is applied to a clone of the
    DataSet and the resulting meta is recorded in ``Batch.meta_edits``,
    leaving the global meta data untouched.
    """
    def edit(*args, **kwargs):
        # get name and type of the variable for correct dict references
        name = args[0] if args else kwargs['name']
        if not isinstance(name, list): name = [name]
        # create DataSet clone to leave global meta data untouched
        if self.edits_ds is None:
            self.edits_ds = qp.DataSet.clone(self)
        ds_clone = self.edits_ds
        var_edits = []
        # inject Batch defaults (weight/ filter) for min_value_count
        if dataset_func.func_name == 'min_value_count':
            if len(args) < 3 and not 'weight' in kwargs:
                kwargs['weight'] = self.weights[0]
            if len(args) < 4 and not 'condition' in kwargs:
                if self.filter:
                    kwargs['condition'] = self.filter.values()[0]
        # inject Batch default weight for sorting
        elif dataset_func.func_name == 'sorting':
            if len(args) < 7 and not 'sort_by_weight' in kwargs:
                kwargs['sort_by_weight'] = self.weights[0]
        for n in name:
            is_array = self.is_array(n)
            is_array_item = self._is_array_item(n)
            has_edits = n in self.meta_edits
            parent = self._maskname_from_item(n) if is_array_item else None
            parent_edits = parent in self.meta_edits
            source = self.sources(n) if is_array else []
            source_edits = [s in self.meta_edits for s in source]
            # are we adding to already existing batch meta edits? (use copy then!)
            var_edits += [(n, has_edits), (parent, parent_edits)]
            var_edits += [(s, s_edit) for s, s_edit in zip(source, source_edits)]
        # seed the clone with any pre-existing batch edits so they stack
        for var, edits in var_edits:
            if edits:
                copied_meta = org_copy.deepcopy(self.meta_edits[var])
                if not self.is_array(var):
                    ds_clone._meta['columns'][var] = copied_meta
                else:
                    ds_clone._meta['masks'][var] = copied_meta
                if self.meta_edits['lib'].get(var):
                    lib = self.meta_edits['lib'][var]
                    ds_clone._meta['lib']['values'][var] = lib
        # use qp.DataSet method to apply the edit
        dataset_func(ds_clone, *args, **kwargs)
        # grab edited meta data and collect via Batch.meta_edits attribute
        for n in self.unroll(name, both='all'):
            if not self.is_array(n):
                meta = ds_clone._meta['columns'][n]
                text_edits = ['set_col_text_edit', 'set_val_text_edit']
                # NOTE(review): is_array_item/parent still hold the values from
                # the *last* iteration of the loop above -- presumably fine for
                # single-name calls; confirm for multi-name text edits.
                if dataset_func.func_name in text_edits and is_array_item:
                    self.meta_edits[parent] = ds_clone._meta['masks'][parent]
                    lib = ds_clone._meta['lib']['values'][parent]
                    self.meta_edits['lib'][parent] = lib
            else:
                meta = ds_clone._meta['masks'][n]
                if ds_clone._has_categorical_data(n):
                    self.meta_edits['lib'][n] = ds_clone._meta['lib']['values'][n]
            self.meta_edits[n] = meta
        # edits that change aggregation layout must refresh the batch meta
        if dataset_func.func_name in ['hiding', 'slicing', 'min_value_count', 'sorting']:
            self._update()
    return edit
def not_implemented(dataset_func):
    """
    Decorator for UNALLOWED DataSet methods.

    Returns a stand-in that raises ``NotImplementedError`` whenever the
    wrapped ``DataSet`` method is invoked on a Batch instance.
    """
    def _unallowed_inherited_method(*args, **kwargs):
        raise NotImplementedError(
            'DataSet method not allowed for Batch editing!')
    return _unallowed_inherited_method
class Batch(qp.DataSet):
"""
A Batch is a container for structuring a Link collection's
specifications aimed at Excel and/or PPTX build Clusters.
"""
    def __init__(self, dataset, name, ci=['c', 'p'], weights=None, tests=None):
        """Register (or reload) the Batch definition *name* on *dataset*.

        The Batch shares *dataset*'s meta/data objects (no copy is made), so
        everything written into ``_meta['sets']['batches']`` here is visible
        through the parent DataSet as well.

        NOTE(review): the mutable default ``ci=['c', 'p']`` is shared across
        calls; safe only as long as ``set_cell_items`` never mutates it.
        """
        if '-' in name: raise ValueError("Batch 'name' must not contain '-'!")
        sets = dataset._meta['sets']
        if not 'batches' in sets: sets['batches'] = OrderedDict()
        self.name = name
        meta, data = dataset.split()
        self._meta = meta
        self._data = data
        self.edits_ds = None
        self.valid_tks = dataset.valid_tks
        self.text_key = dataset.text_key
        self.sample_size = None
        self._verbose_errors = dataset._verbose_errors
        self._verbose_infos = dataset._verbose_infos
        self._dimensions_comp = dataset._dimensions_comp
        # RENAMED DataSet methods
        self._dsfilter = qp.DataSet.filter.__func__
        if sets['batches'].get(name):
            # a batch of this name already exists: restore it from meta
            if self._verbose_infos:
                print "Load Batch '{}'.".format(name)
            self._load_batch()
        else:
            # fresh batch: initialise every definition attribute
            sets['batches'][name] = {'name': name, 'additions': []}
            self.xks = []
            self.yks = ['@']
            self._variables = []
            self._section_starts = {}
            self.total = True
            self.extended_yks_per_x = {}
            self.exclusive_yks_per_x = {}
            self.extended_filters_per_x = {}
            self.filter = None
            self.filter_names = []
            self.x_y_map = None
            self.x_filter_map = None
            self.y_on_y = []
            self.y_on_y_filter = {}
            self.y_filter_map = {}
            self.forced_names = {}
            self.transposed = []
            self.leveled = {}
            # self.summaries = []
            # self.transposed_arrays = {}
            self.skip_items = []
            self.verbatims = []
            # self.verbatim_names = []
            self.set_cell_items(ci)   # self.cell_items
            self.unwgt_counts = False
            self.set_weights(weights) # self.weights
            self.set_sigtests(tests)  # self.sigproperties
            self.additional = False
            self.meta_edits = {'lib': {}}
            self.build_info = {}
            self.set_language(dataset.text_key) # self.language
            self._update()
        # DECORATED / OVERWRITTEN DataSet methods
        # self.hide_empty_items = meta_editor(self, qp.DataSet.hide_empty_items.__func__)
        self.hiding = meta_editor(self, qp.DataSet.hiding.__func__)
        self.min_value_count = meta_editor(self, qp.DataSet.min_value_count.__func__)
        self.sorting = meta_editor(self, qp.DataSet.sorting.__func__)
        self.slicing = meta_editor(self, qp.DataSet.slicing.__func__)
        self.set_variable_text = meta_editor(self, qp.DataSet.set_variable_text.__func__)
        self.set_value_texts = meta_editor(self, qp.DataSet.set_value_texts.__func__)
        self.set_property = meta_editor(self, qp.DataSet.set_property.__func__)
        # UNALLOWED DataSet methods
        # self.add_meta = not_implemented(qp.DataSet.add_meta.__func__)
        self.derive = not_implemented(qp.DataSet.derive.__func__)
        self.remove_items = not_implemented(qp.DataSet.remove_items.__func__)
        self.set_missings = not_implemented(qp.DataSet.set_missings.__func__)
def _update(self):
"""
Update Batch metadata with Batch attributes.
"""
self._map_x_to_y()
self._map_x_to_filter()
self._split_level_arrays()
self._map_y_on_y_filter()
self._samplesize_from_batch_filter()
attrs = self.__dict__
for attr in ['xks', 'yks', '_variables', 'filter', 'filter_names',
'x_y_map', 'x_filter_map', 'y_on_y', 'y_on_y_filter',
'forced_names', 'transposed', 'leveled', 'verbatims',
'extended_yks_per_x',
'exclusive_yks_per_x', 'extended_filters_per_x', 'meta_edits',
'cell_items', 'weights', 'sigproperties', 'additional',
'sample_size', 'language', 'name', 'skip_items', 'total',
'unwgt_counts', 'y_filter_map', 'build_info',
'_section_starts'
]:
attr_update = {attr: attrs.get(attr, attrs.get('_{}'.format(attr)))}
self._meta['sets']['batches'][self.name].update(attr_update)
def _load_batch(self):
"""
Fill batch attributes with information from meta.
"""
bdefs = self._meta['sets']['batches'][self.name]
for attr in ['xks', 'yks', '_variables', 'filter', 'filter_names',
'x_y_map', 'x_filter_map', 'y_on_y', 'y_on_y_filter',
'forced_names', 'transposed', 'leveled', 'verbatims',
'extended_yks_per_x',
'exclusive_yks_per_x', 'extended_filters_per_x', 'meta_edits',
'cell_items', 'weights', 'sigproperties', 'additional',
'sample_size', 'language', 'skip_items', 'total', 'unwgt_counts',
'y_filter_map', 'build_info', '_section_starts'
]:
attr_load = {attr: bdefs.get(attr, bdefs.get('_{}'.format(attr)))}
self.__dict__.update(attr_load)
    def clone(self, name, b_filter=None, as_addition=False):
        """
        Create a copy of Batch instance.

        Parameters
        ----------
        name: str
            Name of the Batch instance that is copied.
        b_filter: tuple (str, dict/ complex logic)
            Filter logic which is applied on the new batch.
            (filtername, filterlogic)
        as_addition: bool, default False
            If True, the new batch is added as addition to the master batch.

        Returns
        -------
        New/ copied Batch instance.
        """
        org_name = self.name
        org_meta = org_copy.deepcopy(self._meta['sets']['batches'][org_name])
        self._meta['sets']['batches'][name] = org_meta
        verbose = self._verbose_infos
        # silence info output while get_batch() re-instantiates the copy
        self.set_verbose_infomsg(False)
        batch_copy = self.get_batch(name)
        self.set_verbose_infomsg(verbose)
        batch_copy.set_verbose_infomsg(verbose)
        if b_filter:
            batch_copy.add_filter(b_filter[0], b_filter[1])
        # re-point copied verbatims at the copy's own filter
        if batch_copy.verbatims and b_filter and not as_addition:
            for oe in batch_copy.verbatims:
                oe["filter"] = batch_copy.filter
        if as_addition:
            batch_copy.as_addition(self.name)
        batch_copy._update()
        return batch_copy
    def remove(self):
        """
        Remove instance from meta object.
        """
        name = self.name
        adds = self._meta['sets']['batches'][name]['additions']
        if adds:
            # drop additions that are also claimed by another batch ...
            for bname, bdef in self._meta['sets']['batches'].items():
                if bname == name: continue
                for add in adds[:]:
                    if add in bdef['additions']:
                        adds.remove(add)
            # ... and release the remaining ones back to stand-alone batches
            for add in adds:
                self._meta['sets']['batches'][add]['additional'] = False
        del(self._meta['sets']['batches'][name])
        if self._verbose_infos:
            print "Batch '%s' is removed from meta-object." % name
        # rebinding the local name only; callers must drop their own references
        self = None
        return None
def _rename_in_additions(self, find_bname, new_name):
for bname, bdef in self._meta['sets']['batches'].items():
if find_bname in bdef['additions']:
adds = bdef['additions']
adds[adds.index(find_bname)] = new_name
bdef['additions'] = adds
return None
def rename(self, new_name):
"""
Rename instance, updating ``DataSet`` references to the definiton, too.
"""
if new_name in self._meta['sets']['batches']:
raise KeyError("'%s' is already included!" % new_name)
batches = self._meta['sets']['batches']
org_name = self.name
batches[new_name] = batches.pop(org_name)
self._rename_in_additions(org_name, new_name)
self.name = new_name
self._update()
return None
@modify(to_list='ci')
def set_cell_items(self, ci):
"""
Assign cell items ('c', 'p', 'cp').
Parameters
----------
ci: str/ list of str, {'c', 'p', 'cp'}
Cell items used for this Batch instance.
Returns
-------
None
"""
if any(c not in ['c', 'p', 'cp'] for c in ci):
raise ValueError("'ci' cell items must be either 'c', 'p' or 'cp'.")
self.cell_items = ci
self._update()
return None
def set_unwgt_counts(self, unwgt=False):
"""
Assign if counts (incl. nets) should be aggregated unweighted.
"""
self.unwgt_counts = unwgt
self._update()
return None
@modify(to_list='w')
def set_weights(self, w):
"""
Assign a weight variable setup.
Parameters
----------
w: str/ list of str
Name(s) of the weight variable(s).
Returns
-------
None
"""
if not w:
w = [None]
elif any(we is None for we in w):
w = [None] + [we for we in w if not we is None]
self.weights = w
if any(weight not in self.columns() for weight in w if not weight is None):
raise ValueError('{} is not in DataSet.'.format(w))
self._update()
return None
@modify(to_list='levels')
def set_sigtests(self, levels=None, flags=[30, 100], test_total=False, mimic=None):
"""
Specify a significance test setup.
Parameters
----------
levels: float/ list of float
Level(s) for significance calculation(s).
mimic/ flags/ test_total:
Currently not implemented.
Returns
-------
None
"""
if levels and self.total:
if not all(isinstance(l, float) for l in levels):
raise TypeError('All significance levels must be provided as floats!')
levels = sorted(levels)
else:
levels = []
self.sigproperties = {'siglevels': levels,
'test_total': test_total,
'flag_bases': flags,
'mimic': ['Dim']}
if mimic :
err = ("Changes to 'mimic' are currently not allowed!")
raise NotImplementedError(err)
self._update()
return None
@verify(text_keys='text_key')
def set_language(self, text_key):
"""
Set ``Batch.language`` indicated via the ``text_key`` for Build exports.
Parameters
----------
text_key: str
The text_key used as language for the Batch instance
Returns
-------
None
"""
self.language = text_key
self._update()
return None
def as_addition(self, batch_name):
"""
Treat the Batch as additional aggregations, independent from the
global Batch & Build setup.
Parameters
----------
batch_name: str
Name of the Batch instance where the current | |
argument `machine`.\n"
# --- `info` abbreviation and the `list`/`ll` source-listing commands --------

def test_handle_info_abbreviated(shell):
    # 'i' must dispatch to the full `info` handler
    with patch("hera.debugger.shell.Shell.handle_info") as mock_handle_info:
        shell.handle_command("i")
        assert mock_handle_info.call_count == 1

def test_handle_list(shell, capsys):
    shell.handle_command("list")
    captured = capsys.readouterr().out
    assert (
        captured
        == """\
[<string>]
1 // A comment
2 CONSTANT(N, 3)
3
-> 4 SET(R1, N)
5 SET(R2, 39)
6 LABEL(add)
7 ADD(R3, R1, R2)
"""
    )

def test_handle_list_with_context_arg(shell, capsys):
    # `list N` limits the listing to N lines of context around the arrow
    shell.handle_command("list 1")
    captured = capsys.readouterr().out
    assert (
        captured
        == """\
[<string>]
3
-> 4 SET(R1, N)
5 SET(R2, 39)
"""
    )

def test_handle_list_with_invalid_context_arg(shell, capsys):
    shell.handle_command("list abc")
    assert capsys.readouterr().out == "Could not parse argument to list.\n"

def test_handle_list_after_end_of_program(capsys):
    shell = load_shell("SET(R1, 42)")
    shell.handle_command("c")
    capsys.readouterr()
    shell.handle_command("list")
    assert capsys.readouterr().out == "Program has finished executing.\n"

def test_handle_list_with_too_many_args(shell, capsys):
    shell.handle_command("list 1 2")
    assert capsys.readouterr().out == "list takes zero or one arguments.\n"

def test_handle_list_abbreviated(shell):
    with patch("hera.debugger.shell.Shell.handle_list") as mock_handle_list:
        shell.handle_command("l")
        assert mock_handle_list.call_count == 1

def test_handle_ll(shell, capsys):
    # `ll` lists the whole program, not just the context window
    shell.handle_command("ll")
    captured = capsys.readouterr().out
    assert (
        captured
        == """\
[<string>]
1 // A comment
2 CONSTANT(N, 3)
3
-> 4 SET(R1, N)
5 SET(R2, 39)
6 LABEL(add)
7 ADD(R3, R1, R2)
8 HALT()
9
"""
    )

def test_handle_ll_with_too_many_args(shell, capsys):
    shell.handle_command("ll 1")
    assert capsys.readouterr().out == "ll takes no arguments.\n"

def test_handle_ll_after_end_of_program(capsys):
    shell = load_shell("SET(R1, 42)")
    shell.handle_command("c")
    capsys.readouterr()
    shell.handle_command("ll")
    assert capsys.readouterr().out == "Program has finished executing.\n"

def test_handle_abbreviated_ll(shell):
    # 'll' is its own (shortest) spelling; it still routes through handle_ll
    with patch("hera.debugger.shell.Shell.handle_ll") as mock_handle_ll:
        shell.handle_command("ll")
        assert mock_handle_ll.call_count == 1
# --- `on`/`off` flag-manipulation commands ----------------------------------

def test_handle_on(shell):
    shell.handle_command("on carry-block")
    shell.handle_command("on carry")
    shell.handle_command("on overflow")
    shell.handle_command("on sign")
    shell.handle_command("on zero")
    assert shell.debugger.vm.flag_carry_block
    assert shell.debugger.vm.flag_carry
    assert shell.debugger.vm.flag_overflow
    assert shell.debugger.vm.flag_sign
    assert shell.debugger.vm.flag_zero

def test_handle_on_with_abbreviated_flag(shell):
    # single-letter spellings map to the same flags
    shell.handle_command("on cb")
    shell.handle_command("on c")
    shell.handle_command("on v")
    shell.handle_command("on s")
    shell.handle_command("on z")
    assert shell.debugger.vm.flag_carry_block
    assert shell.debugger.vm.flag_carry
    assert shell.debugger.vm.flag_overflow
    assert shell.debugger.vm.flag_sign
    assert shell.debugger.vm.flag_zero

def test_handle_on_with_multiple_args(shell):
    shell.handle_command("on c v")
    assert shell.debugger.vm.flag_carry
    assert shell.debugger.vm.flag_overflow

def test_handle_on_with_invalid_flag(shell, capsys):
    shell.handle_command("on c y")
    assert capsys.readouterr().out == "Unrecognized flag: `y`.\n"

def test_handle_on_with_no_args(shell, capsys):
    shell.handle_command("on")
    assert capsys.readouterr().out == "on takes one or more arguments.\n"

def test_handle_off(shell):
    # start from all-set so clearing is observable
    shell.debugger.vm.flag_carry_block = True
    shell.debugger.vm.flag_carry = True
    shell.debugger.vm.flag_overflow = True
    shell.debugger.vm.flag_sign = True
    shell.debugger.vm.flag_zero = True
    shell.handle_command("off carry-block")
    shell.handle_command("off carry")
    shell.handle_command("off overflow")
    shell.handle_command("off sign")
    shell.handle_command("off zero")
    assert not shell.debugger.vm.flag_carry_block
    assert not shell.debugger.vm.flag_carry
    assert not shell.debugger.vm.flag_overflow
    assert not shell.debugger.vm.flag_sign
    assert not shell.debugger.vm.flag_zero

def test_handle_off_with_abbreviated_flag(shell):
    shell.debugger.vm.flag_carry_block = True
    shell.debugger.vm.flag_carry = True
    shell.debugger.vm.flag_overflow = True
    shell.debugger.vm.flag_sign = True
    shell.debugger.vm.flag_zero = True
    shell.handle_command("off cb")
    shell.handle_command("off c")
    shell.handle_command("off v")
    shell.handle_command("off s")
    shell.handle_command("off z")
    assert not shell.debugger.vm.flag_carry_block
    assert not shell.debugger.vm.flag_carry
    assert not shell.debugger.vm.flag_overflow
    assert not shell.debugger.vm.flag_sign
    assert not shell.debugger.vm.flag_zero

def test_handle_off_with_multiple_args(shell):
    shell.debugger.vm.flag_carry = True
    shell.debugger.vm.flag_overflow = True
    shell.handle_command("off c v")
    assert not shell.debugger.vm.flag_carry
    assert not shell.debugger.vm.flag_overflow

def test_handle_off_with_invalid_flag(shell, capsys):
    shell.handle_command("off c y")
    assert capsys.readouterr().out == "Unrecognized flag: `y`.\n"

def test_handle_off_with_no_args(shell, capsys):
    shell.handle_command("off")
    assert capsys.readouterr().out == "off takes one or more arguments.\n"
# --- `restart` and assignment (`x = y` / `assign`) commands -----------------

def test_handle_restart(shell, capsys):
    shell.handle_command("n")
    shell.handle_command("n")
    capsys.readouterr()
    shell.handle_command("restart")
    vm = shell.debugger.vm
    # restart resets the VM and re-prints the initial listing
    assert vm.pc == 0
    assert vm.registers[1] == 0
    assert vm.registers[2] == 0
    assert (
        capsys.readouterr().out
        == """\
[<string>]
3
-> 4 SET(R1, N)
5 SET(R2, 39)
"""
    )

def test_handle_restart_with_too_many_args(shell, capsys):
    shell.handle_command("restart 1")
    assert capsys.readouterr().out == "restart takes no arguments.\n"

def test_restart_cannot_be_abbreviated(shell, capsys):
    # deliberately: 'r' alone must NOT trigger a restart (too destructive)
    with patch("hera.debugger.shell.Shell.handle_restart") as mock_handle_restart:
        shell.handle_command("r")
        assert mock_handle_restart.call_count == 0

def test_handle_assign_to_register(shell):
    shell.handle_command("r12 = 10")
    assert shell.debugger.vm.registers[12] == 10

def test_handle_assign_negative_number_to_register(shell):
    shell.handle_command("r12 = -0xabc")
    assert shell.debugger.vm.registers[12] == -0xABC

def test_handle_assign_to_memory_location(shell):
    # @Rn dereferences: store 4000 at the address held in R9
    shell.debugger.vm.registers[9] = 1000
    shell.handle_command("@R9 = 4000")
    assert shell.debugger.vm.memory[1000] == 4000

def test_handle_assign_to_PC(shell):
    shell.handle_command("pc = 10")
    assert shell.debugger.vm.pc == 10

def test_handle_assign_to_symbol(shell, capsys):
    shell.handle_command("f_c = 10")
    assert capsys.readouterr().out == "Eval error: cannot assign to symbol.\n"
    assert "f_c" not in shell.debugger.symbol_table

def test_handle_assign_to_arithmetic_expression(shell, capsys):
    shell.handle_command("1 + 1 = 3")
    assert (
        capsys.readouterr().out
        == "Eval error: cannot assign to arithmetic expression.\n"
    )

def test_handle_assign_with_undefined_symbol(shell, capsys):
    # the target register keeps its old value when the RHS fails to evaluate
    shell.debugger.vm.registers[4] = 42
    shell.handle_command("r4 = whatever")
    assert shell.debugger.vm.registers[4] == 42
    assert capsys.readouterr().out == "Eval error: whatever is not defined.\n"

def test_handle_assign_register_to_symbol(shell):
    shell.handle_command("r7 = add")
    assert shell.debugger.vm.registers[7] == 4

def test_handle_assign_with_invalid_syntax(shell, capsys):
    shell.handle_command("@ = R5")
    assert capsys.readouterr().out == "Parse error: premature end of input.\n"

def test_handle_assign_with_explicit_command(shell):
    shell.handle_command("assign r7 add")
    assert shell.debugger.vm.registers[7] == 4

def test_handle_assign_with_too_many_args(shell, capsys):
    shell.handle_command("assign r1 r2 r3")
    assert capsys.readouterr().out == "assign takes two arguments.\n"
# --- `print` command: formats, expressions and error cases ------------------

def test_handle_print_register(shell, capsys):
    shell.handle_command("print R1")
    assert capsys.readouterr().out == "0\n"

def test_handle_print_PC_ret(shell, capsys):
    # values that look like code addresses get a [file:line] annotation
    shell.debugger.vm.registers[13] = 2
    shell.handle_command("print PC_ret")
    assert capsys.readouterr().out == "2 [<string>:5]\n"

def test_handle_print_PC_ret_with_explicit_format(shell, capsys):
    shell.debugger.vm.registers[13] = 2
    shell.handle_command("print :xd PC_ret")
    assert capsys.readouterr().out == "0x0002 = 2\n"

def test_handle_print_with_format_string(shell, capsys):
    shell.debugger.vm.registers[5] = 2
    shell.handle_command("print :bl r5")
    assert capsys.readouterr().out == "0b0000000000000010 [<string>:5]\n"

def test_handle_print_zero_register_with_char_format_string(shell, capsys):
    shell.handle_command("print :c R0")
    assert capsys.readouterr().out == "'\\x00'\n"

def test_handle_print_large_int_with_char_format_string(shell, capsys):
    shell.debugger.vm.registers[5] = 1000
    shell.handle_command("print :c R5")
    assert capsys.readouterr().out == "not an ASCII character\n"

def test_handle_print_unsigned_integer_with_signed_format_string(shell, capsys):
    shell.debugger.vm.registers[5] = 42
    shell.handle_command("print :s R5")
    assert capsys.readouterr().out == "not a signed integer\n"

def test_handle_print_with_restrictive_format_string(shell, capsys):
    shell.debugger.vm.registers[13] = 2
    shell.handle_command("print :o r13")
    assert capsys.readouterr().out == "0o00000002\n"

def test_handle_print_memory_expression(shell, capsys):
    shell.debugger.vm.registers[1] = 4
    shell.debugger.vm.memory[4] = 42
    shell.handle_command("print @r1")
    assert capsys.readouterr().out == "42 = '*'\n"

def test_handle_print_PC(shell, capsys):
    shell.debugger.vm.pc = 4
    shell.handle_command("print PC")
    assert capsys.readouterr().out == "4 [<string>:7]\n"

def test_handle_print_PC_with_nonsense_value(shell, capsys):
    # PC outside the program gets no [file:line] annotation
    shell.debugger.vm.pc = 300
    shell.handle_command("print pc")
    assert capsys.readouterr().out == "300\n"

def test_handle_print_int(shell, capsys):
    shell.handle_command("print 17")
    assert capsys.readouterr().out == "17\n"

def test_handle_print_arithmetic_expression(shell, capsys):
    shell.handle_command("print :x 21 * (1+1)")
    assert capsys.readouterr().out == "0x002a\n"

def test_handle_print_with_another_arithmetic_expression(shell, capsys):
    shell.debugger.vm.registers[1] = 60
    shell.handle_command("print :d r1-12")
    assert capsys.readouterr().out == "48\n"

def test_handle_print_with_multiple_arguments(shell, capsys):
    # comma-separated targets are labeled individually
    shell.debugger.vm.registers[1] = 5
    shell.debugger.vm.registers[2] = 7
    shell.handle_command("print :d r1, r2")
    assert capsys.readouterr().out == "R1 = 5\nR2 = 7\n"

def test_handle_print_symbol(shell, capsys):
    shell.handle_command("print add")
    assert capsys.readouterr().out == "4 [<string>:7]\n"

def test_handle_print_undefined_symbol(shell, capsys):
    shell.handle_command("print whatever")
    assert capsys.readouterr().out == "Eval error: whatever is not defined.\n"

def test_handle_print_invalid_register(shell, capsys):
    shell.handle_command("print R17")
    assert capsys.readouterr().out == "Parse error: R17 is not a valid register.\n"

def test_handle_print_with_division_by_zero(shell, capsys):
    shell.handle_command("print 10 / 0")
    assert capsys.readouterr().out == "Eval error: division by zero.\n"

def test_handle_print_with_nested_division_by_zero(shell, capsys):
    shell.handle_command("print @(10 / 0)")
    assert capsys.readouterr().out == "Eval error: division by zero.\n"

def test_handle_print_with_integer_literal_too_big(shell, capsys):
    shell.handle_command("print 100000")
    assert capsys.readouterr().out == "Eval error: integer literal exceeds 16 bits.\n"

def test_handle_print_with_overflow_from_multiplication(shell, capsys):
    shell.handle_command("print 30000*40")
    assert capsys.readouterr().out == "Eval error: overflow from *.\n"

def test_handle_print_with_integer_literal_too_small(shell, capsys):
    shell.handle_command("print -65000")
    assert capsys.readouterr().out == "Eval error: overflow from unary -.\n"

def test_handle_print_with_invalid_format(shell, capsys):
    shell.handle_command("print :y R1")
    assert capsys.readouterr().out == "Unknown format specifier `y`.\n"

def test_handle_print_undefined_symbol_in_memory_expression(shell, capsys):
    shell.handle_command("print @whatever")
    assert capsys.readouterr().out == "Eval error: whatever is not defined.\n"

def test_handle_print_case_sensitive_symbol(shell, capsys):
    shell.debugger.symbol_table["ADD"] = 10
    shell.handle_command("print ADD")
    assert capsys.readouterr().out == "10\n"

def test_handle_print_with_too_few_args(shell, capsys):
    shell.handle_command("print")
    assert capsys.readouterr().out == "print takes one or more arguments.\n"

def test_handle_print_abbreviated(shell):
    with patch("hera.debugger.shell.Shell.handle_print") as mock_handle_print:
        shell.handle_command("p @R7")
        assert mock_handle_print.call_count == 1
        args, kwargs = mock_handle_print.call_args
        assert len(args) == 1
        assert args[0] == "@R7"
        assert len(kwargs) == 0
# --- `doc` and `help` commands ----------------------------------------------

def test_handle_doc(shell, capsys):
    shell.handle_command("doc")
    out = capsys.readouterr().out
    assert "SET" in out

def test_handle_doc_with_arguments(shell, capsys):
    # operation names are case-insensitive
    shell.handle_command("doc ADD xor Buler")
    out = capsys.readouterr().out
    assert "ADD" in out
    assert "XOR" in out
    assert "BULER" in out

def test_handle_doc_with_nonexistent_operation(shell, capsys):
    shell.handle_command("doc div")
    out = capsys.readouterr().out
    assert "DIV is not a HERA operation" in out

def test_handle_doc_with_BRANCH(shell, capsys):
    # generic 'branch' shows both branch families
    shell.handle_command("doc branch")
    out = capsys.readouterr().out
    assert "Register branch" in out
    assert "Relative branch" in out

def test_handle_help(shell, capsys):
    shell.handle_command("help")
    out = capsys.readouterr().out
    assert "Available commands" in out
    assert "Error:" not in out

def test_handle_help_with_one_arg(shell, capsys):
    shell.handle_command("help next")
    out = capsys.readouterr().out
    # Make sure it's not just printing the regular help message.
    assert "Available commands" not in out
    assert "next" in out
    assert "Execute the current line" in out

def test_handle_help_with_abbreviated_command_name(shell, capsys):
    shell.handle_command("help n")
    out = capsys.readouterr().out
    # Make sure it's not just printing the regular help message.
    assert "Available commands" not in out
    assert "next" in out
    assert "Execute the current line" in out

def test_handle_help_with_multiple_args(shell, capsys):
    shell.handle_command("help break next")
    out = capsys.readouterr().out
    # Make sure it's not just printing the regular help message.
    assert "Available commands" not in out
    assert "next" in out
    assert "Execute the current line" in out
    assert "break" in out

def test_handle_help_with_all_commands(shell, capsys):
    # every documented command must have a help entry
    shell.handle_command(
        "help assign break clear continue execute help info list ll next off on print \
restart goto step undo quit asm dis"
    )
    assert "not a recognized command" not in capsys.readouterr().out

def test_handle_help_with_unknown_command(shell, capsys):
    shell.handle_command("help whatever")
    assert capsys.readouterr().out == "whatever is not a recognized command.\n"

def test_handle_help_abbreviated(shell):
    with patch("hera.debugger.shell.Shell.handle_help") as mock_handle_help:
        shell.handle_command("h break")
        assert mock_handle_help.call_count == 1
        args, kwargs = mock_handle_help.call_args
        assert len(args) == 1
        assert args[0] == ["break"]
        assert len(kwargs) == 0
# --- `step` command (step into a CALL) --------------------------------------

def test_handle_step(capsys):
    shell = load_shell(
        """\
SET(R1, 4)
CALL(FP_alt, plus_two)
SET(R2, 5)
HALT()
LABEL(plus_two)
INC(R1, 2)
RETURN(FP_alt, PC_ret)
"""
    )
    shell.handle_command("n")
    capsys.readouterr()
    shell.handle_command("step")
    # after stepping into the CALL we are inside plus_two, before INC runs
    assert shell.debugger.vm.pc == 8
    assert shell.debugger.vm.registers[1] == 4
    assert shell.debugger.vm.registers[2] == 0
    captured = capsys.readouterr().out
    assert (
        captured
        == """\
[<string>]
6 LABEL(plus_two)
-> 7 INC(R1, 2)
8 RETURN(FP_alt, PC_ret)
"""
    )

def test_handle_step_not_on_CALL(shell, capsys):
    shell.handle_command("step")
    assert (
        capsys.readouterr().out
        == "step is only valid when the current instruction is CALL.\n"
    )

def test_handle_step_with_too_many_args(shell, capsys):
    shell.handle_command("step 1")
    assert capsys.readouterr().out == "step takes no arguments.\n"
def | |
# BUILDER/tools/toolkit.py
print "Loading libraries"
from PIL import Image,ImageTk,ImagePalette
from ffmpy import FFmpeg
from itertools import chain
import Tkinter as tk
import sys,os,ctypes,subprocess,getopt,shutil,struct,time,colorsys
# Shorthand aliases used throughout the build scripts.
np = os.path.normpath  # NOTE: *not* numpy -- a local alias for path normalization
cwd = os.getcwd()
# Working/ output directories (created below if missing).
TEMP_DIR = np(cwd+"/obj/")
TEMP_PNG_DIR = np(cwd+"/obj/png")
OUTPUT_DIR = np(cwd+"/bin")
STATUS_FILE = np(TEMP_DIR+'/curstate')
def GETIMGPATH(fname):
    """Return the normalized path of *fname* inside the temp PNG directory."""
    return np(TEMP_PNG_DIR + "/" + fname)
def GETIMGNAMES():
    """Return the sorted list of file names in the temp PNG directory."""
    global TEMP_PNG_DIR
    entries = os.listdir(TEMP_PNG_DIR)
    files = [f for f in entries
             if os.path.isfile(os.path.join(TEMP_PNG_DIR, f))]
    return sorted(files)
def ensure_dir(d):
    """Create directory *d* (including parents) unless it already exists."""
    if os.path.isdir(d):
        return
    os.makedirs(d)
# Make sure the working directories exist before any tool uses them.
ensure_dir(TEMP_DIR)
ensure_dir(TEMP_PNG_DIR)
ensure_dir(OUTPUT_DIR)
# Human-readable codec names, keyed by encoder id (written into video headers).
ENCODER_NAMES = { 1: "1B3X-ZX7",
                  2: "2B3X-ZX7",
                  3: "2B1X-ZX7",
                  4: "1B1X-ZX7",
                  5: "4C3X-ZX7",
                  6: "4A3X-ZX7",
}
# Frames packed per compressed segment, keyed by the same encoder id.
FPSEG_BY_ENCODER = { 1:30,
                     2:15,
                     3:6,
                     4:10,
                     5:15,
                     6:10,
}
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Miscellaneous
def readFile(fn):
    """Read the whole file *fn* and return its contents as a list of ints.

    Replaces the original one-byte-at-a-time read loop (one syscall per byte)
    with a single read. ``bytearray`` iteration yields ints on both Python 2
    and Python 3, so the return value is unchanged.
    """
    with open(fn, "rb") as f:
        return list(bytearray(f.read()))
def writeFile(fn, a):
    """Write the byte values in *a* out to file *fn*, truncating it first."""
    payload = bytearray(a)
    with open(fn, "wb+") as out:
        out.write(payload)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Export data to appvar
# TI-8x variable type ids and flags used when building .8xv variable headers.
TI_VAR_PROG_TYPE = 0x05  # unprotected program
TI_VAR_PROTPROG_TYPE = 0x06  # protected program
TI_VAR_APPVAR_TYPE = 0x15  # application variable (the type export8xv emits)
TI_VAR_FLAG_RAM = 0x00  # variable lives in RAM
TI_VAR_FLAG_ARCHIVED = 0x80  # variable archived to flash
def export8xv(filepath,filename,filedata):
    """Write *filedata* as an archived TI-8x appvar to filepath/filename.8xv.

    Python 2 only: byte sequences are assembled via ``str(bytearray(...))``
    string concatenation, which does not work on Python 3.
    """
    # Ensure that filedata is a string
    if isinstance(filedata,list): filedata = str(bytearray(filedata))
    # Add size bytes to file data as per (PROT)PROG/APPVAR data structure
    dsl = len(filedata)&0xFF
    dsh = (len(filedata)>>8)&0xFF
    filedata = str(bytearray([dsl,dsh]))+filedata
    # Construct variable header (0x0D = header length, little-endian sizes)
    vsl = len(filedata)&0xFF
    vsh = (len(filedata)>>8)&0xFF
    vh = str(bytearray([0x0D,0x00,vsl,vsh,TI_VAR_APPVAR_TYPE]))
    # on-calc name: exactly 8 bytes, NUL-padded
    vh += filename.ljust(8,'\x00')[:8]
    vh += str(bytearray([0x00,TI_VAR_FLAG_ARCHIVED,vsl,vsh]))
    # Pull together variable metadata for TI8X file header
    varentry = vh + filedata
    varsizel = len(varentry)&0xFF
    varsizeh = (len(varentry)>>8)&0xFF
    # checksum over the variable entry, stored little-endian at file end
    varchksum = sum([ord(i) for i in varentry])
    vchkl = varchksum&0xFF
    vchkh = (varchksum>>8)&0xFF
    # Construct TI8X file header
    h = "**TI83F*"
    h += str(bytearray([0x1A,0x0A,0x00]))
    h += "Rawr. Gravy. Steaks. Cherries!".ljust(42)[:42] #Always makes comments exactly 42 chars wide.
    h += str(bytearray([varsizel,varsizeh]))
    h += varentry
    h += str(bytearray([vchkl,vchkh]))
    # Write data out to file
    writeFile(np(filepath+"/"+filename+".8xv"),h)
    return
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Video window class
class Application(tk.Frame):
    """Tk preview window that displays video frames while they are encoded."""

    def __init__(self, master=None):
        """Build the window and an empty 96x72 canvas image placeholder."""
        tk.Frame.__init__(self, master)
        self.master.title("* Ohhhh yesss!")
        self.master.geometry('200x200')
        self.master.minsize(400,300)
        self.pack()
        # start with a blank RGB image; updateframe() swaps it out per frame
        self.img = ImageTk.PhotoImage(Image.new('RGB',(96,72),0))
        self.canvas = tk.Canvas(self.master,width=320,height=240)
        self.canvas.place(x=10,y=10,width=320,height=240)
        self.canvas.configure(bg='white',width=96,height=72,state=tk.NORMAL)
        self.imgobj = self.canvas.create_image(1,1,image=self.img,anchor=tk.NW,state=tk.NORMAL)

    def updateframe(self,pimg):
        """Show PIL image *pimg* on the canvas and pump the Tk event loop."""
        # keep a reference on self so Tk does not garbage-collect the photo
        self.img = ImageTk.PhotoImage(pimg)
        self.canvas.itemconfig(self.imgobj,image=self.img)
        self.update_idletasks()
        self.update()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Frame data packager
class CmprSeg():
    """Container pairing a segment id with its compressed payload."""

    def __init__(self, segid, data):
        # cache the payload length so callers can bin-pack without re-measuring
        self.size = len(data)
        self.data = data
        self.segid = segid
class Framebuf():
    def __init__(self,video_width,video_height,video_title='',video_author=''):
        """Set up an empty frame accumulator for one video stream."""
        self.frame_buffer = []          # raw bytes of the segment in progress
        self.cmpr_arr = []              # finished CmprSeg objects
        self.frames_per_segment = 30    # frames packed per compressed segment
        self.cur_frame = 0              # frames accumulated in frame_buffer
        self.cur_segment = 0            # next segment id to emit
        self.cmpr_len = 0               # running compressed size total
        self.raw_len = 0                # running uncompressed size total
        self.vid_w = video_width
        self.vid_h = video_height
        self.vid_title = video_title
        self.vid_author = video_author
def addframe(self,framedata):
global TEMP_DIR
if framedata:
framedata = str(bytearray(framedata))
self.frame_buffer.extend(framedata)
self.cur_frame += 1
if self.cur_frame >= self.frames_per_segment:
framedata = None
if not framedata and self.frame_buffer:
tfo = np(TEMP_DIR+"/tin")
tfc = np(TEMP_DIR+"/tout")
if os.path.exists(tfo): os.remove(tfo)
if os.path.exists(tfc): os.remove(tfc)
writeFile(tfo,self.frame_buffer)
FNULL = open("NUL","w")
subprocess.call([np(cwd+"/tools/zx7.exe"),tfo,tfc],stdout=FNULL)
self.cmpr_arr.append(CmprSeg(self.cur_segment,readFile(tfc)))
self.raw_len += len(self.frame_buffer)
self.cmpr_len += self.cmpr_arr[-1].size
sys.stdout.write("Output seg "+str(self.cur_segment)+" size "+str(self.cmpr_arr[-1].size)+" \r")
self.frame_buffer = []
self.cur_frame = 0
self.cur_segment += 1
def framecap(self,framedata):
if self.cur_frame:
for i in range(self.frames_per_segment-self.cur_frame):
self.addframe(framedata)
def flushtofile(self,output_filename,output_encoding):
global ENCODER_NAMES,OUTPUT_DIR
outfilename = str(os.path.splitext(os.path.basename(output_filename))[0])
video_decoder = ENCODER_NAMES[int(output_encoding)]
self.cmpr_arr = sorted(self.cmpr_arr,key=lambda i:i.size,reverse=True)
slack = -1
curfile = 0
curseg = 0
tslack = 0
maxslack = 65000
total_len = 0
while len(self.cmpr_arr)>0:
slack = maxslack
segs_in_file = 0
i = 0
wfiledata = ""
while i<len(self.cmpr_arr):
if self.cmpr_arr[i].size > slack:
i += 1
else:
a = self.cmpr_arr.pop(i)
s = struct.pack('<H',a.segid) + struct.pack('<H',a.size)
s += str(bytearray(a.data))
wfiledata += s
curseg += 1
segs_in_file += 1
slack -= a.size+4
print "Segment "+str(a.segid)+ " sized "+str(a.size)+" written."
wfilename = outfilename[:5]+str(curfile).zfill(3)
wtempdata = "8CEVDat" + outfilename.ljust(9,'\x00')[:9] #header, assocaited file
wtempdata+= str(bytearray([segs_in_file&0xFF]))
wfiledata = wtempdata + wfiledata
export8xv(OUTPUT_DIR,wfilename,wfiledata)
print "File output: "+str(wfilename)
curfile += 1
tslack += slack
total_len += len(wfiledata)
mfiledata = "8CEVDaH" + video_decoder.ljust(9,'\x00')[:9] #header and decoder name
mfiledata += self.vid_title + "\x00" #title string
mfiledata += self.vid_author + "\x00" #author string
mfiledata += struct.pack("<H",curseg)
mfiledata += struct.pack("<H",self.vid_w)
mfiledata += struct.pack("<H",self.vid_h)
mfiledata += struct.pack("B",self.frames_per_segment)
export8xv(OUTPUT_DIR,outfilename,mfiledata)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Image filtering
def gethsv(t):
    """Map an (R, G, B) tuple with 0-255 components to an HSV tuple in [0, 1]."""
    scaled = tuple(component/255.0 for component in t)
    return colorsys.rgb_to_hsv(*scaled)
def rgbpaltolist(s):
    """Turn a 768-byte RGB palette string into a 256-entry list of RGB tuples.

    Each channel is masked to 5 bits (&0xF8), duplicates are removed, black is
    guaranteed present and moved to the front, and the list is padded with
    black up to 256 entries.
    """
    black = (0, 0, 0)
    triples = []
    for idx in range(0, 768, 3):
        triples.append(
            (ord(s[idx]) & 0xF8, ord(s[idx+1]) & 0xF8, ord(s[idx+2]) & 0xF8))
    triples = list(set(triples))  # remove duplicates
    if black not in triples:
        triples.insert(0, black)  # black must always be in the palette
    triples.insert(0, triples.pop(triples.index(black)))  # black goes first
    triples.extend([black] * (256 - len(triples)))
    return triples
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Conversion to palettized image without dithering, sourced from:
# https://stackoverflow.com/questions/29433243/convert-image-to-specific-palette-using-pil-without-dithering
def quantizetopalette(silf, palette, dither=False):
    """Quantize image `silf` to the palette of `palette` (a "P"-mode image),
    optionally with dithering; returns a new palettized image.

    Adapted from:
    https://stackoverflow.com/questions/29433243/convert-image-to-specific-palette-using-pil-without-dithering
    """
    silf.load()
    palette.load()
    if palette.mode != "P":
        raise ValueError("bad mode for palette image")
    if silf.mode not in ("RGB", "L"):
        raise ValueError(
            "only RGB or L mode images can be quantized to a palette"
            )
    # Uses PIL's C-level convert with an explicit palette image.
    converted = silf.im.convert("P", 1 if dither else 0, palette.im)
    try:
        return silf._new(converted)
    except AttributeError:
        # older PIL spelling of _new
        return silf._makeself(converted)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def usage():
    """Print command-line help for the converter and return exit status 2."""
    #012345678901234567890123456789012345678901234567890123456789012345678901234567
    print "\ntoolkit.py is a video converter/packager utility for the TI-84 CE platform."
    print "Usage: python toolkit.py -i <in_video.name>"
    print "Additional options:"
    print "-e ENCODER = Uses a particular encoder. ENCODER are as follows:"
    print "     1 = 1bpp b/w, 3x scaling from 96 by X"
    print "     2 = 2bpp b/dg/lg/w, 3x scaling from 96 by X"
    print "     3 = (decoder not supported)"
    print "     4 = 1bpp b/w, no scaling from 176 by X"
    print "     5 = 4bpp 16 color, 3x scaling from 96 by X"
    print "     6 = 4bpp adaptive palette, 3x scaling from 96 by X"
    print " -d = Uses dithering. May increase filesize."
    print " -f = Force reconversion of video data"
    print ' -t "title" = Adds title information to the project'
    print '-a "author" = Adds author information to the project'
    return 2
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# It gets real.
# Parse the command line, load the persisted status file, and reconcile the
# two (command-line values win; the png cache is purged on input change).
print "Setting variables"
try: opts,args = getopt.gnu_getopt(sys.argv,"i:e:dt:a:f")
except:
    print "Err0"
    sys.exit(usage())
dithering = Image.NONE
vid_encoder = ''
invidname = ''
doffmpeg = False
vid_title = ''
vid_author = ''
for opt,arg in opts:
    if opt == '-h':
        print "Err1"
        sys.exit(usage())
    elif opt == '-i':
        invidname = arg
    elif opt == '-e':
        vid_encoder = arg
    elif opt == '-d':
        dithering = Image.FLOYDSTEINBERG
    elif opt == '-f':
        doffmpeg = True
    elif opt == '-t':
        vid_title = arg
    elif opt == '-a':
        vid_author = arg
#status file line numbers: 0=src path/fn; 1=encoding; 2=titlestr; 3=authorstr
status_file_array = []
if not os.path.isfile(STATUS_FILE):
    # Seed a default status file: empty source, encoder 1, no title/author.
    with open(STATUS_FILE,'w') as f:
        f.write("\n1\n\n\n")
with open(STATUS_FILE,"r") as f:
    for line in f: status_file_array.append(line.strip())
sf_fileget = status_file_array[0]
sf_encoder = status_file_array[1]
sf_title = status_file_array[2]
sf_author = status_file_array[3]
# Override sf variables if found on cmd line, else set vid stuff to sf.
if invidname:
    if sf_fileget and sf_fileget != invidname:
        # Input changed: stale extracted frames must not be reused.
        print "Input video name has changed since last build. Cleaning png buffer."
        for f in os.listdir(TEMP_PNG_DIR):
            fn = os.path.join(TEMP_PNG_DIR,f)
            try:
                if os.path.isfile(fn):
                    os.remove(fn)
            except Exception as e:
                print e
    sf_fileget = invidname
else: invidname = sf_fileget
# Reconcile title/author/encoder between the command line and the status file.
if vid_title: sf_title = vid_title
else: vid_title = sf_title
# BUG FIX: this previously stored vid_title into sf_author, so a run with
# both -t and -a persisted the title as the author.
if vid_author: sf_author = vid_author
else: vid_author = sf_author
if vid_encoder:
    if sf_encoder != vid_encoder:
        # Encoder changed: the cached frames are wrong, force re-extraction.
        doffmpeg = True
    sf_encoder = vid_encoder
else: vid_encoder = sf_encoder
# Validate that the input video exists, falling back to the persisted path.
if not os.path.isfile(invidname):
    if not os.path.isfile(sf_fileget):
        print "Error: File "+str(invidname)+" does not exist."
        sys.exit(2)
    else:
        invidname = sf_fileget
else:
    sf_fileget = invidname
# If no extracted frames are cached we must run ffmpeg regardless of -f.
flist = GETIMGNAMES()
if not flist: doffmpeg = True
#-----------------------------------------------------------------------------------
#VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV
if doffmpeg:
    # Pick the target horizontal resolution for the chosen encoder;
    # vres=-2 lets ffmpeg pick an even height preserving aspect ratio.
    if vid_encoder == '1':
        hres = 96
        vres = -2
    elif vid_encoder == '2':
        hres = 96
        vres = -2
    elif vid_encoder == '3':
        hres = 176
        vres = -2
    elif vid_encoder == '4':
        hres = 176
        vres = -2
    elif vid_encoder == '5':
        hres = 96
        vres = -2
    elif vid_encoder == '6':
        hres = 96
        vres = -2
    else:
        print "Illegal encoder value was used. Cannot encode video."
        sys.exit(2)
    of1 = np(TEMP_DIR+"/"+"t1.mp4")
    of2 = np(TEMP_DIR+"/"+"t2.mp4")
    ofimg = np(TEMP_PNG_DIR+'/i%05d.png')
    try:
        # Two-pass: rescale with nearest-neighbor, then dump numbered pngs.
        print "Converting video to target dimensions"
        FFmpeg(
            inputs = { invidname: '-y'},
            outputs = { of1: '-vf scale='+str(hres)+':'+str(vres)+':flags=neighbor'},
        ).run()
        print "Outputting individual frames to .png files"
        FFmpeg(
            inputs = {of1:'-y'},
            outputs = {ofimg:'-f image2'},
        ).run()
    except Exception as e:
        print e
        print "Terminating script."
        sys.exit(2)
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#-----------------------------------------------------------------------------------
# Persist the reconciled configuration back to the status file.
print "Saving config data"
status_file_array[0] = sf_fileget
status_file_array[1] = sf_encoder
status_file_array[2] = sf_title
status_file_array[3] = sf_author
with open(STATUS_FILE,"w") as f:
    for i in status_file_array: f.write(i+"\n")
print "Collecting image data..."
img_width,img_height = (0,0)
# Remove previously exported appvars that belong to this project
# (output files share the input name's first 5 characters).
fl = [f for f in os.listdir(OUTPUT_DIR) if os.path.isfile(np(OUTPUT_DIR+'/'+f)) and f[:5]==invidname[:5]]
for i in fl: os.remove(np(OUTPUT_DIR+'/'+i))
flist = sorted([f for f in os.listdir(TEMP_PNG_DIR) if os.path.isfile(os.path.join(TEMP_PNG_DIR,f))])
# Probe the first png to learn the frame dimensions.
for f in flist:
    if f.lower().endswith('.png'):
        timg = Image.open(GETIMGPATH(f))
        img_width,img_height = timg.size
        break
if not (img_width|img_height):
    print "Illegal image data passed. You must rebuild the video"
    print "Hint: Add the -f flag to force a rebuild"
    sys.exit()
fb = Framebuf(img_width,img_height,vid_title,vid_author)
fb.frames_per_segment = FPSEG_BY_ENCODER[int(vid_encoder)]
palimg = Image.new("P",(16,16))
newimgobj = Image.new("P",(img_width,img_height))
root | |
: List[str, ..., str]; default=None
should the stress? be printed; {YES, NO}
None : [NO] * nplies
nsm : float; default=0.
nonstructural mass per unit area
sb : float; default=0.
Allowable shear stress of the bonding material.
Used by the failure theory
ft : str; default=None
failure theory; {HILL, HOFF, TSAI, STRN, None}
tref : float; default=0.
reference temperature
ge : float; default=0.
structural damping
lam : str; default=None
symmetric flag; {SYM, MEM, BEND, SMEAR, SMCORE, None}
None : not symmmetric
z0 : float; default=None
Distance from the reference plane to the bottom surface
None : -1/2 * total_thickness
comment : str; default=''
a comment for the card
"""
prop = PCOMP(pid, mids, thicknesses, thetas, souts,
nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam,
z0=z0, comment=comment)
self._add_property_object(prop)
return prop
    def add_pcompg(self, pid, global_ply_ids, mids, thicknesses, thetas=None, souts=None,
                   nsm=0.0, sb=0.0, ft=None, tref=0.0, ge=0.0, lam=None, z0=None,
                   comment='') -> PCOMPG:
        """
        Creates a PCOMPG card

        Parameters
        ----------
        pid : int
            property id
        global_ply_ids : List[int]
            the ply id
        mids : List[int, ..., int]
            material ids for each ply
        thicknesses : List[float, ..., float]
            thicknesses for each ply
        thetas : List[float, ..., float]; default=None
            ply angle
            None : [0.] * nplies
        souts : List[str, ..., str]; default=None
            should the stress? be printed; {YES, NO}
            None : [NO] * nplies
        nsm : float; default=0.
            nonstructural mass per unit area
        sb : float; default=0.
            Allowable shear stress of the bonding material.
            Used by the failure theory
        ft : str; default=None
            failure theory; {HILL, HOFF, TSAI, STRN, None}
        tref : float; default=0.
            reference temperature
        ge : float; default=0.
            structural damping
        lam : str; default=None
            symmetric flag; {SYM, MEM, BEND, SMEAR, SMCORE, None}
            None : not symmetric
        z0 : float; default=None
            Distance from the reference plane to the bottom surface
            None : -1/2 * total_thickness
        comment : str; default=''
            a comment for the card
        """
        prop = PCOMPG(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,
                      nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0,
                      comment=comment)
        self._add_property_object(prop)
        return prop

    def add_pcomps(self, pid, global_ply_ids, mids, thicknesses, thetas,
                   cordm=0, psdir=13, sb=None, nb=None, tref=0.0, ge=0.0,
                   failure_theories=None, interlaminar_failure_theories=None,
                   souts=None, comment='') -> PCOMPS:
        """Creates a PCOMPS card"""
        prop = PCOMPS(pid, global_ply_ids, mids, thicknesses, thetas,
                      cordm, psdir, sb, nb, tref, ge,
                      failure_theories, interlaminar_failure_theories, souts,
                      comment=comment)
        self._add_property_object(prop)
        return prop

    def add_plplane(self, pid, mid, cid=0, stress_strain_output_location='GRID',
                    comment='') -> PLPLANE:
        """Creates a PLPLANE card"""
        prop = PLPLANE(pid, mid, cid=cid,
                       stress_strain_output_location=stress_strain_output_location,
                       comment=comment)
        self._add_property_object(prop)
        return prop

    def add_pplane(self, pid: int, mid: int, t: float=0.0, nsm: float=0.0,
                   formulation_option: int=0, comment: str='') -> PPLANE:
        """Creates a PPLANE card"""
        prop = PPLANE(pid, mid, t=t, nsm=nsm, formulation_option=formulation_option,
                      comment=comment)
        self._add_property_object(prop)
        return prop
    def add_cplstn3(self, eid, pid, nids, theta=0.0, comment='') -> CPLSTN3:
        """Creates a CPLSTN3 card"""
        elem = CPLSTN3(eid, pid, nids, theta=theta, comment=comment)
        self._add_element_object(elem)
        return elem
    def add_cplstn4(self, eid: int, pid: int, nids, theta: float=0.0,
                    comment: str='') -> CPLSTN4:
        """Creates a CPLSTN4 card"""
        elem = CPLSTN4(eid, pid, nids, theta=theta, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cplstn6(self, eid: int, pid: int, nids, theta: float=0.0,
                    comment: str='') -> CPLSTN6:
        """Creates a CPLSTN6 card"""
        elem = CPLSTN6(eid, pid, nids, theta=theta, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cplstn8(self, eid: int, pid: int, nids, theta: float=0.0,
                    comment: str='') -> CPLSTN8:
        """Creates a CPLSTN8 card"""
        elem = CPLSTN8(eid, pid, nids, theta=theta, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cplsts3(self, eid, pid, nids, theta=0.0,
                    tflag=0, T1=None, T2=None, T3=None, comment='') -> CPLSTS3:
        """Creates a CPLSTS3 card"""
        elem = CPLSTS3(eid, pid, nids, theta=theta,
                       tflag=tflag, T1=T1, T2=T2, T3=T3, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cplsts4(self, eid, pid, nids, theta=0.0,
                    tflag=0, T1=None, T2=None, T3=None, T4=None, comment='') -> CPLSTS4:
        """Creates a CPLSTS4 card"""
        elem = CPLSTS4(eid, pid, nids, theta=theta,
                       tflag=tflag, T1=T1, T2=T2, T3=T3, T4=T4, comment=comment)
        self._add_element_object(elem)
        return elem
def add_cplsts6(self, eid, pid, nids, theta=0.0,
tflag=0,
T1=None, T2=None, T3=None,
T4=None, T5=None, T6=None,
comment='') -> CPLSTS6:
"""Creates a CPLSTS6 card"""
elem = CPLSTS6(eid, pid, nids, theta=theta, comment=comment)
self._add_element_object(elem)
return elem
    def add_cplsts8(self, eid, pid, nids, theta=0.0,
                    tflag=0,
                    T1=None, T2=None, T3=None, T4=None,
                    T5=None, T6=None, T7=None, T8=None, comment='') -> CPLSTS8:
        """Creates a CPLSTS8 card"""
        # tflag/T1..T8 are the corner/edge thicknesses and their flag.
        elem = CPLSTS8(eid, pid, nids, theta=theta,
                       tflag=tflag,
                       T1=T1, T2=T2, T3=T3, T4=T4,
                       T5=T5, T6=T6, T7=T7, T8=T8,
                       comment=comment)
        self._add_element_object(elem)
        return elem
    def add_ctetra(self, eid, pid, nids, comment='') -> Union[CTETRA4, CTETRA10]:
        """
        Creates a CTETRA4/CTETRA10

        Parameters
        ----------
        eid : int
            element id
        pid : int
            property id (PSOLID, PLSOLID)
        nids : List[int]
            node ids; n=4 or 10
        comment : str; default=''
            a comment for the card
        """
        # 4 nodes -> linear tet; any other count is treated as quadratic.
        if len(nids) == 4:
            elem = CTETRA4(eid, pid, nids, comment=comment)
        else:
            elem = CTETRA10(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cpyram(self, eid, pid, nids, comment='') -> Union[CPYRAM5, CPYRAM13]:
        """
        Creates a CPYRAM5/CPYRAM13

        Parameters
        ----------
        eid : int
            element id
        pid : int
            property id (PSOLID, PLSOLID)
        nids : List[int]
            node ids; n=5 or 13
        comment : str; default=''
            a comment for the card
        """
        # 5 nodes -> linear pyramid; otherwise quadratic.
        if len(nids) == 5:
            elem = CPYRAM5(eid, pid, nids, comment=comment)
        else:
            elem = CPYRAM13(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_cpenta(self, eid, pid, nids, comment='') -> Union[CPENTA6, CPENTA15]:
        """
        Creates a CPENTA6/CPENTA15

        Parameters
        ----------
        eid : int
            element id
        pid : int
            property id (PSOLID, PLSOLID)
        nids : List[int]
            node ids; n=6 or 15
        comment : str; default=''
            a comment for the card
        """
        # 6 nodes -> linear penta; otherwise quadratic.
        if len(nids) == 6:
            elem = CPENTA6(eid, pid, nids, comment=comment)
        else:
            elem = CPENTA15(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_chexa(self, eid, pid, nids, comment='') -> Union[CHEXA8, CHEXA20]:
        """
        Creates a CHEXA8/CHEXA20

        Parameters
        ----------
        eid : int
            element id
        pid : int
            property id (PSOLID, PLSOLID)
        nids : List[int]
            node ids; n=8 or 20
        comment : str; default=''
            a comment for the card
        """
        # 8 nodes -> linear hex; otherwise quadratic.
        if len(nids) == 8:
            elem = CHEXA8(eid, pid, nids, comment=comment)
        else:
            elem = CHEXA20(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem
    def add_psolid(self, pid, mid, cordm=0, integ=None, stress=None, isop=None,
                   fctn='SMECH', comment='') -> PSOLID:
        """
        Creates a PSOLID card

        Parameters
        ----------
        pid : int
            property id
        mid : int
            material id
        cordm : int; default=0
            material coordinate system
        integ : int; default=None
            None-varies depending on element type
            0, 'BUBBLE'
            1, 'GAUSS'
            2, 'TWO'
            3, 'THREE'
            REDUCED
            FULL
        stress : int/str; default=None
            None/GRID, 1-GAUSS
        isop : int/str; default=None
            0-REDUCED
            1-FULL
        fctn : str; default='SMECH'
            PFLUID/SMECH
        comment : str; default=''
            a comment for the card
        """
        prop = PSOLID(pid, mid, cordm=cordm, integ=integ, stress=stress, isop=isop,
                      fctn=fctn, comment=comment)
        self._add_property_object(prop)
        return prop

    def add_plsolid(self, pid, mid, stress_strain='GRID', ge=0., comment='') -> PLSOLID:
        """
        Creates a PLSOLID card

        Parameters
        ----------
        pid : int
            property id
        mid : int
            material id
        stress_strain : str
            Location of stress and strain output
            valid types = {GRID, GAUSS}
        ge : float; default=0.
            damping coefficient
        comment : str; default=''
            a comment for the card
        """
        prop = PLSOLID(pid, mid, stress_strain=stress_strain, ge=ge, comment=comment)
        self._add_property_object(prop)
        return prop
    def add_crac2d(self, eid, pid, nids, comment='') -> CRAC2D:
        """Creates a CRAC2D card"""
        elem = CRAC2D(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem
    def add_prac2d(self, pid, mid, thick, iplane, nsm=0., gamma=0.5, phi=180.,
                   comment='') -> PRAC2D:
        """Creates a PRAC2D card"""
        prop = PRAC2D(pid, mid, thick, iplane, nsm=nsm, gamma=gamma, phi=phi,
                      comment=comment)
        self._add_property_object(prop)
        return prop

    def add_crac3d(self, eid, pid, nids, comment='') -> CRAC3D:
        """Creates a CRAC3D card"""
        elem = CRAC3D(eid, pid, nids, comment=comment)
        self._add_element_object(elem)
        return elem

    def add_prac3d(self, pid, mid, gamma=0.5, phi=180., comment='') -> PRAC3D:
        """Creates a PRAC3D card"""
        prop = PRAC3D(pid, mid, gamma=gamma, phi=phi, comment=comment)
        self._add_property_object(prop)
        return prop

    def add_genel_stiffness(self, eid, ul, ud, k, s=None) -> GENEL:
        """creates a GENEL card using the stiffness (K) approach"""
        assert k is not None
        genel = GENEL(eid, ul, ud, k, None, s)
        self._add_element_object(genel)
        return genel

    def add_genel_flexibility(self, eid, ul, ud, z, s=None) -> GENEL:
        """creates a GENEL card using the flexibility (Z) approach"""
        assert z is not None
        genel = GENEL(eid, ul, ud, None, z, s)
        self._add_element_object(genel)
        return genel

    def add_axic(self, nharmonics, comment='') -> AXIC:
        """Creates an AXIC card"""
        axic = AXIC(nharmonics, comment=comment)
        self._add_axic_object(axic)
        return axic

    def add_pointax(self, nid, ringax, phi, comment='') -> POINTAX:
        """Creates a POINTAX card"""
        node = POINTAX(nid, ringax, phi, comment=comment)
        self._add_ringax_object(node)
        return node
def add_ringax(self, nid, R, z, ps=None, comment='') -> RINGAX:
"""Creates a | |
assert "name" not in kw
kw ["name"] = name = self.__class__.__name__.lower ()
self.abbr = name.rsplit ("_", 1) [0]
self.__super.__init__ (** kw)
# end def __init__
    def _get_default (self) :
        """Return the user's preferred locale encoding as the default value."""
        import locale
        return locale.getpreferredencoding ()
    # end def _get_default
# end class _Encoding_
class _Number_ (_Spec_) :
    """Base class for numeric argument and option types"""

    def cook (self, value, cao = None) :
        ### For strings, try a safe eval first; remember any failure because
        ### `_cook` may still succeed ("08" doesn't work for `Int`, otherwise)
        pending = None
        if isinstance (value, pyk.string_types) :
            try :
                value = self._safe_eval (value)
            except Err as e :
                pending = e
        try :
            return self._cook (value)
        except (ValueError, TypeError) as e :
            raise pending or Err ("%s for %s `%s`" % (e, self.kind, self.name))
    # end def cook

    def _resolve_range_1 (self, value, cao, pat) :
        ### Grab the match groups up front: errors raised while cooking can
        ### trigger another pattern match that would overwrite them
        raw_head  = pat.head
        raw_tail  = pat.tail
        raw_delta = pat.delta
        lo   = self.cook (raw_head, cao)
        hi   = self.cook (raw_tail, cao) + 1
        step = self.cook (raw_delta or self.range_delta, cao)
        yield from self._resolved_range (lo, hi, step)
    # end def _resolve_range_1

    def _resolved_range (self, head, tail, delta) :
        return range (head, tail, delta)
    # end def _resolved_range

# end class _Number_
class Bool (_Spec_O_) :
    """Option with a boolean value"""

    implied_value = "True"
    needs_value   = False
    type_abbr     = "B"

    def cook (self, value, cao = None) :
        ### a bare occurrence of the option (no value) means True
        if value is None :
            return True
        if isinstance (value, pyk.string_types) :
            return value.lower () not in ("no", "0", "false") ### XXX I18N
        return bool (value)
    # end def cook

    def _auto_max_number (self, auto_split) :
        ### a boolean option can appear at most once
        return 1
    # end def _auto_max_number

    def _set_default (self, default) :
        return self.__super._set_default \
            (False if default is None else default)
    # end def _set_default

# end class Bool
class Binary (Bool) :
    """Option with a required boolean value"""

    ### unlike `Bool`, an explicit value must follow the option
    needs_value = True
    type_abbr = "Y"

# end class Binary
class Cmd_Choice (_Spec_Base_) :
    """Argument that selects a sub-command.

    The :meth:`__init__` method accepts the arguments:

    :obj:`name`
        The name of the argument that offers a choice of sub-commands.

    :obj:`* cmds`
        A tuple of sub-commands, each an instance of :class:`Cmd` with
        its own :obj:`handler`.

    :obj:`description`
        The description for the sub-command argument which must be passed
        as a keyword argument.
    """

    default = None
    hide = False
    kind = "sub-command"
    max_number = 1

    def __init__ (self, name, * cmds, ** kw) :
        self.name = name
        self.sub_cmds = dict ((c._name, c) for c in cmds)
        ### Trie allows unique abbreviations of sub-command names
        self.sub_abbr = Trie (self.sub_cmds)
        self.description = kw.pop ("description", "")
        assert not kw
    # end def __init__

    def __call__ (self, value, cao) :
        ### Switch `cao` over to the selected sub-command: its arguments,
        ### options, bundles, and configuration take effect from here on
        cao._cmd = sc = self._get_choice (value)
        cao._name = " ".join ([cao._name, sc._name])
        cao._min_args = sc._min_args
        cao._max_args = sc._max_args
        cao._arg_list [:] = sc._arg_list
        cao._arg_dict.clear ()
        cao._arg_dict.update (sc._arg_dict)
        cao._bun_dict.update (sc._bun_dict)
        cao._opt_dict.update (sc._opt_dict)
        cao._opt_abbr.update (sc._opt_dict, sc._opt_alias)
        cao._opt_alias.update (sc._opt_alias)
        cao._opt_conf.extend (sc._opt_conf)
    # end def __call__

    @property
    def choices (self) :
        return self.sub_cmds
    # end def choices

    @TFL.Meta.Once_Property
    def _max_name_length (self) :
        return max \
            (sc._max_name_length for sc in pyk.itervalues (self.sub_cmds))
    # end def _max_name_length

    def cooked_default (self, cao = None) :
        return self.default
    # end def cooked_default

    def raw_default (self, cao = None) :
        return None
    # end def raw_default

    def _choice_ambiguous (self, value, matches) :
        return \
            ( "Ambiguous sub-command `%s`, matches any of %s"
            % (value, portable_repr (matches))
            )
    # end def _choice_ambiguous

    def _choice_unknown (self, value, choices) :
        ### BUG FIX: the user-facing message misspelled "Unknown" as "Unkown"
        return \
            ( "Unknown sub-command `%s`, specify one of: (%s)"
            % (value, ", ".join (sorted (choices)))
            )
    # end def _choice_unknown

    def __getattr__ (self, name) :
        """Return the sub-command with `name`."""
        try :
            return self.sub_cmds [name]
        except KeyError :
            raise AttributeError (name)
    # end def __getattr__

    def __getitem__ (self, key) :
        """Return the sub-command named `key`."""
        return self.sub_cmds [key]
    # end def __getitem__

# end class Cmd_Choice
class Decimal (_Number_) :
    """Argument or option with a decimal value"""

    type_abbr = "D"

    def _cook (self, value) :
        ### `None` stays `None`; everything else goes through `decimal.Decimal`
        return None if value is None else decimal.Decimal (value)
    # end def _cook

# end class Decimal
class File_System_Encoding (_Encoding_) :
    """Encoding used to convert Unicode filenames into operating system filenames."""

    def _get_default (self) :
        ### defer to the interpreter's idea of the filesystem encoding
        encoding = sys.getfilesystemencoding ()
        return encoding
    # end def _get_default

# end class File_System_Encoding
class Float (_Number_) :
    """Argument or option with a floating point value"""

    type_abbr = "F"

    ### matches "head .. tail [: delta]" range specifications
    range_pat = Regexp \
        ( r"""^\s*"""
          r"""(?P<head> (?: 0[xX])? \d+ (?: \. \d* )?)"""
          r"""\s*"""
          r"""\.\."""
          r"""\s*"""
          r"""(?P<tail> (?: 0[xX])? \d+ (?: \. \d* )?)"""
          r"""\s*"""
          r"""(?: : (?P<delta> \d+ (?: \. \d* )?))?"""
          r"""\s*$"""
        , re.VERBOSE
        )

    _cook = float

    def _resolved_range (self, head, tail, delta) :
        ### step by repeated addition (floats don't fit builtin `range`)
        cur = head
        while cur < tail :
            yield cur
            cur += delta
    # end def _resolved_range

# end class Float
class Help (_Spec_O_) :
    """Option asking for help"""

    ### `-?` works as a shorthand for `-help`
    alias = "?"
    auto_split = ","
    implied_value = "default"
    needs_value = False
    line_length = 78
    ### the help categories a user may request
    topics = set \
        ( [ "args"
          , "buns"
          , "cmds"
          , "config"
          , "details"
          , "opts"
          , "summary"
          , "syntax"
          , "vals"
          ]
        )
    ### shown when `-help` is given without an explicit topic
    default_topics = set (["args", "buns", "opts", "summary"])
    ### one-line description per topic, used by the help-about-help output
    topic_map = dict \
        ( all = "Show help about the categories:\n"
              + ", ".join (sorted (topics))
        , args = "Show help about the arguments, if any."
        , buns = "Show help about the arguments/options bundles, if any."
        , cmds = "Show help about the sub-commands, if any."
        , config = "Show help about configuration options, if any."
        , details = "Show detailed explanation about command, if any."
        , help = "Show help about usage of the help option"
        , opts = "Show help about the options, if any."
        , summary = "Show summary about the usage of the command."
        , syntax = "Show help about the syntax of arguments and options."
        , vals =
            "Display the actual values of the options and arguments that "
            "would by used by this command invocation."
        )
    def __init__ (self) :
        ### the option is always called "help"; only the description varies
        self.__super.__init__ \
            ( name = "help"
            , description = _ ("Display help about command")
            )
    # end def __init__
    def __call__ (self, cao, indent = 0, spec = None) :
        ### delegate so subclasses can override `_handler` alone
        return self._handler (cao, indent, spec)
    # end def __call__
    def _handler (self, cao, indent = 0, spec = None) :
        """Emit the requested help sections for `cao`, in a fixed order."""
        ### a command may install its own helper, which takes precedence
        helper = cao._cmd._helper
        if helper :
            helper (cao)
        else :
            if spec is None :
                spec = getattr (cao, self.name)
            ### `nl` yields nothing on first use, a blank line afterwards,
            ### so sections are separated but no leading blank is printed
            nl = self.nl = self._nl_gen ()
            topics = self.topics
            wanted = set (v for v in spec if v)
            most_p = False
            if wanted == set (["default"]) or not wanted :
                wanted = self.default_topics
                most_p = True
            elif wanted.issubset (set (["all", "*"])) :
                wanted = topics
                most_p = True
            if "summary" in wanted :
                next (nl)
                self._help_summary (cao, indent)
            arg_p = any (a for a in cao._arg_list if not a.hide)
            want_args = "args" in wanted
            ### sub-sections are indented one level (4 spaces) deeper
            indent_most = indent + (4 * most_p)
            indent_want = indent + (4 * want_args)
            if want_args and arg_p :
                next (nl)
                self._help_args (cao, indent, heading = not most_p)
            if "cmds" in wanted :
                next (nl)
                self._help_cmds \
                    (cao, indent_want, heading = not want_args)
            opt_p = any \
                (o for o in pyk.itervalues (cao._opt_dict) if not o.hide)
            if "opts" in wanted and opt_p :
                next (nl)
                self._help_opts (cao, indent, heading = not most_p)
            if "buns" in wanted and cao._bun_dict :
                next (nl)
                self._help_buns (cao, indent_most)
            if "syntax" in wanted :
                next (nl)
                self._help_syntax (cao, indent_most)
            if "details" in wanted :
                next (nl)
                self._help_details (cao, indent_most, heading = most_p)
            if "config" in wanted and cao._opt_conf :
                next (nl)
                self._help_config (cao, indent_most)
            if "vals" in wanted :
                next (nl)
                self._help_values (cao, indent_most)
            ### unknown topic names fall through to help-about-help
            if not wanted.issubset (topics) :
                next (nl)
                self._help_help (cao, indent)
    # end def _handler
def _help_ao (self, ao, cao, head, max_l, prefix = "") :
if ao.hide :
return
name = ao.name
try :
v = cao ["%s:raw" % name]
except KeyError :
v = ""
print \
( "%s%s%-*s : %s%s%s"
% | |
[[] for _ in range(K)]
for i_state in range(K):
if state_indices[i_state].shape[0] > 0:
state_lens = np.diff(state_indices[i_state][:, 1:3], axis=1)
over_idxs = state_lens > min_threshold
over_threshold_instances[i_state] = state_indices[i_state][over_idxs[:, 0]]
np.random.shuffle(over_threshold_instances[i_state]) # shuffle instances
make_syllable_movies(
ims_orig=ims_orig,
state_list=over_threshold_instances,
trial_idxs=trial_idxs,
save_file=save_file,
max_frames=max_frames,
frame_rate=frame_rate,
n_buffer=n_buffer,
n_pre_frames=n_pre_frames,
n_rows=n_rows,
single_syllable=single_syllable)
def make_syllable_movies(
        ims_orig, state_list, trial_idxs, save_file=None, max_frames=400, frame_rate=10,
        n_buffer=5, n_pre_frames=3, n_rows=None, single_syllable=None):
    """Present video clips of each individual syllable in separate panels

    Parameters
    ----------
    ims_orig : :obj:`np.ndarray`
        shape (n_frames, n_channels, y_pix, x_pix)
    state_list : :obj:`list`
        each entry (one per state) contains all occurrences of that discrete state by
        :obj:`[chunk number, starting index, ending index]`
    trial_idxs : :obj:`array-like`
        indices into :obj:`states` for which trials should be plotted
    save_file : :obj:`str`
        full save file (path and filename)
    max_frames : :obj:`int`, optional
        maximum number of frames to animate
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    n_buffer : :obj:`int`, optional
        number of blank frames between syllable instances
    n_pre_frames : :obj:`int`, optional
        number of behavioral frames to precede each syllable instance
    n_rows : :obj:`int` or :obj:`NoneType`, optional
        number of rows in output movie
    single_syllable : :obj:`int` or :obj:`NoneType`, optional
        choose only a single state for movie

    """
    K = len(state_list)

    # Initialize syllable movie frames
    plt.clf()
    if single_syllable is not None:
        K = 1
        fig_width = 5
        n_rows = 1
    else:
        fig_width = 10  # aiming for dim 1 being 10
    # get video dims; channels are stacked vertically into one panel
    bs, n_channels, y_dim, x_dim = ims_orig[0].shape
    movie_dim1 = n_channels * y_dim
    movie_dim2 = x_dim
    if n_rows is None:
        n_rows = int(np.floor(np.sqrt(K)))
    n_cols = int(np.ceil(K / n_rows))

    fig_dim_div = movie_dim2 * n_cols / fig_width
    fig_width = (movie_dim2 * n_cols) / fig_dim_div
    fig_height = (movie_dim1 * n_rows) / fig_dim_div
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height))

    for i, ax in enumerate(fig.axes):
        ax.set_yticks([])
        ax.set_xticks([])
        if i >= K:
            ax.set_axis_off()
        elif single_syllable is not None:
            ax.set_title('Syllable %i' % single_syllable, fontsize=16)
        else:
            ax.set_title('Syllable %i' % i, fontsize=16)
    fig.tight_layout(pad=0, h_pad=1.005)

    imshow_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}

    # ims[t] collects the artists for animation frame t across all panels
    ims = [[] for _ in range(max_frames + bs + 200)]

    # Loop through syllables
    for i_k, ax in enumerate(fig.axes):

        # skip if no syllable in this axis
        if i_k >= K:
            continue
        print('processing syllable %i/%i' % (i_k + 1, K))
        # skip if no syllables are longer than threshold
        if len(state_list[i_k]) == 0:
            continue

        if single_syllable is not None:
            i_k = single_syllable

        i_chunk = 0
        i_frame = 0

        while i_frame < max_frames:

            if i_chunk >= len(state_list[i_k]):
                # show blank if out of syllable examples
                im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
                ims[i_frame].append(im)
                i_frame += 1
            else:

                # Get movies/latents
                chunk_idx = state_list[i_k][i_chunk, 0]
                which_trial = trial_idxs[chunk_idx]
                tr_beg = state_list[i_k][i_chunk, 1]
                tr_end = state_list[i_k][i_chunk, 2]
                batch = ims_orig[which_trial]
                # include up to n_pre_frames of context before the syllable
                movie_chunk = batch[max(tr_beg - n_pre_frames, 0):tr_end]

                movie_chunk = np.concatenate(
                    [movie_chunk[:, j] for j in range(movie_chunk.shape[1])], axis=1)

                # if np.sum(states[chunk_idx][tr_beg:tr_end-1] != i_k) > 0:
                #     raise ValueError('Misaligned states for syllable segmentation')

                # Loop over this chunk
                for i in range(movie_chunk.shape[0]):

                    im = ax.imshow(movie_chunk[i], **imshow_kwargs)
                    ims[i_frame].append(im)

                    # Add red box if start of syllable
                    syllable_start = n_pre_frames if tr_beg >= n_pre_frames else tr_beg

                    if syllable_start <= i < (syllable_start + 2):
                        rect = matplotlib.patches.Rectangle(
                            (5, 5), 10, 10, linewidth=1, edgecolor='r', facecolor='r')
                        im = ax.add_patch(rect)
                        ims[i_frame].append(im)

                    i_frame += 1

                # Add buffer black frames
                for j in range(n_buffer):
                    im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
                    ims[i_frame].append(im)
                    i_frame += 1

                i_chunk += 1

    print('creating animation...', end='')
    ani = animation.ArtistAnimation(
        fig,
        [ims[i] for i in range(len(ims)) if ims[i] != []], interval=20, blit=True, repeat=False)
    print('done')

    if save_file is not None:
        # put together file name
        if save_file[-3:] == 'mp4':
            save_file = save_file[:-3]
        if single_syllable is not None:
            state_str = str('_syllable-%02i' % single_syllable)
        else:
            state_str = ''
        save_file += state_str
        save_file += '.mp4'
        save_movie(save_file, ani, frame_rate=frame_rate)
def real_vs_sampled_wrapper(
        output_type, hparams, save_file, sess_idx, dtype='test', conditional=True, max_frames=400,
        frame_rate=20, n_buffer=5, xtick_locs=None, frame_rate_beh=None, format='png'):
    """Produce movie with (AE) reconstructed video and sampled video.
    This is a high-level function that loads the model described in the hparams dictionary and
    produces the necessary state sequences/samples. The sampled video can be completely
    unconditional (states and latents are sampled) or conditioned on the most likely state
    sequence.
    Parameters
    ----------
    output_type : :obj:`str`
        'plot' | 'movie' | 'both'
    hparams : :obj:`dict`
        needs to contain enough information to specify an autoencoder
    save_file : :obj:`str`
        full save file (path and filename)
    sess_idx : :obj:`int`, optional
        session index into data generator
    dtype : :obj:`str`, optional
        types of trials to make plot/video with; 'train' | 'val' | 'test'
    conditional : :obj:`bool`
        conditional vs unconditional samples; for creating reconstruction title
    max_frames : :obj:`int`, optional
        maximum number of frames to animate
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    n_buffer : :obj:`int`
        number of blank frames between animated trials if more one are needed to reach
        :obj:`max_frames`
    xtick_locs : :obj:`array-like`, optional
        tick locations in bin values for plot
    frame_rate_beh : :obj:`float`, optional
        behavioral video framerate; to properly relabel xticks
    format : :obj:`str`, optional
        any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
    Returns
    -------
    :obj:`matplotlib.figure.Figure`
        matplotlib figure handle if :obj:`output_type='plot'` or :obj:`output_type='both'`, else
        nothing returned (movie is saved)
    """
    # local imports to avoid circular dependencies at module load time
    from behavenet.data.utils import get_transforms_paths
    from behavenet.fitting.utils import get_expt_dir
    from behavenet.fitting.utils import get_session_dir
    # check input - cannot create sampled movies for arhmm-labels models (no mapping from labels to
    # frames)
    if hparams['model_class'].find('labels') > -1:
        if output_type == 'both' or output_type == 'movie':
            print('warning: cannot create video with "arhmm-labels" model; producing plots')
            output_type = 'plot'
    # load latents and states (observed and sampled)
    # NOTE(review): return_samples is hard-coded to 50 generated sample sequences here
    model_output = get_model_latents_states(
        hparams, '', sess_idx=sess_idx, return_samples=50, cond_sampling=conditional)
    if output_type == 'both' or output_type == 'movie':
        # load in AE decoder
        # Two lookup strategies: an explicit 'ae_model_path' in hparams wins; otherwise the
        # model/meta files are located next to the AE latents returned by get_transforms_paths.
        if hparams.get('ae_model_path', None) is not None:
            ae_model_file = os.path.join(hparams['ae_model_path'], 'best_val_model.pt')
            ae_arch = pickle.load(
                open(os.path.join(hparams['ae_model_path'], 'meta_tags.pkl'), 'rb'))
        else:
            hparams['session_dir'], sess_ids = get_session_dir(
                hparams, session_source=hparams.get('all_source', 'save'))
            hparams['expt_dir'] = get_expt_dir(hparams)
            _, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
            ae_model_file = os.path.join(os.path.dirname(latents_file), 'best_val_model.pt')
            ae_arch = pickle.load(
                open(os.path.join(os.path.dirname(latents_file), 'meta_tags.pkl'), 'rb'))
        print('loading model from %s' % ae_model_file)
        ae_model = AE(ae_arch)
        # map_location keeps CPU-only machines working even for GPU-trained checkpoints
        ae_model.load_state_dict(
            torch.load(ae_model_file, map_location=lambda storage, loc: storage))
        ae_model.eval()
        n_channels = ae_model.hparams['n_input_channels']
        y_pix = ae_model.hparams['y_pixels']
        x_pix = ae_model.hparams['x_pixels']
        # push observed latents through ae decoder
        # frames are accumulated trial by trial until max_frames is reached; channels are
        # concatenated vertically so each frame is (n_channels * y_pix, x_pix)
        ims_recon = np.zeros((0, n_channels * y_pix, x_pix))
        i_trial = 0
        while ims_recon.shape[0] < max_frames:
            recon = ae_model.decoding(
                torch.tensor(model_output['latents'][dtype][i_trial]).float(), None, None). \
                cpu().detach().numpy()
            recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
            zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix))  # add a few black frames
            ims_recon = np.concatenate((ims_recon, recon, zero_frames), axis=0)
            i_trial += 1
        # push sampled latents through ae decoder (same accumulation scheme as above)
        ims_recon_samp = np.zeros((0, n_channels * y_pix, x_pix))
        i_trial = 0
        while ims_recon_samp.shape[0] < max_frames:
            recon = ae_model.decoding(torch.tensor(
                model_output['latents_gen'][i_trial]).float(), None, None).cpu().detach().numpy()
            recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
            zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix))  # add a few black frames
            ims_recon_samp = np.concatenate((ims_recon_samp, recon, zero_frames), axis=0)
            i_trial += 1
        make_real_vs_sampled_movies(
            ims_recon, ims_recon_samp, conditional=conditional, save_file=save_file,
            frame_rate=frame_rate)
    if output_type == 'both' or output_type == 'plot':
        # plot only uses the first trial of each sequence type
        i_trial = 0
        latents = model_output['latents'][dtype][i_trial][:max_frames]
        states = model_output['states'][dtype][i_trial][:max_frames]
        latents_samp = model_output['latents_gen'][i_trial][:max_frames]
        if not conditional:
            states_samp = model_output['states_gen'][i_trial][:max_frames]
        else:
            states_samp = []
        fig = plot_real_vs_sampled(
            latents, latents_samp, states, states_samp, save_file=save_file, xtick_locs=xtick_locs,
            frame_rate=hparams['frame_rate'] if frame_rate_beh is None else frame_rate_beh,
            format=format)
    if output_type == 'movie':
        return None
    elif output_type == 'both' or output_type == 'plot':
        return fig
    else:
        raise ValueError('"%s" is an invalid output_type' % output_type)
def make_real_vs_sampled_movies(
ims_recon, ims_recon_samp, conditional, save_file=None, frame_rate=15):
"""Produce movie with (AE) reconstructed video and sampled video.
Parameters
----------
ims_recon : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
ims_recon_samp : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
conditional : :obj:`bool`
conditional vs unconditional samples; for creating reconstruction title
save_file : :obj:`str`, optional
full save file (path and filename)
frame_rate : :obj:`float`, optional
frame rate of saved movie
"""
n_frames = ims_recon.shape[0]
n_plots = 2
[y_pix, x_pix] = ims_recon[0].shape
fig_dim_div = x_pix * n_plots / 10 # aiming for dim 1 being 10
x_dim = x_pix * n_plots / fig_dim_div
y_dim = y_pix / fig_dim_div
fig, axes = plt.subplots(1, n_plots, figsize=(x_dim, y_dim))
for j in range(2):
axes[j].set_xticks([])
axes[j].set_yticks([])
axes[0].set_title('Real Reconstructions\n', | |
# <reponame>djarpin/sagemaker-python-sdk
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function, absolute_import
import json
import logging
from abc import ABCMeta
from abc import abstractmethod
from six import with_metaclass, string_types
from sagemaker.fw_utils import tar_and_upload_dir
from sagemaker.fw_utils import parse_s3_url
from sagemaker.fw_utils import UploadedCode
from sagemaker.model import Model
from sagemaker.model import (SCRIPT_PARAM_NAME, DIR_PARAM_NAME, CLOUDWATCH_METRICS_PARAM_NAME,
CONTAINER_LOG_LEVEL_PARAM_NAME, JOB_NAME_PARAM_NAME, SAGEMAKER_REGION_PARAM_NAME)
from sagemaker.predictor import RealTimePredictor
from sagemaker.session import Session
from sagemaker.session import s3_input
from sagemaker.utils import base_name_from_image, name_from_base
class EstimatorBase(with_metaclass(ABCMeta, object)):
    """Handle end-to-end Amazon SageMaker training and deployment tasks.
    For introduction to model training and deployment, see
    http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
    Subclasses must define a way to determine what image to use for training,
    what hyperparameters to use, and how to create an appropriate predictor instance.
    """

    def __init__(self, role, train_instance_count, train_instance_type,
                 train_volume_size=30, train_max_run=24 * 60 * 60, input_mode='File',
                 output_path=None, output_kms_key=None, base_job_name=None, sagemaker_session=None):
        """Initialize an ``EstimatorBase`` instance.
        Args:
            role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
                that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
                After the endpoint is created, the inference code might use the IAM role,
                if it needs to access an AWS resource.
            train_instance_count (int): Number of Amazon EC2 instances to use for training.
            train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
            train_volume_size (int): Size in GB of the EBS volume to use for storing input data
                during training (default: 30). Must be large enough to store training data if File Mode is used
                (which is the default).
            train_max_run (int): Timeout in seconds for training (default: 24 * 60 * 60).
                After this amount of time Amazon SageMaker terminates the job regardless of its current status.
            input_mode (str): The input mode that the algorithm supports (default: 'File'). Valid modes:
                'File' - Amazon SageMaker copies the training dataset from the S3 location to a local directory.
                'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
            output_path (str): S3 location for saving the training result (model artifacts and output files).
                If not specified, results are stored to a default bucket. If the bucket with the specific name
                does not exist, the estimator creates the bucket during the
                :meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
            output_kms_key (str): Optional. KMS key ID for encrypting the training output (default: None).
            base_job_name (str): Prefix for training job name when the :meth:`~sagemaker.estimator.EstimatorBase.fit`
                method launches. If not specified, the estimator generates a default job name, based on
                the training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
                Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
                using the default AWS configuration chain.
        """
        self.role = role
        self.train_instance_count = train_instance_count
        self.train_instance_type = train_instance_type
        self.train_volume_size = train_volume_size
        self.train_max_run = train_max_run
        self.input_mode = input_mode
        # fall back to a Session built from the default AWS configuration chain
        self.sagemaker_session = sagemaker_session or Session()
        self.base_job_name = base_job_name
        self._current_job_name = None
        self.output_path = output_path
        self.output_kms_key = output_kms_key
        # set by fit(); None until a training job has been launched
        self.latest_training_job = None

    @abstractmethod
    def train_image(self):
        """Return the Docker image to use for training.
        The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does the model training, calls this method to
        find the image to use for model training.
        Returns:
            str: The URI of the Docker image.
        """
        pass

    @abstractmethod
    def hyperparameters(self):
        """Return the hyperparameters as a dictionary to use for training.
        The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which trains the model, calls this method to
        find the hyperparameters.
        Returns:
            dict[str, str]: The hyperparameters.
        """
        pass

    def fit(self, inputs, wait=True, logs=True, job_name=None):
        """Train a model using the input training dataset.
        The API calls the Amazon SageMaker CreateTrainingJob API to start model training.
        The API uses configuration you provided to create the estimator and the
        specified input training data to send the CreatingTrainingJob request to Amazon SageMaker.
        This is a synchronous operation. After the model training successfully completes,
        you can call the ``deploy()`` method to host the model using the Amazon SageMaker hosting services.
        Args:
            inputs (str or dict or sagemaker.session.s3_input): Information about the training data.
                This can be one of three types:
                * (str) the S3 location where training data is saved.
                * (dict[str, str] or dict[str, sagemaker.session.s3_input]) If using multiple channels for
                    training data, you can specify a dict mapping channel names
                    to strings or :func:`~sagemaker.session.s3_input` objects.
                * (sagemaker.session.s3_input) - channel configuration for S3 data sources that can provide
                    additional information about the training dataset. See :func:`sagemaker.session.s3_input`
                    for full details.
            wait (bool): Whether the call should wait until the job completes (default: True).
            logs (bool): Whether to show the logs produced by the job.
                Only meaningful when wait is True (default: True).
            job_name (str): Training job name. If not specified, the estimator generates a default job name,
                based on the training image name and current timestamp.
        """
        if job_name is not None:
            self._current_job_name = job_name
        else:
            # make sure the job name is unique for each invocation, honor supplied base_job_name or generate it
            base_name = self.base_job_name or base_name_from_image(self.train_image())
            self._current_job_name = name_from_base(base_name)
        # if output_path was specified we use it otherwise initialize here
        if self.output_path is None:
            self.output_path = 's3://{}/'.format(self.sagemaker_session.default_bucket())
        self.latest_training_job = _TrainingJob.start_new(self, inputs)
        if wait:
            self.latest_training_job.wait(logs=logs)
        else:
            # BUG FIX: ``NotImplemented`` is a comparison singleton, not an exception;
            # calling it raised ``TypeError``. ``NotImplementedError`` is the intended type.
            raise NotImplementedError('Asynchronous fit not available')

    def deploy(self, initial_instance_count, instance_type, endpoint_name=None, **kwargs):
        """Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object.
        More information:
        http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
        Args:
            initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.
            instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
                for example, 'ml.c4.xlarge'.
            endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of
                the training job is used.
            **kwargs: Passed to invocation of ``create_model()``. Implementations may customize
                ``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
                For more, see the implementation docs.
        Returns:
            sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,
                which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.
        """
        if not self.latest_training_job:
            raise RuntimeError('Estimator has not been fit yet.')
        endpoint_name = endpoint_name or self.latest_training_job.name
        # remembered so subclasses can pick an inference image matching the deploy hardware
        self.deploy_instance_type = instance_type
        return self.create_model(**kwargs).deploy(
            instance_type=instance_type,
            initial_instance_count=initial_instance_count,
            endpoint_name=endpoint_name)

    @property
    def model_data(self):
        """str: The model location in S3. Only set if Estimator has been ``fit()``."""
        return self.sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=self.latest_training_job.name)['ModelArtifacts']['S3ModelArtifacts']

    @abstractmethod
    def create_model(self, **kwargs):
        """Create a SageMaker ``Model`` object that can be deployed to an ``Endpoint``.
        Args:
            **kwargs: Keyword arguments used by the implemented method for creating the ``Model``.
        Returns:
            sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
        """
        pass

    @staticmethod
    def _prepare_estimator_params_from_job_description(job_details):
        # Translate a DescribeTrainingJob response into constructor kwargs plus the
        # hyperparameters and training image needed to re-create an estimator.
        estimator_params = dict()
        estimator_params['role'] = job_details['RoleArn']
        estimator_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']
        estimator_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']
        estimator_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']
        estimator_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']
        estimator_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']
        estimator_params['base_job_name'] = job_details['TrainingJobName']
        estimator_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']
        estimator_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']
        return estimator_params, job_details['HyperParameters'], job_details['AlgorithmSpecification']['TrainingImage']

    def delete_endpoint(self):
        """Delete an Amazon SageMaker ``Endpoint``.
        Raises:
            ValueError: If the endpoint does not exist.
        """
        if self.latest_training_job is None:
            raise ValueError('Endpoint was not created yet')
        # endpoints created by deploy() share the training job's name
        self.sagemaker_session.delete_endpoint(self.latest_training_job.name)
class _TrainingJob(object):
    def __init__(self, sagemaker_session, training_job_name):
        """Store the session and job name of an existing/launched training job.
        Args:
            sagemaker_session (sagemaker.session.Session): Session used to interact with SageMaker APIs.
            training_job_name (str): Name of the Amazon SageMaker training job.
        """
        self.sagemaker_session = sagemaker_session
        self.job_name = training_job_name
@classmethod
def start_new(cls, estimator, inputs):
"""Create a new Amazon SageMaker training job from the estimator.
Args:
estimator (sagemaker.estimator.Framework): Estimator object created by the user.
inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`.
Returns:
sagemaker.estimator.Framework: Constructed object that captures all information about the started job.
"""
input_config = _TrainingJob._format_inputs_to_input_config(inputs)
role = estimator.sagemaker_session.expand_role(estimator.role)
output_config = _TrainingJob._prepare_output_config(estimator.output_path, estimator.output_kms_key)
resource_config = _TrainingJob._prepare_resource_config(estimator.train_instance_count,
estimator.train_instance_type,
estimator.train_volume_size)
stop_condition = _TrainingJob._prepare_stopping_condition(estimator.train_max_run)
if estimator.hyperparameters() is not None:
hyperparameters = {str(k): str(v) for (k, v) in estimator.hyperparameters().items()}
estimator.sagemaker_session.train(image=estimator.train_image(), input_mode=estimator.input_mode,
input_config=input_config, role=role, job_name=estimator._current_job_name,
output_config=output_config, resource_config=resource_config,
hyperparameters=hyperparameters, stop_condition=stop_condition)
return cls(estimator.sagemaker_session, estimator._current_job_name)
@staticmethod
def _format_inputs_to_input_config(inputs):
input_dict = {}
if isinstance(inputs, string_types):
input_dict['training'] = _TrainingJob._format_s3_uri_input(inputs)
elif | |
- err_q else 180 - err_q
err = [err_t, err_q]
if np.isnan(err_t) or np.isnan(err_q):
print('pose_est=\n', pose2)
print('pose=\n', pose1)
return err
def err_plot():
    """Benchmark all pose algorithms over ``n_iter`` random extrinsics (Test 1).

    Uses module-level globals: ``n``, ``n_iter``, ``n_algos``, ``sigma``,
    ``algos``, ``algo_names``, ``algo_ls``, ``cam_intrinsic``.
    Returns an HTML snippet with the two comparison plots and a (7, 4) array of
    [mean_t, median_t, mean_q, median_q] per algorithm.
    NOTE(review): the result array is hard-coded to 7 rows — assumes
    ``n_algos == 7``; confirm against the algorithm table.
    """
    # Test 1
    # Define object points and image points
    obj_pts = np.random.randint(-40, 40, [n, 3])
    obj_pts = obj_pts.astype(float)
    obj_pts[0, :] = [0, 0, 0]  # pin the first object point at the origin
    img_pts = np.empty([n, 3])
    img_pts[:, 2] = 1  # homogeneous image coordinates
    pose_est = np.empty([6, 1])
    pose_act = np.empty([6, 1])
    err_net_t = []
    err_net_q = []
    for it in np.arange(0, n_iter):
        # Generate random camera extrinsic matrix
        # (random rotation axis, fixed 120-degree angle; translation fixed)
        v = 2 * np.random.random([3]) - np.array([1, 1, 1])
        v = v / np.linalg.norm(v)
        #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
        R = vector2RotMat(v, np.pi * 2 / 3)
        q = RotMat2quat(R)
        t = np.array([0.0, 0.0, 200.0])
        # Compute image points based on actual extrinsic matrix and add noise to measurements
        for i in range(0, n):
            pt = np.dot(R, obj_pts[i, :]) + t
            img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
        img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
        pose_act[0:3, 0] = t
        pose_act[3:6, 0] = q[1:4]  # vector part of the quaternion only
        # Use the c-library to compute the pose
        err_t = []
        err_q = []
        for i in np.arange(0, n_algos):
            # each algo writes its estimate into pose_est in place
            algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
            e = calc_err(pose_est, pose_act)
            err_t.append(e[0])
            err_q.append(e[1])
            if np.isnan(e[0]) or np.isnan(e[1]):
                # diagnostic dump for diverged solutions
                print('algo=\n', algo_names[i])
                print('err=\n', e)
        err_net_t.append(err_t)
        err_net_q.append(err_q)
        printProgress(it, n_iter, prefix='Test' + str(1) +
                      ' Progress:', suffix='Complete', barLength=50)
    # aggregate per-algorithm statistics over all iterations
    mean_err_t = np.mean(err_net_t, axis=0)
    mean_err_q = np.mean(err_net_q, axis=0)
    median_err_t = np.median(err_net_t, axis=0)
    median_err_q = np.median(err_net_q, axis=0)
    vals = np.empty([7, 4])
    vals[:, 0] = mean_err_t
    vals[:, 1] = median_err_t
    vals[:, 2] = mean_err_q
    vals[:, 3] = median_err_q
    """
    for i in np.arange(0, n_algos):
        print 'mean_err_t_' + algo_names[i] + '=', mean_err_t[i], 'median_err_t_' + algo_names[i] + '=', median_err_t[i]
        print 'mean_err_q_' + algo_names[i] + '=', mean_err_q[i], 'median_err_q_' + algo_names[i] + '=', median_err_q[i]
    """
    it = np.arange(0, n_iter)
    err_net_t = np.array(err_net_t)
    err_net_q = np.array(err_net_q)
    # assemble the HTML report: two mpld3 plots side by side in a table
    s = '<h2> Translation error and Rotation error for 100 iterations (R - Randomly varying, t - fixed) </h2>'
    s1 = display_comparison_plot_mpld3(it, err_net_t, names=algo_names, line_styles=algo_ls,
                                       title='% Translation Error Plot', xtitle='Iteration', ytitle='e_t', ylim=[0, 2], figname='err_t')
    s2 = display_comparison_plot_mpld3(it, err_net_q, names=algo_names, line_styles=algo_ls,
                                       title='Rotation Error Plot (deg)', xtitle='Iteration', ytitle='e_q', ylim=[0, 1], figname='err_q')
    s = s + '\n <table > \n <tr > \n <td > \n' + s1 + \
        '</td > \n <td> \n' + s2 + '</td> \n </tr> \n </table> \n'
    return s, vals
"""
comp_arr = np.zeros([n_iter,2])
for i in np.arange(0,n_iter):
comp_arr[i,:] = 2*mean_err_p3p
nconv_epnp = float(np.sum(err_epnp>comp_arr))/n_iter*100
nconv_dls = float(np.sum(err_dls>comp_arr))/n_iter*100
nconv_ppnp = float(np.sum(err_ppnp>comp_arr))/n_iter*100
nconv_posit = float(np.sum(err_posit>comp_arr))/n_iter*100
nconv_lhm = float(np.sum(err_lhm>comp_arr))/n_iter*100
nconv_p3p = float(np.sum(err_p3p>comp_arr))/n_iter*100
plt.figure()
xvals = ['epnp', 'dls', 'ppnp', 'posit','lhm', 'p3p']
xvals_int = np.arange(0,n_algos)
yvals = [nconv_epnp, nconv_dls, nconv_ppnp, nconv_posit, nconv_lhm, nconv_p3p]
plt.bar(xvals_int, yvals, align='center')
plt.xticks(xvals_int, xvals)
plt.title('%Divergence')
plt.show()
plt.savefig('divergence.pdf')
"""
def err_statistics_fcn_n():
    """Benchmark pose algorithms while varying the number of correspondences (Test 2).

    Uses module-level globals: ``n_range``, ``n_iter``, ``n_algos``, ``sigma``,
    ``algos``, ``algo_names``, ``algo_ls``, ``cam_intrinsic``.
    Returns an HTML snippet containing mean/median translation and rotation
    error plots as a function of n.
    NOTE(review): the x-axis is hard-coded to ``np.arange(5, 25)`` and the
    progress index to ``n - 5`` — assumes ``n_range == range(5, 25)``; confirm.
    """
    # Test 2
    mean_err_t_net = []
    mean_err_q_net = []
    median_err_t_net = []
    median_err_q_net = []
    for n in n_range:
        # Define object points and image points
        obj_pts = np.random.randint(-40, 40, [n, 3])
        obj_pts = obj_pts.astype(float)
        obj_pts[0, :] = [0, 0, 0]  # pin the first object point at the origin
        img_pts = np.empty([n, 3])
        img_pts[:, 2] = 1  # homogeneous image coordinates
        pose_est = np.empty([6, 1])
        pose_act = np.empty([6, 1])
        err_net_t = []
        err_net_q = []
        for it in np.arange(0, n_iter):
            # Define camera extrinsic matrix
            # (random rotation axis, fixed 120-degree angle; translation fixed)
            v = 2 * np.random.random([3]) - np.array([1, 1, 1])
            v = v / np.linalg.norm(v)
            #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
            R = vector2RotMat(v, np.pi * 2 / 3)
            q = RotMat2quat(R)
            t = np.array([0.0, 0.0, 200.0])
            # Compute image points based on actual extrinsic matrix and add noise to measurements
            for i in range(0, n):
                pt = np.dot(R, obj_pts[i, :]) + t
                img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
            img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
            pose_act[0:3, 0] = t
            pose_act[3:6, 0] = q[1:4]  # vector part of the quaternion only
            # Use the c-library to compute the pose
            err_t = []
            err_q = []
            for i in range(0, n_algos):
                # each algo writes its estimate into pose_est in place
                algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
                e = calc_err(pose_est, pose_act)
                err_t.append(e[0])
                err_q.append(e[1])
            err_net_t.append(err_t)
            err_net_q.append(err_q)
        # aggregate per-algorithm statistics for this value of n
        mean_err_t_net.append(np.mean(err_net_t, axis=0))
        mean_err_q_net.append(np.mean(err_net_q, axis=0))
        median_err_t_net.append(np.median(err_net_t, axis=0))
        median_err_q_net.append(np.median(err_net_q, axis=0))
        printProgress(n - 5, len(n_range), prefix='Test' + str(3) +
                      ' Progress:', suffix='Complete', barLength=50)
    it = np.arange(5, 25)
    mean_err_t_net = np.array(mean_err_t_net)
    mean_err_q_net = np.array(mean_err_q_net)
    median_err_t_net = np.array(median_err_t_net)
    median_err_q_net = np.array(median_err_q_net)
    # assemble the HTML report: four mpld3 plots in a 2x2 table
    s = '<h2> Mean and Median error in Translation and Rotation with varying 2d/3d correspondences (n) </h2>'
    s1 = display_comparison_plot_mpld3(it, mean_err_t_net, names=algo_names, line_styles=algo_ls,
                                       title='Mean Translation %Error Plot', xtitle='n', ytitle=r'% Translation error e_t', ylim=[0, 10], figname='mean_err_t')
    s2 = display_comparison_plot_mpld3(it, mean_err_q_net, names=algo_names, line_styles=algo_ls,
                                       title='Mean Rotation Error Plot (deg)', xtitle='n', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='mean_err_q')
    s3 = display_comparison_plot_mpld3(it, median_err_t_net, names=algo_names, line_styles=algo_ls,
                                       title='Median Translation %Error Plot', xtitle='n', ytitle=r'% Translation error e_t', ylim=[0, 1], figname='median_err_t')
    s4 = display_comparison_plot_mpld3(it, median_err_q_net, names=algo_names, line_styles=algo_ls,
                                       title='Median Rotation Error Plot (deg)', xtitle='n', ytitle=r'Rotation error e_q(deg)', ylim=[0, 0.5], figname='median_err_q')
    s = s + '\n<table>\n <tr>\n <td>\n' + s1 + '</td>\n <td>\n' + s2 + '</td>\n </tr>\n' + \
        '\n<tr>\n <td>\n' + s3 + '\n</td>\n <td>\n' + s4 + '\n</td>\n </tr>\n </table>\n'
    return s
def err_statistics_fcn_sigma():
    """Benchmark pose algorithms while varying the measurement noise sigma (Test 3).

    Uses module-level globals: ``sigma_range``, ``n_iter``, ``n_algos``,
    ``algos``, ``algo_names``, ``algo_ls``, ``cam_intrinsic``.
    Returns an HTML snippet containing mean/median translation and rotation
    error plots as a function of sigma.
    NOTE(review): progress prefix says 'Test2' while this is Test 3 in the
    comments; left as-is to avoid changing output, but worth confirming.
    """
    # Test 3
    mean_err_t_net = []
    mean_err_q_net = []
    median_err_t_net = []
    median_err_q_net = []
    n = 10
    for sigma in sigma_range:
        # Define object points and image points
        obj_pts = np.random.randint(-40, 40, [n, 3])
        obj_pts = obj_pts.astype(float)
        obj_pts[0, :] = [0, 0, 0]  # pin the first object point at the origin
        img_pts = np.empty([n, 3])
        img_pts[:, 2] = 1  # homogeneous image coordinates
        pose_est = np.empty([6, 1])
        pose_act = np.empty([6, 1])
        err_net_t = []
        err_net_q = []
        for it in np.arange(0, n_iter):
            # Define camera extrinsic matrix
            # (random rotation axis, fixed 120-degree angle; translation fixed)
            v = 2 * np.random.random([3]) - np.array([1, 1, 1])
            v = v / np.linalg.norm(v)
            #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
            R = vector2RotMat(v, np.pi * 2 / 3)
            q = RotMat2quat(R)
            t = np.array([0.0, 0.0, 200.0])
            # Compute image points based on actual extrinsic matrix and add noise to measurements
            for i in range(0, n):
                pt = np.dot(R, obj_pts[i, :]) + t
                img_pts[i, 0:2] = np.array([pt[0] / pt[2], pt[1] / pt[2]])
            img_pts[:, 0:2] = img_pts[:, 0:2] + sigma * np.random.randn(n, 2)
            pose_act[0:3, 0] = t
            pose_act[3:6, 0] = q[1:4]  # vector part of the quaternion only
            # Use the c-library to compute the pose
            err_t = []
            err_q = []
            for i in range(0, n_algos):
                # each algo writes its estimate into pose_est in place
                algos[i](obj_pts, img_pts, n, cam_intrinsic, pose_est)
                e = calc_err(pose_est, pose_act)
                err_t.append(e[0])
                err_q.append(e[1])
            err_net_t.append(err_t)
            err_net_q.append(err_q)
        # aggregate per-algorithm statistics for this value of sigma
        mean_err_t_net.append(np.mean(err_net_t, axis=0))
        mean_err_q_net.append(np.mean(err_net_q, axis=0))
        median_err_t_net.append(np.median(err_net_t, axis=0))
        median_err_q_net.append(np.median(err_net_q, axis=0))
        printProgress(sigma * 1000, len(sigma_range), prefix='Test' + str(2) +
                      ' Progress:', suffix='Complete', barLength=50)
    it = np.arange(0.001, 0.010, 0.001)
    mean_err_t_net = np.array(mean_err_t_net)
    # BUG FIX: the rotation-error arrays were built from the translation-error
    # lists (copy-paste error), so the q plots silently showed t data.
    mean_err_q_net = np.array(mean_err_q_net)
    median_err_t_net = np.array(median_err_t_net)
    median_err_q_net = np.array(median_err_q_net)
    # assemble the HTML report: four mpld3 plots in a 2x2 table
    s = '\n<h2>\n Mean and Median error in Translation and Rotation with varying noise variance (sigma)\n </h2>\n'
    s1 = display_comparison_plot_mpld3(it, mean_err_t_net, names=algo_names, line_styles=algo_ls, title='Mean Translation %Error Plot',
                                       xtitle=r'\sigma', ytitle=r'% Translation error e_t', ylim=[0, 10], figname='mean_sigma_err_t')
    s2 = display_comparison_plot_mpld3(it, mean_err_q_net, names=algo_names, line_styles=algo_ls, title='Mean Rotation Error Plot (deg)',
                                       xtitle=r'\sigma', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='mean_sigma_err_q')
    s3 = display_comparison_plot_mpld3(it, median_err_t_net, names=algo_names, line_styles=algo_ls, title='Median Translation %Error Plot',
                                       xtitle=r'\sigma', ytitle=r'% Translation error e_t', ylim=[0, 1], figname='median_sigma_err_t')
    s4 = display_comparison_plot_mpld3(it, median_err_q_net, names=algo_names, line_styles=algo_ls, title='Median Rotation Error Plot (deg)',
                                       xtitle=r'\sigma', ytitle=r'Rotation error e_q (deg)', ylim=[0, 0.5], figname='median_sigma_err_q')
    s = s + '\n<table>\n <tr>\n <td>\n' + s1 + '\n</td>\n <td>\n' + s2 + '\n</td>\n </tr>\n' + \
        '\n<tr>\n <td>\n' + s3 + '\n</td> \n<td>\n' + s4 + '\n</td>\n </tr>\n </table>\n'
    return s
def time_comp():
# Test 4
obj_pts_store = []
img_pts_store = []
n_max = 50
n_step = 1
n_start = 10
n_iter = 10
tcomp_storage = []
for n in np.arange(n_start, n_max, n_step):
# Generate object points and image points
for i in np.arange(0, n_iter):
obj_pts = np.random.randint(-40, 40, [n, 3])
obj_pts = obj_pts.astype(float)
obj_pts[0, :] = [0, 0, 0]
img_pts = np.empty([n, 3])
img_pts[:, 2] = 1
# Define camera extrinsic matrix
v = 2 * np.random.random([3]) - np.array([1, 1, 1])
v = v / np.linalg.norm(v)
#R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])
R = vector2RotMat(v, np.pi * 2 / 3)
t = np.array([0.0, 0.0, 200.0])
# Compute image points based on actual extrinsic matrix
for i in range(0, n):
pt = np.dot(R, obj_pts[i, :]) | |
repetition
... numpy.array([0., 1., 2., -1., 0.]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: duplicates in test dimension: [0.]
"""
# check for values outside of limits
if limits is not None:
if np.amin(dimension) < limits[0] or np.amax(dimension) > limits[1]:
raise RuntimeError(f"{name} dimension beyond limits "
f"[{limits[0]}, {limits[1]}]")
# check for duplicated coordinates, meaning domain overlap
# for cyclic dimensions, but should not happen anyway so check
# for non-cyclic dimensions too
if dimension.ndim > 0:
sort_dim = np.sort(dimension)
dup_dim = sort_dim[1:][sort_dim[1:] == sort_dim[:-1]]
if dup_dim.size > 0:
raise RuntimeError(f"duplicates in {name} dimension: {dup_dim}")
@staticmethod
def _check_dimension_direction(dimension, name, limits, wrap_around):
"""**Examples:**
>>> import numpy
>>> Grid._check_dimension_direction( # scalar
... numpy.array(1.), 'test', (-2, 2), False)
>>> Grid._check_dimension_direction( # not cyclic, no wrap around
... numpy.array([0., 1., 2.]), 'test', (-2, 2), False)
>>> Grid._check_dimension_direction( # cyclic, no wrap around
... numpy.array([0., 1., 2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_direction( # cyclic, wrap around, sign case 1
... numpy.array([-1., 0., 2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_direction( # cyclic, wrap around, sign case 2
... numpy.array([-1., 0., -2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_direction( # cyclic, wrap around, end
... numpy.array([0., 2., -2.]), 'test', (-3, 3), True)
>>> Grid._check_dimension_direction( # cyclic, wrap around, start
... numpy.array([2., -2., 0.]), 'test', (-3, 3), True)
>>> Grid._check_dimension_direction( # negative direction
... numpy.array([2., 1., 0.]), 'test', (-2, 2), True)
Traceback (most recent call last):
...
RuntimeError: test dimension not directed positively
"""
error = RuntimeError(f"{name} dimension not directed positively")
if dimension.ndim > 0:
space_diff = np.diff(dimension)
if wrap_around:
if np.any(space_diff < 0):
# add one full rotation to first negative difference
# to assume it is wrapping around (since positive
# direction is required, and cross-over can happen
# at most once without domain wrapping on itself)
neg = space_diff[space_diff < 0]
neg[0] += limits[1] - limits[0]
space_diff[space_diff < 0] = neg
else:
# it is a scalar, set difference to one to pass next check
space_diff = 1
if not np.all(space_diff > 0):
# if not all positive, at least one space gap is in
# negative direction
raise error
@staticmethod
def _check_dimension_regularity(dimension, name, limits, wrap_around):
"""**Examples:**
>>> import numpy
>>> Grid._check_dimension_regularity( # scalar
... numpy.array(1.), 'test', (-2, 2), False)
>>> Grid._check_dimension_regularity( # not cyclic, no wrap around
... numpy.array([0., 1., 2.]), 'test', (-2, 2), False)
>>> Grid._check_dimension_regularity( # cyclic, no wrap around
... numpy.array([0., 1., 2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_regularity( # cyclic, wrap around, sign case 1
... numpy.array([-2., 0., 2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_regularity( # cyclic, wrap around, sign case 2
... numpy.array([-2., 0., -2.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_regularity( # cyclic, wrap around, end
... numpy.array([.9, 1.9, -1.1]), 'test', (-2, 2), True)
>>> Grid._check_dimension_regularity( # cyclic, wrap around, start
... numpy.array([1.9, -1.1, -0.1]), 'test', (-2, 2), True)
>>> Grid._check_dimension_regularity( # irregular, not cyclic
... numpy.array([0., .9, 1.]), 'test', (-2, 2), False)
Traceback (most recent call last):
...
RuntimeError: test space gap not constant across region
>>> Grid._check_dimension_regularity( # irregular, cyclic
... numpy.array([1., 1.9, -1]), 'test', (-2, 2), True)
Traceback (most recent call last):
...
RuntimeError: test space gap not constant across region
"""
if dimension.ndim > 0:
space_diff = np.diff(dimension)
if wrap_around:
if np.any(space_diff < 0):
# add one full rotation to first negative difference
# to assume it is wrapping around (since positive
# direction is required, and cross-over can happen
# at most once without domain wrapping on itself)
neg = space_diff[space_diff < 0]
neg[0] += limits[1] - limits[0]
space_diff[space_diff < 0] = neg
else:
# it is a scalar, set difference to one to pass next check
space_diff = 1
if not np.isclose(np.amin(space_diff), np.amax(space_diff),
rtol(), atol()):
raise RuntimeError(
f"{name} space gap not constant across region"
)
@staticmethod
def _check_dimension_bounds_limits(bounds, name, limits):
"""**Examples:**
>>> import numpy
>>> Grid._check_dimension_bounds_limits( # 1D
... numpy.array([0., -1.]), 'test', (-2, 2))
>>> Grid._check_dimension_bounds_limits( # 2D, edging upper limit
... numpy.array([[0., 1.], [1., 2.], [2., 3.]]), 'test', (-3, 3))
>>> Grid._check_dimension_bounds_limits( # 2D, edging lower limit
... numpy.array([[-3., -2.], [-2., -1.], [-1., 0.]]), 'test', (-3, 3))
>>> Grid._check_dimension_bounds_limits( # 1D, beyond upper limit
... numpy.array([0., 3.]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: test dimension bounds beyond limits [-2, 2]
>>> Grid._check_dimension_bounds_limits( # 2D, beyond upper limit
... numpy.array([[0., 1.], [1., 2.], [2., 3.]]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: test dimension bounds beyond limits [-2, 2]
>>> Grid._check_dimension_bounds_limits( # 2D, beyond lower limit
... numpy.array([[-3., -2.], [-2., -1.], [-1., 0.]]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: test dimension bounds beyond limits [-2, 2]
"""
if limits is not None:
if np.amin(bounds) < limits[0] or np.amax(bounds) > limits[1]:
raise RuntimeError(f"{name} dimension bounds beyond limits "
f"[{limits[0]}, {limits[1]}]")
@staticmethod
def _check_dimension_bounds_direction(bounds, name, limits, wrap_around):
"""
TODO: Last example should raise error because last pair of
bounds is either in the negative direction or a second
wrap around, but because the algorithm allows for up to
two negative differences to cover for a pair of bounds
across the limits, the negative value for this last pair
is caught in the assumption it is a wrap around, while the
first wrap around only generated one negative difference,
so the second negative difference was tolerated
erroneously. This is likely to be really an edge case, so
it is kept as is for now. Plus, it is caught as an error
in `_check_dimension_bounds_regularity`.
**Examples:**
>>> import numpy
>>> Grid._check_dimension_bounds_direction( # 1D, not cyclic
... numpy.array([0., 1.]), 'test', (-2, 2), False)
>>> Grid._check_dimension_bounds_direction( # 1D, cyclic, no wrap around
... numpy.array([0., 1.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_bounds_direction( # 1D, cyclic, wrap around
... numpy.array([0., -1.]), 'test', (-2, 2), True)
>>> Grid._check_dimension_bounds_direction( # 2D, not cyclic
... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), False)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic
... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic, wrap around, bound across
... numpy.array([[1.5, 2.5], [2.5, -2.5], [-2.5, -1.5]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic, wrap around, bound edging, sign case 1
... numpy.array([[1., 2.], [2., 3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic, wrap around, bound edging, sign case 2
... numpy.array([[1., 2.], [2., 3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic, wrap around, bound edging, sign case 3
... numpy.array([[1., 2.], [2., -3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # 2D, cyclic, wrap around, bound edging, sign case 4
... numpy.array([[1., 2.], [2., -3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
>>> Grid._check_dimension_bounds_direction( # negative direction
... numpy.array([2., 1.]), 'test', (-2, 2), False)
Traceback (most recent call last):
...
RuntimeError: test dimension bounds not directed positively
>>> Grid._check_dimension_bounds_direction( # not cyclic but wrap around
... numpy.array([0., -1.]), 'test', (-2, 2), False)
Traceback (most recent call last):
...
RuntimeError: test dimension bounds not directed positively
>>> Grid._check_dimension_bounds_direction( # negative direction, not cyclic
... numpy.array([[3., 2.], [2., 1.], [1., 0.]]), 'test', (-3, 3), False)
Traceback (most recent call last):
...
RuntimeError: test dimension bounds not directed positively
>>> Grid._check_dimension_bounds_direction( # negative direction, cyclic
... numpy.array([[3., 2.], [2., 1.], [1., 0.]]), 'test', (-3, 3), True)
Traceback (most recent call last):
...
RuntimeError: test dimension bounds not directed positively
>>> Grid._check_dimension_bounds_direction( # wrap around, negative after
... numpy.array([[2., 3.], [-3., -2.], [-1., -2.]]), 'test', (-3, 3), True)
Traceback (most recent call last):
...
RuntimeError: test dimension bounds not directed positively
>>> Grid._check_dimension_bounds_direction( # [!] current bug
... numpy.array([[1., 2.], [-2., -1.], [-1., -2]]), 'test', (-2, 2), True)
"""
# replace lower limit by upper limit to acknowledge it is same
# location (e.g. -180degE same as +180degE, so replace -180degE
# by +180degE)
bnds = deepcopy(bounds)
| |
<reponame>gorpoorko/Bot-Tcxs-Heroku
# -*- coding: utf-8 -*-
#███╗ ███╗ █████╗ ███╗ ██╗██╗ ██████╗ ██████╗ ███╗ ███╗██╗ ██████╗
#████╗ ████║██╔══██╗████╗ ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
#██╔████╔██║███████║██╔██╗ ██║██║██║ ██║ ██║██╔████╔██║██║██║ ██║
#██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║ ██║ ██║██║╚██╔╝██║██║██║ ██║
#██║ ╚═╝ ██║██║ ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
#╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# | TCXS Project Hacker Team - https://tcxsproject.com.br |
# | Telegram: @GorpoOrko Mail:<EMAIL> |
# [+] Github Gorpo Dev: https://github.com/gorpo [+]
import time
import speech_recognition as sr
from pydub import AudioSegment
import os
import sqlite3
from config import bot,bot_username,logs
from datetime import datetime
from plugins.admins import is_admin
from plugins.inteligencias.ia_global import ia_global
from plugins.inteligencias.ia_local import ia_local
from plugins.inteligencias.ia_cadastro_perguntas import ia_cadastro_perguntas
from plugins.inteligencias.ia_cadastro_manual import ia_cadastro_manual
from plugins.inteligencias.ia_mensagens_proibidas import ia_mensagens_proibidas
from plugins.inteligencias.ia_wikipedia import ia_wikipedia
from plugins.inteligencias.ia_privado_bot import ia_privado_bot
from plugins.inteligencias.ia_crawler_sites import crawling
from plugins.inteligencias.ia_corrige_palavras import ia_corrige_palavras
async def inteligencia(msg):
try:#verifica se o usuario é um admin de grupo ou canal
id_usuario = msg['from']['id']
adm = await is_admin(msg['chat']['id'], msg['from']['id'], id_usuario)
except Exception as e:
pass
try:
if msg['chat']['type'] == 'supergroup':
try:
grupo = f"https://t.me/{msg['chat']['username']}"
except:
grupo = f"Secreto: {msg['chat']['title']}"
pass
try:
usuario = f"@{msg['from']['username']}"
except:
usuario = f"@{msg['from']['id']}({msg['from']['first_name']})"
pass
# database geral---->
conexao_sqlite = sqlite3.connect('bot_database.db')
conexao_sqlite.row_factory = sqlite3.Row
cursor_sqlite = conexao_sqlite.cursor()
# database logs ---->
conexao_logs = sqlite3.connect('bot_database_logs.db')
conexao_logs.row_factory = sqlite3.Row
cursor_logs = conexao_logs.cursor()
data = datetime.now().strftime('%d/%m/%Y %H:%M')
chat_type = msg['chat']['type']
id_grupo = msg['chat']['id']
chat_id = msg['chat']['id']
#inicio da inteligencia artificial---------------------------------------->
if chat_type == 'supergroup': # se o chat for supergrupo ele manda mensagem
if msg.get('text'):
#PLUGINS EXTERNOS DA INTELIGENCIA ARTIFICIAL | PASTA INTELIGENCIAS
ativa_cadastro_perguntas = await ia_cadastro_perguntas(msg)
ativa_cadastro_manual = await ia_cadastro_manual(msg)
ativa_mensagens_proibidas = await ia_mensagens_proibidas(msg)
ativa_wikipedia = await ia_wikipedia(msg)
ativa_crawling = await crawling(msg)
ativa_correcao = await ia_corrige_palavras(msg)
#inicia o banco de dados geral para captar o tipo de inteligencia------->
cursor_sqlite.execute("""SELECT * FROM inteligencia; """)
inteligencias = cursor_sqlite.fetchall()
for inteligencia in inteligencias: # loop em todos resultados da Database
idgrupo = str(msg['chat']['id'])
if inteligencia['inteligencia'] == 'global':
pass
if inteligencia['id_grupo'] == idgrupo:
#print(f'Inteligencia Global setada no grupo {grupo}')
inteligencia_global = await ia_global(msg)
if inteligencia['inteligencia'] == 'local':
pass
if inteligencia['id_grupo'] == idgrupo:
#print(f'Inteligencia Local setada no grupo {grupo}')
inteligencia_local = await ia_local(msg)
# ATIVA A INTELIGENCIA BASEADO NA IA GLOBAL/LOCAL
#COMANDO PARA MUDAR ENTRE A INTELIGENCIA GLOBAL/LOCAL--------------------->>
#sistema de mudança da ia entre global ou local | comando inteligencia ou ia
if msg.get('text').split()[0] == 'inteligencia':
if adm['user'] == True:
if len(msg['text'].split()) > 1 and msg['text'].split()[1] == 'global' or len(msg['text'].split()) > 1 and msg['text'].split()[1] == 'local':
inteligencia = msg['text'].split()[1]
try:
tipo = msg['text'].split()[2]
except:
tipo = 'IA'
pass
try:
linguagem = msg['from']['language_code']
except:
linguagem = 'none'
pass
try:
cursor_sqlite.execute(f"""DELETE FROM inteligencia WHERE id_grupo='{msg['chat']['id']}' """)
cursor_sqlite.execute(f"""INSERT INTO inteligencia (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,inteligencia)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','{tipo}','{data}','{inteligencia}')""")
conexao_sqlite.commit()
await bot.sendMessage(chat_id,f"@{msg['from']['username']} `Inteligencia Artificial:`***{inteligencia}***\nAgora vocês irão receber coisas que aprendi nesta categoria.",'markdown')
except Exception as e:
print(f'banco de dados inteligencia erro: {e}')
if msg.get('text') == 'inteligencia':
await bot.sendMessage(chat_id,f"`Inteligencia Artificial:`***Tenho um sistema de IA que aprendo tudo em todos os lugares que estou, para selecionar oque devo falar use os comandos como exemplo:***\n`comando:`inteligencia global\n```---- Com o comando inteligencia global irei falar oque aprendi em todos os lugares que estive.```\n`comando:`inteligencia local\n```Com o comando inteligencia local irei falar oque aprendi aqui.```\n***Para selecionar a frequência de minha interação no grupo use o comando frêquencia***", 'markdown')
if msg.get('text').split()[0] == 'inteligencia' and adm['user'] == False:
await bot.sendMessage(chat_id,f"@{msg['from']['username']} `este comando é permitido so para admin's`",'markdown')
#sistema que o bot le todas mensagens aprendidas para os usuarios
if msg.get('text').split()[0] == 'mensagens':
if adm['user'] == True:
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
a = await bot.sendMessage(chat_id,f"🤖 {msg['from']['first_name']} tenho {str(len(mensagens))} mensagens, vou exibir elas em ordem...",reply_to_message_id=msg['message_id'])
time.sleep(2)
for mensagem in mensagens:
try:
mensagem = mensagem['mensagem']
await bot.editMessageText((msg['chat']['id'], a['message_id']), f"```{mensagem}```","markdown")
time.sleep(1)
except:
pass
#CADASTRO AUTOMATICO INTELIGENCIA DATABASE
#CADASTRA AUTOMATICAMENTE TUDO QUE POSTAREM NA DATABASE, MANTENDO ASSIM CONHECIMENTO PARA IA
# sistema de cadastro automatico dos posts dos grupos na Database------------------------------------------------------------
if chat_type == 'supergroup' or chat_type == 'private' or chat_type == 'channel':
try:
try:
linguagem = msg['from']['language_code']
except:
linguagem = 'none'
pass
if msg.get('sticker'):
id_sticker = msg['sticker']['file_id']
id_mensagem = msg['sticker']['thumb']['file_unique_id']
await bot.sendSticker(msg['from']['id'], sticker=id_sticker)
#banco de dados geral mensagens------->>
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
#banco de dados logs mensagens ------->>
cursor_logs.execute("""SELECT * FROM mensagens; """)
cursor_logs.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','sticker','{data}','{id_mensagem}','{id_sticker}')""")
conexao_logs.commit()
existe_cadastro = 0 # contador para verificar se o comando ja existe
for mensagem in mensagens: # loop em todos resultados da Database
if mensagem['id_mensagem'] == id_mensagem:
existe_cadastro = 1 # troca o valor de existe_cadastro para 1
if existe_cadastro == 1:
pass
else:
cursor_sqlite.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','sticker','{data}','{id_mensagem}','{id_sticker}')""")
conexao_sqlite.commit()
if msg.get('photo'):
id_foto = msg['photo'][0]['file_id']
id_mensagem = msg['photo'][0]['file_unique_id']
# banco de dados geral mensagens------->>
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
# banco de dados logs mensagens ------->>
cursor_logs.execute("""SELECT * FROM mensagens; """)
cursor_logs.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','imagem','{data}','{id_mensagem}','{id_foto}')""")
conexao_logs.commit()
existe_cadastro = 0 # contador para verificar se o comando ja existe
for mensagem in mensagens: # loop em todos resultados da Database
if mensagem['id_mensagem'] == id_mensagem:
existe_cadastro = 1 # troca o valor de existe_cadastro para 1
if existe_cadastro == 1:
pass
else:
cursor_sqlite.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','imagem','{data}','{id_mensagem}','{id_foto}')""")
conexao_sqlite.commit()
try:
await bot.sendPhoto(logs, id_foto) #canal para marcinho = -1001402280935
except:
pass
try:
await bot.sendPhoto(msg['from']['id'], photo=id_foto, caption='Você mandou esta foto no grupo.')
except:
pass
if msg.get('document'):#grava os dados pelo nome pois a unique id nao fica mesma se mesmo arquivo
id_documento = msg['document']['file_id']
try:
id_mensagem = msg['document']['file_name']
except:
id_mensagem = msg['document']['file_unique_id']
pass
# banco de dados geral mensagens------->>
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
# banco de dados logs mensagens ------->>
cursor_logs.execute("""SELECT * FROM mensagens; """)
cursor_logs.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','documento','{data}','{id_mensagem}','{id_documento}')""")
conexao_logs.commit()
existe_cadastro = 0 # contador para verificar se o comando ja existe
for mensagem in mensagens: # loop em todos resultados da Database
if mensagem['id_mensagem'] == id_mensagem:
existe_cadastro = 1 # troca o valor de existe_cadastro para 1
if existe_cadastro == 1:
pass
else:
await bot.sendDocument(msg['from']['id'], id_documento)
await bot.sendDocument(logs, id_documento)
cursor_sqlite.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','documento','{data}','{id_mensagem}','{id_documento}')""")
conexao_sqlite.commit()
try:
captado = msg['caption']
except:
captado = f'Vem de PV q o Pai ta ON: @{bot_username} '
#await bot.sendDocument(-1001363303197, id_documento, captado) #para marcinho -1001166426209
if msg.get('audio'):
id_audio = msg['audio']['file_id']
id_mensagem = msg['audio']['file_unique_id']
# banco de dados geral mensagens------->>
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
# banco de dados logs mensagens ------->>
cursor_logs.execute("""SELECT * FROM mensagens; """)
cursor_logs.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','audio','{data}','{id_mensagem}','{id_audio}')""")
conexao_logs.commit()
existe_cadastro = 0 # contador para verificar se o comando ja existe
for mensagem in mensagens: # loop em todos resultados da Database
if mensagem['id_mensagem'] == id_mensagem:
existe_cadastro = 1 # troca o valor de existe_cadastro para 1
if existe_cadastro == 1:
pass
else:
cursor_sqlite.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','audio','{data}','{id_mensagem}','{id_audio}')""")
conexao_sqlite.commit()
if msg.get('video'):
id_video = msg['video']['file_id']
id_mensagem = msg['video']['file_unique_id']
# banco de dados geral mensagens------->>
cursor_sqlite.execute("""SELECT * FROM mensagens; """)
mensagens = cursor_sqlite.fetchall()
# banco de dados logs mensagens ------->>
cursor_logs.execute("""SELECT * FROM mensagens; """)
cursor_logs.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','video','{data}','{id_mensagem}','{id_video}')""")
conexao_logs.commit()
existe_cadastro = 0 # contador para verificar se o comando ja existe
for mensagem in mensagens: # loop em todos resultados da Database
if mensagem['id_mensagem'] == id_mensagem:
existe_cadastro = 1 # troca o valor de existe_cadastro para 1
if existe_cadastro == 1:
pass
else:
cursor_sqlite.execute(f"""INSERT INTO mensagens (int_id, grupo, tipo_grupo, id_grupo, usuario, id_usuario, linguagem, tipo, data,id_mensagem, mensagem)VALUES(null,'{grupo}','{chat_type}','{id_grupo}','{usuario}','{id_usuario}','{linguagem}','video','{data}','{id_mensagem}','{id_video}')""")
conexao_sqlite.commit()
try:
await bot.sendVideo(logs, id_video) #marcinho -1001402280935
except:
pass
try:
await bot.sendVideo(msg['from']['id'], id_video, caption=f'@{usuario} Você mandou este video no {grupo}.')
except:
pass
if msg.get('voice'):#MELHOR NAO APLICAR AQUI A REGRA SE JA EXISTE SENAO NAO VAI CADASTRAR
id_voz = msg['voice']['file_id']
#id_mensagem = msg['voice']['file_unique_id']
await bot.download_file(msg['voice']['file_id'], 'arquivos/audio_usuario_db.ogg')
sound | |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the skhep.math.vectors module.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
import pytest
from pytest import approx
from functools import partial
import numpy as np
# Shared comparator for all tests below: numpy allclose pinned to a 1e-7
# absolute tolerance so individual assertions need not repeat it.
assert_allclose = partial(np.testing.assert_allclose, atol=.0000001)
from hepvector import Vector2D, Vector3D, LorentzVector
from math import pi, sqrt
# -----------------------------------------------------------------------------
# Actual tests
# -----------------------------------------------------------------------------
# From iterable removed (Use python * syntax instead)
def test_from_iterable():
    """Construction from an iterable uses Python's * unpacking."""
    values = [1, 2, 3]
    assert np.all(Vector3D(*values) == Vector3D(1, 2, 3))
    # A fourth component must be rejected by the 3D constructor.
    with pytest.raises(TypeError):
        Vector3D(*[1, 2, 3, 4])
def test_default_const_iterable():
    """Per-component iterables broadcast into a bundle of vectors."""
    # One list component broadcasts against two scalars -> (3, 3).
    bundle = Vector3D(1, 2, [3, 4, 5])
    assert bundle.shape == (3, 3)
    # Three equal-length lists give the same shape.
    bundle = Vector3D([1, 2, 3], [2, 3, 4], [3, 4, 5])
    assert bundle.shape == (3, 3)
    # Ragged component lengths cannot be broadcast together.
    with pytest.raises(ValueError):
        Vector3D([1, 2, 3], [2, 3], [3, 4, 5])
def test_vectors_3D_constructors():
    """Cover the default, copy, cylindrical and spherical constructors."""
    origin = Vector3D()
    assert str(origin) == str(np.array([0., 0., 0.]))
    assert str(origin) == str(Vector3D.origin())
    ones = Vector3D(1., 1., 1.)
    assert str(ones) == str(np.array([1., 1., 1.]))
    copied = Vector3D.from_vector(origin)
    assert str(copied) == str(np.array([0., 0., 0.]))
    splatted = Vector3D(*[1.0, 1.0, 1.0])
    assert str(splatted) == str(np.array([1., 1., 1.]))
    # Cylindrical coordinates: the assertions imply (rho, phi, z) ordering.
    cyl_on_axis = Vector3D.from_cylindrical_coords(1., 0., 1.)
    assert np.all(cyl_on_axis == Vector3D(1., 0., 1.))
    assert cyl_on_axis.rho == 1.
    cyl_rotated = Vector3D.from_cylindrical_coords(0.5, pi / 2, 0.)
    assert_allclose(cyl_rotated, Vector3D(0., 0.5, 0.))
    # Spherical coordinates: the assertions imply (r, theta, phi) ordering.
    sph_pole = Vector3D.from_spherical_coords(1.0, 0., 0.)
    assert np.all(sph_pole == Vector3D(0., 0., 1.))
    assert sph_pole.r == 1.
    assert sph_pole.theta == 0.
    sph_pole_rotated = Vector3D.from_spherical_coords(1.0, 0., pi / 2)
    assert_allclose(sph_pole_rotated, Vector3D(0., 0., 1.))
    sph_diagonal = Vector3D.from_spherical_coords(2.0, pi / 2, pi / 4)
    assert_allclose(sph_diagonal, Vector3D(sqrt(2), sqrt(2), 0.))
def test_vectors_Lorentz_constructors():
    """Cover the default and component Lorentz constructors."""
    # Non-numeric components are rejected at construction time.
    with pytest.raises(ValueError):
        LorentzVector(*['str1', 'str2', 'str3', 'str4'])
    null_vector = LorentzVector()
    assert str(null_vector) == str(np.array([0., 0., 0., 0.]))
    unit_vector = LorentzVector(1., 1., 1., 1.)
    assert str(unit_vector) == str(np.array([1., 1., 1., 1.]))
def test_containers_properties():
    """Vectors behave as fixed-length sequences with named components."""
    # Out-of-range indices must be rejected for 3-vectors.
    with pytest.raises(IndexError):
        Vector3D.__setitem__(Vector3D(), 3, 1.)
    with pytest.raises(IndexError):
        Vector3D.__getitem__(Vector3D(), 3)
    vec = Vector3D()
    vec.x = 5.
    vec.y = -5.
    vec.z = 10
    assert np.all(vec == Vector3D(5., -5., 10.))
    # Full-slice assignment replaces all components at once.
    vec[:] = (-5., 5., 10.)
    assert np.all(vec == Vector3D(-5., 5., 10.))
    vec[0] = 1.
    assert np.all(vec == Vector3D(1., 5., 10.))
    vec[1] = 1.
    assert np.all(vec == Vector3D(1., 1., 10.))
    vec[2] = 1.
    assert np.all(vec == Vector3D(1., 1., 1.))
    assert vec[0] == 1.
    assert vec[1] == 1.
    assert vec[2] == 1.
    assert len(vec) == 3.
    assert vec.tolist() == [1., 1., 1.]
    assert list(vec) == [1., 1., 1.]
    assert [component for component in vec] == [1., 1., 1.]
    # Out-of-range indices must be rejected for Lorentz vectors too.
    with pytest.raises(IndexError):
        LorentzVector.__setitem__(LorentzVector(), 4, 1.)
    with pytest.raises(IndexError):
        LorentzVector.__getitem__(LorentzVector(), 4)
    lvec = LorentzVector()
    lvec.x = 5.
    lvec.y = -5.
    lvec.z = 10
    lvec.t = 2.
    assert np.all(lvec == LorentzVector(5., -5., 10., 2.))
    lvec.x = 5.
    lvec.y = 5.
    lvec.z = -10
    lvec.t = 2.
    assert np.all(lvec == LorentzVector(5., 5., -10., 2.))
    lvec[:] = (-5., 5., 10., -2.)
    assert np.all(lvec == LorentzVector(-5., 5., 10., -2.))
    lvec[0] = 1.
    assert np.all(lvec == LorentzVector(1., 5., 10., -2.))
    lvec[1] = 1.
    assert np.all(lvec == LorentzVector(1., 1., 10., -2.))
    lvec[2] = 1.
    assert np.all(lvec == LorentzVector(1., 1., 1., -2.))
    lvec[3] = 1.
    assert np.all(lvec == LorentzVector(1., 1., 1., 1.))
    assert lvec[0] == 1.
    assert lvec[1] == 1.
    assert lvec[2] == 1.
    assert lvec[3] == 1.
    assert len(lvec) == 4.
    assert list(lvec) == [1., 1., 1., 1.]
def test_vectors_3D_operators():
    """Arithmetic, dot and cross products on 3-vectors."""
    zero = Vector3D(0., 0., 0.)
    ones = Vector3D(1., 1., 1.)
    twos = Vector3D(2., 2., 2.)
    threes = Vector3D(3., 3., 3.)
    seq = Vector3D(1., 2., 3.)
    assert np.all(zero == 0.)
    # addition, subtraction, scalar multiplication (both sides)
    assert_allclose(zero + ones, Vector3D(1., 1., 1.))
    assert_allclose(zero - ones, Vector3D(-1., -1., -1.))
    assert_allclose(zero - ones, -1. * Vector3D(1., 1., 1.))
    assert_allclose(zero * 2., Vector3D(0., 0., 0.))
    assert_allclose(ones * 2., Vector3D(2., 2., 2.))
    assert_allclose(2. * zero, Vector3D(0., 0., 0.))
    assert_allclose(2. * ones, Vector3D(2., 2., 2.))
    # dot product is symmetric
    assert zero.dot(ones) == 0.
    assert ones.dot(zero) == 0.
    assert threes.dot(twos) == twos.dot(threes)
    assert twos.dot(threes) == 18.
    # scalar division
    assert_allclose(twos / 2., ones)
    assert_allclose(threes / 3., ones)
    # in-place scaling
    zero *= 2.
    ones *= 2.
    assert np.all(zero == Vector3D(0., 0., 0.))
    assert np.all(ones == Vector3D(2., 2., 2.))
    zero /= 2.
    ones /= 2.
    assert np.all(zero == Vector3D(0., 0., 0.))
    assert np.all(ones == Vector3D(1., 1., 1.))
    # combinations of operations
    assert (zero + ones).dot(twos) == 6., "Check operations combination"
    assert (ones - zero).dot(twos) == 6., "Check operations combination"
    assert twos.dot((zero - ones)) == -6., "Check operations combination"
    assert 18. / (twos.dot(threes)) == 1., "Check operations combination"
    assert_allclose(threes / (twos.dot(ones)), Vector3D(0.5, 0.5, 0.5))
    # cross product: zero for parallel vectors, anti-commutative
    assert_allclose(ones.cross(ones), Vector3D(0., 0., 0.))
    assert_allclose(ones.cross(seq), Vector3D(1., -2., 1.))
    assert_allclose(seq.cross(ones), -1 * ones.cross(seq))
    assert_allclose(ones, ones)
def test_vectors_Lorentz_operators():
    """Arithmetic and the Minkowski dot product on Lorentz vectors."""
    # In-place arithmetic with a non-vector operand is a TypeError.
    with pytest.raises(TypeError):
        LorentzVector.__iadd__(LorentzVector(), "a")
    with pytest.raises(TypeError):
        LorentzVector.__isub__(LorentzVector(), "a")
    zero = LorentzVector(0., 0., 0., 0.)
    spatial_ones = LorentzVector(1., 1., 1., 0.)
    twos = LorentzVector(2., 2., 2., 1.)
    threes = LorentzVector(3., 3., 3., 1.)
    timelike = LorentzVector(1., 1., 1., 6.)
    assert np.all(zero == 0.)
    # addition, subtraction, scalar multiplication (both sides)
    assert_allclose(zero + spatial_ones, LorentzVector(1., 1., 1., 0.))
    assert_allclose(zero - spatial_ones, LorentzVector(-1., -1., -1., 0.))
    assert_allclose(zero * 2., LorentzVector(0., 0., 0., 0.))
    assert_allclose(spatial_ones * 2., LorentzVector(2., 2., 2., 0.))
    assert_allclose(2. * zero, LorentzVector(0., 0., 0., 0.))
    assert_allclose(2. * spatial_ones, LorentzVector(2., 2., 2., 0.))
    # Minkowski dot product is symmetric and can be negative
    assert spatial_ones.dot(zero) == 0.
    assert zero.dot(spatial_ones) == 0.
    assert twos.dot(threes) == threes.dot(twos)
    assert twos.dot(threes) == -17.
    # scalar division
    assert_allclose(twos / 2., LorentzVector(1., 1., 1., 0.5))
    assert_allclose(threes / 3., LorentzVector(1., 1., 1., 1. / 3))
    # light-like combination: spatial and time parts cancel
    assert twos.dot(timelike) == 0.0
    # in-place scaling
    zero *= 2.
    spatial_ones *= 2.
    assert_allclose(zero, LorentzVector(0., 0., 0., 0.))
    assert_allclose(spatial_ones, LorentzVector(2., 2., 2., 0.))
    zero /= 2.
    spatial_ones /= 2.
    assert_allclose(zero, LorentzVector(0., 0., 0., 0.))
    assert_allclose(spatial_ones, LorentzVector(1., 1., 1., 0.))
    assert_allclose(spatial_ones, spatial_ones)
def test_vectors_3D_rotations():
    """Axis rotations of 3-vectors around X, Y, Z and arbitrary axes."""
    x_unit = Vector3D.from_cylindrical_coords(1., 0., 0.)
    assert x_unit.phi == 0.0
    # quarter-turn about Z moves the X-vector onto Y
    assert x_unit.rotate_axis(Vector3D.Z, pi / 2).phi == pi / 2
    assert_allclose(x_unit.rotate_axis(Vector3D.Z, pi / 2), Vector3D(0., 1., 0.), atol=.0000001)
    # half-turn about Y flips X; the sign of a pi rotation is irrelevant
    assert x_unit.rotate_axis(Vector3D.Y, pi).phi == pi
    assert x_unit.rotate_axis(Vector3D.Y, -pi).phi == pi
    assert_allclose(x_unit.rotate_axis(Vector3D.Y, pi), Vector3D(-1., 0., 0.), atol=.0000001)
    assert_allclose(x_unit.rotate_axis(Vector3D.Y, -pi), Vector3D(-1., 0., 0.), atol=.0000001)
    # rotating an X-aligned vector about X leaves it unchanged
    assert x_unit.rotate_axis(Vector3D.X, pi).phi == 0.
    assert_allclose(x_unit.rotate_axis(Vector3D.X, pi), Vector3D(1., 0., 0.), atol=.0000001)
    y_unit = Vector3D.from_spherical_coords(1.0, pi / 2, pi / 2)
    assert y_unit.phi == pi / 2
    assert y_unit.theta == pi / 2
    assert y_unit.rotate_axis(Vector3D.X, pi).phi == -pi / 2
    assert y_unit.rotate_axis(Vector3D.X, pi).theta == pi / 2
    assert_allclose(y_unit.rotate_axis(Vector3D.X, pi), Vector3D(0., -1., 0.), atol=.0000001)
    diagonal = Vector3D.from_spherical_coords(1.0, pi / 4, pi / 4)
    # rotating about the mutual normal by the separation angle maps one
    # vector onto the other (in either direction)
    angle = y_unit.angle(diagonal)
    axis = y_unit.cross(diagonal)
    assert_allclose(y_unit.rotate_axis(axis, angle), diagonal, atol=.0000001)
    assert_allclose(y_unit.rotate_axis(-1. * axis, -angle), diagonal, atol=.0000001)
def test_vectors_Lorentz_rotations():
    """Axis rotations of Lorentz vectors act on the spatial part only."""
    x_unit = Vector3D.from_cylindrical_coords(1., 0., 0.)
    y_unit = Vector3D.from_spherical_coords(1.0, pi / 2, pi / 2)
    diagonal = Vector3D.from_spherical_coords(1.0, pi / 4, pi / 4)
    angle = y_unit.angle(diagonal)
    axis = y_unit.cross(diagonal)
    # malformed axis arguments are rejected
    with pytest.raises(AttributeError):
        LorentzVector.rotate_axis(LorentzVector(), pi, 1)
    with pytest.raises(AttributeError):
        LorentzVector.rotate_axis(LorentzVector(), pi, [1, 2])
    with pytest.raises(TypeError):
        LorentzVector.rotate_axis(LorentzVector(), pi, 1, 2, 3, 4)
    with pytest.raises(TypeError):
        LorentzVector.rotate_axis(LorentzVector(), pi, 0, 1, 'a')
    with pytest.raises(AttributeError):
        LorentzVector.rotate_axis(LorentzVector(), pi, ['a', 'b', 3])
    # the time component stays fixed under every rotation below
    lx = LorentzVector(x_unit[0], x_unit[1], x_unit[2], 1.)
    assert lx.phi == 0.
    assert lx.rotate_axis(Vector3D.Z, pi / 2).phi == pi / 2
    assert_allclose(lx.rotate_axis(Vector3D.Z, pi / 2), LorentzVector(0., 1., 0., 1.))
    assert lx.rotate_axis(Vector3D.Y, pi).phi == pi
    assert lx.rotate_axis(Vector3D.Y, -pi).phi == pi
    assert_allclose(lx.rotate_axis(Vector3D.Y, pi), LorentzVector(-1., 0., 0., 1.))
    assert_allclose(lx.rotate_axis(Vector3D.Y, -pi), LorentzVector(-1., 0., 0., 1.))
    assert lx.rotate_axis(Vector3D.X, pi).phi == 0.
    assert_allclose(lx.rotate_axis(Vector3D.X, pi), LorentzVector(1., 0., 0., 1.))
    ly = LorentzVector(y_unit[0], y_unit[1], y_unit[2], 2.0)
    assert ly.phi == pi / 2
    assert ly.theta == pi / 2
    assert ly.rotate_axis(Vector3D.X, pi).phi == -pi / 2
    assert ly.rotate_axis(Vector3D.X, pi).theta == pi / 2
    assert_allclose(ly.rotate_axis(Vector3D.X, pi), LorentzVector(0., -1., 0., 2.0))
    ldiag = LorentzVector(diagonal[0], diagonal[1], diagonal[2], 2.0)
    assert_allclose(ly.rotate_axis(axis, angle), ldiag, atol=.0000001)
    assert_allclose(ly.rotate_axis(-1. * axis, -angle), ldiag, atol=.0000001)
def test_3Dvectors_properties():
v0 = Vector3D()
v1, v2 = Vector3D(1., 1., 1.), Vector3D(2., 2., 2.)
v3, v4 = Vector3D(-1., -1., -1.), Vector3D(-2., -2., -2.)
v5, v6 = Vector3D(1., 1., 0.), Vector3D(0., 0., 2.)
assert np.all(v0 == [0,0,0])
assert np.all(v1 == [1,1,1])
assert v1.mag2 == 3.
assert v1.mag == sqrt(3.)
assert v2.mag2 == 12.
assert v2.mag == sqrt(12.)
assert v1.unit.mag == 1.
v7 = v1.unit
assert np.all(v1.unit == v7.unit)
# assert v1.isparallel(v2) == True
# assert v2.isparallel(v1) == True
# assert v1.isantiparallel(v2) == False
# assert v2.isantiparallel(v1) == False
# assert v1.isantiparallel(v3) == True
# assert v2.isantiparallel(v4) == True
# assert v1.isparallel(v3) == False
# assert v2.isparallel(v4) == False
# assert v1.isopposite(v3) == True
# assert v1.isopposite(v4) == False
# assert v2.isopposite(v4) == True
# assert v2.isopposite(v3) == False
# assert v1.isperpendicular(v2) == False
# assert v2.isperpendicular(v1) == False
# assert v5.isperpendicular(v6) | |
<filename>src/players.py
"""players module.
Used for players data processes
"""
import numpy as np
import pandas as pd
import src.config as config
import src.utilities as utilities
from src.utilities import logging
# Show wide player tables in full when inspecting dataframes interactively.
pd.set_option("display.max_columns", 500)
pd.set_option("display.expand_frame_repr", False)
# master_file = config.MASTER_FILES["ftb_players"]
# distance_columns = ["Age", "ChancesInvolved", "DefensiveActions", "FoulsCommited", "FoulsSuffered", "Height", "Minutes", "NPG+A", "Points", "Weight", "SuccessfulPasses"]
def get_outfile(source_name):
    """Return the output-file stub for a given data source.

    INPUT:
        source_name: String containing name of the data source
            ("tmk_cnt" for contract data, "tmk_psm" for performance data)
    OUTPUT:
        outfile_stub: Stub to use when saving output

    Raises:
        ValueError: If source_name is not a recognised data source.
    """
    # Lazy %-style args avoid formatting when the log level is disabled.
    logging.info("Mapping %s to outfile", source_name)
    if source_name == "tmk_cnt":
        outfile_stub = "players_contract"
    elif source_name == "tmk_psm":
        outfile_stub = "players_performance"
    else:
        # Previously an unknown source fell through and raised an opaque
        # UnboundLocalError on return; fail with a clear message instead.
        raise ValueError("Unknown data source: {0}".format(source_name))
    logging.debug(outfile_stub)
    return outfile_stub
def clean_data(source_name, directory=config.MASTER_DIR):
    """Clean raw player data and save processed version.

    Loads the raw per-season files for the source, repairs the Name/Position
    mis-alignment present in the raw files, normalises placeholder strings to
    NaN, derives a coarse "Position group" column, applies source-specific
    type conversions, and saves the result via utilities.save_master.

    INPUT:
        source_name: String containing name of the data source
            ("tmk_cnt" for contract data, "tmk_psm" for performance data)
        directory: Directory to save output to
    OUTPUT:
        df: Dataframe containing the cleaned data
    """
    logging.info("Loading {0} data".format(source_name))
    # NOTE(review): an unrecognised source_name leaves source_header,
    # drop_cols and notna_cols unbound and raises UnboundLocalError below.
    if source_name == "tmk_cnt":
        # Contract data: one row per player per season.
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Date of birth",
            "Nationality",
            "Height",
            "Foot",
            "Joined",
            "Signed from",
            "Contract expires",
            "Market value",
        ]
        # "Competition" is not in source_header -- presumably added by
        # folder_loader; verify against utilities.folder_loader.
        drop_cols = ["Nationality", "Signed from", "Competition"]
        notna_cols = ["Market value"]
    elif source_name == "tmk_psm":
        # Performance summary data: per-season playing statistics.
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Age",
            "Nationality",
            "In squad",
            "Games started",
            "Goals",
            "Assists",
            "Yellow cards",
            "Second yellow cards",
            "Red cards",
            "Substitutions on",
            "Substitutions off",
            "PPG",
            "Minutes played",
        ]
        drop_cols = ["Nationality"]
        notna_cols = ["In squad"]
    df = utilities.folder_loader(
        source_name[:3], source_name, "comp_season", source_header=source_header
    )
    ## Name and Position are mis-aligned in the source files:
    ## backfill Name, then pull Position from the following row(s).
    # NOTE(review): fillna(method=...) is deprecated in recent pandas
    # (use .bfill()); confirm the pinned pandas version before upgrading.
    df["Name"].fillna(method="bfill", inplace=True)
    df["Position"] = df.Name.shift(-1)
    df.loc[df.Position == df.Name, "Position"] = df.Name.shift(-2)
    df.drop(axis=1, columns=drop_cols, inplace=True)
    # Rows missing the source's key column are artifacts of the mis-alignment.
    df.dropna(subset=notna_cols, inplace=True)
    # Normalise whitespace, then map the site's placeholder strings to NaN.
    df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
    df = df.replace("-", np.nan)
    df = df.replace("Was not used during this season", np.nan)
    df = df.replace("Not in squad during this season", np.nan)
    df = df.replace("Not used during this season", np.nan)
    df["Shirt number"] = pd.to_numeric(df["Shirt number"], downcast="integer")
    # Bucket free-text positions into G/D/M/F via keyword matching.
    df["Position group"] = None
    df.loc[
        (df.Position.str.upper().str.contains("KEEPER"))
        | (df.Position.str.upper().str.contains("GOAL")),
        "Position group",
    ] = "G"
    df.loc[
        (df.Position.str.upper().str.contains("BACK"))
        | (df.Position.str.upper().str.contains("DEF")),
        "Position group",
    ] = "D"
    df.loc[
        (df.Position.str.upper().str.contains("MID"))
        | (df.Position.str.upper().str.contains("MIT"))
        | (df.Position.str.upper().str.contains("WING")),
        "Position group",
    ] = "M"
    df.loc[
        (df.Position.str.upper().str.contains("STRIKER"))
        | (df.Position.str.upper().str.contains("FORW")),
        "Position group",
    ] = "F"
    if source_name == "tmk_cnt":
        # "Date of birth" arrives as e.g. "Jan 1, 1990 (30)" -- split out
        # the trailing age and parse the date portion.
        df["Age"] = (
            df["Date of birth"].str.extract(r".*([0-9]{2})", expand=False).astype("int")
        )
        df["Date of birth"] = pd.to_datetime(
            df["Date of birth"].str.extract(r"(.*) \([0-9]{2}\)", expand=False),
            format="%b %d, %Y",
        )
        df["Joined"] = pd.to_datetime(df.Joined, format="%b %d, %Y")
        df["Contract expires"] = pd.to_datetime(
            df["Contract expires"], format="%d.%m.%Y"
        )
        # Height like "1,85 m" -> 185.0 (strip unit/separators first).
        df["Height"] = (
            df["Height"]
            .str.strip()
            .str.replace(" ", "")
            .str.replace(",", "")
            .str.replace("m", "")
            .replace({"-": np.nan, "": np.nan})
            .astype(float)
        )
        # For players who have Height in some seasons but not others,
        # backfill the known value across their seasons.
        df.loc[
            df.Name.isin(df[df.Height.notna()].Name.values)
            & df.Name.isin(df[df.Height.isna()].Name.values),
            "Height",
        ] = (
            df.loc[
                df.Name.isin(df[df.Height.notna()].Name.values)
                & df.Name.isin(df[df.Height.isna()].Name.values)
            ]
            .sort_values(by=["Name", "Season"])
            .Height.fillna(method="bfill")
        )
        # Same backfill for preferred Foot.
        df.loc[
            df.Name.isin(df[df.Foot.notna()].Name.values)
            & df.Name.isin(df[df.Foot.isna()].Name.values),
            "Foot",
        ] = (
            df.loc[
                df.Name.isin(df[df.Foot.notna()].Name.values)
                & df.Name.isin(df[df.Foot.isna()].Name.values)
            ]
            .sort_values(by=["Name", "Season"])
            .Foot.fillna(method="bfill")
        )
        # Market value like "£1.5m" / "£900k" -> value in millions:
        # numeric part times a k/Th./m multiplier, divided by 1e6.
        df["Market value"] = (
            df["Market value"]
            .str.strip()
            .replace({"-": np.nan})
            .replace(r"[£kmTh\.]", "", regex=True)
            .astype(float)
            * df["Market value"]
            .str.extract(r"[\d\.]+([kmTh\.]+)", expand=False)
            .fillna(1)
            .replace(["k", "Th.", "m"], [10 ** 3, 10 ** 3, 10 ** 6])
            .astype(int)
            / 10 ** 6
        )
    elif source_name == "tmk_psm":
        # PPG uses a decimal comma; Minutes played uses "." thousands
        # separators and a trailing apostrophe.
        df["PPG"] = df["PPG"].str.strip().replace(r"[,]", ".", regex=True).astype(float)
        df["Minutes played"] = (
            df["Minutes played"]
            .str.strip()
            .replace(r"[.\']", "", regex=True)
            .astype(float)
        )
        # Treat "did not play" placeholders (now NaN) as zero...
        df[
            [
                "In squad",
                "Games started",
                "Goals",
                "Assists",
                "Yellow cards",
                "Second yellow cards",
                "Red cards",
                "Substitutions on",
                "Substitutions off",
                "PPG",
                "Minutes played",
            ]
        ] = df[
            [
                "In squad",
                "Games started",
                "Goals",
                "Assists",
                "Yellow cards",
                "Second yellow cards",
                "Red cards",
                "Substitutions on",
                "Substitutions off",
                "PPG",
                "Minutes played",
            ]
        ].fillna(
            0
        )
        # ...then cast every statistic column to float.
        df[
            [
                "In squad",
                "Games started",
                "Goals",
                "Assists",
                "Yellow cards",
                "Second yellow cards",
                "Red cards",
                "Substitutions on",
                "Substitutions off",
                "PPG",
                "Minutes played",
            ]
        ] = df[
            [
                "In squad",
                "Games started",
                "Goals",
                "Assists",
                "Yellow cards",
                "Second yellow cards",
                "Red cards",
                "Substitutions on",
                "Substitutions off",
                "PPG",
                "Minutes played",
            ]
        ].astype(
            float
        )
    logging.debug(df.describe(include="all"))
    # NOTE(review): this log message looks truncated (trailing "to ").
    logging.info("Saving processed data to ")
    utilities.save_master(df, get_outfile(source_name), directory=directory)
    return df
# def get_players():
# """
# INPUT:
# None
# OUTPUT:
# df - Dataframe of aggregated player data
# """
# logging.info("Fetching aggregated player data")
# # fetch from master csv
# # df = pd.read_csv(master_file, sep='|', encoding="ISO-8859-1")
# df = utilities.get_master("players")
# # filter unwanted records
# df = df[(df["Season"] >= "s1314") & (df["Competition"].isin(["chm", "cpo", "prm"]))]
# df.dropna(subset=["Name"], inplace=True)
# # select columns
# group_key = "Name"
# max_cols = ["Age", "Height", "Weight"]
# # p90_cols = ["AerialsWon", "ChancesInvolved", "DefensiveActions", "Dispossesed", "Dribbles", "FoulsCommited", "FoulsSuffered", "NPG+A", "SuccessfulPasses"]
# p90_cols = [
# "AerialsWon",
# "Assists",
# "BadControl",
# "Blocks",
# "CalledOffside",
# "Clearances",
# "Crosses",
# "Dispossesed",
# "Dribbles",
# "DribblesAgainst",
# "FirstYellowCards",
# "FoulsCommited",
# "FoulsSuffered",
# "GoalsConceded",
# "Interceptions",
# "KeyPasses",
# "LongBalls",
# "NonPenaltyGoals",
# "OffsidesWon",
# "OwnGoals",
# "Passes",
# "PenaltyGoals",
# "RedCards",
# "Saves",
# "Shots",
# "ShotsFaced",
# "ShotsOnTarget",
# "Tackles",
# "ThroughBalls",
# "YellowCards",
# ]
# pGm_cols = ["Appearances", "Minutes", "Points"]
# sum_cols = p90_cols + pGm_cols
# selected_columns = [group_key] + max_cols + sum_cols
# df = df[selected_columns]
# # aggregate to player level
# df_max = df[[group_key] + max_cols].groupby(group_key).max()
# df_sum = df[[group_key] + sum_cols].groupby(group_key).sum()
# df = pd.concat([df_max, df_sum], axis=1)
# df = df[(df["Minutes"] >= 900)]
# # convert action totals to per90
# for col in p90_cols:
# df[col + "P90"] = 90 * df[col] / df["Minutes"]
# for col in pGm_cols:
# df[col + "PGm"] = df[col] / df["Appearances"]
# for col in sum_cols:
# del df[col]
# del df["AppearancesPGm"]
# logging.debug(df.describe(include="all"))
# return df
# def find_similar():
# players = get_players()
# # print players
# print("\nNumber of players included: " + str(len(players)))
# # Normalize all of the numeric columns
# players_normalized = (players - players.mean()) / players.std()
# players_normalized.fillna(0, inplace=True)
# # players_normalized.info()
# # print players_normalized.describe(include="all")
# # print players_normalized.index.values
# for (
# name
# ) in (
# players_normalized.index.values
# ): # ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]:
# # print "\n###############################"
# print("\n" + name, end=" ")
# # selected_player = players.loc[name]
# # print selected_player.name
# # print selected_player.to_frame().T #.name
# # Normalize all of the numeric columns
# selected_normalized = players_normalized.loc[name]
# # print selected_normalized
# # Find the distance between select player and everyone else.
# euclidean_distances = players_normalized.apply(
# lambda row: distance.euclidean(row, selected_normalized), axis=1
# )
# # Create a new dataframe with distances.
# distance_frame = pd.DataFrame(
# data={"dist": euclidean_distances, "idx": euclidean_distances.index}
# )
# distance_frame.sort_values("dist", inplace=True)
# most_similar_players = distance_frame.iloc[1:4]["idx"]
# # most_similar_players = players.loc[nearest_neighbours] #["Name"]
# # print most_similar_players
# print("... is similar to... ", end=" ")
# print(list(most_similar_players.index.values))
# def make_prediction():
# players = get_players()
# pred_col = "AssistsP90"
# x_columns = list(players.columns.values)
# x_columns.remove(pred_col)
# y_column = [pred_col]
# # # The columns that we will be making predictions with.
# # x_columns = ['Age', 'Height', 'Weight', 'AerialsWonP90', 'AssistsP90', 'BadControlP90', 'BlocksP90', 'CalledOffsideP90', 'ClearancesP90', 'CrossesP90', 'DispossesedP90', 'DribblesP90', 'DribblesAgainstP90', 'FirstYellowCardsP90', 'FoulsCommitedP90', 'FoulsSufferedP90', 'GoalsConcededP90', 'InterceptionsP90', 'KeyPassesP90', 'LongBallsP90', 'NonPenaltyGoalsP90', 'OffsidesWonP90', 'OwnGoalsP90', 'PassesP90', 'PenaltyGoalsP90', 'RedCardsP90', 'SavesP90', 'ShotsP90', 'ShotsFacedP90', 'ShotsOnTargetP90', 'TacklesP90', 'ThroughBallsP90', 'YellowCardsP90', 'MinutesPGm']
# # print x_columns
# # # The column that we want to predict.
# # y_column = [pred_col]
# # print y_column
# ###Generating training and testing sets
# # Randomly shuffle the index of nba.
# random_indices = permutation(players.index)
# # Set a cutoff for how many items we want in the test set (in this case 1/3 of the items)
# test_cutoff = math.floor(len(players) / 3)
# # Generate the test set by taking the first 1/3 of the randomly shuffled indices.
# test = players.loc[random_indices[1:test_cutoff]]
# test.fillna(0, inplace=True)
# # test.info()
# # print test.describe(include="all")
# # Generate the train set with the rest of the data.
# train = players.loc[random_indices[test_cutoff:]]
# train.fillna(0, inplace=True)
# # train.info()
# # print train.describe(include="all")
# ###Using sklearn for k nearest neighbors
# # print "Using sklearn for k nearest neighbors..."
# from sklearn.neighbors import KNeighborsRegressor
# # Create the knn model.
# # Look at the five closest neighbors.
# knn = KNeighborsRegressor(n_neighbors=5)
# # print knn
# # Fit the model on the training data.
# knn.fit(train[x_columns], train[y_column])
# # print knn
# # Make point predictions on the test set using the fit model.
# predictions = knn.predict(test[x_columns])
# # print "\nPredicted PointsPGm:"
# # print predictions.shape
# ###Computing error
# # Get the actual values for the test set.
# actual = test[y_column].copy()
# # Compute the mean squared error of our predictions.
# mse = (((predictions - actual) ** 2).sum()) / len(predictions)
# print("\nMean Squared Error:")
# print(mse)
# actual["Predicted" + pred_col] = predictions
# actual["Diff"] = actual[pred_col] - actual["Predicted" + pred_col]
# print("\nActual and Predicted " + pred_col + ":")
# print(actual.sort_values(["Diff"], ascending=False))
# def test_opinions():
# players = get_players()
# players = players.reset_index()
# players = players[
# players["Name"].isin(
# [
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# "<NAME>",
# ]
# )
# ]
# # df_info(players)
# players["ShotAccuracy"] = players["ShotsOnTargetP90"] / players["ShotsP90"]
# players["ShotEfficiency"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsP90"]
# players["ShotPercentage"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsOnTargetP90"]
# players = players[
# [
# "Name",
# "NonPenaltyGoalsP90",
# "PenaltyGoalsP90",
# "ShotsP90",
# "ShotsOnTargetP90",
# "ShotAccuracy",
# "ShotEfficiency",
# "ShotPercentage",
# ]
# ]
# # df_info(players)
# print(players.describe())
# print(players)
def main():
"""Use | |
from manim_imports_ext import *
import scipy.stats
# Cumulative recorded COVID-19 case counts, one entry per day -- presumably
# daily totals starting Jan 22, 2020 (see the date labels attached in
# IntroducePlot.get_axes); confirm against the original data source.
CASE_DATA = [
    9,
    15,
    30,
    40,
    56,
    66,
    84,
    102,
    131,
    159,
    173,
    186,
    190,
    221,
    248,
    278,
    330,
    354,
    382,
    461,
    481,
    526,
    587,
    608,
    697,
    781,
    896,
    999,
    1124,
    1212,
    1385,
    1715,
    2055,
    2429,
    2764,
    3323,
    4288,
    5364,
    6780,
    8555,
    10288,
    12742,
    14901,
    17865,
    21395,
    # Later data points, currently excluded from the animation:
    # 25404,
    # 29256,
    # 33627,
    # 38170,
    # 45421,
    # 53873,
]
# Color used for infected characters and infection effects.
SICKLY_GREEN = "#9BBD37"
class IntroducePlot(Scene):
    """Animate the day-by-day growth of CASE_DATA on a set of axes.

    A tracer dot walks along the data while a counter ticks up; the y axis
    is compressed twice as the data outgrows it, and finally the
    day-over-day growth factors are annotated between consecutive points.
    """

    def construct(self):
        axes = self.get_axes()
        self.add(axes)
        # Dots: one per day, pinned to data coordinates via an updater so
        # they follow any later rescaling of the axes. The lambda binds the
        # dot through its parameter, so there is no late-binding problem.
        dots = VGroup()
        for day, nc in zip(it.count(1), CASE_DATA):
            dot = Dot()
            dot.set_height(0.075)
            dot.x = day
            dot.y = nc
            dot.axes = axes
            dot.add_updater(lambda d: d.move_to(d.axes.c2p(d.x, d.y)))
            dots.add(dot)
        dots.set_color(YELLOW)
        # Rescale y axis: start zoomed in 25x so the small early numbers
        # are visible; attach the gridline groups so they stretch with it.
        origin = axes.c2p(0, 0)
        axes.y_axis.tick_marks.save_state()
        for tick in axes.y_axis.tick_marks:
            tick.match_width(axes.y_axis.tick_marks[0])
        axes.y_axis.add(
            axes.h_lines,
            axes.small_h_lines,
            axes.tiny_h_lines,
            axes.tiny_ticks,
        )
        axes.y_axis.stretch(25, 1, about_point=origin)
        dots.update()
        self.add(axes.small_y_labels)
        self.add(axes.tiny_y_labels)
        # Add title
        title = self.get_title(axes)
        self.add(title)
        # Introduce the data: start on day 10 with a tracer dot, a live
        # counter above it, and a vertical drop line to the x axis.
        day = 10
        self.add(*dots[:day + 1])
        dot = Dot()
        dot.match_style(dots[day])
        dot.replace(dots[day])
        count = Integer(CASE_DATA[day])
        count.add_updater(lambda m: m.next_to(dot, UP))
        count.add_updater(lambda m: m.set_stroke(BLACK, 5, background=True))
        v_line = Line(DOWN, UP)
        v_line.set_stroke(YELLOW, 1)
        v_line.add_updater(
            lambda m: m.put_start_and_end_on(
                axes.c2p(
                    axes.x_axis.p2n(dot.get_center()),
                    0,
                ),
                dot.get_bottom(),
            )
        )
        self.add(dot)
        self.add(count)
        self.add(v_line)
        # Walk the tracer dot through the remaining days, drawing and then
        # un-drawing a pink connector for each step.
        for new_day in range(day + 1, len(dots)):
            new_dot = dots[new_day]
            new_dot.update()
            line = Line(dot.get_center(), new_dot.get_center())
            line.set_stroke(PINK, 3)
            self.add(line, dot)
            self.play(
                dot.move_to, new_dot.get_center(),
                dot.set_color, RED,
                ChangeDecimalToValue(count, CASE_DATA[new_day]),
                ShowCreation(line),
            )
            line.rotate(PI)
            self.play(
                dot.set_color, YELLOW,
                Uncreate(line),
                run_time=0.5
            )
            self.add(dots[new_day])
            day = new_day
            # First axis compression (day 27): undo the 25x zoom down to 5x,
            # fading the finest gridlines/labels.
            if day == 27:
                self.add(
                    axes.y_axis, axes.tiny_y_labels, axes.tiny_h_lines, axes.tiny_ticks,
                    title
                )
                self.play(
                    axes.y_axis.stretch, 0.2, 1, {"about_point": origin},
                    VFadeOut(axes.tiny_y_labels),
                    VFadeOut(axes.tiny_h_lines),
                    VFadeOut(axes.tiny_ticks),
                    MaintainPositionRelativeTo(dot, dots[new_day]),
                    run_time=2,
                )
                self.add(axes, title, *dots[:new_day])
            # Second axis compression (day 36): back to the natural scale.
            if day == 36:
                self.add(axes.y_axis, axes.small_y_labels, axes.small_h_lines, title)
                self.play(
                    axes.y_axis.stretch, 0.2, 1, {"about_point": origin},
                    VFadeOut(axes.small_y_labels),
                    VFadeOut(axes.small_h_lines),
                    MaintainPositionRelativeTo(dot, dots[new_day]),
                    run_time=2,
                )
                self.add(axes, title, *dots[:new_day])
        count.add_background_rectangle()
        count.background_rectangle.stretch(1.1, 0)
        self.add(count)
        # Show multiplications: for each consecutive pair of dots from day 25
        # on, display "x <ratio>" computed from screen-space y positions. The
        # previous label morphs toward the new one's position as it appears.
        last_label = VectorizedPoint(dots[25].get_center())
        last_line = VMobject()
        for d1, d2 in zip(dots[25:], dots[26:]):
            line = Line(
                d1.get_top(),
                d2.get_corner(UL),
                path_arc=-90 * DEGREES,
            )
            line.set_stroke(PINK, 2)
            label = VGroup(
                Tex("\\times"),
                DecimalNumber(
                    axes.y_axis.p2n(d2.get_center()) /
                    axes.y_axis.p2n(d1.get_center()),
                )
            )
            label.arrange(RIGHT, buff=SMALL_BUFF)
            label.set_height(0.25)
            label.next_to(line.point_from_proportion(0.5), UL, SMALL_BUFF)
            label.match_color(line)
            label.add_background_rectangle()
            label.save_state()
            label.move_to(last_label)
            label.set_opacity(0)
            self.play(
                ShowCreation(line),
                Restore(label),
                last_label.move_to, label.saved_state,
                VFadeOut(last_label),
                FadeOut(last_line),
            )
            last_line = line
            last_label = label
        self.wait()
        self.play(
            FadeOut(last_label),
            FadeOut(last_line),
        )

    def get_title(self, axes):
        """Return the scene title, positioned at the top edge."""
        title = TexText(
            "Recorded COVID-19 cases\\\\outside mainland China",
            tex_to_color_map={"COVID-19": RED}
        )
        title.next_to(axes.c2p(0, 1e3), RIGHT, LARGE_BUFF)
        title.to_edge(UP)
        title.add_background_rectangle()
        return title

    def get_axes(self, width=12, height=6):
        """Build the axes plus the date labels, tick variants, y labels and
        gridline groups (attached as attributes) used during rescaling."""
        n_cases = len(CASE_DATA)
        axes = Axes(
            x_range=(0, n_cases),
            y_range=(0, 25000, 1000),
            width=width,
            height=height,
        )
        axes.center()
        axes.to_edge(DOWN, buff=LARGE_BUFF)
        # Add dates: positions are offsets (in days) back from the last point.
        text_pos_pairs = [
            ("Mar 6", 0),
            ("Feb 23", -12),
            ("Feb 12", -23),
            ("Feb 1", -34),
            ("Jan 22", -44),
        ]
        labels = VGroup()
        extra_ticks = VGroup()
        for text, pos in text_pos_pairs:
            label = TexText(text)
            label.set_height(0.2)
            label.rotate(45 * DEGREES)
            axis_point = axes.c2p(n_cases + pos, 0)
            label.move_to(axis_point, UR)
            label.shift(MED_SMALL_BUFF * DOWN)
            label.shift(SMALL_BUFF * RIGHT)
            labels.add(label)
            tick = Line(UP, DOWN)
            tick.set_stroke(GREEN, 3)
            tick.set_height(0.25)
            tick.move_to(axis_point)
            extra_ticks.add(tick)
        axes.x_labels = labels
        axes.extra_x_ticks = extra_ticks
        axes.add(labels, extra_ticks)
        # Adjust y ticks: shrink all, then re-widen every 5th (the 5k marks).
        axes.y_axis.ticks.stretch(0.5, 0)
        axes.y_axis.ticks[0::5].stretch(2, 0)

        # Add y_axis_labels ("Nk" text) pinned to the matching tick marks.
        def get_y_labels(axes, y_values):
            labels = VGroup()
            for y in y_values:
                try:
                    label = TexText(f"{y}k")
                    label.set_height(0.25)
                    tick = axes.y_axis.ticks[y]
                    always(label.next_to, tick, LEFT, SMALL_BUFF)
                    labels.add(label)
                except IndexError:
                    pass
            return labels

        main_labels = get_y_labels(axes, range(5, 30, 5))
        axes.y_labels = main_labels
        axes.add(main_labels)
        axes.small_y_labels = get_y_labels(axes, range(1, 6))
        # Sub-1000 labels/ticks, shown only while the axis is fully zoomed in.
        tiny_labels = VGroup()
        tiny_ticks = VGroup()
        for y in range(200, 1000, 200):
            tick = axes.y_axis.ticks[0].copy()
            point = axes.c2p(0, y)
            tick.move_to(point)
            label = Integer(y)
            label.set_height(0.25)
            always(label.next_to, tick, LEFT, SMALL_BUFF)
            tiny_labels.add(label)
            tiny_ticks.add(tick)
        axes.tiny_y_labels = tiny_labels
        axes.tiny_ticks = tiny_ticks
        # Horizontal lines: three gridline densities for the three zoom levels.
        axes.h_lines = VGroup()
        axes.small_h_lines = VGroup()
        axes.tiny_h_lines = VGroup()
        group_range_pairs = [
            (axes.h_lines, 5e3 * np.arange(1, 6)),
            (axes.small_h_lines, 1e3 * np.arange(1, 5)),
            (axes.tiny_h_lines, 200 * np.arange(1, 5)),
        ]
        for group, _range in group_range_pairs:
            for y in _range:
                group.add(
                    Line(
                        axes.c2p(0, y),
                        axes.c2p(n_cases, y),
                    )
                )
            group.set_stroke(WHITE, 1, opacity=0.5)
        return axes
class Thumbnail(IntroducePlot):
    """Render a single static frame used as the video thumbnail."""

    def construct(self):
        axes = self.get_axes()
        self.add(axes)
        # One dot per day, with an extra provisional data point appended to
        # extend the curve. Copy CASE_DATA first: the previous version
        # appended to the module-level list in place, corrupting it for any
        # other scene rendered afterwards in the same process.
        data = list(CASE_DATA)
        data.append(25398)
        dots = VGroup()
        for day, nc in zip(it.count(1), data):
            dot = Dot()
            dot.set_height(0.2)
            dot.x = day
            dot.y = nc
            dot.axes = axes
            dot.add_updater(lambda d: d.move_to(d.axes.c2p(d.x, d.y)))
            dots.add(dot)
        dots.set_color(YELLOW)
        dots.set_submobject_colors_by_gradient(BLUE, GREEN, RED)
        self.add(dots)
        # Title (earlier discarded title/subtitle drafts removed).
        title = TexText("The early warning\\\\of ", "COVID-19\\\\")
        title.set_color_by_tex("COVID", RED)
        title.set_height(2.5)
        title.to_edge(UP, buff=LARGE_BUFF)
        self.add(title)
        # Drop into an interactive shell for manual thumbnail tweaking.
        self.embed()
class IntroQuestion(Scene):
    """Fade in four framing questions, then sweep them all off screen."""

    def construct(self):
        prompts = [
            "What is exponential growth?",
            "Where does it come from?",
            "What does it imply?",
            "When does it stop?",
        ]
        questions = VGroup(*[TexText(prompt) for prompt in prompts])
        questions.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
        # Reveal one question at a time, pausing between reveals.
        for question in questions:
            self.play(FadeIn(question, RIGHT))
            self.wait()
        # Clear the screen by dropping the questions downward in sequence.
        self.play(LaggedStartMap(
            FadeOutAndShift, questions,
            lambda m: (m, DOWN),
        ))
class ViralSpreadModel(Scene):
    """Toy infection simulation: a grid of characters in which each
    infected character exposes its nearest neighbors every round."""

    CONFIG = {
        "num_neighbors": 5,            # nearest neighbors exposed per round
        "infection_probability": 0.3,  # chance each exposure infects
        "random_seed": 1,
    }

    def construct(self):
        # Init population
        randys = self.get_randys()
        self.add(*randys)
        # Show the sicko
        self.show_patient0(randys)
        # Repeatedly spread
        for x in range(20):
            self.spread_infection(randys)

    def get_randys(self):
        """Build a jittered 10x15 grid of Randolphs, all marked uninfected."""
        randys = VGroup(*[
            Randolph()
            for x in range(150)
        ])
        for randy in randys:
            randy.set_height(0.5)
        randys.arrange_in_grid(10, 15, buff=0.5)
        randys.set_height(FRAME_HEIGHT - 1)
        # Offset every other row, then add per-character jitter so the
        # grid looks like a crowd rather than a lattice.
        for i in range(0, 10, 2):
            randys[i * 15:(i + 1) * 15].shift(0.25 * RIGHT)
        for randy in randys:
            randy.shift(0.2 * random.random() * RIGHT)
            randy.shift(0.2 * random.random() * UP)
            randy.infected = False
        randys.center()
        return randys

    def show_patient0(self, randys):
        """Pick a random character as patient zero and highlight them."""
        patient0 = random.choice(randys)
        patient0.infected = True
        circle = Circle()
        circle.set_stroke(SICKLY_GREEN)
        circle.replace(patient0)
        circle.scale(1.5)
        self.play(
            patient0.change, "sick",
            patient0.set_color, SICKLY_GREEN,
            ShowCreationThenFadeOut(circle),
        )

    def spread_infection(self, randys):
        """Run one round: every infected character coughs, then may infect
        its num_neighbors nearest neighbors with infection_probability."""
        E = self.num_neighbors
        inf_p = self.infection_probability
        cough_anims = []
        new_infection_anims = []
        # Cough flash for every currently infected character.
        for randy in randys:
            if randy.infected:
                cough_anims.append(Flash(
                    randy,
                    color=SICKLY_GREEN,
                    num_lines=16,
                    line_stroke_width=1,
                    flash_radius=0.5,
                    line_length=0.1,
                ))
        random.shuffle(cough_anims)
        self.play(LaggedStart(
            *cough_anims,
            run_time=1,
            lag_ratio=1 / len(cough_anims),
        ))
        # Determine new infections. Infection state is only committed after
        # the animations, so infections do not chain within a single round.
        newly_infected = []
        for randy in randys:
            if randy.infected:
                distances = [
                    get_norm(r2.get_center() - randy.get_center())
                    for r2 in randys
                ]
                # argsort[0] is randy itself (distance 0); take the next E.
                for i in np.argsort(distances)[1:E + 1]:
                    r2 = randys[i]
                    if random.random() < inf_p and not r2.infected and r2 not in newly_infected:
                        newly_infected.append(r2)
                        r2.generate_target()
                        r2.target.change("sick")
                        r2.target.set_color(SICKLY_GREEN)
                        new_infection_anims.append(MoveToTarget(r2))
        random.shuffle(new_infection_anims)
        self.play(LaggedStart(*new_infection_anims, run_time=1))
        for randy in newly_infected:
            randy.infected = True
class GrowthEquation(Scene):
def construct(self):
# Add labels
N_label = TexText("$N_d$", " = Number of cases on a given day", )
E_label = TexText("$E$", " = Average number of people someone infected is exposed to each day")
p_label = TexText("$p$", " = Probability of each exposure becoming an infection")
N_label[0].set_color(YELLOW)
E_label[0].set_color(BLUE)
p_label[0].set_color(TEAL)
labels = VGroup(
N_label,
E_label,
p_label
)
labels.arrange(DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
labels.set_width(FRAME_WIDTH - 1)
labels.to_edge(UP)
for label in labels:
self.play(FadeInFromDown(label))
self.wait()
delta_N = Tex("\\Delta", "N_d")
delta_N.set_color(YELLOW)
eq = Tex("=")
eq.center()
delta_N.next_to(eq, LEFT)
delta_N_brace = Brace(delta_N, DOWN)
delta_N_text = delta_N_brace.get_text("Change over a day")
nep = Tex("E", "\\cdot", "p", "\\cdot", "N_d")
nep[4].match_color(N_label[0])
nep[0].match_color(E_label[0])
nep[2].match_color(p_label[0])
nep.next_to(eq, RIGHT)
self.play(FadeIn(delta_N), FadeIn(eq))
self.play(
GrowFromCenter(delta_N_brace),
FadeIn(delta_N_text, 0.5 * UP),
)
self.wait()
self.play(LaggedStart(
TransformFromCopy(N_label[0], nep[4]),
TransformFromCopy(E_label[0], nep[0]),
TransformFromCopy(p_label[0], nep[2]),
FadeIn(nep[1]),
FadeIn(nep[3]),
lag_ratio=0.2,
run_time=2,
))
self.wait()
self.play(ShowCreationThenFadeAround(
nep[-1],
surrounding_rectangle_config={"color": RED},
))
# Recursive equation
lhs = Tex("N_{d + 1}", "=")
lhs[0].set_color(YELLOW)
lhs.move_to(eq, RIGHT)
lhs.shift(DOWN)
rhs = VGroup(
nep[-1].copy(),
Tex("+"),
nep.copy(),
)
rhs.arrange(RIGHT)
rhs.next_to(lhs, RIGHT)
self.play(
FadeOut(delta_N_brace),
FadeOut(delta_N_text),
FadeIn(lhs, UP),
)
self.play(FadeIn(rhs[:2]))
self.play(TransformFromCopy(nep, rhs[2]))
self.wait()
alt_rhs = Tex(
"(", "1", "+", "E", "\\cdot", "p", ")", "N_d",
tex_to_color_map={
"E": BLUE,
"p": TEAL,
"N_d": YELLOW,
}
)
new_lhs = lhs.copy()
new_lhs.shift(DOWN)
alt_rhs.next_to(new_lhs, RIGHT)
self.play(TransformFromCopy(lhs, new_lhs))
self.play(
TransformFromCopy(rhs[0], alt_rhs[7].copy()),
TransformFromCopy(rhs[2][4], alt_rhs[7]),
)
self.play(
TransformFromCopy(rhs[1][0], alt_rhs[2]),
TransformFromCopy(rhs[2][0], alt_rhs[3]),
TransformFromCopy(rhs[2][1], alt_rhs[4]),
TransformFromCopy(rhs[2][2], alt_rhs[5]),
TransformFromCopy(rhs[2][3], alt_rhs[6]),
FadeIn(alt_rhs[0]),
FadeIn(alt_rhs[1]),
)
self.wait()
# Comment on factor
brace = Brace(alt_rhs[:7], DOWN)
text = TexText("For example, ", "1.15")
text.next_to(brace, DOWN)
self.play(
GrowFromCenter(brace),
FadeIn(text, 0.5 * UP)
)
self.wait()
# Show exponential
eq_group = VGroup(
delta_N, eq, nep,
lhs, rhs,
new_lhs, alt_rhs,
brace,
text,
)
self.clear()
self.add(labels, eq_group)
self.play(ShowCreationThenFadeAround(
VGroup(delta_N, eq, nep),
surrounding_rectangle_config={"color": RED},
))
self.play(ShowCreationThenFadeAround(
VGroup(new_lhs, alt_rhs, brace, text),
surrounding_rectangle_config={"color": RED},
))
self.wait()
self.play(eq_group.to_edge, LEFT, LARGE_BUFF)
exp_eq = Tex(
"N_d = (1 + E \\cdot p)^{d} \\cdot N_0",
tex_to_color_map={
"N_d": YELLOW,
"E": BLUE,
"p": TEAL,
"{d}": YELLOW,
"N_0": YELLOW,
}
)
exp_eq.next_to(alt_rhs, RIGHT, buff=3)
arrow = Arrow(alt_rhs.get_right(), exp_eq.get_left())
self.play(
GrowArrow(arrow),
FadeIn(exp_eq, 2 * LEFT)
)
self.wait()
# Discuss factor in front of N
ep = nep[:3]
ep_rect | |
<reponame>induane/harrenrpg<filename>src/pyscroll/orthographic.py
# Standard
import logging
import math
import time
from itertools import chain, groupby, product
from operator import gt, itemgetter
# Third Party
from pygame import Rect, Surface, transform, SRCALPHA
# Project
from .quadtree import FastQuadTree
from .lib import surface_clipping_context
LOG = logging.getLogger(__name__)
class BufferedRenderer:
"""
Renderer that support scrolling, zooming, layers, and animated tiles
The buffered renderer must be used with a data class to get tile, shape,
and animation information. See the data class api in pyscroll.data, or
use the built-in pytmx support for loading maps created with Tiled.
NOTE: colorkey and alpha transparency is quite slow
"""
_rgba_clear_color = 0, 0, 0, 0
_rgb_clear_color = 0, 0, 0
    def __init__(
        self,
        data,
        size,
        clamp_camera=True,
        colorkey=None,
        alpha=False,
        time_source=time.time,
        scaling_function=transform.scale,
        tall_sprites=0,
        **kwargs
    ):
        """Initialize the renderer.

        :param data: map data source (see pyscroll data class API)
        :param size: (width, height) pixel size of the view
        :param clamp_camera: if True, the camera cannot scroll past map edges
        :param colorkey: colorkey transparency color (mutually exclusive
            with alpha)
        :param alpha: use per-pixel alpha transparency (mutually exclusive
            with colorkey)
        :param time_source: callable used to drive tile animations
        :param scaling_function: function used to scale the zoom buffer
        :param tall_sprites: see the tall-sprites note below
        :raises ValueError: if both colorkey and alpha are requested
        """
        bg_fill = kwargs.get("background_color")
        if bg_fill:
            self._rgb_clear_color = bg_fill
            self._rgba_clear_color = bg_fill
        # default options
        self.data = data  # reference to data source
        self.clamp_camera = clamp_camera  # if true, cannot scroll past map edge
        self.time_source = time_source  # determines how tile animations are processed
        self.scaling_function = scaling_function  # what function to use when scaling the zoom buffer
        self.tall_sprites = tall_sprites  # correctly render tall sprites
        self.map_rect = None  # pygame rect of entire map
        # Tall sprites:
        # tall_sprites, if greater than 0, is the number of pixels from the
        # bottom of tall sprites which is compared against the bottom of a
        # tile on the same layer as the sprite. In other words, if set, it
        # prevents tiles from being drawn over sprites which are taller than
        # the tile height; the value is how far apart sprite and tile bottoms
        # have to be before the tile is drawn over the sprite. Reasonable
        # values are about 10% of the tile height.
        # This feature only works for the first layer over the tall sprite;
        # all other layers will be drawn over the tall sprite.
        # internal private defaults
        if colorkey and alpha:
            LOG.error("cannot select both colorkey and alpha. choose one.")
            raise ValueError
        elif colorkey:
            self._clear_color = colorkey
        elif alpha:
            self._clear_color = self._rgba_clear_color
        else:
            self._clear_color = None
        # private attributes
        self._anchored_view = True  # if true, map is fixed to upper left corner
        self._previous_blit = None  # rect of the previous map blit when map edges are visible
        self._size = None  # actual pixel size of the view, as it occupies the screen
        self._redraw_cutoff = None  # size of dirty tile edge that will trigger full redraw
        self._x_offset = None  # offsets are used to scroll map in sub-tile increments
        self._y_offset = None
        self._buffer = None  # complete rendering of tilemap
        self._tile_view = None  # this rect represents each tile on the buffer
        self._half_width = None  # 'half x' attributes are used to reduce division ops.
        self._half_height = None
        self._tile_queue = None  # tiles queued to be draw onto buffer
        self._animation_queue = None  # heap queue of animation token; schedules tile changes
        self._layer_quadtree = None  # used to draw tiles that overlap optional surfaces
        self._zoom_buffer = None  # used to speed up zoom operations
        self._zoom_level = 1.0  # negative numbers make map smaller, positive: bigger
        self._real_ratio_x = 1.0  # zooming slightly changes aspect ratio; this compensates
        self._real_ratio_y = 1.0  # zooming slightly changes aspect ratio; this compensates
        self.view_rect = Rect(0, 0, 0, 0)  # this represents the viewable map pixels
        # Use the faster batched blit path when this pygame exposes
        # Surface.blits -- presumably _flush_tile_queue_blits is defined
        # further down this class; confirm in the full file.
        if hasattr(Surface, "blits"):
            self._flush_tile_queue = self._flush_tile_queue_blits
        self.set_size(size)
def scroll(self, vector):
"""scroll the background in pixels
:param vector: (int, int)
"""
self.center((vector[0] + self.view_rect.centerx, vector[1] + self.view_rect.centery))
    def center(self, coords):
        """Center the map view on a pixel coordinate.

        Float numbers will be rounded. Depending on how far the view moved,
        this either scrolls the existing buffer and fills in the exposed
        edge, or forces a full redraw.

        :param coords: (number, number)
        """
        x, y = round(coords[0]), round(coords[1])
        self.view_rect.center = x, y
        mw, mh = self.data.map_size    # map size, in tiles
        tw, th = self.data.tile_size   # tile size, in pixels
        vw, vh = self._tile_view.size  # view size, in tiles
        # prevent camera from exposing edges of the map
        if self.clamp_camera:
            self._anchored_view = True
            self.view_rect.clamp_ip(self.map_rect)
            x, y = self.view_rect.center
        # calc the new position in tiles and pixel offset
        left, self._x_offset = divmod(x - self._half_width, tw)
        top, self._y_offset = divmod(y - self._half_height, th)
        right = left + vw
        bottom = top + vh
        if not self.clamp_camera:
            # not anchored, so the rendered map is being offset by values larger
            # than the tile size. this occurs when the edges of the map are inside
            # the screen. a situation like this shows a background under the map.
            self._anchored_view = True
            dx = int(left - self._tile_view.left)
            dy = int(top - self._tile_view.top)
            # Each side is pinned independently; when pinned, the offset is
            # expressed directly in pixels rather than the sub-tile remainder.
            if mw < vw or left < 0:
                left = 0
                self._x_offset = x - self._half_width
                self._anchored_view = False
            elif right > mw:
                left = mw - vw
                self._x_offset += dx * tw
                self._anchored_view = False
            if mh < vh or top < 0:
                top = 0
                self._y_offset = y - self._half_height
                self._anchored_view = False
            elif bottom > mh:
                top = mh - vh
                self._y_offset += dy * th
                self._anchored_view = False
        # adjust the view if the view has changed without a redraw
        dx = int(left - self._tile_view.left)
        dy = int(top - self._tile_view.top)
        view_change = max(abs(dx), abs(dy))
        if view_change and (view_change <= self._redraw_cutoff):
            # Small move: shift the buffer and queue only the exposed edges.
            self._buffer.scroll(-dx * tw, -dy * th)
            self._tile_view.move_ip(dx, dy)
            self._queue_edge_tiles(dx, dy)
            self._flush_tile_queue(self._buffer)
        elif view_change > self._redraw_cutoff:
            # Large move: cheaper to redraw everything than to patch edges.
            LOG.info("scrolling too quickly. redraw forced")
            self._tile_view.move_ip(dx, dy)
            self.redraw_tiles(self._buffer)
def draw(self, surface, rect, surfaces=None):
"""Draw the map onto a surface
pass a rect that defines the draw area for:
drawing to an area smaller that the whole window/screen
surfaces may optionally be passed that will be blitted onto the surface.
this must be a sequence of tuples containing a layer number, image, and
rect in screen coordinates. surfaces will be drawn in order passed,
and will be correctly drawn with tiles from a higher layer overlapping
the surface.
surfaces list should be in the following format:
[ (layer, surface, rect), ... ]
or this:
[ (layer, surface, rect, blendmode_flags), ... ]
:param surface: pygame surface to draw to
:param rect: area to draw to
:param surfaces: optional sequence of surfaces to interlace between tiles
:return rect: area that was drawn over
"""
if self._zoom_level == 1.0:
self._render_map(surface, rect, surfaces)
else:
self._render_map(self._zoom_buffer, self._zoom_buffer.get_rect(), surfaces)
self.scaling_function(self._zoom_buffer, rect.size, surface)
return self._previous_blit.copy()
    @property
    def zoom(self):
        """Zoom the map in or out.

        Increase this number to make the map appear to come closer to the
        camera; decrease it to make the map appear to move away. Default
        value is 1.0. This value cannot be negative or 0.0.

        :return: float
        """
        return self._zoom_level
@zoom.setter
def zoom(self, value):
    # Resize the zoom buffer for the new level, then cache the ratios
    # between the on-screen size and the actual buffer size so points
    # can be translated between the two coordinate spaces.
    requested = self._calculate_zoom_buffer_size(self._size, value)
    self._zoom_level = value
    self._initialize_buffers(requested)
    actual = self._zoom_buffer.get_size()
    self._real_ratio_x = float(self._size[0]) / actual[0]
    self._real_ratio_y = float(self._size[1]) / actual[1]
def set_size(self, size):
    """Set the pixel size of the camera/view of the group.

    This is an expensive operation (buffers are reallocated); do it only
    when absolutely needed.

    :param size: (width, height) pixel size of the view
    """
    new_buffer_size = self._calculate_zoom_buffer_size(size, self._zoom_level)
    self._size = size
    self._initialize_buffers(new_buffer_size)
def redraw_tiles(self, surface):
    """Redraw the visible portion of the buffer -- this is slow."""
    # TODO/BUG: Redraw animated tiles correctly. They are getting reset here
    # Full repaint: clear the internal buffer, re-query every tile in the
    # current view from the map data, then flush the queue onto *surface*.
    LOG.debug("pyscroll buffer redraw")
    self._clear_surface(self._buffer)
    self._tile_queue = self.data.get_tile_images_by_rect(self._tile_view)
    self._flush_tile_queue(surface)
def get_center_offset(self):
    """Return the (x, y) offset that converts world coords to screen coords.

    :return: int, int
    """
    offset_x = self._half_width - self.view_rect.centerx
    offset_y = self._half_height - self.view_rect.centery
    return offset_x, offset_y
def translate_point(self, point):
    """Translate world coordinates to screen coordinates.

    Respects the current zoom level.

    :param point: (x, y) pair in world coordinates
    :rtype: tuple
    """
    mx, my = self.get_center_offset()
    if self._zoom_level == 1.0:
        return point[0] + mx, point[1] + my
    # BUGFIX: the x coordinate was rounded *before* the zoom ratio was
    # applied -- int(round((x + mx)) * ratio) -- while y was rounded
    # after scaling.  Both axes now scale first and round afterwards,
    # matching the y-axis (and translate_rect) behaviour.
    return (int(round((point[0] + mx) * self._real_ratio_x)),
            int(round((point[1] + my) * self._real_ratio_y)))
def translate_rect(self, rect):
    """Translate rect position and size to screen coordinates.

    Respects the current zoom level.

    :rtype: Rect
    """
    mx, my = self.get_center_offset()
    x, y, w, h = rect
    if self._zoom_level == 1.0:
        return Rect(x + mx, y + my, w, h)
    rx = self._real_ratio_x
    ry = self._real_ratio_y
    return Rect(round((x + mx) * rx), round((y + my) * ry),
                round(w * rx), round(h * ry))
def translate_points(self, points):
"""Translate coordinates and return screen coordinates
Will be | |
import pandas as pd
import pytest
from config import proteoforms, proteins, genes, sm
from lib.graph_database_access import fix_neo4j_values, get_low_level_pathways, \
get_reactions_by_pathway, get_pathways, get_reactions, get_complexes, get_complex_components_by_complex, \
make_proteoform_string, get_participants_by_pathway, get_components_by_pathway, get_complexes_by_pathway
@pytest.fixture(scope="session")
def glycolysis_participants_genes():
return get_participants_by_pathway("R-HSA-70171", genes)
@pytest.fixture(scope="session")
def glycolysis_participants_proteins():
return get_participants_by_pathway("R-HSA-70171", proteins)
@pytest.fixture(scope="session")
def glycolysis_components_proteins():
return get_components_by_pathway("R-HSA-70171", proteins)
@pytest.fixture(scope="session")
def glycolysis_participants_proteoforms():
return get_participants_by_pathway("R-HSA-70171", proteoforms)
@pytest.fixture(scope="session")
def glycolysis_participants_sm():
return get_participants_by_pathway("R-HSA-70171", sm)
@pytest.fixture(scope="session")
def ras_processing_participants_genes():
return get_participants_by_pathway("R-HSA-9648002", genes)
@pytest.fixture(scope="session")
def ras_processing_participants_proteins():
return get_participants_by_pathway("R-HSA-9648002", proteins)
@pytest.fixture(scope="session")
def ras_processing_participants_proteoforms():
return get_participants_by_pathway("R-HSA-9648002", proteoforms)
@pytest.fixture(scope="session")
def ras_processing_participants_sm():
return get_participants_by_pathway("R-HSA-9648002", sm)
def test_participant_records_columns_genes(ras_processing_participants_genes):
    """Gene-level participant records expose the full expected column set."""
    df = ras_processing_participants_genes
    for column in ("Pathway", "Reaction", "Entity", "Name",
                   "Type", "Id", "Database", "Role"):
        assert column in df.columns
def test_participant_records_columns_proteins(ras_processing_participants_proteins):
    """Protein-level participant records expose the full expected column set."""
    df = ras_processing_participants_proteins
    for column in ("Pathway", "Reaction", "Entity", "Name",
                   "Type", "Id", "Database", "Role"):
        assert column in df.columns
def test_participant_records_columns_proteoforms(ras_processing_participants_proteoforms):
    """Proteoform-level participant records expose the full expected column set."""
    df = ras_processing_participants_proteoforms
    for column in ("Pathway", "Reaction", "Entity", "Name",
                   "Type", "Id", "Database", "Role"):
        assert column in df.columns
def test_participant_records_columns_sm(ras_processing_participants_sm):
    """Small-molecule participant records expose the full expected column set."""
    df = ras_processing_participants_sm
    for column in ("Pathway", "Reaction", "Entity", "Name",
                   "Type", "Id", "Database", "Role"):
        assert column in df.columns
def test_fix_neo4j_values_empty_dataframe():
    """An empty frame passes through fix_neo4j_values as an empty DataFrame."""
    result = fix_neo4j_values(pd.DataFrame(), level="proteins")
    assert type(result) == pd.DataFrame
    assert len(result) == 0
    assert len(result.columns) == 0
def test_pathway_not_exists_returns_empty_dataframe():
    """An unknown pathway id yields an empty DataFrame rather than an error."""
    result = get_participants_by_pathway("blabla", genes)
    assert type(result) == pd.DataFrame
    assert len(result) == 0
    assert len(result.columns) == 0
# Test: Cypher query to get all pathways gets all of them correctly
def test_get_low_level_pathways():
    """get_low_level_pathways returns only leaf pathways, not parent pathways.

    BUGFIX: this test was named ``test_get_pathways``, the same as a later
    test in this module; the later definition shadowed this one, so pytest
    never collected or ran it.  Renamed to match the function under test.
    """
    pathways = get_low_level_pathways()
    assert len(pathways) == 1657
    assert type(pathways) == pd.DataFrame
    # Leaf pathways are present...
    assert ((pathways['stId'] == "R-HSA-110056") & (pathways['displayName'] == "MAPK3 (ERK1) activation")).any()
    assert ((pathways['stId'] == "R-HSA-69200") & (pathways[
        'displayName'] == "Phosphorylation of proteins involved in G1/S transition by active Cyclin E:Cdk2 complexes")).any()
    assert ((pathways['stId'] == "R-HSA-6782135") & (pathways['displayName'] == "Dual incision in TC-NER")).any()
    # ...while top-level (parent) pathways are excluded.
    assert not ((pathways['stId'] == 'R-HSA-9612973') & (pathways['displayName'] == 'Autophagy')).any()
    assert not ((pathways['stId'] == 'R-HSA-1640170') & (pathways['displayName'] == 'Cell Cycle')).any()
    assert not ((pathways['stId'] == 'R-HSA-70171') & (pathways['displayName'] == 'Glycolysis')).any()
# Test: Query to get reactions of pathway returns the correct list of reactions
def test_get_reactions_of_pathway():
    """Pathway R-HSA-170822 yields its five known reactions."""
    reactions = get_reactions_by_pathway("R-HSA-170822")
    assert len(reactions) == 5
    for reaction_id in ("R-HSA-170824", "R-HSA-170796", "R-HSA-170825"):
        assert (reactions['reaction'] == reaction_id).any()
# Test: Query to get participants of a pathway gets all participant reactions.
def test_query_for_pathway_participants_has_all_reactions(glycolysis_participants_proteins):
    """All 15 reactions of Glycolysis appear among the participant records."""
    df = glycolysis_participants_proteins
    assert df['Reaction'].nunique() == 15
    for reaction_id in ("R-HSA-8955794", "R-HSA-6799604", "R-HSA-70467"):
        assert (df['Reaction'] == reaction_id).any()
# Test: Query to get participants of a pathway returns all small molecule participants
def test_query_for_pathway_participans_returns_all_simple_molecules(glycolysis_participants_sm):
    """Glycolysis has 32 distinct SimpleEntity participants."""
    simple = glycolysis_participants_sm.loc[glycolysis_participants_sm['Type'] == 'SimpleEntity']
    print(simple)
    assert simple['Entity'].nunique() == 32
# Test: Query to get participants of a pathway returns all gene participants
def test_query_for_pathway_participans_returns_all_ewas(glycolysis_participants_proteins):
    """Glycolysis has 31 distinct EntityWithAccessionedSequence participants."""
    ewas = glycolysis_participants_proteins.loc[
        glycolysis_participants_proteins['Type'] == 'EntityWithAccessionedSequence']
    print(ewas)
    assert ewas['Entity'].nunique() == 31
@pytest.fixture(scope="session")
def participant_proteins_Erythrocytes_take_up_carbon_dioxide_and_release_oxygen():
return get_participants_by_pathway("R-HSA-1237044", proteins)
# Test: Query for participants of a pathway returns all the gene and small molecule participants decomposing complexes
# Pathway R-HSA-1237044 has complex participants in its reactions, some complexes also have complex components.
# Checks if the component molecules of the complex components are in the result
def test_query_for_pathway_participants_decomposes_complexes(
        participant_proteins_Erythrocytes_take_up_carbon_dioxide_and_release_oxygen):
    df = participant_proteins_Erythrocytes_take_up_carbon_dioxide_and_release_oxygen
    # Reaction R-HSA-1237325 has complex participant R-HSA-1237320, whose
    # member proteins must appear in the decomposed result.
    # NOTE(review): the original comment says the complex has 6 component
    # molecules but only 5 accessions are asserted -- confirm the sixth.
    for accession in ("P22748", "P29972", "P02730", "P68871", "P69905"):
        assert (df['Id'] == accession).any()
# Test: Query for participants of a pathway returns all the gene and small molecule participants decomposing sets
# Pathway R-HSA-70171 has reactions with EntitySets as participants, like reaction R-HSA-70420 with set R-HSA-450097
def test_query_for_pathway_participants_decomposes_sets(
        participant_proteins_Erythrocytes_take_up_carbon_dioxide_and_release_oxygen):
    df = participant_proteins_Erythrocytes_take_up_carbon_dioxide_and_release_oxygen
    # Members of DefinedSet R-HSA-450097 must appear in the result.
    # NOTE(review): the set is said to have 4 members but only 2 are
    # asserted -- confirm coverage of the remaining members.
    for accession in ("P00915", "P00918"):
        assert (df['Id'] == accession).any()
# Test: If small molecules disabled, cypher query returns only the gene direct participants
def test_query_for_pathway_participants_disable_small_molecules(glycolysis_participants_proteins):
    df = glycolysis_participants_proteins
    assert len(df) == 31  # Only EWAS
    # EWAS participants are present...
    for entity in ("R-HSA-5696062", "R-HSA-6798334", "R-HSA-6799597"):
        assert (df['Entity'] == entity).any()
    # ...while small-molecule participants are absent.
    for entity in ("R-ALL-449701", "R-ALL-70113", "R-ALL-217314"):
        assert (df['Entity'] != entity).all()
# Test: If small molecules disabled, cypher query returns only the gene complex decomposed participants
def test_query_for_pathway_participants_complexes_show_only_ewas(glycolysis_participants_proteins):
    df = glycolysis_participants_proteins
    # Reaction R-HSA-5696021 has complex R-HSA-5696043 as a participant;
    # its two components are one EWAS and one small molecule.
    assert (df['Entity'] == 'R-HSA-5696062').any()  # EWAS ADPGK is kept
    assert (df['Entity'] != 'R-ALL-217314').all()   # small molecule Mg2+ is filtered
# Test: Get pathway participants as genes
def test_query_for_pathway_participants_as_genes(glycolysis_participants_genes):
    """At gene level, ids are gene names, never UniProt accessions.

    BUGFIX: the BPGM/P07738 assertion pair appeared twice verbatim
    (copy-paste); the duplicate pair was removed.  Behaviour of the test
    is unchanged.
    """
    df = glycolysis_participants_genes
    assert len(df) == 31
    # Participant protein Q9BRR6 should be in the result as a gene name: ADPGK not as UniProt accession
    assert (df['Id'] == 'ADPGK').any()
    assert (df['Id'] != 'Q9BRR6').all()
    # Similar for the next proteins
    assert (df['Id'] == 'BPGM').any()
    assert (df['Id'] != 'P07738').all()
    assert (df['Id'] == 'PKLR').any()
    # NOTE(review): the next line rechecks P07738 (BPGM's accession) right
    # after the PKLR assertion; PKLR's accession is P30613 -- confirm
    # which accession was intended here.
    assert (df['Id'] != 'P07738').all()
def test_query_for_pathway_participants_as_genes_trims_gene_id(glycolysis_participants_genes):
    """Gene ids are bare gene names: no isoform suffix, compartment or accession."""
    df = glycolysis_participants_genes
    assert ((df['Entity'] == 'R-HSA-70097') & (df['Id'] == "PKLR")).any()
    assert ((df['Entity'] == 'R-HSA-450658') & (df['Id'] == "PKM")).any()
    print(df.loc[(df['Entity'] == 'R-HSA-70097') & (df['Id'] == "PKLR")])
    for entity, untrimmed in (('R-HSA-450658', "PKM-2 [cytosol]"),
                              ('R-HSA-450658', "P62993"),
                              ('R-HSA-70097', "PKLR-1 [cytosol]"),
                              ('R-HSA-211388', "PKLR-2")):
        assert not ((df['Entity'] == entity) & (df['Id'] == untrimmed)).any()
def test_query_for_pathway_participants_replaces_small_molecule_names(glycolysis_participants_sm):
    """Small-molecule ids carry the 'sm_' prefix."""
    df = glycolysis_participants_sm
    for entity, sm_id in (('R-ALL-29370', 'sm_ADP'),
                          ('R-ALL-29926', 'sm_Mg2+'),
                          ('R-ALL-70106', 'sm_H+')):
        assert ((df['Entity'] == entity) & (df['Id'] == sm_id)).any()
# Test: Get pathway participants as proteins
def test_query_for_pathway_participants_as_proteins(glycolysis_participants_proteins):
    """At protein level, ids are UniProt accessions, never gene names."""
    df = glycolysis_participants_proteins
    for entity, gene_name, accession in (('R-HSA-5696062', 'ADPGK', 'Q9BRR6'),
                                         ('R-HSA-6798334', 'BPGM', 'P07738'),
                                         ('R-HSA-70412', 'HK3', 'P52790')):
        assert not ((df['Entity'] == entity) & (df['Id'] == gene_name)).any()
        assert ((df['Entity'] == entity) & (df['Id'] == accession)).any()
def test_query_for_pathway_participants_as_proteins_returns_genes(glycolysis_participants_proteins):
    """Protein-level records keep the previous gene name in a PrevId column."""
    df = glycolysis_participants_proteins
    assert 'PrevId' in df.columns
    assert ((df['Id'] == 'Q9BRR6') & (df['PrevId'] == 'ADPGK')).any()
    assert ((df['Id'] == 'P07738') & (df['PrevId'] == 'BPGM')).any()
    # Small molecules are excluded entirely at this level.
    assert not (df['Id'] == 'Mg2+').any()
    assert not (df['Id'] == 'ADP').any()
def test_query_for_pathway_participants_as_proteins_implicit_parameter(glycolysis_participants_proteins):
    """Same expectations as the explicit-parameter protein test."""
    df = glycolysis_participants_proteins
    for entity, gene_name, accession in (('R-HSA-5696062', 'ADPGK', 'Q9BRR6'),
                                         ('R-HSA-6798334', 'BPGM', 'P07738'),
                                         ('R-HSA-70412', 'HK3', 'P52790')):
        assert not ((df['Entity'] == entity) & (df['Id'] == gene_name)).any()
        assert ((df['Entity'] == entity) & (df['Id'] == accession)).any()
def test_query_for_pathway_participants_as_proteins_complex_should_not_be_in_records(glycolysis_participants_proteins):
    """Complexes are decomposed and must not appear as records themselves."""
    df = glycolysis_participants_proteins
    for complex_id in ("R-HSA-5696043", "R-HSA-6799598"):
        assert not (df["Entity"] == complex_id).any()
    assert not (df["Name"] == "BPGM dimer [cytosol]").any()
def test_get_pathways():
    """get_pathways returns the full pathway catalogue with ids and names."""
    df = get_pathways()
    assert type(df) == pd.DataFrame
    assert len(df) == 2516
    for column in ("stId", "displayName"):
        assert column in df.columns
    for st_id, name in (("R-HSA-9612973", "Autophagy"),
                        ("R-HSA-3000480", "Scavenging by Class A Receptors")):
        assert ((df['stId'] == st_id) & (df["displayName"] == name)).any()
def test_get_reactions():
    """The full reaction catalogue has the expected size."""
    assert len(get_reactions()) == 13661
def test_get_complexes():
    """The full complex catalogue has the expected size."""
    assert len(get_complexes()) == 13362
def test_get_complex_components_genes_returns_components():
    """Complex R-HSA-983126 decomposes into 264 gene-level components."""
    df = get_complex_components_by_complex("R-HSA-983126", genes)
    assert type(df) == pd.DataFrame
    assert len(df) == 264
    for entity, gene_name in (('R-HSA-141412', 'CDC20'),
                              ('R-HSA-174229', 'ANAPC2'),
                              ('R-HSA-939239', 'UBC')):
        assert ((df['Entity'] == entity) & (df['Id'] == gene_name)).any()
def test_get_complex_components_genes_returns_components_2():
    """Complex R-HSA-2168879 has exactly four distinct gene components."""
    df = get_complex_components_by_complex("R-HSA-2168879", genes)
    assert df['Id'].nunique() == 4
    for gene_name in ('HP', 'HBA1', 'HBB', 'CD163'):
        assert (df['Id'] == gene_name).any()
def test_get_complex_components_genes_with_non_existent_complex_returns_empty_list():
    """An unknown complex id produces an empty result."""
    assert len(get_complex_components_by_complex("fake_complex", genes)) == 0
def test_get_complex_components_genes_without_small_molecules():
    """Gene-level decomposition of R-HSA-2168879 excludes small molecules."""
    df = get_complex_components_by_complex("R-HSA-2168879", genes)
    assert len(df) == 5
    # Small-molecule components (O2, heme) are filtered out...
    for entity, molecule in (('R-ALL-352327', 'O2'), ('R-ALL-917877', 'heme')):
        assert not ((df['Entity'] == entity) & (df['Id'] == molecule)).any()
    # ...while gene components remain.
    assert ((df['Entity'] == 'R-HSA-2168862') & (df['Id'] == 'HBA1')).any()
def test_get_complex_components_proteins_returns_components():
    """Protein-level components of R-HSA-983126 use accessions, not gene names."""
    df = get_complex_components_by_complex("R-HSA-983126", proteins)
    assert type(df) == pd.DataFrame
    assert len(df) == 264
    for entity, gene_name, accession in (('R-HSA-141412', 'CDC20', 'Q12834'),
                                         ('R-HSA-174229', 'ANAPC2', 'Q9UJX6'),
                                         ('R-HSA-939239', 'UBC', 'P0CG48')):
        assert not ((df['Entity'] == entity) & (df['Id'] == gene_name)).any()
        assert ((df['Entity'] == entity) & (df['Id'] == accession)).any()
def test_get_complex_components_proteins_returns_components_2():
df = get_complex_components_by_complex("R-HSA-2168879", proteins)
assert df['Id'].nunique() == | |
#!/usr/bin/python3
import argparse, glob, os, sys, shutil
import datetime, dateutil.parser, pytz, time
import pydoc
import requests, json
import sqlite_api
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
# DEBUG
# import pdb; pdb.set_trace()
##
# CLASSES
##
class Installations:
    """In-memory view of the installations/meters SQLite database.

    Loads the installation, meter and data-location tables via openDB()
    and offers the lookups used by the scraping/posting pipeline.
    """
    def __init__(self):
        self.db = None                 # sqlite_api.sql handle (set in openDB)
        self.dataLocations = None      # {urlCode: {dataMode: urlTemplate}}
        self.installationData = None   # rows of the `installations` table
        self.meters = None             # {credCode: meter row}
        self.options = {}              # free-form options (see set/getOption)
        self.tzTarget = "US/Alaska"    # local timezone of the meter readings
        self.tzObj = pytz.timezone(self.tzTarget)
        self.tzUTC = pytz.timezone("UTC")
    def hasUnits(self, installationName):
        """Return True when the site has rows in `installUnits`.

        NOTE(review): the site name is interpolated into the SQL text;
        acceptable for trusted config data, but not injection-safe.
        """
        result = False
        s = "SELECT * FROM installUnits WHERE siteName=\"%s\"" % (installationName)
        self.db.query(s)
        if len(self.db.data) > 0:
            result = True
        return result
    def getActiveMeters(self):
        """Return enabled installations plus their sorted credential codes.

        :return: dict with 'sites' (installation rows with siteEnabled > 0)
            and 'credentials' (unique credCode values, sorted)
        """
        # Get active installations
        ##
        act = {
            'sites': [],
            'credentials': []
        }
        for i in self.installationData:
            if i['siteEnabled'] > 0:
                act['sites'].append(i)
                cc = i['credCode']
                if not(cc in act['credentials']):
                    act['credentials'].append(cc)
        act['credentials'].sort()
        return act
    def getOption(self, opt):
        '''Get an option'''
        # Returns None when the option was never set.
        if opt in self.options.keys():
            return self.options[opt]
        return None
    def getUnits(self, installationName):
        """Map unit display names to their site-unit ids for one site."""
        s = "SELECT * FROM installUnits WHERE siteName=\"%s\"" % (installationName)
        self.db.query(s)
        unitNames = {}
        for r in self.db.data:
            unitNames[r['unitName']] = r['siteUnit']
        return unitNames
    def openDB(self, dbname):
        """Load installations, meters and data locations from the database.

        Also backfills `installations.meterID` from the meter table when
        it is missing, matched by credential code.

        NOTE(review): the connection is opened with the global `dbConfig`,
        not the `dbname` argument -- `dbname` is only used for the file
        existence check.  Confirm that is intentional.
        """
        if os.path.isfile(dbname):
            self.db = sqlite_api.sql(dbConfig)
            self.db.query("SELECT * FROM installations")
            self.installationData = self.db.data
            self.db.query("SELECT * FROM meters")
            meterData = self.db.data
            # Realign meters by credential code (credCode)
            ##
            self.meters = {}
            for m in meterData:
                c = m['credCode']
                self.meters[c] = m
            # Update any installation records with meterID
            # based on credential code. Detect mismatches.
            ##
            for i in self.installationData:
                if i['meterID'] == None:
                    mID = self.meters[i['credCode']]['meterID']
                    s = "UPDATE installations SET meterID=%s WHERE installID=%s" % (mID,i['installID'])
                    #print(s)
                    self.db.run(s)
            # Re-read installations so the backfilled meterIDs are visible.
            self.db.query("SELECT * FROM installations")
            self.installationData = self.db.data
            self.db.query("SELECT * FROM dataLocations")
            dataLocs = {}
            for rec in self.db.data:
                if not(rec['urlCode'] in dataLocs.keys()):
                    dataLocs[rec['urlCode']] = {}
                dataLocs[rec['urlCode']][rec['dataMode']] = rec['urlTemplate']
            self.dataLocations = dataLocs
        return
    def setOption(self, opt, val):
        '''Set an option'''
        self.options[opt] = val
        return
    def setTimezone(self, tzStr):
        """Change the local timezone used to interpret meter timestamps."""
        self.tzTarget = tzStr
        self.tzObj = pytz.timezone(self.tzTarget)
        return
class DebugOutput:
    """Timestamped debug logging into a file inside an existing directory.

    If *logDIR* does not exist the logger stays closed and every call is
    a silent no-op.
    """

    def __init__(self, logDIR, logFILE):
        self.logFN = None
        self.logDir = None
        self.logOpen = False
        if os.path.isdir(logDIR):
            self.logFN = open(logDIR + "/" + logFILE, "w")
            self.logDir = logDIR
            self.logOpen = True

    def msg(self, msg):
        """Write *msg* prefixed with a UTC timestamp (no-op when closed)."""
        if not self.logOpen:
            return
        stamp = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%S")
        self.logFN.write("%s %s\n" % (stamp, msg))

    def close(self):
        """Close the underlying file once; later calls are no-ops."""
        if not self.logOpen:
            return
        self.logFN.close()
        self.logOpen = False
class WebScraper:
def __init__(self,log=None,ins=None):
    """Scraper harness around a headless Selenium Firefox driver.

    :param log: DebugOutput instance used for status/error messages
    :param ins: Installations instance providing meter/site metadata
    """
    self.credentials = {}          # credential record for the current meter
    self.debug = False             # verbose mode: screenshots, dumps, prints
    self.driver = None             # Selenium webdriver once started
    self.driverLogFile = "geckodriver.log"  # erased before each driver start
    self.driverOpen = False        # True while a driver is running
    self.errorFlag = False         # set by logError()
    self.errorMsg = None           # last error message (stripped)
    self.meter = {}                # meter record for the current site
    self.webMod = None             # dynamically loaded meter-class module
    # Bring objects forward, will figure out
    # inheritance later.
    ##
    self.log = log
    self.ins = ins
    return
def clearLogs(self):
    '''Clears *.log files from specified directory'''
    if 'siteCode' not in self.meter:
        return
    meter_dir = os.path.join(self.log.logDir, self.meter['siteCode'])
    if not os.path.isdir(meter_dir):
        return
    for log_file in glob.glob(os.path.join(meter_dir, "*.log")):
        os.unlink(log_file)
def dumpLog(self, logFile):
    """Dump the driver's current page source into *logFile* under the
    meter's log sub-directory (created on demand).  No-op when no driver
    is running or no site is selected."""
    if self.driver is None:
        return
    if 'siteCode' not in self.meter:
        return
    meter_dir = os.path.join(self.log.logDir, self.meter['siteCode'])
    if not os.path.isdir(meter_dir):
        os.mkdir(meter_dir)
    with open(os.path.join(meter_dir, logFile), 'w') as fn:
        fn.write("HTML:\n")
        fn.write(self.driver.page_source)
        fn.write("\n")
def collectData(self):
    """Navigate to the data page and return the scraped data records.

    A navigation error is logged together with a screenshot and an HTML
    dump; scraping is still attempted afterwards.
    """
    try:
        self.webMod.gotoDataPage()
    except Exception as err:
        self.logError("Unhandled exception: %s" % str(err))
        self.saveScreen("error.png")
        self.dumpLog("error.log")
    if self.debug:
        self.saveScreen("dataPage.png")
        self.dumpLog("data.log")
    records = self.webMod.getDataRecords()
    if self.debug:
        print("WS:", records)
    return records
def logError(self, errorMessage):
    """Flag an error, store the stripped message, and write it to the log."""
    message = errorMessage.strip()
    self.errorFlag = True
    self.errorMsg = message
    self.log.msg(message)
def postData(self, dataRecords):
    """Upload scraped readings to the bmon server, skipping duplicates.

    ``dataRecords`` maps a unit name (or the site code itself) to a list
    of readings ``{'ob': 'YYYY-mm-dd HH:MM:SS', 'power': watts}`` in the
    installation's local timezone.  Readings are converted in place to
    UTC/kW, readings already present on the server are dropped, and the
    remainder is posted (bulk endpoint for >1 reading, single otherwise).
    """
    # Determine if we are posting multiple records
    # for a single site or a single record.
    # Data structure
    # Units within a site and data
    # { '204000001190': [{'ob': '2020-04-21 16:54:44', 'power': 2350}, {}] }
    # or
    # SiteID and data
    # { 'AmblerWTP': [{'ob': '2020-04-21 16:54:44', 'power': 2350}, {}] }
    # NOTE: FOR NOW WILL ONLY SUPPORT ONE DAYS WORTH OF DATA AT ONE TIME
    ##
    site = self.meter['siteCode']
    if self.ins.hasUnits(site):
        units = self.ins.getUnits(site)
    else:
        units = {}
    urlTemplateStore = self.ins.dataLocations[self.meter['urlCode']]['store']
    urlTemplateRead = self.ins.dataLocations[self.meter['urlCode']]['read']
    fullURLStoreMany = self.ins.dataLocations[self.meter['urlCode']]['storeMany']
    for rec in dataRecords.keys():
        # When the site has sub-units, the record key is a unit name that
        # maps to its own bmon site id.
        if rec in units.keys():
            site = units[rec]
        fullURLStore = (urlTemplateStore % (site))
        fullURLRead = (urlTemplateRead % (site))
        tmUTCmin = None
        tmUTCmax = None
        # Convert localized timezone to UTC before transmitting
        ##
        ct = 0
        #import pdb; pdb.set_trace()
        for r in dataRecords[rec]:
            # Watts -> kW, local time -> UTC; track min/max timestamps so
            # existing readings can be queried with a single request.
            kwValue = r['power'] / 1000.0
            tmParse = dateutil.parser.parse(r['ob'])
            tmLOC = self.ins.tzObj.localize(tmParse)
            tmUTC = tmLOC.astimezone(pytz.timezone('UTC')).strftime("%Y-%m-%d %H:%M:%S")
            # Replace ob date/time with UTC value
            ##
            r['ob'] = tmUTC
            r['power'] = kwValue
            ct = ct + 1
            if ct == 1:
                tmUTCmin = tmUTC
                tmUTCmax = tmUTC
            else:
                if tmUTC > tmUTCmax:
                    tmUTCmax = tmUTC
                if tmUTC < tmUTCmin:
                    tmUTCmin = tmUTC
        # Try to read data to see if it already exists
        ##
        tmExists = []
        payload = {
            'timezone': 'UTC',
            'start_ts': tmUTCmin,
            'end_ts': tmUTCmax
        }
        readResult = requests.get(fullURLRead,params=payload)
        bmonData = json.loads(readResult.text)
        if bmonData['status'] != 'success':
            # Abort the whole upload: without the existing readings we
            # cannot de-duplicate safely.
            msg = "Unable to read from bmon server, not performing updates."
            self.log.msg(msg)
            return
        for bmonRec in bmonData['data']['readings']:
            tmStr = bmonRec[0]
            tmExists.append(tmStr)
        dataFlag = "write"
        # Delete any existing records read from website
        # which are already in the bmon site
        ##
        newRec = []
        for r in dataRecords[rec]:
            obStr = r['ob']
            if not(obStr in tmExists):
                newRec.append(r)
        dataRecords[rec] = newRec
        if len(dataRecords[rec]) == 0:
            dataFlag = "skip"
        if self.debug:
            print(dataFlag,dataRecords[rec])
        if dataFlag == "write":
            if len(dataRecords[rec]) > 1:
                # This format is for multiple readings
                # [timestamp, siteID, value]
                ##
                #{"storeKey": "123abc",
                # "readings": [
                #   [1432327040, "AmblerWTP", 71.788],
                #   [1432327042, "test_cpu_temp", 45.527],
                #   [1432327040, "28.FF1A2D021400", 65.859]
                # ]
                #}
                # Process into a bmon payload
                # Skip existing records
                ##
                data = { 'storeKey': self.meter['dataStoreKey'],
                    'readings': [
                    ]
                }
                for r in dataRecords[rec]:
                    # Timestamps were rewritten to UTC above; parse back
                    # and send epoch seconds.
                    obStr = r['ob']
                    t = dateutil.parser.parse(obStr)
                    t = self.ins.tzUTC.localize(t)
                    obDt = t.timestamp()
                    obVal = r['power']
                    drec = [obDt, site, obVal]
                    data['readings'].append(drec)
                data = json.dumps(data)
                if self.debug:
                    print("JSON:%s" % (data))
                headers = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}
                resp = requests.post(fullURLStoreMany,data=data,headers=headers)
                msg = "*> Multi value write: %s (%s)" % (site,resp.text)
                self.log.msg(msg)
            else:
                # This stores one reading
                ##
                kwValue = dataRecords[rec][0]['power']
                tmUTC = dataRecords[rec][0]['ob']
                data = {
                    'storeKey': self.meter['dataStoreKey'],
                    'val': str(kwValue),
                    'ts': tmUTC
                }
                # Do not convert data to json before requests.post call
                ##
                resp = requests.post(fullURLStore,data=data)
                msg = "*> %s %s %s %s" % (site,tmUTC,str(kwValue),dataFlag)
                self.log.msg(msg)
    return
def saveScreen(self, saveFile):
    """Save a driver screenshot under the meter's directory.

    No-op when the driver is not running or no site is selected.
    NOTE(review): the base directory is hard-coded to
    /var/www/html/acepCollect.
    """
    if not self.driverOpen:
        return
    if 'siteCode' not in self.meter:
        return
    meter_dir = os.path.join('/var/www/html/acepCollect', self.meter['siteCode'])
    if not os.path.isdir(meter_dir):
        os.mkdir(meter_dir)
    self.driver.save_screenshot(os.path.join(meter_dir, saveFile))
def setMeter(self, meter, cred):
    """Select the active meter and credentials.

    Verbose mode is enabled when the meter's siteEnabled flag is 9 or
    when the global 'debug' option is set on the Installations object.
    """
    self.meter = meter
    self.credentials = cred
    verbose = meter['siteEnabled'] == 9
    if self.ins.getOption('debug'):
        verbose = True
    self.debug = verbose
def startDriver(self):
    """Start a headless Firefox webdriver configured by the meter class.

    No-op when a driver is already running.
    """
    if self.driverOpen:
        return
    # Create tmp directory (if requested)
    ##
    # NOTE(review): self.tmpDir is never set in __init__; when it is
    # missing, the AttributeError is swallowed by the bare except below
    # -- confirm this best-effort behaviour is intended.
    try:
        if not(os.path.isdir(self.tmpDir)):
            os.mkdir(self.tmpDir,0o755)
        if not(os.path.isdir(self.tmpDir)):
            msg = "Unable to make temporary directory: %s" % (self.tmpDir)
            self.logError(msg)
            sys.exit()
    except:
        pass
    options = Options()
    options.add_argument('-headless')
    # Let the meter-specific module tune capabilities and profile.
    cap = DesiredCapabilities.FIREFOX
    cap = self.webMod.setCapabilities(cap)
    pro = webdriver.FirefoxProfile()
    pro = self.webMod.setProfile(pro)
    #pro.set_preference("security.tls.version.min",1)
    #pro.set_preference("security.tls.version.max",4)
    #pro.accept_untrusted_certs = True
    #pro.set_preference("security.tls.version.enable-depricated",True)
    #print(dir(pro))
    #print(pro.default_preferences)
    # Check for geckodriver.log file and erase
    # before starting a new driver
    ##
    if os.path.isfile(self.driverLogFile):
        os.unlink(self.driverLogFile)
    self.driver = webdriver.Firefox(firefox_profile=pro,options=options,capabilities=cap)
    self.driverOpen = True
    return
def stopDriver(self):
    """Quit the webdriver (if running) and remove the meter's temp dir."""
    if self.driverOpen:
        self.driver.quit()
        self.driverOpen = False
    # Delete temporary directory
    # if utilized
    ##
    # Best-effort cleanup: a missing webMod or tmpDir attribute is
    # silently ignored by the bare except.
    try:
        if self.webMod.tmpDir != None:
            if os.path.isdir(self.webMod.tmpDir):
                shutil.rmtree(self.webMod.tmpDir)
    except:
        pass
    return
def login(self):
#if self.debug:
# print(self.meter)
# print(self.credentials)
meterClass = self.credentials['meterClass']
# Do not reload the class if the module is already
# loaded.
##
if not(self.webMod) or self.webMod.__name__ != meterClass:
dynObj = pydoc.locate(meterClass)
if not(meterClass) in dir(dynObj):
self.webMod = None
self.logError("Unable to load meter class: %s" % (meterClass))
return
dynAttr = getattr(dynObj,meterClass)
dynClass = dynAttr()
self.webMod = dynClass
# Start the driver if we are not testing
##
# Sometimes the web scraping driver is not really needed.
# This is signaled with urlLogin set to ""
##
if self.webMod.testing == False:
if self.webMod.urlLogin != "":
self.startDriver()
self.log.msg("* Driver started")
else:
# Start the driver from the testing application only
##
if os.path.basename(sys.argv[0]) == 'gatherDataBeta.py':
if self.webMod.urlLogin != "":
self.startDriver()
self.log.msg("* Driver testing: %s" % (meterClass))
self.webMod.setWebScraper(self)
# Using the meter class, proceed to login to the
# website.
##
try:
self.webMod.gotoLoginPage()
except Exception as err:
msg = "Unhandled exception: %s" % str(err)
self.logError(msg)
self.saveScreen("error.png")
self.dumpLog("error.log")
if self.debug:
self.saveScreen("login.png")
if not(self.errorFlag):
try:
self.webMod.doLogin(self.credentials)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.