max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
jdBuyMask/message.py
|
Ellison997/Scampering
| 357
|
6629051
|
# -*- encoding=utf8 -*-
from jdEmail import sendMail
from wechat_ftqq import sendWechat
class message(object):
    """Message push helper: routes a notification to e-mail or WeChat.

    messageType '1' -> e-mail via sendMail(mail, text)
    messageType '2' -> WeChat via sendWechat(sc_key=..., desp=text)
    Any other messageType is accepted but messages are silently dropped.
    """

    def __init__(self, messageType, sc_key, mail):
        # Validate and keep only the credential needed by the chosen channel.
        if messageType == '2':
            if not sc_key:
                raise Exception('sc_key can not be empty')
            self.sc_key = sc_key
        elif messageType == '1':
            if not mail:
                raise Exception('mail can not be empty')
            self.mail = mail
        self.messageType = messageType

    def _dispatch(self, text):
        # Fan the final text out to the configured channel.
        if self.messageType == '1':
            sendMail(self.mail, text)
        if self.messageType == '2':
            sendWechat(sc_key=self.sc_key, desp=text)

    def send(self, desp='', isOrder=False):
        """Push an order-result message; `desp` is the mask type label."""
        suffix = ' 类型口罩,已经下单了。24小时内付款' if isOrder else ' 类型口罩,下单失败了'
        self._dispatch(str(desp) + suffix)

    def sendAny(self, desp=''):
        """Push an arbitrary text message through the configured channel."""
        self._dispatch(str(desp))
|
# -*- encoding=utf8 -*-
from jdEmail import sendMail
from wechat_ftqq import sendWechat
class message(object):
    """Message push class: delivers notifications by e-mail or WeChat.

    messageType '1' uses e-mail (sendMail); '2' uses WeChat (sendWechat).
    Only the credential required by the selected channel is stored.
    """
    def __init__(self, messageType, sc_key, mail):
        # Reject an empty credential for the chosen channel up front.
        if messageType == '2':
            if not sc_key:
                raise Exception('sc_key can not be empty')
            self.sc_key = sc_key
        elif messageType == '1':
            if not mail:
                raise Exception('mail can not be empty')
            self.mail = mail
        self.messageType = messageType
    def send(self, desp='', isOrder=False):
        """Push an order-result message; `desp` is the mask type label."""
        desp = str(desp)
        if isOrder:
            # "... mask type: ordered, pay within 24 hours"
            msg = desp + ' 类型口罩,已经下单了。24小时内付款'
        else:
            # "... mask type: order failed"
            msg = desp + ' 类型口罩,下单失败了'
        if self.messageType == '1':
            sendMail(self.mail, msg)
        if self.messageType == '2':
            sendWechat(sc_key=self.sc_key, desp=msg)
    def sendAny(self, desp=''):
        """Push an arbitrary text message through the configured channel."""
        desp = str(desp)
        msg = desp
        if self.messageType == '1':
            sendMail(self.mail, msg)
        if self.messageType == '2':
            sendWechat(sc_key=self.sc_key, desp=msg)
|
ja
| 0.284897
|
# -*- encoding=utf8 -*- 消息推送类
| 2.707801
| 3
|
edition/parsing/fortran/fortran_file_parser.py
|
Manu343726/biicode-common
| 17
|
6629052
|
from biicode.common.edition.parsing.file_parser import FileParser
from biicode.common.utils.bii_logging import logger
import re
from biicode.common.edition.parsing.code_reference import CodeReference
from biicode.common.edition.parsing.fortran.fortran_code_ref import FItem
class FortranFileParser(FileParser):
    """Parser that extracts dependency information from Fortran sources.

    Scans for INCLUDE lines, USE statements, CALLs and subroutine/module
    definitions, filling the includes/references/modules/declarations/
    definitions collections.  Tokenizing and string/comment splitting are
    provided by the FileParser base class (not visible here) -- assumed
    to consume the `limits` table and `initial_pattern` below.
    """
    # Item-type tags attached to parsed regions (see `limits`).
    CALL = 'SUBPROGRAM'
    USE = 'MODULE'
    INCLUDE = 'INCLUDE'
    DEF_SUB = 'DEF_SUBPROGRAM'
    DEF_MODULE = 'DEF_MODULE'
    # Opening token -> (closing token, item type).
    limits = {'!': ('\n', FileParser.COMMENT),
              '"': ('"', FileParser.TEXT),
              "'": ("'", FileParser.TEXT),
              'subroutine ': ('\n', DEF_SUB),
              'module ': ('\n', DEF_MODULE),
              'call ': ('\n', CALL),
              'include ': ('\n', INCLUDE),
              'use ': ('\n', USE)}
    # Tokens that open a region of interest for the base parser.
    initial_pattern = re.compile(r'//|#|"|!|/[*]|subroutine |module |call |use |include ')

    def __init__(self, content=None):
        self.includes = []         # CodeReference entries from INCLUDE lines
        self.references = []       # CodeReference entries found in comments
        self.modules = set()       # FItem('module', ...) from USE statements
        self.declarations = set()  # FItem('subprogram', ...) from CALL statements
        self.definitions = set()   # FItem(...) for subroutine/module definitions
        self.content = content
        self.has_main_function = False

    def parse(self, code):
        """Parse `code` (case-insensitive) and populate the collections."""
        result, clean_code = self._parse_strings_comments(code.lower())
        self.has_main_function = self.mainFunction(clean_code)
        self.parse_references(result)

    def mainFunction(self, code):
        """Return True when the first token of `code` is PROGRAM."""
        tokenized_code = self.tokenize_code(code.upper())
        if len(tokenized_code) == 0:
            return False
        return tokenized_code[0] == 'PROGRAM'

    def parse_references(self, result):
        """Route each parsed item to the matching collection by its type."""
        for item in result:
            if item.type == self.INCLUDE:
                begin, end, deps = self.handle_preprocessor(item.content)
                if deps:
                    self.includes.append(CodeReference(deps, item.start+begin, item.start+end))
            if item.type == self.USE:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.modules.add(FItem('module', name, scope))
            if item.type == self.CALL:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.declarations.add(FItem('subprogram', name, scope))
            if item.type == self.DEF_SUB:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.definitions.add(FItem('subprogram', name, scope))
            if item.type == self.DEF_MODULE:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.definitions.add(FItem('module', name, scope))
            if item.type == self.COMMENT:
                data = self.handle_comment(item.content)
                if data:
                    # +2, -1 to account for // and line ending
                    self.references.append(CodeReference(data, item.start+len(FileParser.BII_DATA_REF)+2, item.end-1))

    # First delimiter of an include path (quote) or a trailing '!' comment.
    start_require_pattern = re.compile(r'\'|"|!')

    def handle_preprocessor(self, text):
        """Extract (start, end, path) from an INCLUDE line.

        Returns (None, None, None) when the line is not an include or
        cannot be parsed.
        """
        closer = {'"': '"',
                  "'": "'"}
        tokenized_code = self.tokenize_code(text)
        if 'INCLUDE' == tokenized_code[0] or 'include' == tokenized_code[0]:
            try:
                m = self.start_require_pattern.search(text)
                start = m.start() + 1
                # The closing quote must match the opening one.
                c = closer[m.group()]
                end = text.find(c, start + 1)
                return start, end, text[start:end].strip()
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                logger.error('Unable to parse require %s ' % text)
        return (None, None, None)

    def scope_preprocessor(self, text):
        """Return the trailing '!' comment text of a call/use line, or None."""
        tokenized_code = self.tokenize_code(text)
        if 'call' == tokenized_code[0] or 'use' == tokenized_code[0]:
            try:
                m = self.start_require_pattern.search(text)
                if m:
                    start = m.start() + 1
                    # Only lines containing a '!' token yield a scope.
                    for index, token in enumerate(tokenized_code):
                        if token == "!":
                            end = text.find("\n")
                            return text[start:end].strip()
            except:  # NOTE(review): silent failure is intentional best-effort
                pass
        return None

    def name_preprocessor(self, text):
        """Concatenate tokens after the keyword until newline, '!' or '('."""
        tokenized_code = self.tokenize_code(text)
        name = ''
        for items in tokenized_code[1:]:
            if items == "\n" or items == "!" or items == "(":
                return name
            name = name + items
        return name
|
from biicode.common.edition.parsing.file_parser import FileParser
from biicode.common.utils.bii_logging import logger
import re
from biicode.common.edition.parsing.code_reference import CodeReference
from biicode.common.edition.parsing.fortran.fortran_code_ref import FItem
class FortranFileParser(FileParser):
    """Parser that extracts dependency information from Fortran sources.

    Scans for INCLUDE lines, USE statements, CALLs and subroutine/module
    definitions, filling the includes/references/modules/declarations/
    definitions collections.  Tokenizing and string/comment splitting are
    provided by the FileParser base class (not visible here) -- assumed
    to consume the `limits` table and `initial_pattern` below.
    """
    # Item-type tags attached to parsed regions (see `limits`).
    CALL = 'SUBPROGRAM'
    USE = 'MODULE'
    INCLUDE = 'INCLUDE'
    DEF_SUB = 'DEF_SUBPROGRAM'
    DEF_MODULE = 'DEF_MODULE'
    # Opening token -> (closing token, item type).
    limits = {'!': ('\n', FileParser.COMMENT),
              '"': ('"', FileParser.TEXT),
              "'": ("'", FileParser.TEXT),
              'subroutine ': ('\n', DEF_SUB),
              'module ': ('\n', DEF_MODULE),
              'call ': ('\n', CALL),
              'include ': ('\n', INCLUDE),
              'use ': ('\n', USE)}
    # Tokens that open a region of interest for the base parser.
    initial_pattern = re.compile(r'//|#|"|!|/[*]|subroutine |module |call |use |include ')

    def __init__(self, content=None):
        self.includes = []         # CodeReference entries from INCLUDE lines
        self.references = []       # CodeReference entries found in comments
        self.modules = set()       # FItem('module', ...) from USE statements
        self.declarations = set()  # FItem('subprogram', ...) from CALL statements
        self.definitions = set()   # FItem(...) for subroutine/module definitions
        self.content = content
        self.has_main_function = False

    def parse(self, code):
        """Parse `code` (case-insensitive) and populate the collections."""
        result, clean_code = self._parse_strings_comments(code.lower())
        self.has_main_function = self.mainFunction(clean_code)
        self.parse_references(result)

    def mainFunction(self, code):
        """Return True when the first token of `code` is PROGRAM."""
        tokenized_code = self.tokenize_code(code.upper())
        if len(tokenized_code) == 0:
            return False
        return tokenized_code[0] == 'PROGRAM'

    def parse_references(self, result):
        """Route each parsed item to the matching collection by its type."""
        for item in result:
            if item.type == self.INCLUDE:
                begin, end, deps = self.handle_preprocessor(item.content)
                if deps:
                    self.includes.append(CodeReference(deps, item.start+begin, item.start+end))
            if item.type == self.USE:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.modules.add(FItem('module', name, scope))
            if item.type == self.CALL:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.declarations.add(FItem('subprogram', name, scope))
            if item.type == self.DEF_SUB:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.definitions.add(FItem('subprogram', name, scope))
            if item.type == self.DEF_MODULE:
                scope = self.scope_preprocessor(item.content)
                name = self.name_preprocessor(item.content)
                self.definitions.add(FItem('module', name, scope))
            if item.type == self.COMMENT:
                data = self.handle_comment(item.content)
                if data:
                    # +2, -1 to account for // and line ending
                    self.references.append(CodeReference(data, item.start+len(FileParser.BII_DATA_REF)+2, item.end-1))

    # First delimiter of an include path (quote) or a trailing '!' comment.
    start_require_pattern = re.compile(r'\'|"|!')

    def handle_preprocessor(self, text):
        """Extract (start, end, path) from an INCLUDE line.

        Returns (None, None, None) when the line is not an include or
        cannot be parsed.
        """
        closer = {'"': '"',
                  "'": "'"}
        tokenized_code = self.tokenize_code(text)
        if 'INCLUDE' == tokenized_code[0] or 'include' == tokenized_code[0]:
            try:
                m = self.start_require_pattern.search(text)
                start = m.start() + 1
                # The closing quote must match the opening one.
                c = closer[m.group()]
                end = text.find(c, start + 1)
                return start, end, text[start:end].strip()
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                logger.error('Unable to parse require %s ' % text)
        return (None, None, None)

    def scope_preprocessor(self, text):
        """Return the trailing '!' comment text of a call/use line, or None."""
        tokenized_code = self.tokenize_code(text)
        if 'call' == tokenized_code[0] or 'use' == tokenized_code[0]:
            try:
                m = self.start_require_pattern.search(text)
                if m:
                    start = m.start() + 1
                    # Only lines containing a '!' token yield a scope.
                    for index, token in enumerate(tokenized_code):
                        if token == "!":
                            end = text.find("\n")
                            return text[start:end].strip()
            except:  # NOTE(review): silent failure is intentional best-effort
                pass
        return None

    def name_preprocessor(self, text):
        """Concatenate tokens after the keyword until newline, '!' or '('."""
        tokenized_code = self.tokenize_code(text)
        name = ''
        for items in tokenized_code[1:]:
            if items == "\n" or items == "!" or items == "(":
                return name
            name = name + items
        return name
|
en
| 0.567835
|
#|"|!|/[*]|subroutine |module |call |use |include ') # +2, -1 to account for // and line ending
| 2.347856
| 2
|
sdk/python-sdk/test/protocols/test_CommitedAnswer.py
|
madhugoundla-attributes/samplewebapp
| 40
|
6629053
|
<reponame>madhugoundla-attributes/samplewebapp<gh_stars>10-100
import pytest
from test.test_utils import get_test_config, cleanup
from verity_sdk.protocols.v1_0.CommittedAnswer import CommittedAnswer
from verity_sdk.utils import COMMUNITY_MSG_QUALIFIER
from verity_sdk.utils.Context import Context
# Shared fixture values used by every test in this module.
for_relationship = 'some_did'
question_text = 'Are you trying to login to acme.com?'
question_detail = 'IP Address: 192.168.127.12'
valid_responses = ['Yes', 'No, that\'s not me!']
signature_required = True
def test_init():
    """The constructor must store every argument on the protocol object."""
    answer = CommittedAnswer(for_relationship, None, question_text,
                             question_detail, valid_responses,
                             signature_required)
    expected = {
        'for_relationship': for_relationship,
        'question': question_text,
        'descr': question_detail,
        'valid_responses': valid_responses,
        'signature_required': signature_required,
    }
    for attr, want in expected.items():
        assert getattr(answer, attr) == want
@pytest.mark.asyncio
async def test_ask():
    """ask_msg must emit a correctly typed, threaded ask-question message."""
    context = await Context.create_with_config(await get_test_config())
    answer = CommittedAnswer(for_relationship, None, question_text,
                             question_detail, valid_responses,
                             signature_required)
    msg = answer.ask_msg(context)

    expected_type = '{}/{}/{}/{}'.format(
        COMMUNITY_MSG_QUALIFIER,
        CommittedAnswer.MSG_FAMILY,
        CommittedAnswer.MSG_FAMILY_VERSION,
        CommittedAnswer.ASK_QUESTION
    )
    assert msg['@type'] == expected_type
    assert msg['@id'] is not None
    assert msg['~thread'] is not None
    assert msg['~thread']['thid'] is not None
    assert msg['~for_relationship'] == for_relationship

    # The question payload must be passed through unchanged.
    assert msg['text'] == question_text
    assert msg['detail'] == question_detail
    assert msg['valid_responses'] == valid_responses
    assert msg['signature_required'] == signature_required

    await cleanup(context)
@pytest.mark.asyncio
async def test_status():
    """status_msg must emit a correctly typed, threaded get-status message."""
    context = await Context.create_with_config(await get_test_config())
    answer = CommittedAnswer(for_relationship, None, question_text,
                             question_detail, valid_responses,
                             signature_required)
    msg = answer.status_msg(context)

    expected_type = '{}/{}/{}/{}'.format(
        COMMUNITY_MSG_QUALIFIER,
        CommittedAnswer.MSG_FAMILY,
        CommittedAnswer.MSG_FAMILY_VERSION,
        CommittedAnswer.GET_STATUS
    )
    assert msg['@type'] == expected_type
    assert msg['@id'] is not None
    assert msg['~for_relationship'] == for_relationship
    assert msg['~thread'] is not None
    assert msg['~thread']['thid'] is not None

    await cleanup(context)
|
import pytest
from test.test_utils import get_test_config, cleanup
from verity_sdk.protocols.v1_0.CommittedAnswer import CommittedAnswer
from verity_sdk.utils import COMMUNITY_MSG_QUALIFIER
from verity_sdk.utils.Context import Context
# Shared fixture values used by every test in this module.
for_relationship = 'some_did'
question_text = 'Are you trying to login to acme.com?'
question_detail = 'IP Address: 192.168.127.12'
valid_responses = ['Yes', 'No, that\'s not me!']
signature_required = True


def test_init():
    """The constructor must store every argument on the protocol object."""
    committed_answer = CommittedAnswer(for_relationship, None, question_text, question_detail, valid_responses,
                                       signature_required)
    assert committed_answer.for_relationship == for_relationship
    assert committed_answer.question == question_text
    assert committed_answer.descr == question_detail
    assert committed_answer.valid_responses == valid_responses
    assert committed_answer.signature_required == signature_required


@pytest.mark.asyncio
async def test_ask():
    """ask_msg must emit a correctly typed, threaded ask-question message."""
    context = await Context.create_with_config(await get_test_config())
    committed_answer = CommittedAnswer(for_relationship, None, question_text, question_detail, valid_responses,
                                       signature_required)
    msg = committed_answer.ask_msg(context)
    assert msg['@type'] == '{}/{}/{}/{}'.format(
        COMMUNITY_MSG_QUALIFIER,
        CommittedAnswer.MSG_FAMILY,
        CommittedAnswer.MSG_FAMILY_VERSION,
        CommittedAnswer.ASK_QUESTION
    )
    assert msg['@id'] is not None
    assert msg['~thread'] is not None
    assert msg['~thread']['thid'] is not None
    assert msg['~for_relationship'] == for_relationship
    # The question payload must be passed through unchanged.
    assert msg['text'] == question_text
    assert msg['detail'] == question_detail
    assert msg['valid_responses'] == valid_responses
    assert msg['signature_required'] == signature_required
    await cleanup(context)


@pytest.mark.asyncio
async def test_status():
    """status_msg must emit a correctly typed, threaded get-status message."""
    context = await Context.create_with_config(await get_test_config())
    committed_answer = CommittedAnswer(for_relationship, None, question_text, question_detail, valid_responses,
                                       signature_required)
    msg = committed_answer.status_msg(context)
    assert msg['@type'] == '{}/{}/{}/{}'.format(
        COMMUNITY_MSG_QUALIFIER,
        CommittedAnswer.MSG_FAMILY,
        CommittedAnswer.MSG_FAMILY_VERSION,
        CommittedAnswer.GET_STATUS
    )
    assert msg['@id'] is not None
    assert msg['~for_relationship'] == for_relationship
    assert msg['~thread'] is not None
    assert msg['~thread']['thid'] is not None
    await cleanup(context)
|
none
| 1
| 2.230734
| 2
|
|
scripts/mgear/maya/shifter/component/guide.py
|
KRNKRS/mgear
| 94
|
6629054
|
<reponame>KRNKRS/mgear<gh_stars>10-100
"""
Shifter's Component guide class.
"""
from functools import partial
import maya.cmds as cmds
# pyMel
import pymel.core as pm
from pymel.core import datatypes
# mgear
import mgear
from mgear import string
from mgear.maya import dag, vector, transform, applyop, attribute, curve, icon
from mgear.maya.shifter import guide, gui
import mainSettingsUI as msui
from mgear.vendor.Qt import QtWidgets, QtCore
##########################################################
# COMPONENT GUIDE
##########################################################
class ComponentGuide(guide.Main):
    """Main class for component guide creation.

    This class handles all the parameters and objectDefs creation.
    It also knows how to parse its own hierarchy of objects to retrieve
    position and transform.
    Finally it also knows how to export itself as xml_node.

    Attributes:
        paramNames (list): List of parameter names; it's actually important
            to keep them sorted.
        paramDefs (dic): Dictionary of parameter definition.
        values (dic): Dictionary of options values.
        valid (bool): We will check a few things and make sure the guide we are
            loading is up to date.
            If parameters or objects are missing a warning message will be
            displayed and the guide should be updated.
        tra (dic): dictionary of global transform
        atra (list): list of global transform
        pos (dic): dictionary of global position
        apos (list): list of global position
        prim (dic): dictionary of primitive
        blades (dic): dictionary of blades
        size (float): Size reference of the component. Default = .1
        save_transform (list): Transform of object name in this list will
            be saved
        save_primitive (list): Primitive of object name in this list will
            be saved
        save_blade (list): Normal and BiNormal of object will be saved
        minmax (dic): Define the min and max object for multi location objects
    """

    compType = "component"  # Component type
    compName = "component"  # Component default name
    compSide = "C"
    compIndex = 0  # Component default index
    description = ""  # Description of the component
    connectors = []
    compatible = []
    ctl_grp = ""

    # ====================================================
    # Init method.
    def __init__(self):
        # Parameters names, definition and values.
        # List of parameter names; it's actually important to keep
        # them sorted.
        self.paramNames = []
        # Dictionary of parameter definition.
        self.paramDefs = {}
        # Dictionary of options values.
        self.values = {}
        # We will check a few things and make sure the guide we are loading is
        # up to date.
        # If parameters or objects are missing a warning message will be
        # displayed and the guide should be updated.
        self.valid = True

        self.root = None
        self.id = None

        # parent component identification
        self.parentComponent = None
        self.parentLocalName = None

        # List and dictionary used during the creation of the component
        self.tra = {}  # dictionary of global transform
        self.atra = []  # list of global transform
        self.pos = {}  # dictionary of global position
        self.apos = []  # list of global position
        self.prim = {}  # dictionary of primitive
        self.blades = {}
        self.size = .1
        # self.root_size = None

        # List and dictionary used to define data of the guide that
        # should be saved
        # Transform of object name in this list will be saved
        self.save_transform = []
        # Primitive of object name in this list will be saved
        self.save_primitive = []
        # Normal and BiNormal of object will be saved
        self.save_blade = []
        # Define the min and max object for multi location objects
        self.minmax = {}

        # Init the guide
        self.postInit()
        self.initialHierarchy()
        self.addParameters()

    def postInit(self):
        """Define the objects name and categories.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        self.save_transform = ["root"]
        return

    # ====================================================
    # OBJECTS AND PARAMETERS
    def initialHierarchy(self):
        """Initial hierarchy.

        It's no more than the basic set of parameters and layout
        needed for the setting property.
        """
        # Parameters --------------------------------------
        # These are the necessary parameters for component guide definition
        self.pCompType = self.addParam("comp_type", "string", self.compType)
        self.pCompName = self.addParam("comp_name", "string", self.compName)
        self.pCompSide = self.addParam("comp_side", "string", self.compSide)
        self.pCompIndex = self.addParam(
            "comp_index", "long", self.compIndex, 0)
        self.pConnector = self.addParam("connector", "string", "standard")
        self.pUIHost = self.addParam("ui_host", "string", "")
        self.pCtlGroup = self.addParam("ctlGrp", "string", "")

        # Items -------------------------------------------
        # NOTE(review): typeItems and connectorItems are built here but never
        # stored or returned -- presumably consumed elsewhere originally;
        # confirm whether this dead code can be removed.
        typeItems = [self.compType, self.compType]
        for type in self.compatible:
            typeItems.append(type)
            typeItems.append(type)

        connectorItems = ["standard", "standard"]
        for item in self.connectors:
            connectorItems.append(item)
            connectorItems.append(item)

    def addObjects(self):
        """Create the objects of the guide.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        self.root = self.addRoot()

    def addParameters(self):
        """Create the parameter definitions of the guide.

        Note:
            REIMPLEMENT. This method should be reimplemented in each component.
        """
        return

    # ====================================================
    # SET / GET
    def setFromHierarchy(self, root):
        """Set the component guide from given hierarchy.

        Args:
            root (dagNode): The root of the hierarchy to parse.
        """
        self.root = root
        self.model = self.root.getParent(generations=-1)

        # ---------------------------------------------------
        # First check and set the settings
        if not self.root.hasAttr("comp_type"):
            mgear.log("%s is not a proper guide." %
                      self.root.longName(), mgear.sev_error)
            self.valid = False
            return

        self.setParamDefValuesFromProperty(self.root)

        # ---------------------------------------------------
        # Then get the objects
        for name in self.save_transform:
            if "#" in name:
                i = 0
                # max <= 0 acts as "no upper bound": keep scanning until a
                # child with the padded name is missing.
                while not self.minmax[name].max > 0 or i < \
                        self.minmax[name].max:
                    localName = string.replaceSharpWithPadding(name, i)
                    node = dag.findChild(self.model, self.getName(localName))
                    if not node:
                        break
                    self.tra[localName] = node.getMatrix(worldSpace=True)
                    self.atra.append(node.getMatrix(worldSpace=True))
                    self.pos[localName] = node.getTranslation(space="world")
                    self.apos.append(node.getTranslation(space="world"))
                    i += 1
                if i < self.minmax[name].min:
                    mgear.log("Minimum of object requiered for " +
                              name + " hasn't been reached!!",
                              mgear.sev_warning)
                    self.valid = False
                    continue
            else:
                node = dag.findChild(self.model, self.getName(name))
                if not node:
                    mgear.log("Object missing : %s" % (
                        self.getName(name)), mgear.sev_warning)
                    self.valid = False
                    continue
                self.tra[name] = node.getMatrix(worldSpace=True)
                self.atra.append(node.getMatrix(worldSpace=True))
                self.pos[name] = node.getTranslation(space="world")
                self.apos.append(node.getTranslation(space="world"))

        for name in self.save_blade:
            node = dag.findChild(self.model, self.getName(name))
            if not node:
                mgear.log("Object missing : %s" % (
                    self.getName(name)), mgear.sev_warning)
                self.valid = False
                continue
            self.blades[name] = vector.Blade(node.getMatrix(worldSpace=True))

        self.size = self.getSize()

    # ====================================================
    # DRAW
    def draw(self, parent):
        """Draw the guide in the scene.

        Args:
            parent (dagNode): the parent of the component.
        """
        self.parent = parent
        self.setIndex(self.parent)
        self.addObjects()
        pm.select(self.root)
        # TODO: add function to scale the points of the icons
        # Set the size of the root
        # self.root.size = self.root_size

    def drawFromUI(self, parent):
        """Draw the guide in the scene from the UI command.

        Args:
            parent (dagNode): the parent of the component.
        """
        if not self.modalPositions():
            mgear.log("aborded", mgear.sev_warning)
            return False

        self.draw(parent)
        transform.resetTransform(self.root, r=False, s=False)
        gui.Guide_UI.inspectSettings()
        return True

    def modalPositions(self):
        """Launch a modal dialog to set position of the guide."""
        self.jNumberVal = False
        self.dirAxisVal = False
        self.jSpacVal = False

        for name in self.save_transform:
            if "#" in name:
                # Build a small layoutDialog UI asking for joint count,
                # direction and spacing of the multi-location chain.
                def _addLocMultiOptions():
                    pm.setParent(q=True)
                    pm.columnLayout(adjustableColumn=True, cal="right")
                    pm.text(label='', al="center")
                    fl = pm.formLayout()
                    jNumber = pm.intFieldGrp(v1=3, label="Joint Number")
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(jNumber, "left", -30))
                    dirSet = ["X", "-X", "Y", "-Y", "Z", "-Z"]
                    fl = pm.formLayout()
                    dirAxis = pm.optionMenu(label="Direction")
                    dirAxis.addMenuItems(dirSet)
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(dirAxis, "left", 70))
                    fl = pm.formLayout()
                    jSpac = pm.floatFieldGrp(v1=1.0, label="spacing")
                    pm.setParent('..')
                    pm.formLayout(fl, e=True, af=(jSpac, "left", -30))
                    pm.text(label='', al="center")
                    pm.button(label='Continue', c=partial(
                        _retriveOptions, jNumber, dirAxis, jSpac))
                    pm.setParent('..')

                # Copy the dialog values onto self before dismissing it.
                def _retriveOptions(jNumber, dirAxis, jSpac, *args):
                    self.jNumberVal = jNumber.getValue()[0]
                    self.dirAxisVal = dirAxis.getValue()
                    self.jSpacVal = jSpac.getValue()[0]
                    pm.layoutDialog(dismiss="Continue")

                def _show():
                    pm.layoutDialog(ui=_addLocMultiOptions)

                _show()

                if self.jNumberVal:
                    # NOTE(review): if dirAxisVal somehow matches none of the
                    # six axes, offVec is never assigned and the loop below
                    # would raise NameError -- confirm the option menu always
                    # yields one of dirSet.
                    if self.dirAxisVal == "X":
                        offVec = datatypes.Vector(self.jSpacVal, 0, 0)
                    elif self.dirAxisVal == "-X":
                        offVec = datatypes.Vector(self.jSpacVal * -1, 0, 0)
                    elif self.dirAxisVal == "Y":
                        offVec = datatypes.Vector(0, self.jSpacVal, 0)
                    elif self.dirAxisVal == "-Y":
                        offVec = datatypes.Vector(0, self.jSpacVal * -1, 0)
                    elif self.dirAxisVal == "Z":
                        offVec = datatypes.Vector(0, 0, self.jSpacVal)
                    elif self.dirAxisVal == "-Z":
                        offVec = datatypes.Vector(0, 0, self.jSpacVal * -1)

                    newPosition = datatypes.Vector(0, 0, 0)
                    for i in range(self.jNumberVal):
                        newPosition = offVec + newPosition
                        localName = string.replaceSharpWithPadding(name, i)
                        self.tra[localName] = transform.getTransformFromPos(
                            newPosition)
        return True

    # ====================================================
    # UPDATE
    def setIndex(self, model):
        """Update the component index to get the next valid one.

        Args:
            model (dagNode): The parent model of the guide.
        """
        self.model = model.getParent(generations=-1)

        # Find next index available
        while True:
            obj = dag.findChild(self.model, self.getName("root"))
            if not obj or (self.root and obj == self.root):
                break
            self.setParamDefValue("comp_index", self.values["comp_index"] + 1)

    def symmetrize(self):
        """Inverse the transform of each element of the guide."""
        if self.values["comp_side"] not in ["R", "L"]:
            mgear.log("Can't symmetrize central component", mgear.sev_error)
            return False

        # Flip R/L in every string parameter (names, connections, ...).
        for name, paramDef in self.paramDefs.items():
            if paramDef.valueType == "string":
                self.setParamDefValue(
                    name, mgear.string.convertRLName(self.values[name]))

        for name, t in self.tra.items():
            self.tra[name] = transform.getSymmetricalTransform(t)

        for name, blade in self.blades.items():
            self.blades[name] = vector.Blade(
                transform.getSymmetricalTransform(blade.transform))
        return True

    def rename(self, root, newName, newSide, newIndex):
        """Rename the component.

        Args:
            root (dagNode): The parent of the component
            newName (str): The new name.
            newSide (str): Side of the component.
            newIndex (int): index of the component.
        """
        self.parent = root

        # store old properties
        oldIndex = self.parent.attr("comp_index").get()
        oldSide = self.parent.attr("comp_side").get()
        oldName = self.parent.attr("comp_name").get()
        oldSideIndex = oldSide + str(oldIndex)

        # change attr side in root
        self.parent.attr("comp_name").set(newName)
        self.parent.attr("comp_side").set(newSide)

        # set new index and update to the next valid
        self.setParamDefValue("comp_name", newName)
        self.setParamDefValue("comp_side", newSide)
        self.setParamDefValue("comp_index", newIndex)
        self.setIndex(self.parent)
        self.parent.attr("comp_index").set(self.values["comp_index"])

        # objList = dag.findComponentChildren(self.parent,
        #                                     oldName, oldSideIndex)
        # NOTE: Experimental: using findComponentChildren2
        objList = dag.findComponentChildren2(
            self.parent, oldName, oldSideIndex)
        newSideIndex = newSide + str(self.values["comp_index"])
        objList.append(self.parent)
        for obj in objList:
            # Names follow the pattern name_sideIndex[_subIndex]_suffix.
            suffix = obj.name().split("_")[-1]
            if len(obj.name().split("_")) == 3:
                new_name = "_".join([newName, newSideIndex, suffix])
            else:
                subIndex = obj.name().split("_")[-2]
                new_name = "_".join([newName, newSideIndex, subIndex, suffix])
            pm.rename(obj, new_name)

    # ====================================================
    # ELEMENTS
    def addRoot(self):
        """Add a root object to the guide.

        This method can initialize the object or draw it.
        Root object is a simple transform with a specific display and a setting
        property.

        Returns:
            dagNode: The root
        """
        if "root" not in self.tra.keys():
            self.tra["root"] = transform.getTransformFromPos(
                datatypes.Vector(0, 0, 0))

        self.root = icon.guideRootIcon(self.parent, self.getName(
            "root"), color=13, m=self.tra["root"])

        # Add Parameters from parameter definition list.
        for scriptName in self.paramNames:
            paramDef = self.paramDefs[scriptName]
            paramDef.create(self.root)

        return self.root

    def addLoc(self, name, parent, position=None):
        """Add a loc object to the guide.

        This method can initialize the object or draw it.
        Loc object is a simple null to define a position or a transformation in
        the guide.

        Args:
            name (str): Local name of the element.
            parent (dagNode): The parent of the element.
            position (vector): The default position of the element.

        Returns:
            dagNode: The locator object.
        """
        if name not in self.tra.keys():
            self.tra[name] = transform.getTransformFromPos(position)
        if name in self.prim.keys():
            # this functionality is not implemented. The actual design from
            # softimage Gear should be reviewed to fit in Maya.
            loc = self.prim[name].create(
                parent, self.getName(name), self.tra[name], color=17)
        else:
            loc = icon.guideLocatorIcon(parent, self.getName(
                name), color=17, m=self.tra[name])
        return loc

    def addLocMulti(self, name, parent, updateParent=True):
        """Add multiple loc objects to the guide.

        This method can initialize the object or draw it.
        Loc object is a simple null to define a position or a transformation in
        the guide.

        Args:
            name (str): Local name of the element ('#' is the index wildcard).
            parent (dagNode): The parent of the element.
            updateParent (bool): if True update the parent reference. False,
                keep the same for all loc.

        Returns:
            list of dagNode: The created loc objects in a list.
        """
        if "#" not in name:
            mgear.log(
                "You need to put a '#' in the name of multiple location.",
                mgear.sev_error)
            return False
        locs = []
        i = 0
        # Draw one locator per stored padded transform, chaining parents
        # when updateParent is on.
        while True:
            localName = string.replaceSharpWithPadding(name, i)
            if localName not in self.tra.keys():
                break
            loc = icon.guideLocatorIcon(parent, self.getName(
                localName), color=17, m=self.tra[localName])
            locs.append(loc)
            if updateParent:
                parent = loc
            i += 1
        return locs

    def addBlade(self, name, parentPos, parentDir):
        """Add a blade object to the guide.

        This method can initialize the object or draw it.
        Blade object is a 3 points curve to define a plane in the guide.

        Args:
            name (str): Local name of the element.
            parentPos (dagNode): The parent of the element.
            parentDir (dagNode): The direction constraint of the element.

        Returns:
            dagNode: The created blade curve.
        """
        if name not in self.blades.keys():
            self.blades[name] = vector.Blade(
                transform.getTransformFromPos(datatypes.Vector(0, 0, 0)))
            offset = False
        else:
            offset = True

        dist = .6 * self.root.attr("scaleX").get()
        blade = icon.guideBladeIcon(parent=parentPos, name=self.getName(
            name), lenX=dist, color=13, m=self.blades[name].transform)
        aim_cns = applyop.aimCns(blade, parentDir, axis="xy", wupType=2,
                                 wupVector=[0, 1, 0], wupObject=self.root,
                                 maintainOffset=offset)
        pm.pointConstraint(parentPos, blade)
        # Expose the aim offset as an attribute and keep it live-connected.
        offsetAttr = attribute.addAttribute(
            blade, "bladeRollOffset", "float", aim_cns.attr("offsetX").get())
        pm.connectAttr(offsetAttr, aim_cns.attr("offsetX"))
        attribute.lockAttribute(blade)
        return blade

    def addDispCurve(self, name, centers=[], degree=1):
        """Add a display curve object to the guide.

        Display curve object is a simple curve to show the connection between
        different guide elements.

        Args:
            name (str): Local name of the element.
            centers (list of dagNode): List of object to define the curve.
            degree (int): Curve degree. Default 1 = linear.

        Returns:
            dagNode: The newly created curve.
        """
        return icon.connection_display_curve(self.getName(name),
                                             centers,
                                             degree)

    # ====================================================
    # MISC
    def getObjects(self, model, includeShapes=True):
        """Get the objects of the component.

        Args:
            model(dagNode): The root of the component.
            includeShapes (bool): If True, will include the shapes.

        Returns:
            dict: local name -> dagNode of the component's objects.
        """
        objects = {}
        if includeShapes:
            children = pm.listRelatives(model, ad=True)
        else:
            children = pm.listRelatives(model, ad=True, typ='transform')
        pm.select(children)
        for child in pm.ls(self.fullName + "_*", selection=True):
            # Key is the part of the name after "<fullName>_".
            objects[child[child.index(
                self.fullName + "_") + len(self.fullName + "_"):]] = child
        return objects

    def getObjects2(self, model, includeShapes=True):
        """Get the objects of the component (cmds-based variant).

        Args:
            model(dagNode): The root of the component.
            includeShapes (bool): If True, will include the shapes.

        Returns:
            dict: name suffix -> dagNode of the component's objects.
        """
        objects = {}
        if includeShapes:
            children = [pm.PyNode(x) for x in cmds.listRelatives(
                model.longName(), ad=True, fullPath=True)]
        else:
            children = [pm.PyNode(x) for x in cmds.listRelatives(
                model.longName(), ad=True, typ='transform', fullPath=True)]
        for child in children:
            cName = child.longName()
            if cName.startswith(self.fullName):
                objects[cName.split("_")[-1]] = child
        return objects

    def getObjects3(self, model):
        """
        NOTE: Experimental function

        Get the objects of the component.
        This version only gets the transforms by name using Maya cmds.

        Args:
            model(dagNode): The root of the component.

        Returns:
            dict: local name -> node name of the component's objects.
        """
        objects = {}
        for child in cmds.ls(self.fullName + "_*", type="transform"):
            if pm.PyNode(child).getParent(-1) == model:
                objects[child[child.index(
                    self.fullName + "_") + len(self.fullName + "_"):]] = child
        return objects

    def addMinMax(self, name, minimum=1, maximum=-1):
        """Add minimum and maximum number of locators
        when we use the modal menu.
        """
        if "#" not in name:
            # NOTE(review): only logs -- execution still falls through and
            # registers the entry; confirm whether an early return was meant.
            mgear.log(
                "Invalid definition for min/max. You should have a '#' in "
                "the name", mgear.sev_error)
        self.minmax[name] = MinMax(minimum, maximum)

    def getSize(self):
        """Get the size of the component.

        Returns:
            float: the size (largest distance from root, floor of .01)
        """
        size = .01
        for pos in self.apos:
            d = vector.getDistance(self.pos["root"], pos)
            size = max(size, d)
        size = max(size, .01)
        return size

    def getName(self, name):
        """Return the fullname of given element of the component.

        Args:
            name (str): Localname of the element.

        Returns:
            str: Element fullname.
        """
        return self.fullName + "_" + name

    def getFullName(self):
        """Return the fullname of the component.

        Returns:
            str: Component fullname (name_sideIndex).
        """
        return self.values["comp_name"] + "_" + self.values["comp_side"] + \
            str(self.values["comp_index"])

    def getType(self):
        """Return the type of the component.

        Returns:
            str: component type.
        """
        return self.compType

    def getObjectNames(self):
        """Get the objects names of the component.

        Returns:
            set: The names set.
        """
        names = set()
        names.update(self.save_transform)
        names.update(self.save_primitive)
        names.update(self.save_blade)
        return names

    def getVersion(self):
        """Get the version of the component.

        Returns:
            str: version of the component.
        """
        # NOTE(review): self.version is not defined in this class -- assumed
        # to be provided by guide.Main or the concrete subclass; confirm.
        return ".".join([str(i) for i in self.version])

    # Read-only property shortcuts.
    fullName = property(getFullName)
    type = property(getType)
    objectNames = property(getObjectNames)
##########################################################
# OTHER CLASSES
##########################################################
class MinMax(object):
"""
Minimun and maximum class.
This class is used in addMinMax method.
Attributes:
minimum (int): minimum.
maximum (int): maximum.
"""
def __init__(self, minimum=1, maximum=-1):
self.min = minimum
self.max = maximum
##########################################################
# Setting Page
##########################################################
class mainSettingsTab(QtWidgets.QDialog, msui.Ui_Form):
# ============================================
# INIT
def __init__(self, parent=None):
super(mainSettingsTab, self).__init__()
self.setupUi(self)
class componentMainSettings(QtWidgets.QDialog, guide.helperSlots):
valueChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(componentMainSettings, self).__init__()
# the inspectSettings function set the current selection to the
# component root before open the settings dialog
self.root = pm.selected()[0]
self.mainSettingsTab = mainSettingsTab()
self.create_controls()
self.populate_controls()
self.create_layout()
self.create_connections()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
def create_controls(self):
"""
Create the controls for the component base
"""
self.tabs = QtWidgets.QTabWidget()
self.tabs.setObjectName("settings_tab")
# Close Button
self.close_button = QtWidgets.QPushButton("Close")
def populate_controls(self):
"""Populate Controls attribute values
Populate the controls values from the custom attributes
of the component.
"""
# populate tab
self.tabs.insertTab(0, self.mainSettingsTab, "Main Settings")
# populate main settings
self.mainSettingsTab.name_lineEdit.setText(
self.root.attr("comp_name").get())
sideSet = ["C", "L", "R"]
sideIndex = sideSet.index(self.root.attr("comp_side").get())
self.mainSettingsTab.side_comboBox.setCurrentIndex(sideIndex)
self.mainSettingsTab.componentIndex_spinBox.setValue(
self.root.attr("comp_index").get())
if self.root.attr("useIndex").get():
self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
QtCore.Qt.Checked)
else:
self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
QtCore.Qt.Unchecked)
self.mainSettingsTab.parentJointIndex_spinBox.setValue(
self.root.attr("parentJointIndex").get())
self.mainSettingsTab.host_lineEdit.setText(
self.root.attr("ui_host").get())
self.mainSettingsTab.subGroup_lineEdit.setText(
self.root.attr("ctlGrp").get())
def create_layout(self):
"""
Create the layout for the component base settings
"""
return
def create_connections(self):
"""
Create the slots connections to the controls functions
"""
self.close_button.clicked.connect(self.close_settings)
self.mainSettingsTab.name_lineEdit.editingFinished.connect(
self.updateComponentName)
self.mainSettingsTab.side_comboBox.currentIndexChanged.connect(
self.updateComponentName)
self.mainSettingsTab.componentIndex_spinBox.valueChanged.connect(
self.updateComponentName)
self.mainSettingsTab.useJointIndex_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.mainSettingsTab.useJointIndex_checkBox,
"useIndex"))
self.mainSettingsTab.parentJointIndex_spinBox.valueChanged.connect(
partial(self.updateSpinBox,
self.mainSettingsTab.parentJointIndex_spinBox,
"parentJointIndex"))
self.mainSettingsTab.host_pushButton.clicked.connect(
partial(self.updateHostUI,
self.mainSettingsTab.host_lineEdit,
"ui_host"))
self.mainSettingsTab.subGroup_lineEdit.editingFinished.connect(
partial(self.updateLineEdit,
self.mainSettingsTab.subGroup_lineEdit,
"ctlGrp"))
|
"""
Shifter's Component guide class.
"""
from functools import partial
import maya.cmds as cmds
# pyMel
import pymel.core as pm
from pymel.core import datatypes
# mgear
import mgear
from mgear import string
from mgear.maya import dag, vector, transform, applyop, attribute, curve, icon
from mgear.maya.shifter import guide, gui
import mainSettingsUI as msui
from mgear.vendor.Qt import QtWidgets, QtCore
##########################################################
# COMPONENT GUIDE
##########################################################
class ComponentGuide(guide.Main):
"""Main class for component guide creation.
This class handles all the parameters and objectDefs creation.
It also know how to parse its own hierachy of object to retrieve position
and transform.
Finally it also now how to export itself as xml_node.
Attributes:
paramNames (list): List of parameter name cause it's actually important
to keep them sorted.
paramDefs (dic): Dictionary of parameter definition.
values (dic): Dictionary of options values.
valid (bool): We will check a few things and make sure the guide we are
loading is up to date.
If parameters or object are missing a warning message will be
display and the guide should be updated.
tra (dic): dictionary of global transform
atra (list): list of global transform
pos (dic): dictionary of global postion
apos (list): list of global position
prim (dic): dictionary of primitive
blades (dic): dictionary of blades
size (float): Size reference of the component. Default = .1
save_transform (list): Transform of object name in this list will
be saved
save_primitive (list): Primitive of object name in this list will
be saved
save_blade (list): Normal and BiNormal of object will be saved
minmax (dic): Define the min and max object for multi location objects
"""
compType = "component" # Component type
compName = "component" # Component default name
compSide = "C"
compIndex = 0 # Component default index
description = "" # Description of the component
connectors = []
compatible = []
ctl_grp = ""
# ====================================================
# Init method.
def __init__(self):
# Parameters names, definition and values.
# List of parameter name cause it's actually important to keep
# them sorted.
self.paramNames = []
# Dictionary of parameter definition.
self.paramDefs = {}
# Dictionary of options values.
self.values = {}
# We will check a few things and make sure the guide we are loading is
# up to date.
# If parameters or object are missing a warning message will be display
# and the guide should be updated.
self.valid = True
self.root = None
self.id = None
# parent component identification
self.parentComponent = None
self.parentLocalName = None
# List and dictionary used during the creation of the component
self.tra = {} # dictionary of global transform
self.atra = [] # list of global transform
self.pos = {} # dictionary of global postion
self.apos = [] # list of global position
self.prim = {} # dictionary of primitive
self.blades = {}
self.size = .1
# self.root_size = None
# List and dictionary used to define data of the guide that
# should be saved
# Transform of object name in this list will be saved
self.save_transform = []
# Primitive of object name in this list will be saved
self.save_primitive = []
# Normal and BiNormal of object will be saved
self.save_blade = []
# Define the min and max object for multi location objects
self.minmax = {}
# Init the guide
self.postInit()
self.initialHierarchy()
self.addParameters()
def postInit(self):
"""Define the objects name and categories.
Note:
REIMPLEMENT. This method should be reimplemented in each component.
"""
self.save_transform = ["root"]
return
# ====================================================
# OBJECTS AND PARAMETERS
def initialHierarchy(self):
"""Initial hierachy.
It's no more than the basic set of parameters and layout
needed for the setting property.
"""
# Parameters --------------------------------------
# This are the necessary parameter for component guide definition
self.pCompType = self.addParam("comp_type", "string", self.compType)
self.pCompName = self.addParam("comp_name", "string", self.compName)
self.pCompSide = self.addParam("comp_side", "string", self.compSide)
self.pCompIndex = self.addParam(
"comp_index", "long", self.compIndex, 0)
self.pConnector = self.addParam("connector", "string", "standard")
self.pUIHost = self.addParam("ui_host", "string", "")
self.pCtlGroup = self.addParam("ctlGrp", "string", "")
# Items -------------------------------------------
typeItems = [self.compType, self.compType]
for type in self.compatible:
typeItems.append(type)
typeItems.append(type)
connectorItems = ["standard", "standard"]
for item in self.connectors:
connectorItems.append(item)
connectorItems.append(item)
def addObjects(self):
"""Create the objects of the guide.
Note:
REIMPLEMENT. This method should be reimplemented in each component.
"""
self.root = self.addRoot()
def addParameters(self):
"""Create the parameter definitions of the guide.
Note:
REIMPLEMENT. This method should be reimplemented in each component.
"""
return
# ====================================================
# SET / GET
def setFromHierarchy(self, root):
"""Set the component guide from given hierarchy.
Args:
root (dagNode): The root of the hierarchy to parse.
"""
self.root = root
self.model = self.root.getParent(generations=-1)
# ---------------------------------------------------
# First check and set the settings
if not self.root.hasAttr("comp_type"):
mgear.log("%s is not a proper guide." %
self.root.longName(), mgear.sev_error)
self.valid = False
return
self.setParamDefValuesFromProperty(self.root)
# ---------------------------------------------------
# Then get the objects
for name in self.save_transform:
if "#" in name:
i = 0
while not self.minmax[name].max > 0 or i < \
self.minmax[name].max:
localName = string.replaceSharpWithPadding(name, i)
node = dag.findChild(self.model, self.getName(localName))
if not node:
break
self.tra[localName] = node.getMatrix(worldSpace=True)
self.atra.append(node.getMatrix(worldSpace=True))
self.pos[localName] = node.getTranslation(space="world")
self.apos.append(node.getTranslation(space="world"))
i += 1
if i < self.minmax[name].min:
mgear.log("Minimum of object requiered for " +
name + " hasn't been reached!!",
mgear.sev_warning)
self.valid = False
continue
else:
node = dag.findChild(self.model, self.getName(name))
if not node:
mgear.log("Object missing : %s" % (
self.getName(name)), mgear.sev_warning)
self.valid = False
continue
self.tra[name] = node.getMatrix(worldSpace=True)
self.atra.append(node.getMatrix(worldSpace=True))
self.pos[name] = node.getTranslation(space="world")
self.apos.append(node.getTranslation(space="world"))
for name in self.save_blade:
node = dag.findChild(self.model, self.getName(name))
if not node:
mgear.log("Object missing : %s" % (
self.getName(name)), mgear.sev_warning)
self.valid = False
continue
self.blades[name] = vector.Blade(node.getMatrix(worldSpace=True))
self.size = self.getSize()
# ====================================================
# DRAW
def draw(self, parent):
"""Draw the guide in the scene.
Args:
parent (dagNode): the parent of the component.
"""
self.parent = parent
self.setIndex(self.parent)
self.addObjects()
pm.select(self.root)
# TODO: add function to scale the points of the icons
# Set the size of the root
# self.root.size = self.root_size
def drawFromUI(self, parent):
"""Draw the guide in the scene from the UI command.
Args:
parent (dagNode): the parent of the component.
"""
if not self.modalPositions():
mgear.log("aborded", mgear.sev_warning)
return False
self.draw(parent)
transform.resetTransform(self.root, r=False, s=False)
gui.Guide_UI.inspectSettings()
return True
def modalPositions(self):
"""Launch a modal dialog to set position of the guide."""
self.jNumberVal = False
self.dirAxisVal = False
self.jSpacVal = False
for name in self.save_transform:
if "#" in name:
def _addLocMultiOptions():
pm.setParent(q=True)
pm.columnLayout(adjustableColumn=True, cal="right")
pm.text(label='', al="center")
fl = pm.formLayout()
jNumber = pm.intFieldGrp(v1=3, label="Joint Number")
pm.setParent('..')
pm.formLayout(fl, e=True, af=(jNumber, "left", -30))
dirSet = ["X", "-X", "Y", "-Y", "Z", "-Z"]
fl = pm.formLayout()
dirAxis = pm.optionMenu(label="Direction")
dirAxis.addMenuItems(dirSet)
pm.setParent('..')
pm.formLayout(fl, e=True, af=(dirAxis, "left", 70))
fl = pm.formLayout()
jSpac = pm.floatFieldGrp(v1=1.0, label="spacing")
pm.setParent('..')
pm.formLayout(fl, e=True, af=(jSpac, "left", -30))
pm.text(label='', al="center")
pm.button(label='Continue', c=partial(
_retriveOptions, jNumber, dirAxis, jSpac))
pm.setParent('..')
def _retriveOptions(jNumber, dirAxis, jSpac, *args):
self.jNumberVal = jNumber.getValue()[0]
self.dirAxisVal = dirAxis.getValue()
self.jSpacVal = jSpac.getValue()[0]
pm.layoutDialog(dismiss="Continue")
def _show():
pm.layoutDialog(ui=_addLocMultiOptions)
_show()
if self.jNumberVal:
if self.dirAxisVal == "X":
offVec = datatypes.Vector(self.jSpacVal, 0, 0)
elif self.dirAxisVal == "-X":
offVec = datatypes.Vector(self.jSpacVal * -1, 0, 0)
elif self.dirAxisVal == "Y":
offVec = datatypes.Vector(0, self.jSpacVal, 0)
elif self.dirAxisVal == "-Y":
offVec = datatypes.Vector(0, self.jSpacVal * -1, 0)
elif self.dirAxisVal == "Z":
offVec = datatypes.Vector(0, 0, self.jSpacVal)
elif self.dirAxisVal == "-Z":
offVec = datatypes.Vector(0, 0, self.jSpacVal * -1)
newPosition = datatypes.Vector(0, 0, 0)
for i in range(self.jNumberVal):
newPosition = offVec + newPosition
localName = string.replaceSharpWithPadding(name, i)
self.tra[localName] = transform.getTransformFromPos(
newPosition)
return True
# ====================================================
# UPDATE
def setIndex(self, model):
"""Update the component index to get the next valid one.
Args:
model (dagNode): The parent model of the guide.
"""
self.model = model.getParent(generations=-1)
# Find next index available
while True:
obj = dag.findChild(self.model, self.getName("root"))
if not obj or (self.root and obj == self.root):
break
self.setParamDefValue("comp_index", self.values["comp_index"] + 1)
def symmetrize(self):
"""Inverse the transform of each element of the guide."""
if self.values["comp_side"] not in ["R", "L"]:
mgear.log("Can't symmetrize central component", mgear.sev_error)
return False
for name, paramDef in self.paramDefs.items():
if paramDef.valueType == "string":
self.setParamDefValue(
name, mgear.string.convertRLName(self.values[name]))
for name, t in self.tra.items():
self.tra[name] = transform.getSymmetricalTransform(t)
for name, blade in self.blades.items():
self.blades[name] = vector.Blade(
transform.getSymmetricalTransform(blade.transform))
return True
def rename(self, root, newName, newSide, newIndex):
"""Rename the component.
Args:
root (dagNode): The parent of the component
newName (str): The new name.
newSide (str): Side of the component.
newIndex (int): index of the comonent.
"""
self.parent = root
# store old properties
oldIndex = self.parent.attr("comp_index").get()
oldSide = self.parent.attr("comp_side").get()
oldName = self.parent.attr("comp_name").get()
oldSideIndex = oldSide + str(oldIndex)
# change attr side in root
self.parent.attr("comp_name").set(newName)
self.parent.attr("comp_side").set(newSide)
# set new index and update to the next valid
self.setParamDefValue("comp_name", newName)
self.setParamDefValue("comp_side", newSide)
self.setParamDefValue("comp_index", newIndex)
self.setIndex(self.parent)
self.parent.attr("comp_index").set(self.values["comp_index"])
# objList = dag.findComponentChildren(self.parent,
# oldName, oldSideIndex)
# NOTE: Experimenta using findComponentChildren2
objList = dag.findComponentChildren2(
self.parent, oldName, oldSideIndex)
newSideIndex = newSide + str(self.values["comp_index"])
objList.append(self.parent)
for obj in objList:
suffix = obj.name().split("_")[-1]
if len(obj.name().split("_")) == 3:
new_name = "_".join([newName, newSideIndex, suffix])
else:
subIndex = obj.name().split("_")[-2]
new_name = "_".join([newName, newSideIndex, subIndex, suffix])
pm.rename(obj, new_name)
# ====================================================
# ELEMENTS
def addRoot(self):
"""Add a root object to the guide.
This method can initialize the object or draw it.
Root object is a simple transform with a specific display and a setting
property.
Returns:
dagNode: The root
"""
if "root" not in self.tra.keys():
self.tra["root"] = transform.getTransformFromPos(
datatypes.Vector(0, 0, 0))
self.root = icon.guideRootIcon(self.parent, self.getName(
"root"), color=13, m=self.tra["root"])
# Add Parameters from parameter definition list.
for scriptName in self.paramNames:
paramDef = self.paramDefs[scriptName]
paramDef.create(self.root)
return self.root
def addLoc(self, name, parent, position=None):
"""Add a loc object to the guide.
This mehod can initialize the object or draw it.
Loc object is a simple null to define a position or a tranformation in
the guide.
Args:
name (str): Local name of the element.
parent (dagNode): The parent of the element.
position (vector): The default position of the element.
Returns:
dagNode: The locator object.
"""
if name not in self.tra.keys():
self.tra[name] = transform.getTransformFromPos(position)
if name in self.prim.keys():
# this functionality is not implemented. The actual design from
# softimage Gear should be review to fit in Maya.
loc = self.prim[name].create(
parent, self.getName(name), self.tra[name], color=17)
else:
loc = icon.guideLocatorIcon(parent, self.getName(
name), color=17, m=self.tra[name])
return loc
def addLocMulti(self, name, parent, updateParent=True):
"""Add multiple loc objects to the guide.
This method can initialize the object or draw it.
Loc object is a simple null to define a position or a tranformation in
the guide.
Args:
name (str): Local name of the element.
parent (dagNode): The parent of the element.
minimum (int): The minimum number of loc.
maximum (int): The maximum number of loc.
updateParent (bool): if True update the parent reference. False,
keep the same for all loc.
Returns:
list of dagNode: The created loc objects in a list.
"""
if "#" not in name:
mgear.log(
"You need to put a '#' in the name of multiple location.",
mgear.sev_error)
return False
locs = []
i = 0
while True:
localName = string.replaceSharpWithPadding(name, i)
if localName not in self.tra.keys():
break
loc = icon.guideLocatorIcon(parent, self.getName(
localName), color=17, m=self.tra[localName])
locs.append(loc)
if updateParent:
parent = loc
i += 1
return locs
def addBlade(self, name, parentPos, parentDir):
"""Add a blade object to the guide.
This mehod can initialize the object or draw it.
Blade object is a 3points curve to define a plan in the guide.
Args:
name (str): Local name of the element.
parentPos (dagNode): The parent of the element.
parentDir (dagNode): The direction constraint of the element.
Returns:
dagNode: The created blade curve.
"""
if name not in self.blades.keys():
self.blades[name] = vector.Blade(
transform.getTransformFromPos(datatypes.Vector(0, 0, 0)))
offset = False
else:
offset = True
dist = .6 * self.root.attr("scaleX").get()
blade = icon.guideBladeIcon(parent=parentPos, name=self.getName(
name), lenX=dist, color=13, m=self.blades[name].transform)
aim_cns = applyop.aimCns(blade, parentDir, axis="xy", wupType=2,
wupVector=[0, 1, 0], wupObject=self.root,
maintainOffset=offset)
pm.pointConstraint(parentPos, blade)
offsetAttr = attribute.addAttribute(
blade, "bladeRollOffset", "float", aim_cns.attr("offsetX").get())
pm.connectAttr(offsetAttr, aim_cns.attr("offsetX"))
attribute.lockAttribute(blade)
return blade
def addDispCurve(self, name, centers=[], degree=1):
"""Add a display curve object to the guide.
Display curve object is a simple curve to show the connection between
different guide element..
Args:
name (str): Local name of the element.
centers (list of dagNode): List of object to define the curve.
degree (int): Curve degree. Default 1 = lineal.
Returns:
dagNode: The newly creted curve.
"""
return icon.connection_display_curve(self.getName(name),
centers,
degree)
# ====================================================
# MISC
def getObjects(self, model, includeShapes=True):
"""Get the objects of the component.
Args:
model(dagNode): The root of the component.
includeShapes (boo): If True, will include the shapes.
Returns:
list of dagNode: The list of the objects.
"""
objects = {}
if includeShapes:
children = pm.listRelatives(model, ad=True)
else:
children = pm.listRelatives(model, ad=True, typ='transform')
pm.select(children)
for child in pm.ls(self.fullName + "_*", selection=True):
objects[child[child.index(
self.fullName + "_") + len(self.fullName + "_"):]] = child
return objects
def getObjects2(self, model, includeShapes=True):
"""Get the objects of the component.
Args:
model(dagNode): The root of the component.
includeShapes (boo): If True, will include the shapes.
Returns:
list of dagNode: The list of the objects.
"""
objects = {}
if includeShapes:
children = [pm.PyNode(x) for x in cmds.listRelatives(
model.longName(), ad=True, fullPath=True)]
else:
children = [pm.PyNode(x) for x in cmds.listRelatives(
model.longName(), ad=True, typ='transform', fullPath=True)]
for child in children:
cName = child.longName()
if cName.startswith(self.fullName):
objects[cName.split("_")[-1]] = child
return objects
def getObjects3(self, model):
"""
NOTE: Experimental function
Get the objects of the component.
This version only get the transforms by Name using Maya Cmds
Args:
model(dagNode): The root of the component.
Returns:
list of dagNode: The list of the objects.
"""
objects = {}
for child in cmds.ls(self.fullName + "_*", type="transform"):
if pm.PyNode(child).getParent(-1) == model:
objects[child[child.index(
self.fullName + "_") + len(self.fullName + "_"):]] = child
return objects
def addMinMax(self, name, minimum=1, maximum=-1):
"""Add minimun and maximum number of locator
When we use the modal menu.
"""
if "#" not in name:
mgear.log(
"Invalid definition for min/max. You should have a '#' in "
"the name", mgear.sev_error)
self.minmax[name] = MinMax(minimum, maximum)
def getSize(self):
"""Get the size of the component.
Returns:
float: the size
"""
size = .01
for pos in self.apos:
d = vector.getDistance(self.pos["root"], pos)
size = max(size, d)
size = max(size, .01)
return size
def getName(self, name):
"""Return the fullname of given element of the component.
Args:
name (str): Localname of the element.
Returns:
str: Element fullname.
"""
return self.fullName + "_" + name
def getFullName(self):
"""Return the fullname of the component.
Returns:
str: Component fullname.
"""
return self.values["comp_name"] + "_" + self.values["comp_side"] + \
str(self.values["comp_index"])
def getType(self):
"""Return the type of the component.
Returns:
str: component type.
"""
return self.compType
def getObjectNames(self):
"""Get the objects names of the component
Returns:
set: The names set.
"""
names = set()
names.update(self.save_transform)
names.update(self.save_primitive)
names.update(self.save_blade)
return names
def getVersion(self):
"""Get the version of the component.
Returns:
str: versionof the component.
"""
return ".".join([str(i) for i in self.version])
fullName = property(getFullName)
type = property(getType)
objectNames = property(getObjectNames)
##########################################################
# OTHER CLASSES
##########################################################
class MinMax(object):
"""
Minimun and maximum class.
This class is used in addMinMax method.
Attributes:
minimum (int): minimum.
maximum (int): maximum.
"""
def __init__(self, minimum=1, maximum=-1):
self.min = minimum
self.max = maximum
##########################################################
# Setting Page
##########################################################
class mainSettingsTab(QtWidgets.QDialog, msui.Ui_Form):
# ============================================
# INIT
def __init__(self, parent=None):
super(mainSettingsTab, self).__init__()
self.setupUi(self)
class componentMainSettings(QtWidgets.QDialog, guide.helperSlots):
valueChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(componentMainSettings, self).__init__()
# the inspectSettings function set the current selection to the
# component root before open the settings dialog
self.root = pm.selected()[0]
self.mainSettingsTab = mainSettingsTab()
self.create_controls()
self.populate_controls()
self.create_layout()
self.create_connections()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
def create_controls(self):
"""
Create the controls for the component base
"""
self.tabs = QtWidgets.QTabWidget()
self.tabs.setObjectName("settings_tab")
# Close Button
self.close_button = QtWidgets.QPushButton("Close")
def populate_controls(self):
"""Populate Controls attribute values
Populate the controls values from the custom attributes
of the component.
"""
# populate tab
self.tabs.insertTab(0, self.mainSettingsTab, "Main Settings")
# populate main settings
self.mainSettingsTab.name_lineEdit.setText(
self.root.attr("comp_name").get())
sideSet = ["C", "L", "R"]
sideIndex = sideSet.index(self.root.attr("comp_side").get())
self.mainSettingsTab.side_comboBox.setCurrentIndex(sideIndex)
self.mainSettingsTab.componentIndex_spinBox.setValue(
self.root.attr("comp_index").get())
if self.root.attr("useIndex").get():
self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
QtCore.Qt.Checked)
else:
self.mainSettingsTab.useJointIndex_checkBox.setCheckState(
QtCore.Qt.Unchecked)
self.mainSettingsTab.parentJointIndex_spinBox.setValue(
self.root.attr("parentJointIndex").get())
self.mainSettingsTab.host_lineEdit.setText(
self.root.attr("ui_host").get())
self.mainSettingsTab.subGroup_lineEdit.setText(
self.root.attr("ctlGrp").get())
def create_layout(self):
"""
Create the layout for the component base settings
"""
return
def create_connections(self):
"""
Create the slots connections to the controls functions
"""
self.close_button.clicked.connect(self.close_settings)
self.mainSettingsTab.name_lineEdit.editingFinished.connect(
self.updateComponentName)
self.mainSettingsTab.side_comboBox.currentIndexChanged.connect(
self.updateComponentName)
self.mainSettingsTab.componentIndex_spinBox.valueChanged.connect(
self.updateComponentName)
self.mainSettingsTab.useJointIndex_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.mainSettingsTab.useJointIndex_checkBox,
"useIndex"))
self.mainSettingsTab.parentJointIndex_spinBox.valueChanged.connect(
partial(self.updateSpinBox,
self.mainSettingsTab.parentJointIndex_spinBox,
"parentJointIndex"))
self.mainSettingsTab.host_pushButton.clicked.connect(
partial(self.updateHostUI,
self.mainSettingsTab.host_lineEdit,
"ui_host"))
self.mainSettingsTab.subGroup_lineEdit.editingFinished.connect(
partial(self.updateLineEdit,
self.mainSettingsTab.subGroup_lineEdit,
"ctlGrp"))
|
en
| 0.616415
|
Shifter's Component guide class. # pyMel # mgear ########################################################## # COMPONENT GUIDE ########################################################## Main class for component guide creation. This class handles all the parameters and objectDefs creation. It also know how to parse its own hierachy of object to retrieve position and transform. Finally it also now how to export itself as xml_node. Attributes: paramNames (list): List of parameter name cause it's actually important to keep them sorted. paramDefs (dic): Dictionary of parameter definition. values (dic): Dictionary of options values. valid (bool): We will check a few things and make sure the guide we are loading is up to date. If parameters or object are missing a warning message will be display and the guide should be updated. tra (dic): dictionary of global transform atra (list): list of global transform pos (dic): dictionary of global postion apos (list): list of global position prim (dic): dictionary of primitive blades (dic): dictionary of blades size (float): Size reference of the component. Default = .1 save_transform (list): Transform of object name in this list will be saved save_primitive (list): Primitive of object name in this list will be saved save_blade (list): Normal and BiNormal of object will be saved minmax (dic): Define the min and max object for multi location objects # Component type # Component default name # Component default index # Description of the component # ==================================================== # Init method. # Parameters names, definition and values. # List of parameter name cause it's actually important to keep # them sorted. # Dictionary of parameter definition. # Dictionary of options values. # We will check a few things and make sure the guide we are loading is # up to date. # If parameters or object are missing a warning message will be display # and the guide should be updated. 
# parent component identification # List and dictionary used during the creation of the component # dictionary of global transform # list of global transform # dictionary of global postion # list of global position # dictionary of primitive # self.root_size = None # List and dictionary used to define data of the guide that # should be saved # Transform of object name in this list will be saved # Primitive of object name in this list will be saved # Normal and BiNormal of object will be saved # Define the min and max object for multi location objects # Init the guide Define the objects name and categories. Note: REIMPLEMENT. This method should be reimplemented in each component. # ==================================================== # OBJECTS AND PARAMETERS Initial hierachy. It's no more than the basic set of parameters and layout needed for the setting property. # Parameters -------------------------------------- # This are the necessary parameter for component guide definition # Items ------------------------------------------- Create the objects of the guide. Note: REIMPLEMENT. This method should be reimplemented in each component. Create the parameter definitions of the guide. Note: REIMPLEMENT. This method should be reimplemented in each component. # ==================================================== # SET / GET Set the component guide from given hierarchy. Args: root (dagNode): The root of the hierarchy to parse. # --------------------------------------------------- # First check and set the settings # --------------------------------------------------- # Then get the objects # ==================================================== # DRAW Draw the guide in the scene. Args: parent (dagNode): the parent of the component. # TODO: add function to scale the points of the icons # Set the size of the root # self.root.size = self.root_size Draw the guide in the scene from the UI command. Args: parent (dagNode): the parent of the component. 
Launch a modal dialog to set position of the guide. # ==================================================== # UPDATE Update the component index to get the next valid one. Args: model (dagNode): The parent model of the guide. # Find next index available Inverse the transform of each element of the guide. Rename the component. Args: root (dagNode): The parent of the component newName (str): The new name. newSide (str): Side of the component. newIndex (int): index of the comonent. # store old properties # change attr side in root # set new index and update to the next valid # objList = dag.findComponentChildren(self.parent, # oldName, oldSideIndex) # NOTE: Experimenta using findComponentChildren2 # ==================================================== # ELEMENTS Add a root object to the guide. This method can initialize the object or draw it. Root object is a simple transform with a specific display and a setting property. Returns: dagNode: The root # Add Parameters from parameter definition list. Add a loc object to the guide. This mehod can initialize the object or draw it. Loc object is a simple null to define a position or a tranformation in the guide. Args: name (str): Local name of the element. parent (dagNode): The parent of the element. position (vector): The default position of the element. Returns: dagNode: The locator object. # this functionality is not implemented. The actual design from # softimage Gear should be review to fit in Maya. Add multiple loc objects to the guide. This method can initialize the object or draw it. Loc object is a simple null to define a position or a tranformation in the guide. Args: name (str): Local name of the element. parent (dagNode): The parent of the element. minimum (int): The minimum number of loc. maximum (int): The maximum number of loc. updateParent (bool): if True update the parent reference. False, keep the same for all loc. Returns: list of dagNode: The created loc objects in a list. Add a blade object to the guide. 
This mehod can initialize the object or draw it. Blade object is a 3points curve to define a plan in the guide. Args: name (str): Local name of the element. parentPos (dagNode): The parent of the element. parentDir (dagNode): The direction constraint of the element. Returns: dagNode: The created blade curve. Add a display curve object to the guide. Display curve object is a simple curve to show the connection between different guide element.. Args: name (str): Local name of the element. centers (list of dagNode): List of object to define the curve. degree (int): Curve degree. Default 1 = lineal. Returns: dagNode: The newly creted curve. # ==================================================== # MISC Get the objects of the component. Args: model(dagNode): The root of the component. includeShapes (boo): If True, will include the shapes. Returns: list of dagNode: The list of the objects. Get the objects of the component. Args: model(dagNode): The root of the component. includeShapes (boo): If True, will include the shapes. Returns: list of dagNode: The list of the objects. NOTE: Experimental function Get the objects of the component. This version only get the transforms by Name using Maya Cmds Args: model(dagNode): The root of the component. Returns: list of dagNode: The list of the objects. Add minimun and maximum number of locator When we use the modal menu. Get the size of the component. Returns: float: the size Return the fullname of given element of the component. Args: name (str): Localname of the element. Returns: str: Element fullname. Return the fullname of the component. Returns: str: Component fullname. Return the type of the component. Returns: str: component type. Get the objects names of the component Returns: set: The names set. Get the version of the component. Returns: str: versionof the component. 
########################################################## # OTHER CLASSES ########################################################## Minimun and maximum class. This class is used in addMinMax method. Attributes: minimum (int): minimum. maximum (int): maximum. ########################################################## # Setting Page ########################################################## # ============================================ # INIT # the inspectSettings function set the current selection to the # component root before open the settings dialog Create the controls for the component base # Close Button Populate Controls attribute values Populate the controls values from the custom attributes of the component. # populate tab # populate main settings Create the layout for the component base settings Create the slots connections to the controls functions
| 2.19497
| 2
|
src/spaceone/statistics/scheduler/stat_hourly_scheduler.py
|
choonho/statistics
| 0
|
6629055
|
import consul
import datetime
import logging
import time
from spaceone.core import config
from spaceone.core.locator import Locator
from spaceone.core.scheduler import HourlyScheduler
from spaceone.core.auth.jwt.jwt_util import JWTUtil
__all__ = ['StatHourlyScheduler']
_LOGGER = logging.getLogger(__name__)
def _get_domain_id_from_token(token):
    """Pull the domain id ('did' claim) out of a JWT without verifying it."""
    return JWTUtil.unverified_decode(token)['did']
WAIT_QUEUE_INITIALIZED = 10 # seconds for waiting queue initialization
INTERVAL = 10
MAX_COUNT = 10
def _validate_token(token):
    """Resolve *token* to a JWT string.

    If *token* is already a string it is returned unchanged.  If it is a
    dict of the form {'protocol': 'consul', 'config': {...}, 'uri': ...},
    poll Consul every INTERVAL seconds until the value appears.

    Bug fix: the original logged ``value[:30]`` on every iteration, which
    raises TypeError when the poll fails (``value`` is False, and bool is
    not subscriptable), crashing instead of retrying.  Logging of the
    token preview now happens only once the value is available.  Also
    uses ``Logger.warning`` (``warn`` is deprecated).
    """
    if isinstance(token, dict):
        protocol = token['protocol']
        if protocol == 'consul':
            consul_instance = Consul(token['config'])
            uri = token['uri']
            value = consul_instance.patch_token(uri)
            while value is False:
                _LOGGER.warning(f'[_validate_token] token not ready, uri: {uri}')
                time.sleep(INTERVAL)
                value = consul_instance.patch_token(uri)
            _LOGGER.warning(f'[_validate_token] token: {value[:30]} uri: {uri}')
            token = value
    return token
class StatHourlyScheduler(HourlyScheduler):
    """Hourly scheduler for the statistics service.

    Each tick it lists every domain, looks up the statistics.Schedule
    entries registered for the current hour, and emits one SpaceONE
    pipeline task (executed by BaseWorker) per domain.
    """
    def __init__(self, queue, interval, minute=':00'):
        """Initialize tick bookkeeping and resolve the service token.

        Args:
            queue: name of the worker queue tasks are pushed to.
            interval: scheduling interval, forwarded to HourlyScheduler.
            minute (str): minute-of-hour at which the tick fires.
        """
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        # Service token: taken from global config, or polled from Consul
        # until available (see _update_token / _validate_token).
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)
    def _init_count(self):
        """Return the initial bookkeeping dict consumed by check_count()."""
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur, # Last check_count time
            'index': 0, # tick counter
            'hour': cur.hour, # hour of the previous tick
            'started_at': 0, # start time of push_token
            'ended_at': 0 # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count
    def _update_token(self):
        """Return the service token from config, polling Consul if empty."""
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token
    def create_task(self):
        """Build and return the list of pipeline task dicts for this tick."""
        # self.check_global_configuration()
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result
    def list_domains(self):
        """Return all domains known to ScheduleService ([] on any error)."""
        try:
            ok = self.check_count()
            # NOTE(review): check_count() has no return statement, so `ok`
            # is always None and this branch can never fire — confirm intent.
            if ok == False:
                # ERROR LOGGING
                pass
            # Loop all domain, then find schedule
            metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
            schedule_svc = self.locator.get_service('ScheduleService', metadata)
            params = {}
            resp = schedule_svc.list_domains(params)
            _LOGGER.debug(f'[list_domain] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            # Best-effort: a failed listing yields an empty tick, not a crash.
            _LOGGER.error(e)
            return []
    def check_count(self):
        """Detect duplicated/missed ticks, then advance the bookkeeping dict."""
        # check current count is correct or not
        cur = datetime.datetime.now()
        hour = cur.hour
        # check
        # NOTE(review): self.config is never assigned in this file —
        # presumably it is set by the HourlyScheduler base class (the tick
        # interval in hours); otherwise this line raises AttributeError.
        # TODO confirm against spaceone.core.scheduler.
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same time')
            else:
                _LOGGER.error('[check_count] missing time')
        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        }
        self.count.update(count)
    def _update_count_ended_at(self):
        """Stamp the end time of the current tick's execution."""
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur
    def _list_schedule(self, hour, domain_id):
        """List statistics.Schedule entries registered for *hour*.

        Args:
            hour (int): hour-of-day matched against schedule.hours.
            domain_id (str): domain to query.
        Returns:
            list: Schedule objects (the service's total_count is dropped).
        """
        params = {'query': {
            'filter': [{'k': 'schedule.hours', 'v': hour, 'o': 'eq'}],
        },
            'domain_id': domain_id}
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedule_svc = self.locator.get_service('ScheduleService', metadata)
        schedules, total_count = schedule_svc.list(params)
        _LOGGER.debug(f'[_list_schedule] schedules: {schedules}, total_count: {total_count}')
        return schedules
    def _create_job_request(self, domain):
        """Based on domain, create Job Request
        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedules = self._list_schedule(self.count['hour'], domain['domain_id'])
        sched_jobs = []
        for schedule in schedules:
            # One pipeline stage per schedule: HistoryService.create(params).
            # The outer 'params' key is the stage-argument envelope; the
            # inner 'params' is the actual method parameter name.
            sched_job = {
                'locator': 'SERVICE',
                'name': 'HistoryService',
                'metadata': metadata,
                'method': 'create',
                'params': {'params': {'schedule_id': schedule.schedule_id, 'domain_id': domain['domain_id']}
                           }
            }
            sched_jobs.append(sched_job)
        stp = {'name': 'statistics_hourly_schedule',
               'version': 'v1',
               'executionEngine': 'BaseWorker',
               'stages': sched_jobs}
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
    @staticmethod
    def _create_schedule_params(schedule, domain_id):
        """Flatten a Schedule's options into History-create parameters."""
        dict_schedule = dict(schedule.to_dict())
        _LOGGER.debug(f'[_create_schedule_params] schedule: {schedule}')
        required_params = ['schedule_id', 'data_source_id', 'resource_type', 'query', 'join', 'formulas', 'domain_id']
        result = {'schedule_id': dict_schedule['schedule_id'], 'domain_id': domain_id}
        # NOTE(review): the print() calls below look like leftover debug
        # output — consider switching them to _LOGGER.debug.
        print('#' * 30)
        for param in required_params:
            print(f'check : {param}')
            if param in dict_schedule['options']:
                result[param] = dict_schedule['options'][param]
        _LOGGER.debug(f'[_create_schedule_params] params: {result}')
        return result
class Consul:
    """Thin wrapper around python-consul for reading a single KV entry.

    Example config::

        {'host': 'consul.example.com', 'port': 8500}
    """

    # Connection options accepted by consul.Consul()
    # (host, port=8500, token=None, scheme=http, consistency=default,
    #  dc=None, verify=True, cert=None).
    _OPTIONS = ('host', 'port', 'token', 'scheme', 'consistency', 'dc',
                'verify', 'cert')

    def __init__(self, config):
        """Keep only the recognized, truthy connection options of *config*."""
        self.config = self._validate_config(config)

    def _validate_config(self, config):
        """Return the subset of *config* usable as consul.Consul() kwargs."""
        return {opt: config[opt]
                for opt in self._OPTIONS
                if config.get(opt, None)}

    def patch_token(self, key):
        """Fetch the value stored under *key* (e.g. /debug/supervisor/TOKEN).

        Returns the decoded ASCII string, or False on any failure
        (connection error, missing key, ...).
        """
        try:
            _, data = consul.Consul(**self.config).kv.get(key)
            return data['Value'].decode('ascii')
        except Exception as e:
            _LOGGER.debug(f'[patch_token] failed: {e}')
            return False
|
import consul
import datetime
import logging
import time
from spaceone.core import config
from spaceone.core.locator import Locator
from spaceone.core.scheduler import HourlyScheduler
from spaceone.core.auth.jwt.jwt_util import JWTUtil
__all__ = ['StatHourlyScheduler']
_LOGGER = logging.getLogger(__name__)
def _get_domain_id_from_token(token):
decoded_token = JWTUtil.unverified_decode(token)
return decoded_token['did']
WAIT_QUEUE_INITIALIZED = 10 # seconds for waiting queue initialization
INTERVAL = 10
MAX_COUNT = 10
def _validate_token(token):
if isinstance(token, dict):
protocol = token['protocol']
if protocol == 'consul':
consul_instance = Consul(token['config'])
value = False
while value is False:
uri = token['uri']
value = consul_instance.patch_token(uri)
_LOGGER.warn(f'[_validate_token] token: {value[:30]} uri: {uri}')
if value:
break
time.sleep(INTERVAL)
token = value
return token
class StatHourlyScheduler(HourlyScheduler):
def __init__(self, queue, interval, minute=':00'):
super().__init__(queue, interval, minute)
self.count = self._init_count()
self.locator = Locator()
self.TOKEN = self._update_token()
self.domain_id = _get_domain_id_from_token(self.TOKEN)
def _init_count(self):
# get current time
cur = datetime.datetime.now()
count = {
'previous': cur, # Last check_count time
'index': 0, # index
'hour': cur.hour, # previous hour
'started_at': 0, # start time of push_token
'ended_at': 0 # end time of execution in this tick
}
_LOGGER.debug(f'[_init_count] {count}')
return count
def _update_token(self):
token = config.get_global('TOKEN')
if token == "":
token = _validate_token(config.get_global('TOKEN_INFO'))
return token
def create_task(self):
# self.check_global_configuration()
domains = self.list_domains()
result = []
for domain in domains:
stp = self._create_job_request(domain)
result.append(stp)
return result
def list_domains(self):
try:
ok = self.check_count()
if ok == False:
# ERROR LOGGING
pass
# Loop all domain, then find schedule
metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
schedule_svc = self.locator.get_service('ScheduleService', metadata)
params = {}
resp = schedule_svc.list_domains(params)
_LOGGER.debug(f'[list_domain] num of domains: {resp["total_count"]}')
return resp['results']
except Exception as e:
_LOGGER.error(e)
return []
def check_count(self):
# check current count is correct or not
cur = datetime.datetime.now()
hour = cur.hour
# check
if (self.count['hour'] + self.config) % 24 != hour:
if self.count['hour'] == hour:
_LOGGER.error('[check_count] duplicated call in the same time')
else:
_LOGGER.error('[check_count] missing time')
# This is continuous task
count = {
'previous': cur,
'index': self.count['index'] + 1,
'hour': hour,
'started_at': cur
}
self.count.update(count)
def _update_count_ended_at(self):
cur = datetime.datetime.now()
self.count['ended_at'] = cur
def _list_schedule(self, hour, domain_id):
""" List statistics.Schedule
"""
params = {'query': {
'filter': [{'k': 'schedule.hours', 'v': hour, 'o': 'eq'}],
},
'domain_id': domain_id}
metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
schedule_svc = self.locator.get_service('ScheduleService', metadata)
schedules, total_count = schedule_svc.list(params)
_LOGGER.debug(f'[_list_schedule] schedules: {schedules}, total_count: {total_count}')
return schedules
def _create_job_request(self, domain):
""" Based on domain, create Job Request
Returns:
jobs: SpaceONE Pipeline Template
"""
_LOGGER.debug(f'[_create_job_request] domain: {domain}')
metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
schedules = self._list_schedule(self.count['hour'], domain['domain_id'])
sched_jobs = []
for schedule in schedules:
sched_job = {
'locator': 'SERVICE',
'name': 'HistoryService',
'metadata': metadata,
'method': 'create',
'params': {'params': {'schedule_id': schedule.schedule_id, 'domain_id': domain['domain_id']}
}
}
sched_jobs.append(sched_job)
stp = {'name': 'statistics_hourly_schedule',
'version': 'v1',
'executionEngine': 'BaseWorker',
'stages': sched_jobs}
_LOGGER.debug(f'[_create_job_request] tasks: {stp}')
return stp
@staticmethod
def _create_schedule_params(schedule, domain_id):
dict_schedule = dict(schedule.to_dict())
_LOGGER.debug(f'[_create_schedule_params] schedule: {schedule}')
required_params = ['schedule_id', 'data_source_id', 'resource_type', 'query', 'join', 'formulas', 'domain_id']
result = {'schedule_id': dict_schedule['schedule_id'], 'domain_id': domain_id}
print('#' * 30)
for param in required_params:
print(f'check : {param}')
if param in dict_schedule['options']:
result[param] = dict_schedule['options'][param]
_LOGGER.debug(f'[_create_schedule_params] params: {result}')
return result
class Consul:
def __init__(self, config):
"""
Args:
- config: connection parameter
Example:
config = {
'host': 'consul.example.com',
'port': 8500
}
"""
self.config = self._validate_config(config)
def _validate_config(self, config):
"""
Parameter for Consul
- host, port=8500, token=None, scheme=http, consistency=default, dc=None, verify=True, cert=None
"""
options = ['host', 'port', 'token', 'scheme', 'consistency', 'dc', 'verify', 'cert']
result = {}
for item in options:
value = config.get(item, None)
if value:
result[item] = value
return result
def patch_token(self, key):
"""
Args:
key: Query key (ex. /debug/supervisor/TOKEN)
"""
try:
conn = consul.Consul(**self.config)
index, data = conn.kv.get(key)
return data['Value'].decode('ascii')
except Exception as e:
_LOGGER.debug(f'[patch_token] failed: {e}')
return False
|
en
| 0.512938
|
# seconds for waiting queue initialization # get current time # Last check_count time # index # previous hour # start time of push_token # end time of execution in this tick # self.check_global_configuration() # ERROR LOGGING # Loop all domain, then find schedule # check current count is correct or not # check # This is continuous task List statistics.Schedule Based on domain, create Job Request Returns: jobs: SpaceONE Pipeline Template Args: - config: connection parameter Example: config = { 'host': 'consul.example.com', 'port': 8500 } Parameter for Consul - host, port=8500, token=None, scheme=http, consistency=default, dc=None, verify=True, cert=None Args: key: Query key (ex. /debug/supervisor/TOKEN)
| 1.97336
| 2
|
cliente2.py
|
megaserch/untornolam-cnc
| 0
|
6629056
|
#!/usr/bin/env python
# Interactive CNC client: connects to a socket server and forwards each
# typed line as a 'chat message' event.
#
# NOTE(review): this module imports socketIO_client but uses the decorator
# API (@soquete.event / @soquete.on) of the *python-socketio* package —
# confirm which library is actually installed; with socketIO_client these
# decorators would fail at import time.
'''from socketIO_client import SocketIO, BaseNamespace
class Namespace(BaseNamespace):
    def on_connect(self):
        print('[Connected]')
    def on_reconnect(self):
        print('[Reconnected]')
    def on_disconnect(self):
        print('[Disconnected]')
socketIO = SocketIO('192.168.3.11', 3003, Namespace)
socketIO.wait(seconds=1)'''
import os
import sys
import readline
import atexit
#import socketio
from socketIO_client import SocketIO
# standard Python
# Open the connection to the CNC control server (host and port hard-coded).
soquete = SocketIO('http://192.168.3.11',3003)
@soquete.event
def message(data):
    # Generic message handler (payload currently ignored).
    print('I received a message!')
@soquete.on('chat message')
def on_message(data):
    # Echo every 'chat message' event received from the server.
    #print('I received a message!')
    print(data)
@soquete.event
def connect():
    print("I'm connected!")
@soquete.event
def connect_error():
    print("The connection failed!")
@soquete.event
def disconnect():
    print("I'm disconnected!")
print("intentanto conectar")
#soquete.connect('http://192.168.3.11:3003/test')
#soquete.connect('http://192.168.3.11:3003/')
print('El sid es', soquete.sid)
#soquete.emit('test', {'probando': 'desde python'})
try: # python3 compatibility
    type(raw_input)
except NameError:
    # noinspection PyShadowingBuiltins
    raw_input = input
try:
    # Main loop for interactive shell
    # Use stdin/stdout, additional interfaces like
    # UART, Socket or any other can be added.
    print("*************** Welcome to PyCNC! ***************")
    while True:
        line = raw_input('> ')
        if line == 'quit' or line == 'exit':
            break
        #soquete.emit('test', line)
        # Forward each typed line to the server as a 'chat message' event.
        soquete.emit('chat message', line)
        #do_line(line)
except KeyboardInterrupt:
    pass
print("\r\nExiting...")
#soquete.emit('test', 'ESTE MENSAJE VIENE DE PYTHON! ah y juan se la come')
#soquete.emit()
print("conectado")
print("saliendo")
soquete.disconnect()
# NOTE(review): the next line is redaction residue from the original source
# (an email address was scrubbed by the corpus tooling); it is not valid
# Python and would raise a SyntaxError if this file were executed as-is.
<EMAIL>
#def my_event(sid, data):
    # handle the message
    # print("el dato es:" + data)
    # return "OK", 123
|
#!/usr/bin/env python
'''from socketIO_client import SocketIO, BaseNamespace
class Namespace(BaseNamespace):
def on_connect(self):
print('[Connected]')
def on_reconnect(self):
print('[Reconnected]')
def on_disconnect(self):
print('[Disconnected]')
socketIO = SocketIO('192.168.3.11', 3003, Namespace)
socketIO.wait(seconds=1)'''
import os
import sys
import readline
import atexit
#import socketio
from socketIO_client import SocketIO
# standard Python
soquete = SocketIO('http://192.168.3.11',3003)
@soquete.event
def message(data):
print('I received a message!')
@soquete.on('chat message')
def on_message(data):
#print('I received a message!')
print(data)
@soquete.event
def connect():
print("I'm connected!")
@soquete.event
def connect_error():
print("The connection failed!")
@soquete.event
def disconnect():
print("I'm disconnected!")
print("intentanto conectar")
#soquete.connect('http://192.168.3.11:3003/test')
#soquete.connect('http://192.168.3.11:3003/')
print('El sid es', soquete.sid)
#soquete.emit('test', {'probando': 'desde python'})
try: # python3 compatibility
type(raw_input)
except NameError:
# noinspection PyShadowingBuiltins
raw_input = input
try:
# Main loop for interactive shell
# Use stdin/stdout, additional interfaces like
# UART, Socket or any other can be added.
print("*************** Welcome to PyCNC! ***************")
while True:
line = raw_input('> ')
if line == 'quit' or line == 'exit':
break
#soquete.emit('test', line)
soquete.emit('chat message', line)
#do_line(line)
except KeyboardInterrupt:
pass
print("\r\nExiting...")
#soquete.emit('test', 'ESTE MENSAJE VIENE DE PYTHON! ah y juan se la come')
#soquete.emit()
print("conectado")
print("saliendo")
soquete.disconnect()
<EMAIL>
#def my_event(sid, data):
# handle the message
# print("el dato es:" + data)
# return "OK", 123
|
en
| 0.216155
|
#!/usr/bin/env python from socketIO_client import SocketIO, BaseNamespace class Namespace(BaseNamespace): def on_connect(self): print('[Connected]') def on_reconnect(self): print('[Reconnected]') def on_disconnect(self): print('[Disconnected]') socketIO = SocketIO('192.168.3.11', 3003, Namespace) socketIO.wait(seconds=1) #import socketio # standard Python #print('I received a message!') #soquete.connect('http://192.168.3.11:3003/test') #soquete.connect('http://192.168.3.11:3003/') #soquete.emit('test', {'probando': 'desde python'}) # python3 compatibility # noinspection PyShadowingBuiltins # Main loop for interactive shell # Use stdin/stdout, additional interfaces like # UART, Socket or any other can be added. #soquete.emit('test', line) #do_line(line) #soquete.emit('test', 'ESTE MENSAJE VIENE DE PYTHON! ah y juan se la come') #soquete.emit() #def my_event(sid, data): # handle the message # print("el dato es:" + data) # return "OK", 123
| 3.086387
| 3
|
python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py
|
jczaja/Paddle
| 1
|
6629057
|
<reponame>jczaja/Paddle
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.v2.fluid.core as core
import numpy as np
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward
class TestCPULoDTensorArrayOps(unittest.TestCase):
    """CPU tests for the split_lod_tensor / merge_lod_tensor layer pair."""
    def place(self):
        """Device used by every test in this class (CPU only)."""
        return core.CPUPlace()
    def test_split_and_merge_lod_tensor_no_lod(self):
        """Split a plain (LoD-less) tensor by an element-wise bool mask and
        check that merging the two halves restores the original tensor."""
        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
        # Mask routes rows 2..5 to the "true" output, the rest to "false".
        mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)
        mask = core.LoDTensor()
        mask.set(mask_np, self.place())
        expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32')
        expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
        expect_true = core.LoDTensor()
        expect_true.set(expect_true_tensor, self.place())
        expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32')
        expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
        expect_false = core.LoDTensor()
        expect_false.set(expect_false_tensor, self.place())
        # Merging the two halves back must reproduce the input tensor.
        self.main(
            tensor=tensor,
            mask=mask,
            expect_true=expect_true,
            expect_false=expect_false,
            expect_out=tensor)
    def test_split_and_merge_lod_tensor_level_0(self):
        """Same round-trip, but with level-0 LoD: the mask now selects whole
        sequences ([0,3), [3,9), [9,10)) rather than individual rows."""
        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
        tensor.set_lod([[0, 3, 9, 10]])  # three sequences: lengths 3, 6, 1
        mask_np = np.array([0, 1, 0]).astype('bool')  # keep the middle one
        mask_np = np.expand_dims(mask_np, axis=1)
        mask = core.LoDTensor()
        mask.set(mask_np, self.place())
        expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32')
        expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
        expect_true = core.LoDTensor()
        expect_true.set(expect_true_tensor, self.place())
        expect_true.set_lod([[0, 6]])
        expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
        expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
        expect_false_lod = [[0, 3, 4]]
        expect_false = core.LoDTensor()
        expect_false.set(expect_false_tensor, self.place())
        expect_false.set_lod(expect_false_lod)
        self.main(
            tensor=tensor,
            mask=mask,
            expect_true=expect_true,
            expect_false=expect_false,
            expect_out=tensor)
    def main(self, tensor, mask, expect_true, expect_false, expect_out,
             level=0):
        """Build a split+merge program, run it, and compare the true/false
        halves and the merged output against the expected tensors."""
        place = self.place()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[1])
            x.persistable = True  # keep variables alive so they can be fetched
            y = layers.data(name='y', shape=[1])
            y.persistable = True
            out_true, out_false = layers.split_lod_tensor(
                input=x, mask=y, level=level)
            out_true.persistable = True
            out_false.persistable = True
            out = layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
            out.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program,
                feed={'x': tensor,
                      'y': mask},
                scope=scope,
                return_numpy=False)
        var_true = scope.find_var(out_true.name).get_tensor()
        var_false = scope.find_var(out_false.name).get_tensor()
        var_out = scope.find_var(out.name).get_tensor()
        self.check_tensor_same(var_true, expect_true)
        self.check_tensor_same(var_false, expect_false)
        self.check_tensor_same(var_out, expect_out)
    def check_tensor_same(self, actual, expect):
        """Assert that both element values and LoD structure match."""
        self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
        self.assertEqual(actual.lod(), expect.lod())
class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
    """Gradient check for split_lod_tensor + merge_lod_tensor on CPU."""
    def test_grad(self):
        """The graph computes mean(merge(split(x))), i.e. mean(x), so the
        gradient of the mean w.r.t. x must sum to (about) 1.0."""
        place = core.CPUPlace()
        program = Program()
        with program_guard(program):
            x = layers.data(
                name='x', shape=[1], dtype='float32', stop_gradient=False)
            y = layers.data(
                name='y', shape=[1], dtype='bool', stop_gradient=False)
            level = 0
            out_true, out_false = layers.split_lod_tensor(
                input=x, mask=y, level=level)
            out = layers.merge_lod_tensor(
                in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
            mean = layers.mean(x=out)
            append_backward(mean)
        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])  # three sequences: lengths 3, 6, 1
        mask_np = np.array([0, 1, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)
        mask = core.LoDTensor()
        mask.set(mask_np, place)
        exe = Executor(place)
        scope = core.Scope()
        g_vars = program.global_block().var(x.name + "@GRAD")
        # Fetch x's gradient tensor and reduce it to a scalar per fetch.
        g_out = [
            item.sum()
            for item in map(np.array,
                            exe.run(program,
                                    feed={'x': tensor,
                                          'y': mask},
                                    fetch_list=[g_vars],
                                    scope=scope,
                                    return_numpy=False))
        ]
        g_out_sum = np.array(g_out).sum()
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.v2.fluid.core as core
import numpy as np
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.framework import Program, program_guard
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.backward import append_backward
class TestCPULoDTensorArrayOps(unittest.TestCase):
def place(self):
return core.CPUPlace()
def test_split_and_merge_lod_tensor_no_lod(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
mask_np = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([2, 3, 4, 5]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_false_tensor = np.array([0, 1, 6, 7, 8, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def test_split_and_merge_lod_tensor_level_0(self):
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, self.place())
expect_true_tensor = np.array([3, 4, 5, 6, 7, 8]).astype('int32')
expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1)
expect_true = core.LoDTensor()
expect_true.set(expect_true_tensor, self.place())
expect_true.set_lod([[0, 6]])
expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32')
expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1)
expect_false_lod = [[0, 3, 4]]
expect_false = core.LoDTensor()
expect_false.set(expect_false_tensor, self.place())
expect_false.set_lod(expect_false_lod)
self.main(
tensor=tensor,
mask=mask,
expect_true=expect_true,
expect_false=expect_false,
expect_out=tensor)
def main(self, tensor, mask, expect_true, expect_false, expect_out,
level=0):
place = self.place()
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[1])
x.persistable = True
y = layers.data(name='y', shape=[1])
y.persistable = True
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out_true.persistable = True
out_false.persistable = True
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
out.persistable = True
exe = Executor(place)
scope = core.Scope()
exe.run(program,
feed={'x': tensor,
'y': mask},
scope=scope,
return_numpy=False)
var_true = scope.find_var(out_true.name).get_tensor()
var_false = scope.find_var(out_false.name).get_tensor()
var_out = scope.find_var(out.name).get_tensor()
self.check_tensor_same(var_true, expect_true)
self.check_tensor_same(var_false, expect_false)
self.check_tensor_same(var_out, expect_out)
def check_tensor_same(self, actual, expect):
self.assertTrue(np.allclose(np.array(actual), np.array(expect)))
self.assertEqual(actual.lod(), expect.lod())
class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
def test_grad(self):
place = core.CPUPlace()
program = Program()
with program_guard(program):
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
mean = layers.mean(x=out)
append_backward(mean)
tensor = core.LoDTensor()
tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
tensor.set_lod([[0, 3, 9, 10]])
mask_np = np.array([0, 1, 0]).astype('bool')
mask_np = np.expand_dims(mask_np, axis=1)
mask = core.LoDTensor()
mask.set(mask_np, place)
exe = Executor(place)
scope = core.Scope()
g_vars = program.global_block().var(x.name + "@GRAD")
g_out = [
item.sum()
for item in map(np.array,
exe.run(program,
feed={'x': tensor,
'y': mask},
fetch_list=[g_vars],
scope=scope,
return_numpy=False))
]
g_out_sum = np.array(g_out).sum()
self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
if __name__ == '__main__':
unittest.main()
|
en
| 0.851907
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.10178
| 2
|
src/agent.py
|
misterdev/flatland-marl
| 9
|
6629058
|
import copy
import os
import random
from collections import namedtuple, deque, Iterable
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.model import Dueling_DQN
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class DQNAgent:
"""Interacts with and learns from the environment."""
    def __init__(self, args, action_space, bitmap_height, double_dqn=True):
        """Initialize an Agent object.

        Params
        ======
            args: run configuration; prediction_depth, lr, buffer_size,
                batch_size, update_every and gamma are read by this class
            action_space (int): dimension of each action
            bitmap_height (int): height of the observation bitmap
                (num altmaps x max num rails)
            double_dqn (bool): use Double-DQN targets in learn() when True
        """
        self.args = args
        # self.state_size = state_size # used by the network, not the algorithm
        self.width = args.prediction_depth + 1 # Bitmap width
        self.height = bitmap_height # num altmaps (3) x max num rails
        self.action_space = action_space
        self.double_dqn = double_dqn
        # Q-Network: the local network is trained; the target starts as an
        # exact copy and provides the bootstrap targets.
        self.qnetwork_local = Dueling_DQN(self.width, self.height, action_space).to(device)
        self.qnetwork_target = copy.deepcopy(self.qnetwork_local)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.args.lr)
        # Replay memory
        self.memory = ReplayBuffer(action_space, self.args.buffer_size, self.args.batch_size, self.width, self.height)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
def save(self, filename):
torch.save(self.qnetwork_local.state_dict(), filename + ".local")
torch.save(self.qnetwork_target.state_dict(), filename + ".target")
def load(self, filename):
if os.path.exists(filename + ".local"):
self.qnetwork_local.load_state_dict(torch.load(filename + ".local"))
if os.path.exists(filename + ".target"):
self.qnetwork_target.load_state_dict(torch.load(filename + ".target"))
def step(self, state, action, reward, next_state, done, train=True):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
# training is done taking a sample batch from the replay memory
self.t_step = (self.t_step + 1) % self.args.update_every # TODO Meglio farla ad eps?
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > self.args.batch_size:
experiences = self.memory.sample()
if train:
self.learn(experiences, self.args.gamma)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state) # Compute Q values
self.qnetwork_local.train() # Set PyTorch module in training mode
return action_values[0]
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences # batch_size experiences
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions.unsqueeze(-1)).view(self.args.batch_size)
if self.double_dqn:
# Double DQN
q_best_action = self.qnetwork_local(next_states).max(1)[1] # shape (512)
Q_targets_next = self.qnetwork_target(next_states).gather(1, q_best_action.unsqueeze(-1)).view(self.args.batch_size) # (512, 1)
else:
# DQN
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(-1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- Update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, self.args.tau)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""
    def __init__(self, action_space, buffer_size, batch_size, width, height):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_space (int): dimension of each action
            buffer_size (int): maximum size of buffer (oldest entries evicted)
            batch_size (int): size of each training batch
            width (int): bitmap width, used to reshape sampled states
            height (int): bitmap height, used to reshape sampled states
        """
        self.action_space = action_space
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.width = width
        self.height = height
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        # expand_dims adds one dimension along axis 0 for PyTorch
        e = self.experience(np.expand_dims(state, 0), action, reward, np.expand_dims(next_state, 0), done)
        self.memory.append(e)
    def sample(self):
        """Randomly sample a batch of experiences from memory.

        Returns (states, actions, rewards, next_states, dones) as torch
        tensors moved to `device`; dones are cast to float 0/1.
        """
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(self.__v_stack_impr([e.state for e in experiences if e is not None])) \
            .float().to(device)
        actions = torch.from_numpy(self.__v_stack_impr([e.action for e in experiences if e is not None])) \
            .long().to(device)
        rewards = torch.from_numpy(self.__v_stack_impr([e.reward for e in experiences if e is not None])) \
            .float().to(device)
        next_states = torch.from_numpy(self.__v_stack_impr([e.next_state for e in experiences if e is not None])) \
            .float().to(device)
        dones = torch.from_numpy(self.__v_stack_impr([e.done for e in experiences if e is not None]).astype(np.uint8)) \
            .float().to(device)
        return states, actions, rewards, next_states, dones
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
    # This same function is used for states, actions, rewards etc, so the parameter 'states' doesn't contain states all the time
    # and for this reason has different shapes
    def __v_stack_impr(self, states):
        """Stack a sampled field list into one numpy array.

        :param states: a list of states (or actions/rewards/dones), len = self.batch_size
        :return: array shaped (batch, 1, height, width) for states/next_states,
            or (batch,) for the scalar fields
        """
        if isinstance(states[0], Iterable): # States, next_states
            # Debug shapes
            #for i in range(len(states)):
            # print(states[i].shape)
            # States and next_states
            np_states = np.array(states) # e.g. (batch, 1, height, width)
            np_states = np.reshape(np_states, (len(states), 1, self.height, self.width))
        else: # Actions, rewards, dones
            np_states = np.reshape(np.array(states), (len(states))) # (batch, )
        return np_states
|
import copy
import os
import random
from collections import namedtuple, deque, Iterable
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.model import Dueling_DQN
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class DQNAgent:
    """Dueling-DQN agent: interacts with and learns from the environment.

    Holds a trained "local" network and a slowly-tracking "target" copy,
    a replay buffer for experience storage, and supports both vanilla DQN
    and Double-DQN bootstrap targets.
    """
    def __init__(self, args, action_space, bitmap_height, double_dqn=True):
        """Initialize an Agent object.
        Params
        ======
            args: hyperparameter namespace (prediction_depth, lr, buffer_size,
                batch_size, update_every, gamma, tau)
            action_space (int): dimension of each action
            bitmap_height (int): height of the bitmap observation
            double_dqn (bool): if True, use Double-DQN targets in learn()
        """
        self.args = args
        # self.state_size = state_size # used by the network, not the algorithm
        self.width = args.prediction_depth + 1 # Bitmap width
        self.height = bitmap_height # num altmaps (3) x max num rails
        self.action_space = action_space
        self.double_dqn = double_dqn
        # Q-Network: local is optimized directly; target is a deep copy that
        # trails it via soft updates and provides stable bootstrap targets.
        self.qnetwork_local = Dueling_DQN(self.width, self.height, action_space).to(device)
        self.qnetwork_target = copy.deepcopy(self.qnetwork_local)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.args.lr)
        # Replay memory
        self.memory = ReplayBuffer(action_space, self.args.buffer_size, self.args.batch_size, self.width, self.height)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
    def save(self, filename):
        """Persist both networks' weights to <filename>.local / <filename>.target."""
        torch.save(self.qnetwork_local.state_dict(), filename + ".local")
        torch.save(self.qnetwork_target.state_dict(), filename + ".target")
    def load(self, filename):
        """Restore weights written by save(); files that don't exist are skipped."""
        if os.path.exists(filename + ".local"):
            self.qnetwork_local.load_state_dict(torch.load(filename + ".local"))
        if os.path.exists(filename + ".target"):
            self.qnetwork_target.load_state_dict(torch.load(filename + ".target"))
    def step(self, state, action, reward, next_state, done, train=True):
        """Record one transition and trigger learning every `update_every` calls."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        # training is done taking a sample batch from the replay memory
        self.t_step = (self.t_step + 1) % self.args.update_every # TODO: would it be better to tie this to eps?
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > self.args.batch_size:
                experiences = self.memory.sample()
                if train:
                    self.learn(experiences, self.args.gamma)
    def act(self, state, eps=0.):
        """Return the Q-values for `state` under the current local network.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection.
                NOTE(review): eps is currently unused here — epsilon-greedy
                selection presumably happens in the caller; confirm.
        """
        # Two unsqueezes: add batch and channel dimensions for the conv net.
        state = torch.from_numpy(state).float().unsqueeze(0).unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state) # Compute Q values
        self.qnetwork_local.train() # Set PyTorch module in training mode
        return action_values[0]
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences # batch_size experiences
        # Get expected Q values from local model (Q(s, a) for the taken actions)
        Q_expected = self.qnetwork_local(states).gather(1, actions.unsqueeze(-1)).view(self.args.batch_size)
        if self.double_dqn:
            # Double DQN: local net selects the action, target net evaluates it.
            q_best_action = self.qnetwork_local(next_states).max(1)[1] # shape (batch_size,)
            Q_targets_next = self.qnetwork_target(next_states).gather(1, q_best_action.unsqueeze(-1)).view(self.args.batch_size) # shape (batch_size,)
        else:
            # DQN: target net both selects and evaluates the best next action.
            Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(-1)
        # Compute Q targets for current states; (1 - dones) zeroes the bootstrap
        # term for terminal transitions.
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # ------------------- Update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, self.args.tau)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""
    def __init__(self, action_space, buffer_size, batch_size, width, height):
        """Initialize a ReplayBuffer object.
        Params
        ======
            action_space (int): dimension of each action
            buffer_size (int): maximum size of buffer (oldest entries evicted)
            batch_size (int): size of each training batch
            width (int): bitmap width, used to reshape sampled states
            height (int): bitmap height, used to reshape sampled states
        """
        self.action_space = action_space
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.width = width
        self.height = height
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        # expand_dims adds one dimension along axis 0 for PyTorch
        e = self.experience(np.expand_dims(state, 0), action, reward, np.expand_dims(next_state, 0), done)
        self.memory.append(e)
    def sample(self):
        """Randomly sample a batch of experiences from memory.

        Returns (states, actions, rewards, next_states, dones) as torch
        tensors moved to `device`; dones are cast to float 0/1.
        """
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(self.__v_stack_impr([e.state for e in experiences if e is not None])) \
            .float().to(device)
        actions = torch.from_numpy(self.__v_stack_impr([e.action for e in experiences if e is not None])) \
            .long().to(device)
        rewards = torch.from_numpy(self.__v_stack_impr([e.reward for e in experiences if e is not None])) \
            .float().to(device)
        next_states = torch.from_numpy(self.__v_stack_impr([e.next_state for e in experiences if e is not None])) \
            .float().to(device)
        dones = torch.from_numpy(self.__v_stack_impr([e.done for e in experiences if e is not None]).astype(np.uint8)) \
            .float().to(device)
        return states, actions, rewards, next_states, dones
    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
    # This same function is used for states, actions, rewards etc, so the parameter 'states' doesn't contain states all the time
    # and for this reason has different shapes
    def __v_stack_impr(self, states):
        """Stack a sampled field list into one numpy array.

        :param states: a list of states (or actions/rewards/dones), len = self.batch_size
        :return: array shaped (batch, 1, height, width) for states/next_states,
            or (batch,) for the scalar fields
        """
        if isinstance(states[0], Iterable): # States, next_states
            # Debug shapes
            #for i in range(len(states)):
            # print(states[i].shape)
            # States and next_states
            np_states = np.array(states) # e.g. (batch, 1, height, width)
            np_states = np.reshape(np_states, (len(states), 1, self.height, self.width))
        else: # Actions, rewards, dones
            np_states = np.reshape(np.array(states), (len(states))) # (batch, )
        return np_states
|
en
| 0.731392
|
Interacts with and learns from the environment. Initialize an Agent object. Params ====== state_size (int): dimension of each state action_space (int): dimension of each action # self.state_size = state_size # used by the network, not the algorithm # Bitmap width # num altmaps (3) x max num rails # Q-Network # Replay memory # Initialize time step (for updating every UPDATE_EVERY steps) # Save experience in replay memory # Learn every UPDATE_EVERY time steps. # training is done taking a sample batch from the replay memory # TODO Meglio farla ad eps? # If enough samples are available in memory, get random subset and learn Returns actions for given state as per current policy. Params ====== state (array_like): current state eps (float): epsilon, for epsilon-greedy action selection # Compute Q values # Set PyTorch module in training mode Update value parameters using given batch of experience tuples. Params ====== experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor # batch_size experiences # Get expected Q values from local model # Double DQN # shape (512) # (512, 1) # DQN # Compute Q targets for current states # Compute loss # Minimize the loss # ------------------- Update target network ------------------- # Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model (PyTorch model): weights will be copied from target_model (PyTorch model): weights will be copied to tau (float): interpolation parameter Fixed-size buffer to store experience tuples. Initialize a ReplayBuffer object. Params ====== action_space (int): dimension of each action buffer_size (int): maximum size of buffer batch_size (int): size of each training batch Add a new experience to memory. # expand_dims adds one dimension along axis 0 for PyTorch Randomly sample a batch of experiences from memory. Return the current size of internal memory. 
# This same function is used for states, actions, rewards etc, so the parameter 'states' doesn't contain states all the time # and for this reason has different shapes :param states: a list of states (or actions/rewards/dones), len = self.batch_size :return: # States, next_states # Debug shapes #for i in range(len(states)): # print(states[i].shape) # States and next_states # (512, 1, 400, 101) # Actions, rewards, dones # (512, )
| 2.329998
| 2
|
functions/Exercises/add_nums.py
|
WebucatorTraining/classfiles-actionable-python
| 2
|
6629059
|
<reponame>WebucatorTraining/classfiles-actionable-python
def add_3_and_6():
    """Print and return 3 + 6."""
    total = 3 + 6
    print('3 + 6 = ', total)
    return total  # returned so callers/tests can use the result


def add_10_and_12():
    """Print and return 10 + 12."""
    total = 10 + 12
    print('10 + 12 = ', total)
    return total


def main():
    """Run both demo additions."""
    add_3_and_6()
    add_10_and_12()


main()
|
def add_3_and_6():
    """Print and return 3 + 6."""
    total = 3 + 6
    print('3 + 6 = ', total)
    return total  # returned so callers/tests can use the result


def add_10_and_12():
    """Print and return 10 + 12."""
    total = 10 + 12
    print('10 + 12 = ', total)
    return total


def main():
    """Run both demo additions."""
    add_3_and_6()
    add_10_and_12()


main()
|
none
| 1
| 3.625712
| 4
|
|
tests/test_pretrained.py
|
kunosato-mado/ttslearn
| 146
|
6629060
|
from pathlib import Path
import numpy as np
import pytest
from scipy.io import wavfile
from tqdm.auto import tqdm
from ttslearn.pretrained import (
create_tts_engine,
get_available_model_ids,
is_pretrained_model_ready,
retrieve_pretrained_model,
)
OUT_DIR = Path(__file__).parent / "out_dir"
OUT_DIR.mkdir(exist_ok=True)
def test_is_pretrained_model_ready():
    """is_pretrained_model_ready: True for a cached model id, False for unknown."""
    # warmup: forces download/caching of the "dnntts" model (network I/O)
    create_tts_engine("dnntts").tts("test")
    # should exist
    assert is_pretrained_model_ready("dnntts")
    # I wish...
    assert not is_pretrained_model_ready("super_sugoi_tsuyoi_model")
def test_retrieve_pretrained_model():
    """retrieve_pretrained_model succeeds for a known id, raises for unknown."""
    # warmup: forces download/caching of the "dnntts" model (network I/O)
    create_tts_engine("dnntts").tts("test")
    # shouldn't raise
    retrieve_pretrained_model("dnntts")
    with pytest.raises(ValueError):
        retrieve_pretrained_model("super_sugoi_tsuyoi_model")
# Test if the results sound okay. Check the generated wav files after running the test
def test_all_pretraind_models():
    """Synthesize a short utterance with every available pretrained model.

    Output wav files land in OUT_DIR for manual listening checks.
    NOTE(review): function name has a typo ("pretraind"); renaming would
    change pytest collection, so it is kept as-is.
    """
    for idx, name in enumerate(get_available_model_ids()):
        if not is_pretrained_model_ready(name):
            print(f"Pretrained model does not exist: {name}")
            continue
        print(idx, name)
        engine = create_tts_engine(name)
        # Multi-speaker engines expose a non-None `spks`; use speaker id 1.
        if hasattr(engine, "spks") and engine.spks is not None:
            assert engine.spk2id is not None
            wav, sr = engine.tts("ありがとうございました", tqdm=tqdm, spk_id=1)
        else:
            wav, sr = engine.tts("ありがとうございました", tqdm=tqdm)
        assert wav.dtype == np.int16
        # Peak-normalize to the full int16 range before writing.
        wav = (wav / np.abs(wav).max() * 32767.0).astype(np.int16)
        wavfile.write(OUT_DIR / f"{idx:02d}_test_{name}.wav", sr, wav)
        assert len(wav) > 0
|
from pathlib import Path
import numpy as np
import pytest
from scipy.io import wavfile
from tqdm.auto import tqdm
from ttslearn.pretrained import (
create_tts_engine,
get_available_model_ids,
is_pretrained_model_ready,
retrieve_pretrained_model,
)
OUT_DIR = Path(__file__).parent / "out_dir"
OUT_DIR.mkdir(exist_ok=True)
def test_is_pretrained_model_ready():
    """is_pretrained_model_ready: True for a cached model id, False for unknown."""
    # warmup: forces download/caching of the "dnntts" model (network I/O)
    create_tts_engine("dnntts").tts("test")
    # should exist
    assert is_pretrained_model_ready("dnntts")
    # I wish...
    assert not is_pretrained_model_ready("super_sugoi_tsuyoi_model")
def test_retrieve_pretrained_model():
    """retrieve_pretrained_model succeeds for a known id, raises for unknown."""
    # warmup: forces download/caching of the "dnntts" model (network I/O)
    create_tts_engine("dnntts").tts("test")
    # shouldn't raise
    retrieve_pretrained_model("dnntts")
    with pytest.raises(ValueError):
        retrieve_pretrained_model("super_sugoi_tsuyoi_model")
# Test if the results sound okay. Check the generated wav files after running the test
def test_all_pretraind_models():
    """Synthesize a short utterance with every available pretrained model.

    Output wav files land in OUT_DIR for manual listening checks.
    NOTE(review): function name has a typo ("pretraind"); renaming would
    change pytest collection, so it is kept as-is.
    """
    for idx, name in enumerate(get_available_model_ids()):
        if not is_pretrained_model_ready(name):
            print(f"Pretrained model does not exist: {name}")
            continue
        print(idx, name)
        engine = create_tts_engine(name)
        # Multi-speaker engines expose a non-None `spks`; use speaker id 1.
        if hasattr(engine, "spks") and engine.spks is not None:
            assert engine.spk2id is not None
            wav, sr = engine.tts("ありがとうございました", tqdm=tqdm, spk_id=1)
        else:
            wav, sr = engine.tts("ありがとうございました", tqdm=tqdm)
        assert wav.dtype == np.int16
        # Peak-normalize to the full int16 range before writing.
        wav = (wav / np.abs(wav).max() * 32767.0).astype(np.int16)
        wavfile.write(OUT_DIR / f"{idx:02d}_test_{name}.wav", sr, wav)
        assert len(wav) > 0
|
en
| 0.778727
|
# warmup # should exist # I wish... # warmup # shouldn't raise # Test if the results sound okay. Check the generated wav files after running the test
| 2.397494
| 2
|
task 3.py
|
someshkr/Assignment-3
| 0
|
6629061
|
import numpy as np
def vander_matrix(ip_vector, n, increasing = True):
    """Build an n-column Vandermonde matrix from a 1-D input vector.

    Row i holds powers of ip_vector[i]. With increasing=True the columns
    are x**0, x**1, ..., x**(n-1); with increasing=False the powers run
    from x**(n-1) down to x**0 (numpy.vander convention).

    Bug fix: previously increasing=True returned the literal 1 instead of
    a matrix, and increasing=False produced *increasing* powers.
    """
    if increasing:
        powers = range(n)
    else:
        powers = range(n - 1, -1, -1)
    op_matrix = np.array([x ** i for x in ip_vector for i in powers]).reshape(ip_vector.size, n)
    return op_matrix


input = np.array([1, 2, 3])  # NOTE: shadows the builtin `input`; kept for compatibility
col = 3
op_mat = vander_matrix(input, col, False)
print('input matrix :\n', input)
print('\n\nVandermonde output matrix :\n', op_mat)
|
import numpy as np
def vander_matrix(ip_vector, n, increasing = True):
    """Build an n-column Vandermonde matrix from a 1-D input vector.

    Row i holds powers of ip_vector[i]. With increasing=True the columns
    are x**0, x**1, ..., x**(n-1); with increasing=False the powers run
    from x**(n-1) down to x**0 (numpy.vander convention).

    Bug fix: previously increasing=True returned the literal 1 instead of
    a matrix, and increasing=False produced *increasing* powers.
    """
    if increasing:
        powers = range(n)
    else:
        powers = range(n - 1, -1, -1)
    op_matrix = np.array([x ** i for x in ip_vector for i in powers]).reshape(ip_vector.size, n)
    return op_matrix


input = np.array([1, 2, 3])  # NOTE: shadows the builtin `input`; kept for compatibility
col = 3
op_mat = vander_matrix(input, col, False)
print('input matrix :\n', input)
print('\n\nVandermonde output matrix :\n', op_mat)
|
none
| 1
| 3.436508
| 3
|
|
gfg/graphs/bellman_ford.py
|
rrwt/daily-coding-challenge
| 1
|
6629062
|
<filename>gfg/graphs/bellman_ford.py
"""
Given a graph and a source vertex src in graph,
find shortest paths from src to all vertices in the given graph.
The graph may contain negative weight edges.
Dijkstra does not work for Graphs with negative weight edges,
Bellman-Ford works for such graphs.
Bellman-Ford is also simpler than Dijkstra and suites well for distributed systems.
But time complexity of Bellman-Ford is O(VE) - adjacency list, which is more than Dijkstra.
If there is a negative weight cycle, then shortest distances are not calculated,
negative weight cycle is reported.
"""
import sys
from gfg.graphs.ds import GraphM
def bellman_ford_sp(graph: list, source: int, num_vertices: int) -> list:
    """Single-source shortest paths via Bellman-Ford on an adjacency matrix.

    `graph[u][v]` holds the weight of edge u->v, or `sys.maxsize` when the
    edge is absent. Unreachable vertices keep a distance of `sys.maxsize`.

    Returns the list of shortest distances from `source`, or [] (after
    printing a message) when a reachable negative-weight cycle exists.

    Time Complexity: O(V*V*E) (O(V*E) for adjacency list)
    """
    min_dist = [sys.maxsize] * num_vertices
    min_dist[source] = 0
    # Relax all edges V-1 times: any cycle-free shortest path has at most
    # V-1 edges.
    for _ in range(num_vertices - 1):
        for src in range(num_vertices):
            if min_dist[src] < sys.maxsize:
                for dest, weight in enumerate(graph[src]):
                    if weight < sys.maxsize:
                        min_dist[dest] = min(min_dist[dest], weight + min_dist[src])
    # If one more relaxation still improves a distance, a -ve cycle exists.
    # Bug fix: only consider edges leaving *reached* vertices; otherwise a
    # negative edge between unreachable vertices (dist == maxsize) would be
    # misreported as a cycle, because maxsize > maxsize + negative_weight.
    for src in range(num_vertices):
        if min_dist[src] == sys.maxsize:
            continue
        for dest, weight in enumerate(graph[src]):
            if src == dest:
                continue
            if weight < sys.maxsize and min_dist[dest] > min_dist[src] + weight:
                print("-ve cycle found")
                return []
    return min_dist
if __name__ == "__main__":
    # Demo: 9-vertex weighted graph; print distances from vertex 0.
    g = GraphM(9)
    g.add_edge(0, 1, 4)
    g.add_edge(0, 7, 8)
    g.add_edge(1, 2, 8)
    g.add_edge(1, 7, 11)
    g.add_edge(2, 3, 7)
    g.add_edge(2, 5, 4)
    g.add_edge(2, 8, 2)
    g.add_edge(3, 4, 9)
    g.add_edge(3, 5, 14)
    g.add_edge(4, 5, 10)
    g.add_edge(5, 6, 2)
    g.add_edge(6, 7, 1)
    g.add_edge(6, 8, 6)
    g.add_edge(7, 8, 7)
    print(bellman_ford_sp(g.graph, 0, g.num_vertices))
    # Re-run after adding a negative-weight edge.
    g.add_edge(2, 4, -1)
    print(bellman_ford_sp(g.graph, 0, g.num_vertices))
|
<filename>gfg/graphs/bellman_ford.py
"""
Given a graph and a source vertex src in graph,
find shortest paths from src to all vertices in the given graph.
The graph may contain negative weight edges.
Dijkstra does not work for Graphs with negative weight edges,
Bellman-Ford works for such graphs.
Bellman-Ford is also simpler than Dijkstra and suites well for distributed systems.
But time complexity of Bellman-Ford is O(VE) - adjacency list, which is more than Dijkstra.
If there is a negative weight cycle, then shortest distances are not calculated,
negative weight cycle is reported.
"""
import sys
from gfg.graphs.ds import GraphM
def bellman_ford_sp(graph: list, source: int, num_vertices: int) -> list:
    """Single-source shortest paths via Bellman-Ford on an adjacency matrix.

    `graph[u][v]` holds the weight of edge u->v, or `sys.maxsize` when the
    edge is absent. Unreachable vertices keep a distance of `sys.maxsize`.

    Returns the list of shortest distances from `source`, or [] (after
    printing a message) when a reachable negative-weight cycle exists.

    Time Complexity: O(V*V*E) (O(V*E) for adjacency list)
    """
    min_dist = [sys.maxsize] * num_vertices
    min_dist[source] = 0
    # Relax all edges V-1 times: any cycle-free shortest path has at most
    # V-1 edges.
    for _ in range(num_vertices - 1):
        for src in range(num_vertices):
            if min_dist[src] < sys.maxsize:
                for dest, weight in enumerate(graph[src]):
                    if weight < sys.maxsize:
                        min_dist[dest] = min(min_dist[dest], weight + min_dist[src])
    # If one more relaxation still improves a distance, a -ve cycle exists.
    # Bug fix: only consider edges leaving *reached* vertices; otherwise a
    # negative edge between unreachable vertices (dist == maxsize) would be
    # misreported as a cycle, because maxsize > maxsize + negative_weight.
    for src in range(num_vertices):
        if min_dist[src] == sys.maxsize:
            continue
        for dest, weight in enumerate(graph[src]):
            if src == dest:
                continue
            if weight < sys.maxsize and min_dist[dest] > min_dist[src] + weight:
                print("-ve cycle found")
                return []
    return min_dist
if __name__ == "__main__":
    # Demo: 9-vertex weighted graph; print distances from vertex 0.
    g = GraphM(9)
    g.add_edge(0, 1, 4)
    g.add_edge(0, 7, 8)
    g.add_edge(1, 2, 8)
    g.add_edge(1, 7, 11)
    g.add_edge(2, 3, 7)
    g.add_edge(2, 5, 4)
    g.add_edge(2, 8, 2)
    g.add_edge(3, 4, 9)
    g.add_edge(3, 5, 14)
    g.add_edge(4, 5, 10)
    g.add_edge(5, 6, 2)
    g.add_edge(6, 7, 1)
    g.add_edge(6, 8, 6)
    g.add_edge(7, 8, 7)
    print(bellman_ford_sp(g.graph, 0, g.num_vertices))
    # Re-run after adding a negative-weight edge.
    g.add_edge(2, 4, -1)
    print(bellman_ford_sp(g.graph, 0, g.num_vertices))
|
en
| 0.892333
|
Given a graph and a source vertex src in graph, find shortest paths from src to all vertices in the given graph. The graph may contain negative weight edges. Dijkstra does not work for Graphs with negative weight edges, Bellman-Ford works for such graphs. Bellman-Ford is also simpler than Dijkstra and suites well for distributed systems. But time complexity of Bellman-Ford is O(VE) - adjacency list, which is more than Dijkstra. If there is a negative weight cycle, then shortest distances are not calculated, negative weight cycle is reported. Time Complexity: O(V*V*E) (O(V*E) for adjacency list) # calculate min_dist. BFS # do it V-1 times. V-1 = number of edges in a graph with shortest path and no cycle # check for -ve cycle
| 3.697291
| 4
|
importkit/import_/__init__.py
|
sprymix/importkit
| 2
|
6629063
|
<filename>importkit/import_/__init__.py<gh_stars>1-10
##
# Copyright (c) 2008-2012 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import imp
import importlib
import os
import sys
from .context import ImportContext
from .finder import install, update_finders, register_package
class ObjectImportError(Exception):
    """Raised when a dotted-path object lookup fails to resolve."""
def get_object(cls):
    """Resolve a dotted path like "package.module.attr" to the object itself.

    The module portion is imported and the final attribute fetched; either
    failure is wrapped in ObjectImportError with the original as cause.
    """
    modname, _, name = cls.rpartition('.')
    try:
        module = importlib.import_module(modname)
        return getattr(module, name)
    except (ImportError, AttributeError) as e:
        raise ObjectImportError('could not load object %s' % cls) from e
|
<filename>importkit/import_/__init__.py<gh_stars>1-10
##
# Copyright (c) 2008-2012 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import imp
import importlib
import os
import sys
from .context import ImportContext
from .finder import install, update_finders, register_package
class ObjectImportError(Exception):
    """Raised when a dotted-path object lookup fails to resolve."""
def get_object(cls):
    """Resolve a dotted path like "package.module.attr" to the object itself.

    The module portion is imported and the final attribute fetched; either
    failure is wrapped in ObjectImportError with the original as cause.
    """
    modname, _, name = cls.rpartition('.')
    try:
        module = importlib.import_module(modname)
        return getattr(module, name)
    except (ImportError, AttributeError) as e:
        raise ObjectImportError('could not load object %s' % cls) from e
|
en
| 0.68045
|
## # Copyright (c) 2008-2012 Sprymix Inc. # All rights reserved. # # See LICENSE for details. ##
| 2.145831
| 2
|
rhprocessor/controls.py
|
rhodief/rhprocessor
| 0
|
6629064
|
<filename>rhprocessor/controls.py
from typing import Any, Dict, Iterable, List
from datetime import datetime
import copy
from enum import Enum, auto
class ACTION_TYPE(Enum):
    """Event kinds emitted to the notification callback during execution.

    Order matters: auto() assigns 1..7 in declaration order.
    """
    CHECKIN_NODE = auto()
    CHECKIN_FN = auto()
    START = auto()
    END = auto()
    CHECKOUT_FN = auto()
    CHECKOUT_NODE = auto()
    LOG = auto()  # a user log line was appended (see ULogger.log)
class ILog():
    """A single log entry: message text plus its creation timestamp."""

    def __init__(self, txt) -> None:
        self._txt = txt
        self._dt = datetime.now()

    @property
    def txt(self):
        """The logged message."""
        return self._txt

    @property
    def dt(self):
        """Creation time of this entry (datetime)."""
        return self._dt

    def to_dict(self):
        """Serialise to a plain dict with a POSIX timestamp for `dt`."""
        return dict(txt=self._txt, dt=datetime.timestamp(self._dt))
class ULogger():
    """Per-node log writer: appends ILog entries to a shared list and
    notifies an optional callback with ACTION_TYPE.LOG."""
    def __init__(self, obj: List[Dict[str, Any]], _cb_fn = None) -> None:
        # `obj` is the mutable list owned by Logger for one node id; entries
        # appended here become visible through Logger.get_all_logs().
        self._obj = obj
        self._fn = _cb_fn
    def log(self, msg):
        """Append *msg* as a timestamped ILog entry and fire the callback."""
        _o = ILog(msg)
        self._obj.append(_o)
        self._notify(ACTION_TYPE.LOG)
    def logProgress(self, progress, total, msg):
        # Not implemented yet.
        pass
    def _notify(self, action_type: ACTION_TYPE):
        # The callback is optional; only invoked when callable.
        if callable(self._fn): self._fn(action_type)
class Logger():
    """Owns per-node log lists, keyed by dotted id strings (e.g. "1.0.2")."""
    def __init__(self) -> None:
        self._store: Dict[str, List[Dict[str, Any]]] = {}
        self._fn = None  # optional callback handed to every created ULogger
    def get_log_obj_from_string(self, idstring):
        """Return the log list for a dotted id string, or None."""
        return self._store.get(idstring, None)
    def get_log_obj(self, key: List[int]):
        """Return the log list for an id given as a list of ints, or None."""
        _id = self._id_string(key)
        return self._store.get(_id, None)
    def _id_string(self, id):
        # [1, 0, 2] -> "1.0.2"
        return '.'.join([str(i) for i in id])
    def u_logger(self, id: List[int]):
        """Create (resetting any previous) log list for *id*; return a ULogger on it."""
        _id = self._id_string(id)
        self._store[_id] = []
        return ULogger(self.get_log_obj_from_string(_id), self._fn)
    def get_all_logs(self):
        """Serialise every stored ILog entry to plain dicts."""
        return {k: [l.to_dict() for l in v] for k, v in self._store.items()}
    def on_log(self, fn):
        """Register the callback passed to subsequently created ULoggers."""
        self._fn = fn
class DataStore():
    """Key-value store with optional type protection.

    With protected=True (default), overwriting an existing key requires the
    new value to have the same type as the currently stored one.
    """
    def __init__(self, data=None, protected=True):
        """Create a store, optionally seeded with *data* (shallow-copied).

        Fixes vs. the previous version:
        - `data` was validated but then silently discarded; it now seeds the store.
        - mutable default argument `{}` replaced with None.
        - validation raises TypeError instead of `assert` (stripped under -O).
        """
        if data is None:
            data = {}
        if not isinstance(data, dict):
            raise TypeError('Data must be a dict')
        self._data = dict(data)  # shallow copy so the caller's dict isn't shared
        self._protected = protected
    def set_data(self, key, data) -> bool:
        """Store *data* under *key*; return False when type protection blocks it.

        Fix: membership is tested with `key not in self._data` instead of the
        falsy-value test `not self._data.get(key)`, which wrongly allowed type
        changes when the stored value was 0, '' or None.
        """
        if not self._protected or key not in self._data or type(self._data[key]) == type(data):
            self._data[key] = data
            return True
        return False
    def get_data(self, key):
        """Return the value stored under *key*, or None when absent."""
        return self._data.get(key)
class MetaError():
    """Base class for error/control markers passed through the pipeline
    (PipeData payloads are checked against this type)."""
    def __init__(self, msg):
        self._msg = msg
    @property
    def msg(self):
        """The error message text."""
        return self._msg
class PipeInterrupt(MetaError):
    """MetaError subtype: pipe interruption marker."""
    pass
class PipeStop(MetaError):
    """MetaError subtype: pipeline stop marker."""
    pass
class PipeError(MetaError):
    """MetaError subtype: pipe-level error marker."""
    pass
class FunctionError(MetaError):
    """MetaError subtype: user-function error marker."""
    pass
class PipeTransporterControl():
    """Factory for pipeline control/error marker objects."""

    def pipe_error(self, msg='PipeError'):
        """Return a PipeError carrying *msg*."""
        return PipeError(msg)

    def function_error(self, msg='User Function Error'):
        """Return a FunctionError carrying *msg*.

        Bug fix: the previous version ignored *msg* and always used the
        hardcoded text 'User Function Error'; that text is now only the
        default.
        """
        return FunctionError(msg)
class PipeData():
    """Read-only carrier for the payload flowing through the pipeline.

    The payload may also be a MetaError instance (checked by consumers)."""
    def __init__(self, data = None):
        self._data = data
    @property
    def data(self):
        """The wrapped payload."""
        return self._data
class NodeStatus(Enum):
    """Execution state of a node; values assigned by auto() in order."""
    AWAITING = auto()
    SUCCESS = auto()
    ERROR = auto()
    IGNORED = auto()
    RUNNING = auto()
class MetaNode():
    """Base record for one execution-tree node: name, type, timing, status."""
    def __init__(self, name: str, node_type: str) -> None:
        self._name = name
        self._node_type = node_type
        self._start = None  # set via set_start; datetime expected for serialisation
        self._end = None  # set via set_end
        self._status = NodeStatus.AWAITING
        self._n_chld = None  # number of child tracks, when applicable
    def set_start(self, value):
        self._start = value
    def set_end(self, value):
        self._end = value
    def set_status(self, status):
        """Set the node status; raises ValueError for non-NodeStatus values."""
        if status not in [ns for ns in NodeStatus]:
            raise ValueError('Status not valid')
        self._status = status
    def set_n_chld(self, n_tracks):
        self._n_chld = n_tracks
    def to_dict(self):
        """Serialise to plain types; datetime fields become POSIX timestamps."""
        return {
            'name': self._name,
            'node_type': self._node_type,
            'start': datetime.timestamp(self._start) if isinstance(self._start, datetime) else self._start,
            'end': datetime.timestamp(self._end) if isinstance(self._end, datetime) else self._end,
            'status': self._status.name,
            'n_chld': self._n_chld
        }
class Node(MetaNode):
    """Execution-tree node with child "tracks" (slot index -> MetaNode/dict)."""
    def __init__(self, name: str, node_type: str) -> None:
        super().__init__(name, node_type)
        self._tracks: Dict[int, MetaNode] = {}
    @property
    def tracks(self):
        return self._tracks
    def set_tracks_position(self, n_pos, prev = None):
        """Pre-allocate n_pos track slots: empty dicts when prev is given, else None."""
        self._tracks = {n: dict() if prev != None else None for n in list(range(n_pos))}
    def add_track(self, key: int, data: MetaNode):
        self._tracks[key] = data
    def to_dict(self):
        """Serialise this node and up to two nested levels of its tracks.

        Track values may be MetaNodes (have to_dict), dicts of MetaNodes, or
        dicts of dicts of MetaNodes; deeper/other values pass through as-is.
        """
        _dct = super().to_dict()
        _dct['tracks'] = {}
        for k, v in self._tracks.items():
            if hasattr(v, 'to_dict'):
                # Direct MetaNode child.
                _dct['tracks'][k] = v.to_dict()
                continue
            if isinstance(v, dict) and len(v) > 0:
                # One nested level of children.
                for _k, _v in v.items():
                    if _dct['tracks'].get(k) == None:
                        _dct['tracks'][k] = {}
                    if hasattr(_v, 'to_dict'):
                        _dct['tracks'][k][_k] = _v.to_dict()
                    if isinstance(_v, dict) and len(_v) > 0:
                        # Second nested level: serialise values where possible.
                        _dct['tracks'][k][_k] = {_kk: _vv.to_dict() if hasattr(_vv, 'to_dict') else _vv for _kk, _vv in _v.items()}
                        continue
                    continue
                continue
            # Empty dict / None / plain value: keep as-is.
            _dct['tracks'][k] = v
        return _dct
class NodeFunctions(MetaNode):
    """Leaf node for a function entry; extends MetaNode with a log list."""
    def __init__(self, name: str, node_type: str) -> None:
        super().__init__(name, node_type)
        self._logs: List[str] = []  # NOTE(review): never written in this chunk — confirm intended use
class Tracks():
    """Tree of MetaNode objects addressed by integer-list paths."""
    def __init__(self, dict_track = {}) -> None:
        # NOTE(review): `dict_track` is accepted but never used (and is a
        # mutable default) — confirm whether it was meant to seed _tracks.
        self._tracks = {}
    def addNode(self, nodeKey: List[int], node: MetaNode):
        """Insert *node* at path *nodeKey*, descending through dicts and Node tracks."""
        _list = self._tracks
        for k in nodeKey[:-1]:
            try:
                # Dicts are indexed directly; Node containers via their tracks.
                _list = _list.get(k) if isinstance(_list, dict) else _list.tracks.get(k)
            except BaseException as e:
                raise ValueError(f'Index "{k}" not found in {_list}')
        if isinstance(_list, Node):
            #print('_list', _list.to_dict())
            _list.add_track(nodeKey[-1], node)
        else:
            _list[nodeKey[-1]] = node
    def getNode(self, nodeKey: List[int]):
        """Return the node at path *nodeKey* (raises ValueError on a bad step)."""
        _list = self._tracks
        for k in nodeKey:
            try:
                _list = _list.get(k) if isinstance(_list, dict) else _list.tracks.get(k)
            except BaseException as e:
                raise ValueError(f'Index "{k}" not found in {_list}')
        return _list
    def to_dict(self):
        """Serialise every top-level node via its to_dict()."""
        return {k: v.to_dict() for k, v in self._tracks.items()}
class ExecutionControl():
    """Tracks in-flight executions and active nodes of the execution tree.

    Node ids are lists of ints (a path in the tree); internally they are
    keyed by their dotted string form, e.g. [1, 0] -> "1.0".
    """
    def __init__(self, execution_data: dict = None):
        # NOTE: `execution_data` is accepted for API compatibility but unused
        # (the previous version ignored it too; mutable default removed).
        self._current_nodes_id = {}
        self._current_node = {}
        self._tracks = Tracks()
    @property
    def current_nodes_id(self):
        """Mapping of dotted id -> id list for executions in flight."""
        return self._current_nodes_id
    @property
    def current_node(self):
        """Mapping of dotted id -> node object for active nodes."""
        return self._current_node
    @property
    def tracks(self):
        return self._tracks
    def add_execution(self, node_id: List[int]):
        self._current_nodes_id[self._get_key_string(node_id)] = node_id
    def remove_execution(self, node_id: List[int]):
        self._current_nodes_id.pop(self._get_key_string(node_id), None)
    def add_node(self, node_id: List[int], node):
        """Register *node* under *node_id*, evicting its parent entry if present.

        Bug fix: the previous version removed `k_list` (the key of the *last*
        entry iterated) instead of `_kr` (the matching parent), so when the
        parent was not the last dict entry the wrong node was evicted.
        """
        parent_key = None
        for key_string in self._current_node:
            key_list = self._get_key_list(key_string)
            if self._is_child(key_list, node_id):
                parent_key = key_list
        if parent_key:
            self.remove_node(parent_key)
        self._current_node[self._get_key_string(node_id)] = node
    def remove_node(self, node_id: List[int]):
        self._current_node.pop(self._get_key_string(node_id), None)
    def _is_child(self, parent_id: List[int], child_id: List[int]):
        # True when parent_id is exactly child_id without its last element.
        _test_child = child_id[:-1]
        return parent_id == _test_child
    def _get_key_string(self, node_id: List[int]):
        return '.'.join([str(n) for n in node_id])
    def _get_key_list(self, node_id_string: str):
        return [int(n) for n in node_id_string.split('.')]
class Transporter():
    """Carries a payload through the pipeline and records execution progress.

    Couples the payload (:class:`PipeData`) with the shared
    :class:`DataStore`, :class:`Logger` and :class:`ExecutionControl`.
    The transporter's position in the execution tree is the index path
    ``_id``; once split into children each child also carries ``_child_id``.
    """
    def __init__(
        self, pipe_data: PipeData,
        data_store: DataStore,
        logger: Logger,
        execution_control: ExecutionControl,
        id = None, child_id = None,
        fn_move = None
    ):
        self._pipe_data = pipe_data
        self._data_store = data_store
        self._logger = logger
        self._execution_control = execution_control
        # BUG FIX: the default used to be the shared mutable list ``[-1]``;
        # check_in() mutates it in place (``self._id[-1] += 1``), so every
        # Transporter built with the default shared a single id list.
        self._id = [-1] if id is None else id
        self._child_id = child_id  # assigned once this becomes a child
        self._n_children = None    # set by make_children()
        self._start = None
        self._end = None
        self._error = isinstance(self._pipe_data.data, MetaError)
        self._on_move_fn = fn_move
    @property
    def execution_control(self):
        return self._execution_control
    @property
    def logger(self):
        return self._logger
    def check_in(self, articulator, fns_qnt = 0):
        """Register the next node for ``articulator`` and notify observers.

        ``fns_qnt > 0`` registers a :class:`Node` with that many function
        slots and descends one level; otherwise a :class:`NodeFunctions`
        leaf is registered.
        """
        self._id[-1] += 1
        _id = self._id if self._child_id is None else self._id + [self._child_id]
        if fns_qnt:
            _inst = Node(articulator.name, articulator.type)
            # BlockMode slots stay bare; other modes hold nested dicts.
            _prev = None if articulator.type == 'BlockMode' else dict()
            _inst.set_tracks_position(fns_qnt, _prev)
            _inst.set_start(datetime.now())
            _inst.set_status(NodeStatus.RUNNING)
            self._execution_control.add_node(_id, _inst)
        else:
            ## the children will use this.
            _inst = NodeFunctions(articulator.name, articulator.type)
        self._execution_control._tracks.addNode(_id, _inst)
        if fns_qnt: self._id.append(-1)
        type_node = ACTION_TYPE.CHECKIN_NODE if isinstance(_inst, Node) else ACTION_TYPE.CHECKIN_FN
        self._notify(type_node)
    def make_children(self, qnt = None):
        """Split an iterable payload into one child transporter per item.

        A transporter already on error yields a single child wrapping the
        error marker.  Raises ValueError for non-iterable payloads.
        """
        if self._error or isinstance(self._pipe_data.data, MetaError):
            return [self._new_instance(d, i) for i, d in enumerate([self._pipe_data.data])]
        if not isinstance(self._pipe_data.data, Iterable):
            raise ValueError('Data is not iterable')
        self._n_children = len(self._pipe_data.data)
        return [self._new_instance(d, i) for i, d in enumerate(self._pipe_data.data)]
    def makeCopy(self, qnt: int):
        """Return ``qnt`` sibling copies of this transporter's payload."""
        n_id = self._id[:]
        del n_id[-1]
        return [self._new_instance(self.data().data, pid=(n_id + [i - 1])) for i in list(range(qnt))]
    def _new_instance(self, d, num = None, pid = None):
        # Children get a deep copy of the payload so they cannot alias data.
        _id_list = self._id[:] if not pid else pid
        return Transporter(PipeData(copy.deepcopy(d)), self._data_store, self._logger, self._execution_control, _id_list, num, self._on_move_fn)
    def set_total_tracks(self):
        """Record the child count (from make_children) on the parent node."""
        _id = self._id[:-1]
        node = self._execution_control._tracks.getNode(_id)
        node.set_n_chld(self._n_children)
    def start(self):
        """Mark this transporter's node as running and notify observers."""
        self._start = datetime.now()
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        node = self._execution_control._tracks.getNode(_id)
        node.set_start(self._start)
        node.set_status(NodeStatus.RUNNING)
        self._execution_control.add_execution(_id)
        self._notify(ACTION_TYPE.START)
    def end(self, status = NodeStatus.SUCCESS):
        """Mark this transporter's node as finished with ``status``."""
        self._end = datetime.now()
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        node = self._execution_control._tracks.getNode(_id)
        node.set_end(self._end)
        node.set_status(status)
        self._execution_control.remove_execution(_id)
        self._notify(ACTION_TYPE.END)
    def check_out(self, status = NodeStatus.SUCCESS):
        """Ascend one level, closing the node entered by check_in()."""
        self._id = self._id[:-1]
        node = self._execution_control._tracks.getNode(self._id)
        node.set_end(datetime.now())
        node.set_status(status)
        self._execution_control.remove_node(self._id)
        type_node = ACTION_TYPE.CHECKOUT_NODE if isinstance(node, Node) else ACTION_TYPE.CHECKOUT_FN
        self._notify(type_node)
    def on_move(self, fn):
        """Install ``fn`` as both the movement and the log callback."""
        self._on_move_fn = fn
        self._logger.on_log(fn)
    def deliver(self):
        """Return (payload, control, data_store, logger) for a user function."""
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        return self._pipe_data.data, PipeTransporterControl(), self._data_store, self._logger.u_logger(_id)
    def receive_data(self, data: Any):
        """Accept a function result; error/control markers pass through raw."""
        if isinstance(data, (PipeError, PipeInterrupt, PipeStop)):
            self._pipe_data = data
        else:
            self._pipe_data = PipeData(data)
    def recompose(self, chld_transp: List[Any]):
        """Merge the payloads of child transporters back into this one."""
        self.receive_data([c.data().data if hasattr(c.data(), 'data') else c.data() for c in chld_transp])
    def data(self):
        return self._pipe_data
    def is_on_error(self) -> bool:
        return self._error
    def function_error(self, msg = 'User Error'):
        """Flag this transporter as errored with a FunctionError payload."""
        _err = PipeTransporterControl().function_error(msg)
        self.receive_data(_err)
        self._error = True
    def _notify(self, action_type: ACTION_TYPE):
        # Observers are optional; silently skip when no callback installed.
        if callable(self._on_move_fn): self._on_move_fn(action_type)
|
<filename>rhprocessor/controls.py
from typing import Any, Dict, Iterable, List
from datetime import datetime
import copy
from enum import Enum, auto
class ACTION_TYPE(Enum):
    """Event kinds passed to the move/log callbacks of Transporter/ULogger."""
    CHECKIN_NODE = auto()   # a Node was registered (Transporter.check_in)
    CHECKIN_FN = auto()     # a NodeFunctions leaf was registered
    START = auto()          # node execution started (Transporter.start)
    END = auto()            # node execution finished (Transporter.end)
    CHECKOUT_FN = auto()    # function-leaf closed (Transporter.check_out)
    CHECKOUT_NODE = auto()  # composite node closed (Transporter.check_out)
    LOG = auto()            # ULogger.log appended an entry
class ILog():
    """A single timestamped log entry; the timestamp is taken at creation."""
    def __init__(self, txt) -> None:
        self._txt = txt
        self._dt = datetime.now()
    @property
    def txt(self):
        """The log message text."""
        return self._txt
    @property
    def dt(self):
        """Creation time as a datetime."""
        return self._dt
    def to_dict(self):
        """Serialize as {'txt': ..., 'dt': <unix timestamp>}."""
        return dict(txt=self._txt, dt=datetime.timestamp(self._dt))
class ULogger():
    """User-facing logger bound to one node's log list."""
    def __init__(self, obj: List[Dict[str, Any]], _cb_fn = None) -> None:
        self._obj = obj
        self._fn = _cb_fn
    def log(self, msg):
        """Append *msg* as an ILog entry and fire the observer callback."""
        self._obj.append(ILog(msg))
        self._notify(ACTION_TYPE.LOG)
    def logProgress(self, progress, total, msg):
        # Placeholder: progress reporting is not implemented yet.
        pass
    def _notify(self, action_type: ACTION_TYPE):
        # Callback is optional.
        if callable(self._fn):
            self._fn(action_type)
class Logger():
    """Registry of per-node log lists, keyed by dotted id strings."""
    def __init__(self) -> None:
        self._store: Dict[str, List[Dict[str, Any]]] = {}
        self._fn = None  # observer callback shared with every ULogger
    def get_log_obj_from_string(self, idstring):
        """Return the log list registered under *idstring*, or None."""
        return self._store.get(idstring, None)
    def get_log_obj(self, key: List[int]):
        """Return the log list for the id path *key*, or None."""
        return self.get_log_obj_from_string(self._id_string(key))
    def _id_string(self, id):
        """[1, 2, 3] -> '1.2.3'"""
        return '.'.join(map(str, id))
    def u_logger(self, id: List[int]):
        """Register a fresh log list for *id* and return its ULogger."""
        key = self._id_string(id)
        self._store[key] = []
        return ULogger(self._store[key], self._fn)
    def get_all_logs(self):
        """Serialize every registered log list to plain dicts."""
        return {key: [entry.to_dict() for entry in entries]
                for key, entries in self._store.items()}
    def on_log(self, fn):
        """Install the observer callback used by subsequently created ULoggers."""
        self._fn = fn
class DataStore():
    """Type-protected key/value store shared across a pipeline run.

    When ``protected`` is True (default) an existing truthy value may only
    be overwritten by a value of the same type.
    """
    def __init__(self, data = None, protected = True):
        data = {} if data is None else data
        assert isinstance(data, dict), 'Data must be a dict'
        # BUG FIX: the initial ``data`` used to be validated and then
        # discarded (``self._data = {}``).  Seed the store with a copy so
        # the caller's dict is not aliased; the old mutable ``{}`` default
        # is replaced by ``None`` for the same reason.
        self._data = dict(data)
        self._protected = protected
    def set_data(self, key, data) -> bool:
        """Store ``data`` under ``key``.

        Returns False when blocked by type protection (existing truthy
        value of a different type), True otherwise.
        """
        if not self._protected or not self._data.get(key) or type(self._data[key]) == type(data):
            self._data[key] = data
            return True
        return False
    def get_data(self, key):
        """Return the value stored under ``key``, or None."""
        return self._data.get(key)
class MetaError():
    """Base class for error/control markers that travel through the pipe."""
    def __init__(self, msg):
        self._msg = msg
    @property
    def msg(self):
        # Human-readable description carried by the marker.
        return self._msg
class PipeInterrupt(MetaError):
    # Marker: the pipeline run was interrupted.
    pass
class PipeStop(MetaError):
    # Marker: the pipeline run was stopped.
    pass
class PipeError(MetaError):
    # Marker: pipeline-level error (created by PipeTransporterControl.pipe_error).
    pass
class FunctionError(MetaError):
    # Marker: error from a user function (created by Transporter.function_error).
    pass
class PipeTransporterControl():
    """Factory handed to user functions for signalling errors into the pipe."""
    def pipe_error(self, msg = 'PipeError'):
        """Create a pipeline-level error marker carrying *msg*."""
        return PipeError(msg)
    def function_error(self, msg):
        """Create a user-function error marker carrying *msg*.

        BUG FIX: ``msg`` used to be ignored -- every error was reported
        with the hard-coded text 'User Function Error'.
        """
        return FunctionError(msg)
class PipeData():
    """Read-only wrapper around the payload travelling through the pipe."""
    def __init__(self, data = None):
        self._data = data
    @property
    def data(self):
        """The wrapped payload."""
        return self._data
class NodeStatus(Enum):
    """Lifecycle states recorded on MetaNode instances."""
    AWAITING = auto()  # created, not started yet (MetaNode default)
    SUCCESS = auto()   # default status for Transporter.end/check_out
    ERROR = auto()
    IGNORED = auto()
    RUNNING = auto()   # set by Transporter.start/check_in
class MetaNode():
    """Execution record for a single pipeline node (timings + status)."""
    def __init__(self, name: str, node_type: str) -> None:
        self._name = name
        self._node_type = node_type
        self._start = None   # datetime when execution began
        self._end = None     # datetime when execution finished
        self._status = NodeStatus.AWAITING
        self._n_chld = None  # number of child transporters, if split
    def set_start(self, value):
        self._start = value
    def set_end(self, value):
        self._end = value
    def set_status(self, status):
        """Set the node status; raises ValueError for non-NodeStatus values.

        Idiom fix: test membership with ``isinstance`` against the enum
        instead of materialising a throwaway list of its members.
        """
        if not isinstance(status, NodeStatus):
            raise ValueError('Status not valid')
        self._status = status
    def set_n_chld(self, n_tracks):
        self._n_chld = n_tracks
    def to_dict(self):
        """Serialize; datetimes become unix timestamps, None stays None."""
        return {
            'name': self._name,
            'node_type': self._node_type,
            'start': datetime.timestamp(self._start) if isinstance(self._start, datetime) else self._start,
            'end': datetime.timestamp(self._end) if isinstance(self._end, datetime) else self._end,
            'status': self._status.name,
            'n_chld': self._n_chld
        }
class Node(MetaNode):
    """Composite execution record holding per-function-slot child tracks."""
    def __init__(self, name: str, node_type: str) -> None:
        super().__init__(name, node_type)
        # Child records, keyed by function slot index.
        self._tracks: Dict[int, MetaNode] = {}
    @property
    def tracks(self):
        return self._tracks
    def set_tracks_position(self, n_pos, prev = None):
        # Pre-allocate n_pos slots: nested dicts when ``prev`` is given
        # (non-BlockMode callers), bare None placeholders otherwise.
        self._tracks = {n: dict() if prev != None else None for n in list(range(n_pos))}
    def add_track(self, key: int, data: MetaNode):
        self._tracks[key] = data
    def to_dict(self):
        """Serialize this node plus up to three levels of nested tracks."""
        _dct = super().to_dict()
        _dct['tracks'] = {}
        for k, v in self._tracks.items():
            # Level 1: slot holds a concrete record -- serialize directly.
            if hasattr(v, 'to_dict'):
                _dct['tracks'][k] = v.to_dict()
                continue
            # Level 2: slot holds a non-empty dict of nested entries.
            if isinstance(v, dict) and len(v) > 0:
                for _k, _v in v.items():
                    if _dct['tracks'].get(k) == None:
                        _dct['tracks'][k] = {}
                    if hasattr(_v, 'to_dict'):
                        _dct['tracks'][k][_k] = _v.to_dict()
                    # Level 3: one more dict layer; deeper values that lack
                    # to_dict() are passed through unchanged.
                    if isinstance(_v, dict) and len(_v) > 0:
                        _dct['tracks'][k][_k] = {_kk: _vv.to_dict() if hasattr(_vv, 'to_dict') else _vv for _kk, _vv in _v.items()}
                        continue
                continue
            # Fallback: empty dict or None placeholder is kept as-is.
            _dct['tracks'][k] = v
        return _dct
class NodeFunctions(MetaNode):
    """Leaf record for the functions executed inside a parent Node."""
    def __init__(self, name: str, node_type: str) -> None:
        super().__init__(name, node_type)
        # Log lines collected while the functions run.
        self._logs: List[str] = []
class Tracks():
    """Tree of MetaNode records addressed by integer index paths."""
    def __init__(self, dict_track = None) -> None:
        # ``dict_track`` is accepted for API compatibility but unused;
        # ``None`` default avoids a shared mutable dict.
        self._tracks = {}
    def _descend(self, container, k):
        """Step one level down: dicts by key, Node objects via ``.tracks``."""
        try:
            return container.get(k) if isinstance(container, dict) else container.tracks.get(k)
        except AttributeError as e:
            # BUG FIX: the previous ``except BaseException`` also masked
            # KeyboardInterrupt/SystemExit and dropped the original cause.
            # Only AttributeError can occur here (reaching None/a leaf
            # before the path is exhausted); chain it for debuggability.
            raise ValueError(f'Index "{k}" not found in {container}') from e
    def addNode(self, nodeKey: List[int], node: MetaNode):
        """Insert ``node`` at path ``nodeKey``; parent path must exist."""
        current = self._tracks
        for k in nodeKey[:-1]:
            current = self._descend(current, k)
        if isinstance(current, Node):
            current.add_track(nodeKey[-1], node)
        else:
            current[nodeKey[-1]] = node
    def getNode(self, nodeKey: List[int]):
        """Return the node at path ``nodeKey``; raises ValueError when missing."""
        current = self._tracks
        for k in nodeKey:
            current = self._descend(current, k)
        return current
    def to_dict(self):
        """Serialize every top-level track node."""
        return {k: v.to_dict() for k, v in self._tracks.items()}
class ExecutionControl():
    """Bookkeeping for the pipeline run.

    Maintains three structures:
      * ``_current_nodes_id`` -- ids of executions currently running.
      * ``_current_node``     -- deepest checked-in node per branch.
      * ``_tracks``           -- the full execution tree (:class:`Tracks`).
    """
    def __init__(self, execution_data: dict = None):
        # ``execution_data`` is accepted for API compatibility but is
        # currently unused; ``None`` default avoids a shared mutable dict.
        self._current_nodes_id = {}
        self._current_node = {}
        self._tracks = Tracks()
    @property
    def current_nodes_id(self):
        return self._current_nodes_id
    @property
    def current_node(self):
        return self._current_node
    @property
    def tracks(self):
        return self._tracks
    def add_execution(self, node_id: List[int]):
        """Register ``node_id`` as currently executing."""
        self._current_nodes_id[self._get_key_string(node_id)] = node_id
    def remove_execution(self, node_id: List[int]):
        """Unregister ``node_id``; unknown ids are ignored."""
        self._current_nodes_id.pop(self._get_key_string(node_id), None)
    def add_node(self, node_id: List[int], node):
        """Register ``node`` under ``node_id``, evicting its parent entry.

        Only the deepest active node of a branch is kept: if the direct
        parent of ``node_id`` is registered it is removed first.
        """
        _kr = None
        for k in self._current_node:
            k_list = self._get_key_list(k)
            if (self._is_child(k_list, node_id)):
                _kr = k_list
        # BUG FIX: remove the *matched* parent key (_kr), not whatever key
        # the loop happened to end on (k_list).
        if _kr: self.remove_node(_kr)
        self._current_node[self._get_key_string(node_id)] = node
    def remove_node(self, node_id: List[int]):
        self._current_node.pop(self._get_key_string(node_id), None)
    def _is_child(self, parent_id: List[int], child_id: List[int]):
        # Direct child: dropping the last index component yields the parent.
        return parent_id == child_id[:-1]
    def _get_key_string(self, node_id: List[int]):
        """[1, 2, 3] -> '1.2.3'"""
        return '.'.join(str(n) for n in node_id)
    def _get_key_list(self, node_id_string: str):
        """'1.2.3' -> [1, 2, 3]"""
        return [int(n) for n in node_id_string.split('.')]
class Transporter():
    """Carries a payload through the pipeline and records execution progress.

    Couples the payload (:class:`PipeData`) with the shared
    :class:`DataStore`, :class:`Logger` and :class:`ExecutionControl`.
    The transporter's position in the execution tree is the index path
    ``_id``; once split into children each child also carries ``_child_id``.
    """
    def __init__(
        self, pipe_data: PipeData,
        data_store: DataStore,
        logger: Logger,
        execution_control: ExecutionControl,
        id = None, child_id = None,
        fn_move = None
    ):
        self._pipe_data = pipe_data
        self._data_store = data_store
        self._logger = logger
        self._execution_control = execution_control
        # BUG FIX: the default used to be the shared mutable list ``[-1]``;
        # check_in() mutates it in place (``self._id[-1] += 1``), so every
        # Transporter built with the default shared a single id list.
        self._id = [-1] if id is None else id
        self._child_id = child_id  # assigned once this becomes a child
        self._n_children = None    # set by make_children()
        self._start = None
        self._end = None
        self._error = isinstance(self._pipe_data.data, MetaError)
        self._on_move_fn = fn_move
    @property
    def execution_control(self):
        return self._execution_control
    @property
    def logger(self):
        return self._logger
    def check_in(self, articulator, fns_qnt = 0):
        """Register the next node for ``articulator`` and notify observers.

        ``fns_qnt > 0`` registers a :class:`Node` with that many function
        slots and descends one level; otherwise a :class:`NodeFunctions`
        leaf is registered.
        """
        self._id[-1] += 1
        _id = self._id if self._child_id is None else self._id + [self._child_id]
        if fns_qnt:
            _inst = Node(articulator.name, articulator.type)
            # BlockMode slots stay bare; other modes hold nested dicts.
            _prev = None if articulator.type == 'BlockMode' else dict()
            _inst.set_tracks_position(fns_qnt, _prev)
            _inst.set_start(datetime.now())
            _inst.set_status(NodeStatus.RUNNING)
            self._execution_control.add_node(_id, _inst)
        else:
            ## the children will use this.
            _inst = NodeFunctions(articulator.name, articulator.type)
        self._execution_control._tracks.addNode(_id, _inst)
        if fns_qnt: self._id.append(-1)
        type_node = ACTION_TYPE.CHECKIN_NODE if isinstance(_inst, Node) else ACTION_TYPE.CHECKIN_FN
        self._notify(type_node)
    def make_children(self, qnt = None):
        """Split an iterable payload into one child transporter per item.

        A transporter already on error yields a single child wrapping the
        error marker.  Raises ValueError for non-iterable payloads.
        """
        if self._error or isinstance(self._pipe_data.data, MetaError):
            return [self._new_instance(d, i) for i, d in enumerate([self._pipe_data.data])]
        if not isinstance(self._pipe_data.data, Iterable):
            raise ValueError('Data is not iterable')
        self._n_children = len(self._pipe_data.data)
        return [self._new_instance(d, i) for i, d in enumerate(self._pipe_data.data)]
    def makeCopy(self, qnt: int):
        """Return ``qnt`` sibling copies of this transporter's payload."""
        n_id = self._id[:]
        del n_id[-1]
        return [self._new_instance(self.data().data, pid=(n_id + [i - 1])) for i in list(range(qnt))]
    def _new_instance(self, d, num = None, pid = None):
        # Children get a deep copy of the payload so they cannot alias data.
        _id_list = self._id[:] if not pid else pid
        return Transporter(PipeData(copy.deepcopy(d)), self._data_store, self._logger, self._execution_control, _id_list, num, self._on_move_fn)
    def set_total_tracks(self):
        """Record the child count (from make_children) on the parent node."""
        _id = self._id[:-1]
        node = self._execution_control._tracks.getNode(_id)
        node.set_n_chld(self._n_children)
    def start(self):
        """Mark this transporter's node as running and notify observers."""
        self._start = datetime.now()
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        node = self._execution_control._tracks.getNode(_id)
        node.set_start(self._start)
        node.set_status(NodeStatus.RUNNING)
        self._execution_control.add_execution(_id)
        self._notify(ACTION_TYPE.START)
    def end(self, status = NodeStatus.SUCCESS):
        """Mark this transporter's node as finished with ``status``."""
        self._end = datetime.now()
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        node = self._execution_control._tracks.getNode(_id)
        node.set_end(self._end)
        node.set_status(status)
        self._execution_control.remove_execution(_id)
        self._notify(ACTION_TYPE.END)
    def check_out(self, status = NodeStatus.SUCCESS):
        """Ascend one level, closing the node entered by check_in()."""
        self._id = self._id[:-1]
        node = self._execution_control._tracks.getNode(self._id)
        node.set_end(datetime.now())
        node.set_status(status)
        self._execution_control.remove_node(self._id)
        type_node = ACTION_TYPE.CHECKOUT_NODE if isinstance(node, Node) else ACTION_TYPE.CHECKOUT_FN
        self._notify(type_node)
    def on_move(self, fn):
        """Install ``fn`` as both the movement and the log callback."""
        self._on_move_fn = fn
        self._logger.on_log(fn)
    def deliver(self):
        """Return (payload, control, data_store, logger) for a user function."""
        _id = self._id[:]
        if self._child_id is not None: _id = _id + [self._child_id]
        return self._pipe_data.data, PipeTransporterControl(), self._data_store, self._logger.u_logger(_id)
    def receive_data(self, data: Any):
        """Accept a function result; error/control markers pass through raw."""
        if isinstance(data, (PipeError, PipeInterrupt, PipeStop)):
            self._pipe_data = data
        else:
            self._pipe_data = PipeData(data)
    def recompose(self, chld_transp: List[Any]):
        """Merge the payloads of child transporters back into this one."""
        self.receive_data([c.data().data if hasattr(c.data(), 'data') else c.data() for c in chld_transp])
    def data(self):
        return self._pipe_data
    def is_on_error(self) -> bool:
        return self._error
    def function_error(self, msg = 'User Error'):
        """Flag this transporter as errored with a FunctionError payload."""
        _err = PipeTransporterControl().function_error(msg)
        self.receive_data(_err)
        self._error = True
    def _notify(self, action_type: ACTION_TYPE):
        # Observers are optional; silently skip when no callback installed.
        if callable(self._on_move_fn): self._on_move_fn(action_type)
|
pt
| 0.483531
|
#print('_list', _list.to_dict()) ## disabled ### Quando se tornar filhos, ele tem um ID... ## the children will use this.
| 2.270547
| 2
|
app/service/operation_svc.py
|
gaybro8777/caldera
| 0
|
6629065
|
import asyncio
import json
import os
import traceback
from importlib import import_module
from app.service.base_service import BaseService
class OperationService(BaseService):
    """Drives the lifecycle of an operation: resume, run, close, report."""

    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.log = self.add_service('operation_svc', self)
        self.op_states = dict(RUNNING='running',
                              RUN_ONE_LINK='run_one_link',
                              PAUSED='paused',
                              FINISHED='finished')
        self.data_svc = self.get_service('data_svc')

    async def resume(self):
        """
        Resume an operation that was stopped
        :return: None
        """
        unfinished = [op for op in await self.data_svc.explode_operation() if not op['finish']]
        for op in unfinished:
            self.loop.create_task(self.run(op['id']))

    async def close_operation(self, operation):
        """
        Perform all close actions for an operation
        :param operation:
        :return: None
        """
        op_id = operation['id']
        await self.get_service('planning_svc').create_cleanup_links(operation)
        self.log.debug('Operation complete: %s' % op_id)
        await self.data_svc.update(
            'core_operation', key='id', value=op_id,
            data=dict(finish=self.get_current_timestamp(), state=self.op_states['FINISHED']))
        await self._write_report(await self.generate_operation_report(op_id))

    async def run(self, op_id):
        """
        Run a new operation
        :param op_id:
        :return: None
        """
        self.log.debug('Starting operation: %s' % op_id)
        records = await self.data_svc.explode_operation(dict(id=op_id))
        try:
            operation = records[0]
            planner = await self._get_planning_module(operation)
            for phase in operation['adversary']['phases']:
                label = 'Operation %s (%s) phase %s' % (op_id, operation['name'], phase)
                self.log.debug('%s: started' % label)
                await planner.execute(phase)
                self.log.debug('%s: completed' % label)
                await self.data_svc.update('core_operation', key='id', value=op_id,
                                           data=dict(phase=phase))
            await self.close_operation(operation)
        except Exception:
            traceback.print_exc()

    async def generate_operation_report(self, op_id):
        """
        Create a new operation report and write it to the logs directory
        :param op_id:
        :return: a JSON report
        """
        op = (await self.data_svc.explode_operation(dict(id=op_id)))[0]
        planner = (await self.data_svc.explode_planners(criteria=dict(id=op['planner'])))[0]
        report = dict(name=op['name'], id=op['id'], host_group=op['host_group'], start=op['start'],
                      facts=op['facts'], finish=op['finish'], planner=planner,
                      adversary=op['adversary'], jitter=op['jitter'], steps=[])
        for step in op['chain']:
            ability = (await self.data_svc.explode_abilities(criteria=dict(id=step['ability'])))[0]
            entry = dict(ability_id=ability['ability_id'],
                         paw=step['paw'],
                         command=self.decode_bytes(step['command']),
                         delegated=step['collect'],
                         run=step['finish'],
                         status=step['status'],
                         description=ability['description'],
                         name=ability['name'],
                         attack=dict(tactic=ability['tactic'],
                                     technique_name=ability['technique_name'],
                                     technique_id=ability['technique_id']))
            report['steps'].append(entry)
        return report

    """ PRIVATE """

    @staticmethod
    async def _write_report(report):
        # Reports land in logs/operation_report_<name>.json
        path = os.path.join('logs', 'operation_report_' + report['name'] + '.json')
        with open(path, 'w') as out:
            out.write(json.dumps(report, indent=4))

    async def _get_planning_module(self, operation):
        # The planner module is configured per-operation in the data store.
        chosen = await self.data_svc.explode_planners(dict(id=operation['planner']))
        planner_cls = getattr(import_module(chosen[0]['module']), 'LogicalPlanner')
        return planner_cls(operation, self.get_service('planning_svc'),
                           **chosen[0]['params'])
|
import asyncio
import json
import os
import traceback
from importlib import import_module
from app.service.base_service import BaseService
class OperationService(BaseService):
    """Drives the lifecycle of an operation: resume, run, close, report."""

    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.log = self.add_service('operation_svc', self)
        self.op_states = dict(RUNNING='running',
                              RUN_ONE_LINK='run_one_link',
                              PAUSED='paused',
                              FINISHED='finished')
        self.data_svc = self.get_service('data_svc')

    async def resume(self):
        """
        Resume an operation that was stopped
        :return: None
        """
        unfinished = [op for op in await self.data_svc.explode_operation() if not op['finish']]
        for op in unfinished:
            self.loop.create_task(self.run(op['id']))

    async def close_operation(self, operation):
        """
        Perform all close actions for an operation
        :param operation:
        :return: None
        """
        op_id = operation['id']
        await self.get_service('planning_svc').create_cleanup_links(operation)
        self.log.debug('Operation complete: %s' % op_id)
        await self.data_svc.update(
            'core_operation', key='id', value=op_id,
            data=dict(finish=self.get_current_timestamp(), state=self.op_states['FINISHED']))
        await self._write_report(await self.generate_operation_report(op_id))

    async def run(self, op_id):
        """
        Run a new operation
        :param op_id:
        :return: None
        """
        self.log.debug('Starting operation: %s' % op_id)
        records = await self.data_svc.explode_operation(dict(id=op_id))
        try:
            operation = records[0]
            planner = await self._get_planning_module(operation)
            for phase in operation['adversary']['phases']:
                label = 'Operation %s (%s) phase %s' % (op_id, operation['name'], phase)
                self.log.debug('%s: started' % label)
                await planner.execute(phase)
                self.log.debug('%s: completed' % label)
                await self.data_svc.update('core_operation', key='id', value=op_id,
                                           data=dict(phase=phase))
            await self.close_operation(operation)
        except Exception:
            traceback.print_exc()

    async def generate_operation_report(self, op_id):
        """
        Create a new operation report and write it to the logs directory
        :param op_id:
        :return: a JSON report
        """
        op = (await self.data_svc.explode_operation(dict(id=op_id)))[0]
        planner = (await self.data_svc.explode_planners(criteria=dict(id=op['planner'])))[0]
        report = dict(name=op['name'], id=op['id'], host_group=op['host_group'], start=op['start'],
                      facts=op['facts'], finish=op['finish'], planner=planner,
                      adversary=op['adversary'], jitter=op['jitter'], steps=[])
        for step in op['chain']:
            ability = (await self.data_svc.explode_abilities(criteria=dict(id=step['ability'])))[0]
            entry = dict(ability_id=ability['ability_id'],
                         paw=step['paw'],
                         command=self.decode_bytes(step['command']),
                         delegated=step['collect'],
                         run=step['finish'],
                         status=step['status'],
                         description=ability['description'],
                         name=ability['name'],
                         attack=dict(tactic=ability['tactic'],
                                     technique_name=ability['technique_name'],
                                     technique_id=ability['technique_id']))
            report['steps'].append(entry)
        return report

    """ PRIVATE """

    @staticmethod
    async def _write_report(report):
        # Reports land in logs/operation_report_<name>.json
        path = os.path.join('logs', 'operation_report_' + report['name'] + '.json')
        with open(path, 'w') as out:
            out.write(json.dumps(report, indent=4))

    async def _get_planning_module(self, operation):
        # The planner module is configured per-operation in the data store.
        chosen = await self.data_svc.explode_planners(dict(id=operation['planner']))
        planner_cls = getattr(import_module(chosen[0]['module']), 'LogicalPlanner')
        return planner_cls(operation, self.get_service('planning_svc'),
                           **chosen[0]['params'])
|
en
| 0.894415
|
Resume an operation that was stopped :return: None Perform all close actions for an operation :param operation: :return: None Run a new operation :param op_id: :return: None Create a new operation report and write it to the logs directory :param op_id: :return: a JSON report PRIVATE
| 2.411823
| 2
|
data_steward/test/unit_test/test_util.py
|
t-abdul-basser/curation
| 0
|
6629066
|
<filename>data_steward/test/unit_test/test_util.py
import os
import common
import gcs_utils
import resources
from validation import main
# HPO id used throughout the unit tests; not a real site.
FAKE_HPO_ID = 'fake'
VALIDATE_HPO_FILES_URL = main.PREFIX + 'ValidateHpoFiles/' + FAKE_HPO_ID
COPY_HPO_FILES_URL = main.PREFIX + 'CopyFiles/' + FAKE_HPO_ID
# Root of the checked-in test fixtures.
TEST_DATA_PATH = os.path.join(resources.base_path, 'test', 'test_data')
EMPTY_VALIDATION_RESULT = os.path.join(TEST_DATA_PATH, 'empty_validation_result.csv')
ALL_FILES_UNPARSEABLE_VALIDATION_RESULT = os.path.join(TEST_DATA_PATH, 'all_files_unparseable_validation_result.csv')
ALL_FILES_UNPARSEABLE_VALIDATION_RESULT_NO_HPO_JSON = os.path.join(TEST_DATA_PATH, 'all_files_unparseable_validation_result_no_hpo.json')
BAD_PERSON_FILE_BQ_LOAD_ERRORS_CSV = os.path.join(TEST_DATA_PATH, 'bq_errors_bad_person.csv')
EMPTY_WARNINGS_CSV = os.path.join(TEST_DATA_PATH, 'empty_warnings.csv')
# Test files for five person sample
FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH, 'five_persons')
FIVE_PERSONS_PERSON_CSV = os.path.join(FIVE_PERSONS_PATH, 'person.csv')
FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'visit_occurrence.csv')
FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'condition_occurrence.csv')
FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(FIVE_PERSONS_PATH, 'drug_exposure.csv')
FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(FIVE_PERSONS_PATH, 'measurement.csv')
# All five-persons CDM fixture files, in load order.
FIVE_PERSONS_FILES = [FIVE_PERSONS_PERSON_CSV,
                      FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
                      FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
                      FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
                      FIVE_PERSONS_DRUG_EXPOSURE_CSV,
                      FIVE_PERSONS_MEASUREMENT_CSV]
FIVE_PERSONS_SUCCESS_RESULT_CSV = os.path.join(TEST_DATA_PATH, 'five_persons_success_result.csv')
FIVE_PERSONS_SUCCESS_RESULT_NO_HPO_JSON = os.path.join(TEST_DATA_PATH, 'five_persons_success_result_no_hpo.json')
# OMOP NYC and PITT test data from synpuf
NYC_FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH,'nyc_five_person')
PITT_FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH,'pitt_five_person')
NYC_FIVE_PERSONS_PERSON_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'person.csv')
NYC_FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'visit_occurrence.csv')
NYC_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'condition_occurrence.csv')
NYC_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
NYC_FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'drug_exposure.csv')
NYC_FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'measurement.csv')
NYC_FIVE_PERSONS_OBSERVATION_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'observation.csv')
NYC_FIVE_PERSONS_FILES = [
    NYC_FIVE_PERSONS_PERSON_CSV,
    NYC_FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
    NYC_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
    NYC_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
    NYC_FIVE_PERSONS_DRUG_EXPOSURE_CSV,
    NYC_FIVE_PERSONS_MEASUREMENT_CSV,
    NYC_FIVE_PERSONS_OBSERVATION_CSV]
PITT_FIVE_PERSONS_PERSON_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'person.csv')
PITT_FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'visit_occurrence.csv')
PITT_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'condition_occurrence.csv')
PITT_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
PITT_FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'drug_exposure.csv')
PITT_FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'measurement.csv')
PITT_FIVE_PERSONS_OBSERVATION_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'observation.csv')
PITT_FIVE_PERSONS_FILES = [
    PITT_FIVE_PERSONS_PERSON_CSV,
    PITT_FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
    PITT_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
    PITT_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
    PITT_FIVE_PERSONS_DRUG_EXPOSURE_CSV,
    PITT_FIVE_PERSONS_MEASUREMENT_CSV,
    PITT_FIVE_PERSONS_OBSERVATION_CSV]
RDR_PATH = os.path.join(TEST_DATA_PATH, 'rdr')
# Export fixtures (query response payloads and synpuf CSVs).
TEST_DATA_EXPORT_PATH = os.path.join(TEST_DATA_PATH, 'export')
TEST_DATA_EXPORT_SYNPUF_PATH = os.path.join(TEST_DATA_EXPORT_PATH, 'synpuf')
def _create_five_persons_success_result():
    """
    Generate the expected result payload for five_persons data set. For internal testing only.
    """
    import csv
    field_names = ['cdm_file_name', 'found', 'parsed', 'loaded']
    rows = [dict(cdm_file_name=name, found="1", parsed="1", loaded="1")
            for name in common.CDM_FILES]
    with open(FIVE_PERSONS_SUCCESS_RESULT_CSV, 'w') as out:
        writer = csv.DictWriter(out, field_names, quoting=csv.QUOTE_ALL)
        writer.writeheader()
        writer.writerows(rows)
def _export_query_response_by_path(p, hpo_id):
    """Utility to create response test payloads.

    For each .sql file under ``p``, renders the query for ``hpo_id``, runs
    it against BigQuery and saves the rows/schema of the response as
    ``<name>_response.json`` under TEST_DATA_EXPORT_PATH.
    """
    from validation import export
    import bq_utils
    # Hoisted out of the loop: the import used to run once per file written.
    import json
    for f in export.list_files_only(p):
        abs_path = os.path.join(p, f)
        with open(abs_path, 'r') as fp:
            sql = fp.read()
        sql = export.render(sql, hpo_id, results_schema=bq_utils.get_dataset_id(), vocab_schema='synpuf_100')
        query_result = bq_utils.query(sql)
        out_file = os.path.join(TEST_DATA_EXPORT_PATH, f.replace('.sql', '_response.json'))
        with open(out_file, 'w') as fp:
            # Keep only the payload parts the tests consume.
            data = dict()
            if 'rows' in query_result:
                data['rows'] = query_result['rows']
            if 'schema' in query_result:
                data['schema'] = query_result['schema']
            json.dump(data, fp, sort_keys=True, indent=4, separators=(',', ': '))
def _export_query_responses():
    """Regenerate response payloads for every export category."""
    from validation import export
    for category in ('datadensity', 'achillesheel', 'person'):
        _export_query_response_by_path(os.path.join(export.EXPORT_PATH, category), FAKE_HPO_ID)
def empty_bucket(bucket):
    """Delete every object in the given GCS bucket."""
    for item in gcs_utils.list_bucket(bucket):
        gcs_utils.delete_object(bucket, item['name'])
def delete_all_tables(dataset_id):
    """
    Remove all non-vocabulary tables from a dataset
    :param dataset_id: ID of the dataset with the tables to delete
    :return: list of deleted tables
    """
    import bq_utils
    deleted = []
    for table in bq_utils.list_tables(dataset_id):
        table_id = table['tableReference']['tableId']
        # Vocabulary tables are shared fixtures; never drop them.
        if table_id in common.VOCABULARY_TABLES:
            continue
        bq_utils.delete_table(table_id, dataset_id)
        deleted.append(table_id)
    return deleted
import requests
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to *destination*."""
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    # Large files trigger a virus-scan interstitial; a confirm token in the
    # cookies lets us bypass it with a second request.
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, if present."""
    for name, value in response.cookies.items():
        if name.startswith('download_warning'):
            return value
    return None
def save_response_content(response, destination):
    """Stream the response body to *destination* in 32 KiB chunks."""
    CHUNK_SIZE = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:  # filter out keep-alive new chunks
                out.write(chunk)
def get_synpuf_results_files():
    """Fetch the synpuf achilles result CSVs from Google Drive if missing."""
    files = [('0B8QSHCLE8g4JV1Q4UHFRLXNhM2c', 'achilles_results.csv'),
             ('0B8QSHCLE8g4JeUUxZEh0SS1YNlk', 'achilles_results_dist.csv'),
             ('0B8QSHCLE8g4JQUE1dGJLd1RpWEk', 'achilles_heel_results.csv')]
    for file_id, file_name in files:
        dest_path = os.path.join(TEST_DATA_EXPORT_SYNPUF_PATH, file_name)
        if not os.path.exists(dest_path):
            download_file_from_google_drive(file_id, dest_path)
def read_cloud_file(bucket, name):
    """Fetch the contents of object ``name`` from ``bucket``."""
    return gcs_utils.get_object(bucket, name)
def write_cloud_str(bucket, name, contents_str):
    """Upload the string ``contents_str`` to ``bucket`` as object ``name``."""
    # NOTE(review): uses the Python 2 StringIO module; under Python 3 this
    # would need ``io.StringIO`` -- confirm target runtime before porting.
    import StringIO
    fp = StringIO.StringIO(contents_str)
    return write_cloud_fp(bucket, name, fp)
def write_cloud_file(bucket, f, prefix = ""):
    """Upload local file *f* to *bucket*, optionally prefixing its object name."""
    with open(f, 'r') as fp:
        return write_cloud_fp(bucket, prefix + os.path.basename(f), fp)
def write_cloud_fp(bucket, name, fp):
    """Upload the contents of file-like ``fp`` to ``bucket`` as object ``name``."""
    return gcs_utils.upload_object(bucket, name, fp)
def populate_achilles(hpo_bucket, hpo_id = FAKE_HPO_ID, include_heel=True):
    """Load canned achilles result tables for ``hpo_id`` into BigQuery.

    Uploads the fixture CSVs to ``hpo_bucket``, loads them into the
    achilles analysis/results tables (plus heel results when
    ``include_heel``), then blocks until the queued load jobs finish.
    """
    from validation import achilles, achilles_heel
    from google.appengine.api import app_identity
    import bq_utils
    app_id = app_identity.get_application_id()
    # The achilles_analysis table is loaded first, from the export fixtures.
    test_file_name = achilles.ACHILLES_ANALYSIS + '.csv'
    achilles_analysis_file_path = os.path.join(TEST_DATA_EXPORT_PATH, test_file_name)
    schema_path = os.path.join(resources.fields_path, achilles.ACHILLES_ANALYSIS + '.json')
    write_cloud_file(hpo_bucket, achilles_analysis_file_path)
    gcs_path = 'gs://' + hpo_bucket + '/' + test_file_name
    dataset_id = bq_utils.get_dataset_id()
    table_id = bq_utils.get_table_id(hpo_id, achilles.ACHILLES_ANALYSIS)
    # NOTE(review): this first load job is not appended to running_jobs
    # below, so it is never waited on -- confirm whether that is intended.
    bq_utils.load_csv(schema_path, gcs_path, app_id, dataset_id, table_id)
    table_names = [achilles.ACHILLES_RESULTS, achilles.ACHILLES_RESULTS_DIST]
    if include_heel:
        table_names.append(achilles_heel.ACHILLES_HEEL_RESULTS)
    running_jobs = []
    for table_name in table_names:
        # Result tables come from the synpuf fixtures; one load job each.
        schema_file_name = table_name + '.json'
        schema_path = os.path.join(resources.fields_path, schema_file_name)
        test_file_name = table_name + '.csv'
        test_file_path = os.path.join(TEST_DATA_EXPORT_SYNPUF_PATH, table_name + '.csv')
        write_cloud_file(hpo_bucket, test_file_path)
        gcs_path = 'gs://' + hpo_bucket + '/' + test_file_name
        dataset_id = bq_utils.get_dataset_id()
        table_id = bq_utils.get_table_id(hpo_id, table_name)
        load_results = bq_utils.load_csv(schema_path, gcs_path, app_id, dataset_id, table_id)
        running_jobs.append(load_results['jobReference']['jobId'])
    # Block until every queued load job completes.
    bq_utils.wait_on_jobs(running_jobs)
def generate_rdr_files():
    """
    Generate test csv files based on a sample of synthetic RDR data
    :return:
    """
    d = 'rdr_dataset_2018_4_17'
    for table in common.CDM_TABLES:
        q = 'SELECT * FROM fake_%s WHERE person_id IN (SELECT person_id FROM sample_person_id)' % table
        # BUG FIX: the output filename used the %-style placeholder
        # '%(table)s' but the string was filled with str.format, so the
        # literal text '%(table)s.csv' ended up in every shell command.
        cmd = 'bq query --dataset_id={d} --format=csv "{q}" > {table}.csv'.format(d=d, q=q, table=table)
        os.system(cmd)
def bash(cmd):
    """
    Run a bash-specific command
    :param cmd: the command to run
    :return: 0 if successful
    :raises
     CalledProcessError: raised when command has a non-zero result
    Note: On Windows, bash and the gcloud SDK binaries (e.g. bq, gsutil) must be in PATH
    """
    import subprocess
    import platform
    on_windows = platform.system().lower().startswith('windows')
    if on_windows:
        # extensions are not inferred on Windows
        cmd = cmd.replace('bq ', 'bq.cmd ').replace('gsutil ', 'gsutil.cmd ')
    shell = 'bash' if on_windows else '/bin/bash'
    return subprocess.check_call([shell, '-c', cmd],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def command(cmd):
    """Run *cmd* via the default shell; return the os.system exit status."""
    return os.system(cmd)
def response2rows(r):
    """
    Convert a query response to a list of dict
    :param r: a query response object
    :return: list of dict
    """
    fields = r.get('schema', {'fields': None})['fields']
    converted = []
    for raw_row in r.get('rows', []):
        converted.append(_transform_row(raw_row, fields))
    return converted
def _transform_row(row, schema):
"""
Apply the given schema to the given BigQuery data row. Adapted from https://goo.gl/dWszQJ.
:param row: A single BigQuery row to transform
:param schema: The BigQuery table schema to apply to the row, specifically the list of field dicts.
:returns: Row as a dict
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
def list_files_in(path):
    """
    List the abs paths to files (not dirs) in the supplied path
    :param path: directory to scan (non-recursive)
    :return: joined paths of regular files only
    """
    candidates = (os.path.join(path, entry) for entry in os.listdir(path))
    return [candidate for candidate in candidates if os.path.isfile(candidate)]
|
<filename>data_steward/test/unit_test/test_util.py
import os
import common
import gcs_utils
import resources
from validation import main
FAKE_HPO_ID = 'fake'
VALIDATE_HPO_FILES_URL = main.PREFIX + 'ValidateHpoFiles/' + FAKE_HPO_ID
COPY_HPO_FILES_URL = main.PREFIX + 'CopyFiles/' + FAKE_HPO_ID
TEST_DATA_PATH = os.path.join(resources.base_path, 'test', 'test_data')
EMPTY_VALIDATION_RESULT = os.path.join(TEST_DATA_PATH, 'empty_validation_result.csv')
ALL_FILES_UNPARSEABLE_VALIDATION_RESULT = os.path.join(TEST_DATA_PATH, 'all_files_unparseable_validation_result.csv')
ALL_FILES_UNPARSEABLE_VALIDATION_RESULT_NO_HPO_JSON = os.path.join(TEST_DATA_PATH, 'all_files_unparseable_validation_result_no_hpo.json')
BAD_PERSON_FILE_BQ_LOAD_ERRORS_CSV = os.path.join(TEST_DATA_PATH, 'bq_errors_bad_person.csv')
EMPTY_WARNINGS_CSV = os.path.join(TEST_DATA_PATH, 'empty_warnings.csv')
# Test files for five person sample
FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH, 'five_persons')
FIVE_PERSONS_PERSON_CSV = os.path.join(FIVE_PERSONS_PATH, 'person.csv')
FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'visit_occurrence.csv')
FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'condition_occurrence.csv')
FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(FIVE_PERSONS_PATH, 'drug_exposure.csv')
FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(FIVE_PERSONS_PATH, 'measurement.csv')
FIVE_PERSONS_FILES = [FIVE_PERSONS_PERSON_CSV,
FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
FIVE_PERSONS_DRUG_EXPOSURE_CSV,
FIVE_PERSONS_MEASUREMENT_CSV]
FIVE_PERSONS_SUCCESS_RESULT_CSV = os.path.join(TEST_DATA_PATH, 'five_persons_success_result.csv')
FIVE_PERSONS_SUCCESS_RESULT_NO_HPO_JSON = os.path.join(TEST_DATA_PATH, 'five_persons_success_result_no_hpo.json')
# OMOP NYC and PITT test data from synpuf
NYC_FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH,'nyc_five_person')
PITT_FIVE_PERSONS_PATH = os.path.join(TEST_DATA_PATH,'pitt_five_person')
NYC_FIVE_PERSONS_PERSON_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'person.csv')
NYC_FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'visit_occurrence.csv')
NYC_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'condition_occurrence.csv')
NYC_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
NYC_FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'drug_exposure.csv')
NYC_FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'measurement.csv')
NYC_FIVE_PERSONS_OBSERVATION_CSV = os.path.join(NYC_FIVE_PERSONS_PATH, 'observation.csv')
NYC_FIVE_PERSONS_FILES = [
NYC_FIVE_PERSONS_PERSON_CSV,
NYC_FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
NYC_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
NYC_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
NYC_FIVE_PERSONS_DRUG_EXPOSURE_CSV,
NYC_FIVE_PERSONS_MEASUREMENT_CSV,
NYC_FIVE_PERSONS_OBSERVATION_CSV]
PITT_FIVE_PERSONS_PERSON_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'person.csv')
PITT_FIVE_PERSONS_VISIT_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'visit_occurrence.csv')
PITT_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'condition_occurrence.csv')
PITT_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'procedure_occurrence.csv')
PITT_FIVE_PERSONS_DRUG_EXPOSURE_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'drug_exposure.csv')
PITT_FIVE_PERSONS_MEASUREMENT_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'measurement.csv')
PITT_FIVE_PERSONS_OBSERVATION_CSV = os.path.join(PITT_FIVE_PERSONS_PATH, 'observation.csv')
PITT_FIVE_PERSONS_FILES = [
PITT_FIVE_PERSONS_PERSON_CSV,
PITT_FIVE_PERSONS_VISIT_OCCURRENCE_CSV,
PITT_FIVE_PERSONS_CONDITION_OCCURRENCE_CSV,
PITT_FIVE_PERSONS_PROCEDURE_OCCURRENCE_CSV,
PITT_FIVE_PERSONS_DRUG_EXPOSURE_CSV,
PITT_FIVE_PERSONS_MEASUREMENT_CSV,
PITT_FIVE_PERSONS_OBSERVATION_CSV]
RDR_PATH = os.path.join(TEST_DATA_PATH, 'rdr')
TEST_DATA_EXPORT_PATH = os.path.join(TEST_DATA_PATH, 'export')
TEST_DATA_EXPORT_SYNPUF_PATH = os.path.join(TEST_DATA_EXPORT_PATH, 'synpuf')
def _create_five_persons_success_result():
    """
    Generate the expected result payload for five_persons data set. For internal testing only.

    Writes FIVE_PERSONS_SUCCESS_RESULT_CSV with one all-success row per CDM file.
    """
    import csv
    field_names = ['cdm_file_name', 'found', 'parsed', 'loaded']
    expected_result_items = []
    for cdm_file in common.CDM_FILES:
        # every CDM file is expected to be found, parsed and loaded
        expected_item = dict(cdm_file_name=cdm_file, found="1", parsed="1", loaded="1")
        expected_result_items.append(expected_item)
    with open(FIVE_PERSONS_SUCCESS_RESULT_CSV, 'w') as f:
        writer = csv.DictWriter(f, field_names, quoting=csv.QUOTE_ALL)
        writer.writeheader()
        writer.writerows(expected_result_items)
def _export_query_response_by_path(p, hpo_id):
    """Utility to create response test payloads.

    Runs each .sql file under *p* against BigQuery (rendered for *hpo_id*)
    and saves the rows/schema of each response as <name>_response.json in
    TEST_DATA_EXPORT_PATH.
    """
    from validation import export
    import bq_utils
    for f in export.list_files_only(p):
        abs_path = os.path.join(p, f)
        with open(abs_path, 'r') as fp:
            sql = fp.read()
        sql = export.render(sql, hpo_id, results_schema=bq_utils.get_dataset_id(), vocab_schema='synpuf_100')
        query_result = bq_utils.query(sql)
        out_file = os.path.join(TEST_DATA_EXPORT_PATH, f.replace('.sql', '_response.json'))
        with open(out_file, 'w') as fp:
            data = dict()
            # keep only the parts of the response the tests consume
            if 'rows' in query_result:
                data['rows'] = query_result['rows']
            if 'schema' in query_result:
                data['schema'] = query_result['schema']
            import json
            json.dump(data, fp, sort_keys=True, indent=4, separators=(',', ': '))
def _export_query_responses():
    """Regenerate response payloads for all export report sections."""
    from validation import export
    for d in ['datadensity', 'achillesheel', 'person']:
        p = os.path.join(export.EXPORT_PATH, d)
        _export_query_response_by_path(p, FAKE_HPO_ID)
def empty_bucket(bucket):
    """Delete every object in the given GCS bucket."""
    bucket_items = gcs_utils.list_bucket(bucket)
    for bucket_item in bucket_items:
        gcs_utils.delete_object(bucket, bucket_item['name'])
def delete_all_tables(dataset_id):
    """
    Remove all non-vocabulary tables from a dataset
    :param dataset_id: ID of the dataset with the tables to delete
    :return: list of deleted tables
    """
    import bq_utils
    deleted = []
    for table in bq_utils.list_tables(dataset_id):
        table_id = table['tableReference']['tableId']
        if table_id in common.VOCABULARY_TABLES:
            # never drop shared vocabulary tables
            continue
        bq_utils.delete_table(table_id, dataset_id)
        deleted.append(table_id)
    return deleted
import requests
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) file from Google Drive to *destination*.

    Handles Drive's virus-scan confirmation flow by re-requesting with the
    confirmation token when one is issued.
    (Parameter name 'id' shadows the builtin but is kept for compatibility.)
    """
    base_url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(base_url, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token is not None:
        response = session.get(base_url, params={'id': id, 'confirm': token},
                               stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, or None.

    Drive sets a 'download_warning*' cookie when a file is too large to
    virus-scan; its value must be echoed back as the 'confirm' parameter.
    """
    return next((value for key, value in response.cookies.items()
                 if key.startswith('download_warning')), None)
def save_response_content(response, destination):
    """Stream the response body to *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(chunk_size):
            # skip keep-alive chunks, which arrive as empty bytes
            if not chunk:
                continue
            out.write(chunk)
def get_synpuf_results_files():
    """Download the synpuf achilles result CSVs from Google Drive.

    Files already present under TEST_DATA_EXPORT_SYNPUF_PATH are not
    re-downloaded.
    """
    files = [('0B8QSHCLE8g4JV1Q4UHFRLXNhM2c', 'achilles_results.csv'),
             ('0B8QSHCLE8g4JeUUxZEh0SS1YNlk', 'achilles_results_dist.csv'),
             ('0B8QSHCLE8g4JQUE1dGJLd1RpWEk', 'achilles_heel_results.csv')]
    for file_id, file_name in files:
        dest_path = os.path.join(TEST_DATA_EXPORT_SYNPUF_PATH, file_name)
        if not os.path.exists(dest_path):
            # reuse dest_path; the original re-joined the same path twice
            download_file_from_google_drive(file_id, dest_path)
def read_cloud_file(bucket, name):
    """Return the contents of GCS object *name* in *bucket*."""
    return gcs_utils.get_object(bucket, name)
def write_cloud_str(bucket, name, contents_str):
    """Upload the string *contents_str* to GCS as *bucket*/*name*."""
    # Python 2 StringIO: wrap the string as a file-like object for upload
    import StringIO
    fp = StringIO.StringIO(contents_str)
    return write_cloud_fp(bucket, name, fp)
def write_cloud_file(bucket, f, prefix=""):
    """Upload local file *f* to GCS under its basename, optionally prefixed."""
    name = os.path.basename(f)
    with open(f, 'r') as fp:
        return write_cloud_fp(bucket, prefix + name, fp)
def write_cloud_fp(bucket, name, fp):
    """Upload the file-like object *fp* to GCS as *bucket*/*name*."""
    return gcs_utils.upload_object(bucket, name, fp)
def populate_achilles(hpo_bucket, hpo_id=FAKE_HPO_ID, include_heel=True):
    """Load canned achilles result CSVs into BigQuery for the given HPO.

    Uploads the achilles_analysis CSV plus the synpuf results tables
    (optionally including heel results) to *hpo_bucket* and loads each into
    its per-HPO BigQuery table, waiting on the async load jobs at the end.

    :param hpo_bucket: GCS bucket to stage the CSVs in
    :param hpo_id: HPO whose table names will be used
    :param include_heel: also load achilles_heel_results when True
    """
    from validation import achilles, achilles_heel
    from google.appengine.api import app_identity
    import bq_utils
    app_id = app_identity.get_application_id()
    # achilles_analysis is loaded first (its job is not awaited explicitly)
    test_file_name = achilles.ACHILLES_ANALYSIS + '.csv'
    achilles_analysis_file_path = os.path.join(TEST_DATA_EXPORT_PATH, test_file_name)
    schema_path = os.path.join(resources.fields_path, achilles.ACHILLES_ANALYSIS + '.json')
    write_cloud_file(hpo_bucket, achilles_analysis_file_path)
    gcs_path = 'gs://' + hpo_bucket + '/' + test_file_name
    dataset_id = bq_utils.get_dataset_id()
    table_id = bq_utils.get_table_id(hpo_id, achilles.ACHILLES_ANALYSIS)
    bq_utils.load_csv(schema_path, gcs_path, app_id, dataset_id, table_id)
    # the results tables are loaded as parallel jobs and awaited together
    table_names = [achilles.ACHILLES_RESULTS, achilles.ACHILLES_RESULTS_DIST]
    if include_heel:
        table_names.append(achilles_heel.ACHILLES_HEEL_RESULTS)
    running_jobs = []
    for table_name in table_names:
        schema_file_name = table_name + '.json'
        schema_path = os.path.join(resources.fields_path, schema_file_name)
        test_file_name = table_name + '.csv'
        test_file_path = os.path.join(TEST_DATA_EXPORT_SYNPUF_PATH, table_name + '.csv')
        write_cloud_file(hpo_bucket, test_file_path)
        gcs_path = 'gs://' + hpo_bucket + '/' + test_file_name
        dataset_id = bq_utils.get_dataset_id()
        table_id = bq_utils.get_table_id(hpo_id, table_name)
        load_results = bq_utils.load_csv(schema_path, gcs_path, app_id, dataset_id, table_id)
        running_jobs.append(load_results['jobReference']['jobId'])
    bq_utils.wait_on_jobs(running_jobs)
def generate_rdr_files():
    """
    Generate test csv files based on a sample of synthetic RDR data

    For each CDM table, exports the rows belonging to the sampled
    person_ids to <table>.csv in the current directory via the bq CLI.
    :return: None
    """
    d = 'rdr_dataset_2018_4_17'
    for table in common.CDM_TABLES:
        q = 'SELECT * FROM fake_%s WHERE person_id IN (SELECT person_id FROM sample_person_id)' % table
        # Bug fix: the redirect target used '%(table)s', which str.format
        # never substitutes, so every table was written to the literal file
        # '%(table)s.csv'. Use a proper format placeholder.
        cmd = 'bq query --dataset_id={d} --format=csv "{q}" > {table}.csv'.format(d=d, q=q, table=table)
        os.system(cmd)
def bash(cmd):
    """
    Run a bash-specific command
    :param cmd: the command to run
    :return: 0 if successful
    :raises
        CalledProcessError: raised when command has a non-zero result
    Note: On Windows, bash and the gcloud SDK binaries (e.g. bq, gsutil) must be in PATH
    """
    import subprocess
    import platform
    bash_cmd = '/bin/bash'
    if platform.system().lower().startswith('windows'):
        # extensions are not inferred
        cmd = cmd.replace('bq ', 'bq.cmd ').replace('gsutil ', 'gsutil.cmd ')
        bash_cmd = 'bash'
    # Bug fix: check_call with stdout/stderr=PIPE is unsupported -- nothing
    # reads the pipes, so a command with enough output fills the pipe buffer
    # and deadlocks. Discard output via os.devnull (Python 2 compatible).
    with open(os.devnull, 'w') as devnull:
        return subprocess.check_call([bash_cmd, '-c', cmd],
                                     stdout=devnull, stderr=devnull)
def command(cmd):
    """Run *cmd* via the default shell; return the os.system exit status."""
    return os.system(cmd)
def response2rows(r):
    """
    Convert a query response to a list of dict
    :param r: a query response object
    :return: list of dict
    """
    schema_fields = r.get('schema', {'fields': None})['fields']
    return [_transform_row(bq_row, schema_fields) for bq_row in r.get('rows', [])]
def _transform_row(row, schema):
"""
Apply the given schema to the given BigQuery data row. Adapted from https://goo.gl/dWszQJ.
:param row: A single BigQuery row to transform
:param schema: The BigQuery table schema to apply to the row, specifically the list of field dicts.
:returns: Row as a dict
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
def list_files_in(path):
    """
    List the abs paths to files (not dirs) in the supplied path
    :param path: directory to scan (non-recursive)
    :return: joined paths of regular files only
    """
    result = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full):
            result.append(full)
    return result
|
en
| 0.791253
|
# Test files for five person sample # OMOP NYC and PITT test data from synpuf Generate the expected result payload for five_persons data set. For internal testing only. Utility to create response test payloads Remove all non-vocabulary tables from a dataset :param dataset_id: ID of the dataset with the tables to delete :return: list of deleted tables # filter out keep-alive new chunks Generate test csv files based on a sample of synthetic RDR data :return: Run a bash-specific command :param cmd: the command to run :return: 0 if successful :raises CalledProcessError: raised when command has a non-zero result Note: On Windows, bash and the gcloud SDK binaries (e.g. bq, gsutil) must be in PATH # extensions are not inferred Convert a query response to a list of dict :param r: a query response object :return: list of dict Apply the given schema to the given BigQuery data row. Adapted from https://goo.gl/dWszQJ. :param row: A single BigQuery row to transform :param schema: The BigQuery table schema to apply to the row, specifically the list of field dicts. :returns: Row as a dict # Match each schema column with its associated row value # Recurse on nested records # Otherwise just cast the value List the abs paths to files (not dirs) in the supplied path :param path: :return:
| 2.175757
| 2
|
backend/src/baserow/contrib/database/views/view_types.py
|
calvinchengx/baserow
| 0
|
6629067
|
from django.urls import path, include
from .registries import ViewType
from .models import GridView
class GridViewType(ViewType):
    """Registers the 'grid' view type and exposes its REST API routes."""
    type = 'grid'
    model_class = GridView

    def get_api_urls(self):
        """Return URL patterns for the grid view API, namespaced by view type."""
        from baserow.contrib.database.api.views.grid import urls as api_urls
        return [
            path('grid/', include(api_urls, namespace=self.type)),
        ]
|
from django.urls import path, include
from .registries import ViewType
from .models import GridView
class GridViewType(ViewType):
    """Registers the 'grid' view type and exposes its REST API routes."""
    type = 'grid'
    model_class = GridView

    def get_api_urls(self):
        """Return URL patterns for the grid view API, namespaced by view type."""
        from baserow.contrib.database.api.views.grid import urls as api_urls
        return [
            path('grid/', include(api_urls, namespace=self.type)),
        ]
|
none
| 1
| 2.090673
| 2
|
|
trove_dashboard/content/databases/tabs.py
|
NeCTAR-RC/trove-dashboard
| 0
|
6629068
|
<gh_stars>0
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import tabs
from oslo_log import log as logging
from trove_dashboard import api
from trove_dashboard.content.database_configurations import (
config_param_manager)
from trove_dashboard.content.databases import db_capability
from trove_dashboard.content.databases.logs import tables as log_tables
from trove_dashboard.content.databases import tables
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
    """Instance detail tab showing general info plus root-enabled status."""
    name = _("Overview")
    slug = "overview"

    def get_context_data(self, request):
        """Build template context; root_enabled falls back to an error string."""
        instance = self.tab_group.kwargs['instance']
        context = {"instance": instance}
        try:
            root_show = api.trove.root_show(request, instance.id)
            context["root_enabled"] = template.defaultfilters.yesno(
                root_show.rootEnabled)
        except Exception:
            context["root_enabled"] = _('Unable to obtain information on '
                                        'root user')
        return context

    def get_template_name(self, request):
        """Pick a datastore-specific overview template, else the base one."""
        instance = self.tab_group.kwargs['instance']
        template_file = ('project/databases/_detail_overview_%s.html' %
                         self._get_template_type(instance.datastore['type']))
        try:
            template.loader.get_template(template_file)
            return template_file
        except template.TemplateDoesNotExist:
            # This datastore type does not have a template file
            # Just use the base template file
            return ('project/databases/_detail_overview.html')

    def _get_template_type(self, datastore):
        # MySQL-compatible datastores share the mysql template
        if db_capability.is_mysql_compatible(datastore):
            return 'mysql'
        return datastore
class UserTab(tabs.TableTab):
    """Instance detail tab listing database users and their access grants."""
    table_classes = [tables.UsersTable]
    name = _("Users")
    slug = "users_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_users_data(self):
        """Fetch users for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.users_list(self.request, instance.id)
            for user in data:
                user.instance = instance
                try:
                    user.access = api.trove.user_list_access(self.request,
                                                             instance.id,
                                                             user.name,
                                                             host=user.host)
                except exceptions.NOT_FOUND:
                    # user has no access entries; leave user.access unset
                    pass
                except Exception:
                    msg = _('Unable to get user access data.')
                    exceptions.handle(self.request, msg)
        except Exception:
            msg = _('Unable to get user data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Hide the tab when the user lacks the add-user permission
        return tables.has_user_add_perm(request)
class DatabaseTab(tabs.TableTab):
    """Instance detail tab listing the databases on the instance."""
    table_classes = [tables.DatabaseTable]
    name = _("Databases")
    slug = "database_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_databases_data(self):
        """Fetch databases for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.database_list(self.request, instance.id)
            for database in data:
                setattr(database, 'instance', instance)
        except Exception:
            msg = _('Unable to get databases data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Hide the tab when the user cannot add databases
        return tables.has_database_add_perm(request)
class ConfigDefaultsTab(tabs.TableTab):
    """Instance detail tab showing default configuration parameter values."""
    table_classes = [tables.ConfigDefaultsTable]
    name = _("Defaults")
    slug = "config_defaults"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_config_defaults_data(self):
        """Return the instance's default config parameters sorted by name."""
        instance = self.tab_group.kwargs['instance']
        values_data = []
        data = api.trove.configuration_default(self.request, instance.id)
        if data is not None:
            for k, v in data.configuration.items():
                values_data.append(
                    config_param_manager.ConfigParam(None, k, v))
        return sorted(values_data, key=lambda config: config.name)
class BackupsTab(tabs.TableTab):
    """Instance detail tab listing the instance's backups."""
    table_classes = [tables.InstanceBackupsTable]
    name = _("Backups")
    slug = "backups_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_backups_data(self):
        """Fetch backups for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.instance_backups(self.request, instance.id)
        except Exception:
            msg = _('Unable to get database backup data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Backups require the object-store service to be available
        return request.user.has_perm('openstack.services.object-store')
class LogsTab(tabs.TableTab):
    """Instance detail tab listing the instance's guest logs."""
    table_classes = [log_tables.LogsTable]
    name = _("Logs")
    slug = "logs_tab"
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_logs_data(self):
        """Fetch the log list; logs the error and returns [] on failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            logs = api.trove.log_list(self.request, instance.id)
            return logs
        except Exception as e:
            LOG.exception(
                _('Unable to retrieve list of logs.\n%s') % six.text_type(e))
            logs = []
        return logs
class InstanceDetailTabs(tabs.TabGroup):
    """Tab group combining all instance detail tabs."""
    slug = "instance_details"
    tabs = (OverviewTab, UserTab, DatabaseTab, BackupsTab, LogsTab,
            ConfigDefaultsTab)
    sticky = True
|
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import tabs
from oslo_log import log as logging
from trove_dashboard import api
from trove_dashboard.content.database_configurations import (
config_param_manager)
from trove_dashboard.content.databases import db_capability
from trove_dashboard.content.databases.logs import tables as log_tables
from trove_dashboard.content.databases import tables
LOG = logging.getLogger(__name__)
class OverviewTab(tabs.Tab):
    """Instance detail tab showing general info plus root-enabled status."""
    name = _("Overview")
    slug = "overview"

    def get_context_data(self, request):
        """Build template context; root_enabled falls back to an error string."""
        instance = self.tab_group.kwargs['instance']
        context = {"instance": instance}
        try:
            root_show = api.trove.root_show(request, instance.id)
            context["root_enabled"] = template.defaultfilters.yesno(
                root_show.rootEnabled)
        except Exception:
            context["root_enabled"] = _('Unable to obtain information on '
                                        'root user')
        return context

    def get_template_name(self, request):
        """Pick a datastore-specific overview template, else the base one."""
        instance = self.tab_group.kwargs['instance']
        template_file = ('project/databases/_detail_overview_%s.html' %
                         self._get_template_type(instance.datastore['type']))
        try:
            template.loader.get_template(template_file)
            return template_file
        except template.TemplateDoesNotExist:
            # This datastore type does not have a template file
            # Just use the base template file
            return ('project/databases/_detail_overview.html')

    def _get_template_type(self, datastore):
        # MySQL-compatible datastores share the mysql template
        if db_capability.is_mysql_compatible(datastore):
            return 'mysql'
        return datastore
class UserTab(tabs.TableTab):
    """Instance detail tab listing database users and their access grants."""
    table_classes = [tables.UsersTable]
    name = _("Users")
    slug = "users_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_users_data(self):
        """Fetch users for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.users_list(self.request, instance.id)
            for user in data:
                user.instance = instance
                try:
                    user.access = api.trove.user_list_access(self.request,
                                                             instance.id,
                                                             user.name,
                                                             host=user.host)
                except exceptions.NOT_FOUND:
                    # user has no access entries; leave user.access unset
                    pass
                except Exception:
                    msg = _('Unable to get user access data.')
                    exceptions.handle(self.request, msg)
        except Exception:
            msg = _('Unable to get user data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Hide the tab when the user lacks the add-user permission
        return tables.has_user_add_perm(request)
class DatabaseTab(tabs.TableTab):
    """Instance detail tab listing the databases on the instance."""
    table_classes = [tables.DatabaseTable]
    name = _("Databases")
    slug = "database_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_databases_data(self):
        """Fetch databases for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.database_list(self.request, instance.id)
            for database in data:
                setattr(database, 'instance', instance)
        except Exception:
            msg = _('Unable to get databases data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Hide the tab when the user cannot add databases
        return tables.has_database_add_perm(request)
class ConfigDefaultsTab(tabs.TableTab):
    """Instance detail tab showing default configuration parameter values."""
    table_classes = [tables.ConfigDefaultsTable]
    name = _("Defaults")
    slug = "config_defaults"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_config_defaults_data(self):
        """Return the instance's default config parameters sorted by name."""
        instance = self.tab_group.kwargs['instance']
        values_data = []
        data = api.trove.configuration_default(self.request, instance.id)
        if data is not None:
            for k, v in data.configuration.items():
                values_data.append(
                    config_param_manager.ConfigParam(None, k, v))
        return sorted(values_data, key=lambda config: config.name)
class BackupsTab(tabs.TableTab):
    """Instance detail tab listing the instance's backups."""
    table_classes = [tables.InstanceBackupsTable]
    name = _("Backups")
    slug = "backups_tab"
    instance = None
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_backups_data(self):
        """Fetch backups for the instance; returns [] on API failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            data = api.trove.instance_backups(self.request, instance.id)
        except Exception:
            msg = _('Unable to get database backup data.')
            exceptions.handle(self.request, msg)
            data = []
        return data

    def allowed(self, request):
        # Backups require the object-store service to be available
        return request.user.has_perm('openstack.services.object-store')
class LogsTab(tabs.TableTab):
    """Instance detail tab listing the instance's guest logs."""
    table_classes = [log_tables.LogsTable]
    name = _("Logs")
    slug = "logs_tab"
    template_name = "horizon/common/_detail_table.html"
    preload = False

    def get_logs_data(self):
        """Fetch the log list; logs the error and returns [] on failure."""
        instance = self.tab_group.kwargs['instance']
        try:
            logs = api.trove.log_list(self.request, instance.id)
            return logs
        except Exception as e:
            LOG.exception(
                _('Unable to retrieve list of logs.\n%s') % six.text_type(e))
            logs = []
        return logs
class InstanceDetailTabs(tabs.TabGroup):
    """Tab group combining all instance detail tabs."""
    slug = "instance_details"
    tabs = (OverviewTab, UserTab, DatabaseTab, BackupsTab, LogsTab,
            ConfigDefaultsTab)
    sticky = True
|
en
| 0.83592
|
# Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This datastore type does not have a template file # Just use the base template file
| 1.64061
| 2
|
wxmplot/imageconf.py
|
jkbgbr/wxmplot
| 0
|
6629069
|
<reponame>jkbgbr/wxmplot
import wx
import wx.lib.agw.flatnotebook as flat_nb
import wx.lib.scrolledpanel as scrolled
import wx.lib.colourselect as csel
from math import log10
import numpy as np
import matplotlib.cm as cmap
from matplotlib.ticker import FuncFormatter
from .colors import register_custom_colormaps, hexcolor, hex2rgb, mpl_color
from .config import bool_ifnotNone, ifnotNone
from .plotconfigframe import FNB_STYLE, autopack
from .utils import LabeledTextCtrl, SimpleText, Check, Choice, HLine, pack, FloatSpin, MenuItem
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
# Register wxmplot's extra colormaps, then build the user-selectable list,
# keeping only names the custom set or matplotlib actually provides.
# (Note: 'Spectral' appears twice in the candidate tuple; duplicates are
# simply appended twice and are harmless for a selection list.)
cm_names = register_custom_colormaps()
ColorMap_List = []
for cm in ('gray', 'coolwarm', 'viridis', 'inferno', 'plasma', 'magma', 'red',
           'green', 'blue', 'magenta', 'yellow', 'cyan', 'Reds', 'Greens',
           'Blues', 'cool', 'hot', 'copper', 'red_heat', 'green_heat',
           'blue_heat', 'spring', 'summer', 'autumn', 'winter', 'ocean',
           'terrain', 'jet', 'stdgamma', 'hsv', 'Accent', 'Spectral', 'PiYG',
           'PRGn', 'Spectral', 'YlGn', 'YlGnBu', 'RdBu', 'RdPu', 'RdYlBu',
           'RdYlGn'):
    if cm in cm_names or hasattr(cmap, cm):
        ColorMap_List.append(cm)
# Contrast percentages offered in the UI; -1.0 encodes 'None' (no clipping)
Contrast_List = ['None', '0.01', '0.02', '0.05', '0.1', '0.2', '0.5', '1.0',
                 '2.0', '5.0']
Contrast_NDArray = np.array((-1.0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1., 2, 5.))
# Interpolation scheme names passed through to matplotlib image display
Interp_List = ('nearest', 'bicubic', 'quadric', 'gaussian', 'kaiser',
               'bessel', 'mitchell', 'catrom', 'spline16', 'spline36',
               'bilinear', 'hanning', 'hamming', 'hermite', 'sinc', 'lanczos')
Slices_List = ('None', 'X', 'Y')
RGB_COLORS = ('red', 'green', 'blue')
class ImageConfig:
    def __init__(self, axes=None, fig=None, canvas=None):
        """Display configuration/state holder for an image panel.

        :param axes: matplotlib Axes drawn into (may be None)
        :param fig: matplotlib Figure (may be None)
        :param canvas: canvas hosting the figure (may be None)
        """
        self.axes = axes
        self.fig = fig
        self.canvas = canvas
        # one colormap per channel; single-intensity images use index 0
        self.cmap = [cmap.gray, cmap.gray, cmap.gray]
        self.cmap_reverse = False
        self.interp = 'nearest'
        self.show_axis = False
        self.show_grid = False
        self.grid_color = '#807030'
        self.grid_alpha = 0.25
        self.log_scale = False
        # orientation state toggled by flip_vert/flip_horiz/rotate90
        self.flip_ud = False
        self.flip_lr = False
        self.rot_level = 0
        self.contrast_level = 0
        self.datalimits = [None, None, None, None]
        # colormap intensity window per channel, on a 0..cmap_range scale
        self.cmap_lo = [0, 0, 0]
        self.cmap_range = 1000
        self.cmap_hi = [1000, 1000, 1000]
        self.tricolor_bg = 'black'
        self.tricolor_mode = 'rgb'
        self.int_lo = [0, 0, 0]
        self.int_hi = [1, 1, 1]
        self.data = None    # image array being displayed
        self.xdata = None   # optional coordinate values along x
        self.ydata = None   # optional coordinate values along y
        self.xlab = 'X'
        self.ylab = 'Y'
        self.indices = None
        self.title = 'image'
        self.style = 'image'
        self.highlight_areas = []
        self.ncontour_levels = 10
        self.contour_levels = None
        self.contour_labels = True
        self.cursor_mode = 'zoom'
        # rubber-band appearance for zoom selection
        self.zoombrush = wx.Brush('#040410', wx.SOLID)
        self.zoompen = wx.Pen('#101090', 3, wx.SOLID)
        self.zoom_lims = []
        # slicing (line-profile) state
        self.slices = Slices_List[0]
        self.slice_xy = -1, -1
        self.slice_width = 1
        self.slice_onmotion = False
        # scalebar overlay state
        self.scalebar_show = False
        self.scalebar_showlabel = False
        self.scalebar_label = ''
        self.scalebar_pos = 5, 5
        self.scalebar_size = 1, 1
        self.scalebar_pixelsize = None, None
        self.scalebar_units = 'mm'
        self.scalebar_color = '#EEEE99'
        self.set_formatters()
    def set_colormap(self, name, reverse=False, icol=0):
        """Select colormap *name* for channel *icol*, optionally reversed.

        Also updates any live image/contour artists and recolors
        highlighted areas for contrast against the new map.
        """
        self.cmap_reverse = reverse
        # normalize the name to matplotlib's '_r' reversed-map convention
        if reverse and not name.endswith('_r'):
            name = name + '_r'
        elif not reverse and name.endswith('_r'):
            name = name[:-2]
        self.cmap[icol] = _cmap_ = cmap.get_cmap(name)
        if hasattr(self, 'contour'):
            # choose a contour colormap that stays visible over the image map
            xname = 'gray'
            if name == 'gray_r':
                xname = 'Reds_r'
            elif name == 'gray':
                xname = 'Reds'
            elif name.endswith('_r'):
                xname = 'gray_r'
            self.contour.set_cmap(getattr(cmap, xname))
        if hasattr(self, 'image'):
            self.image.set_cmap(self.cmap[icol])
        if hasattr(self, 'highlight_areas'):
            if hasattr(self.cmap[icol], '_lut'):
                # complement of the colormap's first LUT color, for contrast
                rgb = [int(i*240)^255 for i in self.cmap[icol]._lut[0][:3]]
                col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2])
                for area in self.highlight_areas:
                    for w in area.collections + area.labelTexts:
                        w.set_color(col)
def flip_vert(self):
"flip image along vertical axis (up/down)"
self.data = np.flipud(self.data)
if self.ydata is not None:
self.ydata = self.ydata[::-1]
self.flip_ud = not self.flip_ud
def flip_horiz(self):
"flip image along horizontal axis (left/right)"
self.data = np.fliplr(self.data)
if self.xdata is not None:
self.xdata = self.xdata[::-1]
self.flip_lr = not self.flip_lr
    def rotate90(self, event=None):
        "rotate 90 degrees, CW"
        # NOTE(review): np.rot90 rotates counter-clockwise in array-index
        # terms; the 'CW' here presumably refers to the on-screen orientation
        # given the display's y-axis direction -- confirm against the panel.
        if self.xdata is not None:
            self.xdata = self.xdata[::-1]
        if self.ydata is not None:
            self.ydata = self.ydata[:]
        # axis coordinate arrays and labels swap along with the axes
        self.xdata, self.ydata = self.ydata, self.xdata
        self.xlab, self.ylab = self.ylab, self.xlab
        self.data = np.rot90(self.data)
        # track cumulative quarter-turns, wrapping at a full rotation
        self.rot_level += 1
        if self.rot_level == 4:
            self.rot_level = 0
    def set_formatters(self):
        """Install this config's tick formatters on the attached axes."""
        if self.axes is not None:
            self.axes.xaxis.set_major_formatter(FuncFormatter(self.xformatter))
            self.axes.yaxis.set_major_formatter(FuncFormatter(self.yformatter))

    def xformatter(self, x, pos):
        " x-axis formatter "
        return self._format(x, pos, dtype='x')

    def yformatter(self, y, pos):
        " y-axis formatter "
        return self._format(y, pos, dtype='y')
def _format(self, x, pos, dtype='x'):
    """home built tick formatter to use with FuncFormatter():

    x     : tick value, used as an integer index into the data
            coordinate array (xdata/ydata), so ticks show data
            coordinates rather than pixel indices
    pos   : tick position (unused, required by FuncFormatter)
    dtype : 'x' or 'y' to select which axis/data array to use

    The number of digits shown is derived from the spacing between
    the first two major ticks; very large spacings use '%.2e'.
    """
    fmt = '%1.5g'
    if dtype == 'y':
        ax = self.axes.yaxis
        dat = self.ydata
        if dat is None:
            dat = np.arange(self.data.shape[0])
    else:
        ax = self.axes.xaxis
        dat = self.xdata
        if dat is None:
            dat = np.arange(self.data.shape[1])
    ticks = [0, 1]
    # small nudge to avoid rounding exactly at a decade boundary
    onep = 1.00001
    try:
        dtick = 0.1 * onep * (dat.max() - dat.min())
    except Exception:
        dtick = 0.2 * onep
    try:
        ticks = ax.get_major_locator()()
    except Exception:
        ticks = [0, 1]
    try:
        dtick = abs(dat[int(ticks[1])] - dat[int(ticks[0])]) * onep
    except Exception:
        pass
    if dtick > 89999:
        fmt = '%.2e'
    else:
        # number of decimals needed to resolve one tick spacing
        fmt = '%%1.%df' % max(0, -round(log10(0.75*dtick)))
    try:
        s = fmt % dat[int(x)]
    except Exception:
        s = ''
    # bug fix: str.strip() returns a new string; the result was discarded
    s = s.strip()
    s = s.replace('+', '')
    # compact exponent notation: '2.00e+05' -> '2.00e5'
    while s.find('e0') > 0:
        s = s.replace('e0', 'e')
    while s.find('-0') > 0:
        s = s.replace('-0', '-')
    return s
def relabel(self):
    " re draw labels (title, x,y labels)"
    # intentionally a no-op here: the panel owning the matplotlib axes
    # is responsible for actually redrawing titles and axis labels
    pass
def set_zoombrush(self,color, style):
    # brush (fill color/style) used to draw the zoom rubber-band box
    self.zoombrush = wx.Brush(color, style)
def set_zoompen(self,color, style):
    # pen (outline color/style) for the zoom rubber-band box; width is
    # fixed at 3 pixels
    self.zoompen = wx.Pen(color, 3, style)
def tricolor_white_bg(self, img):
    """transform an RGB image so a black background displays as white

    The input is rescaled to [0, 1], inverted and halved; each output
    channel is then the sum of the *other* two channels, so pure hues
    are preserved while black and white are exchanged:

       old intensity            new intensity
       (0.0, 0.0, 0.0) (black)  (1.0, 1.0, 1.0) (white)
       (1.0, 1.0, 1.0) (white)  (0.0, 0.0, 0.0) (black)
       (1.0, 0.0, 0.0) (red)    (1.0, 0.5, 0.5) (red)
       (0.0, 1.0, 0.0) (green)  (0.5, 1.0, 0.5) (green)
       (0.0, 0.0, 1.0) (blue)   (0.5, 0.5, 1.0) (blue)
    """
    inv = 0.5*(1.0 - (img - img.min())/(img.max() - img.min()))
    out = np.zeros_like(inv)
    for chan, (other1, other2) in enumerate(((1, 2), (0, 2), (0, 1))):
        out[:, :, chan] = inv[:, :, other1] + inv[:, :, other2]
    return out
def rgb2cmy(self, img, whitebg=False):
    """transforms image from RGB to CMY"""
    # optionally rescale to [0, 1] and invert so a black background
    # displays as white
    if whitebg:
        tmp = (1.0 - (img - img.min())/(img.max() - img.min()))
    else:
        tmp = img*1.0
    out = np.zeros_like(tmp)
    # each CMY channel is the average of the two non-matching RGB channels
    out[:, :, 0] = (tmp[:, :, 1] + tmp[:, :, 2])/2.0
    out[:, :, 1] = (tmp[:, :, 0] + tmp[:, :, 2])/2.0
    out[:, :, 2] = (tmp[:, :, 0] + tmp[:, :, 1])/2.0
    return out
def set_config(self, interp=None, colormap=None, reverse_colormap=None,
               contrast_level=None, flip_ud=None, flip_lr=None,
               rot=None, tricolor_bg=None, ncontour_levels=None,
               title=None, style=None):
    """set configuration options:

       interp, colormap, reverse_colormap, contrast_level, flip_ud,
       flip_lr, rot, tricolor_bg, ncontour_levels, title, style

    Arguments left as None keep their current values.  String options
    are lower-cased and validated against the known choices before
    being applied.
    """
    if interp is not None:
        interp = interp.lower()
        self.interp = interp if interp in Interp_List else self.interp

    if colormap is not None:
        colormap = colormap.lower()
        # a '_r' suffix encodes a reversed colormap
        if colormap.endswith('_r'):
            reverse_colormap = True
            colormap = colormap[:-2]
        # bug fix: 'colormap' is not initialized in __init__, so reading
        # self.colormap on a fresh instance raised AttributeError
        self.colormap = (colormap if colormap in ColorMap_List
                         else getattr(self, 'colormap', None))

    if contrast_level is not None:
        self.contrast_level = float(contrast_level)

    self.cmap_reverse = bool_ifnotNone(reverse_colormap, self.cmap_reverse)
    self.flip_ud = bool_ifnotNone(flip_ud, self.flip_ud)
    self.flip_lr = bool_ifnotNone(flip_lr, self.flip_lr)
    # bug fix: 'rot' is not initialized in __init__ either; default False
    self.rot = bool_ifnotNone(rot, getattr(self, 'rot', False))

    if tricolor_bg is not None:
        tricolor_bg = tricolor_bg.lower()
        if tricolor_bg in ('black', 'white'):
            self.tricolor_bg = tricolor_bg

    if ncontour_levels is not None:
        # bug fix: attribute is 'ncontour_levels' (was 'ncontour_level',
        # which silently set an unrelated attribute)
        self.ncontour_levels = int(ncontour_levels)

    if style is not None:
        style = style.lower()
        if style in ('image', 'contour'):
            self.style = style

    self.title = ifnotNone(title, self.title)
def get_config(self):
    """get dictionary of configuration options

    Keys mirror the keyword arguments of set_config(), plus
    'reverse_colormap'.  Attributes that have never been set
    ('colormap', 'rot' before the first set_config() call) are
    reported as None instead of raising AttributeError.
    """
    out = {'reverse_colormap': self.cmap_reverse}
    # bug fix: the attribute is 'contrast_level' -- the old name
    # 'contrast_levels' always raised AttributeError here
    for attr in ('interp', 'colormap', 'contrast_level', 'flip_ud',
                 'flip_lr', 'rot', 'tricolor_bg', 'ncontour_levels',
                 'title', 'style'):
        out[attr] = getattr(self, attr, None)
    return out
# sizer style flags shared by every widget placed in the config frame's grid
labstyle= wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL
class ImageConfigFrame(wx.Frame):
    """GUI Configure Frame for Images: contour, grid, X/Y slice and
    scalebar settings, with a File menu to save/load the configuration
    as YAML (when PyYAML is available)."""
    def __init__(self, parent=None, config=None, trace_color_callback=None):
        """parent : window providing .panel and the onContourToggle /
                    onSliceChoice callbacks used by the event handlers
        config : ImageConfig instance; a default one is created if None
        """
        if config is None:
            config = ImageConfig()
        self.conf = config
        self.parent = parent
        self.canvas = self.conf.canvas
        self.axes = self.canvas.figure.get_axes()
        self.DrawPanel()
        mbar = wx.MenuBar()
        fmenu = wx.Menu()
        MenuItem(self, fmenu, "Save Configuration\tCtrl+S",
                 "Save Configuration",
                 self.save_config)
        MenuItem(self, fmenu, "Load Configuration\tCtrl+R",
                 "Load Configuration",
                 self.load_config)
        mbar.Append(fmenu, 'File')
        self.SetMenuBar(mbar)

    def save_config(self, evt=None, fname='wxmplot.yaml'):
        """prompt for a filename and write the current configuration
        as YAML; silently does nothing if PyYAML is unavailable"""
        if not HAS_YAML:
            return
        file_choices = 'YAML Config File (*.yaml)|*.yaml'
        # NOTE(review): this method uses the 'os' module -- confirm
        # 'import os' is present at the top of this file.
        dlg = wx.FileDialog(self, message='Save image configuration',
                            defaultDir=os.getcwd(),
                            defaultFile=fname,
                            wildcard=file_choices,
                            style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
        if dlg.ShowModal() == wx.ID_OK:
            # bug fix: ImageConfig defines get_config(), not
            # get_current_config()
            conf = self.conf.get_config()
            ppath = os.path.abspath(dlg.GetPath())
            with open(ppath, 'w') as fh:
                fh.write("%s\n" % yaml.dump(conf))

    def load_config(self, evt=None):
        """prompt for a YAML file and apply its configuration;
        silently does nothing if PyYAML is unavailable"""
        if not HAS_YAML:
            return
        file_choices = 'YAML Config File (*.yaml)|*.yaml'
        dlg = wx.FileDialog(self, message='Read image configuration',
                            defaultDir=os.getcwd(),
                            wildcard=file_choices,
                            style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            # fix: use a context manager so the file handle is closed
            with open(os.path.abspath(dlg.GetPath()), 'r') as fh:
                conf = yaml.safe_load(fh.read())
            # NOTE(review): ImageConfig defines no load_config() method
            # in this module -- presumably provided elsewhere; verify.
            self.conf.load_config(conf)

    def DrawPanel(self):
        """build the frame: contour, grid, slice and scalebar sections
        laid out in a single GridBagSizer"""
        style = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, self.parent, -1, 'Configure Image', style=style)
        conf = self.conf
        self.SetFont(wx.Font(12,wx.SWISS,wx.NORMAL,wx.NORMAL,False))
        self.SetBackgroundColour(hex2rgb('#FEFEFE'))
        sizer = wx.GridBagSizer(2, 2)
        irow = 0
        bstyle=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ST_NO_AUTORESIZE
        # contours
        ctitle = SimpleText(self, 'Contours:', colour='#DD0000')
        label = SimpleText(self, "# Levels:")
        self.ncontours = FloatSpin(self, value=conf.ncontour_levels,
                                   min_val=0, max_val=5000,
                                   increment=1, digits=0, size=(60, -1),
                                   action=self.onContourEvents)
        self.showlabels = Check(self, label='Show Labels?',
                                default=conf.contour_labels,
                                action=self.onContourEvents)
        sizer.Add(ctitle, (irow, 0), (1, 2), labstyle, 2)
        irow += 1
        sizer.Add(label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.ncontours, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(self.showlabels, (irow, 2), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        # Grid
        title = SimpleText(self, 'Image Grid:', colour='#DD0000')
        label_gcolor = SimpleText(self, "Color:")
        label_galpha = SimpleText(self, "Alpha:")
        self.show_grid = Check(self, label='Show Grid with Labeled Axes?',
                               default=conf.show_grid,
                               action=self.onGridEvents)
        self.grid_alpha = FloatSpin(self, value=conf.grid_alpha,
                                    min_val=0, max_val=1,
                                    increment=0.05, digits=3, size=(130, -1),
                                    action=self.onGridEvents)
        self.grid_color = csel.ColourSelect(self, -1, "",
                                            mpl_color(conf.grid_color),
                                            size=(50, -1))
        self.grid_color.Bind(csel.EVT_COLOURSELECT, self.onGridEvents)
        irow += 1
        sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.show_grid, (irow, 1), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(label_gcolor, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.grid_color, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(label_galpha, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.grid_alpha, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        # X/Y Slices
        title = SimpleText(self, 'X/Y Slices:', colour='#DD0000')
        label_dir = SimpleText(self, "Direction:")
        label_wid = SimpleText(self, "Width (pixels):")
        self.slice_width = FloatSpin(self, value=conf.slice_width,
                                     min_val=0, max_val=5000,
                                     increment=1, digits=0, size=(60, -1),
                                     action=self.onSliceEvents)
        self.slice_dir = Choice(self, size=(90, -1),
                                choices=Slices_List,
                                action=self.onSliceEvents)
        self.slice_dir.SetStringSelection(conf.slices)
        self.slice_dynamic = Check(self,label='Slices Follow Mouse Motion?',
                                   default=conf.slice_onmotion,
                                   action=self.onSliceEvents)
        irow += 1
        sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.slice_dynamic, (irow, 1), (1, 2), labstyle, 2)
        irow += 1
        sizer.Add(label_dir, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.slice_dir, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(label_wid, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.slice_width, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        # Scalebar
        ypos, xpos = conf.scalebar_pos
        ysiz, xsiz = conf.scalebar_size
        units = conf.scalebar_units
        dshape = conf.data.shape
        ystep, xstep = conf.scalebar_pixelsize
        if xstep is None or ystep is None:
            # derive pixel sizes from the coordinate arrays when available
            ystep, xstep = 1, 1
            if conf.xdata is not None:
                xstep = abs(np.diff(conf.xdata).mean())
            if conf.ydata is not None:
                ystep = abs(np.diff(conf.ydata).mean())
            conf.scalebar_pixelsize = ystep, xstep
        title = SimpleText(self, 'Scalebar:', colour='#DD0000')
        lab_opts = dict(size=(120, -1))
        color_label = SimpleText(self, 'Color: ')
        xpos_label = SimpleText(self, 'X Position: ')
        ypos_label = SimpleText(self, 'Y Position: ')
        size_label = SimpleText(self, 'Scalebar Size: ')
        pos_label = SimpleText(self, "Scalebar Position (pixels from lower left):")
        width_label = SimpleText(self, 'Width (%s): ' % units)
        height_label = SimpleText(self, 'Height (pixels): ')
        pixsize_label = SimpleText(self, 'Pixel Size: ')
        xpix_label = SimpleText(self, 'X pixelsize: ')
        ypix_label = SimpleText(self, 'Y pixelsize: ')
        self.pixunits = LabeledTextCtrl(self, value=conf.scalebar_units,
                                        size=(80, -1),
                                        labeltext='Units:',
                                        action=self.onScalebarEvents)
        self.show_scalebar = Check(self, label='Show Scalebar',
                                   default=conf.scalebar_show,
                                   action=self.onScalebarEvents)
        self.show_label = Check(self, label='Show Label?',
                                default=conf.scalebar_showlabel,
                                action=self.onScalebarEvents)
        stext = "Image Size: X=%d, Y=%d pixels" % (dshape[1], dshape[0])
        scale_text = SimpleText(self, label=stext)
        self.label = LabeledTextCtrl(self, value=conf.scalebar_label,
                                     size=(150, -1),
                                     labeltext='Label:',
                                     action=self.onScalebarEvents)
        self.color = csel.ColourSelect(self, -1, "",
                                       mpl_color(conf.scalebar_color),
                                       size=(50, -1))
        self.color.Bind(csel.EVT_COLOURSELECT, self.onScalebarEvents)
        opts = dict(min_val=0, increment=1, digits=0, size=(100, -1),
                    action=self.onScalebarEvents)
        self.xpos = FloatSpin(self, value=xpos, max_val=dshape[1], **opts)
        self.ypos = FloatSpin(self, value=ypos, max_val=dshape[0], **opts)
        self.height = FloatSpin(self, value=ysiz, max_val=dshape[0], **opts)
        # the width spinner works in data units, so scale by pixel size
        # NOTE(review): assumes xstep > 0 (np.log10 fails otherwise)
        opts['increment'] = xstep
        opts['digits'] = max(1, 2 - int(np.log10(abs(xstep))))
        self.width = FloatSpin(self, value=xsiz, max_val=dshape[1]*xstep, **opts)
        opts['increment'] = 0.001
        opts['digits'] = 5
        self.xpix = FloatSpin(self, value=xstep, **opts)
        self.ypix = FloatSpin(self, value=ystep, **opts)
        irow += 1
        sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(scale_text, (irow, 1), (1, 4), labstyle, 2)
        irow += 1
        sizer.Add(pixsize_label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.pixunits.label, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(self.pixunits, (irow, 2), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(xpix_label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.xpix, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(ypix_label, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.ypix, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        irow += 1
        sizer.Add(size_label, (irow, 0), (1, 3), labstyle, 2)
        irow += 1
        sizer.Add(width_label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.width, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(height_label, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.height, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        irow += 1
        sizer.Add(pos_label, (irow, 0), (1, 3), labstyle, 2)
        irow += 1
        sizer.Add(xpos_label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.xpos, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(ypos_label, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.ypos, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        irow += 1
        sizer.Add(self.label.label, (irow, 0), (1, 1), labstyle, 2)
        sizer.Add(self.label, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(color_label, (irow, 2), (1, 1), labstyle, 2)
        sizer.Add(self.color, (irow, 3), (1, 1), labstyle, 2)
        irow += 1
        sizer.Add(self.show_scalebar, (irow, 1), (1, 1), labstyle, 2)
        sizer.Add(self.show_label, (irow, 2), (1, 2), labstyle, 2)
        irow += 1
        sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
        autopack(self, sizer)
        self.SetMinSize((500, 350))
        xsiz, ysiz = self.GetBestSize()
        # round the best size up to the next 25-pixel boundary
        self.SetSize((25*(1 + int(xsiz/25)), 25*(2 + int(ysiz/25))))
        self.Show()
        self.Raise()

    def onGridEvents(self, event=None):
        """apply grid visibility/color/alpha settings and redraw"""
        self.conf.show_grid = self.show_grid.IsChecked()
        self.conf.grid_color = hexcolor(self.grid_color.GetValue()[:3])
        self.conf.grid_alpha = self.grid_alpha.GetValue()
        self.parent.panel.autoset_margins()
        self.parent.panel.redraw()

    def onContourEvents(self, event=None):
        """apply contour settings and rebuild the contour display"""
        self.conf.ncontour_levels = self.ncontours.GetValue()
        self.conf.contour_labels = self.showlabels.IsChecked()
        self.parent.onContourToggle()

    def onSliceEvents(self, event=None):
        """apply X/Y slice settings"""
        self.conf.slice_width = self.slice_width.GetValue()
        self.conf.slices = self.slice_dir.GetStringSelection()
        self.conf.slice_onmotion = self.slice_dynamic.IsChecked()
        self.parent.onSliceChoice()

    def onScalebarEvents(self, event=None):
        """apply scalebar settings and redraw"""
        self.conf.scalebar_show = self.show_scalebar.IsChecked()
        self.conf.scalebar_showlabel = self.show_label.IsChecked()
        self.conf.scalebar_label = self.label.GetValue()
        # internal convention: positions and sizes are stored (y, x)
        self.conf.scalebar_pos = self.ypos.GetValue(), self.xpos.GetValue()
        self.conf.scalebar_size = self.height.GetValue(), self.width.GetValue()
        self.conf.scalebar_color = hexcolor(self.color.GetValue()[:3])
        self.conf.scalebar_units = self.pixunits.GetValue()
        self.conf.scalebar_pixelsize = self.ypix.GetValue(), self.xpix.GetValue()
        self.parent.panel.redraw()
|
import os
from math import log10

import wx
import wx.lib.agw.flatnotebook as flat_nb
import wx.lib.scrolledpanel as scrolled
import wx.lib.colourselect as csel

import numpy as np
import matplotlib.cm as cmap
from matplotlib.ticker import FuncFormatter

from .colors import register_custom_colormaps, hexcolor, hex2rgb, mpl_color
from .config import bool_ifnotNone, ifnotNone
from .plotconfigframe import FNB_STYLE, autopack
from .utils import LabeledTextCtrl, SimpleText, Check, Choice, HLine, pack, FloatSpin, MenuItem

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False
cm_names = register_custom_colormaps()
ColorMap_List = []
for cm in ('gray', 'coolwarm', 'viridis', 'inferno', 'plasma', 'magma', 'red',
'green', 'blue', 'magenta', 'yellow', 'cyan', 'Reds', 'Greens',
'Blues', 'cool', 'hot', 'copper', 'red_heat', 'green_heat',
'blue_heat', 'spring', 'summer', 'autumn', 'winter', 'ocean',
'terrain', 'jet', 'stdgamma', 'hsv', 'Accent', 'Spectral', 'PiYG',
'PRGn', 'Spectral', 'YlGn', 'YlGnBu', 'RdBu', 'RdPu', 'RdYlBu',
'RdYlGn'):
if cm in cm_names or hasattr(cmap, cm):
ColorMap_List.append(cm)
Contrast_List = ['None', '0.01', '0.02', '0.05', '0.1', '0.2', '0.5', '1.0',
'2.0', '5.0']
Contrast_NDArray = np.array((-1.0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1., 2, 5.))
Interp_List = ('nearest', 'bicubic', 'quadric', 'gaussian', 'kaiser',
'bessel', 'mitchell', 'catrom', 'spline16', 'spline36',
'bilinear', 'hanning', 'hamming', 'hermite', 'sinc', 'lanczos')
Slices_List = ('None', 'X', 'Y')
RGB_COLORS = ('red', 'green', 'blue')
class ImageConfig:
def __init__(self, axes=None, fig=None, canvas=None):
self.axes = axes
self.fig = fig
self.canvas = canvas
self.cmap = [cmap.gray, cmap.gray, cmap.gray]
self.cmap_reverse = False
self.interp = 'nearest'
self.show_axis = False
self.show_grid = False
self.grid_color = '#807030'
self.grid_alpha = 0.25
self.log_scale = False
self.flip_ud = False
self.flip_lr = False
self.rot_level = 0
self.contrast_level = 0
self.datalimits = [None, None, None, None]
self.cmap_lo = [0, 0, 0]
self.cmap_range = 1000
self.cmap_hi = [1000, 1000, 1000]
self.tricolor_bg = 'black'
self.tricolor_mode = 'rgb'
self.int_lo = [0, 0, 0]
self.int_hi = [1, 1, 1]
self.data = None
self.xdata = None
self.ydata = None
self.xlab = 'X'
self.ylab = 'Y'
self.indices = None
self.title = 'image'
self.style = 'image'
self.highlight_areas = []
self.ncontour_levels = 10
self.contour_levels = None
self.contour_labels = True
self.cursor_mode = 'zoom'
self.zoombrush = wx.Brush('#040410', wx.SOLID)
self.zoompen = wx.Pen('#101090', 3, wx.SOLID)
self.zoom_lims = []
self.slices = Slices_List[0]
self.slice_xy = -1, -1
self.slice_width = 1
self.slice_onmotion = False
self.scalebar_show = False
self.scalebar_showlabel = False
self.scalebar_label = ''
self.scalebar_pos = 5, 5
self.scalebar_size = 1, 1
self.scalebar_pixelsize = None, None
self.scalebar_units = 'mm'
self.scalebar_color = '#EEEE99'
self.set_formatters()
def set_colormap(self, name, reverse=False, icol=0):
self.cmap_reverse = reverse
if reverse and not name.endswith('_r'):
name = name + '_r'
elif not reverse and name.endswith('_r'):
name = name[:-2]
self.cmap[icol] = _cmap_ = cmap.get_cmap(name)
if hasattr(self, 'contour'):
xname = 'gray'
if name == 'gray_r':
xname = 'Reds_r'
elif name == 'gray':
xname = 'Reds'
elif name.endswith('_r'):
xname = 'gray_r'
self.contour.set_cmap(getattr(cmap, xname))
if hasattr(self, 'image'):
self.image.set_cmap(self.cmap[icol])
if hasattr(self, 'highlight_areas'):
if hasattr(self.cmap[icol], '_lut'):
rgb = [int(i*240)^255 for i in self.cmap[icol]._lut[0][:3]]
col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2])
for area in self.highlight_areas:
for w in area.collections + area.labelTexts:
w.set_color(col)
def flip_vert(self):
"flip image along vertical axis (up/down)"
self.data = np.flipud(self.data)
if self.ydata is not None:
self.ydata = self.ydata[::-1]
self.flip_ud = not self.flip_ud
def flip_horiz(self):
"flip image along horizontal axis (left/right)"
self.data = np.fliplr(self.data)
if self.xdata is not None:
self.xdata = self.xdata[::-1]
self.flip_lr = not self.flip_lr
def rotate90(self, event=None):
"rotate 90 degrees, CW"
if self.xdata is not None:
self.xdata = self.xdata[::-1]
if self.ydata is not None:
self.ydata = self.ydata[:]
self.xdata, self.ydata = self.ydata, self.xdata
self.xlab, self.ylab = self.ylab, self.xlab
self.data = np.rot90(self.data)
self.rot_level += 1
if self.rot_level == 4:
self.rot_level = 0
def set_formatters(self):
if self.axes is not None:
self.axes.xaxis.set_major_formatter(FuncFormatter(self.xformatter))
self.axes.yaxis.set_major_formatter(FuncFormatter(self.yformatter))
def xformatter(self, x, pos):
" x-axis formatter "
return self._format(x, pos, dtype='x')
def yformatter(self, y, pos):
" y-axis formatter "
return self._format(y, pos, dtype='y')
def _format(self, x, pos, dtype='x'):
""" home built tick formatter to use with FuncFormatter():
x value to be formatted
type 'x' or 'y' or 'y2' to set which list of ticks to get
also sets self._yfmt/self._xfmt for statusbar
"""
fmt = '%1.5g'
if dtype == 'y':
ax = self.axes.yaxis
dat = self.ydata
if dat is None:
dat = np.arange(self.data.shape[0])
else:
ax = self.axes.xaxis
dat = self.xdata
if dat is None:
dat = np.arange(self.data.shape[1])
ticks = [0,1]
onep = 1.00001
try:
dtick = 0.1 * onep * (dat.max() - dat.min())
except:
dtick = 0.2 * onep
try:
ticks = ax.get_major_locator()()
except:
ticks = [0, 1]
try:
dtick = abs(dat[int(ticks[1])] - dat[int(ticks[0])]) * onep
except:
pass
if dtick > 89999:
fmt = '%.2e'
else:
fmt = '%%1.%df' % max(0, -round(log10(0.75*dtick)))
try:
s = fmt % dat[int(x)]
except:
s = ''
s.strip()
s = s.replace('+', '')
while s.find('e0')>0:
s = s.replace('e0','e')
while s.find('-0')>0:
s = s.replace('-0','-')
return s
def relabel(self):
" re draw labels (title, x,y labels)"
pass
def set_zoombrush(self,color, style):
self.zoombrush = wx.Brush(color, style)
def set_zoompen(self,color, style):
self.zoompen = wx.Pen(color, 3, style)
def tricolor_white_bg(self, img):
"""transforms image from RGB with (0,0,0)
showing black to RGB with 0,0,0 showing white
takes the Red intensity and sets
the new intensity to go
from (0, 0.5, 0.5) (for Red=0) to (0, 0, 0) (for Red=1)
and so on for the Green and Blue maps.
Thus the image will be transformed from
old intensity new intensity
(0.0, 0.0, 0.0) (black) (1.0, 1.0, 1.0) (white)
(1.0, 1.0, 1.0) (white) (0.0, 0.0, 0.0) (black)
(1.0, 0.0, 0.0) (red) (1.0, 0.5, 0.5) (red)
(0.0, 1.0, 0.0) (green) (0.5, 1.0, 0.5) (green)
(0.0, 0.0, 1.0) (blue) (0.5, 0.5, 1.0) (blue)
"""
tmp = 0.5*(1.0 - (img - img.min())/(img.max() - img.min()))
out = tmp*0.0
out[:,:,0] = tmp[:,:,1] + tmp[:,:,2]
out[:,:,1] = tmp[:,:,0] + tmp[:,:,2]
out[:,:,2] = tmp[:,:,0] + tmp[:,:,1]
return out
def rgb2cmy(self, img, whitebg=False):
"""transforms image from RGB to CMY"""
tmp = img*1.0
if whitebg:
tmp = (1.0 - (img - img.min())/(img.max() - img.min()))
out = tmp*0.0
out[:,:,0] = (tmp[:,:,1] + tmp[:,:,2])/2.0
out[:,:,1] = (tmp[:,:,0] + tmp[:,:,2])/2.0
out[:,:,2] = (tmp[:,:,0] + tmp[:,:,1])/2.0
return out
def set_config(self, interp=None, colormap=None, reverse_colormap=None,
contrast_level=None, flip_ud=None, flip_lr=None,
rot=None, tricolor_bg=None, ncontour_levels=None,
title=None, style=None):
"""set configuration options:
interp, colormap, reverse_colormap, contrast_levels, flip_ud,
flip_lr, rot, tricolor_bg, ncontour_levels, title, style
"""
if interp is not None:
interp = interp.lower()
self.interp = interp if interp in Interp_List else self.interp
if colormap is not None:
colormap = colormap.lower()
if colormap.endswith('_r'):
reverse_colormap = True
colormap = colormap[:-2]
self.colormap = colormap if colormap in ColorMap_List else self.colormap
if contrast_level is not None:
self.contrast_level = float(contrast_level)
self.cmap_reverse = bool_ifnotNone(reverse_colormap, self.cmap_reverse)
self.flip_ud = bool_ifnotNone(flip_ud, self.flip_ud)
self.flip_lr = bool_ifnotNone(flip_lr, self.flip_lr)
self.rot = bool_ifnotNone(rot, self.rot)
if tricolor_bg is not None:
tricolor_bg = tricolor_bg.lower()
if tricolor_bg in ('black', 'white'):
self.tricolor_bg = tricolor_bg
if ncontour_levels is not None:
self.ncontour_level = int(ncontour_levels)
if style is not None:
style = style.lower()
if style in ('image', 'contour'):
self.style = style
self.title = ifnotNone(title, self.title)
def get_config(self):
"""get dictionary of configuration options"""
out = {'reverse_colormap': self.cmap_reverse}
for attr in ('interp', 'colormap', 'contrast_levels', 'flip_ud',
'flip_lr', 'rot', 'tricolor_bg', 'ncontour_levels',
'title', 'style'):
out[attr] = getattr(self, attr)
return out
labstyle= wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL
class ImageConfigFrame(wx.Frame):
""" GUI Configure Frame for Images"""
def __init__(self, parent=None, config=None, trace_color_callback=None):
if config is None:
config = ImageConfig()
self.conf = config
self.parent = parent
self.canvas = self.conf.canvas
self.axes = self.canvas.figure.get_axes()
self.DrawPanel()
mbar = wx.MenuBar()
fmenu = wx.Menu()
MenuItem(self, fmenu, "Save Configuration\tCtrl+S",
"Save Configuration",
self.save_config)
MenuItem(self, fmenu, "Load Configuration\tCtrl+R",
"Load Configuration",
self.load_config)
mbar.Append(fmenu, 'File')
self.SetMenuBar(mbar)
def save_config(self, evt=None, fname='wxmplot.yaml'):
if not HAS_YAML:
return
file_choices = 'YAML Config File (*.yaml)|*.yaml'
dlg = wx.FileDialog(self, message='Save image configuration',
defaultDir=os.getcwd(),
defaultFile=fname,
wildcard=file_choices,
style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
conf = self.conf.get_current_config()
ppath = os.path.abspath(dlg.GetPath())
with open(ppath, 'w') as fh:
fh.write("%s\n" % yaml.dump(conf))
def load_config(self, evt=None):
if not HAS_YAML:
return
file_choices = 'YAML Config File (*.yaml)|*.yaml'
dlg = wx.FileDialog(self, message='Read image configuration',
defaultDir=os.getcwd(),
wildcard=file_choices,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
conf = yaml.safe_load(open(os.path.abspath(dlg.GetPath()), 'r').read())
self.conf.load_config(conf)
def DrawPanel(self):
style = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, self.parent, -1, 'Configure Image', style=style)
conf = self.conf
self.SetFont(wx.Font(12,wx.SWISS,wx.NORMAL,wx.NORMAL,False))
self.SetBackgroundColour(hex2rgb('#FEFEFE'))
sizer = wx.GridBagSizer(2, 2)
irow = 0
bstyle=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ST_NO_AUTORESIZE
# contours
ctitle = SimpleText(self, 'Contours:', colour='#DD0000')
label = SimpleText(self, "# Levels:")
self.ncontours = FloatSpin(self, value=conf.ncontour_levels,
min_val=0, max_val=5000,
increment=1, digits=0, size=(60, -1),
action=self.onContourEvents)
self.showlabels = Check(self, label='Show Labels?',
default=conf.contour_labels,
action=self.onContourEvents)
sizer.Add(ctitle, (irow, 0), (1, 2), labstyle, 2)
irow += 1
sizer.Add(label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.ncontours, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.showlabels, (irow, 2), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# Grid
title = SimpleText(self, 'Image Grid:', colour='#DD0000')
label_gcolor = SimpleText(self, "Color:")
label_galpha = SimpleText(self, "Alpha:")
self.show_grid = Check(self, label='Show Grid with Labeled Axes?',
default=conf.show_grid,
action=self.onGridEvents)
self.grid_alpha = FloatSpin(self, value=conf.grid_alpha,
min_val=0, max_val=1,
increment=0.05, digits=3, size=(130, -1),
action=self.onGridEvents)
self.grid_color = csel.ColourSelect(self, -1, "",
mpl_color(conf.grid_color),
size=(50, -1))
self.grid_color.Bind(csel.EVT_COLOURSELECT, self.onGridEvents)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.show_grid, (irow, 1), (1, 1), labstyle, 2)
irow += 1
sizer.Add(label_gcolor, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.grid_color, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(label_galpha, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.grid_alpha, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# X/Y Slices
title = SimpleText(self, 'X/Y Slices:', colour='#DD0000')
label_dir = SimpleText(self, "Direction:")
label_wid = SimpleText(self, "Width (pixels):")
self.slice_width = FloatSpin(self, value=conf.slice_width,
min_val=0, max_val=5000,
increment=1, digits=0, size=(60, -1),
action=self.onSliceEvents)
self.slice_dir = Choice(self, size=(90, -1),
choices=Slices_List,
action=self.onSliceEvents)
self.slice_dir.SetStringSelection(conf.slices)
self.slice_dynamic = Check(self,label='Slices Follow Mouse Motion?',
default=conf.slice_onmotion,
action=self.onSliceEvents)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.slice_dynamic, (irow, 1), (1, 2), labstyle, 2)
irow += 1
sizer.Add(label_dir, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.slice_dir, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(label_wid, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.slice_width, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# Scalebar
ypos, xpos = conf.scalebar_pos
ysiz, xsiz = conf.scalebar_size
units = conf.scalebar_units
dshape = conf.data.shape
ystep, xstep = conf.scalebar_pixelsize
if xstep is None or ystep is None:
ystep, xstep = 1, 1
if conf.xdata is not None:
xstep = abs(np.diff(conf.xdata).mean())
if conf.ydata is not None:
ystep = abs(np.diff(conf.ydata).mean())
conf.scalebar_pixelsize = ystep, xstep
title = SimpleText(self, 'Scalebar:', colour='#DD0000')
lab_opts = dict(size=(120, -1))
color_label = SimpleText(self, 'Color: ')
xpos_label = SimpleText(self, 'X Position: ')
ypos_label = SimpleText(self, 'Y Position: ')
size_label = SimpleText(self, 'Scalebar Size: ')
pos_label = SimpleText(self, "Scalebar Position (pixels from lower left):")
width_label = SimpleText(self, 'Width (%s): ' % units)
height_label = SimpleText(self, 'Height (pixels): ')
pixsize_label = SimpleText(self, 'Pixel Size: ')
xpix_label = SimpleText(self, 'X pixelsize: ')
ypix_label = SimpleText(self, 'Y pixelsize: ')
self.pixunits = LabeledTextCtrl(self, value=conf.scalebar_units,
size=(80, -1),
labeltext='Units:',
action=self.onScalebarEvents)
self.show_scalebar = Check(self, label='Show Scalebar',
default=conf.scalebar_show,
action=self.onScalebarEvents)
self.show_label = Check(self, label='Show Label?',
default=conf.scalebar_showlabel,
action=self.onScalebarEvents)
stext = "Image Size: X=%d, Y=%d pixels" % (dshape[1], dshape[0])
scale_text = SimpleText(self, label=stext)
self.label = LabeledTextCtrl(self, value=conf.scalebar_label,
size=(150, -1),
labeltext='Label:',
action=self.onScalebarEvents)
self.color = csel.ColourSelect(self, -1, "",
mpl_color(conf.scalebar_color),
size=(50, -1))
self.color.Bind(csel.EVT_COLOURSELECT, self.onScalebarEvents)
opts = dict(min_val=0, increment=1, digits=0, size=(100, -1),
action=self.onScalebarEvents)
self.xpos = FloatSpin(self, value=xpos, max_val=dshape[1], **opts)
self.ypos = FloatSpin(self, value=ypos, max_val=dshape[0], **opts)
self.height = FloatSpin(self, value=ysiz, max_val=dshape[0], **opts)
opts['increment'] = xstep
opts['digits'] = max(1, 2 - int(np.log10(abs(xstep))))
self.width = FloatSpin(self, value=xsiz, max_val=dshape[1]*xstep, **opts)
opts['increment'] = 0.001
opts['digits'] = 5
self.xpix = FloatSpin(self, value=xstep, **opts)
self.ypix = FloatSpin(self, value=ystep, **opts)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(scale_text, (irow, 1), (1, 4), labstyle, 2)
irow += 1
sizer.Add(pixsize_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.pixunits.label, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.pixunits, (irow, 2), (1, 1), labstyle, 2)
irow += 1
sizer.Add(xpix_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.xpix, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(ypix_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.ypix, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(size_label, (irow, 0), (1, 3), labstyle, 2)
irow += 1
sizer.Add(width_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.width, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(height_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.height, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(pos_label, (irow, 0), (1, 3), labstyle, 2)
irow += 1
sizer.Add(xpos_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.xpos, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(ypos_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.ypos, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(self.label.label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.label, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(color_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.color, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(self.show_scalebar, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.show_label, (irow, 2), (1, 2), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
autopack(self, sizer)
self.SetMinSize((500, 350))
xsiz, ysiz = self.GetBestSize()
self.SetSize((25*(1 + int(xsiz/25)), 25*(2 + int(ysiz/25))))
self.Show()
self.Raise()
def onGridEvents(self, event=None):
# Copy grid settings from the dialog controls into the shared config, then redraw the panel.
self.conf.show_grid = self.show_grid.IsChecked()
# [:3] drops the alpha component of the wx colour before hex conversion
self.conf.grid_color = hexcolor(self.grid_color.GetValue()[:3])
self.conf.grid_alpha = self.grid_alpha.GetValue()
self.parent.panel.autoset_margins()
self.parent.panel.redraw()
def onContourEvents(self, event=None):
# Push contour level count and label visibility to the config, then rebuild contours.
self.conf.ncontour_levels = self.ncontours.GetValue()
self.conf.contour_labels = self.showlabels.IsChecked()
self.parent.onContourToggle()
def onSliceEvents(self, event=None):
# Update slice width, direction, and update-on-mouse-motion options, then re-slice.
self.conf.slice_width = self.slice_width.GetValue()
self.conf.slices = self.slice_dir.GetStringSelection()
self.conf.slice_onmotion = self.slice_dynamic.IsChecked()
self.parent.onSliceChoice()
def onScalebarEvents(self, event=None):
# Sync every scalebar option; pos, size, and pixelsize are stored (y, x) ordered.
self.conf.scalebar_show = self.show_scalebar.IsChecked()
self.conf.scalebar_showlabel = self.show_label.IsChecked()
self.conf.scalebar_label = self.label.GetValue()
self.conf.scalebar_pos = self.ypos.GetValue(), self.xpos.GetValue()
self.conf.scalebar_size = self.height.GetValue(), self.width.GetValue()
self.conf.scalebar_color = col = hexcolor(self.color.GetValue()[:3])
self.conf.scalebar_units = self.pixunits.GetValue()
self.conf.scalebar_pixelsize = self.ypix.GetValue(), self.xpix.GetValue()
self.parent.panel.redraw()
|
en
| 0.709762
|
home built tick formatter to use with FuncFormatter(): x value to be formatted type 'x' or 'y' or 'y2' to set which list of ticks to get also sets self._yfmt/self._xfmt for statusbar transforms image from RGB with (0,0,0) showing black to RGB with 0,0,0 showing white takes the Red intensity and sets the new intensity to go from (0, 0.5, 0.5) (for Red=0) to (0, 0, 0) (for Red=1) and so on for the Green and Blue maps. Thus the image will be transformed from old intensity new intensity (0.0, 0.0, 0.0) (black) (1.0, 1.0, 1.0) (white) (1.0, 1.0, 1.0) (white) (0.0, 0.0, 0.0) (black) (1.0, 0.0, 0.0) (red) (1.0, 0.5, 0.5) (red) (0.0, 1.0, 0.0) (green) (0.5, 1.0, 0.5) (green) (0.0, 0.0, 1.0) (blue) (0.5, 0.5, 1.0) (blue) transforms image from RGB to CMY set configuration options: interp, colormap, reverse_colormap, contrast_levels, flip_ud, flip_lr, rot, tricolor_bg, ncontour_levels, title, style get dictionary of configuration options GUI Configure Frame for Images # contours # Grid # X/Y Slices # Scalebar
| 1.734589
| 2
|
test/python/test_skip_transpiler.py
|
biryani/qiskit-terra
| 0
|
6629070
|
<reponame>biryani/qiskit-terra<filename>test/python/test_skip_transpiler.py
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring, redefined-builtin
import unittest
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import compile, execute
from qiskit import Aer
from .common import QiskitTestCase
class CompileSkipTranslationTest(QiskitTestCase):
    """Verify that compilation results agree with and without skip_transpiler."""

    @staticmethod
    def _make_circuit():
        """Build the small u1/u2 measured circuit shared by both tests."""
        qreg = QuantumRegister(2)
        creg = ClassicalRegister(2)
        circuit = QuantumCircuit(qreg, creg)
        circuit.u1(3.14, qreg[0])
        circuit.u2(3.14, 1.57, qreg[0])
        circuit.measure(qreg, creg)
        return circuit

    def test_simple_compile(self):
        """Test compile with and without skip_transpiler."""
        circuit = self._make_circuit()
        backend = Aer.get_backend('qasm_simulator')
        qobj_skipped = compile(circuit, backend, skip_transpiler=True)
        qobj_full = compile(circuit, backend, skip_transpiler=False)
        # Skipping the transpiler must not change the compiled payload.
        self.assertEqual(qobj_skipped.config, qobj_full.config)
        self.assertEqual(qobj_skipped.experiments, qobj_full.experiments)

    def test_simple_execute(self):
        """Test execute with and without skip_transpiler."""
        circuit = self._make_circuit()
        backend = Aer.get_backend('qasm_simulator')
        result_skipped = execute(circuit, backend, seed=42, skip_transpiler=True).result()
        result_full = execute(circuit, backend, seed=42, skip_transpiler=False).result()
        # Same seed, same circuit: measurement counts must match exactly.
        self.assertEqual(result_skipped.get_counts(), result_full.get_counts())


if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring, redefined-builtin
import unittest
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import compile, execute
from qiskit import Aer
from .common import QiskitTestCase
class CompileSkipTranslationTest(QiskitTestCase):
"""Test compilation with skip translation."""
def test_simple_compile(self):
"""Test compile with and without skip_transpiler."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr)
qc.u1(3.14, qr[0])
qc.u2(3.14, 1.57, qr[0])
qc.measure(qr, cr)
backend = Aer.get_backend('qasm_simulator')
rtrue = compile(qc, backend, skip_transpiler=True)
rfalse = compile(qc, backend, skip_transpiler=False)
self.assertEqual(rtrue.config, rfalse.config)
self.assertEqual(rtrue.experiments, rfalse.experiments)
def test_simple_execute(self):
"""Test execute with and without skip_transpiler."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr)
qc.u1(3.14, qr[0])
qc.u2(3.14, 1.57, qr[0])
qc.measure(qr, cr)
backend = Aer.get_backend('qasm_simulator')
rtrue = execute(qc, backend, seed=42, skip_transpiler=True).result()
rfalse = execute(qc, backend, seed=42, skip_transpiler=False).result()
self.assertEqual(rtrue.get_counts(), rfalse.get_counts())
if __name__ == '__main__':
unittest.main()
|
en
| 0.821383
|
# -*- coding: utf-8 -*- # Copyright 2018, IBM. # # This source code is licensed under the Apache License, Version 2.0 found in # the LICENSE.txt file in the root directory of this source tree. # pylint: disable=missing-docstring, redefined-builtin Test compilation with skip translation. Test compile with and without skip_transpiler. Test execute with and without skip_transpiler.
| 1.972909
| 2
|
publishconf.py
|
zentropi/zentropi.org
| 0
|
6629071
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
# Pull in the base (development) Pelican settings, then override a few for publishing.
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = 'https://zentropi.org'
# Emit absolute URLs in the published output.
RELATIVE_URLS = False
SITENAME = 'zentropi.org'
SITETITLE = 'Zentropi.org'
SITESUBTITLE = 'Script Your World'
SITEDESCRIPTION = ''
SITELOGO = SITEURL + '/images/logo.png'
FAVICON = SITEURL + '/images/logo.png'
BROWSER_COLOR = '#333'
ROBOTS = 'index, follow'
# Atom feeds are generated only for published builds.
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
# Wipe the output directory before each publish build.
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = 'https://zentropi.org'
RELATIVE_URLS = False
SITENAME = 'zentropi.org'
SITETITLE = 'Zentropi.org'
SITESUBTITLE = 'Script Your World'
SITEDESCRIPTION = ''
SITELOGO = SITEURL + '/images/logo.png'
FAVICON = SITEURL + '/images/logo.png'
BROWSER_COLOR = '#333'
ROBOTS = 'index, follow'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
en
| 0.769843
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is only used if you use `make publish` or # explicitly specify it as your config file. # If your site is available via HTTPS, make sure SITEURL begins with https:// # Following items are often useful when publishing #DISQUS_SITENAME = "" #GOOGLE_ANALYTICS = ""
| 1.483552
| 1
|
nurses/widgets/widget.py
|
salt-die/nurses
| 21
|
6629072
|
from collections import defaultdict
import curses
from ..observable import Observable
# Border glyph sets keyed by style name. Each value is six characters in the
# order consumed by Widget.border: upper-left, upper-right, vertical,
# horizontal, lower-left, lower-right.
BORDER_STYLES = {
"light" : "┌┐│─└┘",
"heavy" : "┏┓┃━┗┛",
"double": "╔╗║═╚╝",
"curved": "╭╮│─╰╯",
}
class BindMagic:
"""
Decorator that binds a method to attributes. Attributes in `attrs` that aren't Observable will be redefined as Observables.
Warning! This is a very abusive way to use the automatic calling of `__set_name__` in the descriptor protocol.
"""
def __init__(self, *attrs):
# Names of the attributes the decorated method should react to.
self.attrs = attrs
def __init__(self, *attrs):
self.attrs = attrs
def __set_name__(self, owner, name):
# Called automatically when the decorated function is assigned in a class body.
# For each target attribute: find (or create) an Observable descriptor on `owner`,
# then register the decorated method as a callback under (owner.__name__, name).
for attr in self.attrs:
for base in owner.__mro__:
# NOTE(review): hasattr(base, attr) also sees attributes inherited by `base`,
# but base.__dict__[attr] only sees base's own dict — `attr in base.__dict__`
# looks like the intended test; confirm no KeyError is possible in practice.
if hasattr(base, attr):
base_attr = base.__dict__[attr]
if isinstance(base_attr, Observable):
prop = base_attr
else:
# Wrap the plain class attribute in an Observable on the *owner* class.
setattr(owner, attr, Observable(base_attr)) # default value of Observable will come from base.__dict__
prop = owner.__dict__[attr]
prop.__set_name__(owner, attr)
break
else:
# Attribute not found anywhere in the MRO: create a fresh Observable for it.
setattr(owner, attr, Observable())
prop = owner.__dict__[attr]
prop.__set_name__(owner, attr)
prop.bind(owner.__name__, name)
# Replace the BindMagic wrapper with the undecorated function on the class.
setattr(owner, name, self.func)
def __call__(self, func):
# Used as `@bind_to("attr")` — captures the decorated function.
self.func = func
return self
class Observer(type):
"""
This metaclass simply drops the `bind_to` decorator into the class dict.
`bind_to` allows one to quickly bind functions to attributes in the class body - these attributes
will be turned into Observables by the decorator.
"""
# NOTE(review): __prepare__ is invoked on the metaclass as
# Observer.__prepare__(name, bases, **kwds); this signature omits **kwds, so
# keyword arguments in a class statement would raise — confirm none are used.
def __prepare__(name, bases):
# Seed the class namespace so `bind_to` is usable as a decorator in the body.
return { "bind_to": BindMagic }
def __new__(meta, name, bases, methods):
# Remove the helper so it doesn't leak into the finished class as an attribute.
del methods["bind_to"]
return super().__new__(meta, name, bases, methods)
class Widget(metaclass=Observer):
"""
The base window for nurses. A fancy wrapper around a curses window.
Parameters
----------
top, left, height, width: optional, positional only
Upper and left-most coordinates of widget relative to parent, and dimensions of the widget.
(the defaults are 0, 0, parent's max height, parent's max width)
Other Parameters
----------------
color: optional
A curses color_pair, the default color of this widget. (the default is `curses.color_pair(0)`)
pos_hint, size_hint: optional
If a pos_hint or size_hint are given they will override any given pos or size arguments. Hints are expected to be
2-tuples of numbers or None. Fractional arguments are interpreted as percentage of parent, and parent width or
height will be added to negative arguments. (e.g., `size_hint=(.5, None)` means widget will be half as tall as parent
and the width will come from the `width` arg.)
transparent: optional
If true, widget will overlay other widgets instead of overwrite them (whitespace will be "see-through"). (the default is `False`)
Notes
-----
Coordinates are (y, x) (both a curses and a numpy convention) with y being vertical and increasing as you move down
and x being horizontal and increasing as you move right. Top-left corner is (0, 0)
If some part of the widget moves out-of-bounds of the screen only the part that overlaps the screen will be drawn.
Widget size is limited by screen size. (but ArrayPad isn't)
"""
types = { } # Registry of subclasses of Widget
# Class-level defaults; instances shadow these when configured via kwargs in __init__.
color = 0
parent = None
transparent = False
border_style = None
border_color = None
pos_hint = None, None
size_hint = None, None
def __init_subclass__(cls):
# Register every subclass by name so new_widget(create_with="Name") can look it up.
Widget.types[cls.__name__] = cls # Register subclasses
if not cls.on_press.__doc__:
cls.on_press.__doc__ = Widget.on_press.__doc__
def __init__(self, *args, **kwargs):
self.children = [ ]
self.group = defaultdict(list)
self.window = None
# Assign default values if len(args) < 4
# NOTE(review): only 0, 2, or 4+ positional args unpack cleanly; exactly 3 would
# raise ValueError here — confirm callers never pass three.
top, left, height, width, *rest = args + (None, None) if len(args) == 2 else args or (0, 0, None, None)
self.top = top
self.left = left
self.height = height
self.width = width
for attr in tuple(kwargs):
# This allows one to set class attributes with keyword-arguments. TODO: Document this.
if hasattr(self, attr):
setattr(self, attr, kwargs.pop(attr))
# Unconsumed positionals/kwargs continue up the MRO (cooperative multiple inheritance).
super().__init__(*rest, **kwargs)
__init__.__text_signature__ = (
"($self, top=0, left=0, height=None, width=None, /, "
"color=0, parent=None, transparent=False, border_style=None, border_color=None, "
"pos_hint=(None, None), size_hint=(None, None), **kwargs)"
)
def getter(self, name, getter):
"""
Replace an attribute lookup with a no-argument function call.
::Warning:: This modifies the class dictionary, replacing any non-Observable attribute with an Observable.
"""
observable = getattr(type(self), name, None)
if not isinstance(observable, Observable):
setattr(type(self), name, observable := Observable(observable))
# Per-instance getter: attribute reads on `self` now call `getter()` instead.
observable.getters[self] = getter
# The four callbacks below are bound (via BindMagic) to direct assignments of
# top/left/height/width: setting an explicit value clears the corresponding
# hint entry so a stale hint can't override it on the next update_geometry().
@bind_to("top")
def _set_pos_hint_y(self):
self.pos_hint = None, self.pos_hint[1]
@bind_to("left")
def _set_pos_hint_x(self):
self.pos_hint = self.pos_hint[0], None
@bind_to("height")
def _set_size_hint_y(self):
self.size_hint = None, self.size_hint[1]
@bind_to("width")
def _set_size_hint_x(self):
self.size_hint = self.size_hint[0], None
def update_geometry(self):
"""
Set or reset the widget's geometry based on size or pos hints if they exist.
Notes
-----
This should only be called by the widget's parent (usually when calling the parent's `add_widget` method).
This will immediately return if there isn't a root widget, since screen size can't be determined yet.
"""
if self.root is None:
return
# Usable parent area shrinks by one cell on each side when the parent has a border.
border = int(self.parent.has_border)
h, w = self.parent.height - 2 * border, self.parent.width - 2 * border
top, left = self.pos_hint
height, width = self.size_hint
if top is not None:
self.top = self.convert(top, h)
if left is not None:
self.left = self.convert(left, w)
if height is not None:
self.height = self.convert(height, h)
if width is not None:
self.width = self.convert(width, w)
if self.height is None:
self.height = h
if self.width is None:
self.width = w
# Re-assert the hints: the bound callbacks cleared them when the attributes
# above were assigned directly.
self.pos_hint = top, left
self.size_hint = height, width
if self.window is None:
# width + 1: the window carries an extra column; refresh() compensates with width - 1.
self.window = curses.newwin(self.height, self.width + 1)
self.update_color(self.color)
if self.has_border:
self.border(self.border_style, self.border_color)
for child in self.children:
if child is not None:
child.update_geometry()
@bind_to("height", "width")
def _resize(self):
# Bound callback: runs whenever height or width is assigned.
window = self.window
if not window:
return
if self.has_border: # Erase the right-most, lower-most border in case widget expands
h, w = window.getmaxyx()
h, w = h - 1, w - 2
color = self.color
# NOTE(review): default_character isn't defined in this class — presumably
# supplied by a subclass or mixin; confirm before calling on a bare Widget.
ch = self.default_character
window.addstr(0, w, ch, color)
window.addstr(h, w, ch, color)
for x in range(1, w):
window.addstr(h, x, ch, color)
for y in range(1, h):
window.addstr(y, w, ch, color)
window.resize(self.height, self.width + 1)
self.update_color(self.color)
if self.has_border:
self.border(self.border_style, self.border_color)
@property
def bottom(self):
# First row below the widget (top + height).
return self.top + self.height
@property
def right(self):
# First column right of the widget (left + width).
return self.left + self.width
@property
def root(self):
# Walk up the parent chain; None until the widget is attached to a tree with a root.
if self.parent is None:
return None
return self.parent.root
def walk(self, start=None):
# Post-order traversal: yields every descendant of `start`, then `start` itself.
# With no argument, walks the whole tree from the root.
if start is None:
start = self.root
for child in start.children:
yield from self.walk(child)
yield start
@property
def is_in_front(self):
# Last child draws last, i.e. on top.
return self.parent and self.parent.children[-1] is self
@property
def is_in_back(self):
return self.parent and self.parent.children[0] is self
def pull_to_front(self, widget):
"""Given a widget or an index of a widget, widget is moved to top of widget stack (so it is drawn last).
"""
widgets = self.children
if isinstance(widget, int):
widgets.append(widgets.pop(widget))
else:
widgets.remove(widget)
widgets.append(widget)
def push_to_back(self, widget):
"""Given a widget or an index of a widget, widget is moved to bottom of widget stack (so it is drawn first).
"""
widgets = self.children
if isinstance(widget, int):
widgets.insert(0, widgets.pop(widget))
else:
widgets.remove(widget)
widgets.insert(0, widget)
def add_widget(self, widget):
# Adopt `widget` as the top-most child and lay it out immediately.
self.children.append(widget)
widget.parent = self
widget.update_geometry()
def remove_widget(self, widget):
self.children.remove(widget)
def new_widget(self, *args, group=None, create_with=None, **kwargs):
"""
Create a new widget and append to widget stack. Can group widgets if providing a hashable group.
To create a new subclassed widget use `create_with=MyWidget` or `create_with="MyWidget"` (pass the class or the class' name).
"""
if create_with is None:
create_with = Widget
elif isinstance(create_with, str):
# Name lookup against the registry populated by __init_subclass__.
create_with = Widget.types[create_with]
widget = create_with(*args, parent=self, **kwargs)
self.add_widget(widget)
if group is not None:
self.group[group].append(widget)
return widget
@property
def overlay(self):
# Blit strategy: curses `overlay` treats blanks as transparent, `overwrite` doesn't.
return self.window.overlay if self.transparent else self.window.overwrite
def refresh(self):
"""Redraw children's windows.
"""
# Notably, we don't use curses.panels as they aren't available for windows-curses...
# ...upside is we don't error when moving a widget off-screen.
border = int(self.has_border)
h, w = self.height, self.width
for widget in self.children:
if widget is None:
continue
# Depth-first: a child renders its own subtree before being blitted onto us.
widget.refresh()
y, x = widget.top, widget.left
# Negative top/left: clip the source instead of moving the destination off-window.
src_t, des_t = (-y, border) if y < 0 else (0, y + border)
src_l, des_l = (-x, border) if x < 0 else (0, x + border)
des_h = min(h - 1, des_t + widget.height)
des_w = min(w - 1, des_l + widget.width - 1) # -1 compensates for the extra width of widget's window
widget.overlay(self.window, src_t, src_l, des_t, des_l, des_h, des_w) # FIXME: This is causing an error on WSL terminal.
@staticmethod
def convert(value, bounds):
    """Convert a fractional or negative coordinate into an absolute one.

    Floats are taken as a fraction of ``bounds`` (truncated toward zero);
    negative results are wrapped by adding ``bounds``.
    """
    absolute = int(value * bounds) if isinstance(value, float) else value
    if absolute < 0:
        absolute += bounds
    return absolute
@staticmethod
def line(y1, x1, y2, x2):
"""Yields coordinates for a line from (y1, x1) to (y2, x2).
"""
# Bresenham's integer line algorithm, dispatched by slope.
dy = abs(y2 - y1)
dx = abs(x2 - x1)
# NOTE(review): the horizontal/vertical branches yield nothing when
# x1 > x2 (resp. y1 > y2) because the range is empty — confirm callers
# order the endpoints, unlike the diagonal branches which swap them.
if dy == 0: # Horizontal
gen = ((y1, x) for x in range(x1, x2 + 1))
elif dx == 0: # Vertical
gen = ((y, x1) for y in range(y1, y2 + 1))
elif dy < dx:
# Shallow slope: iterate in x, swapping endpoints so x increases.
gen = Widget._line_low(y2, x2, y1, x1) if x1 > x2 else Widget._line_low(y1, x1, y2, x2)
else:
# Steep slope: iterate in y, swapping endpoints so y increases.
gen = Widget._line_high(y2, x2, y1, x1) if y1 > y2 else Widget._line_high(y1, x1, y2, x2)
yield from gen
@staticmethod
def _line_low(y1, x1, y2, x2):
# Bresenham for |dy| < |dx|: step x by 1, step y by `yi` when the error flips.
dx = x2 - x1
dy, yi = (2 * (y2 - y1), 1) if y2 >= y1 else (2 * (y1 - y2), -1)
dif = dy - 2 * dx
delta = dy - dx
y = y1
for x in range(x1, x2 + 1):
yield y, x
if delta > 0:
y += yi
delta += dif
else:
delta += dy
@staticmethod
def _line_high(y1, x1, y2, x2):
# Bresenham for |dy| >= |dx|: step y by 1, step x by `xi` when the error flips.
dx, xi = (2 * (x2 - x1), 1) if x2 >= x1 else (2 * (x1 - x2), -1)
dy = y2 - y1
dif = dx - 2 * dy
delta = dx - dy
x = x1
for y in range(y1, y2 + 1):
yield y, x
if delta > 0:
x += xi
delta += dif
else:
delta += dx
@property
def has_border(self):
return bool(self.border_style)
def border(self, style="light", color=None):
"""
Draw a border on the edges of the widget.
Parameters
----------
style: optional
The style of the border, can be one of `nurses.widget.BORDER_STYLES`. (the default is "light")
color: optional
The color of the border. (the default is the widget's `color`)
"""
# Curses windows already have a `border` method, but UnicodeEncodeErrors seem to happen when called with the
# characters in BORDER_STYLES. So we add the border "by hand".
self.border_style = style
self.border_color = color
window = self.window
h, w = self.height - 1, self.width - 1
ul, ur, v, hor, ll, lr = BORDER_STYLES[style]
# NOTE(review): `color or self.color` treats an explicit color of 0
# (curses default pair) as "not given" — confirm that's intended.
color = color or self.color
window.addstr(0, 0, ul, color)
window.addstr(0, w, ur, color)
window.addstr(h, 0, ll, color)
window.addstr(h, w, lr, color)
for x in range(1, w):
window.addstr(0, x, hor, color)
window.addstr(h, x, hor, color)
for y in range(1, h):
window.addstr(y, 0, v, color)
window.addstr(y, w, v, color)
def dispatch(self, key):
# Offer the key to children front-most (last-drawn) first; a truthy on_press
# or a handled dispatch deeper in the subtree stops propagation.
for widget in reversed(self.children):
if widget.on_press(key) or widget.dispatch(key):
return True
def on_press(self, key):
"""
Called when a key is pressed and no widgets above this widget have handled the press.
A press is handled when a widget's `on_press` method returns True.
"""
# Cooperative dispatch: pass along the MRO if a mixin defines on_press, else ignore.
try:
return super().on_press(key)
except AttributeError:
pass
def update_color(self, color):
# Set the widget's default color and apply it as the window's background attribute.
self.color = color
self.window.attrset(color)
|
from collections import defaultdict
import curses
from ..observable import Observable
BORDER_STYLES = {
"light" : "┌┐│─└┘",
"heavy" : "┏┓┃━┗┛",
"double": "╔╗║═╚╝",
"curved": "╭╮│─╰╯",
}
class BindMagic:
"""
Decorator that binds a method to attributes. Attributes in `attrs` that aren't Observable will be redefined as Observables.
Warning! This is a very abusive way to use the automatic calling of `__set_name__` in the descriptor protocol.
"""
def __init__(self, *attrs):
self.attrs = attrs
def __set_name__(self, owner, name):
for attr in self.attrs:
for base in owner.__mro__:
if hasattr(base, attr):
base_attr = base.__dict__[attr]
if isinstance(base_attr, Observable):
prop = base_attr
else:
setattr(owner, attr, Observable(base_attr)) # default value of Observable will come from base.__dict__
prop = owner.__dict__[attr]
prop.__set_name__(owner, attr)
break
else:
setattr(owner, attr, Observable())
prop = owner.__dict__[attr]
prop.__set_name__(owner, attr)
prop.bind(owner.__name__, name)
setattr(owner, name, self.func)
def __call__(self, func):
self.func = func
return self
class Observer(type):
"""
This metaclass simply drops the `bind_to` decorator into the class dict.
`bind_to` allows one to quickly bind functions to attributes in the class body - these attributes
will be turned into Observables by the decorator.
"""
def __prepare__(name, bases):
return { "bind_to": BindMagic }
def __new__(meta, name, bases, methods):
del methods["bind_to"]
return super().__new__(meta, name, bases, methods)
class Widget(metaclass=Observer):
"""
The base window for nurses. A fancy wrapper around a curses window.
Parameters
----------
top, left, height, width: optional, positional only
Upper and left-most coordinates of widget relative to parent, and dimensions of the widget.
(the defaults are 0, 0, parent's max height, parent's max width)
Other Parameters
----------------
color: optional
A curses color_pair, the default color of this widget. (the default is `curses.color_pair(0)`)
pos_hint, size_hint: optional
If a pos_hint or size_hint are given they will override any given pos or size arguments. Hints are expected to be
2-tuples of numbers or None. Fractional arguments are interpreted as percentage of parent, and parent width or
height will be added to negative arguments. (e.g., `size_hint=(.5, None)` means widget will be half as tall as parent
and the width will come from the `width` arg.)
transparent: optional
If true, widget will overlay other widgets instead of overwrite them (whitespace will be "see-through"). (the default is `False`)
Notes
-----
Coordinates are (y, x) (both a curses and a numpy convention) with y being vertical and increasing as you move down
and x being horizontal and increasing as you move right. Top-left corner is (0, 0)
If some part of the widget moves out-of-bounds of the screen only the part that overlaps the screen will be drawn.
Widget size is limited by screen size. (but ArrayPad isn't)
"""
types = { } # Registry of subclasses of Widget
color = 0
parent = None
transparent = False
border_style = None
border_color = None
pos_hint = None, None
size_hint = None, None
def __init_subclass__(cls):
Widget.types[cls.__name__] = cls # Register subclasses
if not cls.on_press.__doc__:
cls.on_press.__doc__ = Widget.on_press.__doc__
def __init__(self, *args, **kwargs):
self.children = [ ]
self.group = defaultdict(list)
self.window = None
# Assign default values if len(args) < 4
top, left, height, width, *rest = args + (None, None) if len(args) == 2 else args or (0, 0, None, None)
self.top = top
self.left = left
self.height = height
self.width = width
for attr in tuple(kwargs):
# This allows one to set class attributes with keyword-arguments. TODO: Document this.
if hasattr(self, attr):
setattr(self, attr, kwargs.pop(attr))
super().__init__(*rest, **kwargs)
__init__.__text_signature__ = (
"($self, top=0, left=0, height=None, width=None, /, "
"color=0, parent=None, transparent=False, border_style=None, border_color=None, "
"pos_hint=(None, None), size_hint=(None, None), **kwargs)"
)
def getter(self, name, getter):
"""
Replace an attribute lookup with a no-argument function call.
::Warning:: This modifies the class dictionary, replacing any non-Observable attribute with an Observable.
"""
observable = getattr(type(self), name, None)
if not isinstance(observable, Observable):
setattr(type(self), name, observable := Observable(observable))
observable.getters[self] = getter
@bind_to("top")
def _set_pos_hint_y(self):
self.pos_hint = None, self.pos_hint[1]
@bind_to("left")
def _set_pos_hint_x(self):
self.pos_hint = self.pos_hint[0], None
@bind_to("height")
def _set_size_hint_y(self):
self.size_hint = None, self.size_hint[1]
@bind_to("width")
def _set_size_hint_x(self):
self.size_hint = self.size_hint[0], None
def update_geometry(self):
"""
Set or reset the widget's geometry based on size or pos hints if they exist.
Notes
-----
This should only be called by the widget's parent (usually when calling the parent's `add_widget` method).
This will immediately return if there isn't a root widget, since screen size can't be determined yet.
"""
if self.root is None:
return
border = int(self.parent.has_border)
h, w = self.parent.height - 2 * border, self.parent.width - 2 * border
top, left = self.pos_hint
height, width = self.size_hint
if top is not None:
self.top = self.convert(top, h)
if left is not None:
self.left = self.convert(left, w)
if height is not None:
self.height = self.convert(height, h)
if width is not None:
self.width = self.convert(width, w)
if self.height is None:
self.height = h
if self.width is None:
self.width = w
self.pos_hint = top, left
self.size_hint = height, width
if self.window is None:
self.window = curses.newwin(self.height, self.width + 1)
self.update_color(self.color)
if self.has_border:
self.border(self.border_style, self.border_color)
for child in self.children:
if child is not None:
child.update_geometry()
@bind_to("height", "width")
def _resize(self):
window = self.window
if not window:
return
if self.has_border: # Erase the right-most, lower-most border in case widget expands
h, w = window.getmaxyx()
h, w = h - 1, w - 2
color = self.color
ch = self.default_character
window.addstr(0, w, ch, color)
window.addstr(h, w, ch, color)
for x in range(1, w):
window.addstr(h, x, ch, color)
for y in range(1, h):
window.addstr(y, w, ch, color)
window.resize(self.height, self.width + 1)
self.update_color(self.color)
if self.has_border:
self.border(self.border_style, self.border_color)
@property
def bottom(self):
return self.top + self.height
@property
def right(self):
return self.left + self.width
@property
def root(self):
if self.parent is None:
return None
return self.parent.root
def walk(self, start=None):
if start is None:
start = self.root
for child in start.children:
yield from self.walk(child)
yield start
@property
def is_in_front(self):
return self.parent and self.parent.children[-1] is self
@property
def is_in_back(self):
return self.parent and self.parent.children[0] is self
def pull_to_front(self, widget):
"""Given a widget or an index of a widget, widget is moved to top of widget stack (so it is drawn last).
"""
widgets = self.children
if isinstance(widget, int):
widgets.append(widgets.pop(widget))
else:
widgets.remove(widget)
widgets.append(widget)
def push_to_back(self, widget):
"""Given a widget or an index of a widget, widget is moved to bottom of widget stack (so it is drawn first).
"""
widgets = self.children
if isinstance(widget, int):
widgets.insert(0, widgets.pop(widget))
else:
widgets.remove(widget)
widgets.insert(0, widget)
def add_widget(self, widget):
self.children.append(widget)
widget.parent = self
widget.update_geometry()
def remove_widget(self, widget):
self.children.remove(widget)
def new_widget(self, *args, group=None, create_with=None, **kwargs):
"""
Create a new widget and append to widget stack. Can group widgets if providing a hashable group.
To create a new subclassed widget use `create_with=MyWidget` or `create_with="MyWidget"` (pass the class or the class' name).
"""
if create_with is None:
create_with = Widget
elif isinstance(create_with, str):
create_with = Widget.types[create_with]
widget = create_with(*args, parent=self, **kwargs)
self.add_widget(widget)
if group is not None:
self.group[group].append(widget)
return widget
@property
def overlay(self):
return self.window.overlay if self.transparent else self.window.overwrite
def refresh(self):
"""Redraw children's windows.
"""
# Notably, we don't use curses.panels as they aren't available for windows-curses...
# ...upside is we don't error when moving a widget off-screen.
border = int(self.has_border)
h, w = self.height, self.width
for widget in self.children:
if widget is None:
continue
widget.refresh()
y, x = widget.top, widget.left
src_t, des_t = (-y, border) if y < 0 else (0, y + border)
src_l, des_l = (-x, border) if x < 0 else (0, x + border)
des_h = min(h - 1, des_t + widget.height)
des_w = min(w - 1, des_l + widget.width - 1) # -1 compensates for the extra width of widget's window
widget.overlay(self.window, src_t, src_l, des_t, des_l, des_h, des_w) # FIXME: This is causing an error on WSL terminal.
@staticmethod
def convert(value, bounds):
"""Utility function that converts a fractional or negative value to an absolute one.
"""
if isinstance(value, float):
value = int(value * bounds)
return value + bounds if value < 0 else value
@staticmethod
def line(y1, x1, y2, x2):
"""Yields coordinates for a line from (y1, x1) to (y2, x2).
"""
dy = abs(y2 - y1)
dx = abs(x2 - x1)
if dy == 0: # Horizontal
gen = ((y1, x) for x in range(x1, x2 + 1))
elif dx == 0: # Vertical
gen = ((y, x1) for y in range(y1, y2 + 1))
elif dy < dx:
gen = Widget._line_low(y2, x2, y1, x1) if x1 > x2 else Widget._line_low(y1, x1, y2, x2)
else:
gen = Widget._line_high(y2, x2, y1, x1) if y1 > y2 else Widget._line_high(y1, x1, y2, x2)
yield from gen
@staticmethod
def _line_low(y1, x1, y2, x2):
dx = x2 - x1
dy, yi = (2 * (y2 - y1), 1) if y2 >= y1 else (2 * (y1 - y2), -1)
dif = dy - 2 * dx
delta = dy - dx
y = y1
for x in range(x1, x2 + 1):
yield y, x
if delta > 0:
y += yi
delta += dif
else:
delta += dy
@staticmethod
def _line_high(y1, x1, y2, x2):
dx, xi = (2 * (x2 - x1), 1) if x2 >= x1 else (2 * (x1 - x2), -1)
dy = y2 - y1
dif = dx - 2 * dy
delta = dx - dy
x = x1
for y in range(y1, y2 + 1):
yield y, x
if delta > 0:
x += xi
delta += dif
else:
delta += dx
@property
def has_border(self):
return bool(self.border_style)
def border(self, style="light", color=None):
"""
Draw a border on the edges of the widget.
Parameters
----------
style: optional
The style of the border, can be one of `nurses.widget.BORDER_STYLES`. (the default is "light")
color: optional
The color of the border. (the default is the widget's `color`)
"""
# Curses windows already have a `border` method, but UnicodeEncodeErrors seem to happen when called with the
# characters in BORDER_STYLES. So we add the border "by hand".
self.border_style = style
self.border_color = color
window = self.window
h, w = self.height - 1, self.width - 1
ul, ur, v, hor, ll, lr = BORDER_STYLES[style]
color = color or self.color
window.addstr(0, 0, ul, color)
window.addstr(0, w, ur, color)
window.addstr(h, 0, ll, color)
window.addstr(h, w, lr, color)
for x in range(1, w):
window.addstr(0, x, hor, color)
window.addstr(h, x, hor, color)
for y in range(1, h):
window.addstr(y, 0, v, color)
window.addstr(y, w, v, color)
def dispatch(self, key):
for widget in reversed(self.children):
if widget.on_press(key) or widget.dispatch(key):
return True
def on_press(self, key):
"""
Called when a key is pressed and no widgets above this widget have handled the press.
A press is handled when a widget's `on_press` method returns True.
"""
try:
return super().on_press(key)
except AttributeError:
pass
def update_color(self, color):
    """Make `color` the widget's default color and apply it to the underlying curses window."""
    self.window.attrset(color)
    self.color = color
|
en
| 0.800076
|
Decorator that binds a method to attributes. Attributes in `attrs` that aren't Observable will be redefined as Observables. Warning! This is a very abusive way to use the automatic calling of `__set_name__` in the descriptor protocol. # default value of Observable will come from base.__dict__ This metaclass simply drops the `bind_to` decorator into the class dict. `bind_to` allows one to quickly bind functions to attributes in the class body - these attributes will be turned into Observables by the decorator. The base window for nurses. A fancy wrapper around a curses window. Parameters ---------- top, left, height, width: optional, positional only Upper and left-most coordinates of widget relative to parent, and dimensions of the widget. (the defaults are 0, 0, parent's max height, parent's max width) Other Parameters ---------------- color: optional A curses color_pair, the default color of this widget. (the default is `curses.color_pair(0)`) pos_hint, size_hint: optional If a pos_hint or size_hint are given they will override any given pos or size arguments. Hints are expected to be 2-tuples of numbers or None. Fractional arguments are interpreted as percentage of parent, and parent width or height will be added to negative arguments. (e.g., `size_hint=(.5, None)` means widget will be half as tall as parent and the width will come from the `width` arg.) transparent: optional If true, widget will overlay other widgets instead of overwrite them (whitespace will be "see-through"). (the default is `False`) Notes ----- Coordinates are (y, x) (both a curses and a numpy convention) with y being vertical and increasing as you move down and x being horizontal and increasing as you move right. Top-left corner is (0, 0) If some part of the widget moves out-of-bounds of the screen only the part that overlaps the screen will be drawn. Widget size is limited by screen size. 
(but ArrayPad isn't) # Registry of subclasses of Widget # Register subclasses # Assign default values if len(args) < 4 # This allows one to set class attributes with keyword-arguments. TODO: Document this. Replace an attribute lookup with a no-argument function call. ::Warning:: This modifies the class dictionary, replacing any non-Observable attribute with an Observable. Set or reset the widget's geometry based on size or pos hints if they exist. Notes ----- This should only be called by the widget's parent (usually when calling the parent's `add_widget` method). This will immediately return if there isn't a root widget, since screen size can't be determined yet. # Erase the right-most, lower-most border in case widget expands Given a widget or an index of a widget, widget is moved to top of widget stack (so it is drawn last). Given a widget or an index of a widget, widget is moved to bottom of widget stack (so it is drawn first). Create a new widget and append to widget stack. Can group widgets if providing a hashable group. To create a new subclassed widget use `create_with=MyWidget` or `create_with="MyWidget"` (pass the class or the class' name). Redraw children's windows. # Notably, we don't use curses.panels as they aren't available for windows-curses... # ...upside is we don't error when moving a widget off-screen. # -1 compensates for the extra width of widget's window # FIXME: This is causing an error on WSL terminal. Utility function that converts a fractional or negative value to an absolute one. Yields coordinates for a line from (y1, x1) to (y2, x2). # Horizontal # Vertical Draw a border on the edges of the widget. Parameters ---------- style: optional The style of the border, can be one of `nurses.widget.BORDER_STYLES`. (the default is "light") color: optional The color of the border. 
(the default is the widget's `color`) # Curses windows already have a `border` method, but UnicodeEncodeErrors seem to happen when called with the # characters in BORDER_STYLES. So we add the border "by hand". Called when a key is pressed and no widgets above this widget have handled the press. A press is handled when a widget's `on_press` method returns True.
| 3.177374
| 3
|
pydoccano/features.py
|
evstratbg/pydoccano
| 2
|
6629073
|
from requests import Session
from .base_api import BaseApi
class Features(BaseApi):
    """Accessor for the versioned Doccano ``features`` REST endpoint."""

    def __init__(self, base_url: str, session: Session, version: str = 'v1'):
        """Keep the HTTP session and build the versioned endpoint path once."""
        super().__init__(base_url)
        self.version = version
        self.session = session
        self.base_endpoint = f"{version}/features"

    def get(self):
        """Issue a GET against ``<version>/features`` via the BaseApi helper."""
        return self._get(endpoint=self.base_endpoint)
|
from requests import Session
from .base_api import BaseApi
class Features(BaseApi):
    """API wrapper for the versioned ``features`` endpoint."""
    def __init__(self, base_url: str, session: Session, version='v1'):
        # Store the session and version so the endpoint path is built once.
        super().__init__(base_url)
        self.session = session
        self.version = version
        self.base_endpoint = f"{self.version}/features"
    def get(self):
        # GET "<version>/features" through the BaseApi request helper.
        return self._get(endpoint=self.base_endpoint)
|
none
| 1
| 2.620952
| 3
|
|
pixiedust/utils/sparkJobProgressMonitor.py
|
jordangeorge/pixiedust
| 0
|
6629074
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.utils.template import PixiedustTemplateEnvironment
from IPython.core.getipython import *
from IPython.display import display, HTML, Javascript
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.environment import Environment
from functools import reduce
import uuid
import json
import sys
import traceback
import pixiedust
from IPython.core.getipython import get_ipython
from collections import OrderedDict
from threading import Thread, Lock, Event
import time
# Module-level logger.
myLogger = pixiedust.getLogger(__name__)
# Shared Jinja template environment used to render the monitor's JS/HTML snippets.
_env = PixiedustTemplateEnvironment()
# Lazily-created singleton monitor; see enableSparkJobProgressMonitor().
progressMonitor = None
# Guard flag so only one background creation thread is ever started.
loadingProgressMonitor = False
def enableSparkJobProgressMonitor():
    """Lazily create the global Spark job progress monitor.

    No-op on DSX.  Creation runs on a daemon thread (presumably because
    registering the Scala listener via the %%scala magic is slow -- see
    SparkJobProgressMonitor.addSparkListener).  Idempotent through the
    ``progressMonitor``/``loadingProgressMonitor`` globals.
    """
    if Environment.isRunningOnDSX:
        print("Spark Job Progress Monitoring cannot be started on DSX")
        return
    global progressMonitor, loadingProgressMonitor
    if progressMonitor is None and not loadingProgressMonitor:
        loadingProgressMonitor = True
        def startSparkJobProgressMonitor():
            # Runs on the background thread; publishes the singleton when built.
            global progressMonitor
            progressMonitor = SparkJobProgressMonitor()
        t = Thread(target=startSparkJobProgressMonitor)
        t.daemon = True
        t.start()
        # Fixed typo in the user-facing message ("Succesfully" -> "Successfully").
        print("Successfully enabled Spark Job Progress Monitor")
    else:
        print("Spark Job Progress Monitor already enabled")
class SparkJobProgressMonitorOutput(Thread):
    """Daemon thread that receives Spark listener events and renders progress
    updates into the notebook as Javascript.

    Events arrive through sendChannel()/printStuff(), are coalesced into
    ``progressData`` under ``lock``, and the run() loop periodically flushes
    them to the frontend.
    """
    class Java:
        # Py4J marker interface: lets the Scala listener call back into this object.
        implements = ["com.ibm.pixiedust.PixiedustOutputListener"]
    def __init__(self):
        super(SparkJobProgressMonitorOutput,self).__init__()
        self.prefix = None  # unique id of the current cell run; set by onRunCell()
        self.lock = Lock()  # guards progressData
        self.triggerEvent = Event()  # wakes run() when new events are queued
        self.daemon = True
        self.progressData = OrderedDict()  # pending events keyed by "<channel>-<id>"
    def getUpdaterId(self):
        # DOM id of the updater element for the current cell run.
        return "updaterId{0}".format(self.prefix)
    def getProgressHTMLId(self):
        # DOM id of the progress placeholder created by onRunCell().
        return "progress{0}".format(self.prefix)
    def run(self):
        """Render loop: take the pending event batch, then emit one Javascript blob per batch."""
        while True:
            self.triggerEvent.wait()
            with self.lock:
                self.triggerEvent.clear()
                if bool(self.progressData):
                    progressData = self.progressData
                    self.progressData = OrderedDict()
                else:
                    progressData = OrderedDict()
            if bool(progressData):
                js = ""
                for data in progressData.values():
                    channel = data["channel"]
                    if channel=="jobStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/addJobTab.js").render(
                            prefix=self.prefix, data=data, overalNumTasks=reduce(lambda x,y:x+y["numTasks"], data["stageInfos"], 0)
                        )
                    elif channel=="stageSubmitted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Submitted", host=None
                        )
                    elif channel=="taskStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/taskStart.js").render( prefix=self.prefix, data=data, increment = data["increment"] )
                        js += "\n"
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageId"], status="Running",
                            host="{0}({1})".format(data["taskInfo"]["executorId"],data["taskInfo"]["host"] )
                        )
                    elif channel=="stageCompleted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Completed", host=None
                        )
                    elif channel=="jobEnd":
                        js += _env.getTemplate("sparkJobProgressMonitor/jobEnded.js").render(
                            prefix=self.prefix, jobId=data["jobId"]
                        )
                    js += "\n"
                display(Javascript(js))
            time.sleep(0.5)  # throttle frontend updates
    def display_with_id(self, obj, display_id, update=False):
        """Create a new display with an id"""
        ip = get_ipython()
        if hasattr(ip, "kernel"):
            data, md = ip.display_formatter.format(obj)
            content = {
                'data': data,
                'metadata': md,
                'transient': {'display_id': display_id},
            }
            msg_type = 'update_display_data' if update else 'display_data'
            ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header)
        else:
            # No kernel available: fall back to a regular display call.
            display(obj)
    def printOutput(self, s):
        # Part of the PixiedustOutputListener contract: plain-text passthrough.
        print(s)
    def sendChannel(self, channel, data):
        # Entry point called from the Scala listener for structured events.
        self.printStuff(channel, data)
    def onRunCell(self):
        # New cell execution: allocate a fresh prefix and its placeholder markup.
        self.prefix = str(uuid.uuid4())[:8]
        #Create the place holder area for the progress monitor
        self.display_with_id(
            HTML( _env.getTemplate("sparkJobProgressMonitor/pmLayout.html").render( prefix = self.prefix)),self.getProgressHTMLId()
        )
    def printStuff(self,channel, s):
        """Parse one listener event (JSON string ``s``) and queue it for rendering.

        Repeated events with the same key are coalesced; ``increment`` counts
        how many were merged.
        """
        try:
            data = json.loads(s)
            data["channel"] = channel
            data["increment"] = 1
            key = None
            if channel=="jobStart":
                key = "{0}-{1}".format(channel,data["jobId"])
            elif channel=="stageSubmitted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="taskStart":
                key = "{0}-{1}".format(channel,data["stageId"])
            elif channel=="stageCompleted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="jobEnd":
                key = "{0}-{1}".format(channel,data["jobId"])
            if key:
                with self.lock:
                    if key in self.progressData:
                        data["increment"] = self.progressData[key]["increment"] + 1
                    self.progressData[key] = data
                self.triggerEvent.set()
        except:
            print("Unexpected error: {0} - {1} : {2}".format(channel, s, sys.exc_info()[0]))
            traceback.print_exc()
class SparkJobProgressMonitor(object):
    """Wires a Scala Spark listener (compiled via the %%scala magic) to a
    SparkJobProgressMonitorOutput thread that renders job progress in the
    notebook."""
    def __init__(self):
        self.monitorOutput = None  # created in addSparkListener() if the listener is available
        self.addSparkListener()
        self.displayRuns={}  # display context id -> cell-run prefix
        self.newDisplayRun = False
    def onDisplayRun(self, contextId):
        """Associate progress output with the display run identified by ``contextId``."""
        if contextId is None or self.monitorOutput is None:
            self.newDisplayRun=True
            return
        cellContext = self.displayRuns.get( contextId )
        if cellContext and cellContext != self.monitorOutput.prefix:
            #switch the cell context if not a new display Run
            if self.newDisplayRun:
                self.displayRuns.pop( contextId, None )
            else:
                self.monitorOutput.prefix = cellContext
        elif cellContext is None:
            self.displayRuns[contextId] = self.monitorOutput.prefix
        if cellContext:
            # Clear any stale progress tabs for this cell context.
            display(Javascript(_env.getTemplate("sparkJobProgressMonitor/emptyTabs.js").render(prefix=cellContext)))
        self.newDisplayRun=False
    def addSparkListener(self):
        """Compile/register the Scala listener and connect it to the output thread.

        Logs and re-raises any failure.
        """
        try:
            get_ipython().run_cell_magic(
                "scala",
                "cl=sparkProgressMonitor noSqlContext",
                _env.getTemplate("sparkJobProgressMonitor/addSparkListener.scala").render()
            )
            listener = get_ipython().user_ns.get("__pixiedustSparkListener")
            #access the listener object from the namespace
            if listener:
                self.monitorOutput = SparkJobProgressMonitorOutput()
                self.monitorOutput.start()
                #Add pre_run_cell event handler
                get_ipython().events.register('pre_run_cell',lambda: self.monitorOutput.onRunCell() )
                listener.setChannelListener( self.monitorOutput )
        except:
            myLogger.exception("Unexpected error while adding Spark Listener")
            raise
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.utils.template import PixiedustTemplateEnvironment
from IPython.core.getipython import *
from IPython.display import display, HTML, Javascript
from pixiedust.utils.shellAccess import ShellAccess
from pixiedust.utils.environment import Environment
from functools import reduce
import uuid
import json
import sys
import traceback
import pixiedust
from IPython.core.getipython import get_ipython
from collections import OrderedDict
from threading import Thread, Lock, Event
import time
# Module-level logger.
myLogger = pixiedust.getLogger(__name__)
# Shared Jinja template environment used to render the monitor's JS/HTML snippets.
_env = PixiedustTemplateEnvironment()
# Lazily-created singleton monitor; see enableSparkJobProgressMonitor().
progressMonitor = None
# Guard flag so only one background creation thread is ever started.
loadingProgressMonitor = False
def enableSparkJobProgressMonitor():
    """Lazily create the global Spark job progress monitor on a daemon thread.

    No-op on DSX; idempotent through the ``progressMonitor`` and
    ``loadingProgressMonitor`` globals.
    """
    if Environment.isRunningOnDSX:
        print("Spark Job Progress Monitoring cannot be started on DSX")
        return
    global progressMonitor, loadingProgressMonitor
    if progressMonitor is None and not loadingProgressMonitor:
        loadingProgressMonitor = True
        def startSparkJobProgressMonitor():
            # Runs on the background thread; publishes the singleton when built.
            global progressMonitor
            progressMonitor = SparkJobProgressMonitor()
        t = Thread(target=startSparkJobProgressMonitor)
        t.daemon = True
        t.start()
        # NOTE(review): "Succesfully" is misspelled in this user-facing message.
        print("Succesfully enabled Spark Job Progress Monitor")
    else:
        print("Spark Job Progress Monitor already enabled")
class SparkJobProgressMonitorOutput(Thread):
    """Daemon thread that receives Spark listener events and renders progress
    updates into the notebook as Javascript.

    Events arrive through sendChannel()/printStuff(), are coalesced into
    ``progressData`` under ``lock``, and the run() loop periodically flushes
    them to the frontend.
    """
    class Java:
        # Py4J marker interface: lets the Scala listener call back into this object.
        implements = ["com.ibm.pixiedust.PixiedustOutputListener"]
    def __init__(self):
        super(SparkJobProgressMonitorOutput,self).__init__()
        self.prefix = None  # unique id of the current cell run; set by onRunCell()
        self.lock = Lock()  # guards progressData
        self.triggerEvent = Event()  # wakes run() when new events are queued
        self.daemon = True
        self.progressData = OrderedDict()  # pending events keyed by "<channel>-<id>"
    def getUpdaterId(self):
        # DOM id of the updater element for the current cell run.
        return "updaterId{0}".format(self.prefix)
    def getProgressHTMLId(self):
        # DOM id of the progress placeholder created by onRunCell().
        return "progress{0}".format(self.prefix)
    def run(self):
        """Render loop: take the pending event batch, then emit one Javascript blob per batch."""
        while True:
            self.triggerEvent.wait()
            with self.lock:
                self.triggerEvent.clear()
                if bool(self.progressData):
                    progressData = self.progressData
                    self.progressData = OrderedDict()
                else:
                    progressData = OrderedDict()
            if bool(progressData):
                js = ""
                for data in progressData.values():
                    channel = data["channel"]
                    if channel=="jobStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/addJobTab.js").render(
                            prefix=self.prefix, data=data, overalNumTasks=reduce(lambda x,y:x+y["numTasks"], data["stageInfos"], 0)
                        )
                    elif channel=="stageSubmitted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Submitted", host=None
                        )
                    elif channel=="taskStart":
                        js += _env.getTemplate("sparkJobProgressMonitor/taskStart.js").render( prefix=self.prefix, data=data, increment = data["increment"] )
                        js += "\n"
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageId"], status="Running",
                            host="{0}({1})".format(data["taskInfo"]["executorId"],data["taskInfo"]["host"] )
                        )
                    elif channel=="stageCompleted":
                        js += _env.getTemplate("sparkJobProgressMonitor/updateStageStatus.js").render(
                            prefix=self.prefix, stageId=data["stageInfo"]["stageId"], status="Completed", host=None
                        )
                    elif channel=="jobEnd":
                        js += _env.getTemplate("sparkJobProgressMonitor/jobEnded.js").render(
                            prefix=self.prefix, jobId=data["jobId"]
                        )
                    js += "\n"
                display(Javascript(js))
            time.sleep(0.5)  # throttle frontend updates
    def display_with_id(self, obj, display_id, update=False):
        """Create a new display with an id"""
        ip = get_ipython()
        if hasattr(ip, "kernel"):
            data, md = ip.display_formatter.format(obj)
            content = {
                'data': data,
                'metadata': md,
                'transient': {'display_id': display_id},
            }
            msg_type = 'update_display_data' if update else 'display_data'
            ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content, parent=ip.parent_header)
        else:
            # No kernel available: fall back to a regular display call.
            display(obj)
    def printOutput(self, s):
        # Part of the PixiedustOutputListener contract: plain-text passthrough.
        print(s)
    def sendChannel(self, channel, data):
        # Entry point called from the Scala listener for structured events.
        self.printStuff(channel, data)
    def onRunCell(self):
        # New cell execution: allocate a fresh prefix and its placeholder markup.
        self.prefix = str(uuid.uuid4())[:8]
        #Create the place holder area for the progress monitor
        self.display_with_id(
            HTML( _env.getTemplate("sparkJobProgressMonitor/pmLayout.html").render( prefix = self.prefix)),self.getProgressHTMLId()
        )
    def printStuff(self,channel, s):
        """Parse one listener event (JSON string ``s``) and queue it for rendering.

        Repeated events with the same key are coalesced; ``increment`` counts
        how many were merged.
        """
        try:
            data = json.loads(s)
            data["channel"] = channel
            data["increment"] = 1
            key = None
            if channel=="jobStart":
                key = "{0}-{1}".format(channel,data["jobId"])
            elif channel=="stageSubmitted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="taskStart":
                key = "{0}-{1}".format(channel,data["stageId"])
            elif channel=="stageCompleted":
                key = "{0}-{1}".format(channel,data["stageInfo"]["stageId"])
            elif channel=="jobEnd":
                key = "{0}-{1}".format(channel,data["jobId"])
            if key:
                with self.lock:
                    if key in self.progressData:
                        data["increment"] = self.progressData[key]["increment"] + 1
                    self.progressData[key] = data
                self.triggerEvent.set()
        except:
            print("Unexpected error: {0} - {1} : {2}".format(channel, s, sys.exc_info()[0]))
            traceback.print_exc()
class SparkJobProgressMonitor(object):
    """Wires a Scala Spark listener (compiled via the %%scala magic) to a
    SparkJobProgressMonitorOutput thread that renders job progress in the
    notebook."""
    def __init__(self):
        self.monitorOutput = None  # created in addSparkListener() if the listener is available
        self.addSparkListener()
        self.displayRuns={}  # display context id -> cell-run prefix
        self.newDisplayRun = False
    def onDisplayRun(self, contextId):
        """Associate progress output with the display run identified by ``contextId``."""
        if contextId is None or self.monitorOutput is None:
            self.newDisplayRun=True
            return
        cellContext = self.displayRuns.get( contextId )
        if cellContext and cellContext != self.monitorOutput.prefix:
            #switch the cell context if not a new display Run
            if self.newDisplayRun:
                self.displayRuns.pop( contextId, None )
            else:
                self.monitorOutput.prefix = cellContext
        elif cellContext is None:
            self.displayRuns[contextId] = self.monitorOutput.prefix
        if cellContext:
            # Clear any stale progress tabs for this cell context.
            display(Javascript(_env.getTemplate("sparkJobProgressMonitor/emptyTabs.js").render(prefix=cellContext)))
        self.newDisplayRun=False
    def addSparkListener(self):
        """Compile/register the Scala listener and connect it to the output thread.

        Logs and re-raises any failure.
        """
        try:
            get_ipython().run_cell_magic(
                "scala",
                "cl=sparkProgressMonitor noSqlContext",
                _env.getTemplate("sparkJobProgressMonitor/addSparkListener.scala").render()
            )
            listener = get_ipython().user_ns.get("__pixiedustSparkListener")
            #access the listener object from the namespace
            if listener:
                self.monitorOutput = SparkJobProgressMonitorOutput()
                self.monitorOutput.start()
                #Add pre_run_cell event handler
                get_ipython().events.register('pre_run_cell',lambda: self.monitorOutput.onRunCell() )
                listener.setChannelListener( self.monitorOutput )
        except:
            myLogger.exception("Unexpected error while adding Spark Listener")
            raise
|
en
| 0.676955
|
# ------------------------------------------------------------------------------- # Copyright IBM Corp. 2017 # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------- Create a new display with an id #Create the place holder area for the progress monitor #switch the cell context if not a new display Run #access the listener object from the namespace #Add pre_run_cell event handler
| 1.493569
| 1
|
deploy/unet/app.py
|
SchiffFlieger/semantic-segmentation-master-thesis
| 1
|
6629075
|
import os
import glob
import math
import cv2
import numpy as np
from tensorflow.keras.models import load_model
# Edge length of the tile fed to the network (presumably U-Net's 572x572
# valid-padding input -- TODO confirm against the model definition).
INPUT_TILE_SIZE = 572
# Edge length of the segmentation tile the network produces per input tile.
PREDICTION_TILE_SIZE = 388
# color values for the segmentation categories
LABEL_RGB_VALUES = [
    (3, 0, 208),  # buildings
    (240, 126, 11),  # water
    (40, 171, 44),  # forest
    (193, 193, 193),  # traffic
    (39, 255, 154),  # urban greens
    (132, 240, 235),  # agriculture
]
def one_hot_to_rgb(prediction, color_palette=None):
    """Turn a per-pixel class-score map of shape (H, W, C) into an RGB image.

    Each pixel receives the palette row of its highest-scoring class.
    Defaults to LABEL_RGB_VALUES; returns a float array of shape (H, W, 3).
    """
    if np.ndim(prediction) != 3:
        raise ValueError("prediction should have 3 dimensions")
    palette = np.array(LABEL_RGB_VALUES) if color_palette is None else color_palette
    winners = np.argmax(prediction, axis=2)
    rgb = np.zeros(winners.shape[:2] + (3,))
    for label, colour in enumerate(palette):
        rgb[winners == label] = colour
    return rgb
def map_to_color(color_palette):
    """Return a function applying ``color_palette`` and dropping the last
    channel of its output (presumably RGBA -> RGB -- TODO confirm callers)."""
    def strip_last_channel(values):
        return color_palette(values)[:, :-1]
    return strip_last_channel
def split_to_tiles(image):
    """Cut ``image`` into overlapping INPUT_TILE_SIZE tiles stepping by
    PREDICTION_TILE_SIZE; the borders come from mirror padding."""
    margin = INPUT_TILE_SIZE - PREDICTION_TILE_SIZE
    padded = add_mirrored_edges(image, margin // 2)
    rows = image.shape[0] // PREDICTION_TILE_SIZE
    cols = image.shape[1] // PREDICTION_TILE_SIZE
    return [
        padded[r * PREDICTION_TILE_SIZE:(r + 1) * PREDICTION_TILE_SIZE + margin,
               c * PREDICTION_TILE_SIZE:(c + 1) * PREDICTION_TILE_SIZE + margin, :]
        for r in range(rows)
        for c in range(cols)
    ]
def merge_tiles(tiles):
    """Reassemble a square, column-major list of (tile, tile, 6) arrays into
    one image: tiles[y * n + x] is stacked vertically into column x, and the
    columns are joined horizontally.  Returns a float array."""
    per_side = int(math.sqrt(len(tiles)))
    columns = [
        np.concatenate(
            # The float zeros seed keeps the result's float dtype, as before.
            [np.zeros((0, PREDICTION_TILE_SIZE, 6))]
            + [np.asarray(tiles[y * per_side + x]) for y in range(per_side)],
            axis=0,
        )
        for x in range(per_side)
    ]
    if not columns:
        return np.zeros((0, 0, 6))
    return np.concatenate(columns, axis=1)
def add_mirrored_edges(image, context=100):
    """Pad ``image`` on every side with a mirrored copy of its border strips.

    Top/bottom strips are added first, then left/right strips of the already
    vertically padded image.  Uses NumPy negative-step slicing instead of the
    previous cv2.flip calls, which removes the OpenCV dependency for these
    simple flips while producing identical values.

    Args:
        image: 3-D array (height, width, channels).
        context: pad width in pixels on each side.

    Returns:
        Array of shape (height + 2*context, width + 2*context, channels).
    """
    height = image.shape[0]
    width = image.shape[1]
    top_edge = image[0:context, :, :][::-1, :, :]  # vertical flip
    bottom_edge = image[height - context:height, :, :][::-1, :, :]
    image = np.concatenate([top_edge, image, bottom_edge], axis=0)
    left_edge = image[:, 0:context, :][:, ::-1, :]  # horizontal flip
    right_edge = image[:, width - context:width, :][:, ::-1, :]
    image = np.concatenate([left_edge, image, right_edge], axis=1)
    return image
def predict(model, image):
    """Normalize ``image``, run the model over it tile by tile, and return
    the stitched RGB segmentation map."""
    normalized = image / 255
    batch = np.array(split_to_tiles(normalized))
    predictions = model.predict(batch)
    stitched = merge_tiles(predictions.tolist())
    return one_hot_to_rgb(stitched)
if __name__ == '__main__':
    # Load the trained model once, then segment every PNG under /images and
    # write the results to /predictions.
    model = load_model("/thesis/model/model.hdf5", compile=False)
    for img_path in glob.glob("/images/*.png"):
        print(f"predicting {img_path} ...")
        image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        # Tiling requires dimensions that are exact multiples of the
        # prediction tile size; only a warning is printed otherwise.
        if image.shape[0] % PREDICTION_TILE_SIZE != 0 or image.shape[1] % PREDICTION_TILE_SIZE != 0:
            print(f"WARN: {img_path} image dimensions must be multiple of {PREDICTION_TILE_SIZE}")
        predicted_path = os.path.join("/predictions", os.path.basename(img_path))
        cv2.imwrite(predicted_path, predict(model, image))
|
import os
import glob
import math
import cv2
import numpy as np
from tensorflow.keras.models import load_model
# Edge length of the tile fed to the network (presumably U-Net's 572x572
# valid-padding input -- TODO confirm against the model definition).
INPUT_TILE_SIZE = 572
# Edge length of the segmentation tile the network produces per input tile.
PREDICTION_TILE_SIZE = 388
# color values for the segmentation categories
LABEL_RGB_VALUES = [
    (3, 0, 208),  # buildings
    (240, 126, 11),  # water
    (40, 171, 44),  # forest
    (193, 193, 193),  # traffic
    (39, 255, 154),  # urban greens
    (132, 240, 235),  # agriculture
]
def one_hot_to_rgb(prediction, color_palette=None):
    """Map a (H, W, C) class-score array to an (H, W, 3) RGB image using the
    palette row of each pixel's argmax class (defaults to LABEL_RGB_VALUES)."""
    if np.ndim(prediction) != 3:
        raise ValueError("prediction should have 3 dimensions")
    if color_palette is None:
        color_palette = np.array(LABEL_RGB_VALUES)
    classes = np.argmax(prediction, axis=2)
    rgb_encoded = np.zeros(classes.shape[:2] + (3,))
    for idx, col in enumerate(color_palette):
        rgb_encoded[classes == idx] = col
    return rgb_encoded
def map_to_color(color_palette):
    """Wrap ``color_palette`` so the last channel of its output is dropped
    (presumably RGBA -> RGB -- TODO confirm against callers)."""
    def fn(x):
        result = color_palette(x)
        return result[:, :-1]
    return fn
def split_to_tiles(image):
    """Split ``image`` into overlapping input tiles.

    The image is mirror-padded by half the input/prediction size difference,
    then cut into INPUT_TILE_SIZE tiles stepping by PREDICTION_TILE_SIZE.
    """
    tiles = []
    x_steps = image.shape[0] // PREDICTION_TILE_SIZE
    y_steps = image.shape[1] // PREDICTION_TILE_SIZE
    offset = (INPUT_TILE_SIZE - PREDICTION_TILE_SIZE)
    extended_image = add_mirrored_edges(image, offset // 2)
    for x in range(x_steps):
        for y in range(y_steps):
            tile = extended_image[x * PREDICTION_TILE_SIZE:(x + 1) * PREDICTION_TILE_SIZE + offset,
                                  y * PREDICTION_TILE_SIZE:(y + 1) * PREDICTION_TILE_SIZE + offset, :]
            tiles.append(tile)
    return tiles
def merge_tiles(tiles):
    """Stitch a square, column-major list of (tile, tile, 6) arrays back into
    one image; tiles[y * n + x] lands at grid column x, row y."""
    tiles_per_row = int(math.sqrt(len(tiles)))
    image = np.zeros((tiles_per_row * PREDICTION_TILE_SIZE, 0, 6))
    for x in range(tiles_per_row):
        row = np.zeros((0, PREDICTION_TILE_SIZE, 6))
        for y in range(tiles_per_row):
            row = np.concatenate([row, tiles[y * tiles_per_row + x]], axis=0)
        image = np.concatenate([image, row], axis=1)
    return image
def add_mirrored_edges(image, context=100):
    """Pad ``image`` by ``context`` pixels on each side with mirrored copies
    of its own border strips (top/bottom first, then left/right)."""
    height = image.shape[0]
    width = image.shape[1]
    top_edge = image[0:context, :, :]
    top_edge = cv2.flip(top_edge, 0)  # vertical flip
    bottom_edge = image[height - context:height, :, :]
    bottom_edge = cv2.flip(bottom_edge, 0)
    image = np.concatenate([top_edge, image, bottom_edge], axis=0)
    left_edge = image[:, 0:context, :]
    left_edge = cv2.flip(left_edge, 1)  # horizontal flip
    right_edge = image[:, width - context:width, :]
    right_edge = cv2.flip(right_edge, 1)
    image = np.concatenate([left_edge, image, right_edge], axis=1)
    return image
def predict(model, image):
    """Normalize ``image``, run the model over it tile-wise, and return the
    stitched RGB segmentation map."""
    image = image / 255
    tiles = split_to_tiles(image)
    pred = model.predict(np.array(tiles))
    merged = merge_tiles(pred.tolist())
    return one_hot_to_rgb(merged)
if __name__ == '__main__':
    # Load the trained model once, then segment every PNG under /images and
    # write the results to /predictions.
    model = load_model("/thesis/model/model.hdf5", compile=False)
    for img_path in glob.glob("/images/*.png"):
        print(f"predicting {img_path} ...")
        image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        # Tiling requires dimensions that are exact multiples of the
        # prediction tile size; only a warning is printed otherwise.
        if image.shape[0] % PREDICTION_TILE_SIZE != 0 or image.shape[1] % PREDICTION_TILE_SIZE != 0:
            print(f"WARN: {img_path} image dimensions must be multiple of {PREDICTION_TILE_SIZE}")
        predicted_path = os.path.join("/predictions", os.path.basename(img_path))
        cv2.imwrite(predicted_path, predict(model, image))
|
en
| 0.6506
|
# color values for the segmentation categories # buildings # water # forest # traffic # urban greens # agriculture
| 2.705748
| 3
|
2018/day11.py
|
tcbegley/advent-of-code
| 6
|
6629076
|
import sys
def get_power_level(x, y, s):
    """Return the power level of fuel cell (x, y) for grid serial number ``s``.

    Power = hundreds digit of ((x + 10) * y + s) * (x + 10), minus 5.
    """
    rack_id = x + 10
    power = (rack_id * y + s) * rack_id
    return (power % 1000) // 100 - 5


def answer(serial_no, size=300):
    """Find the top-left coordinate of the 3x3 square with the largest total
    power in a ``size`` x ``size`` grid of fuel cells.

    The ``size`` parameter generalizes the previously hard-coded 300x300 grid
    and defaults to the old behavior, so existing callers are unaffected.

    Returns:
        (x, y) of the best square's top-left cell; ties keep the first square
        found in row-major order.
    """
    grid = [[get_power_level(x, y, serial_no) for y in range(size)] for x in range(size)]
    best = -float("inf")
    bestx = besty = None
    for x in range(size - 2):
        for y in range(size - 2):
            total = sum(grid[x + i][y + j] for i in range(3) for j in range(3))
            if total > best:
                best, bestx, besty = total, x, y
    return bestx, besty
if __name__ == "__main__":
    # The grid serial number is the single command-line argument.
    print(answer(int(sys.argv[1])))
|
import sys
def get_power_level(x, y, s):
    """Power level of the fuel cell at (x, y) for grid serial number s."""
    rack_id = x + 10
    hundreds_digit = ((rack_id * y + s) * rack_id % 1000) // 100
    return hundreds_digit - 5


def answer(serial_no):
    """Top-left (x, y) of the 3x3 square with the greatest total power on a
    300x300 grid of fuel cells.  Ties keep the first square in row-major order."""
    grid = [[get_power_level(x, y, serial_no) for y in range(300)] for x in range(300)]

    def square_total(x, y):
        return sum(grid[x + i][y + j] for i in range(3) for j in range(3))

    candidates = ((x, y) for x in range(298) for y in range(298))
    return max(candidates, key=lambda pos: square_total(*pos))
if __name__ == "__main__":
    # The grid serial number is the single command-line argument.
    print(answer(int(sys.argv[1])))
none
| 1
| 3.390945
| 3
|
|
station/types/test/test_cpa.py
|
jawaad-ahmad/brata.station
| 0
|
6629077
|
# ------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Unit tests for the CPA station type (station.types.cpa.Station) -- currently a placeholder skeleton.
"""
import unittest
from station.types.cpa import Station
# ------------------------------------------------------------------------------
class StationTestCase(unittest.TestCase):
    """Placeholder test skeleton for station.types.cpa.Station.

    Every test below is an unimplemented stub; the commented-out assertions
    sketch the intended checks.
    """
    # --------------------------------------------------------------------------
    def setUp(self):
        """Stub: prepare per-test fixtures (not yet implemented)."""
        #TODO
    # --------------------------------------------------------------------------
    def test_init(self):
        """Stub for Station construction checks (not yet implemented)."""
        #TODO
        #self.assertEqual(name, target.Name)
    # --------------------------------------------------------------------------
    def test_onReady(self):
        """Stub for the onReady handler (not yet implemented)."""
        # TODO
        #output = pibrella.output.e
        #self.assertEqual(0, output.read())
        #self.Target.turnOn()
        #self.assertEqual(1, output.read())
    # --------------------------------------------------------------------------
    def test_onProcessing(self):
        """Stub for the onProcessing handler (not yet implemented)."""
        # TODO
        #output = pibrella.output.f
        #self.assertEqual(0, output.read())
        #self.Target.turnOff()
        #self.assertEqual(1, output.read())
    # --------------------------------------------------------------------------
    def test_onFailed(self):
        """Stub for the onFailed handler (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
    # --------------------------------------------------------------------------
    def test_onPassed(self):
        """Stub for the onPassed handler (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
    # --------------------------------------------------------------------------
    def test_onUnexpectedState(self):
        """Stub for the onUnexpectedState handler (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
# ------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
TODO module description
"""
import unittest
from station.types.cpa import Station
# ------------------------------------------------------------------------------
class StationTestCase(unittest.TestCase):
    """Test skeleton for station.types.cpa.Station -- every case is still a TODO stub."""
    # --------------------------------------------------------------------------
    def setUp(self):
        """Build the per-test Station fixture (not yet implemented)."""
        #TODO
    # --------------------------------------------------------------------------
    def test_init(self):
        """Verify Station construction and initial attributes (not yet implemented)."""
        #TODO
        #self.assertEqual(name, target.Name)
    # --------------------------------------------------------------------------
    def test_onReady(self):
        """Intended to test the onReady handler (not yet implemented).

        The commented sketch below suggests it will check a pibrella
        output pin before/after Target.turnOn() -- TODO confirm.
        """
        # TODO
        #output = pibrella.output.e
        #self.assertEqual(0, output.read())
        #self.Target.turnOn()
        #self.assertEqual(1, output.read())
    # --------------------------------------------------------------------------
    def test_onProcessing(self):
        """Intended to test the onProcessing handler (not yet implemented).

        The commented sketch below suggests it will check a pibrella
        output pin before/after Target.turnOff() -- TODO confirm.
        """
        # TODO
        #output = pibrella.output.f
        #self.assertEqual(0, output.read())
        #self.Target.turnOff()
        #self.assertEqual(1, output.read())
    # --------------------------------------------------------------------------
    def test_onFailed(self):
        """Intended to test the onFailed handler (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
    # --------------------------------------------------------------------------
    def test_onPassed(self):
        """Intended to test the onPassed handler (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
    # --------------------------------------------------------------------------
    def test_onUnexpectedState(self):
        """Intended to test handling of an unexpected state transition (not yet implemented)."""
        # TODO
        #self.Target.setFlashing()
        # TODO
# ------------------------------------------------------------------------------
# Script entry point: discover and run all tests in this module.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.293659
|
# ------------------------------------------------------------------------------ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ TODO module description # ------------------------------------------------------------------------------ TODO class comment # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. #TODO # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. #TODO #self.assertEqual(name, target.Name) # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. 
arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. # TODO #output = pibrella.output.e #self.assertEqual(0, output.read()) #self.Target.turnOn() #self.assertEqual(1, output.read()) # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. # TODO #output = pibrella.output.f #self.assertEqual(0, output.read()) #self.Target.turnOff() #self.assertEqual(1, output.read()) # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. # TODO #self.Target.setFlashing() # TODO # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. # TODO #self.Target.setFlashing() # TODO # -------------------------------------------------------------------------- TODO strictly one-line summary TODO Detailed multi-line description if necessary. Args: arg1 (type1): TODO describe arg, valid values, etc. 
arg2 (type2): TODO describe arg, valid values, etc. arg3 (type3): TODO describe arg, valid values, etc. Returns: TODO describe the return type and details Raises: TodoError1: if TODO. TodoError2: if TODO. # TODO #self.Target.setFlashing() # TODO # ------------------------------------------------------------------------------
| 1.583886
| 2
|
plugin.video.saltsrd.lite/scrapers/moviexk_scraper.py
|
TheWardoctor/wardoctors-repo
| 1
|
6629078
|
<reponame>TheWardoctor/wardoctors-repo
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
logger = log_utils.Logger.get_logger(__name__)
BASE_URL = 'https://moviexk.org'
class Scraper(scraper.Scraper):
    """SALTS scraper for the MovieXK streaming site (movies, TV shows, episodes)."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        """
        :param timeout: HTTP timeout (seconds) applied to all requests.
        """
        self.timeout = timeout
        # Allow the user-configured base URL to override the default.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        """Video types this scraper can resolve."""
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        """Display name used in settings keys and the UI."""
        return 'MovieXK'

    def get_sources(self, video):
        """
        Return a list of hoster source dicts for *video*.

        Each dict carries: multi-part, url, host, class, quality, views,
        rating, direct. Returns an empty list when no page URL is known.
        """
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        headers = {'Referer': page_url}
        html = self._http_get(page_url, headers=headers, cache_limit=.5)
        if video.video_type == VIDEO_TYPES.MOVIE:
            # Movie listing pages link through a poster to the watch page;
            # follow it and pick the best-quality episode/version link.
            fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
            if fragment:
                movie_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
                if movie_url:
                    page_url = scraper_utils.urljoin(self.base_url, movie_url[0].attrs['href'])
                    html = self._http_get(page_url, cache_limit=.5)
                    episodes = self.__get_episodes(html)
                    page_url = self.__get_best_page(episodes)
                    if not page_url:
                        return hosters
                    else:
                        page_url = scraper_utils.urljoin(self.base_url, page_url)
                        html = self._http_get(page_url, cache_limit=.5)

        # Prefer embedded iframes (hosted streams); fall back to direct
        # <source> tags which carry a data-res quality attribute.
        streams = dom_parser2.parse_dom(html, 'iframe', req='src')
        if streams:
            streams = [(attrs['src'], 480) for attrs, _content in streams]
            direct = False
        else:
            streams = [(attrs['src'], attrs.get('data-res', 480)) for attrs, _content in dom_parser2.parse_dom(html, 'source', req=['src'])]
            direct = True
        headers = {'User-Agent': scraper_utils.get_ua(), 'Referer': page_url}
        for stream_url, height in streams:
            if 'video.php' in stream_url or 'moviexk.php' in stream_url:
                # These endpoints redirect to the real stream; quote the
                # title query value and resolve the redirect with a HEAD.
                if 'title=' in stream_url:
                    title = stream_url.split('title=')[-1]
                    stream_url = stream_url.replace(title, urllib.quote(title))
                redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=0)
                if redir_url.startswith('http'):
                    redir_url = redir_url.replace(' ', '').split(';codec')[0]
                    stream_url = redir_url
                else:
                    continue
            if direct:
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                else:
                    quality = scraper_utils.height_get_quality(height)
                stream_url += scraper_utils.append_headers(headers)
            else:
                host = urlparse.urlparse(stream_url).hostname
                quality = scraper_utils.height_get_quality(height)
            source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
            hosters.append(source)
        return hosters

    def __get_best_page(self, episodes):
        """Pick the best-quality link from an {LABEL: href} map, or None."""
        if 'EPTRAILER' in episodes: del episodes['EPTRAILER']
        if 'EPCAM' in episodes: del episodes['EPCAM']
        for q in ['EPHD1080P', 'EPHD720P', 'EPHD', 'EPFULL']:
            if q in episodes:
                return episodes[q]
        if episodes:
            # Fix: episodes.items()[0][1] only works on Python 2 (list
            # indexing of items()); this is equivalent and works on both.
            return next(iter(episodes.values()))

    def __get_episodes(self, html):
        """Map normalized episode labels (e.g. EPHD720P) to their hrefs."""
        return dict((r.content.replace(' ', '').upper(), r.attrs['href']) for r in dom_parser2.parse_dom(html, 'a', {'data-type': 'watch'}))

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site; return [{'title', 'url', 'year'}, ...] matches."""
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/search/')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, cache_limit=1)
        for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'inner'}):
            name = dom_parser2.parse_dom(fragment, 'div', {'class': 'name'})
            if not name: continue
            match = dom_parser2.parse_dom(name[0].content, 'a', req='href')
            if not match: continue
            match_url, match_title_year = match[0].attrs['href'], match[0].content
            if 'tv-series' in match_url and video_type == VIDEO_TYPES.MOVIE: continue
            # Strip markup and boilerplate; raw strings fix the invalid
            # escape sequences (\s) the originals relied on.
            match_title_year = re.sub(r'</?[^>]*>', '', match_title_year)
            match_title_year = re.sub(r'[Ww]atch\s+[Mm]ovie\s*', '', match_title_year)
            match_title_year = match_title_year.replace('&#8217;', "'")
            match_title, match_year = scraper_utils.extra_year(match_title_year)
            if not match_year:
                # Year sometimes lives in a separate span rather than the title.
                year_span = dom_parser2.parse_dom(fragment, 'span', {'class': 'year'})
                if year_span:
                    year_text = dom_parser2.parse_dom(year_span[0].content, 'a')
                    if year_text:
                        match_year = year_text[0].content.strip()
            if not year or not match_year or year == match_year:
                result = {'title': scraper_utils.cleanse_title(match_title), 'url': scraper_utils.pathify_url(match_url), 'year': match_year}
                results.append(result)
        return results

    def _get_episode_url(self, show_url, video):
        """Resolve the page URL of a specific episode of *video*, if listed."""
        url = scraper_utils.urljoin(self.base_url, show_url)
        html = self._http_get(url, cache_limit=24)
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
        if not fragment: return
        show_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
        if not show_url: return
        show_url = scraper_utils.urljoin(self.base_url, show_url[0].attrs['href'])
        html = self._http_get(show_url, cache_limit=2)
        fragment = dom_parser2.parse_dom(html, 'div', {'id': 'servers'})
        # Raw string: \s and \d are regex escapes, not string escapes.
        episode_pattern = r'href="([^"]+)[^>]+>[Ee][Pp]\s*(?:[Ss]0*%s-)?E?p?0*%s(?!\d)' % (video.season, video.episode)
        return self._default_get_episode_url(fragment or html, video, episode_pattern)
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
logger = log_utils.Logger.get_logger(__name__)
BASE_URL = 'https://moviexk.org'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'MovieXK'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if not source_url or source_url == FORCE_NO_MATCH: return hosters
page_url = scraper_utils.urljoin(self.base_url, source_url)
headers = {'Referer': page_url}
html = self._http_get(page_url, headers=headers, cache_limit=.5)
if video.video_type == VIDEO_TYPES.MOVIE:
fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
if fragment:
movie_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
if movie_url:
page_url = scraper_utils.urljoin(self.base_url, movie_url[0].attrs['href'])
html = self._http_get(page_url, cache_limit=.5)
episodes = self.__get_episodes(html)
page_url = self.__get_best_page(episodes)
if not page_url:
return hosters
else:
page_url = scraper_utils.urljoin(self.base_url, page_url)
html = self._http_get(page_url, cache_limit=.5)
streams = dom_parser2.parse_dom(html, 'iframe', req='src')
if streams:
streams = [(attrs['src'], 480) for attrs, _content in streams]
direct = False
else:
streams = [(attrs['src'], attrs.get('data-res', 480)) for attrs, _content in dom_parser2.parse_dom(html, 'source', req=['src'])]
direct = True
headers = {'User-Agent': scraper_utils.get_ua(), 'Referer': page_url}
for stream_url, height in streams:
if 'video.php' in stream_url or 'moviexk.php' in stream_url:
if 'title=' in stream_url:
title = stream_url.split('title=')[-1]
stream_url = stream_url.replace(title, urllib.quote(title))
redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=0)
if redir_url.startswith('http'):
redir_url = redir_url.replace(' ', '').split(';codec')[0]
stream_url = redir_url
else:
continue
if direct:
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(height)
stream_url += scraper_utils.append_headers(headers)
else:
host = urlparse.urlparse(stream_url).hostname
quality = scraper_utils.height_get_quality(height)
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
hosters.append(source)
return hosters
def __get_best_page(self, episodes):
if 'EPTRAILER' in episodes: del episodes['EPTRAILER']
if 'EPCAM' in episodes: del episodes['EPCAM']
for q in ['EPHD1080P', 'EPHD720P', 'EPHD', 'EPFULL']:
if q in episodes:
return episodes[q]
if episodes:
return episodes.items()[0][1]
def __get_episodes(self, html):
return dict((r.content.replace(' ', '').upper(), r.attrs['href']) for r in dom_parser2.parse_dom(html, 'a', {'data-type': 'watch'}))
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/search/')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=1)
for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'inner'}):
name = dom_parser2.parse_dom(fragment, 'div', {'class': 'name'})
if not name: continue
match = dom_parser2.parse_dom(name[0].content, 'a', req='href')
if not match: continue
match_url, match_title_year = match[0].attrs['href'], match[0].content
if 'tv-series' in match_url and video_type == VIDEO_TYPES.MOVIE: continue
match_title_year = re.sub('</?[^>]*>', '', match_title_year)
match_title_year = re.sub('[Ww]atch\s+[Mm]ovie\s*', '', match_title_year)
match_title_year = match_title_year.replace('’', "'")
match_title, match_year = scraper_utils.extra_year(match_title_year)
if not match_year:
year_span = dom_parser2.parse_dom(fragment, 'span', {'class': 'year'})
if year_span:
year_text = dom_parser2.parse_dom(year_span[0].content, 'a')
if year_text:
match_year = year_text[0].content.strip()
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'url': scraper_utils.pathify_url(match_url), 'year': match_year}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
url = scraper_utils.urljoin(self.base_url, show_url)
html = self._http_get(url, cache_limit=24)
fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
if not fragment: return
show_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
if not show_url: return
show_url = scraper_utils.urljoin(self.base_url, show_url[0].attrs['href'])
html = self._http_get(show_url, cache_limit=2)
fragment = dom_parser2.parse_dom(html, 'div', {'id': 'servers'})
episode_pattern = 'href="([^"]+)[^>]+>[Ee][Pp]\s*(?:[Ss]0*%s-)?E?p?0*%s(?!\d)' % (video.season, video.episode)
return self._default_get_episode_url(fragment or html, video, episode_pattern)
|
en
| 0.848027
|
SALTS XBMC Addon Copyright (C) 2014 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. # @UnusedImport # @UnusedVariable #8217;', "'")
| 2.041807
| 2
|
Dataset/Leetcode/train/27/503.py
|
kkcookies99/UAST
| 0
|
6629079
|
<reponame>kkcookies99/UAST<gh_stars>0
class Solution:
    def XXX(self, nums: List[int], val: int) -> int:
        """Remove every occurrence of *val* from nums in place.

        Args:
            nums: list to filter; mutated so it no longer contains val.
            val: the value to remove.
        Returns:
            The new length of nums.
        """
        # One O(n) pass instead of the original count()+repeated remove(),
        # which was O(n^2). Slice-assign keeps the mutation in place so
        # callers holding a reference to nums see the filtered list.
        nums[:] = [x for x in nums if x != val]
        return len(nums)
undefined
for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
|
class Solution:
def XXX(self, nums: List[int], val: int) -> int:
if not nums:
return 0
a = nums.count(val)
for i in range(a):
nums.remove(val)
return len(nums)
undefined
for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
|
none
| 1
| 3.211338
| 3
|
|
aristotle/apps/marc/bots/awbots.py
|
jermnelson/Discover-Aristotle
| 7
|
6629080
|
"""
awbots.py - American West Bots for automating American West MARC record loads
"""
__author__ = '<NAME>'
from marcbots import MARCImportBot
PROXY_LOCATION='0-www.americanwest.amdigital.co.uk.tiger.coloradocollege.edu'
class AmericanWestBot(MARCImportBot):
    """
    MARC-record bot for the American West digital collection.

    Reads MARC records exported from the American West database and
    validates/adds/modifies fields to produce a new MARC file suitable
    for loading into TIGER.

    NOTE(review): this class constructs ``Field`` objects but no import
    of ``Field`` is visible in this module (only MARCImportBot is
    imported) -- presumably pymarc.Field; confirm that marcbots
    re-exports it or add ``from pymarc import Field``.
    """
    __name__ = 'American West Bot'

    def __init__(self,
                 marc_file):
        """
        Initialize the bot for the conversion process.

        :param marc_file: input MARC file to convert
        """
        MARCImportBot.__init__(self,marc_file)

    def processRecord(self,
                      marc_record):
        """
        Run one MARC record through the full American West pipeline.

        :param marc_record: record to transform
        :returns: the transformed record

        NOTE(review): validate006, replace007 and processURLs are not
        defined in this class -- presumably inherited from
        MARCImportBot; confirm.
        """
        marc_record = self.validate001(marc_record)
        marc_record = self.validate003(marc_record)
        marc_record = self.validate006(marc_record)
        marc_record = self.replace007(marc_record)
        marc_record = self.validate490(marc_record)
        marc_record = self.processURLs(marc_record,
                                       proxy_location=PROXY_LOCATION)
        marc_record = self.validate710(marc_record)
        marc_record = self.validate730(marc_record)
        marc_record = self.validate830(marc_record)
        return marc_record

    def validate001(self,marc_record):
        """
        Replace the AC prefix of field 001 with AMP (Prospector compatibility).

        :param marc_record: record whose 001 field is rewritten
        :returns: the modified record
        """
        # Remove, edit and re-add the field so the change is registered.
        field001 = marc_record.get_fields('001')[0]
        marc_record.remove_field(field001)
        raw_data = field001.data
        field001.data = raw_data.replace('AC','AMP')
        marc_record.add_field(field001)
        return marc_record

    def validate003(self,marc_record):
        """
        Replace any 003 field with the COC control code.

        :param marc_record: record whose 003 field is replaced
        :returns: the modified record
        """
        marc_record = self.__remove_field__(marc_record=marc_record,
                                            tag='003')
        new003 = Field(tag='003',
                       data='COC')
        marc_record.add_field(new003)
        return marc_record

    def validate490(self,marc_record):
        """
        Remove all existing 490 (series statement) fields.

        :param marc_record: record to strip
        :returns: the modified record
        """
        all490s = marc_record.get_fields('490')
        for field in all490s:
            marc_record.remove_field(field)
        return marc_record

    def validate710(self,
                    marc_record):
        """
        Replace all 710 (corporate name) fields with the standard pair.

        :param marc_record: record whose 710 fields are replaced
        :returns: the modified record
        """
        all710s = marc_record.get_fields('710')
        for field in all710s:
            marc_record.remove_field(field)
        first710 = Field(tag='710',
                         indicators=['2',' '],
                         subfields=['a','Newberry Library.'])
        marc_record.add_field(first710)
        new710 = Field(tag='710',
                       indicators=['2',' '],
                       subfields=['a','<NAME> Digital (Firm)'])
        marc_record.add_field(new710)
        return marc_record

    def validate730(self,marc_record):
        """
        Replace the 730 (uniform title) field with the American West text.

        :param marc_record: record whose 730 field is replaced
        :returns: the modified record
        """
        self.__remove_field__(marc_record=marc_record,
                              tag='730')
        field730 = Field(tag='730',
                         indicators=['0',' '],
                         subfields=['a','American West (Online Publications)'])
        marc_record.add_field(field730)
        return marc_record

    def validate830(self,marc_record):
        """
        Remove all existing 830 (series added entry) fields.

        :param marc_record: record to strip
        :returns: the modified record
        """
        all830s = marc_record.get_fields('830')
        for field in all830s:
            marc_record.remove_field(field)
        return marc_record
|
"""
awbots.py - American West Bots for automating American West MARC record loads
"""
__author__ = '<NAME>'
from marcbots import MARCImportBot
PROXY_LOCATION='0-www.americanwest.amdigital.co.uk.tiger.coloradocollege.edu'
class AmericanWestBot(MARCImportBot):
"""
The `AmericanWestBot` reads MARC records from
American West Database, validates adds/modify fields
for a new import MARC file for loading into TIGER
"""
__name__ = 'American West Bot'
def __init__(self,
marc_file):
"""
Initializes `AmericanWestBot` for conversion
process.
:param marc_file: MARC file
"""
MARCImportBot.__init__(self,marc_file)
def processRecord(self,
marc_record):
"""
Method processes a single marc_record for American West
MARC.
:param marc_file: MARC file
"""
marc_record = self.validate001(marc_record)
marc_record = self.validate003(marc_record)
marc_record = self.validate006(marc_record)
marc_record = self.replace007(marc_record)
marc_record = self.validate490(marc_record)
marc_record = self.processURLs(marc_record,
proxy_location=PROXY_LOCATION)
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record)
marc_record = self.validate830(marc_record)
return marc_record
def validate001(self,marc_record):
"""
Method replaces AC prefix with AMP prefix for Prospector compatibility.
:param marc_file: MARC file
"""
field001 = marc_record.get_fields('001')[0]
marc_record.remove_field(field001)
raw_data = field001.data
field001.data = raw_data.replace('AC','AMP')
marc_record.add_field(field001)
return marc_record
def validate003(self,marc_record):
"""
Validates 003 field, adds control code.
:param marc_file: MARC file
"""
marc_record = self.__remove_field__(marc_record=marc_record,
tag='003')
new003 = Field(tag='003',
data='COC')
marc_record.add_field(new003)
return marc_record
def validate490(self,marc_record):
"""
Method removes all existing 490 fields.
:param marc_file: MARC file
"""
all490s = marc_record.get_fields('490')
for field in all490s:
marc_record.remove_field(field)
return marc_record
def validate710(self,
marc_record):
"""
Method validates/adds 710 fields
:param marc_file: MARC file
"""
all710s = marc_record.get_fields('710')
for field in all710s:
marc_record.remove_field(field)
first710 = Field(tag='710',
indicators=['2',' '],
subfields=['a','Newberry Library.'])
marc_record.add_field(first710)
new710 = Field(tag='710',
indicators=['2',' '],
subfields=['a','<NAME> Digital (Firm)'])
marc_record.add_field(new710)
return marc_record
def validate730(self,marc_record):
"""
Method validates 730 with American West desired text.
:param marc_file: MARC file
"""
self.__remove_field__(marc_record=marc_record,
tag='730')
field730 = Field(tag='730',
indicators=['0',' '],
subfields=['a','American West (Online Publications)'])
marc_record.add_field(field730)
return marc_record
def validate830(self,marc_record):
"""
Method removes all existing 830 fields.
:param marc_file: MARC file
"""
all830s = marc_record.get_fields('830')
for field in all830s:
marc_record.remove_field(field)
return marc_record
|
en
| 0.732371
|
awbots.py - American West Bots for automating American West MARC record loads The `AmericanWestBot` reads MARC records from American West Database, validates adds/modify fields for a new import MARC file for loading into TIGER Initializes `AmericanWestBot` for conversion process. :param marc_file: MARC file Method processes a single marc_record for American West MARC. :param marc_file: MARC file Method replaces AC prefix with AMP prefix for Prospector compatibility. :param marc_file: MARC file Validates 003 field, adds control code. :param marc_file: MARC file Method removes all existing 490 fields. :param marc_file: MARC file Method validates/adds 710 fields :param marc_file: MARC file Method validates 730 with American West desired text. :param marc_file: MARC file Method removes all existing 830 fields. :param marc_file: MARC file
| 2.817343
| 3
|
Cython&OpenMP/fopenmp/setup.py
|
SuperbTUM/CV-newcomer
| 0
|
6629081
|
<filename>Cython&OpenMP/fopenmp/setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy as np
# Compiler/linker switches: C++11 language level plus OpenMP support.
compile_args = ['-std=c++11', '-fopenmp']
linker_flags = ['-fopenmp']
# Build the 'scratch' extension from its Cython source as C++,
# with NumPy headers on the include path.
module = Extension('scratch',
                   ['scratch.pyx'],
                   language='c++',
                   include_dirs=[np.get_include()],
                   extra_compile_args=compile_args,
                   extra_link_args=linker_flags)
setup(
    name='scratch',
    # Fix: gdb_debug is a cythonize() option, not a setup() keyword --
    # distutils would only warn and ignore it where it was before.
    ext_modules=cythonize(module, gdb_debug=False),
)
|
<filename>Cython&OpenMP/fopenmp/setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy as np
compile_args = ['-std=c++11', '-fopenmp']
linker_flags = ['-fopenmp']
module = Extension('scratch',
['scratch.pyx'],
language='c++',
include_dirs=[np.get_include()],
extra_compile_args=compile_args,
extra_link_args=linker_flags)
setup(
name='scratch',
ext_modules=cythonize(module),
gdb_debug=False
)
|
none
| 1
| 1.479364
| 1
|
|
code/507.py
|
Nightwish-cn/my_leetcode
| 23
|
6629082
|
<reponame>Nightwish-cn/my_leetcode
class Solution:
    def checkPerfectNumber(self, num):
        """Return True iff num equals the sum of its proper positive divisors.

        :type num: int
        :rtype: bool
        """
        # Guard: the original returned True for num == 0 (-0 == 0) and
        # would misbehave for negatives; no integer <= 1 is perfect.
        if num <= 1:
            return False
        # Start at -num so the divisor pair (1, num) found at i == 1
        # contributes only the proper divisor 1.
        total = -num
        i = 1
        while i * i < num:
            if num % i == 0:
                total += i + num // i
            i += 1
        if i * i == num:
            # Perfect square: count the square root exactly once.
            total += i
        return num == total
|
class Solution:
def checkPerfectNumber(self, num):
"""
:type num: int
:rtype: bool
"""
i, sum = 1, -num
while i * i < num:
if num % i == 0:
sum += i + num // i
i += 1
if i * i == num:
sum += i
return num == sum
|
en
| 0.209607
|
:type num: int :rtype: bool
| 3.416177
| 3
|
ext/pynd-lib/pynd/segutils.py
|
thomshaw92/Atlas-GAN
| 107
|
6629083
|
'''
nd segmentation (label map) utilities
Contact: <EMAIL>
'''
import numpy as np
from . import ndutils as nd
def seg2contour(seg, exclude_zero=True, contour_type='inner', thickness=1):
    '''
    transform nd segmentation (label maps) to contour maps

    Parameters
    ----------
    seg : nd array
        volume of labels/segmentations
    exclude_zero : optional logical
        whether to exclude the zero label.
        default True
    contour_type : string
        where to draw contour voxels relative to label 'inner','outer', or 'both'
    thickness : number
        desired contour thickness, forwarded as the threshold to nd.bw2contour

    Output
    ------
    con : nd array
        nd array (volume) of contour maps

    See Also
    --------
    seg_overlap
    '''
    # extract unique labels
    labels = np.unique(seg)
    if exclude_zero:
        labels = np.delete(labels, np.where(labels == 0))

    # BUGFIX: the +0.01 fudge (to avoid threshold ties) used to be applied
    # *inside* the loop, so each subsequent label received a progressively
    # larger threshold and thus a thicker contour. Compute it once.
    thr = thickness + 0.01

    # get the contour of each label
    contour_map = seg * 0
    for lab in labels:
        # extract binary label map for this label
        label_map = seg == lab

        # extract contour map for this label
        label_contour_map = nd.bw2contour(label_map, type=contour_type, thr=thr)

        # assign contour to this label
        contour_map[label_contour_map] = lab

    return contour_map
def seg_overlap(vol, seg, do_contour=True, do_rgb=True, cmap=None, thickness=1.0):
    '''
    Overlay an nd volume with an nd segmentation (label map).

    do_contour should be None, boolean, or contour_type from seg2contour
    not well tested yet.

    Assumes vol and seg share the same spatial shape and that seg holds
    small non-negative integer labels usable as colour-table indices --
    TODO confirm with callers.

    NOTE(review): when do_rgb is False the input `seg` array is modified
    in place (background voxels are overwritten with `vol`); pass a copy
    if you need `seg` afterwards.
    '''
    # compute contours for each label if necessary
    if do_contour is not None and do_contour is not False:
        if not isinstance(do_contour, str):
            # boolean True means "use the default contour type"
            do_contour = 'inner'
        seg = seg2contour(seg, contour_type=do_contour, thickness=thickness)
    # compute a rgb-contour map
    if do_rgb:
        if cmap is None:
            # one random bright colour (0.5..1.0 per channel) per label;
            # label 0 (background) is forced to black
            nb_labels = np.max(seg).astype(int) + 1
            colors = np.random.random((nb_labels, 3)) * 0.5 + 0.5
            colors[0, :] = [0, 0, 0]
        else:
            # keep only RGB, dropping a possible alpha column
            colors = cmap[:, 0:3]
        # per-voxel colour lookup, then show the raw volume at background voxels
        olap = colors[seg.flat, :]
        sf = seg.flat == 0
        for d in range(3):
            olap[sf, d] = vol.flat[sf]
        olap = np.reshape(olap, vol.shape + (3, ))
    else:
        olap = seg
        olap[seg == 0] = vol[seg == 0]
    return olap
def seg_overlay(vol, seg, do_rgb=True, seg_wt=0.5, cmap=None):
    '''
    Blend an nd volume with an nd segmentation (label map).

    not well tested yet.

    When do_rgb is True the labels are mapped through a colour table
    (random bright colours unless `cmap` is given, with label 0 black)
    and blended with the volume using weight `seg_wt`; otherwise the raw
    label values are blended with the intensities directly.
    '''
    vol_wt = 1 - seg_wt

    if not do_rgb:
        # plain scalar blend of label values and volume intensities
        return seg * seg_wt + vol * vol_wt

    if cmap is not None:
        # keep only the RGB columns (drop a possible alpha channel)
        lut = cmap[:, 0:3]
    else:
        # one random bright colour per label; background label stays black
        n_labels = np.max(seg) + 1
        lut = np.random.random((n_labels, 3)) * 0.5 + 0.5
        lut[0, :] = [0, 0, 0]

    # per-voxel colour lookup, reshaped back onto the volume grid
    seg_rgb = np.reshape(lut[seg.flat, :], vol.shape + (3, ))
    return seg_rgb * seg_wt + np.expand_dims(vol, -1) * vol_wt
|
'''
nd segmentation (label map) utilities
Contact: <EMAIL>
'''
import numpy as np
from . import ndutils as nd
def seg2contour(seg, exclude_zero=True, contour_type='inner', thickness=1):
'''
transform nd segmentation (label maps) to contour maps
Parameters
----------
seg : nd array
volume of labels/segmentations
exclude_zero : optional logical
whether to exclude the zero label.
default True
contour_type : string
where to draw contour voxels relative to label 'inner','outer', or 'both'
Output
------
con : nd array
nd array (volume) of contour maps
See Also
--------
seg_overlap
'''
# extract unique labels
labels = np.unique(seg)
if exclude_zero:
labels = np.delete(labels, np.where(labels == 0))
# get the contour of each label
contour_map = seg * 0
for lab in labels:
# extract binary label map for this label
label_map = seg == lab
# extract contour map for this label
thickness = thickness + 0.01
label_contour_map = nd.bw2contour(label_map, type=contour_type, thr=thickness)
# assign contour to this label
contour_map[label_contour_map] = lab
return contour_map
def seg_overlap(vol, seg, do_contour=True, do_rgb=True, cmap=None, thickness=1.0):
'''
overlap a nd volume and nd segmentation (label map)
do_contour should be None, boolean, or contour_type from seg2contour
not well tested yet.
'''
# compute contours for each label if necessary
if do_contour is not None and do_contour is not False:
if not isinstance(do_contour, str):
do_contour = 'inner'
seg = seg2contour(seg, contour_type=do_contour, thickness=thickness)
# compute a rgb-contour map
if do_rgb:
if cmap is None:
nb_labels = np.max(seg).astype(int) + 1
colors = np.random.random((nb_labels, 3)) * 0.5 + 0.5
colors[0, :] = [0, 0, 0]
else:
colors = cmap[:, 0:3]
olap = colors[seg.flat, :]
sf = seg.flat == 0
for d in range(3):
olap[sf, d] = vol.flat[sf]
olap = np.reshape(olap, vol.shape + (3, ))
else:
olap = seg
olap[seg == 0] = vol[seg == 0]
return olap
def seg_overlay(vol, seg, do_rgb=True, seg_wt=0.5, cmap=None):
'''
overlap a nd volume and nd segmentation (label map)
not well tested yet.
'''
# compute contours for each label if necessary
# compute a rgb-contour map
if do_rgb:
if cmap is None:
nb_labels = np.max(seg) + 1
colors = np.random.random((nb_labels, 3)) * 0.5 + 0.5
colors[0, :] = [0, 0, 0]
else:
colors = cmap[:, 0:3]
seg_flat = colors[seg.flat, :]
seg_rgb = np.reshape(seg_flat, vol.shape + (3, ))
# get the overlap image
olap = seg_rgb * seg_wt + np.expand_dims(vol, -1) * (1-seg_wt)
else:
olap = seg * seg_wt + vol * (1-seg_wt)
return olap
|
en
| 0.663201
|
nd segmentation (label map) utilities Contact: <EMAIL> transform nd segmentation (label maps) to contour maps Parameters ---------- seg : nd array volume of labels/segmentations exclude_zero : optional logical whether to exclude the zero label. default True contour_type : string where to draw contour voxels relative to label 'inner','outer', or 'both' Output ------ con : nd array nd array (volume) of contour maps See Also -------- seg_overlap # extract unique labels # get the contour of each label # extract binary label map for this label # extract contour map for this label # assign contour to this label overlap a nd volume and nd segmentation (label map) do_contour should be None, boolean, or contour_type from seg2contour not well tested yet. # compute contours for each label if necessary # compute a rgb-contour map overlap a nd volume and nd segmentation (label map) not well tested yet. # compute contours for each label if necessary # compute a rgb-contour map # get the overlap image
| 3.068283
| 3
|
wrapper.py
|
Neubias-WG5/W_ObjectTracking-PAST-FR
| 0
|
6629084
|
import os
import shutil
import sys
from subprocess import call
from cytomine.models import Job
from neubiaswg5 import CLASS_OBJTRK
from neubiaswg5.helpers import NeubiasJob, prepare_data, upload_data, upload_metrics, get_discipline
def main(argv):
    """Run the NEUBIAS WG5 object-tracking (2D+t) benchmark job.

    Downloads the input/ground-truth images from Cytomine, (normally) runs
    the CellTrackingChallenge workflow, then uploads the results and the
    benchmark metrics back to Cytomine.

    :param argv: command-line arguments, forwarded to NeubiasJob.from_cli.
    """
    # 0. Initialize Cytomine client and job if necessary and parse inputs
    with NeubiasJob.from_cli(argv) as nj:
        problem_cls = get_discipline(nj, default=CLASS_OBJTRK)
        # data is 2D + time, which the helper functions treat as non-2D
        is_2d = False
        nj.job.update(status=Job.RUNNING, progress=0, statusComment="Running workflow for problem class '{}' in 2D+t".format(problem_cls))
        # 1. Create working directories on the machine
        # 2. Download the images
        nj.job.update(progress=0, statusComment="Initialisation...")
        in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, nj, **nj.flags)
        # 3. Call the image analysis workflow using the run script
        nj.job.update(progress=25, statusComment="Launching workflow...")
        # DEBUG Workflow not currently working !!
        # # CellTrackingChallenge expects the protocol to start with the data folder name
        # in_path_folder_name, in_folder_name = os.path.basename(os.path.dirname(in_path)), os.path.basename(in_path)
        # shutil.copyfile("/app/proto.protocol", "/app/{}-{}.protocol".format(in_path_folder_name, in_folder_name))
        # command = "java -Xmx2048m -jar icy.jar -hl -x plugins.adufour.ctc.CellTrackingChallenge {} {}".format(os.path.dirname(in_path), os.path.basename(in_path))
        # return_code = call(command, shell=True, cwd="/app")  # waits for the subprocess to return
        #
        # if return_code != 0:
        #     err_desc = "Failed to execute the ImageJ macro (return code: {})".format(return_code)
        #     nj.job.update(progress=50, statusComment=err_desc)
        #     raise ValueError(err_desc)
        #
        # # move files generated by CellTrackingChallenge into the output folder
        # res_path = in_path + "_RES"
        # for file in os.listdir(res_path):
        #     shutil.move(os.path.join(res_path, file), out_path)
        # DEBUG copy ground truth in output
        # NOTE(review): while the real workflow above is disabled, the ground
        # truth is copied into the output folder, so the metrics below end up
        # comparing the ground truth against itself.
        for file in os.listdir(gt_path):
            outfile = file.replace("_attached", "") if file.endswith(".txt") else file
            shutil.copy(os.path.join(gt_path, file), os.path.join(out_path, outfile))
        # 4. Upload the annotation and labels to Cytomine
        upload_data(problem_cls, nj, in_images, out_path, **nj.flags, is_2d=is_2d, monitor_params={
            "start": 60, "end": 90, "period": 0.1
        })
        # 5. Compute and upload the metrics
        nj.job.update(progress=90, statusComment="Computing and uploading metrics...")
        upload_metrics(problem_cls, nj, in_images, gt_path, out_path, tmp_path, **nj.flags)
        # 6. End
        nj.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
if __name__ == "__main__":
    main(sys.argv[1:])
|
import os
import shutil
import sys
from subprocess import call
from cytomine.models import Job
from neubiaswg5 import CLASS_OBJTRK
from neubiaswg5.helpers import NeubiasJob, prepare_data, upload_data, upload_metrics, get_discipline
def main(argv):
# 0. Initialize Cytomine client and job if necessary and parse inputs
with NeubiasJob.from_cli(argv) as nj:
problem_cls = get_discipline(nj, default=CLASS_OBJTRK)
is_2d = False
nj.job.update(status=Job.RUNNING, progress=0, statusComment="Running workflow for problem class '{}' in 2D+t".format(problem_cls))
# 1. Create working directories on the machine
# 2. Download the images
nj.job.update(progress=0, statusComment="Initialisation...")
in_images, gt_images, in_path, gt_path, out_path, tmp_path = prepare_data(problem_cls, nj, **nj.flags)
# 3. Call the image analysis workflow using the run script
nj.job.update(progress=25, statusComment="Launching workflow...")
# DEBUG Workflow not currently working !!
# # CellTrackingChallenge expects the protocol to start with the data folder name
# in_path_folder_name, in_folder_name = os.path.basename(os.path.dirname(in_path)), os.path.basename(in_path)
# shutil.copyfile("/app/proto.protocol", "/app/{}-{}.protocol".format(in_path_folder_name, in_folder_name))
# command = "java -Xmx2048m -jar icy.jar -hl -x plugins.adufour.ctc.CellTrackingChallenge {} {}".format(os.path.dirname(in_path), os.path.basename(in_path))
# return_code = call(command, shell=True, cwd="/app") # waits for the subprocess to return
#
# if return_code != 0:
# err_desc = "Failed to execute the ImageJ macro (return code: {})".format(return_code)
# nj.job.update(progress=50, statusComment=err_desc)
# raise ValueError(err_desc)
#
# # move files generated by CellTrackingChallenge into the output folder
# res_path = in_path + "_RES"
# for file in os.listdir(res_path):
# shutil.move(os.path.join(res_path, file), out_path)
# DEBUG copy ground truth in output
for file in os.listdir(gt_path):
outfile = file.replace("_attached", "") if file.endswith(".txt") else file
shutil.copy(os.path.join(gt_path, file), os.path.join(out_path, outfile))
# 4. Upload the annotation and labels to Cytomine
upload_data(problem_cls, nj, in_images, out_path, **nj.flags, is_2d=is_2d, monitor_params={
"start": 60, "end": 90, "period": 0.1
})
# 5. Compute and upload the metrics
nj.job.update(progress=90, statusComment="Computing and uploading metrics...")
upload_metrics(problem_cls, nj, in_images, gt_path, out_path, tmp_path, **nj.flags)
# 6. End
nj.job.update(status=Job.TERMINATED, progress=100, statusComment="Finished.")
if __name__ == "__main__":
main(sys.argv[1:])
|
en
| 0.548632
|
# 0. Initialize Cytomine client and job if necessary and parse inputs # 1. Create working directories on the machine # 2. Download the images # 3. Call the image analysis workflow using the run script # DEBUG Workflow not currently working !! # # CellTrackingChallenge expects the protocol to start with the data folder name # in_path_folder_name, in_folder_name = os.path.basename(os.path.dirname(in_path)), os.path.basename(in_path) # shutil.copyfile("/app/proto.protocol", "/app/{}-{}.protocol".format(in_path_folder_name, in_folder_name)) # command = "java -Xmx2048m -jar icy.jar -hl -x plugins.adufour.ctc.CellTrackingChallenge {} {}".format(os.path.dirname(in_path), os.path.basename(in_path)) # return_code = call(command, shell=True, cwd="/app") # waits for the subprocess to return # # if return_code != 0: # err_desc = "Failed to execute the ImageJ macro (return code: {})".format(return_code) # nj.job.update(progress=50, statusComment=err_desc) # raise ValueError(err_desc) # # # move files generated by CellTrackingChallenge into the output folder # res_path = in_path + "_RES" # for file in os.listdir(res_path): # shutil.move(os.path.join(res_path, file), out_path) # DEBUG copy ground truth in output # 4. Upload the annotation and labels to Cytomine # 5. Compute and upload the metrics # 6. End
| 2.126377
| 2
|
model.py
|
GetThereSafe/gettheresafe
| 1
|
6629085
|
from main import db
class Coord(db.Model):
    """Database row holding a single (lng, lat) geographic coordinate."""
    # surrogate primary key
    id = db.Column(db.Integer, primary_key=True)
    # longitude and latitude in degrees -- presumably WGS84; confirm with callers
    lng = db.Column(db.Float)
    lat = db.Column(db.Float)
    def __init__(self, lng, lat):
        """Create a coordinate from its longitude and latitude."""
        self.lng = lng
        self.lat = lat
|
from main import db
class Coord(db.Model):
id = db.Column(db.Integer, primary_key=True)
lng = db.Column(db.Float)
lat = db.Column(db.Float)
def __init__(self, lng, lat):
self.lng = lng
self.lat = lat
|
none
| 1
| 2.851807
| 3
|
|
python-code/opencv-learning/dnn/tensorflow-face-recognition.py
|
juxiangwu/image-processing
| 13
|
6629086
|
# Live face recognition: detect faces with an OpenCV Haar cascade, classify
# each face crop with a retrained TensorFlow (v1) Inception graph, and draw
# the label + confidence on the video stream. Quit with 'q'.
import tensorflow as tf
import numpy as np
import cv2
# capture from camera device index 1 (second camera)
cap = cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Haar-cascade detection: scale factor 1.3, 5 neighbours required
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        # the face crop is round-tripped through disk so the graph's
        # DecodeJpeg input can consume it as JPEG bytes
        cv2.imwrite('face.jpg',roi_color)
        im='/home/rey/Github/face_recognition_tensorflow/face.jpg'
        # Read in the image_data
        image_data =tf.gfile.FastGFile(im, 'rb').read()
        #load retrained label data
        label_lines = [line.rstrip() for line
                           in tf.gfile.GFile("/home/rey/Github/face_recognition_tensorflow/retrained_labels.txt")]
        #load retrained graph
        # NOTE(review): the labels, graph and session are rebuilt for every
        # detected face in every frame; hoisting this setup above the while
        # loop would avoid the repeated disk reads and graph imports.
        with tf.gfile.FastGFile("/home/rey/Github/face_recognition_tensorflow/retrained_graph.pb", 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')
        with tf.Session() as sess:
            # Feed the image_data as input to the graph and get first prediction
            softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
            predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
            # Sort to show labels of first prediction in order of confidence
            top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
            info=str(label_lines[top_k[0]]) + ', Confidence:' + str(round((predictions[0][top_k[0]]*100)-0.05,2)) + '%'
            cv2.putText(frame, info, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    #show video stream
    cv2.imshow('Face Recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
print ('Session Ended')
cap.release()
cv2.destroyAllWindows()
|
import tensorflow as tf
import numpy as np
import cv2
cap = cv2.VideoCapture(1)
face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
cv2.imwrite('face.jpg',roi_color)
im='/home/rey/Github/face_recognition_tensorflow/face.jpg'
# Read in the image_data
image_data =tf.gfile.FastGFile(im, 'rb').read()
#load retrained label data
label_lines = [line.rstrip() for line
in tf.gfile.GFile("/home/rey/Github/face_recognition_tensorflow/retrained_labels.txt")]
#load retrained graph
with tf.gfile.FastGFile("/home/rey/Github/face_recognition_tensorflow/retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
info=str(label_lines[top_k[0]]) + ', Confidence:' + str(round((predictions[0][top_k[0]]*100)-0.05,2)) + '%'
cv2.putText(frame, info, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
#show video stream
cv2.imshow('Face Recognition', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print ('Session Ended')
cap.release()
cv2.destroyAllWindows()
|
en
| 0.786779
|
# Read in the image_data #load retrained label data #load retrained graph # Feed the image_data as input to the graph and get first prediction # Sort to show labels of first prediction in order of confidence #show video stream
| 2.949202
| 3
|
draw-figs.py
|
rofuyu/exp-gmips-nips17
| 5
|
6629087
|
<filename>draw-figs.py<gh_stars>1-10
#!/usr/bin/env python
# Plot speedup-vs-quality figures for the Greedy-MIPS experiments (Python 2).
import sys
from os import path
import os
from data_info import *
#BASIC Options
# NOTE(review): the stacked reassignments of XLABEL/YLABEL/ymax/xmax below
# are leftovers from past experiments -- only the *last* assignment of each
# name takes effect (XLABEL='time', YLABEL='p@10', ymax=105, xmax=200), and
# YLABEL is overwritten again by the loop at the bottom of the file.
#TITLE = 'data size v.s. ROC'
XLABEL = 'updates'
XLABEL = 'time'
#XLABEL = 'rank'
XLABEL = 'cputime'
XLABEL = 'iter'
XLABEL = 'walltime'
XLABEL = 'time'
YLABEL = r'obj'
YLABEL = 'cputime'
YLABEL = 'training-LL'
YLABEL = 'walltime'
YLABEL = r'p@10'
FILETYPE = 'png'
TITLE = '%s v.s. %s ' % (XLABEL, YLABEL)
ymin = None
ymax = None
xmax = None
xmin = None
ymax = 105
xmax = 200
# cycled plotting styles, indexed modulo their length by the lambdas below
COLORS = 'blue,red,black,green,cyan,magenta,chartreuse,blueviolet,forestgreen,black,aqua'.split(',')
LINESTYLES = '-,--,-.,:'.split(',')
MARKERS = 'o,s,^,v,*,+,x,.'.split(',')
mycolor = lambda i: COLORS[i%len(COLORS)]
mylinestyle = lambda i: LINESTYLES[i%len(LINESTYLES)]
mymarker = lambda i: MARKERS[i%len(MARKERS)]
# NOTE(review): `logs` is never used below; the figure loop at the bottom
# builds its own log paths via go_one().
logs = sys.argv[1:]
def getlegend(log):
    """Map a log-file path to the display name of the MIPS method it holds.

    Unknown paths are returned unchanged so they still get *some* legend.
    """
    # Ordered substring -> legend table; the order mirrors the original
    # if/elif chain ('Greedy' is tested before lowercase 'greedy').
    legend_table = (
        ('Greedy', 'Greedy-MIPS'),
        ('Hgreedy', 'Improved-Greedy-MIPS-heap'),
        ('greedy', 'Greedy-MIPS'),
        ('sample', 'Sample-MIPS'),
        ('lsh', 'LSH-MIPS'),
        ('pca', 'PCA-MIPS'),
        ('diamond', 'Diamond-MSIPS'),
    )
    for marker, label in legend_table:
        if marker in log:
            return label
    return log
def getlengend_greey_comp(log):
    """Legend names for the greedy-variant comparison plots.

    Falls back to the raw path when no known marker is present.
    """
    for marker, label in (
            ('Greedy', 'Improved-Greedy-MIPS with Selection Tree'),
            ('Hgreedy', 'Improved-Greedy-MIPS with Max Heap'),
            ('greedy', 'Original-Greedy-MIPS with Selection Tree')):
        if marker in log:
            return label
    return log
def getcoord(log):
    # Parse one log file into a curve [x_values, y_values, log_path].
    # Log lines consist of alternating "key value" token pairs; the XLABEL
    # and YLABEL columns are pulled from every line containing both keys,
    # then sorted together in descending x order.
    # NOTE(review): Python 2 code -- `map` returns a list here; under
    # Python 3 the zip/sort below would consume one-shot map iterators.
    x = []
    y = []
    init = []
    for line in open(log, 'r'):
        if line.strip() == "" : continue
        if XLABEL == 'rank':
            # for rank curves keep only the first outer iteration
            if 'oiter 1 ' not in line: continue
        line = line.split()
        if XLABEL not in line or YLABEL not in line : continue
        # keys sit at even token positions, their values at the odd ones
        idx = line[::2].index(XLABEL)
        x += [line[1::2][idx]]
        idx = line[::2].index(YLABEL)
        y += [line[1::2][idx]]
    #x = map(float, x)
    x = map(float, x)
    y = map(float, y)
    x, y= zip(*sorted(zip(x,y), reverse=True))
    return [x, y, log]
def transform2reletive(curves):
    # Rescale every curve's y-values in place to relative error
    # |(y - m) / m|, where m is the global minimum over *all* curves.
    # (Function name typo is kept -- callers may already reference it.)
    m = min(map(lambda x: min(x[1]) , curves))
    for i in range(len(curves)):
        curves[i][1] = map(lambda x: abs((x - m)/m), curves[i][1])
def scale(curves, s):
    # Convert each curve's x-axis in place from wall time to speedup s/x
    # over the naive baseline time s, first dropping points with
    # non-positive x (zip/filter removes them pairwise with their y's).
    for i in range(len(curves)):
        #curves[i][1] = [float(s)*(iter+1)/x for iter, x in enumerate(curves[i][1])]
        tmp = zip(*filter(lambda x : x[0] > 0, zip(curves[i][0], curves[i][1])))
        curves[i][0] = tmp[0]
        curves[i][1] = tmp[1]
        try :
            # 1e-9 guards against division by an exact zero time
            curves[i][0] = map(lambda x: s/(x+1e-9), curves[i][0])
        except :
            # NOTE(review): bare except hides the real error; the curve is
            # just printed for debugging and left unscaled.
            print curves[i]
def draw(curves, dataname, hline=None, naive=None, filename=None, legend=None):
    # Render one figure for `dataname`: each curve is (x, y, log_path),
    # already converted to speedup on the x-axis. Saves a PNG under figs/.
    # `hline` is currently unused (the axhline call below is commented out).
    global xmax, ymax
    import matplotlib
    # 'Agg' backend: render off-screen, no display required
    matplotlib.use('Agg')
    #matplotlib.rc('text',usetex=True)
    matplotlib.rc('font',family='serif')
    from matplotlib import pylab
    params = {'font.size': 18, 'axes.labelsize': 18, 'text.fontsize': 18, 'legend.fontsize': 16,'xtick.labelsize': 14,'ytick.labelsize': 14, 'axes.formatter.limits':(-3,3)}
    pylab.rcParams.update(params)
    pylab.figure()
    plots = []
    #pylab.axhline(y=hline, lw=1, c='gray', marker='.')
    for i in range(len(curves)):
        #change 'plot' to 'semilogx'/'semilogy'/'loglog' if you need it
        if dataname.lower() in ['news20', 'covtype', 'rcv1']: plotter = pylab.semilogx
        else: plotter = pylab.plot
        # liblinear curves get a fixed colour/style; others cycle by index
        if 'liblinear' in curves[i][2]:
            tmp,= plotter(curves[i][0], curves[i][1],
                    lw=3, c=mycolor(4), ls=mylinestyle(4))
        else :
            tmp,= plotter(curves[i][0], curves[i][1],
                    lw=4, c=mycolor(i), ls=mylinestyle(i))
        plots += [tmp]
    #pylab.axvline(x=894956000)
    if xmax!=None: pylab.xlim(xmax=xmax)
    if ymax!=None: pylab.ylim(ymax=ymax)
    if xmin!=None: pylab.xlim(xmin=xmin)
    if ymin!=None: pylab.ylim(ymin=ymin)
    if naive: pylab.xlabel('Speedup over naive approach ({0} s)'.format(naive), fontsize='large')
    else : pylab.xlabel('Speedup over naive approach', fontsize='large')
    # y-axis label depends on the metric kind (precision@k, nDCG@k, other)
    if YLABEL.startswith('p@'):
        pylab.ylabel('Performance (prec@{0})'.format(YLABEL.split('@')[-1]), fontsize='large')
    elif YLABEL.startswith('n@'):
        pylab.ylabel('Performance (nDCG@{0})'.format(YLABEL.split('@')[-1]), fontsize='large')
    else :
        pylab.ylabel('Performance ({0})'.format(YLABEL), fontsize='large')
    # figure title: dataset name plus its (n, k) dimensions
    title = dataname
    tmpm = '17,770' if 'netflix' in dataname else '624,961'
    if 'pos' in dataname or 'syn' in dataname:
        # synthetic sets encode sizes in the name, e.g. pos.mXX.dYY
        m = int(dataname.split('.')[1][1:])
        d = int(dataname.split('.')[2][1:])
        if 'pos' in dataname:
            title = 'syn-uniform ($n=2^{%d}, k=2^{%d}$)'%(m,d)
        else :
            title = 'syn-normal ($n=2^{%d}, k=2^{%d}$)'%(m,d)
    elif dataname in ['netflix','yahoo']:
        title = '%s ($n=%s, k=100$)'%(dataname,tmpm)
    elif dataname == 'netflix50':
        title = '%s ($n=%s, k=50$)'%('netflix',tmpm)
    elif dataname == 'netflix200':
        title = '%s ($n=%s, k=200$)'%('netflix',tmpm)
    elif dataname == 'yahoo50':
        title = '%s ($n=%s, k=50$)'%('yahoo',tmpm)
    elif dataname == 'yahoo200':
        title = '%s ($n=%s, k=200$)'%('yahoo',tmpm)
    pylab.title('%s'%(title), fontsize='large')
    if legend == None: legend = getlegend
    pylab.legend(plots, map(lambda x: legend(x[2]), curves), loc='best')
    if filename:
        pylab.savefig('figs/%s-%s-%s.%s'%(dataname.lower(), filename, YLABEL, FILETYPE), format=FILETYPE)
    else :
        pylab.savefig('figs/%s-comp-%s.%s'%(dataname.lower(), YLABEL, FILETYPE), format=FILETYPE)
def get_naive(data):
    # Wall time (seconds) of the naive exhaustive MIPS run, read from the
    # 4th whitespace token of logs/<data>-naive.raw; used as the speedup
    # baseline in scale()/draw().
    return float(open('logs/{0}-naive.raw'.format(data)).read().split()[3])
# default solver list; overwritten below before the figure loop runs
solver = ['greedy', 'pca', 'lsh', 'sample', 'Greedy', 'Hgreedy']
def go_one(data, solver, filename=None, legend=None):
    # Plot one dataset: load every solver's log, convert wall time to
    # speedup over the naive baseline, and save the comparison figure.
    logs = []
    for s in solver:
        if 'pos' not in data and s == 'sample':
            # real datasets use Diamond-MSIPS in place of Sample-MIPS
            s = 'diamond'
        logs += ['logs/%s-%s.raw' %(data,s)]
    print logs
    curves = map(getcoord, logs)
    naive = get_naive(data)
    scale(curves, naive)
    draw(curves, data, naive=naive, filename=filename, legend=legend)
# Ensure the output directory exists, then draw one comparison figure per
# (metric, dataset) pair.
if not path.exists('figs'):
    os.system('mkdir -p figs')
solver = ['Greedy', 'pca', 'lsh', 'sample']
for label in ['p@1', 'n@1', 'p@5', 'n@5', 'p@10', 'n@10'] :
    YLABEL = label
    for d in datasets:
        # default x-range (speedup) for the real datasets
        xmax = 200
        if d in synthetic_set:
            # synthetic sets get tighter x-ranges
            if d.startswith('pos'):
                xmax = 60
            else:
                xmax = 150
        go_one(d, solver)
|
<filename>draw-figs.py<gh_stars>1-10
#!/usr/bin/env python
import sys
from os import path
import os
from data_info import *
#BASIC Options
#TITLE = 'data size v.s. ROC'
XLABEL = 'updates'
XLABEL = 'time'
#XLABEL = 'rank'
XLABEL = 'cputime'
XLABEL = 'iter'
XLABEL = 'walltime'
XLABEL = 'time'
YLABEL = r'obj'
YLABEL = 'cputime'
YLABEL = 'training-LL'
YLABEL = 'walltime'
YLABEL = r'p@10'
FILETYPE = 'png'
TITLE = '%s v.s. %s ' % (XLABEL, YLABEL)
ymin = None
ymax = None
xmax = None
xmin = None
ymax = 105
xmax = 200
COLORS = 'blue,red,black,green,cyan,magenta,chartreuse,blueviolet,forestgreen,black,aqua'.split(',')
LINESTYLES = '-,--,-.,:'.split(',')
MARKERS = 'o,s,^,v,*,+,x,.'.split(',')
mycolor = lambda i: COLORS[i%len(COLORS)]
mylinestyle = lambda i: LINESTYLES[i%len(LINESTYLES)]
mymarker = lambda i: MARKERS[i%len(MARKERS)]
logs = sys.argv[1:]
def getlegend(log):
if 'Greedy' in log: return 'Greedy-MIPS'
elif 'Hgreedy' in log: return 'Improved-Greedy-MIPS-heap'
elif 'greedy' in log: return 'Greedy-MIPS'
elif 'sample' in log: return 'Sample-MIPS'
elif 'lsh' in log: return 'LSH-MIPS'
elif 'pca' in log: return 'PCA-MIPS'
elif 'diamond' in log: return 'Diamond-MSIPS'
else: return log
def getlengend_greey_comp(log):
if 'Greedy' in log : return 'Improved-Greedy-MIPS with Selection Tree'
elif 'Hgreedy' in log : return 'Improved-Greedy-MIPS with Max Heap'
elif 'greedy' in log : return 'Original-Greedy-MIPS with Selection Tree'
else : return log
def getcoord(log):
x = []
y = []
init = []
for line in open(log, 'r'):
if line.strip() == "" : continue
if XLABEL == 'rank':
if 'oiter 1 ' not in line: continue
line = line.split()
if XLABEL not in line or YLABEL not in line : continue
idx = line[::2].index(XLABEL)
x += [line[1::2][idx]]
idx = line[::2].index(YLABEL)
y += [line[1::2][idx]]
#x = map(float, x)
x = map(float, x)
y = map(float, y)
x, y= zip(*sorted(zip(x,y), reverse=True))
return [x, y, log]
def transform2reletive(curves):
m = min(map(lambda x: min(x[1]) , curves))
for i in range(len(curves)):
curves[i][1] = map(lambda x: abs((x - m)/m), curves[i][1])
def scale(curves, s):
for i in range(len(curves)):
#curves[i][1] = [float(s)*(iter+1)/x for iter, x in enumerate(curves[i][1])]
tmp = zip(*filter(lambda x : x[0] > 0, zip(curves[i][0], curves[i][1])))
curves[i][0] = tmp[0]
curves[i][1] = tmp[1]
try :
curves[i][0] = map(lambda x: s/(x+1e-9), curves[i][0])
except :
print curves[i]
def draw(curves, dataname, hline=None, naive=None, filename=None, legend=None):
global xmax, ymax
import matplotlib
matplotlib.use('Agg')
#matplotlib.rc('text',usetex=True)
matplotlib.rc('font',family='serif')
from matplotlib import pylab
params = {'font.size': 18, 'axes.labelsize': 18, 'text.fontsize': 18, 'legend.fontsize': 16,'xtick.labelsize': 14,'ytick.labelsize': 14, 'axes.formatter.limits':(-3,3)}
pylab.rcParams.update(params)
pylab.figure()
plots = []
#pylab.axhline(y=hline, lw=1, c='gray', marker='.')
for i in range(len(curves)):
#change 'plot' to 'semilogx'/'semilogy'/'loglog' if you need it
if dataname.lower() in ['news20', 'covtype', 'rcv1']: plotter = pylab.semilogx
else: plotter = pylab.plot
if 'liblinear' in curves[i][2]:
tmp,= plotter(curves[i][0], curves[i][1],
lw=3, c=mycolor(4), ls=mylinestyle(4))
else :
tmp,= plotter(curves[i][0], curves[i][1],
lw=4, c=mycolor(i), ls=mylinestyle(i))
plots += [tmp]
#pylab.axvline(x=894956000)
if xmax!=None: pylab.xlim(xmax=xmax)
if ymax!=None: pylab.ylim(ymax=ymax)
if xmin!=None: pylab.xlim(xmin=xmin)
if ymin!=None: pylab.ylim(ymin=ymin)
if naive: pylab.xlabel('Speedup over naive approach ({0} s)'.format(naive), fontsize='large')
else : pylab.xlabel('Speedup over naive approach', fontsize='large')
if YLABEL.startswith('p@'):
pylab.ylabel('Performance (prec@{0})'.format(YLABEL.split('@')[-1]), fontsize='large')
elif YLABEL.startswith('n@'):
pylab.ylabel('Performance (nDCG@{0})'.format(YLABEL.split('@')[-1]), fontsize='large')
else :
pylab.ylabel('Performance ({0})'.format(YLABEL), fontsize='large')
title = dataname
tmpm = '17,770' if 'netflix' in dataname else '624,961'
if 'pos' in dataname or 'syn' in dataname:
m = int(dataname.split('.')[1][1:])
d = int(dataname.split('.')[2][1:])
if 'pos' in dataname:
title = 'syn-uniform ($n=2^{%d}, k=2^{%d}$)'%(m,d)
else :
title = 'syn-normal ($n=2^{%d}, k=2^{%d}$)'%(m,d)
elif dataname in ['netflix','yahoo']:
title = '%s ($n=%s, k=100$)'%(dataname,tmpm)
elif dataname == 'netflix50':
title = '%s ($n=%s, k=50$)'%('netflix',tmpm)
elif dataname == 'netflix200':
title = '%s ($n=%s, k=200$)'%('netflix',tmpm)
elif dataname == 'yahoo50':
title = '%s ($n=%s, k=50$)'%('yahoo',tmpm)
elif dataname == 'yahoo200':
title = '%s ($n=%s, k=200$)'%('yahoo',tmpm)
pylab.title('%s'%(title), fontsize='large')
if legend == None: legend = getlegend
pylab.legend(plots, map(lambda x: legend(x[2]), curves), loc='best')
if filename:
pylab.savefig('figs/%s-%s-%s.%s'%(dataname.lower(), filename, YLABEL, FILETYPE), format=FILETYPE)
else :
pylab.savefig('figs/%s-comp-%s.%s'%(dataname.lower(), YLABEL, FILETYPE), format=FILETYPE)
def get_naive(data):
return float(open('logs/{0}-naive.raw'.format(data)).read().split()[3])
solver = ['greedy', 'pca', 'lsh', 'sample', 'Greedy', 'Hgreedy']
def go_one(data, solver, filename=None, legend=None):
logs = []
for s in solver:
if 'pos' not in data and s == 'sample':
s = 'diamond'
logs += ['logs/%s-%s.raw' %(data,s)]
print logs
curves = map(getcoord, logs)
naive = get_naive(data)
scale(curves, naive)
draw(curves, data, naive=naive, filename=filename, legend=legend)
if not path.exists('figs'):
os.system('mkdir -p figs')
solver = ['Greedy', 'pca', 'lsh', 'sample']
for label in ['p@1', 'n@1', 'p@5', 'n@5', 'p@10', 'n@10'] :
YLABEL = label
for d in datasets:
xmax = 200
if d in synthetic_set:
if d.startswith('pos'):
xmax = 60
else:
xmax = 150
go_one(d, solver)
|
en
| 0.199459
|
#!/usr/bin/env python #BASIC Options #TITLE = 'data size v.s. ROC' #XLABEL = 'rank' #x = map(float, x) #curves[i][1] = [float(s)*(iter+1)/x for iter, x in enumerate(curves[i][1])] #matplotlib.rc('text',usetex=True) #pylab.axhline(y=hline, lw=1, c='gray', marker='.') #change 'plot' to 'semilogx'/'semilogy'/'loglog' if you need it #pylab.axvline(x=894956000)
| 2.359351
| 2
|
Sofware/navigation.py
|
Mark-MDO47/PiPod
| 63
|
6629088
|
import csv
class menu():
menuDict = {
"selectedItem": 0,
"Main": ["Music", "Settings"], # ["Music", "Scripts", "Settings"]
"Music": ["Artists", "Albums", "Tracks", "Queue"],
"Scripts": [],
"Artists": [],
"Albums": [],
"Tracks": [],
"list": [],
"Queue": [],
"Settings": ["Sleep", "Shutdown", "Reboot", "Update library"],
"current": "musicController",
"history": [],
}
def __init__(self):
pass
def up(self):
if self.menuDict["selectedItem"] > 0:
self.menuDict["selectedItem"] -= 1
return None
def down(self):
if self.menuDict["current"] == "Queue" and self.menuDict["selectedItem"] < len(self.menuDict[self.menuDict["current"]]):
self.menuDict["selectedItem"] += 1
elif self.menuDict["selectedItem"] < len(self.menuDict[self.menuDict["current"]]) - 1:
self.menuDict["selectedItem"] += 1
return None
def left(self):
self.menuDict["selectedItem"] = 0
if self.menuDict["history"]: # check if history is empty
self.menuDict["current"] = self.menuDict["history"][-1::][0]
self.menuDict["history"].pop()
else:
self.menuDict["current"] = "musicController"
return None
def right(self):
if self.menuDict["current"] == "list" or self.menuDict["current"] == "Tracks": # move selected item to queue
self.menuDict["Queue"].append(menu[menu["current"]][self.menuDict["selectedItem"]])
elif self.menuDict["current"] == "Artists": # move selected artist to queue
for item in self.menuDict["Tracks"]:
if item[1] == self.menuDict["Artists"][self.menuDict["selectedItem"]]:
self.menuDict["Queue"].append(item)
elif self.menuDict["current"] == "Albums": # move selected album to queue
for item in self.menuDict["Tracks"]:
if item[2] == self.menuDict["Albums"][self.menuDict["selectedItem"]]:
self.menuDict["Queue"].append(item)
return "updateList"
def gotomenu(self):
if self.menuDict["current"] == "musicController":
self.menuDict["selectedItem"] = 0
self.menuDict["current"] = "Main"
return None
def select(self):
if self.menuDict["current"] == "Artists":
tempList = []
for item in self.menuDict["Tracks"]:
if item[1] == self.menuDict["Artists"][self.menuDict["selectedItem"]]:
tempList.append(item)
self.menuDict["list"] = tempList
self.menuDict["current"] = "list"
self.menuDict["selectedItem"] = 0
elif self.menuDict["current"] == "Albums":
tempList = []
for item in self.menuDict["Tracks"]:
if item[2] == self.menuDict["Albums"][self.menuDict["selectedItem"]]:
tempList.append(item)
self.menuDict["list"] = tempList
self.menuDict["current"] = "list"
self.menuDict["selectedItem"] = 0
elif self.menuDict["current"] == "Queue":
if self.menuDict["Queue"]:
return "playAtIndex"
elif self.menuDict["current"] == "list" or self.menuDict["current"] == "Tracks":
if self.menuDict["Queue"]:
for item in list(self.menuDict[self.menuDict["current"]]):
if item not in self.menuDict["Queue"]:
self.menuDict["Queue"].append(item)
else:
self.menuDict["Queue"] = list(
self.menuDict[self.menuDict["current"]]) # copy the list where the song is selected to the queue
return "play"
self.menuDict["Queue"].remove(self.menuDict[self.menuDict["current"]][self.menuDict["selectedItem"]]) # Remove selected
self.menuDict["Queue"].insert(0, self.menuDict[self.menuDict["current"]][self.menuDict["selectedItem"]]) # Put selected at first position
elif self.menuDict["current"] == "Settings":
if self.menuDict["Settings"][self.menuDict["selectedItem"]] == "Update library":
return "updateLibrary"
elif self.menuDict["Settings"][self.menuDict["selectedItem"]] == "Sleep":
return "toggleSleep"
elif self.menuDict["Settings"][self.menuDict["selectedItem"]] == "Shutdown":
return "shutdown"
elif self.menuDict["Settings"][self.menuDict["selectedItem"]] == "Reboot":
return "reboot"
else:
if self.menuDict[self.menuDict["current"]]: # check if empty
self.menuDict["history"].append(self.menuDict["current"]) # update history
self.menuDict["current"] = self.menuDict[self.menuDict["current"]][self.menuDict["selectedItem"]] # go to next menu
self.menuDict["selectedItem"] = 0
return None
    def loadMetadata(self):
        """Reload Artists/Albums/Tracks from the info.csv library index.

        Each CSV row is expected as [filename, artist, album, title];
        artist/album are whitespace-stripped and Title-Cased, tracks are
        sorted by title.
        """
        # NOTE(review): file is opened without a `with` block; the
        # try/finally below does close it, but `with open(...)` would be safer.
        file = open("info.csv", "rt")
        self.menuDict["Artists"] = []
        self.menuDict["Albums"] = []
        self.menuDict["Tracks"] = []
        metadata = []
        try:
            reader = csv.reader(file)
            for row in reader:
                artistClear = row[1].lstrip().lower().title()
                albumClear = row[2].lstrip().lower().title()
                # NOTE(review): `is not ""` is an identity comparison and only
                # works by CPython string-interning accident; should be `!= ""`.
                if artistClear is not "":
                    if artistClear not in self.menuDict["Artists"]:
                        self.menuDict["Artists"].append(artistClear)
                if albumClear is not "":
                    if albumClear not in self.menuDict["Albums"]:
                        self.menuDict["Albums"].append(albumClear)
                if row[3].lstrip() is not "":
                    metadata.append(
                        [row[0], artistClear, albumClear, row[3].lstrip()])  # [filename, artist, album, title]
        finally:
            file.close()
        self.menuDict["Artists"].sort()
        self.menuDict["Albums"].sort()
        # keep the track list ordered by title (index 3)
        self.menuDict["Tracks"] = sorted(metadata, key=lambda meta: meta[3])
|
import csv
class menu():
    """Hierarchical menu state machine for a small music-player UI.

    All navigation state lives in the class-level ``menuDict``: the
    current menu name, the cursor position ("selectedItem"), the
    navigation history and the artist/album/track/queue lists.
    Methods return either None or a command string for the caller to
    execute ("play", "playAtIndex", "updateList", "shutdown", ...).
    """
    # NOTE: class attribute - the state is shared by every menu instance.
    menuDict = {
        "selectedItem": 0,
        "Main": ["Music", "Settings"],  # ["Music", "Scripts", "Settings"]
        "Music": ["Artists", "Albums", "Tracks", "Queue"],
        "Scripts": [],
        "Artists": [],
        "Albums": [],
        "Tracks": [],
        "list": [],
        "Queue": [],
        "Settings": ["Sleep", "Shutdown", "Reboot", "Update library"],
        "current": "musicController",
        "history": [],
    }

    def __init__(self):
        pass

    def up(self):
        """Move the cursor one row up (clamped at the first row)."""
        if self.menuDict["selectedItem"] > 0:
            self.menuDict["selectedItem"] -= 1
        return None

    def down(self):
        """Move the cursor one row down.

        In the Queue view the cursor may move one slot past the last
        entry (kept from the original code - presumably an 'end of
        queue' position; TODO confirm); elsewhere it is clamped to the
        last entry.
        """
        current_list = self.menuDict[self.menuDict["current"]]
        if self.menuDict["current"] == "Queue":
            if self.menuDict["selectedItem"] < len(current_list):
                self.menuDict["selectedItem"] += 1
        elif self.menuDict["selectedItem"] < len(current_list) - 1:
            self.menuDict["selectedItem"] += 1
        return None

    def left(self):
        """Return to the previous menu, or to the music controller."""
        self.menuDict["selectedItem"] = 0
        if self.menuDict["history"]:
            # pop() returns and removes the most recently visited menu
            self.menuDict["current"] = self.menuDict["history"].pop()
        else:
            self.menuDict["current"] = "musicController"
        return None

    def right(self):
        """Append the selection (track, artist or album) to the queue.

        Returns "updateList" so the caller can refresh the display.
        """
        current = self.menuDict["current"]
        selected = self.menuDict["selectedItem"]
        if current == "list" or current == "Tracks":
            # BUG FIX: the original indexed the class object
            # (menu[menu["current"]]) which raised a TypeError.
            self.menuDict["Queue"].append(self.menuDict[current][selected])
        elif current == "Artists":
            # queue every track whose artist matches the selection
            for track in self.menuDict["Tracks"]:
                if track[1] == self.menuDict["Artists"][selected]:
                    self.menuDict["Queue"].append(track)
        elif current == "Albums":
            # queue every track whose album matches the selection
            for track in self.menuDict["Tracks"]:
                if track[2] == self.menuDict["Albums"][selected]:
                    self.menuDict["Queue"].append(track)
        return "updateList"

    def gotomenu(self):
        """Enter the main menu from the music-controller screen."""
        if self.menuDict["current"] == "musicController":
            self.menuDict["selectedItem"] = 0
            self.menuDict["current"] = "Main"
        return None

    def select(self):
        """Activate the selected row.

        Returns a command string ("play", "playAtIndex", "updateLibrary",
        "toggleSleep", "shutdown", "reboot") or None.
        """
        current = self.menuDict["current"]
        selected = self.menuDict["selectedItem"]
        if current == "Artists":
            # show only the tracks of the chosen artist
            self.menuDict["list"] = [
                track for track in self.menuDict["Tracks"]
                if track[1] == self.menuDict["Artists"][selected]]
            self.menuDict["current"] = "list"
            self.menuDict["selectedItem"] = 0
        elif current == "Albums":
            # show only the tracks of the chosen album
            self.menuDict["list"] = [
                track for track in self.menuDict["Tracks"]
                if track[2] == self.menuDict["Albums"][selected]]
            self.menuDict["current"] = "list"
            self.menuDict["selectedItem"] = 0
        elif current == "Queue":
            if self.menuDict["Queue"]:
                return "playAtIndex"
        elif current == "list" or current == "Tracks":
            if self.menuDict["Queue"]:
                # merge the visible list into the existing queue
                for track in list(self.menuDict[current]):
                    if track not in self.menuDict["Queue"]:
                        self.menuDict["Queue"].append(track)
            else:
                # empty queue: copy the visible list and start playback
                self.menuDict["Queue"] = list(self.menuDict[current])
                return "play"
            # move the selected track to the front of the queue
            track = self.menuDict[current][selected]
            self.menuDict["Queue"].remove(track)
            self.menuDict["Queue"].insert(0, track)
        elif current == "Settings":
            action = self.menuDict["Settings"][selected]
            if action == "Update library":
                return "updateLibrary"
            if action == "Sleep":
                return "toggleSleep"
            if action == "Shutdown":
                return "shutdown"
            if action == "Reboot":
                return "reboot"
        else:
            # generic sub-menu navigation
            if self.menuDict[current]:  # ignore empty menus
                self.menuDict["history"].append(current)
                self.menuDict["current"] = self.menuDict[current][selected]
                self.menuDict["selectedItem"] = 0
        return None

    def loadMetadata(self):
        """Reload Artists/Albums/Tracks from the info.csv library index.

        Each CSV row is [filename, artist, album, title]; artist/album
        are stripped and Title-Cased, tracks are sorted by title.
        Fixes from the original: `with open(...)` instead of a manual
        close, and `!= ""` instead of the identity comparison `is not ""`.
        """
        self.menuDict["Artists"] = []
        self.menuDict["Albums"] = []
        self.menuDict["Tracks"] = []
        metadata = []
        with open("info.csv", "rt") as file:
            reader = csv.reader(file)
            for row in reader:
                artistClear = row[1].lstrip().lower().title()
                albumClear = row[2].lstrip().lower().title()
                if artistClear != "" and artistClear not in self.menuDict["Artists"]:
                    self.menuDict["Artists"].append(artistClear)
                if albumClear != "" and albumClear not in self.menuDict["Albums"]:
                    self.menuDict["Albums"].append(albumClear)
                if row[3].lstrip() != "":
                    # [filename, artist, album, title]
                    metadata.append([row[0], artistClear, albumClear, row[3].lstrip()])
        self.menuDict["Artists"].sort()
        self.menuDict["Albums"].sort()
        self.menuDict["Tracks"] = sorted(metadata, key=lambda meta: meta[3])
|
en
| 0.812907
|
# ["Music", "Scripts", "Settings"] # check if history is empty # move selected item to queue # move selected artist to queue # move selected album to queue # copy the list where the song is selected to the queue # Remove selected # Put selected at first position # check if empty # update history # go to next menu # [filename, artist, album, title]
| 3.612006
| 4
|
Projects/Client-Server-Application/utils/unpriv_port.py
|
CristianCotovanu/PCom
| 0
|
6629089
|
<reponame>CristianCotovanu/PCom<gh_stars>0
MIN_VALUE = 1024   # lowest unprivileged (non-root) port
MAX_VALUE = 65535  # highest valid TCP/UDP port
def unprivileged_port_type(value, min_value=MIN_VALUE, max_value=MAX_VALUE):
    """Validate *value* as an unprivileged port number.

    CONSISTENCY FIX: the min_value default was a hard-coded 1024 instead
    of MIN_VALUE (same value, so callers are unaffected).

    Parameters
    ----------
    value:
        The candidate port (string or int).
    min_value: int
        Inclusive lower bound (defaults to MIN_VALUE).
    max_value: int
        Inclusive upper bound (defaults to MAX_VALUE).

    Returns
    -------
    int: the parsed port.

    Raises
    ------
    ValueError: if *value* is not an integer or is out of range.
    """
    try:
        int_value = int(value)
    except ValueError:
        raise ValueError('The port should be an integer.')
    if min_value <= int_value <= max_value:
        return int_value
    raise ValueError(
        'The port should be between {min_value} and {max_value}'.format(min_value=min_value, max_value=max_value))
def get_unprivileged_port_meta(var_name: str, min_value=MIN_VALUE, max_value=MAX_VALUE):
    """Return the '(min-max)' range hint for a port CLI argument.

    *var_name* is unused but kept for interface compatibility.
    """
    return '({min_value}-{max_value})'.format(min_value=min_value,
                                              max_value=max_value)
|
MIN_VALUE = 1024
MAX_VALUE = 65535
def unprivileged_port_type(value, min_value=1024, max_value=MAX_VALUE):
    """Parse and range-check *value* as an unprivileged port number.

    Raises ValueError when *value* is not an integer or lies outside
    [min_value, max_value]; returns the port as an int otherwise.
    """
    try:
        port = int(value)
    except ValueError:
        raise ValueError('The port should be an integer.')
    if not (min_value <= port <= max_value):
        raise ValueError(
            'The port should be between {min_value} and {max_value}'.format(min_value=min_value, max_value=max_value))
    return port
def get_unprivileged_port_meta(var_name: str, min_value=MIN_VALUE, max_value=MAX_VALUE):
    """Return the '(min-max)' range hint for a port CLI argument."""
    return '({min_value}-{max_value})'.format(min_value=min_value, max_value=max_value)
|
none
| 1
| 3.414111
| 3
|
|
simulation.py
|
doubianimehdi/DataEngineerUberBootCamp
| 0
|
6629090
|
<gh_stars>0
"""
Ce dummy producer va récupérer des trajets en 2019-03 à la même heure, minute et seconde afin
de simuler des trajets effectués en réel.
"""
import os
import sys
import json
import time
import warnings
import pandas as pd
import logging
from datetime import datetime, timedelta
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.FileHandler("/tmp/log_simulation.log", "a+")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
start_date = datetime.now()
warnings.filterwarnings('ignore')
# TODO : Remplacer par le serveur Confluent
CONFLUENT_URL = "192.168.127.12:9092"
SCHEMA_REGISTRY_URL = "http://192.168.127.12:8081"
# On ne peut pas sérialiser par défaut un DateTime Python
def json_converter(o):
    """``json.dumps`` fallback serializer: datetimes become their str() form.

    Any other type falls through and yields None (mirrors the original
    behaviour; it does not raise).
    """
    return str(o) if isinstance(o, datetime) else None
# Les colonnes sources du fichier
source_columns = "VendorID,tpep_pickup_datetime,tpep_dropoff_datetime," \
+ "passenger_count,trip_distance,RatecodeID," \
+ "payment_type,fare_amount,extra,mta_tax,tip_amount,tolls_amount,improvement_surcharge," \
+ "total_amount,PULocationID,DOLocationID"
source_columns = source_columns.split(",")
# Les colonnes cibles dans BigQuery
target_columns = "vendor_id,pickup_datetime,dropoff_datetime,passenger_count," \
+ "trip_distance,rate_code," \
+ "payment_type,fare_amount,extra,mta_tax,tip_amount," \
+ "tolls_amount,improvement_surcharge,total_amount,PULocationID,DOLocationID"
target_columns = target_columns.split(",")
data = None
print("Chargement des données ...")
for chunk in pd.read_csv(
"yellow_tripdata_2019-03.csv",
chunksize=1000000):
data = chunk
break
# On récupère les informations temporelles pour simuler des données temps réel
data['tpep_pickup_datetime'] = pd.to_datetime(data['tpep_pickup_datetime'])
data['tpep_dropoff_datetime'] = pd.to_datetime(data['tpep_dropoff_datetime'])
data['duration'] = (data['tpep_dropoff_datetime'] - data['tpep_pickup_datetime']).dt.seconds
data['pickup_hour'] = data['tpep_pickup_datetime'].dt.hour
data['pickup_minute'] = data['tpep_pickup_datetime'].dt.minute
data['pickup_second'] = data['tpep_pickup_datetime'].dt.second
data['dropoff_hour'] = data['tpep_dropoff_datetime'].dt.hour
data['dropoff_minute'] = data['tpep_dropoff_datetime'].dt.minute
data['dropoff_second'] = data['tpep_dropoff_datetime'].dt.second
def get_current_trips():
    """Return the 2019-03 trips whose drop-off time-of-day matches *now*.

    Filters the module-level ``data`` DataFrame on the precomputed
    dropoff hour/minute/second columns, re-stamps the drop-off to the
    current instant, back-dates the pickup by the recorded duration,
    converts both to UNIX seconds and renames the frame to the BigQuery
    column layout (``target_columns``).

    NOTE(review): writes into a ``.loc`` slice of ``data`` (pandas
    SettingWithCopy territory); warnings are silenced module-wide.
    """
    now = datetime.now()
    trips = data.loc[
        (data['dropoff_hour'] == now.hour) &
        (data['dropoff_minute'] == now.minute) &
        (data['dropoff_second'] == now.second)
        , :]
    trips['tpep_dropoff_datetime'] = datetime.now()
    trips['VendorID'] = trips['VendorID'].astype(str)
    # Convert to UNIX timestamps: duration is in seconds, scaled to
    # nanoseconds before the subtraction from the datetime64 column.
    trips['tpep_pickup_datetime'] = trips['tpep_dropoff_datetime'].sub(
        trips['duration'] * 1000000000
    ).values.astype(int) // 10 ** 9
    trips['tpep_dropoff_datetime'] = trips['tpep_dropoff_datetime'].values.astype(int) // 10 ** 9
    trips = trips[source_columns]
    trips.columns = target_columns
    return trips
schema = """{
"type": "record",
"name": "ongoing_trips_schema",
"namespace": "com.wepay.kafka.connect.bigquery.schemaregistry.schemaretriever.SchemaRegistrySchemaRetriever",
"fields": [
{"name": "vendor_id", "type": ["string", "null"]},
{"name": "pickup_datetime", "type": ["int", "null"]},
{"name": "dropoff_datetime", "type": ["int", "null"]},
{"name": "passenger_count", "type": ["int", "null"]},
{"name": "trip_distance", "type": ["float", "null"]},
{"name": "rate_code", "type": ["int", "null"]},
{"name": "payment_type", "type": ["int", "null"]},
{"name": "fare_amount", "type": ["float", "null"]},
{"name": "extra", "type": ["float", "null"]},
{"name": "mta_tax", "type": ["float", "null"]},
{"name": "tip_amount", "type": ["float", "null"]},
{"name": "tolls_amount", "type": ["float", "null"]},
{"name": "improvement_surcharge", "type": ["float", "null"]},
{"name": "total_amount", "type": ["float", "null"]},
{"name": "PULocationID", "type": ["int", "null"]},
{"name": "DOLocationID", "type": ["int", "null"]}
]
}"""
avro_schema = avro.loads(schema)
producer = AvroProducer({
'bootstrap.servers': CONFLUENT_URL,
'schema.registry.url': SCHEMA_REGISTRY_URL
}, default_value_schema=avro_schema)
start_time = time.time()
logger.info("Start producing ...")
keep_producing = True
# Produce matching trips every 5 seconds for one hour.
while keep_producing:
    try:
        trips = get_current_trips()
        # iterrows yields (index, Series) tuples
        for _, trip in trips.iterrows():
            producer.produce(
                topic='ongoing-trips',
                value=trip.to_dict())
        # Throttle to one batch per 5 s to limit bandwidth (credit savings)
        time.sleep(5.0 - ((time.time() - start_time) % 5.0))
        keep_producing = datetime.now() <= start_date + timedelta(hours=1)
    except Exception as e:
        # FIX: the original passed *e* as a logging arg with no format
        # placeholder, which makes the logging module raise a formatting
        # error instead of recording the message.
        # NOTE(review): on a persistent error this loop neither sleeps nor
        # re-evaluates keep_producing - it spins; consider restructuring.
        logger.error("Error : %s", e)
logger.info("Fin de la simulation.")
|
"""
Ce dummy producer va récupérer des trajets en 2019-03 à la même heure, minute et seconde afin
de simuler des trajets effectués en réel.
"""
import os
import sys
import json
import time
import warnings
import pandas as pd
import logging
from datetime import datetime, timedelta
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.FileHandler("/tmp/log_simulation.log", "a+")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
start_date = datetime.now()
warnings.filterwarnings('ignore')
# TODO : Remplacer par le serveur Confluent
CONFLUENT_URL = "192.168.127.12:9092"
SCHEMA_REGISTRY_URL = "http://192.168.127.12:8081"
# On ne peut pas sérialiser par défaut un DateTime Python
def json_converter(o):
    """``json.dumps`` fallback serializer: datetimes become their str() form.

    Any other type falls through and yields None (mirrors the original
    behaviour; it does not raise).
    """
    return str(o) if isinstance(o, datetime) else None
# Les colonnes sources du fichier
source_columns = "VendorID,tpep_pickup_datetime,tpep_dropoff_datetime," \
+ "passenger_count,trip_distance,RatecodeID," \
+ "payment_type,fare_amount,extra,mta_tax,tip_amount,tolls_amount,improvement_surcharge," \
+ "total_amount,PULocationID,DOLocationID"
source_columns = source_columns.split(",")
# Les colonnes cibles dans BigQuery
target_columns = "vendor_id,pickup_datetime,dropoff_datetime,passenger_count," \
+ "trip_distance,rate_code," \
+ "payment_type,fare_amount,extra,mta_tax,tip_amount," \
+ "tolls_amount,improvement_surcharge,total_amount,PULocationID,DOLocationID"
target_columns = target_columns.split(",")
data = None
print("Chargement des données ...")
for chunk in pd.read_csv(
"yellow_tripdata_2019-03.csv",
chunksize=1000000):
data = chunk
break
# On récupère les informations temporelles pour simuler des données temps réel
data['tpep_pickup_datetime'] = pd.to_datetime(data['tpep_pickup_datetime'])
data['tpep_dropoff_datetime'] = pd.to_datetime(data['tpep_dropoff_datetime'])
data['duration'] = (data['tpep_dropoff_datetime'] - data['tpep_pickup_datetime']).dt.seconds
data['pickup_hour'] = data['tpep_pickup_datetime'].dt.hour
data['pickup_minute'] = data['tpep_pickup_datetime'].dt.minute
data['pickup_second'] = data['tpep_pickup_datetime'].dt.second
data['dropoff_hour'] = data['tpep_dropoff_datetime'].dt.hour
data['dropoff_minute'] = data['tpep_dropoff_datetime'].dt.minute
data['dropoff_second'] = data['tpep_dropoff_datetime'].dt.second
def get_current_trips():
    """Return the 2019-03 trips whose drop-off time-of-day matches *now*.

    Filters the module-level ``data`` DataFrame on the precomputed
    dropoff hour/minute/second columns, re-stamps the drop-off to the
    current instant, back-dates the pickup by the recorded duration,
    converts both to UNIX seconds and renames the frame to the BigQuery
    column layout (``target_columns``).

    NOTE(review): writes into a ``.loc`` slice of ``data`` (pandas
    SettingWithCopy territory); warnings are silenced module-wide.
    """
    now = datetime.now()
    trips = data.loc[
        (data['dropoff_hour'] == now.hour) &
        (data['dropoff_minute'] == now.minute) &
        (data['dropoff_second'] == now.second)
        , :]
    trips['tpep_dropoff_datetime'] = datetime.now()
    trips['VendorID'] = trips['VendorID'].astype(str)
    # Convert to UNIX timestamps: duration is in seconds, scaled to
    # nanoseconds before the subtraction from the datetime64 column.
    trips['tpep_pickup_datetime'] = trips['tpep_dropoff_datetime'].sub(
        trips['duration'] * 1000000000
    ).values.astype(int) // 10 ** 9
    trips['tpep_dropoff_datetime'] = trips['tpep_dropoff_datetime'].values.astype(int) // 10 ** 9
    trips = trips[source_columns]
    trips.columns = target_columns
    return trips
schema = """{
"type": "record",
"name": "ongoing_trips_schema",
"namespace": "com.wepay.kafka.connect.bigquery.schemaregistry.schemaretriever.SchemaRegistrySchemaRetriever",
"fields": [
{"name": "vendor_id", "type": ["string", "null"]},
{"name": "pickup_datetime", "type": ["int", "null"]},
{"name": "dropoff_datetime", "type": ["int", "null"]},
{"name": "passenger_count", "type": ["int", "null"]},
{"name": "trip_distance", "type": ["float", "null"]},
{"name": "rate_code", "type": ["int", "null"]},
{"name": "payment_type", "type": ["int", "null"]},
{"name": "fare_amount", "type": ["float", "null"]},
{"name": "extra", "type": ["float", "null"]},
{"name": "mta_tax", "type": ["float", "null"]},
{"name": "tip_amount", "type": ["float", "null"]},
{"name": "tolls_amount", "type": ["float", "null"]},
{"name": "improvement_surcharge", "type": ["float", "null"]},
{"name": "total_amount", "type": ["float", "null"]},
{"name": "PULocationID", "type": ["int", "null"]},
{"name": "DOLocationID", "type": ["int", "null"]}
]
}"""
avro_schema = avro.loads(schema)
producer = AvroProducer({
'bootstrap.servers': CONFLUENT_URL,
'schema.registry.url': SCHEMA_REGISTRY_URL
}, default_value_schema=avro_schema)
start_time = time.time()
logger.info("Start producing ...")
keep_producing = True
# Produce matching trips every 5 seconds for one hour.
while keep_producing:
    try:
        trips = get_current_trips()
        # iterrows yields (index, Series) tuples
        for _, trip in trips.iterrows():
            producer.produce(
                topic='ongoing-trips',
                value=trip.to_dict())
        # Throttle to one batch per 5 s to limit bandwidth (credit savings)
        time.sleep(5.0 - ((time.time() - start_time) % 5.0))
        keep_producing = datetime.now() <= start_date + timedelta(hours=1)
    except Exception as e:
        # FIX: the original passed *e* as a logging arg with no format
        # placeholder, which makes the logging module raise a formatting
        # error instead of recording the message.
        # NOTE(review): on a persistent error this loop neither sleeps nor
        # re-evaluates keep_producing - it spins; consider restructuring.
        logger.error("Error : %s", e)
logger.info("Fin de la simulation.")
|
en
| 0.119256
|
Ce dummy producer va récupérer des trajets en 2019-03 à la même heure, minute et seconde afin de simuler des trajets effectués en réel. # TODO : Remplacer par le serveur Confluent # On ne peut pas sérialiser par défaut un DateTime Python # Les colonnes sources du fichier # Les colonnes cibles dans BigQuery # On récupère les informations temporelles pour simuler des données temps réel # Conversion en timestamp UNIX { "type": "record", "name": "ongoing_trips_schema", "namespace": "com.wepay.kafka.connect.bigquery.schemaregistry.schemaretriever.SchemaRegistrySchemaRetriever", "fields": [ {"name": "vendor_id", "type": ["string", "null"]}, {"name": "pickup_datetime", "type": ["int", "null"]}, {"name": "dropoff_datetime", "type": ["int", "null"]}, {"name": "passenger_count", "type": ["int", "null"]}, {"name": "trip_distance", "type": ["float", "null"]}, {"name": "rate_code", "type": ["int", "null"]}, {"name": "payment_type", "type": ["int", "null"]}, {"name": "fare_amount", "type": ["float", "null"]}, {"name": "extra", "type": ["float", "null"]}, {"name": "mta_tax", "type": ["float", "null"]}, {"name": "tip_amount", "type": ["float", "null"]}, {"name": "tolls_amount", "type": ["float", "null"]}, {"name": "improvement_surcharge", "type": ["float", "null"]}, {"name": "total_amount", "type": ["float", "null"]}, {"name": "PULocationID", "type": ["int", "null"]}, {"name": "DOLocationID", "type": ["int", "null"]} ] } # iterrows return une tuple (index, Series) # On augmente le délai pour ne pas saturer la bande passante (économies de crédits)
| 2.030288
| 2
|
users/models.py
|
mcfa77y/festival-playlist-generator
| 0
|
6629091
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.db import models
# from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with a free-form name."""
    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField("Name of User", blank=True, max_length=255)
    def __unicode__(self):
        # Python 2-style text representation: the login name.
        return self.username
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.db import models
# from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with a free-form name."""
    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField("Name of User", blank=True, max_length=255)
    def __unicode__(self):
        # Python 2-style text representation: the login name.
        return self.username
|
en
| 0.798878
|
# -*- coding: utf-8 -*- # from django.utils.translation import ugettext_lazy as _ # First Name and Last Name do not cover name patterns # around the globe.
| 2.035268
| 2
|
subjects/forms.py
|
rafaelmadaloz/i18n-scholl-system
| 88
|
6629092
|
from django.forms import ModelForm
from django import forms
from .models import Subject, SubjectCombination
class SubjectForm(ModelForm):
    """ModelForm for creating or editing a Subject (name + numeric code)."""
    class Meta:
        model = Subject
        fields = ['subject_name', 'subject_code']
        # Bootstrap styling: render both inputs with the form-control class.
        widgets = {
            'subject_name': forms.TextInput(attrs={'class': 'form-control'}),
            'subject_code': forms.NumberInput(attrs={'class': 'form-control'}),
        }
class SubjectCombinationForm(ModelForm):
    """ModelForm for a SubjectCombination (select_class + select_subject)."""
    class Meta:
        model = SubjectCombination
        fields = ['select_class', 'select_subject']
        # Bootstrap styling on both dropdowns.
        widgets = {
            'select_class': forms.Select(
                attrs={
                    'class': 'form-control'
                }
            ),
            'select_subject': forms.Select(
                attrs={
                    'class': 'form-control'
                }
            ),
        }
|
from django.forms import ModelForm
from django import forms
from .models import Subject, SubjectCombination
class SubjectForm(ModelForm):
    """ModelForm for creating or editing a Subject (name + numeric code)."""
    class Meta:
        model = Subject
        fields = ['subject_name', 'subject_code']
        # Bootstrap styling: render both inputs with the form-control class.
        widgets = {
            'subject_name': forms.TextInput(attrs={'class': 'form-control'}),
            'subject_code': forms.NumberInput(attrs={'class': 'form-control'}),
        }
class SubjectCombinationForm(ModelForm):
    """ModelForm for a SubjectCombination (select_class + select_subject)."""
    class Meta:
        model = SubjectCombination
        fields = ['select_class', 'select_subject']
        # Bootstrap styling on both dropdowns.
        widgets = {
            'select_class': forms.Select(
                attrs={
                    'class': 'form-control'
                }
            ),
            'select_subject': forms.Select(
                attrs={
                    'class': 'form-control'
                }
            ),
        }
|
none
| 1
| 2.275002
| 2
|
|
lib/oci_utils/migrate/system_tools.py
|
totalamateurhour/oci-utils
| 1
|
6629093
|
# oci-utils
#
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.
""" Module containing migrate platform system tools.
"""
import ipaddress
import logging
import os
import re
import shutil
import string
import subprocess
import threading
import uuid
from glob import glob
from oci_utils.migrate import ProgressBar
from oci_utils.migrate import error_msg
from oci_utils.migrate import migrate_data
from oci_utils.migrate import pause_msg
from oci_utils.migrate import result_msg
from oci_utils.migrate import terminal_dimension
from oci_utils.migrate.decorators import state_loop
from oci_utils.migrate.exception import OciMigrateException
_logger = logging.getLogger('oci-utils.system_tools')
def backup_dir(directory_name):
    """
    Backup a directory as path/bck_directory_name_[current_time]

    Parameters
    ----------
    directory_name: str
        The full path of the directory.

    Returns
    -------
        str: path of backup directory on success, None otherwise.
    """
    _logger.debug('__ Backup %s', directory_name)
    try:
        if not os.path.isdir(directory_name):
            _logger.warning('%s is not a directory.', directory_name)
            return None
        copy_target = generate_backup_name(directory_name)
        shutil.copytree(directory_name, copy_target)
        _logger.debug('Backup of %s succeeded.', directory_name)
        return copy_target
    except Exception as e:
        # best effort: any failure is reported and swallowed
        _logger.warning('Backup of %s failed: %s', directory_name, str(e))
        return None
def backup_file(file_name):
    """
    Backup a single file as path/bck_file_name_[current_time]

    Parameters
    ----------
    file_name: str
        The full path of the file.

    Returns
    -------
        str: path of backup file on success, None otherwise.
    """
    _logger.debug('__Backup %s', file_name)
    try:
        if not os.path.exists(file_name):
            _logger.debug('%s does not exist.', file_name)
            return None
        if os.path.isdir(file_name):
            _logger.warning('%s is a directory.', file_name)
            return None
        copy_target = generate_backup_name(file_name)
        shutil.copyfile(file_name, copy_target)
        _logger.debug('Backup of %s succeeded.', file_name)
        return copy_target
    except Exception as e:
        # best effort: any failure is reported and swallowed
        _logger.warning('Backup of %s failed: %s', file_name, str(e))
        return None
def enter_chroot(newroot):
    """
    Execute the chroot command.

    Parameters
    ----------
    newroot: str
        Full path of new root directory.

    Returns
    -------
        file descriptor, str, str: The file descriptor of the current root on
        success, path to restore, current working dir;
        raises an exception on failure.
    """
    _logger.debug('__ Entering chroot jail at %s.', newroot)
    root2return = -1
    current_dir = ''
    try:
        #
        # remember the current working directory for the caller to restore
        current_dir = os.getcwd()
        #
        # keep a handle on the old root, then change root
        root2return = os.open('/', os.O_RDONLY)
        os.chdir(newroot)
        os.chroot(newroot)
        _logger.debug('Changed root to %s.', newroot)
    except Exception as e:
        _logger.error(' Failed to change root to %s: %s', newroot, str(e))
        #
        # need to return environment: escape the partial chroot if any
        if root2return > 0:
            os.fchdir(root2return)
            os.chroot('.')
            os.close(root2return)
        raise OciMigrateException('Failed to change root to %s:' % newroot) from e
    # BUG FIX: pre-initialise so the except handler below cannot hit an
    # unbound 'newpath' if reading os.environ['PATH'] fails.
    newpath = ''
    try:
        #
        # adjust PATH to make sure the usual binary locations come last
        currentpath = os.environ['PATH']
        newpath = currentpath.replace('/bin', '')\
            .replace('/usr/bin', '')\
            .replace('/sbin', '')\
            .replace('/usr/sbin', '')\
            .replace('/usr/local/sbin', '')\
            .replace('::', ':') \
            + ':/root/bin:/bin:/usr/bin:/usr/sbin:/usr/local/sbin:/sbin'
        os.environ['PATH'] = newpath
        _logger.debug('Set path to %s', newpath)
        return root2return, currentpath, current_dir
    except Exception as e:
        _logger.error(' Failed to set path to %s: %s', newpath, str(e))
        raise OciMigrateException('Failed to set path to %s:' % newpath) from e
@state_loop(migrate_data.qemu_max_count)
def create_nbd():
    """
    Load the nbd (network block device) kernel module with up to 63
    partitions per device.

    Returns
    -------
        bool: True on success, False on failure, raises an exception on call
        error.
    """
    cmd = ['modprobe', 'nbd', 'max_part=63']
    _logger.debug('__ Running %s', cmd)
    try:
        if run_call_cmd(cmd) == 0:
            return True
        _logger.critical('  Failed to execute %s', cmd)
        raise OciMigrateException('\nFailed to execute %s' % cmd)
    except Exception as e:
        # the OciMigrateException raised above also lands here -> False
        _logger.critical('  Failed: %s', str(e))
        return False
@state_loop(migrate_data.qemu_max_count)
def exec_blkid(blkid_args):
    """
    Run a blkid command.

    Parameters
    ----------
    blkid_args: list
        The argument list for the blkid command.

    Returns
    -------
        str: decoded blkid output on success, None otherwise.
    """
    cmd = ['blkid'] + blkid_args
    _logger.debug('__ Running %s', cmd)
    try:
        pause_msg('test nbd devs', pause_flag='_OCI_EXEC')
        # blkid exits with 2 when nothing was found; accept it as a
        # valid (empty) result alongside 0.
        blkid_res = run_popen_cmd(cmd, valid_return=frozenset([0,2]))['output'].decode('utf-8')
        _logger.debug('success\n%s', blkid_res)
        return blkid_res
    except Exception as e:
        _logger.error(' %s failed: %s', cmd, str(e))
        return None
def exec_exists(executable):
    """
    Verify if executable exists in path.

    Parameters
    ----------
    executable: str
        The file to be tested.

    Returns
    -------
        str: full path on success, None otherwise.
    """
    _logger.debug('__ which %s', executable)
    located = shutil.which(executable)
    return located
def exec_ldconfig():
    """
    Executes ldconfig to update the shared library cache.

    Returns
    -------
        int: the ldconfig exit code (0 on success), raises an exception
        otherwise.
    """
    cmd = ['ldconfig']
    _logger.debug('__ Running %s', cmd)
    try:
        # pause_msg: interactive breakpoint hook when _OCI_EXEC is set
        pause_msg('running ldconfig', pause_flag='_OCI_EXEC')
        return run_call_cmd(cmd)
    except Exception as e:
        _logger.error(' %s command failed: %s', cmd, str(e))
        raise OciMigrateException('\n%s command failed:' % cmd) from e
def exec_lsblk(lsblk_args):
    """
    Run an lsblk command.

    Parameters
    ----------
    lsblk_args: list
        The argument list for the lsblk command.

    Returns
    -------
        str: decoded lsblk output on success; raises OciMigrateException
        otherwise.
    """
    cmd = ['lsblk'] + lsblk_args
    _logger.debug('__ Running %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        lsblk_res = run_popen_cmd(cmd)['output'].decode('utf-8')
        _logger.debug('Success\n%s', lsblk_res)
        return lsblk_res
    except Exception as e:
        _logger.error(' %s failed: %s', cmd, str(e))
        raise OciMigrateException('%s failed:' % cmd) from e
def exec_lvscan(lvscan_args):
    """
    Scan the system for logical volumes.

    Parameters
    ----------
    lvscan_args: list
        list of strings, arguments for lvscan

    Returns
    -------
        list: A list of strings, the output lines of lvscan with the given
        arguments on success; raises an exception on failure.
    """
    cmd = ['lvscan'] + lvscan_args
    _logger.debug('__ Running: %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_LVM')
    try:
        _logger.debug('command: %s', cmd)
        output = run_popen_cmd(cmd)['output'].decode('utf-8')
        _logger.debug('Logical volumes scanned:\n%s', str(output))
        return output.splitlines()
    except Exception as e:
        #
        # lvscan failed
        _logger.critical('  Failed to scan for logical volumes: %s', str(e))
        raise OciMigrateException('Failed to scan for logical volume:') from e
def exec_mkdir(dirname):
    """
    Create a directory, including missing parents, if it does not exist.

    Parameters
    ----------
    dirname: str
        The full path of the directory.

    Returns
    -------
    bool:
        True on success (also when the directory already exists);
        raises OciMigrateException on failure.
    """
    _logger.debug('__ Creating %s.', dirname)
    try:
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        else:
            _logger.debug('Directory %s already exists', dirname)
        return True
    except Exception as e:
        # FIX: the exception used to carry an empty message, hiding the
        # path that failed.
        raise OciMigrateException('Failed to create directory %s' % dirname) from e
def mount_pseudo(rootdir):
    """
    Remount proc, sys and dev.

    Mounts a fresh proc and bind-mounts /dev and /sys under *rootdir*
    so tools run inside the chroot see working pseudo filesystems.

    Parameters
    ----------
    rootdir: str
        The mountpoint of the root partition.

    Returns
    -------
        list: The list of new mountpoints on success; raises
        OciMigrateException on the first mount that fails.
    """
    pseudodict = {'proc': ['-t', 'proc', 'none', '%s/proc' % rootdir],
                  'dev': ['-o', 'bind', '/dev', '%s/dev' % rootdir],
                  'sys': ['-o', 'bind', '/sys', '%s/sys' % rootdir]}
    pseudomounts = []
    _logger.debug('__ Mounting: %s', pseudodict)
    for dirs, cmd_par in list(pseudodict.items()):
        cmd = ['mount'] + cmd_par
        _logger.debug('Mounting %s', dirs)
        pause_msg(cmd, pause_flag='_OCI_MOUNT')
        try:
            _logger.debug('Command: %s', cmd)
            cmdret = run_call_cmd(cmd)
            _logger.debug('%s : %d', cmd, cmdret)
            if cmdret != 0:
                _logger.error(' Failed to %s', cmd)
                raise Exception('%s Failed: %d' % (cmd, cmdret))
            # cmd_par[3] is the target mountpoint
            pseudomounts.append(cmd_par[3])
        except Exception as e:
            _logger.critical(' Failed to %s: %s', cmd, str(e))
            raise OciMigrateException('Failed to %s:' % cmd) from e
    return pseudomounts
def exec_parted(devname):
    """
    Collect data about the device on the image using the parted utility.

    Parameters
    ----------
    devname: str
        The device name.

    Returns
    -------
        dict: The device data from parted utility on success, None otherwise.
        Keys: 'Model', 'Disk', 'Disk Flags', 'Partition Table' and
        'Partition List' (one token list per partition row).
    """
    cmd = ['parted', devname, 'print']
    _logger.debug('__ Running %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        result = run_popen_cmd(cmd)['output'].decode('utf-8')
        _logger.debug('parted: %s', result)
        device_data = dict()
        device_data['Partition List'] = list()
        for devx in result.splitlines():
            _logger.debug('%s - %d', devx, len(devx))
            # NB: 'Disk Flags' must be tested before 'Disk' because
            # 'Disk' is a substring of 'Disk Flags'.
            if 'Model' in devx:
                device_data['Model'] = devx.split(':')[1]
                _logger.debug('Model %s', device_data['Model'])
            elif 'Disk Flags' in devx:
                device_data['Disk Flags'] = devx.split(':')[1]
                _logger.debug('Disk Flags %s', device_data['Disk Flags'])
            elif 'Disk' in devx:
                device_data['Disk'] = devx.split(':')[1]
                _logger.debug('Disk %s', device_data['Disk'])
            elif 'Partition Table' in devx:
                device_data['Partition Table'] = devx.split(':')[1]
                _logger.debug('Partition Table %s', device_data['Partition Table'])
            elif devx.split():
                # a line whose first token is numeric is a partition row
                if devx.split()[0].isdigit():
                    device_data['Partition List'].append(devx.split())
                    _logger.debug('Partition: %s', devx.split())
                else:
                    _logger.debug('Ignoring %s', devx)
            else:
                _logger.debug('Ignoring %s', devx)
        _logger.debug(device_data)
        pause_msg(device_data, pause_flag='_OCI_EXEC')
        return device_data
    except Exception as e:
        _logger.error(' Failed to collect parted %s device data: %s', devname, str(e))
        return None
def exec_pvscan(pvscan_args, devname=None):
    """
    Update the lvm cache.

    Parameters
    ----------
    pvscan_args: list
        List of strings, arguments for pvscan
    devname: str
        Device name to scan; optional, scans everything when omitted.

    Returns
    -------
        bool: True on success, raises an exception on failure.
    """
    _logger.debug('__ Running pvscan %s', pvscan_args)
    cmd = ['pvscan'] + pvscan_args
    if devname is not None:
        cmd.append(devname)
    pause_msg(cmd, pause_flag='_OCI_LVM')
    try:
        _logger.debug('command: %s', cmd)
        cmdret = run_call_cmd(cmd)
        _logger.debug('Physical volumes scanned on %s: %d', devname, cmdret)
        if cmdret != 0:
            _logger.error('  Physical volume scan failed.')
            raise Exception('Physical volume scan failed.')
        return True
    except Exception as e:
        #
        # pvscan failed
        _logger.critical('  Failed to scan %s for physical volumes: %s', devname, str(e))
        raise OciMigrateException('Failed to scan %s for physical volumes:' % devname) from e
@state_loop(migrate_data.qemu_max_count)
def exec_qemunbd(qemunbd_args):
    """
    Execute a qemu-nbd command.

    Parameters
    ----------
    qemunbd_args: list
        The list of arguments for qemu-nbd.

    Returns
    -------
        int: 0 on success, raise exception otherwise.
    """
    cmd = ['qemu-nbd'] + qemunbd_args
    _logger.debug('__ Running %s', cmd)
    # presumably retried by the state_loop decorator on failure - see
    # oci_utils.migrate.decorators.state_loop
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        return run_call_cmd(cmd)
    except Exception as e:
        _logger.error('  %s command failed: %s', cmd, str(e))
        raise OciMigrateException('\n%s command failed:' % cmd) from e
def exec_rename(some_name, to_name=None):
    """
    Rename a file, symbolic link or directory, removing an existing
    destination first. The default destination is
    path/bck_filename_current_time.

    Parameters
    ----------
    some_name: str
        Full path of the original file.
    to_name: str
        Full path of the destination file, if specified, using default
        otherwise.

    Returns
    -------
        str: the path of the renamed file on success, None otherwise.

    Raises
    ------
        OciMigrateException: if removing the destination or the rename fails.
    """
    if not bool(to_name):
        to_name = generate_backup_name(some_name)
    _logger.debug('__ Rename %s to %s', some_name, to_name)
    #
    try:
        #
        # delete the destination if it already exists; test islink first:
        # os.path.isfile/isdir follow symbolic links, so a symlink to a
        # directory would otherwise be passed to os.rmdir, which fails.
        # os.path.islink is also True for broken symlinks, which
        # os.path.exists misses. (The old code tested the return value of
        # os.unlink, which is always None, so success was logged as failure.)
        if os.path.islink(to_name):
            os.unlink(to_name)
            _logger.debug('Removed symbolic link %s', to_name)
        elif os.path.isfile(to_name):
            os.remove(to_name)
            _logger.debug('Removed file %s', to_name)
        elif os.path.isdir(to_name):
            os.rmdir(to_name)
            _logger.debug('Removed directory %s', to_name)
        else:
            _logger.debug('%s does not exists', to_name)
        #
        # rename; the islink test covers a broken symlink source
        if os.path.exists(some_name) or os.path.islink(some_name):
            _logger.debug('%s exists and is a file or symbolic link.', some_name)
            os.rename(some_name, to_name)
            _logger.debug('Renamed %s to %s.', some_name, to_name)
            return to_name
        _logger.debug(' %s does not exists', some_name)
    except Exception as e:
        _logger.error(' Failed to rename %s to %s: %s', some_name, to_name, str(e))
        raise OciMigrateException('Failed to rename %s to %s' % (some_name, to_name)) from e
    return None
@state_loop(migrate_data.qemu_max_count)
def exec_rmdir(dirname):
    """
    Remove a directory and its contents recursively.

    Parameters
    ----------
    dirname: str
        The full path of the directory.

    Returns
    -------
        bool:
            True on success, raises an exception otherwise.
    """
    _logger.debug('__ Removing directory tree %s.', dirname)
    try:
        shutil.rmtree(dirname)
        return True
    except Exception as e:
        # carry the offending path in the message; it was empty before
        raise OciMigrateException('Failed to remove directory tree %s' % dirname) from e
@state_loop(migrate_data.rmmod_max_count)
def exec_rmmod(module):
    """
    Remove a kernel module via rmmod; errors are logged and ignored.

    Parameters
    ----------
    module: str
        The module name.

    Returns
    -------
        bool: True
    """
    _logger.debug('__ Remove module %s', module)
    command = ['rmmod', module]
    try:
        return_code = subprocess.check_call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=False)
        if return_code == 0:
            _logger.debug('Successfully removed %s', module)
        else:
            _logger.error(' Error removing %s, exit code %s, ignoring.', command, str(return_code))
    except Exception as e:
        _logger.debug('Failed: %s, ignoring.', str(e))
    #
    # ignoring eventual errors, which will be caused by module already removed.
    return True
def exec_sfdisk(devname):
    """
    Collect the data about the partitions on the image file mounted on the
    device devname using the sfdisk utility.

    Parameters
    ----------
    devname: str
        The device.

    Returns
    -------
        dict: The partition data with sfdisk results on success, None otherwise.
    """
    # 'sfdisk -d' dumps the partition table; partition lines look like
    #   <partition device> : start= <n>, size= <n>, Id=<hex>[, bootable]
    cmd = ['sfdisk', '-d', devname]
    _logger.debug('__ Running %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        result = run_popen_cmd(cmd)['output'].decode('utf-8')
        partdata = dict()
        for devx in result.splitlines():
            # only lines for partitions of this device start with its name
            if devx.startswith(devname):
                # the partition device path is the dictionary key
                key = devx.split(':')[0].strip()
                result_msg(msg='sfdisk partition %s' % key, result=False)
                # defaults for attributes sfdisk may omit
                thispart = {'start': 0, 'size': 0, 'Id': 0, 'bootable': False}
                val = devx.split(':')[1].split(',')
                for it in val:
                    if 'start' in it:
                        x = it.split('=')[1]
                        thispart['start'] = int(x)
                    elif 'size' in it:
                        x = it.split('=')[1]
                        thispart['size'] = int(x)
                    elif 'Id' in it:
                        x = it.split('=')[1]
                        thispart['Id'] = x.strip()
                    elif 'bootable' in it:
                        # 'bootable' is a flag without a value
                        thispart['bootable'] = True
                    else:
                        _logger.debug('unrecognised item: %s', val)
                partdata[key] = thispart
        _logger.debug(partdata)
        return partdata
    except Exception as e:
        _logger.error(' Failed to collect sfdisk %s partition data: %s', devname, str(e))
        return None
def exec_vgchange(changecmd):
    """
    Execute a vgchange command.

    Parameters
    ----------
    changecmd: list
        Parameters for the vgchange command.

    Returns
    -------
        str: vgchange output.
    """
    command = ['vgchange'] + changecmd
    _logger.debug('__ Running %s', command)
    pause_msg(command, pause_flag='_OCI_LVM')
    try:
        vgchange_output = run_popen_cmd(command)['output'].decode('utf-8')
        _logger.debug('vgchange result:\n%s', vgchange_output)
        return vgchange_output
    except Exception as e:
        _logger.critical(' Failed to execute %s: %s', command, str(e))
        raise OciMigrateException('Failed to execute %s:' % command) from e
def exec_rename_volume_groups(vg_list, direction):
    """
    Rename a list of volume groups.

    Parameters
    ----------
    vg_list: list
        list of lists [original name, new name]
    direction: str
        if FORWARD, rename original name to new name, if BACKWARD from new
        name to original name.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    _logger.debug('__ Rename volume group %s.', vg_list)
    result = True
    for vg_names in vg_list:
        # pick source and destination according to the requested direction
        if direction == 'FORWARD':
            source_name, destination_name = vg_names[0], vg_names[1]
        elif direction == 'BACKWARD':
            source_name, destination_name = vg_names[1], vg_names[0]
        else:
            _logger.debug('Invalid argument %s', direction)
            return False
        cmd = ['vgrename', source_name, destination_name]
        #
        pause_msg(cmd, pause_flag='_OCI_LVM')
        try:
            _logger.debug('command: %s', cmd)
            output = run_popen_cmd(cmd)['output'].decode('utf-8')
            # vgrename reports 'Volume group "..." successfully renamed ...'
            if 'successfully renamed' in output:
                _logger.debug('%s succeeded', cmd)
            else:
                _logger.debug('%s failed', cmd)
                result = False
        except Exception as e:
            _logger.debug('Execution of vgrename failed: %s', str(e))
            result = False
    return result
def exec_vgs_noheadings():
    """
    List the local volume groups and generate a new (temporary) name as
    a hex UUID for each of them.

    Returns
    -------
        list: list of lists [original volume group name, new volume group
              name] on success, None on failure.
    """
    cmd = ['vgs', '--noheadings']
    _logger.debug('__ Executing %s.', cmd)
    pause_msg(cmd, pause_flag='_OCI_LVM')
    vg_list = list()
    try:
        vgs_response = run_popen_cmd(cmd)['output']
        # an empty response means no volume groups; fall back to an empty
        # list so the type stays consistent (the original used b'' here)
        output = vgs_response.decode('utf-8').splitlines() if bool(vgs_response) else []
        for vg_record in output:
            # the first column of a non-empty record is the vg name
            if len(vg_record) > 0:
                vg_list.append([vg_record.split()[0], uuid.uuid4().hex])
        _logger.debug('Volume groups found: %s', vg_list)
        return vg_list
    except Exception as e:
        _logger.critical(' Failed to list current volume groups: %s', str(e))
        # explicit: the original fell off the end returning None implicitly
        return None
def exec_vgscan(vgscan_args):
    """
    Scan the system for (new) volume groups.

    Parameters
    ----------
    vgscan_args: list
        list of strings, arguments for vgscan

    Returns
    -------
        bool: True on success, raises an exception on failure.
    """
    command = ['vgscan'] + vgscan_args
    _logger.debug('__ Executing %s', command)
    pause_msg(command, pause_flag='_OCI_LVM')
    try:
        scan_output = run_popen_cmd(command)['output'].decode('utf-8')
        _logger.debug('Volume groups scanned:\n%s', str(scan_output))
        return True
    except Exception as e:
        #
        # vgscan failed
        _logger.critical(' Failed to scan for volume groups: %s', str(e))
        raise OciMigrateException('Failed to scan for volume groups:') from e
def generate_backup_name(full_path):
    """
    Generate a backup name for a file or directory path, as
    <path>/bck_<file>_timestamp.

    Parameters
    ----------
    full_path: str
        full path of file or directory.

    Returns
    -------
        str: full path of backup file or directory.
    """
    _logger.debug('__ Backup for %s', full_path)
    parent_dir, base_name = os.path.split(full_path)
    return '%s/bck_%s_%s' % (parent_dir, base_name, migrate_data.current_time)
def get_free_nbd():
    """
    Find the first free nbd device name, i.e. one whose reported size is 0.

    Returns
    -------
        str: The free nbd device on success, None otherwise.

    Raises
    ------
        OciMigrateException: when scanning fails or no free device exists.
    """
    _logger.debug('__ Get free nb device.')
    devpath = '/sys/class/block/nbd*'
    try:
        for devname in glob(devpath):
            # a size of 0 means the nbd device has no backend attached
            with open(devname + '/size', 'r') as f:
                sz = f.readline()
            if int(sz) == 0:
                freedev = devname.rsplit('/')[-1]
                return '/dev/' + freedev
    except Exception as e:
        _logger.critical(' Failed to screen nbd devices: %s', str(e))
        raise OciMigrateException('\nFailed to screen nbd devices:') from e
    # raised outside the try block so it is not re-wrapped as a misleading
    # 'Failed to screen' exception (and with the 'devide' typo fixed)
    _logger.critical(' Failed to locate a free nbd device.')
    raise OciMigrateException('\nFailed to locate a free nbd device.')
def get_grub2_kernels(grub_config_file):
    """
    Get the versions of the kernels defined in the grub2 config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
        list: list of kernel versions.
    """
    _logger.debug('__ Get the kernel versions from %s', grub_config_file)
    kernel_versions = list()
    inside_menuentry = False
    with open(grub_config_file, 'r') as grub_fd:
        for raw_line in grub_fd:
            tokens = raw_line.split()
            if not tokens:
                continue
            if tokens[0] == 'menuentry':
                inside_menuentry = True
            if inside_menuentry and 'linux' in tokens[0]:
                # the linux/linux16/linuxefi line of a menu entry carries
                # the kernel image path; only the first one per entry counts
                inside_menuentry = False
                kernel_versions.extend(token.split('-', 1)[1] for token in tokens if 'vmlinuz' in token)
    return kernel_versions
def get_grub_default_kernel(grub_config_file):
    """
    Get the kernel version booted by default from the grub config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
        str: kernel version.
    """
    _logger.debug('__ Get the default kernel from %s', grub_config_file)
    def find_default_boot(grub_filename):
        """Return the integer value of the 'default=<n>' line, None if absent."""
        # stripping all whitespace first makes 'default = 0' parse as well
        with open(grub_filename, 'r') as grub_fd:
            for grub_ln in grub_fd:
                grub_line_list = grub_ln.translate({ord(c): None for c in string.whitespace}).split('=')
                if grub_line_list[0] == 'default':
                    return int(grub_line_list[1])
        #
        # there should be always a default boot.
        return None
    default_kernel_nb = find_default_boot(grub_config_file)
    if default_kernel_nb is None:
        # todo:
        # locate default and installed kernels in all the flavors of grub2 and EFI
        # _logger.critical('No default boot found.')
        # raise OciMigrateException('No default boot found.')
        #
        # not so fatal, only if boot device is not by lvm, by label or by uuid
        default_kernel_nb = 0
        kernelversion = 'not found'
        return kernelversion
    # count legacy-grub 'kernel ...' stanzas until the default index is hit;
    # the vmlinuz token of that stanza carries the version suffix
    kernel_cnt = 0
    with open(grub_config_file, 'r') as grub_config_fd:
        for grub_line in grub_config_fd:
            gl = ' '.join(grub_line.split()).split()
            if bool(gl):
                if gl[0] == 'kernel':
                    if kernel_cnt == default_kernel_nb:
                        for it in gl:
                            if 'vmlinuz' in it:
                                kernelversion = it.split('-', 1)[1]
                                return kernelversion
                    kernel_cnt += 1
    # NOTE(review): returns None implicitly when the default index exceeds
    # the number of 'kernel' entries — confirm callers handle that.
def get_grub_kernels(grub_config_file):
    """
    Get the versions of the kernels defined in the (legacy) grub config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
        list: list of kernel versions.
    """
    _logger.debug('__ Get the kernel versions from %s', grub_config_file)
    kernel_versions = list()
    with open(grub_config_file, 'r') as grub_fd:
        for raw_line in grub_fd:
            tokens = raw_line.split()
            # only 'kernel ...' lines reference a kernel image
            if tokens and tokens[0] == 'kernel':
                kernel_versions.extend(token.split('-', 1)[1] for token in tokens if 'vmlinuz' in token)
    return kernel_versions
def get_nameserver():
    """
    Get the nameserver definitions and store the first valid IPv4 address
    found in migrate_data.nameserver.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    def dig_ns():
        """
        Look for a nameserver definition in the output of the dig command.

        Returns
        -------
            list: list of ipv4 nameservers.
        """
        dig = 'dig'
        dig_list = list()
        if exec_exists(dig):
            cmd = [dig]
            try:
                dig_output = run_popen_cmd(cmd)['output'].decode('utf-8').splitlines()
                for dig_item in dig_output:
                    # dig reports the server used in a ';; SERVER: ...' line
                    if 'SERVER' in dig_item:
                        dig_list.append(re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", dig_item).group())
                        _logger.debug('Found ns %s', dig_list[-1])
            except Exception as e:
                _logger.warning(' Failed to identify nameserver using dig: %s\n', str(e))
        else:
            _logger.debug('dig utility not found, install bind-utils.')
        return dig_list
    def nmcli_ns():
        """
        Look for a nameserver definition in the output of the nmcli command.

        Returns
        -------
            list: list of ipv4 nameservers.
        """
        nmcli = 'nmcli'
        nmcli_list = list()
        if exec_exists(nmcli):
            cmd = [nmcli, 'dev', 'show']
            try:
                nm_list = run_popen_cmd(cmd)['output'].decode('utf-8').splitlines()
                for nm_item in nm_list:
                    # 'IP4.DNS[1]: <address>' style lines
                    if 'DNS' in nm_item.split(':')[0]:
                        nmcli_list.append(nm_item.split(':')[1].lstrip().rstrip())
                        _logger.debug('Found ns %s', nmcli_list[-1])
            except Exception as e:
                _logger.warning(' Failed to identify nameserver using nmcli: %s\n', str(e))
        else:
            _logger.debug('nmcli not running.')
        return nmcli_list
    def resolv_ns():
        """
        Look for nameserver definition in resolv.conf.

        Returns
        -------
            list: list of ipv4 nameservers.
        """
        resolv_list = list()
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                resolvconf_lines = f.read().decode('utf-8').splitlines()
            for nm_item in resolvconf_lines:
                ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", nm_item)
                if ip is not None:
                    # NOTE(review): appends the second whitespace token, not
                    # the matched IP — correct for 'nameserver x.x.x.x'
                    # lines; confirm for other line formats.
                    resolv_list.append(nm_item.split(' ')[1].lstrip().rstrip())
                    _logger.debug('Found ns %s', resolv_list[-1])
        except Exception as e:
            _logger.warning('Failed to find nameserver in resolv.conf: %s.', str(e))
        return resolv_list
    # global nameserver
    _logger.debug("__ Collecting nameservers.")
    dns_list = dig_ns() + nmcli_ns() + resolv_ns()
    _logger.debug('Found nameservers: %s', dns_list)
    #
    # Verify if found one.
    if bool(dns_list):
        for ip_ad in dns_list:
            try:
                if ipaddress.ip_address(ip_ad).version == 4:
                    # store the address that validated; the original stored
                    # dns_list[0] regardless of which entry passed the test
                    migrate_data.nameserver = ip_ad
                    break
            except ValueError as v:
                _logger.debug('%s', str(v))
            except Exception as e:
                _logger.debug('%s', str(e))
        _logger.debug('Nameserver set to %s', migrate_data.nameserver)
        return True
    return False
def is_thread_running(thread_id):
    """
    Verify if a thread is active.

    Parameters
    ----------
    thread_id: thread
        The thread to test.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    _logger.debug('__ Testing threadid %s.', thread_id)
    # threading.enumerate lists all alive threads
    return thread_id in threading.enumerate()
def is_root():
    """
    Verify whether the current (real) user is root.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    return os.getuid() == 0
def leave_chroot(root2return, dir2return):
    """
    Leave a chroot environment and return to another one.

    Parameters
    ----------
    root2return: file descriptor
        The file descriptor of the root to return to.
    dir2return: str
        The original working dir to return to.

    Returns
    -------
        bool: True on success, raises exception on failure.
    """
    _logger.debug('__ Leaving chroot jail.')
    try:
        #
        # leave chroot
        os.fchdir(root2return)
        os.chroot('.')
        os.close(root2return)
        _logger.debug('Left change root environment.')
        #
        # return to working directory
        os.chdir(dir2return)
        return True
    except Exception as e:
        _logger.error(' Failed to return from chroot: %s', str(e))
        # the exception was previously instantiated but never raised, so
        # failures silently returned None; raise as the docstring promises
        raise OciMigrateException('Failed to return from chroot: %s' % str(e)) from e
def mount_fs(mountpoint):
    """
    Mount a filesystem specified in fstab, by mountpoint only.

    Parameters
    ----------
    mountpoint: str
        The mountpoint.

    Returns
    -------
        bool: True on success, False otherwise
    """
    cmd = ['mount', mountpoint]
    pause_msg(cmd, pause_flag='_OCI_MOUNT')
    _logger.debug('__ Mounting %s', mountpoint)
    # initialise first so the finally clause cannot hit an unbound local
    # (NameError) when terminal_dimension or ProgressBar raises
    mountwait = None
    try:
        _, nbcols = terminal_dimension()
        mountwait = ProgressBar(nbcols, 0.2, progress_chars=['mounting %s' % mountpoint])
        mountwait.start()
        _logger.debug('Command: %s', cmd)
        cmdret = run_call_cmd(cmd)
        _logger.debug('%s returned %d', cmd, cmdret)
        if cmdret == 0:
            return True
        raise Exception('%s failed: %d' % (cmd, cmdret))
    except Exception as e:
        _logger.error(' Failed to %s: %s', cmd, str(e))
        return False
    finally:
        # always stop the progress bar thread, if it was started
        if mountwait is not None and is_thread_running(mountwait):
            mountwait.stop()
def reset_vg_list(vg_list):
    """
    Update the local volume group rename list in place so that the new name
    equals the original name again.

    Parameters
    ----------
    vg_list: list (of lists)
        The volume group rename list.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    _logger.debug('__ Updating the vg list.')
    for name_pair in vg_list:
        _logger.debug('Updating %s to %s.', name_pair[1], name_pair[0])
        name_pair[1] = name_pair[0]
    return True
def restore_nameserver():
    """
    Restore nameserver configuration.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    _logger.debug('__ Restore nameserver data.')
    # global resolv_conf_path
    resolvpath = '/etc/resolv.conf'
    try:
        #
        # save used one
        # move the temporary resolv.conf written by set_nameserver() out of
        # the way; failure here is harmless, the restore below overwrites
        if os.path.isfile(resolvpath):
            if not bool(exec_rename(resolvpath, resolvpath + '_temp_' + migrate_data.current_time)):
                _logger.debug('Failed to rename %s to %s, no harm done.',
                              resolvpath, resolvpath + '_temp_' + migrate_data.current_time)
        else:
            _logger.debug('No %s found.', resolvpath)
        #
        # restore original one
        # set_nameserver() recorded the backup path in
        # migrate_data.resolv_conf_path
        if os.path.isfile(migrate_data.resolv_conf_path):
            if bool(exec_rename(migrate_data.resolv_conf_path, resolvpath)):
                _logger.debug('Successfully restored %s', resolvpath)
            else:
                _logger.debug('Failed to restore %s.', resolvpath)
                raise OciMigrateException('Failed to restore nameserver config.')
        else:
            _logger.debug('No %s found.', migrate_data.resolv_conf_path)
        return True
    except Exception as e:
        error_msg('Continuing but might cause issues installing cloud-init: %s' % str(e))
        return False
@state_loop(3)
def rm_nbd():
    """
    Unload the nbd kernel module.

    Returns
    -------
        bool: True on succes, False on failure.
    """
    return bool(exec_rmmod('nbd'))
def run_call_cmd(command):
    """
    Execute an os command which does not return data; stdout and stderr are
    discarded.

    Parameters
    ----------
    command: list
        The os command and its arguments.

    Returns
    -------
        int: The return value.
    """
    _logger.debug('__ Executing %s', command)
    assert (len(command) > 0), 'empty command list'
    try:
        return subprocess.call(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError as cpe:
        raise OciMigrateException('Error encountered while running %s:' % command) from cpe
    except OSError as oserr:
        raise OciMigrateException('OS error encountered while running %s:' % command) from oserr
def run_popen_cmd(command, valid_return=frozenset([0])):
    """
    Execute an os command and collect stdout and stderr.

    Parameters
    ----------
    command: list
        The os command and its arguments.
    valid_return: frozenset
        A set of valid return codes, default = [0]

    Returns
    -------
        dict: {'output': output,
               'error': error,
               'return_code: return_code}
        raises an exception on failure.
    """
    _logger.debug('__ Executing %s.', command)
    output_dict = dict()
    # refuse to run anything that cannot be located in PATH
    if exec_exists(command[0]) is not None:
        _logger.debug('running %s', command)
        try:
            ext_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() waits for termination and avoids pipe deadlocks;
            # output and error are bytes, not str
            output, error = ext_process.communicate()
            return_code = ext_process.returncode
            output_dict['output'] = output
            output_dict['error'] = error
            output_dict['return_code'] = return_code
            # if return_code != 0:
            #     if bool(error):
            #         _logger.debug('Error occurred while running %s: %s - %s',
            #                       command, return_code, error.decode('utf-8'), exc_info=True)
            #     raise OciMigrateException('Error encountered while running %s: %s - %s'
            #                               % (command, return_code, error.decode('utf-8')))
            if not bool(output):
                _logger.debug('%s did not return output.', command)
            if bool(error):
                # not necessarily fatal
                _logger.debug('%s returned message %s.', command, error.decode('utf-8'))
            _logger.debug('%s returned code %s', command, str(return_code))
            # only return codes in valid_return are accepted; e.g. blkid
            # uses 2 for 'nothing found'
            if return_code not in valid_return:
                raise OciMigrateException('Error encountered while running %s: %s - %s'
                                          % (command, return_code, error.decode('utf-8')))
            return output_dict
        except OSError as os_error:
            raise OciMigrateException('OS error encountered while running %s:' % command) from os_error
        except Exception as e:
            raise OciMigrateException('Error encountered while running %s:' % command) from e
    else:
        _logger.critical(' %s not found.', command[0])
        raise OciMigrateException('%s does not exist' % command[0])
def set_nameserver():
    """
    Setting temporary nameserver.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    # global resolv_conf_path
    _logger.debug('__ Set nameserver.')
    #
    # rename eventual existing resolv.conf
    resolvpath = '/etc/resolv.conf'
    try:
        #
        # save current
        # resolv.conf may be a regular file, a symlink (systemd-resolved,
        # NetworkManager) or a directory; back up whichever is present and
        # remember the backup path so restore_nameserver() can undo this
        if os.path.isfile(resolvpath) or os.path.islink(resolvpath) or os.path.isdir(resolvpath):
            migrate_data.resolv_conf_path = exec_rename(resolvpath)
            if not bool(migrate_data.resolv_conf_path):
                _logger.debug('Failed to save current nameserver configuration.')
        else:
            _logger.error(' No %s found', resolvpath)
        #
        # write new
        with open(resolvpath, 'w') as f:
            f.writelines('nameserver %s\n' % migrate_data.nameserver)
        return True
    except Exception as e:
        error_msg('Failed to set nameserver: %s\n continuing but might cause issues installing cloud-init.' % str(e))
        return False
def show_hex_dump(bindata):
    """
    Show hex and readable version of binary data.

    Parameters
    ----------
    bindata: binary data.

    Returns
    -------
        str: hexdump format
    """
    blocklen = 16
    hexdata = ''
    try:
        for blk_nr, offset in enumerate(range(0, len(bindata), blocklen)):
            chunk = bytearray(bindata[offset:offset + blocklen])
            hexpart = ' '.join('%02x' % byte for byte in chunk)
            # extra space between the two 8-byte halves of the block
            hexpart = hexpart[0:23] + ' ' + hexpart[23:]
            textpart = ''.join(chr(byte) if 32 <= byte <= 127 else '.' for byte in chunk)
            hexdata += '%08x : %s : %s :\n' % (blk_nr * 16, hexpart, textpart)
    except Exception as e:
        _logger.error(' Exception: %s', str(e))
    return hexdata
@state_loop(3)
def unmount_something(mountpoint):
    """
    Unmount a mountpoint; a path that is not mounted is a no-op success.

    Parameters
    ----------
    mountpoint: str
        The mountpoint.

    Returns
    -------
        bool: True on success, False otherwise.
    """
    _logger.debug('__ Unmount %s.', mountpoint)
    if not os.path.ismount(mountpoint):
        _logger.debug('%s is not a mountpoint, quitting', mountpoint)
        return True
    _logger.debug('%s is a mountpoint.', mountpoint)
    #
    cmd = ['umount', mountpoint]
    pause_msg(cmd, pause_flag='_OCI_MOUNT')
    try:
        _logger.debug('command: %s', cmd)
        cmdret = run_call_cmd(cmd)
        _logger.debug('%s : %d', cmd, cmdret)
        if cmdret == 0:
            return True
        raise Exception('%s failed: %d' % (cmd, cmdret))
    except Exception as e:
        _logger.error(' Failed to %s: %s', cmd, str(e))
        return False
def unmount_pseudo(pseudomounts):
    """
    Unmount the pseudodevices.

    Parameters
    ----------
    pseudomounts: list
        The list of pseudodevices

    Returns
    -------
        True on success, False otherwise.
    """
    _logger.debug('__ Unmount %s', pseudomounts)
    all_ok = True
    # sort in place, deepest mountpoints first, so nested mounts are
    # released before their parents
    pseudomounts.sort(reverse=True)
    for mountpoint in pseudomounts:
        _logger.debug('Unmount %s', mountpoint)
        if unmount_something(mountpoint):
            _logger.debug('%s successfully unmounted.', mountpoint)
        else:
            _logger.error(' Failed to unmount %s', mountpoint)
            all_ok = False
    return all_ok
|
# oci-utils
#
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.
""" Module containing migrate platform system tools.
"""
import ipaddress
import logging
import os
import re
import shutil
import string
import subprocess
import threading
import uuid
from glob import glob
from oci_utils.migrate import ProgressBar
from oci_utils.migrate import error_msg
from oci_utils.migrate import migrate_data
from oci_utils.migrate import pause_msg
from oci_utils.migrate import result_msg
from oci_utils.migrate import terminal_dimension
from oci_utils.migrate.decorators import state_loop
from oci_utils.migrate.exception import OciMigrateException
_logger = logging.getLogger('oci-utils.system_tools')
def backup_dir(directory_name):
    """
    Backup a directory as path/bck_directory_name_[current_time]

    Parameters
    ----------
    directory_name: str
        The full path of the directory.

    Returns
    -------
        str: path of backup directory on success, None otherwise.
    """
    _logger.debug('__ Backup %s', directory_name)
    try:
        if not os.path.isdir(directory_name):
            _logger.warning('%s is not a directory.', directory_name)
            return None
        destination = generate_backup_name(directory_name)
        shutil.copytree(directory_name, destination)
        _logger.debug('Backup of %s succeeded.', directory_name)
        return destination
    except Exception as e:
        _logger.warning('Backup of %s failed: %s', directory_name, str(e))
        return None
def backup_file(file_name):
    """
    Backup a single file as path/bck_file_name_[current_time]

    Parameters
    ----------
    file_name: str
        The full path of the file.

    Returns
    -------
        str: path of backup file on success, None otherwise.
    """
    _logger.debug('__Backup %s', file_name)
    try:
        if not os.path.exists(file_name):
            _logger.debug('%s does not exist.', file_name)
            return None
        if os.path.isdir(file_name):
            _logger.warning('%s is a directory.', file_name)
            return None
        destination = generate_backup_name(file_name)
        shutil.copyfile(file_name, destination)
        _logger.debug('Backup of %s succeeded.', file_name)
        return destination
    except Exception as e:
        _logger.warning('Backup of %s failed: %s', file_name, str(e))
        return None
def enter_chroot(newroot):
    """
    Execute the chroot command.

    Parameters
    ----------
    newroot: str
        Full path of new root directory.

    Returns
    -------
        file descriptor, str, str: The file descriptor of the current root on
        success, path to restore, current working dir;
        raises an exception on failure.
    """
    _logger.debug('__ Entering chroot jail at %s.', newroot)
    root2return = -1
    current_dir = ''
    try:
        #
        # current working directory
        current_dir = os.getcwd()
        #
        # change root
        # keep a descriptor on the current root open; it is the only way
        # back out of the chroot (see leave_chroot)
        root2return = os.open('/', os.O_RDONLY)
        os.chdir(newroot)
        os.chroot(newroot)
        _logger.debug('Changed root to %s.', newroot)
    except Exception as e:
        _logger.error(' Failed to change root to %s: %s', newroot, str(e))
        #
        # need to return environment.
        # NOTE(review): this test should probably be 'root2return >= 0';
        # file descriptor 0 is valid — confirm before changing.
        if root2return > 0:
            os.fchdir(root2return)
            os.chroot('.')
            os.close(root2return)
        raise OciMigrateException('Failed to change root to %s:' % newroot) from e
    try:
        #
        # adjust PATH to make sure.
        # strip the usual bin directories from PATH and re-append them so
        # the chroot's binaries are found in a predictable order
        currentpath = os.environ['PATH']
        newpath = currentpath.replace('/bin', '')\
            .replace('/usr/bin', '')\
            .replace('/sbin', '')\
            .replace('/usr/sbin', '')\
            .replace('/usr/local/sbin', '')\
            .replace('::', ':') \
            + ':/root/bin:/bin:/usr/bin:/usr/sbin:/usr/local/sbin:/sbin'
        os.environ['PATH'] = newpath
        _logger.debug('Set path to %s', newpath)
        return root2return, currentpath, current_dir
    except Exception as e:
        _logger.error(' Failed to set path to %s: %s', newpath, str(e))
        raise OciMigrateException('Failed to set path to %s:' % newpath) from e
@state_loop(migrate_data.qemu_max_count)
def create_nbd():
    """
    Load the nbd kernel module.

    Returns
    -------
        bool: True on succes, False on failure, raise an exception on call
              error.
    """
    cmd = ['modprobe', 'nbd', 'max_part=63']
    _logger.debug('__ Running %s', cmd)
    try:
        if run_call_cmd(cmd) != 0:
            _logger.critical(' Failed to execute %s', cmd)
            raise OciMigrateException('\nFailed to execute %s' % cmd)
        return True
    except Exception as e:
        _logger.critical(' Failed: %s', str(e))
        return False
@state_loop(migrate_data.qemu_max_count)
def exec_blkid(blkid_args):
    """
    Run a blkid command.

    Parameters
    ----------
    blkid_args: list
        The argument list for the blkid command.

    Returns
    -------
        str: blkid output on success, None otherwise.
    """
    cmd = ['blkid'] + blkid_args
    _logger.debug('__ Running %s', cmd)
    try:
        pause_msg('test nbd devs', pause_flag='_OCI_EXEC')
        # blkid exits 2 when nothing was found; that is acceptable here
        blkid_output = run_popen_cmd(cmd, valid_return=frozenset([0, 2]))['output'].decode('utf-8')
        _logger.debug('success\n%s', blkid_output)
        return blkid_output
    except Exception as e:
        _logger.error(' %s failed: %s', cmd, str(e))
        return None
def exec_exists(executable):
    """
    Verify if an executable exists in the current PATH.

    Parameters
    ----------
    executable: str
        The file to be tested.

    Returns
    -------
        str: full path on success, None otherwise.
    """
    _logger.debug('__ which %s', executable)
    located_at = shutil.which(executable)
    return located_at
def exec_ldconfig():
    """
    Executes ldconfig to update the shared library cache.

    Returns
    -------
        int: 0 on success, raises an exception otherwise.
    """
    cmd = ['ldconfig']
    _logger.debug('__ Running %s', cmd)
    try:
        pause_msg('running ldconfig', pause_flag='_OCI_EXEC')
        # output is discarded; only the exit status matters here
        return run_call_cmd(cmd)
    except Exception as e:
        _logger.error(' %s command failed: %s', cmd, str(e))
        raise OciMigrateException('\n%s command failed:' % cmd) from e
def exec_lsblk(lsblk_args):
    """
    Run an lsblk command.

    Parameters
    ----------
    lsblk_args: list
        The argument list for the lsblk command.

    Returns
    -------
        str: lsblk output on success, raises an exception otherwise.
    """
    command = ['lsblk'] + lsblk_args
    _logger.debug('__ Running %s', command)
    pause_msg(command, pause_flag='_OCI_EXEC')
    try:
        lsblk_output = run_popen_cmd(command)['output'].decode('utf-8')
        _logger.debug('Success\n%s', lsblk_output)
        return lsblk_output
    except Exception as e:
        _logger.error(' %s failed: %s', command, str(e))
        raise OciMigrateException('%s failed:' % command) from e
def exec_lvscan(lvscan_args):
    """
    Scan the system for logical volumes.

    Parameters
    ----------
    lvscan_args: list
        list of strings, arguments for lvscan

    Returns
    -------
        list: A list of strings, the output of lvscan --verbose on success,
              raises an exception on failure.
    """
    command = ['lvscan'] + lvscan_args
    _logger.debug('__ Running: %s', command)
    pause_msg(command, pause_flag='_OCI_LVM')
    try:
        _logger.debug('command: %s', command)
        scan_output = run_popen_cmd(command)['output'].decode('utf-8')
        _logger.debug('Logical volumes scanned:\n%s', str(scan_output))
        return scan_output.splitlines()
    except Exception as e:
        #
        # lvscan failed
        _logger.critical(' Failed to scan for logical volumes: %s', str(e))
        raise OciMigrateException('Failed to scan for logical volume:') from e
def exec_mkdir(dirname):
    """
    Create a directory, including missing parent directories.

    Parameters
    ----------
    dirname: str
        The full path of the directory.

    Returns
    -------
        bool:
            True on success, raises an exception otherwise.
    """
    _logger.debug('__ Creating %s.', dirname)
    try:
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        else:
            _logger.debug('Directory %s already exists', dirname)
        return True
    except Exception as e:
        # carry the offending path in the message; it was empty before
        raise OciMigrateException('Failed to create directory %s' % dirname) from e
def mount_pseudo(rootdir):
    """
    Remount proc, sys and dev.

    Parameters
    ----------
    rootdir: str
        The mountpoint of the root partition.

    Returns
    -------
        list: The list of new mountpoints on success, None otherwise.
    """
    # the pseudo filesystems needed for a functional chroot: proc is mounted
    # fresh, dev and sys are bind-mounted from the running system
    pseudodict = {'proc': ['-t', 'proc', 'none', '%s/proc' % rootdir],
                  'dev': ['-o', 'bind', '/dev', '%s/dev' % rootdir],
                  'sys': ['-o', 'bind', '/sys', '%s/sys' % rootdir]}
    pseudomounts = []
    _logger.debug('__ Mounting: %s', pseudodict)
    for dirs, cmd_par in list(pseudodict.items()):
        cmd = ['mount'] + cmd_par
        _logger.debug('Mounting %s', dirs)
        pause_msg(cmd, pause_flag='_OCI_MOUNT')
        try:
            _logger.debug('Command: %s', cmd)
            cmdret = run_call_cmd(cmd)
            _logger.debug('%s : %d', cmd, cmdret)
            if cmdret != 0:
                _logger.error(' Failed to %s', cmd)
                raise Exception('%s Failed: %d' % (cmd, cmdret))
            # cmd_par[3] is the target mountpoint; record it for unmounting
            pseudomounts.append(cmd_par[3])
        except Exception as e:
            _logger.critical(' Failed to %s: %s', cmd, str(e))
            raise OciMigrateException('Failed to %s:' % cmd) from e
    return pseudomounts
def exec_parted(devname):
    """
    Collect data about the device on the image using the parted utility.

    Parameters
    ----------
    devname: str
        The device name.

    Returns
    -------
        dict: The device data from parted utility on success, None otherwise.
    """
    cmd = ['parted', devname, 'print']
    _logger.debug('__ Running %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        result = run_popen_cmd(cmd)['output'].decode('utf-8')
        _logger.debug('parted: %s', result)
        device_data = dict()
        device_data['Partition List'] = list()
        for devx in result.splitlines():
            _logger.debug('%s - %d', devx, len(devx))
            # NB: 'Disk Flags' must be tested before 'Disk' — the elif order
            # is significant here
            if 'Model' in devx:
                device_data['Model'] = devx.split(':')[1]
                _logger.debug('Model %s', device_data['Model'])
            elif 'Disk Flags' in devx:
                device_data['Disk Flags'] = devx.split(':')[1]
                _logger.debug('Disk Flags %s', device_data['Disk Flags'])
            elif 'Disk' in devx:
                device_data['Disk'] = devx.split(':')[1]
                _logger.debug('Disk %s', device_data['Disk'])
            elif 'Partition Table' in devx:
                device_data['Partition Table'] = devx.split(':')[1]
                _logger.debug('Partition Table %s', device_data['Partition Table'])
            elif devx.split():
                # partition table rows start with the partition number
                if devx.split()[0].isdigit():
                    device_data['Partition List'].append(devx.split())
                    _logger.debug('Partition: %s', devx.split())
                else:
                    _logger.debug('Ignoring %s', devx)
            else:
                _logger.debug('Ignoring %s', devx)
        _logger.debug(device_data)
        pause_msg(device_data, pause_flag='_OCI_EXEC')
        return device_data
    except Exception as e:
        _logger.error(' Failed to collect parted %s device data: %s', devname, str(e))
        return None
def exec_pvscan(pvscan_args, devname=None):
    """
    Update the lvm cache.

    Parameters
    ----------
    pvscan_args: list
        List of strings, arguments for pvscan
    devname: str
        Device name to scan.

    Returns
    -------
        bool: True on success, raises an exception on failure.
    """
    _logger.debug('__ Running pvscan %s', pvscan_args)
    cmd = ['pvscan'] + pvscan_args
    # scan a single device only when one is specified
    if devname is not None:
        cmd.append(devname)
    pause_msg(cmd, pause_flag='_OCI_LVM')
    try:
        _logger.debug('command: %s', cmd)
        cmdret = run_call_cmd(cmd)
        _logger.debug('Physical volumes scanned on %s: %d', devname, cmdret)
        if cmdret != 0:
            _logger.error(' Physical volume scan failed.')
            # raised here so the handler below logs and rewraps it
            raise Exception('Physical volume scan failed.')
        return True
    except Exception as e:
        #
        # pvscan failed
        _logger.critical(' Failed to scan %s for physical volumes: %s', devname, str(e))
        raise OciMigrateException('Failed to scan %s for physical volumes:' % devname) from e
@state_loop(migrate_data.qemu_max_count)
def exec_qemunbd(qemunbd_args):
    """
    Execute a qemu-nbd command.

    Parameters
    ----------
    qemunbd_args: list
        The list of arguments for qemu-nbd.

    Returns
    -------
    int: 0 on success, raise exception otherwise.
    """
    nbd_cmd = ['qemu-nbd'] + qemunbd_args
    _logger.debug('__ Running %s', nbd_cmd)
    pause_msg(nbd_cmd, pause_flag='_OCI_EXEC')
    try:
        return run_call_cmd(nbd_cmd)
    except Exception as e:
        _logger.error(' %s command failed: %s', nbd_cmd, str(e))
        raise OciMigrateException('\n%s command failed:' % nbd_cmd) from e
def exec_rename(some_name, to_name=None):
    """
    Renames a file, symbolic link or directory to path/bck_filename_current_time.

    Parameters
    ----------
    some_name: str
        Full path of the original file.
    to_name: str
        Full path of the destination file; generated from some_name when
        not specified.

    Returns
    -------
    str: the path of the renamed file on success, None otherwise.
    """
    if not bool(to_name):
        to_name = generate_backup_name(some_name)
    _logger.debug('__ Rename %s to %s', some_name, to_name)
    #
    try:
        #
        # delete the destination if it already exists; test islink first
        # (isfile/isdir follow symlinks) and use lexists so a dangling
        # symlink is also detected and removed.
        if os.path.lexists(to_name):
            _logger.debug('%s already exists', to_name)
            if os.path.islink(to_name):
                # os.unlink returns None; success is simply the absence of
                # an exception (the original tested the return value and so
                # always logged a failure).
                os.unlink(to_name)
                _logger.debug('Removed symbolic link %s', to_name)
            elif os.path.isfile(to_name):
                os.remove(to_name)
            elif os.path.isdir(to_name):
                # os.rmdir only removes an empty directory; a populated one
                # raises and is reported in the except clause below.
                os.rmdir(to_name)
            else:
                _logger.error(' Failed to remove %s.', to_name)
        else:
            _logger.debug('%s does not exists', to_name)
        #
        # rename; islink covers dangling symbolic links which fail the
        # os.path.exists test.
        if os.path.exists(some_name) or os.path.islink(some_name):
            _logger.debug('%s exists and is a file or symbolic link.', some_name)
            os.rename(some_name, to_name)
            _logger.debug('Renamed %s to %s.', some_name, to_name)
            return to_name
        _logger.debug(' %s does not exists', some_name)
    except Exception as e:
        _logger.error(' Failed to rename %s to %s: %s', some_name, to_name, str(e))
        raise OciMigrateException('Failed to rename %s to %s' % (some_name, to_name)) from e
    return None
@state_loop(migrate_data.qemu_max_count)
def exec_rmdir(dirname):
    """
    Remove a directory tree recursively.

    Parameters
    ----------
    dirname: str
        The full path of the directory.

    Returns
    -------
    bool:
        True on success, raises an exception otherwise.
    """
    # fixed docstring: this removes a directory tree, it does not create one.
    _logger.debug('__ Removing directory tree %s.', dirname)
    try:
        shutil.rmtree(dirname)
        return True
    except Exception as e:
        # propagate with a meaningful message instead of an empty one.
        raise OciMigrateException('Failed to remove directory tree %s' % dirname) from e
@state_loop(migrate_data.rmmod_max_count)
def exec_rmmod(module):
    """
    Removes a kernel module.

    Parameters
    ----------
    module: str
        The module name.

    Returns
    -------
    bool: True, always; errors are logged and ignored since they are
          usually caused by the module being absent already.
    """
    _logger.debug('__ Remove module %s', module)
    cmd = ['rmmod', module]
    try:
        # subprocess.check_call raises CalledProcessError on a nonzero exit
        # status, which made the nonzero branch below unreachable;
        # subprocess.call returns the exit code so it can be logged.
        rmmod_result = subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=False)
        if rmmod_result == 0:
            _logger.debug('Successfully removed %s', module)
        else:
            _logger.error(' Error removing %s, exit code %s, ignoring.', cmd, str(rmmod_result))
    except Exception as e:
        _logger.debug('Failed: %s, ignoring.', str(e))
    #
    # ignoring eventual errors, which will be caused by module already removed.
    return True
def exec_sfdisk(devname):
    """
    Collect the data about the partitions on the image file mounted on the
    device devname using the sfdisk utility.

    Parameters
    ----------
    devname: str
        The device.

    Returns
    -------
    dict: The partition data with sfdisk results on success, None otherwise.
    """
    cmd = ['sfdisk', '-d', devname]
    _logger.debug('__ Running %s', cmd)
    pause_msg(cmd, pause_flag='_OCI_EXEC')
    try:
        result = run_popen_cmd(cmd)['output'].decode('utf-8')
        partdata = dict()
        # sfdisk -d dumps lines of the form
        #   <device>pN : start=2048, size=204800, Id=83, bootable
        # only lines starting with the device name describe partitions.
        for devx in result.splitlines():
            if devx.startswith(devname):
                key = devx.split(':')[0].strip()
                result_msg(msg='sfdisk partition %s' % key, result=False)
                # defaults for fields that may be absent in the dump line
                thispart = {'start': 0, 'size': 0, 'Id': 0, 'bootable': False}
                val = devx.split(':')[1].split(',')
                for it in val:
                    if 'start' in it:
                        x = it.split('=')[1]
                        thispart['start'] = int(x)
                    elif 'size' in it:
                        x = it.split('=')[1]
                        thispart['size'] = int(x)
                    elif 'Id' in it:
                        x = it.split('=')[1]
                        thispart['Id'] = x.strip()
                    elif 'bootable' in it:
                        # 'bootable' is a bare flag with no '=' value
                        thispart['bootable'] = True
                    else:
                        _logger.debug('unrecognised item: %s', val)
                partdata[key] = thispart
        _logger.debug(partdata)
        return partdata
    except Exception as e:
        _logger.error(' Failed to collect sfdisk %s partition data: %s', devname, str(e))
        return None
def exec_vgchange(changecmd):
    """
    Execute a vgchange command with the supplied arguments.

    Parameters
    ----------
    changecmd: list
        Parameters for the vgchange command.

    Returns
    -------
    str: vgchange output, raises an exception on failure.
    """
    vgchange_cmd = ['vgchange'] + changecmd
    _logger.debug('__ Running %s', vgchange_cmd)
    pause_msg(vgchange_cmd, pause_flag='_OCI_LVM')
    try:
        vgchange_output = run_popen_cmd(vgchange_cmd)['output'].decode('utf-8')
    except Exception as e:
        _logger.critical(' Failed to execute %s: %s', vgchange_cmd, str(e))
        raise OciMigrateException('Failed to execute %s:' % vgchange_cmd) from e
    _logger.debug('vgchange result:\n%s', vgchange_output)
    return vgchange_output
def exec_rename_volume_groups(vg_list, direction):
    """
    Rename a list of volume groups.

    Parameters
    ----------
    vg_list: list
        list of lists [original name, new name]
    direction: str
        if FORWARD, rename original name to new name, if BACKWARD from new
        name to original name.

    Returns
    -------
    bool: True on success, False otherwise.
    """
    _logger.debug('__ Rename volume group %s.', vg_list)
    all_renamed = True
    for name_pair in vg_list:
        if direction == 'FORWARD':
            cmd = ['vgrename', name_pair[0], name_pair[1]]
        elif direction == 'BACKWARD':
            cmd = ['vgrename', name_pair[1], name_pair[0]]
        else:
            _logger.debug('Invalid argument %s', direction)
            return False
        pause_msg(cmd, pause_flag='_OCI_LVM')
        try:
            _logger.debug('command: %s', cmd)
            rename_output = run_popen_cmd(cmd)['output'].decode('utf-8')
            if 'successfully renamed' in rename_output:
                _logger.debug('%s succeeded', cmd)
            else:
                _logger.debug('%s failed', cmd)
                all_renamed = False
        except Exception as e:
            _logger.debug('Execution of vgrename failed: %s', str(e))
            all_renamed = False
    return all_renamed
def exec_vgs_noheadings():
    """
    List the local volume groups and generate a new (temporary) name for
    each one as a hex UUID.

    Returns
    -------
    list: list of lists [original volume group name, new volume group name]
          on success, None on failure.
    """
    cmd = ['vgs', '--noheadings']
    _logger.debug('__ Executing %s.', cmd)
    pause_msg(cmd, pause_flag='_OCI_LVM')
    vg_list = list()
    try:
        vgs_response = run_popen_cmd(cmd)['output']
        # decode to a list of lines; an empty response yields an empty list
        # (the original fell back to b'', mixing bytes and list types).
        output = vgs_response.decode('utf-8').splitlines() if bool(vgs_response) else []
        for vg_record in output:
            if len(vg_record) > 0:
                # first column of the vgs output is the volume group name
                vg_list.append([vg_record.split()[0], uuid.uuid4().hex])
        _logger.debug('Volume groups found: %s', vg_list)
        return vg_list
    except Exception as e:
        _logger.critical(' Failed to list current volume groups: %s', str(e))
        # explicit None on failure (was an implicit fall-through);
        # callers must test the result.
        return None
def exec_vgscan(vgscan_args):
    """
    Scan the system for (new) volume groups.

    Parameters
    ----------
    vgscan_args: list
        list of strings, arguments for vgscan

    Returns
    -------
    bool: True on success, raises an exception on failure.
    """
    scan_cmd = ['vgscan'] + vgscan_args
    _logger.debug('__ Executing %s', scan_cmd)
    pause_msg(scan_cmd, pause_flag='_OCI_LVM')
    try:
        scan_output = run_popen_cmd(scan_cmd)['output'].decode('utf-8')
    except Exception as e:
        #
        # vgscan failed
        _logger.critical(' Failed to scan for volume groups: %s', str(e))
        raise OciMigrateException('Failed to scan for volume groups:') from e
    _logger.debug('Volume groups scanned:\n%s', str(scan_output))
    return True
def generate_backup_name(full_path):
    """
    Generate a name for a file or directory path, as <path>/bck_<file>_timestamp.

    Parameters
    ----------
    full_path: str
        full path of file or directory.

    Returns
    -------
    str: full path of backup file or directory.
    """
    _logger.debug('__ Backup for %s', full_path)
    parent_dir, base_name = os.path.split(full_path)
    return '%s/bck_%s_%s' % (parent_dir, base_name, migrate_data.current_time)
def get_free_nbd():
    """
    Find the first free nbd device name, i.e. an nbd device of size 0.

    Returns
    -------
    str: The free nbd device on success, raises an exception otherwise.
    """
    _logger.debug('__ Get free nb device.')
    devpath = '/sys/class/block/nbd*'
    try:
        for devname in glob(devpath):
            # a size of 0 means the nbd device is not connected
            with open(devname + '/size', 'r') as f:
                nbdsz = int(f.readline())
            if nbdsz == 0:
                freedev = devname.rsplit('/')[-1]
                return '/dev/' + freedev
        # fixed typo in messages: 'devide' -> 'device'
        _logger.critical(' Failed to locate a free nbd device.')
        raise OciMigrateException('\nFailed to locate a free nbd device.')
    except Exception as e:
        _logger.critical(' Failed to screen nbd devices: %s', str(e))
        raise OciMigrateException('\nFailed to screen nbd devices:') from e
def get_grub2_kernels(grub_config_file):
    """
    Get the versions of the kernels defined in the grub2 config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
    list: list of kernel versions.
    """
    _logger.debug('__ Get the kernel versions from %s', grub_config_file)
    kernels_list = list()
    in_menuentry = False
    with open(grub_config_file, 'r') as grub_file:
        for raw_line in grub_file:
            # normalise whitespace and tokenise
            tokens = ' '.join(raw_line.split()).split()
            if not tokens:
                continue
            if tokens[0] == 'menuentry':
                in_menuentry = True
            if in_menuentry and 'linux' in tokens[0]:
                # the first 'linux' line inside a menuentry names the kernel
                in_menuentry = False
                for token in tokens:
                    if 'vmlinuz' in token:
                        kernels_list.append(token.split('-', 1)[1])
    return kernels_list
def get_grub_default_kernel(grub_config_file):
    """
    Get the kernel version booted by default from the (legacy) grub config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
    str: kernel version; 'not found' when the config has no default stanza.
         NOTE(review): when a default index exists but no matching 'kernel'
         line is found, the function falls off the end and returns None
         implicitly -- confirm callers handle that case.
    """
    _logger.debug('__ Get the default kernel from %s', grub_config_file)
    def find_default_boot(grub_filename):
        # Return the integer value of the 'default=<n>' line; all whitespace
        # is stripped first so 'default = 0' is also recognised.
        with open(grub_filename, 'r') as grub_fd:
            for grub_ln in grub_fd:
                grub_line_list = grub_ln.translate({ord(c): None for c in string.whitespace}).split('=')
                if grub_line_list[0] == 'default':
                    return int(grub_line_list[1])
        #
        # there should be always a default boot.
        return None
    default_kernel_nb = find_default_boot(grub_config_file)
    if default_kernel_nb is None:
        # todo:
        # locate default and installed kernels in all the flavors of grub2 and EFI
        # _logger.critical('No default boot found.')
        # raise OciMigrateException('No default boot found.')
        #
        # not so fatal, only if boot device is not by lvm, by label or by uuid
        default_kernel_nb = 0
        kernelversion = 'not found'
        return kernelversion
    # the default is an index into the sequence of 'kernel' stanzas
    kernel_cnt = 0
    with open(grub_config_file, 'r') as grub_config_fd:
        for grub_line in grub_config_fd:
            # normalise whitespace and tokenise
            gl = ' '.join(grub_line.split()).split()
            if bool(gl):
                if gl[0] == 'kernel':
                    if kernel_cnt == default_kernel_nb:
                        # version is everything after the first '-' in the
                        # vmlinuz token
                        for it in gl:
                            if 'vmlinuz' in it:
                                kernelversion = it.split('-', 1)[1]
                                return kernelversion
                    kernel_cnt += 1
def get_grub_kernels(grub_config_file):
    """
    Get the versions of the kernels defined in the (legacy) grub config file.

    Parameters
    ----------
    grub_config_file: str
        Full path of the grub config file.

    Returns
    -------
    list: list of kernel versions.
    """
    _logger.debug('__ Get the kernel versions from %s', grub_config_file)
    kernel_list = list()
    with open(grub_config_file, 'r') as config_fd:
        for raw_line in config_fd:
            # normalise whitespace and tokenise
            tokens = ' '.join(raw_line.split()).split()
            if tokens and tokens[0] == 'kernel':
                # version is everything after the first '-' in the vmlinuz token
                kernel_list.extend(token.split('-', 1)[1] for token in tokens if 'vmlinuz' in token)
    return kernel_list
def get_nameserver():
    """
    Get the nameserver definitions, store the result in migrate_data.nameserver.

    Tries dig, nmcli and /etc/resolv.conf in that order and keeps the first
    ipv4 address found.

    Returns
    -------
    bool: True on success, False otherwise.
    """
    def dig_ns():
        """
        Look for a nameserver definition in the output of the dig command.

        Returns
        -------
        list: list of ipv4 nameservers.
        """
        dig = 'dig'
        dig_list = list()
        if exec_exists(dig):
            cmd = [dig]
            try:
                dig_output = run_popen_cmd(cmd)['output'].decode('utf-8').splitlines()
                for dig_item in dig_output:
                    if 'SERVER' in dig_item:
                        # a SERVER line without an ipv4 address raises
                        # AttributeError here, caught and logged below.
                        dig_list.append(re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", dig_item).group())
                        _logger.debug('Found ns %s', dig_list[-1])
            except Exception as e:
                _logger.warning(' Failed to identify nameserver using dig: %s\n', str(e))
        else:
            _logger.debug('dig utility not found, install bind-utils.')
        return dig_list

    def nmcli_ns():
        """
        Look for a nameserver definition in the output of the nmcli command.

        Returns
        -------
        list: list of ipv4 nameservers.
        """
        nmcli = 'nmcli'
        nmcli_list = list()
        if exec_exists(nmcli):
            cmd = [nmcli, 'dev', 'show']
            try:
                nm_list = run_popen_cmd(cmd)['output'].decode('utf-8').splitlines()
                for nm_item in nm_list:
                    # keep the value of 'IP4.DNS[x]:'-style fields
                    if 'DNS' in nm_item.split(':')[0]:
                        nmcli_list.append(nm_item.split(':')[1].lstrip().rstrip())
                        _logger.debug('Found ns %s', nmcli_list[-1])
            except Exception as e:
                _logger.warning(' Failed to identify nameserver using nmcli: %s\n', str(e))
        else:
            _logger.debug('nmcli not running.')
        return nmcli_list

    def resolv_ns():
        """
        Look for nameserver definitions in resolv.conf.

        Returns
        -------
        list: list of ipv4 nameservers.
        """
        resolv_list = list()
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                resolvconf_lines = f.read().decode('utf-8').splitlines()
                for nm_item in resolvconf_lines:
                    ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", nm_item)
                    if ip is not None:
                        # NOTE(review): keeps the second whitespace-separated
                        # field rather than ip.group(); confirm resolv.conf
                        # lines are always 'nameserver <address>'.
                        resolv_list.append(nm_item.split(' ')[1].lstrip().rstrip())
                        _logger.debug('Found ns %s', resolv_list[-1])
        except Exception as e:
            _logger.warning('Failed to find nameserver in resolv.conf: %s.', str(e))
        return resolv_list

    _logger.debug("__ Collecting nameservers.")
    dns_list = dig_ns() + nmcli_ns() + resolv_ns()
    _logger.debug('Found nameservers: %s', dns_list)
    #
    # Verify if found one.
    if bool(dns_list):
        for ip_ad in dns_list:
            try:
                if ipaddress.ip_address(ip_ad).version == 4:
                    # fixed: store the ipv4 address just validated; the
                    # original stored dns_list[0], which might not be ipv4.
                    migrate_data.nameserver = ip_ad
                    break
            except ValueError as v:
                _logger.debug('%s', str(v))
            except Exception as e:
                _logger.debug('%s', str(e))
        _logger.debug('Nameserver set to %s', migrate_data.nameserver)
        return True
    return False
def is_thread_running(thread_id):
    """
    Verify if a thread is active.

    Parameters
    ----------
    thread_id: thread
        The thread to test.

    Returns
    -------
    bool: True if the thread is in the list of alive threads, False otherwise.
    """
    _logger.debug('__ Testing threadid %s.', thread_id)
    return thread_id in threading.enumerate()
def is_root():
    """
    Verify the operator is the root user.

    Returns
    -------
    bool: True if the real user id is root (0), False otherwise.
    """
    return os.getuid() == 0
def leave_chroot(root2return, dir2return):
    """
    Leave a chroot environment and return to another one.

    Parameters
    ----------
    root2return: file descriptor
        The file descriptor of the root to return to.
    dir2return: str
        The original working dir to return to.

    Returns
    -------
    bool: True on success, raises exception on failure.
    """
    _logger.debug('__ Leaving chroot jail.')
    try:
        #
        # leave chroot
        os.fchdir(root2return)
        os.chroot('.')
        os.close(root2return)
        _logger.debug('Left change root environment.')
        #
        # return to working directory
        os.chdir(dir2return)
        return True
    except Exception as e:
        _logger.error(' Failed to return from chroot: %s', str(e))
        # fixed: the exception was instantiated but never raised, so the
        # function silently returned None; raise it as documented.
        raise OciMigrateException('Failed to return from chroot: %s' % str(e)) from e
def mount_fs(mountpoint):
    """
    Mount a filesystem specified in fstab, by mountpoint only.

    Parameters
    ----------
    mountpoint: str
        The mountpoint.

    Returns
    -------
    bool: True on success, False otherwise
    """
    cmd = ['mount', mountpoint]
    pause_msg(cmd, pause_flag='_OCI_MOUNT')
    _logger.debug('__ Mounting %s', mountpoint)
    try:
        # show a progress bar sized to the terminal while the mount runs
        _, nbcols = terminal_dimension()
        mountwait = ProgressBar(nbcols, 0.2, progress_chars=['mounting %s' % mountpoint])
        mountwait.start()
        _logger.debug('Command: %s', cmd)
        cmdret = run_call_cmd(cmd)
        _logger.debug('%s returned %d', cmd, cmdret)
        if cmdret == 0:
            return True
        # nonzero exit is turned into an exception so it is logged below
        raise Exception('%s failed: %d' % (cmd, cmdret))
    except Exception as e:
        # NOTE(review): if terminal_dimension() or the ProgressBar
        # constructor raises, 'mountwait' is unbound and the finally clause
        # raises NameError -- confirm those calls cannot fail.
        _logger.error(' Failed to %s: %s', cmd, str(e))
        return False
    finally:
        # stop the progress bar thread if it is still running
        if is_thread_running(mountwait):
            mountwait.stop()
def reset_vg_list(vg_list):
    """
    Update the local volume group rename list in place: set each new name
    back to its original one.

    Parameters
    ----------
    vg_list: list (of lists)
        The volume group rename list.

    Returns
    -------
    bool: True on success, False otherwise.
    """
    _logger.debug('__ Updating the vg list.')
    for name_pair in vg_list:
        _logger.debug('Updating %s to %s.', name_pair[1], name_pair[0])
        name_pair[1] = name_pair[0]
    return True
def restore_nameserver():
    """
    Restore the nameserver configuration saved by set_nameserver().

    Returns
    -------
    bool: True on success, False otherwise.
    """
    _logger.debug('__ Restore nameserver data.')
    # global resolv_conf_path
    resolvpath = '/etc/resolv.conf'
    try:
        #
        # save used one: move the temporary resolv.conf written during the
        # migration out of the way first
        if os.path.isfile(resolvpath):
            if not bool(exec_rename(resolvpath, resolvpath + '_temp_' + migrate_data.current_time)):
                _logger.debug('Failed to rename %s to %s, no harm done.',
                              resolvpath, resolvpath + '_temp_' + migrate_data.current_time)
        else:
            _logger.debug('No %s found.', resolvpath)
        #
        # restore original one, saved earlier in migrate_data.resolv_conf_path
        if os.path.isfile(migrate_data.resolv_conf_path):
            if bool(exec_rename(migrate_data.resolv_conf_path, resolvpath)):
                _logger.debug('Successfully restored %s', resolvpath)
            else:
                _logger.debug('Failed to restore %s.', resolvpath)
                raise OciMigrateException('Failed to restore nameserver config.')
        else:
            _logger.debug('No %s found.', migrate_data.resolv_conf_path)
        return True
    except Exception as e:
        # a broken resolver configuration is not fatal here, but warn
        error_msg('Continuing but might cause issues installing cloud-init: %s' % str(e))
        return False
@state_loop(3)
def rm_nbd():
    """
    Unload the nbd kernel module.

    Returns
    -------
    bool: True on succes, False on failure.
    """
    return bool(exec_rmmod('nbd'))
def run_call_cmd(command):
    """
    Execute an os command which does not return data.

    Parameters
    ----------
    command: list
        The os command and its arguments.

    Returns
    -------
    int: The return value.
    """
    _logger.debug('__ Executing %s', command)
    assert (len(command) > 0), 'empty command list'
    try:
        return subprocess.call(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError as e:
        raise OciMigrateException('Error encountered while running %s:' % command) from e
    except OSError as oserr:
        raise OciMigrateException('OS error encountered while running %s:' % command) from oserr
def run_popen_cmd(command, valid_return=frozenset([0])):
    """
    Execute an os command and collect stdout and stderr.

    Parameters
    ----------
    command: list
        The os command and its arguments.
    valid_return: frozenset
        A set of valid return codes, default = [0].

    Returns
    -------
    dict: {'output': output,
           'error': error,
           'return_code: return_code}
          where output and error are bytes; raises an exception on failure.
    """
    _logger.debug('__ Executing %s.', command)
    output_dict = dict()
    # run only if the executable can be located on the path
    if exec_exists(command[0]) is not None:
        _logger.debug('running %s', command)
        try:
            ext_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # blocks until the process terminates
            output, error = ext_process.communicate()
            return_code = ext_process.returncode
            output_dict['output'] = output
            output_dict['error'] = error
            output_dict['return_code'] = return_code
            #if return_code != 0:
            #    if bool(error):
            #        _logger.debug('Error occurred while running %s: %s - %s',
            #                      command, return_code, error.decode('utf-8'), exc_info=True)
            #    raise OciMigrateException('Error encountered while running %s: %s - %s'
            #                              % (command, return_code, error.decode('utf-8')))
            if not bool(output):
                _logger.debug('%s did not return output.', command)
            if bool(error):
                # not necessarily fatal
                _logger.debug('%s returned message %s.', command, error.decode('utf-8'))
            _logger.debug('%s returned code %s', command, str(return_code))
            # only exit codes listed in valid_return count as success
            if return_code not in valid_return:
                raise OciMigrateException('Error encountered while running %s: %s - %s'
                                          % (command, return_code, error.decode('utf-8')))
            return output_dict
        except OSError as os_error:
            raise OciMigrateException('OS error encountered while running %s:' % command) from os_error
        except Exception as e:
            raise OciMigrateException('Error encountered while running %s:' % command) from e
    else:
        _logger.critical(' %s not found.', command[0])
        raise OciMigrateException('%s does not exist' % command[0])
def set_nameserver():
    """
    Set a temporary nameserver, saving the current resolver configuration
    in migrate_data.resolv_conf_path.

    Returns
    -------
    bool: True on success, False otherwise.
    """
    _logger.debug('__ Set nameserver.')
    #
    # rename eventual existing resolv.conf
    resolvpath = '/etc/resolv.conf'
    try:
        #
        # save current configuration, whether file, symlink or directory
        if os.path.isfile(resolvpath) or os.path.islink(resolvpath) or os.path.isdir(resolvpath):
            migrate_data.resolv_conf_path = exec_rename(resolvpath)
            if not bool(migrate_data.resolv_conf_path):
                _logger.debug('Failed to save current nameserver configuration.')
        else:
            _logger.error(' No %s found', resolvpath)
        #
        # write new; use write() since a single string is written, not a
        # sequence of lines (writelines(str) iterates over its characters).
        with open(resolvpath, 'w') as f:
            f.write('nameserver %s\n' % migrate_data.nameserver)
        return True
    except Exception as e:
        error_msg('Failed to set nameserver: %s\n continuing but might cause issues installing cloud-init.' % str(e))
        return False
def show_hex_dump(bindata):
    """
    Render binary data as a classic hex dump: address column, hex byte
    column and a printable-character column.

    Parameters
    ----------
    bindata: binary data.

    Returns
    -------
    str: hexdump format
    """
    blocklen = 16
    hexdata = ''
    try:
        for blocknr, offset in enumerate(range(0, len(bindata), blocklen)):
            chunk = bytearray(bindata[offset:offset + blocklen])
            hexpart = ' '.join('%02x' % byte for byte in chunk)
            # extra space between the two groups of 8 bytes
            hexpart = hexpart[0:23] + ' ' + hexpart[23:]
            printable = ''.join(chr(byte) if 32 <= byte <= 127 else '.' for byte in chunk)
            hexdata += '%08x : %s : %s :\n' % (blocknr * 16, hexpart, printable)
    except Exception as e:
        # on failure return whatever was rendered so far
        _logger.error(' Exception: %s', str(e))
    return hexdata
@state_loop(3)
def unmount_something(mountpoint):
    """
    Unmount a mountpoint.

    Parameters
    ----------
    mountpoint: str
        The mountpoint.

    Returns
    -------
    bool: True on success, False otherwise.
    """
    _logger.debug('__ Unmount %s.', mountpoint)
    if not os.path.ismount(mountpoint):
        # nothing mounted here, nothing to do
        _logger.debug('%s is not a mountpoint, quitting', mountpoint)
        return True
    _logger.debug('%s is a mountpoint.', mountpoint)
    #
    cmd = ['umount', mountpoint]
    pause_msg(cmd, pause_flag='_OCI_MOUNT')
    try:
        _logger.debug('command: %s', cmd)
        umount_ret = run_call_cmd(cmd)
        _logger.debug('%s : %d', cmd, umount_ret)
        if umount_ret != 0:
            raise Exception('%s failed: %d' % (cmd, umount_ret))
        return True
    except Exception as e:
        _logger.error(' Failed to %s: %s', cmd, str(e))
        return False
def unmount_pseudo(pseudomounts):
    """
    Unmount the pseudo devices.

    Parameters
    ----------
    pseudomounts: list
        The list of pseudo devices.

    Returns
    -------
    True on success, False otherwise.
    """
    _logger.debug('__ Unmount %s', pseudomounts)
    all_unmounted = True
    # unmount the deepest mountpoints first
    pseudomounts.sort(reverse=True)
    for mountpoint in pseudomounts:
        _logger.debug('Unmount %s', mountpoint)
        if unmount_something(mountpoint):
            _logger.debug('%s successfully unmounted.', mountpoint)
        else:
            _logger.error(' Failed to unmount %s', mountpoint)
            all_unmounted = False
    return all_unmounted
|
en
| 0.608291
|
# oci-utils # # Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown # at http://oss.oracle.com/licenses/upl. Module containing migrate platform system tools. Backup a directory as path/bck_directory_name_[current_time] Parameters ---------- directory_name: str The full path of the directory. Returns ------- str: path of backup directory on success, None otherwise. Backup a single file as path/bck_file_name_[current_time] Parameters ---------- file_name: str The full path of the directory. Returns ------- str: path of backup file on success, None otherwise. Execute the chroot command. Parameters ---------- newroot: str Full path of new root directory. Returns ------- file descriptor, str, str: The file descriptor of the current root on success, path to restore, current working dir; raises an exception on failure. # # current working directory # # change root # # need to return environment. # # adjust PATH to make sure. Load nbd module Returns ------- bool: True on succes, False on failure, raise an exception on call error. Run a blkid command. Parameters ---------- blkid_args: list The argument list for the blkid command. Returns ------- dict: blkid return value on success, None otherwise. Verify if executable exists in path. Parameters ---------- executable: str The file to be tested. Returns ------- str: full path on success, None otherwise. Executes ldconfig to update the shared library cache. Returns ------- int: 0 on success, raises an exception otherwise. Run an lsblk command. Parameters ---------- lsblk_args: list The argument list for the blkid command. Returns ------- dict: blkid return value on success, None otherwise. Scan the system for logical volumes. Parameters ---------- lvscan_args: list list of strings, arguments for lvscan Returns ------- list: A list of strings, the output of lvscan --verbose on success, raises an exeception on failure. 
# # lvscan failed Create a directory. Parameters ---------- dirname: str The full path of the directory. Returns ------- bool: True on success, False otherwise. Remount proc, sys and dev. Parameters ---------- rootdir: str The mountpoint of the root partition. Returns ------- list: The list of new mountpoints on success, None otherwise. Collect data about the device on the image using the parted utility. Parameters ---------- devname: str The device name. Returns ------- dict: The device data from parted utility on success, None otherwise. Update the lvm cache. Parameters ---------- pvscan_args: list List of strings, arguments for pvscan devname: str Device name to scan. Returns ------- bool: True on success, raises an exception on failure. # # pvscan failed Execute a qemu-nbd command. Parameters ---------- qemunbd_args: list The list of arguments for qemu-nbd. Returns ------- int: 0 on success, raise exception otherwise. Renames a file, symbolic link or directory to path/bck_filename_current_time. Parameters ---------- some_name: str Full path of the original file. to_name: str Full path of the destination file, if specified, using default otherwise. Returns ------- str: the path of the renamed file on success, None otherwise. # # # delete to_ if already exists # # if file, symlink or directory # # rename Create a directory. Parameters ---------- dirname: str The full path of the directory. Returns ------- bool: True on success, raises an exception otherwise. Removes a module. Parameters ---------- module: str The module name. Returns ------- bool: True # # ignoring eventual errors, which will be caused by module already removed. Collect the data about the partitions on the image file mounted on the device devname using the sfdisk utility. Parameters ---------- devname: str The device. Returns ------- dict: The partition data with sfdisk results on success, None otherwise. Execute vgchange command. 
Parameters ---------- changecmd: list Parameters for the vgchange command. Returns ------- str: vgchange output. Rename a list of volume groups. Parameters ---------- vg_list: list list of lists [original name, new name] direction: str if FORWARD, rename original name to new name, if BACKWARD from new name to original name. Returns ------- bool: True on success, False otherwise. # # List the local volume group and generates a new (temporary) name as a hex UUID. Returns ------- list: list of lists [original volume group name, new volume group name]. Scan the system for (new) volume groups. Parameters ---------- vgscan_args: list list of strings, arguments for vgscan Returns ------- bool: True on success, raises an exeception on failure. # # vgscan failed Generate a name for a file or directory path, as <path>/bck_<file>_timestamp. Parameters ---------- full_path: str full path of file or directory. Returns ------- str: full path of backup file or directory. Find first free device name Returns ------- str: The free nbd device on success, None otherwise. Get the versions of the kernels defined in the grub2 config file. Parameters ---------- grub_config_file: str Full path of the grub config file. Returns ------- list: list of kernel versions. Get the kernel version booted by default from the grub config file. Parameters ---------- grub_config_file: str Full path of the grub config file. Returns ------- str: kernel version. # # there should be always a default boot. # todo: # locate default and installed kernels in all the flavors of grub2 and EFI # _logger.critical('No default boot found.') # raise OciMigrateException('No default boot found.') # # not so fatal, only if boot device is not by lvm, by label or by uuid Get the versions of the kernels defined in the grub config file. Parameters ---------- grub_config_file: str Full path of the grub config file. Returns ------- list: list of kernel versions. 
Get the nameserver definitions, store the result in migrate_data.nameserver. Returns ------- bool: True on success, False otherwise. Look for a nameserver definition in the output of the dig command. Returns ------- list: list of ipv4 nameservers. Look for a nameserver definition in the output of the nmcli command. Returns ------- list: list of ipv4 nameservers. Look for nameserver definition in resolv.conf. Returns ------- list: list of ipv4 nameservers. # global nameserver # # Verify if found one. Verify if thread is active. Parameters ---------- thread_id: thread The thread to test. Returns ------- bool: True on success, False otherwise. Verify is operator is the root user. Returns ------- bool: True on success, False otherwise. Leave a chroot environment and return to another one. Parameters ---------- root2return: file descriptor The file descriptor of the root to return to. dir2return: str The original working dir to return to. Returns ------- bool: True on success, raises exception on failure. # # leave chroot # # return to working directory Mount a filesystem specified in fstab, by mountpoint only. Parameters ---------- mountpoint: str The mountpoint. Returns ------- bool: True on success, False otherwise Update the local volume group list Parameters ---------- vg_list: list (of lists) The volume group rename list. Returns ------- bool: True on success, False otherwise. Restore nameserver configuration. Returns ------- bool: True on success, False otherwise. # global resolv_conf_path # # save used one # # restore original one Unload kernel module nbd. Returns ------- bool: True on succes, False on failure. Execute an os command which does not return data. Parameters ---------- command: list The os command and its arguments. Returns ------- int: The return value. Execute an os command and collect stdout and stderr. Parameters ---------- command: list The os command and its arguments. 
valid_return: frozenset A set of valid return codes, default = [0] Returns ------- dict: {'output': output, 'error': error, 'return_code: return_code} raises an exception on failure. #if return_code != 0: # if bool(error): # _logger.debug('Error occurred while running %s: %s - %s', # command, return_code, error.decode('utf-8'), exc_info=True) # raise OciMigrateException('Error encountered while running %s: %s - %s' # % (command, return_code, error.decode('utf-8'))) # not necessarily fatal Setting temporary nameserver. Returns ------- bool: True on success, False otherwise. # global resolv_conf_path # # rename eventual existing resolv.conf # # save current # # write new Show hex and readable version of binary data. Parameters ---------- bindata: binary data. Returns ------- str: hexdump format Unmount. Parameters ---------- mountpoint: str The mountpoint. Returns ------- bool: True on success, False otherwise. # Unmount the pseudodevices. Parameters ---------- pseudomounts: list The list of pseudodevices Returns ------- True on success, False otherwise.
| 1.940619
| 2
|
graphAA.py
|
gwiedeman/aaText
| 3
|
6629094
|
import os
import json
import argparse
import sys
from nltk.tokenize import word_tokenize
import nltk
import matplotlib.pyplot as plt

# Plot the frequency of one or more n-gram phrases across a corpus of
# plain-text issue files named "YYYY_*", grouped by issue or by year.
argParse = argparse.ArgumentParser()
argParse.add_argument("-n", help="Phrase(s) to graph, different versions separated by a pipe (|). Accepts multiple args.", action='append')
argParse.add_argument("-x", help="Value of X-Axis, supports issue or year, and defaults to year.")
argParse.add_argument("-range", help="Range of years to include, such as: 1980-1998")
argParse.add_argument("-per", help="Instances per x number of words.")
argParse.add_argument("-input", help="Path to directory of Text files (optional).")
args = argParse.parse_args()
home = os.path.dirname(os.path.realpath(__file__))

yList = []    # x-axis labels (issue filenames or year strings)
xLists = {}   # phrase -> per-group match counts (one plotted line per phrase)
for phrase in args.n:
    xLists[phrase] = []

# BUG FIX: this previously read "args.g", which is not a defined argument and
# raised AttributeError whenever -input was supplied.
if args.input:
    textDir = args.input
else:
    textDir = os.path.join(home, "text")

if not os.path.isdir(textDir):
    print ("Error: Input Directory is incorrect. Please run extractText.py or enter the path to a directory of text files after with -input.")
else:
    if args.x:
        method = args.x
    else:
        method = "year"
    totalCount = 0
    xAxis = []
    # Default so plt.ylabel() below cannot hit an undefined name when the
    # corpus yields no readable text.
    yLabel = "Instances"
    for root, dirs, files in os.walk(textDir):
        if method.lower() == "issue" or method.lower() == "issues":
            # One data point per file (issue), optionally filtered by year range.
            if args.range:
                r1, r2 = args.range.split("-")
                for file in files:
                    fileYear = int(file.split("_")[0])
                    if fileYear >= int(r1) and fileYear <= int(r2):
                        xAxis.append(file)
            else:
                xAxis = files
            # BUG FIX: methodCount was only set when -range was given, which
            # made the summary print below raise NameError otherwise.
            methodCount = len(xAxis)
        else:
            # One data point per year: group all files sharing a year prefix.
            if args.range:
                r1, r2 = args.range.split("-")
            else:
                r1 = 1900
                r2 = 2200
            methodCount = 0
            for x in range(int(r1), int(r2) + 1):
                methodCount += 1
                yearGroup = [file for file in files if file.startswith(str(x))]
                if len(yearGroup) > 0:
                    xAxis.append(yearGroup)
        for group in xAxis:
            # Concatenate the text behind this data point (one file, or all
            # the files of one year).
            text = ""
            if isinstance(group, str):
                xLabel = group
                print ("Reading " + str(group) + "...")
                with open(os.path.join(root, group), 'r', encoding='utf-8') as textFile:
                    text = textFile.read()
            else:
                for issue in group:
                    xLabel = issue.split("_")[0]
                    print ("Reading " + str(issue) + "...")
                    with open(os.path.join(root, issue), 'r', encoding='utf-8') as textFile:
                        text = text + "\n" + textFile.read()
            if text != "":
                try:
                    tokens = word_tokenize(text.lower())
                except LookupError:
                    # The punkt tokenizer data is missing; fetch it once and retry.
                    nltk.download('punkt')
                    tokens = word_tokenize(text.lower())
                wordCount = len(tokens)
                yList.append(xLabel)
                for phrase in args.n:
                    # Count every pipe-separated variant of the phrase.
                    matchCount = 0
                    matchList = str(phrase).strip().lower().split("|")
                    for version in matchList:
                        match = tuple(version.split(" "))
                        for gram in nltk.ngrams(tokens, len(match)):
                            if gram == match:
                                matchCount += 1
                    if args.per:
                        # Normalize raw counts to instances per N words.
                        yLabel = "Instances per " + str(args.per) + " Words"
                        chunk = wordCount / int(args.per)
                        matchCount = matchCount / chunk
                    xLists[phrase].append(matchCount)
                    totalCount = totalCount + matchCount
        # Guard against an empty corpus (methodCount == 0 would divide by zero).
        if methodCount > 0:
            print (str(totalCount) + " total instances of " + ", ".join(args.n) + " , or " + str(totalCount/methodCount) + " per " + method + ".")
    for key, line in xLists.items():
        plt.plot(yList, line, label=key.title().replace("|", " or "))
    plt.xlabel(str(method).title())
    plt.ylabel(yLabel)
    plt.xticks(rotation="vertical")
    plt.legend(loc='upper left')
    graphTitle = "N-grams in the American Archivist"
    plt.title(graphTitle)
    plt.show()
|
import os
import json
import argparse
import sys
from nltk.tokenize import word_tokenize
import nltk
import matplotlib.pyplot as plt

# Plot the frequency of one or more n-gram phrases across a corpus of
# plain-text issue files named "YYYY_*", grouped by issue or by year.
argParse = argparse.ArgumentParser()
argParse.add_argument("-n", help="Phrase(s) to graph, different versions separated by a pipe (|). Accepts multiple args.", action='append')
argParse.add_argument("-x", help="Value of X-Axis, supports issue or year, and defaults to year.")
argParse.add_argument("-range", help="Range of years to include, such as: 1980-1998")
argParse.add_argument("-per", help="Instances per x number of words.")
argParse.add_argument("-input", help="Path to directory of Text files (optional).")
args = argParse.parse_args()
home = os.path.dirname(os.path.realpath(__file__))

yList = []    # x-axis labels (issue filenames or year strings)
xLists = {}   # phrase -> per-group match counts (one plotted line per phrase)
for phrase in args.n:
    xLists[phrase] = []

# BUG FIX: this previously read "args.g", which is not a defined argument and
# raised AttributeError whenever -input was supplied.
if args.input:
    textDir = args.input
else:
    textDir = os.path.join(home, "text")

if not os.path.isdir(textDir):
    print ("Error: Input Directory is incorrect. Please run extractText.py or enter the path to a directory of text files after with -input.")
else:
    if args.x:
        method = args.x
    else:
        method = "year"
    totalCount = 0
    xAxis = []
    # Default so plt.ylabel() below cannot hit an undefined name when the
    # corpus yields no readable text.
    yLabel = "Instances"
    for root, dirs, files in os.walk(textDir):
        if method.lower() == "issue" or method.lower() == "issues":
            # One data point per file (issue), optionally filtered by year range.
            if args.range:
                r1, r2 = args.range.split("-")
                for file in files:
                    fileYear = int(file.split("_")[0])
                    if fileYear >= int(r1) and fileYear <= int(r2):
                        xAxis.append(file)
            else:
                xAxis = files
            # BUG FIX: methodCount was only set when -range was given, which
            # made the summary print below raise NameError otherwise.
            methodCount = len(xAxis)
        else:
            # One data point per year: group all files sharing a year prefix.
            if args.range:
                r1, r2 = args.range.split("-")
            else:
                r1 = 1900
                r2 = 2200
            methodCount = 0
            for x in range(int(r1), int(r2) + 1):
                methodCount += 1
                yearGroup = [file for file in files if file.startswith(str(x))]
                if len(yearGroup) > 0:
                    xAxis.append(yearGroup)
        for group in xAxis:
            # Concatenate the text behind this data point (one file, or all
            # the files of one year).
            text = ""
            if isinstance(group, str):
                xLabel = group
                print ("Reading " + str(group) + "...")
                with open(os.path.join(root, group), 'r', encoding='utf-8') as textFile:
                    text = textFile.read()
            else:
                for issue in group:
                    xLabel = issue.split("_")[0]
                    print ("Reading " + str(issue) + "...")
                    with open(os.path.join(root, issue), 'r', encoding='utf-8') as textFile:
                        text = text + "\n" + textFile.read()
            if text != "":
                try:
                    tokens = word_tokenize(text.lower())
                except LookupError:
                    # The punkt tokenizer data is missing; fetch it once and retry.
                    nltk.download('punkt')
                    tokens = word_tokenize(text.lower())
                wordCount = len(tokens)
                yList.append(xLabel)
                for phrase in args.n:
                    # Count every pipe-separated variant of the phrase.
                    matchCount = 0
                    matchList = str(phrase).strip().lower().split("|")
                    for version in matchList:
                        match = tuple(version.split(" "))
                        for gram in nltk.ngrams(tokens, len(match)):
                            if gram == match:
                                matchCount += 1
                    if args.per:
                        # Normalize raw counts to instances per N words.
                        yLabel = "Instances per " + str(args.per) + " Words"
                        chunk = wordCount / int(args.per)
                        matchCount = matchCount / chunk
                    xLists[phrase].append(matchCount)
                    totalCount = totalCount + matchCount
        # Guard against an empty corpus (methodCount == 0 would divide by zero).
        if methodCount > 0:
            print (str(totalCount) + " total instances of " + ", ".join(args.n) + " , or " + str(totalCount/methodCount) + " per " + method + ".")
    for key, line in xLists.items():
        plt.plot(yList, line, label=key.title().replace("|", " or "))
    plt.xlabel(str(method).title())
    plt.ylabel(yLabel)
    plt.xticks(rotation="vertical")
    plt.legend(loc='upper left')
    graphTitle = "N-grams in the American Archivist"
    plt.title(graphTitle)
    plt.show()
|
ru
| 0.208019
|
#print (phrase + ": " + str(matchCount)) #print (xLists)
| 3.263671
| 3
|
convert_cached_copy_to_feed.py
|
ClockworkNet/gsa-admin-toolkit
| 34
|
6629095
|
<filename>convert_cached_copy_to_feed.py
#!/usr/bin/python2
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""This script helps to migrate data from one appliance to another.
You need to compile a list of source urls (e.g. via export urls from crawl
diagnostics or export all urls).
IMPORTANT NOTES:
- the script will only export the content, not any meta tags associated
- the script will only contain what the cached version contains
i.e. the truncated file if the original source was longer than 2.5 MB
- since the script uses the cached version, it assumes to have the default
stylsheet. In case you modified the chached version header in the
default stylesheet, you need to adjust the header scrapper
- the script will use one connection to the appliance at a time to
download the cached versions. This means that you will have less
serving bandwidth during the runtime of the script
- 1GB limit is not honored. You might have to manually split the output file.
TODO(mblume)
- add meta data via get the search results first with getfields
- parse xml and add these as metadata to the feed.
"""
import base64
import codecs
import getopt
import sys
import urllib
import urllib2
import zlib
import HTMLParser
import xml.etree.cElementTree as ElementTree
from xml.sax.saxutils import quoteattr
#
# constants for the script
# NOTE: you should modify the <datasource>-tag content
#
# the xml header for the content feed
feed_header_text = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE gsafeed PUBLIC "-//Google//DTD GSA Feeds//EN" "">
<gsafeed>
<header>\
<datasource>convert_cached_copy_to_feed</datasource>
<feedtype>full</feedtype>
</header>
<group>\n"""
# the xml footer for the content feed
feed_footer_text = ' </group>\n</gsafeed>'
def DeflateAndBase64Encode(string_val):
  """Compress string_val with zlib and return it base64-encoded."""
  # Compress first, then encode, so the feed carries compact binary safely.
  return base64.b64encode(zlib.compress(string_val))
def Usage():
  """Print script usage instructions on stdout."""
  # The trailing %s is replaced with the invoked script name (sys.argv[0]).
  print """A python script to download the cached version from a GSA
and generate a feed that can be submitted to another GSA.
Usage: %s ARGS
--gsaname: hostname or ip of the source gsa,
e.g. search.company.com or 172.16.31.10
--urlfile: path and filename of the file containing all urls to download.
The format should be one url per line
--output: path and filename of the generated XML feed file
--help: output this message""" % sys.argv[0]
def main(argv):
  """Fetch the GSA cached copy of every URL and write them as one feed file.

  Parameters
  ----------
  argv : list of str
      Raw command line (argv[0] is the program name); parsed with getopt.
  """
  #
  # Parameters we are going to initialize via command line args
  #
  # URL that is being indexed by the appliance.
  cached_url_file_name = None
  # Hostname of the GSA/Mini.
  appliance_hostname = None
  # output file:
  output_feed_file_name = None
  try:
    opts, args = getopt.getopt(argv[1:], None,
                               ['help', 'gsaname=', 'urlfile=', 'output='])
  except getopt.GetoptError:
    # print help information and exit:
    Usage()
    sys.exit(2)
  for opt, arg in opts:
    if opt == '--help':
      Usage()
      sys.exit()
    if opt == '--gsaname':
      appliance_hostname = arg
    if opt == '--urlfile':
      cached_url_file_name = arg
    if opt == '--output':
      output_feed_file_name = arg
  # All three options are mandatory; otherwise show usage and fail.
  if appliance_hostname and cached_url_file_name and output_feed_file_name:
    #
    # Real work begins here.
    #
    try:
      parser = HTMLParser.HTMLParser()
      # NOTE(review): if this open() raises, the finally block below hits a
      # NameError on output_file; also neither file handle is ever close()d.
      output_file = open(output_feed_file_name, 'w')
      output_file.write(feed_header_text)
      # get all cached urls:
      cached_url_file = open(cached_url_file_name, 'r')
      for url in cached_url_file:
        # One request per source URL against the appliance's cache endpoint.
        cached_url = 'http://' + appliance_hostname + '/search?q=cache:'
        cached_url += urllib.quote_plus(url.rstrip())
        print 'Accessing URL - %s ' %cached_url
        # since 7.0 (and possibly earlier), content is an XML document
        # containing the cached content
        content = urllib2.urlopen(cached_url).read()
        gsp = ElementTree.fromstring(content)
        content_type = gsp.findall('.//CACHE_CONTENT_TYPE')[0].text
        blob = gsp.findall('.//BLOB')[0]
        encoding = blob.get('encoding')
        # The BLOB element carries the cached page, base64-wrapped and then
        # encoded as declared by its "encoding" attribute.
        cache_response = codecs.decode(
            base64.b64decode(blob.text), encoding)
        if content_type == 'text/plain':
          # the blob that comes back is wrapped in HTML; unwrap it
          pre_body = cache_response[cache_response.find('<pre>') + len('<pre>'):
                                    cache_response.rfind('</pre>')]
          cached_content = parser.unescape(pre_body)
        else:
          cached_content = cache_response
        compressed_cached_content = DeflateAndBase64Encode(
            codecs.encode(cached_content, 'utf-8'))
        # debug output------------------------------------
        #print 'complete content from GSA is:\n%s' % cached_content
        #print 'cached content is:\n%s' % compressed_cached_content
        # end debug output --------------------------------
        output_file.write(""" <record url=%s mimetype=%s>
    <content encoding="base64compressed">%s</content>
  </record>\n""" % (quoteattr(url.rstrip()), quoteattr(content_type),
                    compressed_cached_content))
    except Exception, exception:
      print 'Got exception: %s' %exception
      sys.exit(1)
    finally:
      # Always terminate the feed XML, even after a partial failure.
      output_file.write(feed_footer_text)
  else:
    Usage()
    sys.exit(1)
if __name__ == '__main__':
  # Script entry point; pass the full argv so main() can report the program name.
  main(sys.argv)
|
<filename>convert_cached_copy_to_feed.py
#!/usr/bin/python2
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""This script helps to migrate data from one appliance to another.
You need to compile a list of source urls (e.g. via export urls from crawl
diagnostics or export all urls).
IMPORTANT NOTES:
- the script will only export the content, not any meta tags associated
- the script will only contain what the cached version contains
i.e. the truncated file if the original source was longer than 2.5 MB
- since the script uses the cached version, it assumes to have the default
stylsheet. In case you modified the chached version header in the
default stylesheet, you need to adjust the header scrapper
- the script will use one connection to the appliance at a time to
download the cached versions. This means that you will have less
serving bandwidth during the runtime of the script
- 1GB limit is not honored. You might have to manually split the output file.
TODO(mblume)
- add meta data via get the search results first with getfields
- parse xml and add these as metadata to the feed.
"""
import base64
import codecs
import getopt
import sys
import urllib
import urllib2
import zlib
import HTMLParser
import xml.etree.cElementTree as ElementTree
from xml.sax.saxutils import quoteattr
#
# constants for the script
# NOTE: you should modify the <datasource>-tag content
#
# the xml header for the content feed
feed_header_text = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE gsafeed PUBLIC "-//Google//DTD GSA Feeds//EN" "">
<gsafeed>
<header>\
<datasource>convert_cached_copy_to_feed</datasource>
<feedtype>full</feedtype>
</header>
<group>\n"""
# the xml footer for the content feed
feed_footer_text = ' </group>\n</gsafeed>'
def DeflateAndBase64Encode(string_val):
  """Compress string_val with zlib and return it base64-encoded."""
  # Compress first, then encode, so the feed carries compact binary safely.
  return base64.b64encode(zlib.compress(string_val))
def Usage():
  """Print script usage instructions on stdout."""
  # The trailing %s is replaced with the invoked script name (sys.argv[0]).
  print """A python script to download the cached version from a GSA
and generate a feed that can be submitted to another GSA.
Usage: %s ARGS
--gsaname: hostname or ip of the source gsa,
e.g. search.company.com or 172.16.31.10
--urlfile: path and filename of the file containing all urls to download.
The format should be one url per line
--output: path and filename of the generated XML feed file
--help: output this message""" % sys.argv[0]
def main(argv):
  """Fetch the GSA cached copy of every URL and write them as one feed file.

  Parameters
  ----------
  argv : list of str
      Raw command line (argv[0] is the program name); parsed with getopt.
  """
  #
  # Parameters we are going to initialize via command line args
  #
  # URL that is being indexed by the appliance.
  cached_url_file_name = None
  # Hostname of the GSA/Mini.
  appliance_hostname = None
  # output file:
  output_feed_file_name = None
  try:
    opts, args = getopt.getopt(argv[1:], None,
                               ['help', 'gsaname=', 'urlfile=', 'output='])
  except getopt.GetoptError:
    # print help information and exit:
    Usage()
    sys.exit(2)
  for opt, arg in opts:
    if opt == '--help':
      Usage()
      sys.exit()
    if opt == '--gsaname':
      appliance_hostname = arg
    if opt == '--urlfile':
      cached_url_file_name = arg
    if opt == '--output':
      output_feed_file_name = arg
  # All three options are mandatory; otherwise show usage and fail.
  if appliance_hostname and cached_url_file_name and output_feed_file_name:
    #
    # Real work begins here.
    #
    try:
      parser = HTMLParser.HTMLParser()
      # NOTE(review): if this open() raises, the finally block below hits a
      # NameError on output_file; also neither file handle is ever close()d.
      output_file = open(output_feed_file_name, 'w')
      output_file.write(feed_header_text)
      # get all cached urls:
      cached_url_file = open(cached_url_file_name, 'r')
      for url in cached_url_file:
        # One request per source URL against the appliance's cache endpoint.
        cached_url = 'http://' + appliance_hostname + '/search?q=cache:'
        cached_url += urllib.quote_plus(url.rstrip())
        print 'Accessing URL - %s ' %cached_url
        # since 7.0 (and possibly earlier), content is an XML document
        # containing the cached content
        content = urllib2.urlopen(cached_url).read()
        gsp = ElementTree.fromstring(content)
        content_type = gsp.findall('.//CACHE_CONTENT_TYPE')[0].text
        blob = gsp.findall('.//BLOB')[0]
        encoding = blob.get('encoding')
        # The BLOB element carries the cached page, base64-wrapped and then
        # encoded as declared by its "encoding" attribute.
        cache_response = codecs.decode(
            base64.b64decode(blob.text), encoding)
        if content_type == 'text/plain':
          # the blob that comes back is wrapped in HTML; unwrap it
          pre_body = cache_response[cache_response.find('<pre>') + len('<pre>'):
                                    cache_response.rfind('</pre>')]
          cached_content = parser.unescape(pre_body)
        else:
          cached_content = cache_response
        compressed_cached_content = DeflateAndBase64Encode(
            codecs.encode(cached_content, 'utf-8'))
        # debug output------------------------------------
        #print 'complete content from GSA is:\n%s' % cached_content
        #print 'cached content is:\n%s' % compressed_cached_content
        # end debug output --------------------------------
        output_file.write(""" <record url=%s mimetype=%s>
    <content encoding="base64compressed">%s</content>
  </record>\n""" % (quoteattr(url.rstrip()), quoteattr(content_type),
                    compressed_cached_content))
    except Exception, exception:
      print 'Got exception: %s' %exception
      sys.exit(1)
    finally:
      # Always terminate the feed XML, even after a partial failure.
      output_file.write(feed_footer_text)
  else:
    Usage()
    sys.exit(1)
if __name__ == '__main__':
  # Script entry point; pass the full argv so main() can report the program name.
  main(sys.argv)
|
en
| 0.763648
|
#!/usr/bin/python2 # # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This code is not supported by Google # This script helps to migrate data from one appliance to another. You need to compile a list of source urls (e.g. via export urls from crawl diagnostics or export all urls). IMPORTANT NOTES: - the script will only export the content, not any meta tags associated - the script will only contain what the cached version contains i.e. the truncated file if the original source was longer than 2.5 MB - since the script uses the cached version, it assumes to have the default stylsheet. In case you modified the chached version header in the default stylesheet, you need to adjust the header scrapper - the script will use one connection to the appliance at a time to download the cached versions. This means that you will have less serving bandwidth during the runtime of the script - 1GB limit is not honored. You might have to manually split the output file. TODO(mblume) - add meta data via get the search results first with getfields - parse xml and add these as metadata to the feed. 
# # constants for the script # NOTE: you should modify the <datasource>-tag content # # the xml header for the content feed <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE gsafeed PUBLIC "-//Google//DTD GSA Feeds//EN" ""> <gsafeed> <header>\ <datasource>convert_cached_copy_to_feed</datasource> <feedtype>full</feedtype> </header> <group>\n # the xml footer for the content feed zlib compress and base64 encode a string. Print script usage instructions on stdout. A python script to download the cached version from a GSA and generate a feed that can be submitted to another GSA. Usage: %s ARGS --gsaname: hostname or ip of the source gsa, e.g. search.company.com or 172.16.31.10 --urlfile: path and filename of the file containing all urls to download. The format should be one url per line --output: path and filename of the generated XML feed file --help: output this message # # Parameters we are going to initialize via command line args # # URL that is being indexed by the appliance. # Hostname of the GSA/Mini. # output file: # print help information and exit: # # Real work begins here. # # get all cached urls: # since 7.0 (and possibly earlier), content is an XML document # containing the cached content # the blob that comes back is wrapped in HTML; unwrap it # debug output------------------------------------ #print 'complete content from GSA is:\n%s' % cached_content #print 'cached content is:\n%s' % compressed_cached_content # end debug output -------------------------------- <record url=%s mimetype=%s> <content encoding="base64compressed">%s</content> </record>\n
| 2.186936
| 2
|
nipype/pipeline/plugins/multiproc.py
|
effigies/nipype
| 0
|
6629096
|
<filename>nipype/pipeline/plugins/multiproc.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parallel workflow execution via multiprocessing
Support for child processes running as non-daemons based on
http://stackoverflow.com/a/8963618/1183453
"""
# Import packages
from multiprocessing import Process, Pool, cpu_count, pool
from traceback import format_exception
import os
import sys
import numpy as np
from copy import deepcopy
from ..engine import MapNode
from ...utils.misc import str2bool
from ... import logging
from nipype.pipeline.plugins import semaphore_singleton
from .base import (DistributedPluginBase, report_crash)
# Init logger
logger = logging.getLogger('workflow')
# Run node
def run_node(node, updatehash):
    """Function to execute node.run(), catch and log any errors and
    return the result dictionary

    Parameters
    ----------
    node : nipype Node instance
        the node to run
    updatehash : boolean
        flag for updating hash

    Returns
    -------
    result : dictionary
        dictionary containing the node runtime results and stats
    """
    # Init variables
    result = dict(result=None, traceback=None)
    # Try and execute the node via node.run()
    try:
        result['result'] = node.run(updatehash=updatehash)
    # BUG FIX: a bare "except:" also swallowed SystemExit/KeyboardInterrupt
    # inside pool workers; catch Exception so interrupts still propagate.
    except Exception:
        # Capture the formatted traceback and whatever partial result the
        # node holds, so the master process can report the crash.
        etype, evalue, etrace = sys.exc_info()
        result['traceback'] = format_exception(etype, evalue, etrace)
        result['result'] = node.result
    # Return the result dictionary
    return result
class NonDaemonProcess(Process):
    """Process subclass whose ``daemon`` flag is permanently False.

    Pools mark their workers as daemonic, and daemonic processes may not
    spawn children; ignoring the setter lets pool workers fork their own
    subprocesses.
    """
    @property
    def daemon(self):
        # Always report non-daemon status.
        return False

    @daemon.setter
    def daemon(self, value):
        # Silently ignore any attempt to daemonize this process.
        pass
class NonDaemonPool(pool.Pool):
    """A process pool with non-daemon processes.
    """
    # Pool creates its workers via this factory attribute; substituting
    # NonDaemonProcess lets jobs running in the pool spawn child processes.
    Process = NonDaemonProcess
def release_lock(args):
    # Callback fired by Pool.apply_async when a node finishes: release the
    # module-level scheduler semaphore so _wait() in the master process can
    # resume. ``args`` is the worker's result and is intentionally unused.
    semaphore_singleton.semaphore.release()
# Get total system RAM
def get_system_total_memory_gb():
    """Function to get the total RAM of the running system in GB

    Returns
    -------
    memory_gb : float
        total physical memory in gigabytes

    Raises
    ------
    Exception
        on platforms other than Linux and macOS
    """
    # Import packages
    import os
    import sys
    # Get memory
    if 'linux' in sys.platform:
        # /proc/meminfo reports "MemTotal:  <kB> kB"; field 1 is kilobytes.
        with open('/proc/meminfo', 'r') as f_in:
            meminfo_lines = f_in.readlines()
        mem_total_line = [line for line in meminfo_lines \
                          if 'MemTotal' in line][0]
        mem_total = float(mem_total_line.split()[1])
        memory_gb = mem_total/(1024.0**2)
    elif 'darwin' in sys.platform:
        # sysctl hw.memsize reports the size in bytes.
        mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1]
        memory_gb = float(mem_str)/(1024.0**3)
    else:
        # BUG FIX: the platform name was never interpolated into the message,
        # so the raised error contained a literal "%s".
        err_msg = 'System platform: %s is not supported' % sys.platform
        raise Exception(err_msg)
    # Return memory
    return memory_gb
class MultiProcPlugin(DistributedPluginBase):
    """Execute workflow with multiprocessing, not sending more jobs at once
    than the system can support.
    The plugin_args input to run can be used to control the multiprocessing
    execution and defining the maximum amount of memory and threads that
    should be used. When those parameters are not specified,
    the number of threads and memory of the system is used.
    System consuming nodes should be tagged:
    memory_consuming_node.interface.estimated_memory_gb = 8
    thread_consuming_node.interface.num_threads = 16
    The default number of threads and memory for a node is 1.
    Currently supported options are:
    - non_daemon : boolean flag to execute as non-daemon processes
    - n_procs: maximum number of threads to be executed in parallel
    - memory_gb: maximum memory (in GB) that can be used at once.
    """
    def __init__(self, plugin_args=None):
        # Init variables and instance attributes
        super(MultiProcPlugin, self).__init__(plugin_args=plugin_args)
        # Map of task id -> multiprocessing AsyncResult for submitted jobs.
        self._taskresult = {}
        self._taskid = 0
        non_daemon = True
        self.plugin_args = plugin_args
        self.processors = cpu_count()
        self.memory_gb = get_system_total_memory_gb()*0.9 # 90% of system memory
        # Check plugin args
        if self.plugin_args:
            if 'non_daemon' in self.plugin_args:
                non_daemon = plugin_args['non_daemon']
            if 'n_procs' in self.plugin_args:
                self.processors = self.plugin_args['n_procs']
            if 'memory_gb' in self.plugin_args:
                self.memory_gb = self.plugin_args['memory_gb']
        # Instantiate different thread pools for non-daemon processes
        if non_daemon:
            # run the execution using the non-daemon pool subclass
            self.pool = NonDaemonPool(processes=self.processors)
        else:
            self.pool = Pool(processes=self.processors)
    def _wait(self):
        # Block on the module-level semaphore until some worker's
        # release_lock callback fires, then immediately re-release so other
        # waiters are not starved.
        if len(self.pending_tasks) > 0:
            semaphore_singleton.semaphore.acquire()
        semaphore_singleton.semaphore.release()
    def _get_result(self, taskid):
        # Return the worker's result dict, None while still running, or
        # raise if the id was never submitted (or already cleared).
        if taskid not in self._taskresult:
            raise RuntimeError('Multiproc task %d not found' % taskid)
        if not self._taskresult[taskid].ready():
            return None
        return self._taskresult[taskid].get()
    def _report_crash(self, node, result=None):
        # Attach the worker-side result/traceback to the node (if any)
        # before delegating to the shared crash reporter.
        if result and result['traceback']:
            node._result = result['result']
            node._traceback = result['traceback']
            return report_crash(node,
                                traceback=result['traceback'])
        else:
            return report_crash(node)
    def _clear_task(self, taskid):
        # Drop the stored AsyncResult so finished tasks do not accumulate.
        del self._taskresult[taskid]
    def _submit_job(self, node, updatehash=False):
        # Hand one node to the pool; release_lock wakes _wait() on completion.
        self._taskid += 1
        # Streaming terminal output cannot cross process boundaries; buffer it.
        if hasattr(node.inputs, 'terminal_output'):
            if node.inputs.terminal_output == 'stream':
                node.inputs.terminal_output = 'allatonce'
        self._taskresult[self._taskid] = \
            self.pool.apply_async(run_node,
                                  (node, updatehash),
                                  callback=release_lock)
        return self._taskid
    def _send_procs_to_workers(self, updatehash=False, graph=None):
        """ Sends jobs to workers when system resources are available.
            Check memory (gb) and cores usage before running jobs.
        """
        # NOTE(review): executing_now is appended to but never read.
        executing_now = []
        # Check to see if a job is available
        jobids = np.flatnonzero((self.proc_pending == True) & \
                                (self.depidx.sum(axis=0) == 0).__array__())
        # Check available system resources by summing all threads and memory used
        busy_memory_gb = 0
        busy_processors = 0
        for jobid in jobids:
            busy_memory_gb += self.procs[jobid]._interface.estimated_memory_gb
            busy_processors += self.procs[jobid]._interface.num_threads
        free_memory_gb = self.memory_gb - busy_memory_gb
        free_processors = self.processors - busy_processors
        # Check all jobs without dependency not run
        jobids = np.flatnonzero((self.proc_done == False) & \
                                (self.depidx.sum(axis=0) == 0).__array__())
        # Sort jobs ready to run first by memory and then by number of threads
        # The most resource consuming jobs run first
        jobids = sorted(jobids,
                        key=lambda item: (self.procs[item]._interface.estimated_memory_gb,
                                          self.procs[item]._interface.num_threads))
        logger.debug('Free memory (GB): %d, Free processors: %d',
                     free_memory_gb, free_processors)
        # While have enough memory and processors for first job
        # Submit first job on the list
        for jobid in jobids:
            logger.debug('Next Job: %d, memory (GB): %d, threads: %d' \
                         % (jobid, self.procs[jobid]._interface.estimated_memory_gb,
                            self.procs[jobid]._interface.num_threads))
            if self.procs[jobid]._interface.estimated_memory_gb <= free_memory_gb and \
               self.procs[jobid]._interface.num_threads <= free_processors:
                logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid))
                executing_now.append(self.procs[jobid])
                if isinstance(self.procs[jobid], MapNode):
                    try:
                        num_subnodes = self.procs[jobid].num_subnodes()
                    except Exception:
                        # NOTE(review): "eval" here shadows the builtin.
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                        self._clean_queue(jobid, graph)
                        self.proc_pending[jobid] = False
                        continue
                    if num_subnodes > 1:
                        submit = self._submit_mapnode(jobid)
                        if not submit:
                            continue
                # change job status in appropriate queues
                self.proc_done[jobid] = True
                self.proc_pending[jobid] = True
                # Reserve the job's resources before it actually starts.
                free_memory_gb -= self.procs[jobid]._interface.estimated_memory_gb
                free_processors -= self.procs[jobid]._interface.num_threads
                # Send job to task manager and add to pending tasks
                if self._status_callback:
                    self._status_callback(self.procs[jobid], 'start')
                if str2bool(self.procs[jobid].config['execution']['local_hash_check']):
                    logger.debug('checking hash locally')
                    try:
                        hash_exists, _, _, _ = self.procs[
                            jobid].hash_exists()
                        logger.debug('Hash exists %s' % str(hash_exists))
                        # Skip re-running a node whose outputs are already
                        # cached, unless overwrite/always_run forces it.
                        if (hash_exists and (self.procs[jobid].overwrite == False or \
                                             (self.procs[jobid].overwrite == None and \
                                              not self.procs[jobid]._interface.always_run))):
                            self._task_finished_cb(jobid)
                            self._remove_node_dirs()
                            continue
                    except Exception:
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                        self._clean_queue(jobid, graph)
                        self.proc_pending[jobid] = False
                        continue
                    logger.debug('Finished checking hash')
                if self.procs[jobid].run_without_submitting:
                    # Node explicitly opted out of pool execution.
                    logger.debug('Running node %s on master thread' \
                                 % self.procs[jobid])
                    try:
                        self.procs[jobid].run()
                    except Exception:
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                    self._task_finished_cb(jobid)
                    self._remove_node_dirs()
                else:
                    logger.debug('submitting %s' % str(jobid))
                    # deepcopy so worker-side mutation cannot touch our graph.
                    tid = self._submit_job(deepcopy(self.procs[jobid]),
                                           updatehash=updatehash)
                    if tid is None:
                        self.proc_done[jobid] = False
                        self.proc_pending[jobid] = False
                    else:
                        self.pending_tasks.insert(0, (tid, jobid))
            else:
                # Jobs are sorted by resource need; the first one that does
                # not fit means nothing further can be scheduled this pass.
                break
        logger.debug('No jobs waiting to execute')
|
<filename>nipype/pipeline/plugins/multiproc.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Parallel workflow execution via multiprocessing
Support for child processes running as non-daemons based on
http://stackoverflow.com/a/8963618/1183453
"""
# Import packages
from multiprocessing import Process, Pool, cpu_count, pool
from traceback import format_exception
import os
import sys
import numpy as np
from copy import deepcopy
from ..engine import MapNode
from ...utils.misc import str2bool
from ... import logging
from nipype.pipeline.plugins import semaphore_singleton
from .base import (DistributedPluginBase, report_crash)
# Init logger
logger = logging.getLogger('workflow')
# Run node
def run_node(node, updatehash):
    """Function to execute node.run(), catch and log any errors and
    return the result dictionary

    Parameters
    ----------
    node : nipype Node instance
        the node to run
    updatehash : boolean
        flag for updating hash

    Returns
    -------
    result : dictionary
        dictionary containing the node runtime results and stats
    """
    # Init variables
    result = dict(result=None, traceback=None)
    # Try and execute the node via node.run()
    try:
        result['result'] = node.run(updatehash=updatehash)
    # BUG FIX: a bare "except:" also swallowed SystemExit/KeyboardInterrupt
    # inside pool workers; catch Exception so interrupts still propagate.
    except Exception:
        # Capture the formatted traceback and whatever partial result the
        # node holds, so the master process can report the crash.
        etype, evalue, etrace = sys.exc_info()
        result['traceback'] = format_exception(etype, evalue, etrace)
        result['result'] = node.result
    # Return the result dictionary
    return result
class NonDaemonProcess(Process):
    """Process subclass whose ``daemon`` flag is permanently False.

    Pools mark their workers as daemonic, and daemonic processes may not
    spawn children; ignoring the setter lets pool workers fork their own
    subprocesses.
    """
    @property
    def daemon(self):
        # Always report non-daemon status.
        return False

    @daemon.setter
    def daemon(self, value):
        # Silently ignore any attempt to daemonize this process.
        pass
class NonDaemonPool(pool.Pool):
    """A process pool with non-daemon processes.
    """
    # Pool creates its workers via this factory attribute; substituting
    # NonDaemonProcess lets jobs running in the pool spawn child processes.
    Process = NonDaemonProcess
def release_lock(args):
    # Callback fired by Pool.apply_async when a node finishes: release the
    # module-level scheduler semaphore so _wait() in the master process can
    # resume. ``args`` is the worker's result and is intentionally unused.
    semaphore_singleton.semaphore.release()
# Get total system RAM
def get_system_total_memory_gb():
    """Function to get the total RAM of the running system in GB

    Returns
    -------
    memory_gb : float
        total physical memory in gigabytes

    Raises
    ------
    Exception
        on platforms other than Linux and macOS
    """
    # Import packages
    import os
    import sys
    # Get memory
    if 'linux' in sys.platform:
        # /proc/meminfo reports "MemTotal:  <kB> kB"; field 1 is kilobytes.
        with open('/proc/meminfo', 'r') as f_in:
            meminfo_lines = f_in.readlines()
        mem_total_line = [line for line in meminfo_lines \
                          if 'MemTotal' in line][0]
        mem_total = float(mem_total_line.split()[1])
        memory_gb = mem_total/(1024.0**2)
    elif 'darwin' in sys.platform:
        # sysctl hw.memsize reports the size in bytes.
        mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1]
        memory_gb = float(mem_str)/(1024.0**3)
    else:
        # BUG FIX: the platform name was never interpolated into the message,
        # so the raised error contained a literal "%s".
        err_msg = 'System platform: %s is not supported' % sys.platform
        raise Exception(err_msg)
    # Return memory
    return memory_gb
class MultiProcPlugin(DistributedPluginBase):
    """Execute workflow with multiprocessing, not sending more jobs at once
    than the system can support.
    The plugin_args input to run can be used to control the multiprocessing
    execution and defining the maximum amount of memory and threads that
    should be used. When those parameters are not specified,
    the number of threads and memory of the system is used.
    System consuming nodes should be tagged:
    memory_consuming_node.interface.estimated_memory_gb = 8
    thread_consuming_node.interface.num_threads = 16
    The default number of threads and memory for a node is 1.
    Currently supported options are:
    - non_daemon : boolean flag to execute as non-daemon processes
    - n_procs: maximum number of threads to be executed in parallel
    - memory_gb: maximum memory (in GB) that can be used at once.
    """
    def __init__(self, plugin_args=None):
        """Set up resource limits and the (non-)daemon worker pool."""
        # Init variables and instance attributes
        super(MultiProcPlugin, self).__init__(plugin_args=plugin_args)
        # _taskresult maps task ids to AsyncResult objects;
        # _taskid is a monotonically increasing counter for submitted jobs.
        self._taskresult = {}
        self._taskid = 0
        non_daemon = True
        self.plugin_args = plugin_args
        self.processors = cpu_count()
        self.memory_gb = get_system_total_memory_gb()*0.9 # 90% of system memory
        # Check plugin args
        if self.plugin_args:
            if 'non_daemon' in self.plugin_args:
                non_daemon = plugin_args['non_daemon']
            if 'n_procs' in self.plugin_args:
                self.processors = self.plugin_args['n_procs']
            if 'memory_gb' in self.plugin_args:
                self.memory_gb = self.plugin_args['memory_gb']
        # Instantiate different thread pools for non-daemon processes
        if non_daemon:
            # run the execution using the non-daemon pool subclass
            self.pool = NonDaemonPool(processes=self.processors)
        else:
            self.pool = Pool(processes=self.processors)
    def _wait(self):
        """Block until some pending task finishes.

        The pool callback (release_lock) releases the semaphore when a job
        completes, which unblocks the acquire below; the extra release
        restores the semaphore count for the next waiter.
        """
        if len(self.pending_tasks) > 0:
            semaphore_singleton.semaphore.acquire()
        semaphore_singleton.semaphore.release()
    def _get_result(self, taskid):
        """Return the finished task's result dict, or None if still running."""
        if taskid not in self._taskresult:
            raise RuntimeError('Multiproc task %d not found' % taskid)
        if not self._taskresult[taskid].ready():
            return None
        return self._taskresult[taskid].get()
    def _report_crash(self, node, result=None):
        """Record a node crash, attaching the captured traceback if any."""
        if result and result['traceback']:
            node._result = result['result']
            node._traceback = result['traceback']
            return report_crash(node,
                                traceback=result['traceback'])
        else:
            return report_crash(node)
    def _clear_task(self, taskid):
        """Drop the stored async result for a finished task."""
        del self._taskresult[taskid]
    def _submit_job(self, node, updatehash=False):
        """Submit a node to the worker pool; return its new task id."""
        self._taskid += 1
        if hasattr(node.inputs, 'terminal_output'):
            if node.inputs.terminal_output == 'stream':
                # Streamed terminal output does not survive the trip through
                # a worker process; collect it all at once instead.
                node.inputs.terminal_output = 'allatonce'
        self._taskresult[self._taskid] = \
            self.pool.apply_async(run_node,
                                  (node, updatehash),
                                  callback=release_lock)
        return self._taskid
    def _send_procs_to_workers(self, updatehash=False, graph=None):
        """ Sends jobs to workers when system resources are available.
            Check memory (gb) and cores usage before running jobs.
        """
        executing_now = []
        # Check to see if a job is available
        jobids = np.flatnonzero((self.proc_pending == True) & \
                                (self.depidx.sum(axis=0) == 0).__array__())
        # Check available system resources by summing all threads and memory used
        busy_memory_gb = 0
        busy_processors = 0
        for jobid in jobids:
            busy_memory_gb += self.procs[jobid]._interface.estimated_memory_gb
            busy_processors += self.procs[jobid]._interface.num_threads
        free_memory_gb = self.memory_gb - busy_memory_gb
        free_processors = self.processors - busy_processors
        # Check all jobs without dependency not run
        jobids = np.flatnonzero((self.proc_done == False) & \
                                (self.depidx.sum(axis=0) == 0).__array__())
        # Sort jobs ready to run first by memory and then by number of threads
        # The most resource consuming jobs run first
        # NOTE(review): sorted() is ascending, so the *least* consuming jobs
        # actually come first here -- confirm intent against upstream nipype.
        jobids = sorted(jobids,
                        key=lambda item: (self.procs[item]._interface.estimated_memory_gb,
                                          self.procs[item]._interface.num_threads))
        logger.debug('Free memory (GB): %d, Free processors: %d',
                     free_memory_gb, free_processors)
        # While have enough memory and processors for first job
        # Submit first job on the list
        for jobid in jobids:
            logger.debug('Next Job: %d, memory (GB): %d, threads: %d' \
                         % (jobid, self.procs[jobid]._interface.estimated_memory_gb,
                            self.procs[jobid]._interface.num_threads))
            if self.procs[jobid]._interface.estimated_memory_gb <= free_memory_gb and \
               self.procs[jobid]._interface.num_threads <= free_processors:
                logger.info('Executing: %s ID: %d' %(self.procs[jobid]._id, jobid))
                executing_now.append(self.procs[jobid])
                if isinstance(self.procs[jobid], MapNode):
                    try:
                        num_subnodes = self.procs[jobid].num_subnodes()
                    except Exception:
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                        self._clean_queue(jobid, graph)
                        self.proc_pending[jobid] = False
                        continue
                    if num_subnodes > 1:
                        submit = self._submit_mapnode(jobid)
                        if not submit:
                            continue
                # change job status in appropriate queues
                self.proc_done[jobid] = True
                self.proc_pending[jobid] = True
                # Reserve this job's resources before submitting it.
                free_memory_gb -= self.procs[jobid]._interface.estimated_memory_gb
                free_processors -= self.procs[jobid]._interface.num_threads
                # Send job to task manager and add to pending tasks
                if self._status_callback:
                    self._status_callback(self.procs[jobid], 'start')
                if str2bool(self.procs[jobid].config['execution']['local_hash_check']):
                    logger.debug('checking hash locally')
                    try:
                        hash_exists, _, _, _ = self.procs[
                            jobid].hash_exists()
                        logger.debug('Hash exists %s' % str(hash_exists))
                        # Skip re-running nodes whose outputs are already
                        # cached, unless the node insists on running.
                        if (hash_exists and (self.procs[jobid].overwrite == False or \
                                             (self.procs[jobid].overwrite == None and \
                                              not self.procs[jobid]._interface.always_run))):
                            self._task_finished_cb(jobid)
                            self._remove_node_dirs()
                            continue
                    except Exception:
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                        self._clean_queue(jobid, graph)
                        self.proc_pending[jobid] = False
                        continue
                    logger.debug('Finished checking hash')
                if self.procs[jobid].run_without_submitting:
                    logger.debug('Running node %s on master thread' \
                                 % self.procs[jobid])
                    try:
                        self.procs[jobid].run()
                    except Exception:
                        etype, eval, etr = sys.exc_info()
                        traceback = format_exception(etype, eval, etr)
                        report_crash(self.procs[jobid], traceback=traceback)
                    self._task_finished_cb(jobid)
                    self._remove_node_dirs()
                else:
                    logger.debug('submitting %s' % str(jobid))
                    # Deep-copy so the worker process gets an independent node.
                    tid = self._submit_job(deepcopy(self.procs[jobid]),
                                           updatehash=updatehash)
                    if tid is None:
                        self.proc_done[jobid] = False
                        self.proc_pending[jobid] = False
                    else:
                        self.pending_tasks.insert(0, (tid, jobid))
            else:
                break
        logger.debug('No jobs waiting to execute')
|
en
| 0.771919
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: Parallel workflow execution via multiprocessing Support for child processes running as non-daemons based on http://stackoverflow.com/a/8963618/1183453 # Import packages # Init logger # Run node Function to execute node.run(), catch and log any errors and return the result dictionary Parameters ---------- node : nipype Node instance the node to run updatehash : boolean flag for updating hash Returns ------- result : dictionary dictionary containing the node runtime results and stats # Init variables # Try and execute the node via node.run() # Return the result dictionary A non-daemon process to support internal multiprocessing. A process pool with non-daemon processes. # Get total system RAM Function to get the total RAM of the running system in GB # Import packages # Get memory # Return memory Execute workflow with multiprocessing, not sending more jobs at once than the system can support. The plugin_args input to run can be used to control the multiprocessing execution and defining the maximum amount of memory and threads that should be used. When those parameters are not specified, the number of threads and memory of the system is used. System consuming nodes should be tagged: memory_consuming_node.interface.estimated_memory_gb = 8 thread_consuming_node.interface.num_threads = 16 The default number of threads and memory for a node is 1. Currently supported options are: - non_daemon : boolean flag to execute as non-daemon processes - n_procs: maximum number of threads to be executed in parallel - memory_gb: maximum memory (in GB) that can be used at once. # Init variables and instance attributes # 90% of system memory # Check plugin args # Instantiate different thread pools for non-daemon processes # run the execution using the non-daemon pool subclass Sends jobs to workers when system resources are available. 
Check memory (gb) and cores usage before running jobs. # Check to see if a job is available # Check available system resources by summing all threads and memory used # Check all jobs without dependency not run # Sort jobs ready to run first by memory and then by number of threads # The most resource consuming jobs run first # While have enough memory and processors for first job # Submit first job on the list # change job status in appropriate queues # Send job to task manager and add to pending tasks
| 2.242474
| 2
|
tests/test_state.py
|
gmr/aiorabbit
| 17
|
6629097
|
<reponame>gmr/aiorabbit<gh_stars>10-100
from aiorabbit import exceptions, state
from . import testing
# Synthetic states used to exercise the StateManager transition machinery.
STATE_FOO = 0x10
STATE_BAR = 0x11
STATE_BAZ = 0x12
class State(state.StateManager):
    """Minimal StateManager subclass exposing the protected setters for tests."""
    STATE_MAP = {
        state.STATE_UNINITIALIZED: 'Uninitialized',
        state.STATE_EXCEPTION: 'Exception',
        STATE_FOO: 'Foo',
        STATE_BAR: 'Bar',
        STATE_BAZ: 'Baz',
    }
    # Allowed transitions: Uninitialized -> Foo/Bar; Foo -> Bar -> Baz -> Foo.
    STATE_TRANSITIONS = {
        state.STATE_UNINITIALIZED: [STATE_FOO, STATE_BAR],
        state.STATE_EXCEPTION: [],
        STATE_FOO: [STATE_BAR],
        STATE_BAR: [STATE_BAZ],
        STATE_BAZ: [STATE_FOO]
    }
    def set_state(self, value: int) -> None:
        # Public wrapper around the protected _set_state for test access.
        self._set_state(value)
    def set_exception(self, exc):
        # Drive the manager into the exception state with the given exception.
        self._set_state(state.STATE_EXCEPTION, exc)
class TestCase(testing.AsyncTestCase):
    """Exercises StateManager transition rules via the State test double."""
    def setUp(self) -> None:
        super().setUp()
        self.obj = State(self.loop)
    def assert_state(self, value):
        # Compare against the human-readable name from STATE_MAP.
        self.assertEqual(self.obj.state, self.obj.STATE_MAP[value])
    def test_state_transitions(self):
        # Walk the full legal cycle: Uninitialized -> Foo -> Bar -> Baz -> Foo.
        self.assert_state(state.STATE_UNINITIALIZED)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
        self.obj.set_state(STATE_BAR)
        self.assert_state(STATE_BAR)
        self.obj.set_state(STATE_BAZ)
        self.assert_state(STATE_BAZ)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
    def test_invalid_state_transition(self):
        # Baz is not reachable directly from Uninitialized.
        self.assert_state(state.STATE_UNINITIALIZED)
        with self.assertRaises(exceptions.StateTransitionError):
            self.obj.set_state(STATE_BAZ)
    def test_setting_state_to_same_value(self):
        # Re-setting the current state should not raise.
        self.assert_state(state.STATE_UNINITIALIZED)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
        self.obj.set_state(STATE_FOO)
    @testing.async_test
    async def test_wait_on_state(self):
        # The waiter should resume once the scheduled transition happens.
        self.loop.call_soon(self.obj.set_state, STATE_FOO)
        await self.obj._wait_on_state(STATE_FOO)
        self.loop.call_soon(self.obj.set_state, STATE_BAR)
        await self.obj._wait_on_state(STATE_BAR)
        self.assert_state(STATE_BAR)
    @testing.async_test
    async def test_exception_while_waiting(self):
        # An exception set while waiting should propagate to the waiter.
        self.loop.call_soon(self.obj.set_state, STATE_FOO)
        await self.obj._wait_on_state(STATE_FOO)
        self.loop.call_soon(self.obj.set_exception, RuntimeError)
        with self.assertRaises(RuntimeError):
            await self.obj._wait_on_state(STATE_BAR)
|
from aiorabbit import exceptions, state
from . import testing
# Synthetic states used to exercise the StateManager transition machinery.
STATE_FOO = 0x10
STATE_BAR = 0x11
STATE_BAZ = 0x12
class State(state.StateManager):
    """Test double exposing StateManager's protected setters."""
    STATE_MAP = {
        state.STATE_UNINITIALIZED: 'Uninitialized',
        state.STATE_EXCEPTION: 'Exception',
        STATE_FOO: 'Foo',
        STATE_BAR: 'Bar',
        STATE_BAZ: 'Baz',
    }
    # Legal transitions: Uninitialized -> Foo/Bar; Foo -> Bar -> Baz -> Foo.
    STATE_TRANSITIONS = {
        state.STATE_UNINITIALIZED: [STATE_FOO, STATE_BAR],
        state.STATE_EXCEPTION: [],
        STATE_FOO: [STATE_BAR],
        STATE_BAR: [STATE_BAZ],
        STATE_BAZ: [STATE_FOO]
    }
    def set_state(self, value: int) -> None:
        # Public wrapper for the protected _set_state.
        self._set_state(value)
    def set_exception(self, exc):
        # Force the exception state with the given exception.
        self._set_state(state.STATE_EXCEPTION, exc)
class TestCase(testing.AsyncTestCase):
    """Verifies state transitions, rejection of illegal ones, and waiting."""
    def setUp(self) -> None:
        super().setUp()
        self.obj = State(self.loop)
    def assert_state(self, value):
        # State is reported by its STATE_MAP name.
        self.assertEqual(self.obj.state, self.obj.STATE_MAP[value])
    def test_state_transitions(self):
        # Full legal cycle through Foo/Bar/Baz and back to Foo.
        self.assert_state(state.STATE_UNINITIALIZED)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
        self.obj.set_state(STATE_BAR)
        self.assert_state(STATE_BAR)
        self.obj.set_state(STATE_BAZ)
        self.assert_state(STATE_BAZ)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
    def test_invalid_state_transition(self):
        # Baz is not reachable from Uninitialized.
        self.assert_state(state.STATE_UNINITIALIZED)
        with self.assertRaises(exceptions.StateTransitionError):
            self.obj.set_state(STATE_BAZ)
    def test_setting_state_to_same_value(self):
        # Setting the current state again must not raise.
        self.assert_state(state.STATE_UNINITIALIZED)
        self.obj.set_state(STATE_FOO)
        self.assert_state(STATE_FOO)
        self.obj.set_state(STATE_FOO)
    @testing.async_test
    async def test_wait_on_state(self):
        # Waiters resume once the scheduled transition fires.
        self.loop.call_soon(self.obj.set_state, STATE_FOO)
        await self.obj._wait_on_state(STATE_FOO)
        self.loop.call_soon(self.obj.set_state, STATE_BAR)
        await self.obj._wait_on_state(STATE_BAR)
        self.assert_state(STATE_BAR)
    @testing.async_test
    async def test_exception_while_waiting(self):
        # A recorded exception propagates to the pending waiter.
        self.loop.call_soon(self.obj.set_state, STATE_FOO)
        await self.obj._wait_on_state(STATE_FOO)
        self.loop.call_soon(self.obj.set_exception, RuntimeError)
        with self.assertRaises(RuntimeError):
            await self.obj._wait_on_state(STATE_BAR)
|
none
| 1
| 2.439583
| 2
|
|
andromeda/modules/technical_support/models/__init__.py
|
sango09/andromeda_api_rest
| 1
|
6629098
|
from .support import *
from .rating import *
|
from .support import *
from .rating import *
|
none
| 1
| 1.072708
| 1
|
|
PycharmProjects/pythonexercicios/aula017/ex079.py
|
zmixtv1/cev-Python
| 0
|
6629099
|
'''
valores = []
while True:
    valores.append(int(input("Digite um valor: ")))
    resp = str(input("Quer continuar[S/N]: ")).upper()
    if resp == "S":
        valores.append(int(input("Digite outro valor: ")))
        resp = str(input("Quer continuar[S/N]: ")).upper()
    else:
        break
valores.sort()
print(f"você digitou os valores {valores}")
'''
# Read integers from the user, rejecting duplicates, until the user answers "N".
valores = []
while True:
    n = int(input("Digite um valor: "))
    if n not in valores:
        valores.append(n)
    else:
        print("valor duplicado! Não vou adicionar...")
    r = str(input("Quer continuar [S/N]: "))
    # Bug fix: ``r in "Nn"`` was also True for an empty answer
    # ("" is a substring of "Nn"), so just pressing Enter ended the loop.
    if r in ("N", "n"):
        break
# Show the collected values in ascending order.
valores.sort()
print(f"você digitou os valores {valores}")
|
'''
valores = []
while True:
    valores.append(int(input("Digite um valor: ")))
    resp = str(input("Quer continuar[S/N]: ")).upper()
    if resp == "S":
        valores.append(int(input("Digite outro valor: ")))
        resp = str(input("Quer continuar[S/N]: ")).upper()
    else:
        break
valores.sort()
print(f"você digitou os valores {valores}")
'''
# Read integers from the user, skipping duplicates, until the user answers "N".
valores = []
while True:
    n = int(input("Digite um valor: "))
    if n not in valores:
        valores.append(n)
    else:
        print("valor duplicado! Não vou adicionar...")
    r= str(input("Quer continuar [S/N]: "))
    # NOTE(review): ``r in "Nn"`` is also True for an empty answer
    # ("" is a substring of "Nn"), so just pressing Enter ends the loop.
    if r in "Nn":
        break
# Show the collected values in ascending order.
valores.sort()
print(f"você digitou os valores {valores}")
|
pt
| 0.264351
|
valores = [] while True: valores.append(int(input("Digite um valor: "))) resp = str(input("Quer continuar[S/N]: ")).upper() if resp == "S": valores.append(int(input("Digite outro valor: "))) resp = str(input("Quer continuar[S/N]: ")).upper() else: break valores.sort() print(f"você digitou os valores {valores}")
| 3.976855
| 4
|
convert_tf_checkpoint_to_pytorch.py
|
awesome-archive/CAIL2019
| 300
|
6629100
|
#!/usr/bin/python
# coding: utf-8
"""Convert a TensorFlow BERT checkpoint to PyTorch format.

Writes the converted weights plus the config and vocab files into
``models/pytorch_pretrain/``.
"""
import shutil
from pytorch_pretrained_bert import convert_tf_checkpoint_to_pytorch

BERT_MODEL_PATH = "models/chinese_L-12_H-768_A-12/"

if __name__ == "__main__":
    # Convert the TF checkpoint weights into a PyTorch state dict.
    convert_tf_checkpoint_to_pytorch.convert_tf_checkpoint_to_pytorch(
        BERT_MODEL_PATH + "bert_model.ckpt",
        BERT_MODEL_PATH + "bert_config.json",
        "models/pytorch_pretrain/pytorch_model.bin",
    )
    # Copy the auxiliary files the PyTorch loader expects, in the same
    # order as the original script (config first, then vocab).
    for src_name, dst_path in (
        ("bert_config.json", "models/pytorch_pretrain/bert_config.json"),
        ("vocab.txt", "models/pytorch_pretrain/vocab.txt"),
    ):
        shutil.copyfile(BERT_MODEL_PATH + src_name, dst_path)
|
#!/usr/bin/python
# coding: utf-8
# Convert a TensorFlow BERT checkpoint into PyTorch format and copy the
# config/vocab files next to the converted weights.
import shutil
from pytorch_pretrained_bert import convert_tf_checkpoint_to_pytorch
BERT_MODEL_PATH = "models/chinese_L-12_H-768_A-12/"
if __name__ == "__main__":
    # Convert the TF checkpoint weights into a PyTorch state dict.
    convert_tf_checkpoint_to_pytorch.convert_tf_checkpoint_to_pytorch(
        BERT_MODEL_PATH + "bert_model.ckpt",
        BERT_MODEL_PATH + "bert_config.json",
        "models/pytorch_pretrain/pytorch_model.bin",
    )
    # Copy the config so the PyTorch loader can find it.
    shutil.copyfile(
        BERT_MODEL_PATH + "bert_config.json",
        "models/pytorch_pretrain/bert_config.json",
    )
    # Copy the vocabulary file as well.
    shutil.copyfile(
        BERT_MODEL_PATH + "vocab.txt", "models/pytorch_pretrain/vocab.txt"
    )
|
en
| 0.42926
|
#!/usr/bin/python # coding: utf-8
| 2.432376
| 2
|
client.py
|
alisonsandrade/covidometro-tk
| 1
|
6629101
|
import socket
import threading
import tkinter
import tkinter.scrolledtext
from tkinter import ttk
from service.api import ApiService
# Server endpoint the client connects to (local server by default).
HOST = "127.0.0.1"
PORT = 9091
class Client:
    """Tkinter client that queries COVID data for a Brazilian state/city.

    Opens a TCP connection, then runs two threads: one for the Tk main
    loop and one that receives server responses.
    """
    def __init__(self, host, port):
        # Connect before starting the GUI and receive threads.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        self.gui_done = False
        self.running = True
        gui_thread = threading.Thread(target=self.gui_loop)
        receive_thread = threading.Thread(target=self.receive_loop)
        gui_thread.start()
        receive_thread.start()
    def gui_loop(self):
        """Build the window and run the Tk main loop (blocks this thread)."""
        self.win = tkinter.Tk()
        self.win.configure(bg="lightgray")
        self.win.title("Covidômetro")
        # Main menu bar
        barra_menu = tkinter.Menu(self.win)
        menu_principal = tkinter.Menu(barra_menu)
        menu_principal.add_command(label="Sair", command=self.stop)
        menu_about = tkinter.Menu(barra_menu)
        menu_about.add_command(label="Informações", command=self.gui_about)
        barra_menu.add_cascade(label="Principal", menu=menu_principal)
        barra_menu.add_cascade(label="Sobre", menu=menu_about)
        self.win.config(menu=barra_menu)
        # State (UF) combo box
        self.label_state = tkinter.Label(self.win, text="Estado", bg="lightgray")
        self.label_state.config(font=("Arial", 12))
        self.label_state.pack(padx=20, pady=5)
        self.comboBoxState = ttk.Combobox(self.win, width=27, textvariable=tkinter.StringVar())
        self.comboBoxState['values'] = (
            'AC - Acré',
            'AL - Alagoas',
            'AP - Amapá',
            'AM - Amazonas',
            'BA - Bahia',
            'CE - Ceará',
            'DF - Distrito Federal',
            'ES - Espírito Santo',
            'GO - Goiás',
            'MA - Maranhão',
            'MT - Mato Grosso',
            'MS - Mato Grosso do Sul',
            'MG - Minas Gerais',
            'PA - Pará',
            'PB - Paraíba',
            'PR - Paraná',
            'PE - Pernambuco',
            'PI - Piauí',
            'RJ - Rio de Janeiro',
            'RN - Rio Grande do Norte',
            'RS - Rio Grande do Sul',
            'RO - Rondônia',
            'RR - Roraima',
            'SC - Santa Catarina',
            'SP - São Paulo',
            'SE - Sergipe',
            'TO - Tocantins')
        # self.comboBoxState.grid(column=0, row=1)
        self.comboBoxState.current(14)
        self.comboBoxState.pack()
        # City combo box (refreshed via on_select_cities before it opens)
        self.label_city = tkinter.Label(self.win, text="Município", bg="lightgray")
        self.label_city.config(font=("Arial", 12))
        self.label_city.pack(padx=20, pady=5)
        self.comboBoxCities = ttk.Combobox(self.win, width=27, textvariable=tkinter.StringVar(), postcommand=self.on_select_cities)
        self.comboBoxCities.pack()
        self.chat_label = tkinter.Label(self.win, text="Resultados:", bg="lightgray")
        self.chat_label.config(font=("Arial", 12))
        self.chat_label.pack(padx=20, pady=5)
        self.text_area = tkinter.scrolledtext.ScrolledText(self.win)
        self.text_area.pack(padx=20, pady=5)
        self.text_area.config(state="disabled")
        self.send_button = tkinter.Button(self.win, text="Consultar", command=self.write)
        self.send_button.config(font=("Arial", 12))
        self.send_button.pack(padx=20, pady=5)
        self.gui_done = True
        self.win.protocol("WM_DELETE_WINDOW", self.stop)
        self.win.mainloop()
    def write(self):
        """Send the selected state abbreviation and city to the server."""
        state = self.comboBoxState.get()[0:2]
        city = self.comboBoxCities.get()
        message = f"{state} {city}"
        # message = f"{self.input_area.get('1.0', 'end')}"
        self.sock.send(message.encode("utf-8"))
    def on_select_cities(self):
        """Refresh the city combo with the cities of the selected state."""
        state_selected = self.comboBoxState.get()[0:2]
        data = ApiService().load_data()
        # NOTE(review): ``states`` is unused here -- presumably leftover.
        states = data["estados"]
        cities = data["cidades"]
        cities_list = []
        for city in cities:
            if city['estadoId'] == state_selected:
                cities_list.append(city["cidade"])
        self.comboBoxCities['values'] = cities_list
    def stop(self):
        """Tear down the GUI and socket, then exit the process."""
        self.running = False
        self.win.destroy()
        self.sock.close()
        exit(0)
    def gui_about(self):
        """Show the about dialog."""
        from tkinter import messagebox
        messagebox.showinfo(title="Sobre nós", message="Sistema desenvolvido em Python com TKinter como requisito para obtenção de nota da avaliação de da disciplina de Protocolos de Interconexão de Redes de Computadores do curso TSI - IFPB, Guarabira/PB.\n\nDesenvolvedores:\n- <NAME>\n- <NAME>")
    def receive_loop(self):
        """Receive server messages and append them to the results area."""
        while self.running:
            try:
                message = self.sock.recv(1024)
                print('[*] Servidor:', message.decode("utf-8", "ignore"))
                if self.gui_done:
                    self.text_area.config(state='normal')
                    self.text_area.insert("end", message.decode("utf-8", "ignore"))
                    self.text_area.yview("end")
                    self.text_area.config(state="disabled")
            except ConnectionAbortedError:
                pass
            # NOTE(review): bare except hides the actual error; it only
            # logs a generic message before closing the socket.
            except:
                print("Ocorreu um erro")
                self.sock.close()
                break
client = Client(HOST, PORT)
|
import socket
import threading
import tkinter
import tkinter.scrolledtext
from tkinter import ttk
from service.api import ApiService
# Server endpoint the client connects to (local server by default).
HOST = "127.0.0.1"
PORT = 9091
class Client:
    """Tkinter client querying COVID data; runs GUI and receiver threads."""
    def __init__(self, host, port):
        # Connect before starting the GUI and receive threads.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))
        self.gui_done = False
        self.running = True
        gui_thread = threading.Thread(target=self.gui_loop)
        receive_thread = threading.Thread(target=self.receive_loop)
        gui_thread.start()
        receive_thread.start()
    def gui_loop(self):
        """Build the window and run the Tk main loop (blocks this thread)."""
        self.win = tkinter.Tk()
        self.win.configure(bg="lightgray")
        self.win.title("Covidômetro")
        # Main menu bar
        barra_menu = tkinter.Menu(self.win)
        menu_principal = tkinter.Menu(barra_menu)
        menu_principal.add_command(label="Sair", command=self.stop)
        menu_about = tkinter.Menu(barra_menu)
        menu_about.add_command(label="Informações", command=self.gui_about)
        barra_menu.add_cascade(label="Principal", menu=menu_principal)
        barra_menu.add_cascade(label="Sobre", menu=menu_about)
        self.win.config(menu=barra_menu)
        # State (UF) combo box
        self.label_state = tkinter.Label(self.win, text="Estado", bg="lightgray")
        self.label_state.config(font=("Arial", 12))
        self.label_state.pack(padx=20, pady=5)
        self.comboBoxState = ttk.Combobox(self.win, width=27, textvariable=tkinter.StringVar())
        self.comboBoxState['values'] = (
            'AC - Acré',
            'AL - Alagoas',
            'AP - Amapá',
            'AM - Amazonas',
            'BA - Bahia',
            'CE - Ceará',
            'DF - Distrito Federal',
            'ES - Espírito Santo',
            'GO - Goiás',
            'MA - Maranhão',
            'MT - Mato Grosso',
            'MS - Mato Grosso do Sul',
            'MG - Minas Gerais',
            'PA - Pará',
            'PB - Paraíba',
            'PR - Paraná',
            'PE - Pernambuco',
            'PI - Piauí',
            'RJ - Rio de Janeiro',
            'RN - Rio Grande do Norte',
            'RS - Rio Grande do Sul',
            'RO - Rondônia',
            'RR - Roraima',
            'SC - Santa Catarina',
            'SP - São Paulo',
            'SE - Sergipe',
            'TO - Tocantins')
        # self.comboBoxState.grid(column=0, row=1)
        self.comboBoxState.current(14)
        self.comboBoxState.pack()
        # City combo box (refreshed via on_select_cities before it opens)
        self.label_city = tkinter.Label(self.win, text="Município", bg="lightgray")
        self.label_city.config(font=("Arial", 12))
        self.label_city.pack(padx=20, pady=5)
        self.comboBoxCities = ttk.Combobox(self.win, width=27, textvariable=tkinter.StringVar(), postcommand=self.on_select_cities)
        self.comboBoxCities.pack()
        self.chat_label = tkinter.Label(self.win, text="Resultados:", bg="lightgray")
        self.chat_label.config(font=("Arial", 12))
        self.chat_label.pack(padx=20, pady=5)
        self.text_area = tkinter.scrolledtext.ScrolledText(self.win)
        self.text_area.pack(padx=20, pady=5)
        self.text_area.config(state="disabled")
        self.send_button = tkinter.Button(self.win, text="Consultar", command=self.write)
        self.send_button.config(font=("Arial", 12))
        self.send_button.pack(padx=20, pady=5)
        self.gui_done = True
        self.win.protocol("WM_DELETE_WINDOW", self.stop)
        self.win.mainloop()
    def write(self):
        """Send the selected state abbreviation and city to the server."""
        state = self.comboBoxState.get()[0:2]
        city = self.comboBoxCities.get()
        message = f"{state} {city}"
        # message = f"{self.input_area.get('1.0', 'end')}"
        self.sock.send(message.encode("utf-8"))
    def on_select_cities(self):
        """Refresh the city combo with the cities of the selected state."""
        state_selected = self.comboBoxState.get()[0:2]
        data = ApiService().load_data()
        # NOTE(review): ``states`` is unused here -- presumably leftover.
        states = data["estados"]
        cities = data["cidades"]
        cities_list = []
        for city in cities:
            if city['estadoId'] == state_selected:
                cities_list.append(city["cidade"])
        self.comboBoxCities['values'] = cities_list
    def stop(self):
        """Tear down the GUI and socket, then exit the process."""
        self.running = False
        self.win.destroy()
        self.sock.close()
        exit(0)
    def gui_about(self):
        """Show the about dialog."""
        from tkinter import messagebox
        messagebox.showinfo(title="Sobre nós", message="Sistema desenvolvido em Python com TKinter como requisito para obtenção de nota da avaliação de da disciplina de Protocolos de Interconexão de Redes de Computadores do curso TSI - IFPB, Guarabira/PB.\n\nDesenvolvedores:\n- <NAME>\n- <NAME>")
    def receive_loop(self):
        """Receive server messages and append them to the results area."""
        while self.running:
            try:
                message = self.sock.recv(1024)
                print('[*] Servidor:', message.decode("utf-8", "ignore"))
                if self.gui_done:
                    self.text_area.config(state='normal')
                    self.text_area.insert("end", message.decode("utf-8", "ignore"))
                    self.text_area.yview("end")
                    self.text_area.config(state="disabled")
            except ConnectionAbortedError:
                pass
            # NOTE(review): bare except hides the actual error; it only
            # logs a generic message before closing the socket.
            except:
                print("Ocorreu um erro")
                self.sock.close()
                break
client = Client(HOST, PORT)
|
pt
| 0.194248
|
# Menu principal # Combo dos Estados # self.comboBoxState.grid(column=0, row=1) # Combo das cidades # message = f"{self.input_area.get('1.0', 'end')}"
| 3.028019
| 3
|
tests/test_gradients_fdtd.py
|
kwadwo00/ceviche
| 111
|
6629102
|
<gh_stars>100-1000
import unittest
import sys
import autograd.numpy as npa
import numpy as np
from autograd import grad
from autograd import checkpoint
from copy import deepcopy, copy
from time import time
import sys
sys.path.append('../ceviche')
from ceviche import fdtd
from ceviche.utils import grad_num
from ceviche.jacobians import jacobian
# gradient error tolerance
ALLOWED_RATIO = 1e-4 # maximum allowed ratio of || grad_num - grad_auto || vs. || grad_num ||
DEPS = 1e-6 # numerical gradient step size
# Set True to also print objective values and full jacobians while testing.
VERBOSE = False
print("Testing the FDTD gradients")
class TestFDTD(unittest.TestCase):
    '''Tests for Gradient Correctness'''
    def setUp(self):
        """Create a small 8x8x1 FDTD problem with a Gaussian point source."""
        # basic simulation parameters
        self.Nx = 8
        self.Ny = 8
        self.Nz = 1
        self.omega = 2*np.pi*200e12
        self.dL = 5e-8
        self.pml = [2, 2, 0]
        # source parameters
        self.steps = 500
        self.t0 = 300
        self.sigma = 20
        self.source_amp = 1
        # Point source at the grid center with a Gaussian time envelope.
        self.source_pos = np.zeros((self.Nx, self.Ny, self.Nz))
        self.source_pos[self.Nx//2, self.Ny//2, self.Nz//2] = self.source_amp
        self.gaussian = lambda t: self.source_pos * self.source_amp * np.exp(-(t - self.t0)**2 / 2 / self.sigma**2)
        # starting relative permittivity (random for debugging)
        self.eps_r = np.random.random((self.Nx, self.Ny, self.Nz)) + 1
        self.eps_arr = self.eps_r.flatten()
    def check_gradient_error(self, grad_num, grad_auto):
        """ Checks the test case:
            compares the norm of the gradient to the norm of the difference
            Throws error if this is greater than ALLOWED RATIO
        """
        norm_grad = np.linalg.norm(grad_num)
        print('\t\tnorm of gradient: ', norm_grad)
        norm_diff = np.linalg.norm(grad_num - grad_auto)
        print('\t\tnorm of difference: ', norm_diff)
        norm_ratio = norm_diff / norm_grad
        print('\t\tratio of norms: ', norm_ratio)
        self.assertLessEqual(norm_ratio, ALLOWED_RATIO)
        print('')
    def test_grad_rev_E(self):
        """Reverse-mode jacobian of an E-field sum vs numerical jacobian."""
        print('\ttesting E fields in FDTD (reverse mode)')
        F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
        def objective(eps_arr):
            # Sum of all E components over the full time evolution.
            F.eps_r = eps_arr.reshape((self.Nx, self.Ny, self.Nz))
            S = 0.0
            for t_index in range(self.steps):
                fields = F.forward(Jz=self.gaussian(t_index))
                S += npa.sum(fields['Ex'] + fields['Ey'] + fields['Ez'])
            return S
        jac_autograd_rev = jacobian(objective, mode='reverse')(self.eps_arr)
        jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(self.eps_arr)
        if VERBOSE:
            print('\tobjective function value: ', objective(self.eps_arr))
            print('\tjacobian (auto): \n\t\t', jac_autograd_rev)
            print('\tjacobian (num): \n\t\t', jac_numerical)
        self.check_gradient_error(jac_numerical, jac_autograd_rev)
    def test_grad_for_E(self):
        """Forward-mode jacobian w.r.t. a scalar permittivity scale factor."""
        print('\ttesting E fields in FDTD (forward mode)')
        F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
        def objective(c):
            # Rebuild the simulation with scaled permittivity c * eps_r.
            F = fdtd(c * self.eps_r, dL=self.dL, npml=self.pml)
            S = 0.0
            for t_index in range(self.steps):
                fields = F.forward(Jx=self.gaussian(t_index))
                S += fields['Ex'] + fields['Ey'] + fields['Ez']
            return S
        c0 = 2.0
        jac_autograd_for = jacobian(objective, mode='forward')(c0)
        jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(c0)
        if VERBOSE:
            print('\tobjective function value: ', objective(self.eps_arr))
            print('\tjacobian (auto): \n\t\t', jac_autograd_for)
            print('\tjacobian (num): \n\t\t', jac_numerical)
        self.check_gradient_error(jac_numerical, jac_autograd_for)
    def test_grad_rev_H(self):
        """Reverse-mode jacobian of an H-field sum vs numerical jacobian."""
        print('\ttesting H fields in FDTD (reverse mode)')
        F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
        def objective(eps_arr):
            F.eps_r = eps_arr.reshape((self.Nx, self.Ny, self.Nz))
            S = 0.0
            for t_index in range(self.steps):
                fields = F.forward(Jx=self.gaussian(t_index))
                S += npa.sum(fields['Hx'] + fields['Hy'] + fields['Hz'])
            return S
        jac_autograd_rev = jacobian(objective)(self.eps_arr)
        jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(self.eps_arr)
        if VERBOSE:
            print('\tobjective function value: ', objective(self.eps_arr))
            print('\tjacobian (auto): \n\t\t', jac_autograd_rev)
            print('\tjacobian (num): \n\t\t', jac_numerical)
        self.check_gradient_error(jac_numerical, jac_autograd_rev)
    def test_grad_for_H(self):
        """Forward-mode jacobian of an H-field sum w.r.t. a scale factor."""
        print('\ttesting H fields in FDTD (forward mode)')
        def objective(c):
            F = fdtd(c * self.eps_r, dL=self.dL, npml=self.pml)
            S = 0.0
            for t_index in range(self.steps):
                fields = F.forward(Jx=self.gaussian(t_index))
                S += fields['Hx'] + fields['Hy'] + fields['Hz']
            return S
        c0 = 2.0
        jac_autograd_for = jacobian(objective, mode='forward')(c0)
        jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(c0)
        if VERBOSE:
            print('\tobjective function value: ', objective(c0))
            print('\tjacobian (auto): \n\t\t', jac_autograd_for)
            print('\tjacobian (num): \n\t\t', jac_numerical)
        self.check_gradient_error(jac_numerical, jac_autograd_for)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import unittest
import sys
import autograd.numpy as npa
import numpy as np
from autograd import grad
from autograd import checkpoint
from copy import deepcopy, copy
from time import time
import sys
sys.path.append('../ceviche')
from ceviche import fdtd
from ceviche.utils import grad_num
from ceviche.jacobians import jacobian
# gradient error tolerance
ALLOWED_RATIO = 1e-4 # maximum allowed ratio of || grad_num - grad_auto || vs. || grad_num ||
DEPS = 1e-6 # numerical gradient step size
VERBOSE = False
print("Testing the FDTD gradients")
class TestFDTD(unittest.TestCase):
'''Tests for Gradient Correctness'''
    def setUp(self):
        """Create a small 8x8x1 FDTD problem with a Gaussian point source."""
        # basic simulation parameters
        self.Nx = 8
        self.Ny = 8
        self.Nz = 1
        self.omega = 2*np.pi*200e12
        self.dL = 5e-8
        self.pml = [2, 2, 0]
        # source parameters
        self.steps = 500
        self.t0 = 300
        self.sigma = 20
        self.source_amp = 1
        # Point source at the grid center with a Gaussian time envelope.
        self.source_pos = np.zeros((self.Nx, self.Ny, self.Nz))
        self.source_pos[self.Nx//2, self.Ny//2, self.Nz//2] = self.source_amp
        self.gaussian = lambda t: self.source_pos * self.source_amp * np.exp(-(t - self.t0)**2 / 2 / self.sigma**2)
        # starting relative permittivity (random for debugging)
        self.eps_r = np.random.random((self.Nx, self.Ny, self.Nz)) + 1
        self.eps_arr = self.eps_r.flatten()
    def check_gradient_error(self, grad_num, grad_auto):
        """ Checks the test case:
            compares the norm of the gradient to the norm of the difference
            Throws error if this is greater than ALLOWED RATIO
        """
        norm_grad = np.linalg.norm(grad_num)
        print('\t\tnorm of gradient: ', norm_grad)
        norm_diff = np.linalg.norm(grad_num - grad_auto)
        print('\t\tnorm of difference: ', norm_diff)
        # Relative error: difference normalized by the numerical gradient norm.
        norm_ratio = norm_diff / norm_grad
        print('\t\tratio of norms: ', norm_ratio)
        self.assertLessEqual(norm_ratio, ALLOWED_RATIO)
        print('')
    def test_grad_rev_E(self):
        """Reverse-mode jacobian of an E-field sum vs numerical jacobian."""
        print('\ttesting E fields in FDTD (reverse mode)')
        F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
        def objective(eps_arr):
            # Sum of all E components over the full time evolution.
            F.eps_r = eps_arr.reshape((self.Nx, self.Ny, self.Nz))
            S = 0.0
            for t_index in range(self.steps):
                fields = F.forward(Jz=self.gaussian(t_index))
                S += npa.sum(fields['Ex'] + fields['Ey'] + fields['Ez'])
            return S
        jac_autograd_rev = jacobian(objective, mode='reverse')(self.eps_arr)
        jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(self.eps_arr)
        if VERBOSE:
            print('\tobjective function value: ', objective(self.eps_arr))
            print('\tjacobian (auto): \n\t\t', jac_autograd_rev)
            print('\tjacobian (num): \n\t\t', jac_numerical)
        self.check_gradient_error(jac_numerical, jac_autograd_rev)
def test_grad_for_E(self):
print('\ttesting E fields in FDTD (forward mode)')
F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
def objective(c):
F = fdtd(c * self.eps_r, dL=self.dL, npml=self.pml)
S = 0.0
for t_index in range(self.steps):
fields = F.forward(Jx=self.gaussian(t_index))
S += fields['Ex'] + fields['Ey'] + fields['Ez']
return S
c0 = 2.0
jac_autograd_for = jacobian(objective, mode='forward')(c0)
jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(c0)
if VERBOSE:
print('\tobjective function value: ', objective(self.eps_arr))
print('\tjacobian (auto): \n\t\t', jac_autograd_for)
print('\tjacobian (num): \n\t\t', jac_numerical)
self.check_gradient_error(jac_numerical, jac_autograd_for)
def test_grad_rev_H(self):
print('\ttesting H fields in FDTD (reverse mode)')
F = fdtd(self.eps_r, dL=self.dL, npml=self.pml)
def objective(eps_arr):
F.eps_r = eps_arr.reshape((self.Nx, self.Ny, self.Nz))
S = 0.0
for t_index in range(self.steps):
fields = F.forward(Jx=self.gaussian(t_index))
S += npa.sum(fields['Hx'] + fields['Hy'] + fields['Hz'])
return S
jac_autograd_rev = jacobian(objective)(self.eps_arr)
jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(self.eps_arr)
if VERBOSE:
print('\tobjective function value: ', objective(self.eps_arr))
print('\tjacobian (auto): \n\t\t', jac_autograd_rev)
print('\tjacobian (num): \n\t\t', jac_numerical)
self.check_gradient_error(jac_numerical, jac_autograd_rev)
def test_grad_for_H(self):
print('\ttesting H fields in FDTD (forward mode)')
def objective(c):
F = fdtd(c * self.eps_r, dL=self.dL, npml=self.pml)
S = 0.0
for t_index in range(self.steps):
fields = F.forward(Jx=self.gaussian(t_index))
S += fields['Hx'] + fields['Hy'] + fields['Hz']
return S
c0 = 2.0
jac_autograd_for = jacobian(objective, mode='forward')(c0)
jac_numerical = jacobian(objective, mode='numerical', step_size=DEPS)(c0)
if VERBOSE:
print('\tobjective function value: ', objective(c0))
print('\tjacobian (auto): \n\t\t', jac_autograd_for)
print('\tjacobian (num): \n\t\t', jac_numerical)
self.check_gradient_error(jac_numerical, jac_autograd_for)
# Run the gradient checks when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
en
| 0.563832
|
# gradient error tolerance # maximum allowed ratio of || grad_num - grad_auto || vs. || grad_num || # numerical gradient step size Tests for Gradient Correctness # basic simulation parameters # source parameters # starting relative permittivity (random for debugging) Checks the test case: compares the norm of the gradient to the norm of the difference Throws error if this is greater than ALLOWED RATIO
| 2.673536
| 3
|
build/lib/sgs/__init__.py
|
gpetrini/pySGS
| 0
|
6629103
|
from .sgs import SGS
from . import series
|
from .sgs import SGS
from . import series
|
none
| 1
| 1.038013
| 1
|
|
findatapy/timeseries/filter.py
|
DT021/findatapy
| 0
|
6629104
|
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
import pytz
import datetime
from datetime import timedelta
from findatapy.timeseries.calendar import Calendar
from findatapy.util.dataconstants import DataConstants
from findatapy.util.loggermanager import LoggerManager
constants = DataConstants()
class Filter(object):
"""Functions for filtering time series by dates and columns.
This class is used extensively in both findatapy and finmarketpy.
Market holidays are collected from web sources such as https://www.timeanddate.com/holidays/ and also individual
exchange websites, and is manually updated from time to time to take into account newly instituted holidays, and stored
in conf/holidays_table.parquet - if you need to add your own holidays.
"""
_time_series_cache = {} # shared across all instances of object!
def __init__(self):
    # Calendar instance used for holiday lookups and list-flattening helpers
    self._calendar = Calendar()
def filter_time_series(self, market_data_request, data_frame, pad_columns=False):
    """Filter a time series by the dates and ticker.field columns of a request.

    Parameters
    ----------
    market_data_request : MarketDataRequest
        defines the date range and the tickers/fields to keep
    data_frame : DataFrame
        time series to be filtered
    pad_columns : boolean
        if True, requested columns missing from data_frame are created (NaN)

    Returns
    -------
    DataFrame
    """
    filtered = self.filter_time_series_by_date(
        market_data_request.start_date, market_data_request.finish_date, data_frame)

    # Restrict to the ticker.field combinations requested
    wanted_columns = self.create_tickers_fields_list(market_data_request)

    if pad_columns:
        return self.pad_time_series_columns(wanted_columns, filtered)

    return self.filter_time_series_by_columns(wanted_columns, filtered)
def filter_time_series_by_holidays(self, data_frame, cal='FX', holidays_list=None):
    """Removes holidays from a given time series

    Parameters
    ----------
    data_frame : DataFrame
        data frame to be filtered
    cal : str
        business calendar to use
    holidays_list : list (optional)
        extra holidays to combine with the calendar's own

    Returns
    -------
    DataFrame
    """
    # BUG FIX: avoid a shared mutable default argument (was holidays_list=[])
    if holidays_list is None:
        holidays_list = []

    # Optimal case for weekdays: remove Saturday and Sunday
    if cal == 'WEEKDAY' or cal == 'WKY':
        return data_frame[data_frame.index.dayofweek <= 4]

    # Select only those holidays in the sample
    holidays_start = self._calendar.get_holidays(
        data_frame.index[0], data_frame.index[-1], cal, holidays_list=holidays_list)

    if holidays_start.size == 0:
        return data_frame

    # each holiday spans one whole day
    holidays_end = holidays_start + np.timedelta64(1, 'D')

    # Match timezone-awareness of the holiday stamps to the frame's index
    if data_frame.index.tz is None:
        holidays_start = holidays_start.tz_localize(None)
        holidays_end = holidays_end.tz_localize(None)

    # Successively cut each holiday window out of the remaining data,
    # then stitch the kept pieces back together
    data_frame_left = data_frame
    data_frame_filtered = []

    for i in range(0, len(holidays_start)):
        data_frame_temp = data_frame_left[data_frame_left.index < holidays_start[i]]
        data_frame_left = data_frame_left[data_frame_left.index >= holidays_end[i]]
        data_frame_filtered.append(data_frame_temp)

    data_frame_filtered.append(data_frame_left)

    return pd.concat(data_frame_filtered)
def filter_time_series_by_date(self, start_date, finish_date, data_frame):
    """Filter a time series between start and finish dates (both inclusive).

    Parameters
    ----------
    start_date : DateTime
        start date of calendar
    finish_date : DateTime
        finish date of calendar
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    # offset 0, endpoints inclusive
    return self.filter_time_series_by_date_offset(
        start_date, finish_date, data_frame, 0, exclude_start_end=False)
def filter_time_series_by_days(self, days, data_frame):
    """Keep only the trailing window of a time series.

    Parameters
    ----------
    days : int
        number of days back from now (UTC) to keep
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    window_end = datetime.datetime.utcnow()
    window_start = window_end - timedelta(days=days)

    # offset 0, endpoints inclusive
    return self.filter_time_series_by_date_offset(window_start, window_end, data_frame, 0)
def filter_time_series_by_date_exc(self, start_date, finish_date, data_frame):
    """Filter a time series between start and finish dates, excluding both endpoints.

    Parameters
    ----------
    start_date : DateTime
        start date of calendar
    finish_date : DateTime
        finish date of calendar
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    # offset 1, exclusive of start and finish dates
    return self.filter_time_series_by_date_offset(
        start_date, finish_date, data_frame, 1, exclude_start_end=True)
def filter_time_series_by_date_offset(self, start_date, finish_date, data_frame, offset, exclude_start_end=False):
    """Filter time series by start/finish dates (and an offset)

    Normalizes the timezone-awareness of start/finish to match the frame's
    index, then filters via filter_time_series_aux; if that raises (e.g. the
    index holds plain dates rather than Timestamps), falls back to a slower
    element-wise comparison honouring exclude_start_end.

    Parameters
    ----------
    start_date : DateTime
        start date of calendar
    finish_date : DateTime
        finish date of calendar
    data_frame : DataFrame
        data frame to be filtered
    offset : int
        offset to be applied (passed through to filter_time_series_aux)
    exclude_start_end : bool
        if True, drop rows equal to the start/finish dates (fallback path only)

    Returns
    -------
    DataFrame
    """
    if hasattr(data_frame.index, 'tz'):
        if data_frame.index.tz is not None:
            # If the start/finish dates are timezone naive, overwrite with the DataFrame timezone
            if not (isinstance(start_date, str)):
                start_date = start_date.replace(tzinfo=data_frame.index.tz)
            if not (isinstance(finish_date, str)):
                finish_date = finish_date.replace(tzinfo=data_frame.index.tz)
        else:
            # Otherwise remove timezone from start_date/finish_date
            # (plain dates have no replace(tzinfo=...) - bare except swallows that)
            if not (isinstance(start_date, str)):
                try:
                    start_date = start_date.replace(tzinfo=None)
                except:
                    pass
            if not (isinstance(finish_date, str)):
                try:
                    finish_date = finish_date.replace(tzinfo=None)
                except:
                    pass

    # An integer index carries no date information - nothing to filter
    if 'int' in str(data_frame.index.dtype):
        return data_frame

    try:
        data_frame = self.filter_time_series_aux(start_date, finish_date, data_frame, offset)
    except:
        # If we have dates stored as opposed to Timestamps (ie. daily data),
        # use a simple (slower) method for filtering daily data
        if (start_date is not None):
            if exclude_start_end:
                data_frame = data_frame.loc[start_date < data_frame.index]
            else:
                data_frame = data_frame.loc[start_date <= data_frame.index]
        if (finish_date is not None):
            if exclude_start_end:
                data_frame = data_frame.loc[data_frame.index < finish_date]
            else:
                # filter by start_date and finish_date
                data_frame = data_frame.loc[data_frame.index <= finish_date]
    return data_frame
def filter_time_series_aux(self, start_date, finish_date, data_frame, offset):
    """Filter rows between start_date and finish_date (both inclusive).

    Parameters
    ----------
    start_date : DateTime
        start date of calendar
    finish_date : DateTime
        finish date of calendar
    data_frame : DataFrame
        data frame to be filtered
    offset : int
        not implemented; kept for interface compatibility

    Returns
    -------
    DataFrame, or None when data_frame is None
    """
    if data_frame is None:
        return None

    # Boolean mask is faster than .loc[start:finish]; both endpoints inclusive
    in_range = (data_frame.index >= start_date) & (data_frame.index <= finish_date)

    return data_frame[in_range]
def filter_time_series_by_time_of_day_timezone(self, hour, minute, data_frame, timezone_of_snap='UTC'):
    """Keep only rows stamped at hour:minute in the given timezone.

    The frame is converted to timezone_of_snap for the comparison and then
    converted back to its original timezone.
    """
    original_tz = data_frame.index.tz

    snapped = data_frame.tz_convert(pytz.timezone(timezone_of_snap))
    snapped = snapped[snapped.index.minute == minute]
    snapped = snapped[snapped.index.hour == hour]

    return snapped.tz_convert(original_tz)
def filter_time_series_by_time_of_day(self, hour, minute, data_frame, in_tz=None, out_tz=None):
    """Filter time series by time of day

    Parameters
    ----------
    hour : int
        hour of day
    minute : int
        minute of day
    data_frame : DataFrame
        data frame to be filtered
    in_tz : str (optional)
        time zone of input data frame
    out_tz : str (optional)
        time zone of output data frame

    Returns
    -------
    DataFrame
    """
    if out_tz is not None:
        try:
            # tz_localize only works on a naive index; if already aware,
            # fall through to tz_convert instead
            if in_tz is not None:
                data_frame = data_frame.tz_localize(pytz.timezone(in_tz))
        except:
            data_frame = data_frame.tz_convert(pytz.timezone(in_tz))
        data_frame = data_frame.tz_convert(pytz.timezone(out_tz))
        # change internal representation of time (drops timezone info)
        data_frame.index = pd.DatetimeIndex(data_frame.index.values)
    data_frame = data_frame[data_frame.index.minute == minute]
    data_frame = data_frame[data_frame.index.hour == hour]
    return data_frame
def filter_time_series_by_minute_of_hour(self, minute, data_frame, in_tz=None, out_tz=None):
    """Keep only rows whose index minute equals `minute`.

    Parameters
    ----------
    minute : int
        minute of hour
    data_frame : DataFrame
        data frame to be filtered
    in_tz : str (optional)
        time zone of input data frame
    out_tz : str (optional)
        time zone of output data frame

    Returns
    -------
    DataFrame
    """
    if out_tz is not None:
        if in_tz is not None:
            data_frame = data_frame.tz_localize(pytz.timezone(in_tz))

        data_frame = data_frame.tz_convert(pytz.timezone(out_tz))

        # change internal representation of time (drops timezone info)
        data_frame.index = pd.DatetimeIndex(data_frame.index.values)

    return data_frame[data_frame.index.minute == minute]
def filter_time_series_between_hours(self, start_hour, finish_hour, data_frame):
    """Keep rows whose index hour lies in [start_hour, finish_hour].

    Parameters
    ----------
    start_hour : int
        start of hour filter
    finish_hour : int
        finish of hour filter
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    in_window = (data_frame.index.hour >= start_hour) & (data_frame.index.hour <= finish_hour)

    return data_frame[in_window]
def filter_time_series_by_columns(self, columns, data_frame):
    """Select the given columns, or return None if either argument is None.

    Parameters
    ----------
    columns : list(str)
        columns to keep
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame or None
    """
    if data_frame is None or columns is None:
        return None

    return data_frame[columns]
def pad_time_series_columns(self, columns, data_frame):
    """Select `columns` from a dataframe, creating any missing ones as empty columns.

    Parameters
    ----------
    columns : list(str)
        columns to be returned (missing ones are padded with NaN/NaT)
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    existing = data_frame.columns.tolist()
    present = [c for c in columns if c in existing]
    missing = [str(c) for c in columns if c not in existing]

    data_frame = data_frame[present]

    if len(missing) > 0:
        logger = LoggerManager().getLogger(__name__)
        logger.info("Padding missing columns...")  # " + str(missing))

        padding = pd.DataFrame(index=data_frame.index, columns=missing)
        data_frame = pd.concat([data_frame, padding], axis=1)

        # Force new columns to float NaNs (objects cause problems with newer
        # pandas versions), or to NaT when recognised as date columns
        for col in missing:
            if any(marker in col for marker in constants.always_date_columns):
                data_frame[col] = np.datetime64('NaT')
            else:
                data_frame[col] = np.nan

    # Restore the requested column order
    return data_frame[columns]
def filter_time_series_by_excluded_keyword(self, keyword, data_frame):
    """Drop columns whose name contains any of the given keyword(s).

    Parameters
    ----------
    keyword : str or list(str)
        columns containing any of these keywords are excluded
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    keywords = keyword if isinstance(keyword, list) else [keyword]

    kept = [[col for col in data_frame.columns if k not in col] for k in keywords]
    kept = self._calendar.flatten_list_of_lists(kept)

    return self.filter_time_series_by_columns(kept, data_frame)
def filter_time_series_by_included_keyword(self, keyword, data_frame):
    """Keep only columns whose name contains any of the given keyword(s).

    Parameters
    ----------
    keyword : str or list(str)
        columns containing any of these keywords are included
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    keywords = keyword if isinstance(keyword, list) else [keyword]

    kept = [[col for col in data_frame.columns if k in col] for k in keywords]
    kept = self._calendar.flatten_list_of_lists(kept)

    return self.filter_time_series_by_columns(kept, data_frame)
def filter_time_series_by_minute_freq(self, freq, data_frame):
    """Keep rows whose index minute is a multiple of `freq`.

    Parameters
    ----------
    freq : int
        minute frequency to be kept (e.g. 15 keeps :00, :15, :30, :45)
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    on_grid = (data_frame.index.minute % freq) == 0

    return data_frame.loc[on_grid]
def create_tickers_fields_list(self, market_data_request):
    """Create 'ticker.field' column names for every field/ticker combination.

    Parameters
    ----------
    market_data_request : MarketDataRequest
        request whose tickers and fields are expanded (str or list(str))

    Returns
    -------
    list(str)
    """
    tickers = market_data_request.tickers
    fields = market_data_request.fields

    if isinstance(tickers, str):
        tickers = [tickers]
    if isinstance(fields, str):
        fields = [fields]

    # field-major order: all tickers for the first field, then the next field
    return [t + '.' + f for f in fields for t in tickers]
def resample_time_series(self, data_frame, freq):
    # Resample to the given frequency, forward-filling gaps from prior values
    return data_frame.asfreq(freq, method='pad')
def resample_time_series_frequency(self, data_frame, data_resample_freq,
                                   data_resample_type='mean', fill_empties=False):
    """Resample a time series, aggregating each bucket by mean/first/last.

    Parameters
    ----------
    data_frame : DataFrame
        data frame to be resampled
    data_resample_freq : str
        pandas frequency alias for the resample buckets
    data_resample_type : str
        'mean', 'first' or 'last' (anything else returns None)
    fill_empties : bool
        if True, align back to the original index and forward-fill

    Returns
    -------
    DataFrame or None
    """
    aggregators = {
        'mean': lambda r: r.mean(),
        'first': lambda r: r.first(),
        'last': lambda r: r.last(),
    }

    if data_resample_type not in aggregators:
        # TODO implement other types
        return

    data_frame_r = aggregators[data_resample_type](data_frame.resample(data_resample_freq))

    if fill_empties == True:
        data_frame, data_frame_r = data_frame.align(data_frame_r, join='left', axis=0)
        data_frame_r = data_frame_r.fillna(method='ffill')

    return data_frame_r
def make_FX_1_min_working_days(self, data_frame):
    """Resample to 1-minute bars, drop FX holidays and off-hours, forward-filling gaps."""
    resampled = data_frame.resample('1min').mean()
    resampled = self.filter_time_series_by_holidays(resampled, 'FX')
    resampled = resampled.fillna(method='ffill')

    return self.remove_out_FX_out_of_hours(resampled)
def remove_out_FX_out_of_hours(self, data_frame):
    """Filter a time series for FX market hours (index assumed GMT).

    Drops Friday after 22:00 GMT, all of Saturday, Sunday before 19:00 GMT,
    and New Year's Day. Monday = 0, ..., Sunday = 6.

    Parameters
    ----------
    data_frame : DataFrame
        data frame with FX prices

    Returns
    -------
    DataFrame
    """
    dow = data_frame.index.dayofweek
    hour = data_frame.index.hour

    # NOTE(review): the class docstring says FX excludes "22h GMT Fri", but
    # `hour > 22` keeps the 22:00 Friday bar itself - confirm intended cutoff
    keep = ~((dow == 4) & (hour > 22))
    keep &= ~(dow == 5)
    keep &= ~((dow == 6) & (hour < 19))
    keep &= ~((data_frame.index.day == 1) & (data_frame.index.month == 1))

    return data_frame[keep]
def remove_duplicate_indices(self, df):
    # Keep only the first row for each duplicated index label
    duplicated = df.index.duplicated(keep='first')

    return df[~duplicated]
def mask_time_series_by_time(self, df, time_list, time_zone):
    """ Mask a time series by time of day and time zone specified

    e.g. given a time series of minutes data, keep data only in specific
    time periods every day, evaluated in the given time zone; values
    outside those windows are set to 0 (rows are kept, not dropped).

    Parameters
    ----------
    df : DataFrame
        time series to be masked (index assumed tz-aware, 'UTC')
    time_list : list of tuples
        deciding the time periods which we want to keep the data on each day
        e.g. time_list = [('01:08', '03:02'),('12:24','12:55'),('17:31','19:24')]
        * Note: assume no overlapping of these tuples
    time_zone: str
        e.g. 'Europe/London'

    Returns
    -------
    DataFrame (which the time zone is 'UTC')
    """
    # Change the time zone from 'UTC' to a given one
    df.index = df.index.tz_convert(time_zone)
    # 0/1 indicator column; 1 marks timestamps to keep
    df_mask = pd.DataFrame(0, index=df.index, columns=['mask'])

    # Mask data with each given tuple
    for i in range(0, len(time_list)):
        start_hour = int(time_list[i][0].split(':')[0])
        start_minute = int(time_list[i][0].split(':')[1])
        end_hour = int(time_list[i][1].split(':')[0])
        end_minute = int(time_list[i][1].split(':')[1])

        # E.g. if tuple is ('01:08', '03:02'),
        # take hours in target - take values in [01:00,04:00]
        narray = np.where(df.index.hour.isin(range(start_hour, end_hour + 1)), 1, 0)
        df_mask_temp = pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(), data=narray)

        # Remove minutes not in target - remove values in [01:00,01:07], [03:03,03:59]
        narray = np.where(((df.index.hour == start_hour) & (df.index.minute < start_minute)), 0, 1)
        df_mask_temp = df_mask_temp * pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(),
                                                   data=narray)
        narray = np.where((df.index.hour == end_hour) & (df.index.minute > end_minute), 0, 1)
        df_mask_temp = df_mask_temp * pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(),
                                                   data=narray)

        # Collect all the periods we want to keep the data
        # (windows assumed non-overlapping, so the sum stays 0/1)
        df_mask = df_mask + df_mask_temp

    # Zero out all values outside the masked periods
    narray = np.where(df_mask == 1, df, 0)
    df = pd.DataFrame(index=df.index, columns=df.columns.tolist(), data=narray)

    df.index = df.index.tz_convert('UTC')  # change the time zone to 'UTC'

    return df
|
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
import pytz
import datetime
from datetime import timedelta
from findatapy.timeseries.calendar import Calendar
from findatapy.util.dataconstants import DataConstants
from findatapy.util.loggermanager import LoggerManager
constants = DataConstants()
class Filter(object):
"""Functions for filtering time series by dates and columns.
This class is used extensively in both findatapy and finmarketpy.
Market holidays are collected from web sources such as https://www.timeanddate.com/holidays/ and also individual
exchange websites, and is manually updated from time to time to take into account newly instituted holidays, and stored
in conf/holidays_table.parquet - if you need to add your own holidays.
"""
_time_series_cache = {} # shared across all instances of object!
def __init__(self):
self._calendar = Calendar()
def filter_time_series(self, market_data_request, data_frame, pad_columns=False):
"""Filters a time series given a set of criteria (like start/finish date and tickers)
Parameters
----------
market_data_request : MarketDataRequest
defining time series filtering
data_frame : DataFrame
time series to be filtered
pad_columns : boolean
true, non-existant columns with nan
Returns
-------
DataFrame
"""
start_date = market_data_request.start_date
finish_date = market_data_request.finish_date
data_frame = self.filter_time_series_by_date(start_date, finish_date, data_frame)
# Filter by ticker.field combinations requested
columns = self.create_tickers_fields_list(market_data_request)
if (pad_columns):
data_frame = self.pad_time_series_columns(columns, data_frame)
else:
data_frame = self.filter_time_series_by_columns(columns, data_frame)
return data_frame
def filter_time_series_by_holidays(self, data_frame, cal='FX', holidays_list=[]):
"""Removes holidays from a given time series
Parameters
----------
data_frame : DataFrame
data frame to be filtered
cal : str
business calendar to use
Returns
-------
DataFrame
"""
# Optimal case for weekdays: remove Saturday and Sunday
if (cal == 'WEEKDAY' or cal == 'WKY'):
return data_frame[data_frame.index.dayofweek <= 4]
# Select only those holidays in the sample
holidays_start = self._calendar.get_holidays(data_frame.index[0], data_frame.index[-1], cal, holidays_list=holidays_list)
if (holidays_start.size == 0):
return data_frame
holidays_end = holidays_start + np.timedelta64(1, 'D')
# floored_dates = data_frame.index.normalize()
#
# filter_by_index_start = floored_dates.searchsorted(holidays_start)
# filter_by_index_end = floored_dates.searchsorted(holidays_end)
#
# indices_to_keep = []
#
# if filter_by_index_end[0] == 0:
# counter = filter_by_index_end[0] + 1
# start_index = 1
# else:
# counter = 0
# start_index = 0
#
# for i in range(start_index, len(holidays_start)):
# indices = list(range(counter, filter_by_index_start[i] - 1))
# indices_to_keep = indices_to_keep + indices
#
# counter = filter_by_index_end[i] + 1
#
# indices = list(range(counter, len(floored_dates)))
# indices_to_keep = indices_to_keep + indices
#
# data_frame_filtered = data_frame[indices_to_keep]
if data_frame.index.tz is None:
holidays_start = holidays_start.tz_localize(None)
holidays_end = holidays_end.tz_localize(None)
data_frame_left = data_frame
data_frame_filtered = []
for i in range(0, len(holidays_start)):
data_frame_temp = data_frame_left[data_frame_left.index < holidays_start[i]]
data_frame_left = data_frame_left[data_frame_left.index >= holidays_end[i]]
data_frame_filtered.append(data_frame_temp)
data_frame_filtered.append(data_frame_left)
return pd.concat(data_frame_filtered)
def filter_time_series_by_date(self, start_date, finish_date, data_frame):
"""Filter time series by start/finish dates
Parameters
----------
start_date : DateTime
start date of calendar
finish_date : DataTime
finish date of calendar
data_frame : DataFrame
data frame to be filtered
Returns
-------
DataFrame
"""
offset = 0 # inclusive
return self.filter_time_series_by_date_offset(start_date, finish_date, data_frame, offset,
exclude_start_end=False)
def filter_time_series_by_days(self, days, data_frame):
"""Filter time series by start/finish dates
Parameters
----------
start_date : DateTime
start date of calendar
finish_date : DataTime
finish date of calendar
data_frame : DataFrame
data frame to be filtered
Returns
-------
DataFrame
"""
offset = 0 # inclusive
finish_date = datetime.datetime.utcnow()
start_date = finish_date - timedelta(days=days)
return self.filter_time_series_by_date_offset(start_date, finish_date, data_frame, offset)
def filter_time_series_by_date_exc(self, start_date, finish_date, data_frame):
"""Filter time series by start/finish dates (exclude start & finish dates)
Parameters
----------
start_date : DateTime
start date of calendar
finish_date : DataTime
finish date of calendar
data_frame : DataFrame
data frame to be filtered
Returns
-------
DataFrame
"""
offset = 1 # exclusive of start finish date
return self.filter_time_series_by_date_offset(start_date, finish_date, data_frame, offset,
exclude_start_end=True)
# try:
# # filter by dates for intraday data
# if(start_date is not None):
# data_frame = data_frame.loc[start_date <= data_frame.index]
#
# if(finish_date is not None):
# # filter by start_date and finish_date
# data_frame = data_frame.loc[data_frame.index <= finish_date]
# except:
# # filter by dates for daily data
# if(start_date is not None):
# data_frame = data_frame.loc[start_date.date() <= data_frame.index]
#
# if(finish_date is not None):
# # filter by start_date and finish_date
# data_frame = data_frame.loc[data_frame.index <= finish_date.date()]
#
# return data_frame
def filter_time_series_by_date_offset(self, start_date, finish_date, data_frame, offset, exclude_start_end=False):
"""Filter time series by start/finish dates (and an offset)
Parameters
----------
start_date : DateTime
start date of calendar
finish_date : DataTime
finish date of calendar
data_frame : DataFrame
data frame to be filtered
offset : int
offset to be applied
Returns
-------
DataFrame
"""
if hasattr(data_frame.index, 'tz'):
if data_frame.index.tz is not None:
# If the start/finish dates are timezone naive, overwrite with the DataFrame timezone
if not (isinstance(start_date, str)):
start_date = start_date.replace(tzinfo=data_frame.index.tz)
if not (isinstance(finish_date, str)):
finish_date = finish_date.replace(tzinfo=data_frame.index.tz)
else:
# Otherwise remove timezone from start_date/finish_date
if not (isinstance(start_date, str)):
try:
start_date = start_date.replace(tzinfo=None)
except:
pass
if not (isinstance(finish_date, str)):
try:
finish_date = finish_date.replace(tzinfo=None)
except:
pass
if 'int' in str(data_frame.index.dtype):
return data_frame
try:
data_frame = self.filter_time_series_aux(start_date, finish_date, data_frame, offset)
except:
# start_date = start_date.date()
# finish_date = finish_date.date()
# if isinstance(start_date, str):
# # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M'
# try:
# start_date = datetime.datetime.strptime(start_date, '%b %d %Y %H:%M')
# except:
# i = 0
#
# if isinstance(finish_date, str):
# # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M'
# try:
# finish_date = datetime.datetime.strptime(finish_date, '%b %d %Y %H:%M')
# except:
# i = 0
# try:
# start_date = start_date.date()
# except: pass
#
# try:
# finish_date = finish_date.date()
# except: pass
# if we have dates stored as opposed to TimeStamps (ie. daily data), we use a simple (slower) method
# for filtering daily data
if (start_date is not None):
if exclude_start_end:
data_frame = data_frame.loc[start_date < data_frame.index]
else:
data_frame = data_frame.loc[start_date <= data_frame.index]
if (finish_date is not None):
if exclude_start_end:
data_frame = data_frame.loc[data_frame.index < finish_date]
else:
# filter by start_date and finish_date
data_frame = data_frame.loc[data_frame.index <= finish_date]
return data_frame
def filter_time_series_aux(self, start_date, finish_date, data_frame, offset):
    """Restrict a time series to the inclusive [start_date, finish_date] window.

    Parameters
    ----------
    start_date : DateTime
        first timestamp kept (inclusive)
    finish_date : DateTime
        last timestamp kept (inclusive)
    data_frame : DataFrame
        time series to be filtered (may be None)
    offset : int (not implemented!)
        kept only for interface compatibility; ignored
    Returns
    -------
    DataFrame or None
    """
    if data_frame is None:
        return None

    # Boolean-mask the index directly: inclusive at both ends, and faster
    # than label slicing with .loc[start_date:finish_date]
    in_window = (data_frame.index >= start_date) & (data_frame.index <= finish_date)

    return data_frame[in_window]
def filter_time_series_by_time_of_day_timezone(self, hour, minute, data_frame, timezone_of_snap='UTC'):
    """Keep only rows at a given hour:minute of day, evaluated in a chosen timezone.

    Parameters
    ----------
    hour : int
        hour of day, interpreted in timezone_of_snap
    minute : int
        minute of hour, interpreted in timezone_of_snap
    data_frame : DataFrame
        time series to be filtered; index must already be timezone-aware
        (tz_convert raises on a naive index)
    timezone_of_snap : str
        timezone in which the hour/minute snap is taken (default 'UTC')

    Returns
    -------
    DataFrame
        filtered series, converted back to its original timezone
    """
    # Remember the incoming timezone so it can be restored after filtering
    old_tz = data_frame.index.tz
    data_frame = data_frame.tz_convert(pytz.timezone(timezone_of_snap))
    data_frame = data_frame[data_frame.index.minute == minute]
    data_frame = data_frame[data_frame.index.hour == hour]
    # Convert back so callers see the same timezone they passed in
    data_frame = data_frame.tz_convert(old_tz)

    return data_frame
def filter_time_series_by_time_of_day(self, hour, minute, data_frame, in_tz=None, out_tz=None):
    """Filter time series by time of day

    Parameters
    ----------
    hour : int
        hour of day
    minute : int
        minute of day
    data_frame : DataFrame
        data frame to be filtered
    in_tz : str (optional)
        time zone of input data frame
    out_tz : str (optional)
        time zone of output data frame

    Returns
    -------
    DataFrame
    """
    if out_tz is not None:
        try:
            # Localise a naive index to in_tz...
            if in_tz is not None:
                data_frame = data_frame.tz_localize(pytz.timezone(in_tz))
        except:
            # ...falling back to a conversion when the index is already
            # tz-aware (tz_localize raises in that case)
            data_frame = data_frame.tz_convert(pytz.timezone(in_tz))

        data_frame = data_frame.tz_convert(pytz.timezone(out_tz))

        # change internal representation of time: rebuild a naive index so
        # the hour/minute comparisons below use out_tz wall-clock time
        data_frame.index = pd.DatetimeIndex(data_frame.index.values)

    data_frame = data_frame[data_frame.index.minute == minute]
    data_frame = data_frame[data_frame.index.hour == hour]

    return data_frame
def filter_time_series_by_minute_of_hour(self, minute, data_frame, in_tz=None, out_tz=None):
    """Keep only the rows whose timestamp minute equals *minute*.

    Parameters
    ----------
    minute : int
        minute of hour
    data_frame : DataFrame
        data frame to be filtered
    in_tz : str (optional)
        time zone of input data frame
    out_tz : str (optional)
        time zone of output data frame

    Returns
    -------
    DataFrame
    """
    if out_tz is not None:
        if in_tz is not None:
            data_frame = data_frame.tz_localize(pytz.timezone(in_tz))

        data_frame = data_frame.tz_convert(pytz.timezone(out_tz))

        # Rebuild a naive index so the minute comparison below is done in
        # out_tz wall-clock time
        data_frame.index = pd.DatetimeIndex(data_frame.index.values)

    minute_matches = data_frame.index.minute == minute

    return data_frame[minute_matches]
def filter_time_series_between_hours(self, start_hour, finish_hour, data_frame):
    """Keep only rows whose hour of day lies in [start_hour, finish_hour].

    Parameters
    ----------
    start_hour : int
        first hour kept (inclusive)
    finish_hour : int
        last hour kept (inclusive)
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    hours = data_frame.index.hour

    # Single combined mask is equivalent to the two successive filters
    return data_frame[(hours >= start_hour) & (hours <= finish_hour)]
def filter_time_series_by_columns(self, columns, data_frame):
    """Select a subset of columns from a time series.

    Parameters
    ----------
    columns : list(str)
        columns to keep
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame or None
        None when either argument is None
    """
    # Guard clause: nothing to select from / nothing selected
    if data_frame is None or columns is None:
        return None

    return data_frame[columns]
def pad_time_series_columns(self, columns, data_frame):
    """Selects time series from a dataframe and if necessary creates empty columns

    Requested columns that are missing from the input are created filled with
    NaN (or NaT for date-like columns); the result has exactly *columns*, in
    that order.

    Parameters
    ----------
    columns : list(str)
        columns the output must contain
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    old_columns = data_frame.columns.tolist()

    # Split the request into columns that exist and columns to be padded
    common_columns = [val for val in columns if val in old_columns]
    uncommon_columns = [val for val in columns if val not in old_columns]
    uncommon_columns = [str(x) for x in uncommon_columns]

    data_frame = data_frame[common_columns]

    if len(uncommon_columns) > 0:
        logger = LoggerManager().getLogger(__name__)
        logger.info("Padding missing columns...") # " + str(uncommon_columns))

        new_data_frame = pd.DataFrame(index=data_frame.index, columns=uncommon_columns)

        data_frame = pd.concat([data_frame, new_data_frame], axis=1)

        # Force new columns to float NaNs (not objects which causes problems with newer pandas versions)
        # or to NaT if they are date columns
        for u in uncommon_columns:
            is_date = False

            # Substring match against the project's configured list of
            # date-like column names
            for c in constants.always_date_columns:
                if c in u:
                    is_date = True

            if is_date:
                data_frame[u] = np.datetime64('NaT')
            else:
                data_frame[u] = np.nan

    # SLOW method below
    # for x in uncommon_columns: data_frame.loc[:,x] = np.nan

    # Get columns in same order again
    data_frame = data_frame[columns]

    return data_frame
def filter_time_series_by_excluded_keyword(self, keyword, data_frame):
    """Filter time series to exclude columns which contain keyword

    Parameters
    ----------
    keyword : str or list(str)
        keyword(s); columns containing a keyword are excluded
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    # Normalise a single keyword to a one-element list
    if not (isinstance(keyword, list)):
        keyword = [keyword]

    columns = []

    # One sub-list per keyword: the columns NOT containing that keyword
    for k in keyword:
        columns.append([elem for elem in data_frame.columns if k not in elem])

    # NOTE(review): with several keywords the flattened union can re-include
    # a column excluded by one keyword but retained by another — confirm
    # whether intersection was intended
    columns = self._calendar.flatten_list_of_lists(columns)

    return self.filter_time_series_by_columns(columns, data_frame)
def filter_time_series_by_included_keyword(self, keyword, data_frame):
    """Filter time series to include columns which contain keyword

    Parameters
    ----------
    keyword : str or list(str)
        keyword(s); only columns containing a keyword are kept
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    # Normalise a single keyword to a one-element list
    if not (isinstance(keyword, list)):
        keyword = [keyword]

    columns = []

    # One sub-list per keyword: the columns containing that keyword
    for k in keyword:
        columns.append([elem for elem in data_frame.columns if k in elem])

    # Union of matches across all keywords (may contain duplicates if a
    # column matches more than one keyword)
    columns = self._calendar.flatten_list_of_lists(columns)

    return self.filter_time_series_by_columns(columns, data_frame)
def filter_time_series_by_minute_freq(self, freq, data_frame):
    """Keep only rows whose minute is a multiple of *freq*.

    Parameters
    ----------
    freq : int
        minute frequency kept (eg. 15 keeps minutes 0, 15, 30, 45)
    data_frame : DataFrame
        data frame to be filtered

    Returns
    -------
    DataFrame
    """
    on_grid = data_frame.index.minute % freq == 0

    return data_frame.loc[on_grid]
def create_tickers_fields_list(self, market_data_request):
    """Creates a list of 'ticker.field' strings from a MarketDataRequest.

    Parameters
    ----------
    market_data_request : MarketDataRequest
        request whose tickers/fields are expanded

    Returns
    -------
    list(str)
        one entry per (field, ticker) pair, field-major order
    """
    tickers = market_data_request.tickers
    fields = market_data_request.fields

    # Accept bare strings as one-element lists
    if isinstance(tickers, str):
        tickers = [tickers]
    if isinstance(fields, str):
        fields = [fields]

    # Field-major: all tickers for the first field, then the next field, ...
    return [t + '.' + f for f in fields for t in tickers]
def resample_time_series(self, data_frame, freq):
    """Reindex onto frequency *freq*, forward-padding values at the new points."""
    return data_frame.asfreq(freq, method='pad')
def resample_time_series_frequency(self, data_frame, data_resample_freq,
                                   data_resample_type='mean', fill_empties=False):
    """Resample a time series to a new frequency with a chosen aggregation.

    Parameters
    ----------
    data_frame : DataFrame
        time series to be resampled
    data_resample_freq : str
        pandas offset alias for the target frequency (eg. '1min', '2h')
    data_resample_type : str
        bucket aggregation: 'mean', 'first' or 'last'
    fill_empties : bool
        if True, align the result back onto the original index (left join)
        and forward-fill the gaps

    Returns
    -------
    DataFrame or None
        resampled series; None when data_resample_type is unsupported
        (matching the historical behaviour)
    """
    resampler = data_frame.resample(data_resample_freq)

    # Should we take the mean, first, last in our resample
    if data_resample_type == 'mean':
        data_frame_r = resampler.mean()
    elif data_resample_type == 'first':
        data_frame_r = resampler.first()
    elif data_resample_type == 'last':
        data_frame_r = resampler.last()
    else:
        # TODO implement other aggregation types
        return

    if fill_empties:
        data_frame, data_frame_r = data_frame.align(data_frame_r, join='left', axis=0)
        # .ffill() replaces the deprecated fillna(method='ffill') call
        data_frame_r = data_frame_r.ffill()

    return data_frame_r
def make_FX_1_min_working_days(self, data_frame):
    """Put FX data onto a forward-filled 1-minute grid over working days.

    Resamples to 1-minute bars (mean), removes FX holidays, forward-fills
    the gaps and strips weekend/out-of-hours observations.

    Parameters
    ----------
    data_frame : DataFrame
        FX time series (GMT index assumed by remove_out_FX_out_of_hours)

    Returns
    -------
    DataFrame
    """
    data_frame = data_frame.resample('1min').mean()
    data_frame = self.filter_time_series_by_holidays(data_frame, 'FX')
    # .ffill() replaces the deprecated fillna(method='ffill') call
    data_frame = data_frame.ffill()
    data_frame = self.remove_out_FX_out_of_hours(data_frame)

    return data_frame
def remove_out_FX_out_of_hours(self, data_frame):
    """Drop observations outside FX trading hours.

    Assumes the index is in GMT. Removes late Friday, all of Saturday,
    early Sunday (before 19:00 GMT) and New Year's Day.

    Parameters
    ----------
    data_frame : DataFrame
        data frame with FX prices (GMT index)

    Returns
    -------
    DataFrame
    """
    # dayofweek: Monday = 0, ..., Sunday = 6
    idx = data_frame.index
    # NOTE(review): hour > 22 keeps 22:00-22:59 on Friday — confirm intended
    friday_late = (idx.dayofweek == 4) & (idx.hour > 22)
    data_frame = data_frame[~friday_late]

    # All of Saturday
    data_frame = data_frame[data_frame.index.dayofweek != 5]

    idx = data_frame.index
    sunday_early = (idx.dayofweek == 6) & (idx.hour < 19)
    data_frame = data_frame[~sunday_early]

    idx = data_frame.index
    new_years_day = (idx.day == 1) & (idx.month == 1)
    return data_frame[~new_years_day]
def remove_duplicate_indices(self, df):
    """Drop rows with a repeated index value, keeping the first occurrence."""
    first_occurrence = ~df.index.duplicated(keep='first')
    return df[first_occurrence]
def mask_time_series_by_time(self, df, time_list, time_zone):
    """ Mask a time series by time of day and time zone specified

    e.g. given a time series of minute data, keep the values falling inside
    the given daily time windows (evaluated in *time_zone*) and ZERO every
    value outside them — rows are not dropped, only zeroed.

    Parameters
    ----------
    df : DataFrame
        time series needed to be masked; index must be tz-aware (UTC expected)
    time_list : list of tuples
        deciding the time periods which we want to keep the data on each day
        e.g. time_list = [('01:08', '03:02'),('12:24','12:55'),('17:31','19:24')]
        * Note: assume no overlapping of these tuples
    time_zone: str
        e.g. 'Europe/London'

    Returns
    -------
    DataFrame (which the time zone is 'UTC')
    """
    # Change the time zone from 'UTC' to a given one
    # NOTE: mutates df.index in place on the caller's frame
    df.index = df.index.tz_convert(time_zone)
    df_mask = pd.DataFrame(0, index=df.index, columns=['mask'])

    # Mask data with each given tuple; windows assumed non-overlapping so
    # summing the per-window masks yields a 0/1 mask
    for i in range(0, len(time_list)):
        start_hour = int(time_list[i][0].split(':')[0])
        start_minute = int(time_list[i][0].split(':')[1])
        end_hour = int(time_list[i][1].split(':')[0])
        end_minute = int(time_list[i][1].split(':')[1])

        # E.g. if tuple is ('01:08', '03:02'),
        # take hours in target - take values in [01:00,04:00]
        narray = np.where(df.index.hour.isin(range(start_hour, end_hour + 1)), 1, 0)
        df_mask_temp = pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(), data=narray)

        # Remove minutes not in target - remove values in [01:00,01:07], [03:03,03:59]
        narray = np.where(((df.index.hour == start_hour) & (df.index.minute < start_minute)), 0, 1)
        df_mask_temp = df_mask_temp * pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(),
                                                   data=narray)
        narray = np.where((df.index.hour == end_hour) & (df.index.minute > end_minute), 0, 1)
        df_mask_temp = df_mask_temp * pd.DataFrame(index=df.index, columns=df_mask.columns.tolist(),
                                                   data=narray)

        # Collect all the periods we want to keep the data
        df_mask = df_mask + df_mask_temp

    # Zero out everything that falls outside every window (mask != 1)
    narray = np.where(df_mask == 1, df, 0)
    df = pd.DataFrame(index=df.index, columns=df.columns.tolist(), data=narray)
    df.index = df.index.tz_convert('UTC') # change the time zone to 'UTC'

    return df
|
en
| 0.622076
|
# <NAME> # # Copyright 2016-2020 Cuemacro # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and limitations under the License. # Functions for filtering time series by dates and columns. This class is used extensively in both findatapy and finmarketpy. Market holidays are collected from web sources such as https://www.timeanddate.com/holidays/ and also individual exchange websites, and is manually updated from time to time to take into account newly instituted holidays, and stored in conf/holidays_table.parquet - if you need to add your own holidays. # shared across all instances of object! 
Filters a time series given a set of criteria (like start/finish date and tickers) Parameters ---------- market_data_request : MarketDataRequest defining time series filtering data_frame : DataFrame time series to be filtered pad_columns : boolean true, non-existant columns with nan Returns ------- DataFrame # Filter by ticker.field combinations requested Removes holidays from a given time series Parameters ---------- data_frame : DataFrame data frame to be filtered cal : str business calendar to use Returns ------- DataFrame # Optimal case for weekdays: remove Saturday and Sunday # Select only those holidays in the sample # floored_dates = data_frame.index.normalize() # # filter_by_index_start = floored_dates.searchsorted(holidays_start) # filter_by_index_end = floored_dates.searchsorted(holidays_end) # # indices_to_keep = [] # # if filter_by_index_end[0] == 0: # counter = filter_by_index_end[0] + 1 # start_index = 1 # else: # counter = 0 # start_index = 0 # # for i in range(start_index, len(holidays_start)): # indices = list(range(counter, filter_by_index_start[i] - 1)) # indices_to_keep = indices_to_keep + indices # # counter = filter_by_index_end[i] + 1 # # indices = list(range(counter, len(floored_dates))) # indices_to_keep = indices_to_keep + indices # # data_frame_filtered = data_frame[indices_to_keep] Filter time series by start/finish dates Parameters ---------- start_date : DateTime start date of calendar finish_date : DataTime finish date of calendar data_frame : DataFrame data frame to be filtered Returns ------- DataFrame # inclusive Filter time series by start/finish dates Parameters ---------- start_date : DateTime start date of calendar finish_date : DataTime finish date of calendar data_frame : DataFrame data frame to be filtered Returns ------- DataFrame # inclusive Filter time series by start/finish dates (exclude start & finish dates) Parameters ---------- start_date : DateTime start date of calendar finish_date : DataTime finish date of 
calendar data_frame : DataFrame data frame to be filtered Returns ------- DataFrame # exclusive of start finish date # try: # # filter by dates for intraday data # if(start_date is not None): # data_frame = data_frame.loc[start_date <= data_frame.index] # # if(finish_date is not None): # # filter by start_date and finish_date # data_frame = data_frame.loc[data_frame.index <= finish_date] # except: # # filter by dates for daily data # if(start_date is not None): # data_frame = data_frame.loc[start_date.date() <= data_frame.index] # # if(finish_date is not None): # # filter by start_date and finish_date # data_frame = data_frame.loc[data_frame.index <= finish_date.date()] # # return data_frame Filter time series by start/finish dates (and an offset) Parameters ---------- start_date : DateTime start date of calendar finish_date : DataTime finish date of calendar data_frame : DataFrame data frame to be filtered offset : int offset to be applied Returns ------- DataFrame # If the start/finish dates are timezone naive, overwrite with the DataFrame timezone # Otherwise remove timezone from start_date/finish_date # start_date = start_date.date() # finish_date = finish_date.date() # if isinstance(start_date, str): # # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M' # try: # start_date = datetime.datetime.strptime(start_date, '%b %d %Y %H:%M') # except: # i = 0 # # if isinstance(finish_date, str): # # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M' # try: # finish_date = datetime.datetime.strptime(finish_date, '%b %d %Y %H:%M') # except: # i = 0 # try: # start_date = start_date.date() # except: pass # # try: # finish_date = finish_date.date() # except: pass # if we have dates stored as opposed to TimeStamps (ie. 
daily data), we use a simple (slower) method # for filtering daily data # filter by start_date and finish_date Filter time series by start/finish dates (and an offset) Parameters ---------- start_date : DateTime start date of calendar finish_date : DataTime finish date of calendar data_frame : DataFrame data frame to be filtered offset : int (not implemented!) offset to be applied Returns ------- DataFrame # start_index = 0 # finish_index = len(data_frame.index) - offset # filter by dates for intraday data # if(start_date is not None): # start_index = data_frame.index.searchsorted(start_date) # # if (0 <= start_index + offset < len(data_frame.index)): # start_index = start_index + offset # # # data_frame = data_frame[start_date < data_frame.index] # # if(finish_date is not None): # finish_index = data_frame.index.searchsorted(finish_date) # # if (0 <= finish_index - offset < len(data_frame.index)): # finish_index = finish_index - offset # CAREFUL: need + 1 otherwise will only return 1 less than usual # return data_frame.iloc[start_date:finish_date] # Just use pandas, quicker and simpler code! # Slower method.. 
# return data_frame.loc[start_date:finish_date] # Much faster, start and finish dates are inclusive Filter time series by time of day Parameters ---------- hour : int hour of day minute : int minute of day data_frame : DataFrame data frame to be filtered in_tz : str (optional) time zone of input data frame out_tz : str (optional) time zone of output data frame Returns ------- DataFrame # change internal representation of time Filter time series by minute of hour Parameters ---------- minute : int minute of hour data_frame : DataFrame data frame to be filtered in_tz : str (optional) time zone of input data frame out_tz : str (optional) time zone of output data frame Returns ------- DataFrame # change internal representation of time Filter time series between hours of the day Parameters ---------- start_hour : int start of hour filter finish_hour : int finish of hour filter data_frame : DataFrame data frame to be filtered Returns ------- DataFrame Filter time series by certain columns Parameters ---------- columns : list(str) start of hour filter data_frame : DataFrame data frame to be filtered Returns ------- DataFrame Selects time series from a dataframe and if necessary creates empty columns Parameters ---------- columns : str columns to be included with this keyword data_frame : DataFrame data frame to be filtered Returns ------- DataFrame # " + str(uncommon_columns)) # Force new columns to float NaNs (not objects which causes problems with newer pandas versions) # or to NaT if they are date columns # SLOW method below # for x in uncommon_columns: data_frame.loc[:,x] = np.nan # Get columns in same order again Filter time series to exclude columns which contain keyword Parameters ---------- keyword : str columns to be excluded with this keyword data_frame : DataFrame data frame to be filtered Returns ------- DataFrame Filter time series to include columns which contain keyword Parameters ---------- keyword : str columns to be included with this keyword data_frame 
: DataFrame data frame to be filtered Returns ------- DataFrame Filter time series where minutes correspond to certain minute filter Parameters ---------- freq : int minute frequency to be filtered data_frame : DataFrame data frame to be filtered Returns ------- DataFrame Creates a list of tickers concatenated with fields from a MarketDataRequest Parameters ---------- market_data_request : MarketDataRequest request to be expanded Returns ------- list(str) # Create ticker.field combination for series we wish to return # Should we take the mean, first, last in our resample # TODO implement other types Filtered a time series for FX hours (ie. excludes 22h GMT Fri - 19h GMT Sun and New Year's Day) Parameters ---------- data_frame : DataFrame data frame with FX prices Returns ------- list(str) # assume data_frame is in GMT time # remove Fri after 22:00 GMT # remove Sat # remove Sun before 19:00 GMT # Monday = 0, ..., Sunday = 6 Mask a time series by time of day and time zone specified e.g. given a time series minutes data want to keep data at specific time periods every day with a considered time zone Parameters ---------- df : DateTime time series needed to be masked time_list : list of tuples deciding the time periods which we want to keep the data on each day e.g. time_list = [('01:08', '03:02'),('12:24','12:55'),('17:31','19:24')] * Note: assume no overlapping of these tuples time_zone: str e.g. 'Europe/London' Returns ------- DataFrame (which the time zone is 'UTC') # Change the time zone from 'UTC' to a given one # Mask data with each given tuple # E.g. if tuple is ('01:08', '03:02'), # take hours in target - take values in [01:00,04:00] # Remove minutes not in target - remove values in [01:00,01:07], [03:03,03:59] # Collect all the periods we want to keep the data # change the time zone to 'UTC'
| 2.608312
| 3
|
docs/examples/testing/unit_with_provided_injection_test.py
|
mitsuhiko/nameko
| 3
|
6629105
|
<reponame>mitsuhiko/nameko
""" Service unit testing best practice, with a provided injection.
"""
import pytest
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from nameko.rpc import rpc
from nameko.contrib.sqlalchemy import orm_session
from nameko.testing.services import worker_factory
Base = declarative_base()
class Result(Base):
    """ORM model that stores a single saved string value."""
    __tablename__ = 'model'
    id = Column(Integer, primary_key=True)
    # payload written by Service.save; limited to 64 characters
    value = Column(String(64))
class Service(object):
    """Nameko service exposing an RPC method that persists values via SQLAlchemy."""

    # DependencyProvider: per-worker SQLAlchemy session bound to Base's metadata
    db = orm_session(Base)

    @rpc
    def save(self, value):
        """Persist *value* as a new Result row and commit the session."""
        result = Result(value=value)
        self.db.add(result)
        self.db.commit()
# =============================================================================
# Begin test
# =============================================================================
@pytest.fixture
def session():
    """Provide a SQLAlchemy session bound to a fresh in-memory SQLite database."""
    # create sqlite database and session
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
def test_service(session):
    """Unit-test Service.save against a real (in-memory) database session."""
    # create instance, providing the real session for the ``db`` injection
    service = worker_factory(Service, db=session)

    # verify ``save`` logic by querying the real database
    service.save("helloworld")
    assert session.query(Result.value).all() == [("helloworld",)]
|
""" Service unit testing best practice, with a provided injection.
"""
import pytest
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from nameko.rpc import rpc
from nameko.contrib.sqlalchemy import orm_session
from nameko.testing.services import worker_factory
Base = declarative_base()
class Result(Base):
__tablename__ = 'model'
id = Column(Integer, primary_key=True)
value = Column(String(64))
class Service(object):
db = orm_session(Base)
@rpc
def save(self, value):
result = Result(value=value)
self.db.add(result)
self.db.commit()
# =============================================================================
# Begin test
# =============================================================================
@pytest.fixture
def session():
# create sqlite database and session
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
def test_service(session):
# create instance, providing the real session for the ``db`` injection
service = worker_factory(Service, db=session)
# verify ``save`` logic by querying the real database
service.save("helloworld")
assert session.query(Result.value).all() == [("helloworld",)]
|
en
| 0.545259
|
Service unit testing best practice, with a provided injection. # ============================================================================= # Begin test # ============================================================================= # create sqlite database and session # create instance, providing the real session for the ``db`` injection # verify ``save`` logic by querying the real database
| 2.700065
| 3
|
mysite/sensor_com/disk_info.py
|
martinloland/greenhouse
| 1
|
6629106
|
<reponame>martinloland/greenhouse<gh_stars>1-10
import ctypes
import os
import platform
import sys
def get_free_space_mb(dirname):
    """Return folder/drive free space (in megabytes)."""
    if platform.system() != 'Windows':
        # POSIX: available (non-root) blocks times fragment size
        stats = os.statvfs(dirname)
        return stats.f_bavail * stats.f_frsize / 1024 / 1024

    # Windows: query the Win32 API via ctypes
    free_bytes = ctypes.c_ulonglong(0)
    ctypes.windll.kernel32.GetDiskFreeSpaceExW(
        ctypes.c_wchar_p(dirname), None, None, ctypes.pointer(free_bytes))
    return free_bytes.value / 1024 / 1024
|
import ctypes
import os
import platform
import sys
def get_free_space_mb(dirname):
"""Return folder/drive free space (in megabytes)."""
if platform.system() == 'Windows':
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(dirname), None, None, ctypes.pointer(free_bytes))
return free_bytes.value / 1024 / 1024
else:
st = os.statvfs(dirname)
return st.f_bavail * st.f_frsize / 1024 / 1024
|
en
| 0.465287
|
Return folder/drive free space (in megabytes).
| 2.941602
| 3
|
app/webauthn/__init__.py
|
onyxcherry/OnyxcherryOTP
| 1
|
6629107
|
from flask import Blueprint
# Blueprint for the WebAuthn views; registered by the application factory
bp = Blueprint("webauthn", __name__)

# Imported after bp is defined so routes can `from . import bp` without a cycle
from app.webauthn import routes
|
from flask import Blueprint
bp = Blueprint("webauthn", __name__)
from app.webauthn import routes
|
none
| 1
| 1.491379
| 1
|
|
scripts/run_box3d_pixel.py
|
fredshentu/public_model_based_controller
| 0
|
6629108
|
<reponame>fredshentu/public_model_based_controller
import os
from rllab.baselines.gaussian_conv_baseline import GaussianConvBaseline
from sandbox.rocky.tf.policies.conv_nn_policy import ConvNNPolicy
from rllab.envs.normalized_env import normalize
from rllab.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
import itertools
stub(globals())

# Param ranges
seeds = range(5)

for seed in seeds:
    # Wrap the pixel-observation gym env for rllab's TF stack: normalize()
    # rescales actions/observations and TfEnv adapts it for sandbox.rocky.tf
    # policies. (The original referenced an undefined name `NormalizedEnv`;
    # `normalize` and `TfEnv` are the symbols this file actually imports.)
    mdp = TfEnv(normalize(GymEnv('Box3dReachPixel-v0', record_video=False,
                                 log_dir='/tmp/gym_test', record_log=False)))

    # Convolutional policy over raw pixels
    policy = ConvNNPolicy(
        "conv_policy",
        env_spec=mdp.spec,
        conv_filters=(32, 32, 32, 32),
        conv_filter_sizes=((3, 3), (3, 3), (3, 3), (3, 3)),
        conv_strides=(2, 2, 2, 2),
        conv_pads=('same', 'same', 'same', 'same'),
        hidden_sizes=(256,),
    )

    # Value-function baseline with a matching conv architecture
    baseline = GaussianConvBaseline(
        mdp.spec,
        regressor_args={
            'conv_filters': (32, 32, 32, 32),
            'conv_filter_sizes': ((3, 3), (3, 3), (3, 3), (3, 3)),
            'conv_strides': (2, 2, 2, 2),
            'conv_pads': ('same', 'same', 'same', 'same'),
            'hidden_sizes': (256,),
        }
    )

    batch_size = 5000

    algo = TRPO(
        env=mdp,
        policy=policy,
        baseline=baseline,
        batch_size=batch_size,
        whole_paths=True,
        max_path_length=500,
        n_itr=10000,
        step_size=0.01,
        subsample_factor=1.0,
    )

    run_experiment_lite(
        algo.train(),
        exp_prefix='trpo_box3d_pixel',
        n_parallel=1,
        snapshot_mode="last",
        seed=seed,
        mode="local"
    )
|
import os
from rllab.baselines.gaussian_conv_baseline import GaussianConvBaseline
from sandbox.rocky.tf.policies.conv_nn_policy import ConvNNPolicy
from rllab.envs.normalized_env import normalize
from rllab.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
import itertools
stub(globals())
# Param ranges
seeds = range(5)
for seed in seeds:
mdp = NormalizedEnv(env=GymEnv('Box3dReachPixel-v0',record_video=False, \
log_dir='/tmp/gym_test',record_log=False))
policy = ConvNNPolicy(
"conv_policy",
env_spec=mdp.spec,
conv_filters=(32, 32, 32, 32),
conv_filter_sizes=((3,3),(3,3),(3,3),(3,3)),
conv_strides=(2, 2, 2, 2),
conv_pads=('same', 'same', 'same', 'same'),
hidden_sizes=(256,),
)
baseline = GaussianConvBaseline(
mdp.spec,
regressor_args={
'conv_filters':(32, 32, 32, 32),
'conv_filter_sizes':((3,3),(3,3),(3,3),(3,3)),
'conv_strides':(2, 2, 2, 2),
'conv_pads':('same', 'same', 'same', 'same'),
'hidden_sizes':(256,),
}
)
batch_size = 5000
algo = TRPO(
env=mdp,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=500,
n_itr=10000,
step_size=0.01,
subsample_factor=1.0,
)
run_experiment_lite(
algo.train(),
exp_prefix='trpo_box3d_pixel',
n_parallel=1,
snapshot_mode="last",
seed=seed,
mode="local"
)
|
en
| 0.645149
|
# Param ranges
| 1.609636
| 2
|
pdm/cli/commands/build.py
|
nasyxx/pdm
| 0
|
6629109
|
import argparse
from pdm.cli import actions
from pdm.cli.commands.base import BaseCommand
from pdm.cli.options import project_option, verbose_option
from pdm.project import Project
class Command(BaseCommand):
    """Build artifacts for distribution"""

    # Shared CLI options (verbosity and project path) inherited behaviour
    arguments = [verbose_option, project_option]

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register the build-specific flags on *parser*.

        All boolean flags default to True and are switched off by their
        --no-* form (store_false).
        """
        parser.add_argument(
            "--no-sdist",
            dest="sdist",
            default=True,
            action="store_false",
            help="Don't build source tarballs",
        )
        parser.add_argument(
            "--no-wheel",
            dest="wheel",
            default=True,
            action="store_false",
            help="Don't build wheels",
        )
        parser.add_argument(
            "-d", "--dest", default="dist", help="Target directory to put artifacts"
        )
        parser.add_argument(
            "--no-clean",
            dest="clean",
            default=True,
            action="store_false",
            help="Do not clean the target directory",
        )

    def handle(self, project: Project, options: argparse.Namespace) -> None:
        """Entry point: delegate to actions.do_build with the parsed flags."""
        actions.do_build(
            project, options.sdist, options.wheel, options.dest, options.clean
        )
|
import argparse
from pdm.cli import actions
from pdm.cli.commands.base import BaseCommand
from pdm.cli.options import project_option, verbose_option
from pdm.project import Project
class Command(BaseCommand):
"""Build artifacts for distribution"""
arguments = [verbose_option, project_option]
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--no-sdist",
dest="sdist",
default=True,
action="store_false",
help="Don't build source tarballs",
)
parser.add_argument(
"--no-wheel",
dest="wheel",
default=True,
action="store_false",
help="Don't build wheels",
)
parser.add_argument(
"-d", "--dest", default="dist", help="Target directory to put artifacts"
)
parser.add_argument(
"--no-clean",
dest="clean",
default=True,
action="store_false",
help="Do not clean the target directory",
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
actions.do_build(
project, options.sdist, options.wheel, options.dest, options.clean
)
|
en
| 0.781808
|
Build artifacts for distribution
| 2.302287
| 2
|
module3/evaluate_predictions.py
|
axel-sirota/build-automated-ml-azure
| 5
|
6629110
|
import pandas as pd

# Load actual vs predicted energy load; skiprows=1 drops the file's own header
# so the explicit names below are used instead
data = pd.read_csv(filepath_or_buffer='../datasets/energy-pred.csv',
                   names=['Date', 'TZ', 'City', 'Code', 'Load', 'Predicted'], skiprows=1)

# Per-row percentage accuracy: 100 * (1 - relative absolute error)
# NOTE(review): rows with Load == 0 yield inf (not NaN) and survive dropna — confirm
accuracy = 100*(1-abs(data['Load'] - data['Predicted'])/data['Load'])
accuracy.dropna(inplace=True)

# Summary statistics (count/mean/std/quantiles) of the accuracy distribution
print(accuracy.describe())
|
import pandas as pd
data = pd.read_csv(filepath_or_buffer='../datasets/energy-pred.csv',
names=['Date', 'TZ', 'City', 'Code', 'Load', 'Predicted'], skiprows=1)
accuracy = 100*(1-abs(data['Load'] - data['Predicted'])/data['Load'])
accuracy.dropna(inplace=True)
print(accuracy.describe())
|
none
| 1
| 2.841729
| 3
|
|
example.py
|
wmp-game/engine
| 0
|
6629111
|
<reponame>wmp-game/engine<filename>example.py
from engine import Engine
import random
def rnd_pos():
    """Return a random (x, y) position drawn uniformly from [0, 10) x [0, 10)."""
    x = 10 * random.random()
    y = 10 * random.random()
    return (x, y)
class TeamA:
    """Example team whose units all wander to random positions each turn."""

    def __init__(self):
        self.name = 'Team A'

    def warrior(self, battle):
        # battle.this is the unit currently acting; return a move order
        return battle.this.move_to(rnd_pos())

    def mage(self, battle):
        return battle.this.move_to(rnd_pos())

    def priest(self, battle):
        return battle.this.move_to(rnd_pos())
class TeamB:
    """Mirror of TeamA: every unit moves to a random position each turn."""

    def __init__(self):
        self.name = 'Team B'

    def warrior(self, battle):
        # battle.this is the unit currently acting; return a move order
        return battle.this.move_to(rnd_pos())

    def mage(self, battle):
        return battle.this.move_to(rnd_pos())

    def priest(self, battle):
        return battle.this.move_to(rnd_pos())
engine = Engine()
result = engine.start_battle(TeamA(), TeamB(), log_file='example_battle.log')
print(result)
|
from engine import Engine
import random
def rnd_pos():
return (10*random.random(), 10*random.random())
class TeamA:
def __init__(self):
self.name = 'Team A'
def warrior(self, battle):
return battle.this.move_to(rnd_pos())
def mage(self, battle):
return battle.this.move_to(rnd_pos())
def priest(self, battle):
return battle.this.move_to(rnd_pos())
class TeamB:
def __init__(self):
self.name = 'Team B'
def warrior(self, battle):
return battle.this.move_to(rnd_pos())
def mage(self, battle):
return battle.this.move_to(rnd_pos())
def priest(self, battle):
return battle.this.move_to(rnd_pos())
engine = Engine()
result = engine.start_battle(TeamA(), TeamB(), log_file='example_battle.log')
print(result)
|
none
| 1
| 2.913613
| 3
|
|
nitro/resource/config/network/ipv6.py
|
HanseMerkur/nitro-python
| 2
|
6629112
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ipv6(base_resource) :
"""Configuration for ip v6 resource."""
def __init__(self) :
self._ralearning = ""
self._routerredirection = ""
self._ndbasereachtime = 0
self._ndretransmissiontime = 0
self._natprefix = ""
self._td = 0
self._dodad = ""
self._basereachtime = 0
self._reachtime = 0
self._ndreachtime = 0
self._retransmissiontime = 0
self.___count = 0
@property
def ralearning(self) :
"""Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._ralearning
except Exception as e:
raise e
@ralearning.setter
def ralearning(self, ralearning) :
"""Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param ralearning:
"""
try :
self._ralearning = ralearning
except Exception as e:
raise e
@property
def routerredirection(self) :
"""Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._routerredirection
except Exception as e:
raise e
@routerredirection.setter
def routerredirection(self, routerredirection) :
"""Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param routerredirection:
"""
try :
self._routerredirection = routerredirection
except Exception as e:
raise e
@property
def ndbasereachtime(self) :
"""Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1."""
try :
return self._ndbasereachtime
except Exception as e:
raise e
@ndbasereachtime.setter
def ndbasereachtime(self, ndbasereachtime) :
"""Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1
:param ndbasereachtime:
"""
try :
self._ndbasereachtime = ndbasereachtime
except Exception as e:
raise e
@property
def ndretransmissiontime(self) :
"""Retransmission time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1."""
try :
return self._ndretransmissiontime
except Exception as e:
raise e
@ndretransmissiontime.setter
def ndretransmissiontime(self, ndretransmissiontime) :
"""Retransmission time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1
:param ndretransmissiontime:
"""
try :
self._ndretransmissiontime = ndretransmissiontime
except Exception as e:
raise e
@property
def natprefix(self) :
"""Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance."""
try :
return self._natprefix
except Exception as e:
raise e
@natprefix.setter
def natprefix(self, natprefix) :
"""Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance.
:param natprefix:
"""
try :
self._natprefix = natprefix
except Exception as e:
raise e
@property
def td(self) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094."""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
:param td:
"""
try :
self._td = td
except Exception as e:
raise e
@property
def dodad(self) :
"""Enable the NetScaler appliance to do Duplicate Address
Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dodad
except Exception as e:
raise e
@dodad.setter
def dodad(self, dodad) :
"""Enable the NetScaler appliance to do Duplicate Address
Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param dodad:
"""
try :
self._dodad = dodad
except Exception as e:
raise e
@property
def basereachtime(self) :
"""ND6 base reachable time (ms)."""
try :
return self._basereachtime
except Exception as e:
raise e
@property
def reachtime(self) :
"""ND6 computed reachable time (ms)."""
try :
return self._reachtime
except Exception as e:
raise e
@property
def ndreachtime(self) :
"""ND6 computed reachable time (ms)."""
try :
return self._ndreachtime
except Exception as e:
raise e
@property
def retransmissiontime(self) :
"""ND6 retransmission time (ms)."""
try :
return self._retransmissiontime
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ipv6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipv6
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.td is not None :
return str(self.td)
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update ipv6.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = ipv6()
updateresource.ralearning = resource.ralearning
updateresource.routerredirection = resource.routerredirection
updateresource.ndbasereachtime = resource.ndbasereachtime
updateresource.ndretransmissiontime = resource.ndretransmissiontime
updateresource.natprefix = resource.natprefix
updateresource.td = resource.td
updateresource.dodad = resource.dodad
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ralearning = resource[i].ralearning
updateresources[i].routerredirection = resource[i].routerredirection
updateresources[i].ndbasereachtime = resource[i].ndbasereachtime
updateresources[i].ndretransmissiontime = resource[i].ndretransmissiontime
updateresources[i].natprefix = resource[i].natprefix
updateresources[i].td = resource[i].td
updateresources[i].dodad = resource[i].dodad
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of ipv6 resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = ipv6()
if type(resource) != type(unsetresource):
unsetresource.td = resource
else :
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].td = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ipv6 resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ipv6()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipv6()
obj.td = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipv6() for _ in range(len(name))]
obj = [ipv6() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipv6()
obj[i].td = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of ipv6 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipv6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
"""Use this API to count the ipv6 resources configured on NetScaler.
:param client:
"""
try :
obj = ipv6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
"""Use this API to count filtered the set of ipv6 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipv6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Routerredirection:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ralearning:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dodad:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class ipv6_response(base_response) :
""" """
def __init__(self, length=1) :
self.ipv6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipv6 = [ipv6() for _ in range(length)]
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class ipv6(base_resource) :
"""Configuration for ip v6 resource."""
def __init__(self) :
self._ralearning = ""
self._routerredirection = ""
self._ndbasereachtime = 0
self._ndretransmissiontime = 0
self._natprefix = ""
self._td = 0
self._dodad = ""
self._basereachtime = 0
self._reachtime = 0
self._ndreachtime = 0
self._retransmissiontime = 0
self.___count = 0
@property
def ralearning(self) :
"""Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._ralearning
except Exception as e:
raise e
@ralearning.setter
def ralearning(self, ralearning) :
"""Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param ralearning:
"""
try :
self._ralearning = ralearning
except Exception as e:
raise e
@property
def routerredirection(self) :
"""Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._routerredirection
except Exception as e:
raise e
@routerredirection.setter
def routerredirection(self, routerredirection) :
"""Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param routerredirection:
"""
try :
self._routerredirection = routerredirection
except Exception as e:
raise e
@property
def ndbasereachtime(self) :
"""Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1."""
try :
return self._ndbasereachtime
except Exception as e:
raise e
@ndbasereachtime.setter
def ndbasereachtime(self, ndbasereachtime) :
"""Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1
:param ndbasereachtime:
"""
try :
self._ndbasereachtime = ndbasereachtime
except Exception as e:
raise e
@property
def ndretransmissiontime(self) :
"""Retransmission time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1."""
try :
return self._ndretransmissiontime
except Exception as e:
raise e
@ndretransmissiontime.setter
def ndretransmissiontime(self, ndretransmissiontime) :
"""Retransmission time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1
:param ndretransmissiontime:
"""
try :
self._ndretransmissiontime = ndretransmissiontime
except Exception as e:
raise e
@property
def natprefix(self) :
"""Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance."""
try :
return self._natprefix
except Exception as e:
raise e
@natprefix.setter
def natprefix(self, natprefix) :
"""Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance.
:param natprefix:
"""
try :
self._natprefix = natprefix
except Exception as e:
raise e
@property
def td(self) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094."""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
:param td:
"""
try :
self._td = td
except Exception as e:
raise e
@property
def dodad(self) :
"""Enable the NetScaler appliance to do Duplicate Address
Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dodad
except Exception as e:
raise e
@dodad.setter
def dodad(self, dodad) :
"""Enable the NetScaler appliance to do Duplicate Address
Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param dodad:
"""
try :
self._dodad = dodad
except Exception as e:
raise e
@property
def basereachtime(self) :
"""ND6 base reachable time (ms)."""
try :
return self._basereachtime
except Exception as e:
raise e
@property
def reachtime(self) :
"""ND6 computed reachable time (ms)."""
try :
return self._reachtime
except Exception as e:
raise e
@property
def ndreachtime(self) :
"""ND6 computed reachable time (ms)."""
try :
return self._ndreachtime
except Exception as e:
raise e
@property
def retransmissiontime(self) :
"""ND6 retransmission time (ms)."""
try :
return self._retransmissiontime
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(ipv6_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipv6
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
if self.td is not None :
return str(self.td)
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update ipv6.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = ipv6()
updateresource.ralearning = resource.ralearning
updateresource.routerredirection = resource.routerredirection
updateresource.ndbasereachtime = resource.ndbasereachtime
updateresource.ndretransmissiontime = resource.ndretransmissiontime
updateresource.natprefix = resource.natprefix
updateresource.td = resource.td
updateresource.dodad = resource.dodad
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].ralearning = resource[i].ralearning
updateresources[i].routerredirection = resource[i].routerredirection
updateresources[i].ndbasereachtime = resource[i].ndbasereachtime
updateresources[i].ndretransmissiontime = resource[i].ndretransmissiontime
updateresources[i].natprefix = resource[i].natprefix
updateresources[i].td = resource[i].td
updateresources[i].dodad = resource[i].dodad
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of ipv6 resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = ipv6()
if type(resource) != type(unsetresource):
unsetresource.td = resource
else :
unsetresource.td = resource.td
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].td = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ ipv6() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].td = resource[i].td
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the ipv6 resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = ipv6()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipv6()
obj.td = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipv6() for _ in range(len(name))]
obj = [ipv6() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipv6()
obj[i].td = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
"""Use this API to fetch filtered set of ipv6 resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipv6()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
"""Use this API to count the ipv6 resources configured on NetScaler.
:param client:
"""
try :
obj = ipv6()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
"""Use this API to count filtered the set of ipv6 resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
:param client:
:param filter_:
"""
try :
obj = ipv6()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Routerredirection:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Ralearning:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dodad:
""" """
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class ipv6_response(base_response) :
""" """
def __init__(self, length=1) :
self.ipv6 = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipv6 = [ipv6() for _ in range(length)]
|
en
| 0.754842
|
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Configuration for ip v6 resource. Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. Enable the NetScaler appliance to learn about various routes from Router Advertisement (RA) and Router Solicitation (RS) messages sent by the routers.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED :param ralearning: Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. Enable the NetScaler appliance to do Router Redirection.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED :param routerredirection: Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1. Base reachable time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, that the NetScaler appliance assumes an adjacent device is reachable after receiving a reachability confirmation.<br/>Default value: 30000<br/>Minimum length = 1 :param ndbasereachtime: Retransmission time of the Neighbor Discovery (ND6) protocol. 
The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1. Retransmission time of the Neighbor Discovery (ND6) protocol. The time, in milliseconds, between retransmitted Neighbor Solicitation (NS) messages, to an adjacent device.<br/>Default value: 1000<br/>Minimum length = 1 :param ndretransmissiontime: Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance. Prefix used for translating packets from private IPv6 servers to IPv4 packets. This prefix has a length of 96 bits (128-32 = 96). The IPv6 servers embed the destination IP address of the IPv4 servers or hosts in the last 32 bits of the destination IP address field of the IPv6 packets. The first 96 bits of the destination IP address field are set as the IPv6 NAT prefix. IPv6 packets addressed to this prefix have to be routed to the NetScaler appliance to ensure that the IPv6-IPv4 translation is done by the appliance. :param natprefix: Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094. Integer value that uniquely identifies the traffic domain in which you want to configure the entity. 
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094 :param td: Enable the NetScaler appliance to do Duplicate Address Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. Enable the NetScaler appliance to do Duplicate Address Detection (DAD) for all the NetScaler owned IPv6 addresses regardless of whether they are obtained through stateless auto configuration, DHCPv6, or manual configuration.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED :param dodad: ND6 base reachable time (ms). ND6 computed reachable time (ms). ND6 computed reachable time (ms). ND6 retransmission time (ms). converts nitro response into object and returns the object array in case of get request. :param service: :param response: Returns the value of object identifier argument Use this API to update ipv6. :param client: :param resource: Use this API to unset the properties of ipv6 resource. Properties that need to be unset are specified in args array. :param client: :param resource: :param args: Use this API to fetch all the ipv6 resources that are configured on netscaler. :param client: :param name: (Default value = "") :param option_: (Default value = "") Use this API to fetch filtered set of ipv6 resources. filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param client: :param filter_: Use this API to count the ipv6 resources configured on NetScaler. :param client: Use this API to count filtered the set of ipv6 resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param client: :param filter_:
| 1.995015
| 2
|
graph/load_sigvisa_graph.py
|
davmre/sigvisa
| 0
|
6629113
|
<filename>graph/load_sigvisa_graph.py
import time
import numpy as np
import os
from sigvisa import Sigvisa
from sigvisa.database.dataset import read_timerange, read_events, EV_MB_COL, EV_EVID_COL
from sigvisa.database.signal_data import read_fitting_run_iterations
from sigvisa.graph.sigvisa_graph import SigvisaGraph, get_param_model_id, ModelNotFoundError
from sigvisa.graph.graph_utils import create_key
from sigvisa.source.event import get_event
from sigvisa.signals.io import load_event_station_chan, load_segments, fetch_waveform
def load_sg_from_db_fit(fitid, load_wiggles=True):
    """Reconstruct a SigvisaGraph from a stored coda fit.

    Loads the fit row identified by `fitid` from sigvisa_coda_fit,
    fetches and filters the corresponding waveform the same way the
    original fit did, and rebuilds a graph containing the event, a
    wave node, and all fitted phase templates (both associated and
    unassociated "UA" templates).

    :param fitid: primary key of the fit in sigvisa_coda_fit
    :param load_wiggles: not referenced in the visible body -- TODO confirm
    :return: the reconstructed SigvisaGraph
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    # Join the fit with its noise model so the exact noise model (nmid)
    # used when fitting can be restored below.
    fit_sql_query = "select f.runid, f.evid, f.sta, f.chan, f.band, f.hz, f.smooth, f.stime, f.etime, nm.model_type, nm.nmid, f.env from sigvisa_coda_fit f, sigvisa_noise_model nm where f.fitid=%d and f.nmid=nm.nmid" % (fitid)
    cursor.execute(fit_sql_query)
    fit = cursor.fetchone()
    ev = get_event(evid=fit[1])
    # The 'env' column is a char flag: 't' means the fit was done on the
    # signal envelope rather than the raw signal.
    env = fit[11] == 't'
    # Re-apply the same filter chain (band, optional envelope, smoothing,
    # sampling rate) used for the original fit.
    wave = fetch_waveform(fit[2], fit[3], fit[7], fit[8]).filter('%s%s;smooth_%d;hz_%.2f' % (fit[4], ";env" if env else "", fit[6], fit[5]))
    #wave = load_event_station_chan(fit[1], fit[2], fit[3], cursor=cursor, exclude_other_evs=True).filter('%s;env;smooth_%d;hz_%.2f' % (fit[4], fit[6], fit[5]))
    nm_type = fit[9]
    nmid = int(fit[10])
    runid = fit[0]
    phase_sql_query = "select fpid, phase, template_model, arrival_time, peak_offset, coda_height, peak_decay, coda_decay, mult_wiggle_std, wiggle_family from sigvisa_coda_fit_phase where fitid=%d" % fitid
    cursor.execute(phase_sql_query)
    phase_details = cursor.fetchall()
    cursor.close()
    phases = [p[1] for p in phase_details]
    templates = {}      # phase name -> fitted template parameter dict
    tmshapes = {}       # phase name -> template shape model name
    uatemplates = []    # parameter dicts for unassociated ("UA") templates
    wiggle_family = "dummy"
    for (phase, p) in zip(phases, phase_details):
        shape = p[2]
        tparams = {'arrival_time': p[3], 'peak_offset': p[4], 'coda_height': p[5], 'coda_decay': p[7], 'mult_wiggle_std': p[8]}
        # peak_decay is only stored/meaningful for the lin_polyexp shape.
        if p[2] == "lin_polyexp":
            tparams['peak_decay'] = p[6]
        # NOTE(review): wiggle_family is overwritten on every iteration,
        # so the last phase row wins -- presumably all rows of one fit
        # share a single family; confirm.
        wiggle_family = p[-1]
        tmshapes[phase] = shape
        if phase == "UA":
            uatemplates.append(tparams)
        else:
            templates[phase] = tparams
    sg = SigvisaGraph(template_model_type="dummy", wiggle_model_type="dummy",
                      template_shape=tmshapes, wiggle_family=wiggle_family,
                      runids=(runid,), phases=phases,
                      base_srate=wave['srate'], raw_signals = not env)
    # Restore the stored noise model rather than estimating a new one.
    wave_node = sg.add_wave(wave, nmid=nmid, dummy_noise_prior=True)
    sg.add_event(ev)
    for uaparams in uatemplates:
        sg.create_unassociated_template(wave_node, atime=uaparams['arrival_time'], initial_vals=uaparams)
    for phase in templates.keys():
        # mult_wiggle_std only applies to envelope fits; drop it when
        # restoring a raw-signal fit.
        if not env and "mult_wiggle_std" in templates[phase]:
            del templates[phase]["mult_wiggle_std"]
        sg.set_template(eid=ev.eid, sta=wave['sta'], band=wave['band'],
                        chan=wave['chan'], phase=phase,
                        values = templates[phase])
        print "setting template", ev.eid, phase, "to", templates[phase]
    return sg
def register_svgraph_cmdline(parser):
    """Register core SigvisaGraph model-selection options on an optparse
    parser: stations, training run / runid, template and wiggle model
    configuration, and the unassociated-template prior rate."""
    parser.add_option("-s", "--sites", dest="sites", default=None, type="str",
                      help="comma-separated list of stations with which to locate the event")
    parser.add_option("-r", "--run_name", dest="run_name", default=None, type="str",
                      help="name of training run specifying the set of models to use")
    parser.add_option("--runid", dest="runid", default=None, type="str",
                      help="runid of training run specifying the set of models to use")
    parser.add_option(
        "--template_shape", dest="template_shape", default="lin_polyexp", type="str", help="template model type (lin_polyexp)")
    parser.add_option(
        "--phases", dest="phases", default="auto", help="comma-separated list of phases to include in predicted templates (auto)")
    parser.add_option(
        "--template_model_types", dest="tm_types", default="param",
        help="comma-separated list of param:model_type mappings (peak_offset:constant_gaussian,coda_height:constant_gaussian,coda_decay:constant_gaussian)")
    parser.add_option("--wiggle_model_type", dest="wm_type", default="dummy", help = "")
    parser.add_option("--wiggle_family", dest="wiggle_family", default="dummy", help = "")
    parser.add_option("--dummy_fallback", dest="dummy_fallback", default=False, action="store_true",
                      help="fall back to a dummy model instead of throwing an error if no model for the parameter exists in the database (False)")
    parser.add_option("--arrays_joint", dest="arrays_joint", default=False, action="store_true",
                      help="model array stations with joint nodes (False)")
    parser.add_option("--absorb_n_phases", dest="absorb_n_phases", default=False, action="store_true",
                      help="model Pn arrivals as P (false)")
    parser.add_option("--uatemplate_rate", dest="uatemplate_rate", default=1e-6, type=float, help="Poisson rate (per-second) for unassociated template prior (1e-6)")
def register_svgraph_signal_cmdline(parser):
    """Register signal-loading options: sampling rate, smoothing,
    channels/bands, the time window (explicit or dataset-relative),
    and how events are initialized (LEB, evid list, or synthetic)."""
    parser.add_option("--hz", dest="hz", default=5, type=float, help="downsample signals to a given sampling rate, in hz (5)")
    parser.add_option("--smooth", dest="smooth", default=None, type=int, help="perform the given level of smoothing")
    parser.add_option("--chans", dest="chans", default="auto", type="str",
                      help="comma-separated list of channel names to use for inference (auto)")
    parser.add_option("--bands", dest="bands", default="freq_2.0_3.0", type="str",
                      help="comma-separated list of band names to use for inference (freq_2.0_3.0)")
    parser.add_option("--array_refsta_only", dest="refsta_only", default=True, action="store_false",
                      help="load only the reference station for each array site (True)")
    parser.add_option("--start_time", dest="start_time", default=None, type="float",
                      help="load signals beginning at this UNIX time (None)")
    parser.add_option("--end_time", dest="end_time", default=None, type="float",
                      help="load signals end at this UNIX time (None)")
    parser.add_option("--dataset", dest="dataset", default="training", type="str",
                      help="if start_time and end_time not specified, load signals from the time period of the specified dataset (training)")
    parser.add_option("--hour", dest="hour", default=0, type="float",
                      help="start at a particular hour of the given dataset (0)")
    parser.add_option("--len_hours", dest="len_hours", default=1, type="float",
                      help="load this many hours from the given dateset")
    parser.add_option("--initialize_leb", dest="initialize_leb", default="no", type="str",
                      help="use LEB events to set the intial state. options are 'no', 'yes', 'perturb' to initialize with locations randomly perturbed by ~5 degrees, or 'count' to initialize with a set of completely random events, having the same count as the LEB events ")
    parser.add_option("--initialize_evids", dest="initialize_evids", default=None, type="str",
                      help="initialize with a specified list of LEB evids")
    parser.add_option("--synth", dest="synth", default=False, action="store_true")
def register_svgraph_event_based_signal_cmdline(parser):
    """Register options for loading signals around a single known event
    (selected by --evid) rather than a free time window."""
    parser.add_option("-e", "--evid", dest="evid", default=None, type="int", help="event ID to locate")
    parser.add_option("--hz", dest="hz", default=5, type=float, help="downsample signals to a given sampling rate, in hz (5)")
    parser.add_option("--smooth", dest="smooth", default=None, type=int, help="perform the given level of smoothing")
    parser.add_option("--chans", dest="chans", default="auto", type="str",
                      help="comma-separated list of channel names to use for inference (auto)")
    parser.add_option("--bands", dest="bands", default="freq_2.0_3.0", type="str",
                      help="comma-separated list of band names to use for inference (freq_2.0_3.0)")
    parser.add_option("--array_refsta_only", dest="refsta_only", default=True, action="store_false",
                      help="load only the reference station for each array site (True)")
def setup_svgraph_from_cmdline(options, args):
    """Construct a SigvisaGraph from parsed command-line options.

    Resolves the training runid(s) (latest iteration of --run_name when
    --runid is absent), expands the "param" shorthand for template model
    types, and parses the phase list.

    :param options: optparse values as registered by register_svgraph_cmdline
    :param args: positional args (not referenced in the visible body)
    :return: a configured SigvisaGraph
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    if options.runid is None:
        # No explicit runid: take the most recent iteration of the named run.
        run_name = options.run_name
        iters = np.array(sorted(list(read_fitting_run_iterations(cursor, run_name))))
        run_iter, runid = iters[-1, :]
        runids = (runid,)
    else:
        runids = tuple(int(ss) for ss in options.runid.split(","))
    tm_type_str = options.tm_types
    if tm_type_str == "param":
        # Shorthand: expand to the standard per-parameter model mapping.
        tm_type_str = "tt_residual:constant_laplacian,peak_offset:param_linear_mb,amp_transfer:param_sin1,coda_decay:param_linear_distmb,peak_decay:param_linear_distmb,mult_wiggle_std:constant_beta"
    tm_types = {}
    if ',' in tm_type_str:
        for p in tm_type_str.split(','):
            (param, model_type) = p.strip().split(':')
            tm_types[param] = model_type
    else:
        # A comma-free string (e.g. "dummy") is passed through unparsed.
        # NOTE(review): a single "param:model" pair without a comma is
        # NOT parsed into a dict here -- confirm this is intended.
        tm_types = tm_type_str
    if options.phases in ("auto", "leb"):
        phases = options.phases
    else:
        phases = options.phases.split(',')
    cursor.close()
    sg = SigvisaGraph(template_shape = options.template_shape, template_model_type = tm_types,
                      wiggle_family = options.wiggle_family, wiggle_model_type = options.wm_type,
                      dummy_fallback = options.dummy_fallback,
                      runids=runids, phases=phases, gpmodel_build_trees=False, arrays_joint=options.arrays_joint,
                      absorb_n_phases=options.absorb_n_phases, uatemplate_rate=options.uatemplate_rate)
    return sg
def load_signals_from_cmdline(sg, options, args):
    """Load waveform segments into the graph and optionally initialize events.

    Loads signals for the requested stations/bands/channels over the
    requested time window, adds a wave node for each channel that has a
    trained amp_transfer model, and initializes events (LEB, explicit
    evid list, or synthetic) according to the options.

    :param sg: SigvisaGraph to populate
    :param options: optparse values from register_svgraph_signal_cmdline
    :param args: positional args (not referenced in the visible body)
    :return: the list of events used for initialization
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    sites = options.sites.split(',')
    stas = s.sites_to_stas(sites, refsta_only=options.refsta_only)
    if options.start_time is not None and options.end_time is not None:
        stime = options.start_time
        etime = options.end_time
    else:
        # Fall back to the dataset's time range, offset by --hour and
        # limited to --len_hours.
        print "loading signals from dataset %s" % options.dataset
        (stime, etime) = read_timerange(cursor, options.dataset, hours=None, skip=0)
        stime += options.hour * 3600
        etime = stime + options.len_hours*3600.0
    print "loading signals from stime %.1f through etime %.1f" % (stime, etime)
    if options.bands == "all":
        bands = s.bands
    else:
        bands = options.bands.split(',')
    if options.chans == "all":
        chans = s.chans
    else:
        chans = options.chans.split(',')
    segments = load_segments(cursor, stas, stime, etime, chans = chans)
    # Envelope + resampling filter applied uniformly to every segment.
    segments = [seg.with_filter('env;hz_%.3f' % options.hz) for seg in segments]
    n_waves = 0
    for seg in segments:
        for band in bands:
            filtered_seg = seg.with_filter(band)
            if options.smooth is not None:
                filtered_seg = filtered_seg.with_filter("smooth_%d" % options.smooth)
            for chan in filtered_seg.get_chans():
                # Only add channels for which an amp_transfer model was
                # trained; the graph could not score the others.
                try:
                    modelid = get_param_model_id(sg.runids, seg['sta'], 'P', sg._tm_type('amp_transfer', site=seg['sta']), 'amp_transfer', options.template_shape, chan=chan, band=band)
                except ModelNotFoundError as e:
                    print "couldn't find amp_transfer model for %s,%s,%s, so not adding to graph." % (seg['sta'], chan, band), e
                    continue
                wave = filtered_seg[chan]
                wn = sg.add_wave(wave)
                n_waves += 1
    assert(n_waves > 0)
    if options.initialize_evids is not None:
        evids = [int(evid) for evid in options.initialize_evids.split(",")]
        evs = [get_event(evid=evid) for evid in evids]
    else:
        evs = get_leb_events(sg, cursor)
    if options.initialize_leb != "no" or options.initialize_evids is not None or options.synth:
        if options.initialize_leb == "yes" or options.initialize_evids is not None or options.synth:
            for ev in evs:
                print "initializing with event", ev
                sg.add_event(ev, fixed=options.synth)
        elif options.initialize_leb=="perturb":
            raise NotImplementedError("not implemented!")
        elif options.initialize_leb=="count":
            # NOTE(review): st, et, and events are not defined anywhere in
            # this function -- this branch looks like it would raise
            # NameError if taken; confirm before relying on "count" mode.
            evs = sg.prior_sample_events(stime=st, etime=et, n_events=len(events))
        else:
            raise Exception("unrecognized argument initialize_leb=%s" % options.initialize_leb)
    if options.synth:
        # Synthetic mode: sample signals from the prior given the fixed
        # events, then re-fix the sampled waves as observations.
        for (sta, wns) in sg.station_waves.items():
            for wn in wns:
                wn.unfix_value()
        sg.parent_sample_all()
        for (sta, wns) in sg.station_waves.items():
            for wn in wns:
                wn.fix_value()
        eids = sg.evnodes.keys()
        for eid in eids:
            if options.initialize_leb=="no":
                sg.remove_event(eid)
            else:
                for evnode in sg.evnodes[eid].values():
                    evnode.unfix_value()
    cursor.close()
    return evs
def get_leb_events(sg, cursor):
    """Fetch LEB events overlapping the graph's time window.

    Reads LEB origins between sg.start_time and sg.end_time, keeps only
    those with mb > 2, and returns Event objects with eids assigned
    sequentially starting from 1.
    """
    window_start = sg.start_time
    window_end = sg.end_time
    raw_events, _orid2num = read_events(cursor, window_start, window_end, 'leb')
    strong = [arr for arr in raw_events if arr[EV_MB_COL] > 2]
    result = []
    for idx, arr in enumerate(strong):
        event = get_event(evid=arr[EV_EVID_COL])
        event.eid = idx + 1
        result.append(event)
    return result
def load_event_based_signals_from_cmdline(sg, options, args):
    """Load signals around a single known event and add it to the graph.

    :param sg: SigvisaGraph to populate
    :param options: optparse values (requires --evid)
    :param args: positional args (not referenced in the visible body)
    :return: the event nodes created by sg.add_event
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    evid = options.evid
    ev_true = get_event(evid=evid)
    sites = options.sites.split(',')
    stas = s.sites_to_stas(sites, options.refsta_only)
    if options.bands == "all":
        bands = s.bands
    else:
        bands = options.bands.split(',')
    if options.chans == "all":
        chans = s.chans
    else:
        chans = options.chans.split(',')
    # inference is based on segments from all specified stations,
    # starting at the min predicted arrival time (for the true event)
    # minus 60s, and ending at the max predicted arrival time plus
    # 240s
    # NOTE(review): neither tt_predict nor itertools appears in this
    # file's visible import block -- confirm they are imported elsewhere,
    # otherwise this line raises NameError.
    statimes = [ev_true.time + tt_predict(event=ev_true, sta=sta, phase=phase) for (sta, phase) in itertools.product(sites, s.phases)]
    stime = np.min(statimes) - 60
    etime = np.max(statimes) + 240
    segments = load_segments(cursor, stas, stime, etime, chans = chans)
    segments = [seg.with_filter('env;hz_%.3f' % options.hz) for seg in segments]
    for seg in segments:
        for band in bands:
            filtered_seg = seg.with_filter(band)
            if options.smooth is not None:
                filtered_seg = filtered_seg.with_filter("smooth_%d" % options.smooth)
            for chan in filtered_seg.get_chans():
                wave = filtered_seg[chan]
                sg.add_wave(wave)
    evnodes = sg.add_event(ev_true)
    cursor.close()
    return evnodes
|
<filename>graph/load_sigvisa_graph.py
import time
import numpy as np
import os
from sigvisa import Sigvisa
from sigvisa.database.dataset import read_timerange, read_events, EV_MB_COL, EV_EVID_COL
from sigvisa.database.signal_data import read_fitting_run_iterations
from sigvisa.graph.sigvisa_graph import SigvisaGraph, get_param_model_id, ModelNotFoundError
from sigvisa.graph.graph_utils import create_key
from sigvisa.source.event import get_event
from sigvisa.signals.io import load_event_station_chan, load_segments, fetch_waveform
def load_sg_from_db_fit(fitid, load_wiggles=True):
    """Reconstruct a SigvisaGraph from a stored coda fit.

    Loads the fit row identified by `fitid` from sigvisa_coda_fit,
    fetches and filters the corresponding waveform the same way the
    original fit did, and rebuilds a graph containing the event, a
    wave node, and all fitted phase templates (both associated and
    unassociated "UA" templates).

    :param fitid: primary key of the fit in sigvisa_coda_fit
    :param load_wiggles: not referenced in the visible body -- TODO confirm
    :return: the reconstructed SigvisaGraph
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    # Join the fit with its noise model so the exact noise model (nmid)
    # used when fitting can be restored below.
    fit_sql_query = "select f.runid, f.evid, f.sta, f.chan, f.band, f.hz, f.smooth, f.stime, f.etime, nm.model_type, nm.nmid, f.env from sigvisa_coda_fit f, sigvisa_noise_model nm where f.fitid=%d and f.nmid=nm.nmid" % (fitid)
    cursor.execute(fit_sql_query)
    fit = cursor.fetchone()
    ev = get_event(evid=fit[1])
    # The 'env' column is a char flag: 't' means the fit was done on the
    # signal envelope rather than the raw signal.
    env = fit[11] == 't'
    # Re-apply the same filter chain (band, optional envelope, smoothing,
    # sampling rate) used for the original fit.
    wave = fetch_waveform(fit[2], fit[3], fit[7], fit[8]).filter('%s%s;smooth_%d;hz_%.2f' % (fit[4], ";env" if env else "", fit[6], fit[5]))
    #wave = load_event_station_chan(fit[1], fit[2], fit[3], cursor=cursor, exclude_other_evs=True).filter('%s;env;smooth_%d;hz_%.2f' % (fit[4], fit[6], fit[5]))
    nm_type = fit[9]
    nmid = int(fit[10])
    runid = fit[0]
    phase_sql_query = "select fpid, phase, template_model, arrival_time, peak_offset, coda_height, peak_decay, coda_decay, mult_wiggle_std, wiggle_family from sigvisa_coda_fit_phase where fitid=%d" % fitid
    cursor.execute(phase_sql_query)
    phase_details = cursor.fetchall()
    cursor.close()
    phases = [p[1] for p in phase_details]
    templates = {}      # phase name -> fitted template parameter dict
    tmshapes = {}       # phase name -> template shape model name
    uatemplates = []    # parameter dicts for unassociated ("UA") templates
    wiggle_family = "dummy"
    for (phase, p) in zip(phases, phase_details):
        shape = p[2]
        tparams = {'arrival_time': p[3], 'peak_offset': p[4], 'coda_height': p[5], 'coda_decay': p[7], 'mult_wiggle_std': p[8]}
        # peak_decay is only stored/meaningful for the lin_polyexp shape.
        if p[2] == "lin_polyexp":
            tparams['peak_decay'] = p[6]
        # NOTE(review): wiggle_family is overwritten on every iteration,
        # so the last phase row wins -- presumably all rows of one fit
        # share a single family; confirm.
        wiggle_family = p[-1]
        tmshapes[phase] = shape
        if phase == "UA":
            uatemplates.append(tparams)
        else:
            templates[phase] = tparams
    sg = SigvisaGraph(template_model_type="dummy", wiggle_model_type="dummy",
                      template_shape=tmshapes, wiggle_family=wiggle_family,
                      runids=(runid,), phases=phases,
                      base_srate=wave['srate'], raw_signals = not env)
    # Restore the stored noise model rather than estimating a new one.
    wave_node = sg.add_wave(wave, nmid=nmid, dummy_noise_prior=True)
    sg.add_event(ev)
    for uaparams in uatemplates:
        sg.create_unassociated_template(wave_node, atime=uaparams['arrival_time'], initial_vals=uaparams)
    for phase in templates.keys():
        # mult_wiggle_std only applies to envelope fits; drop it when
        # restoring a raw-signal fit.
        if not env and "mult_wiggle_std" in templates[phase]:
            del templates[phase]["mult_wiggle_std"]
        sg.set_template(eid=ev.eid, sta=wave['sta'], band=wave['band'],
                        chan=wave['chan'], phase=phase,
                        values = templates[phase])
        print "setting template", ev.eid, phase, "to", templates[phase]
    return sg
def register_svgraph_cmdline(parser):
    """Register core SigvisaGraph model-selection options on an optparse
    parser: stations, training run / runid, template and wiggle model
    configuration, and the unassociated-template prior rate."""
    parser.add_option("-s", "--sites", dest="sites", default=None, type="str",
                      help="comma-separated list of stations with which to locate the event")
    parser.add_option("-r", "--run_name", dest="run_name", default=None, type="str",
                      help="name of training run specifying the set of models to use")
    parser.add_option("--runid", dest="runid", default=None, type="str",
                      help="runid of training run specifying the set of models to use")
    parser.add_option(
        "--template_shape", dest="template_shape", default="lin_polyexp", type="str", help="template model type (lin_polyexp)")
    parser.add_option(
        "--phases", dest="phases", default="auto", help="comma-separated list of phases to include in predicted templates (auto)")
    parser.add_option(
        "--template_model_types", dest="tm_types", default="param",
        help="comma-separated list of param:model_type mappings (peak_offset:constant_gaussian,coda_height:constant_gaussian,coda_decay:constant_gaussian)")
    parser.add_option("--wiggle_model_type", dest="wm_type", default="dummy", help = "")
    parser.add_option("--wiggle_family", dest="wiggle_family", default="dummy", help = "")
    parser.add_option("--dummy_fallback", dest="dummy_fallback", default=False, action="store_true",
                      help="fall back to a dummy model instead of throwing an error if no model for the parameter exists in the database (False)")
    parser.add_option("--arrays_joint", dest="arrays_joint", default=False, action="store_true",
                      help="model array stations with joint nodes (False)")
    parser.add_option("--absorb_n_phases", dest="absorb_n_phases", default=False, action="store_true",
                      help="model Pn arrivals as P (false)")
    parser.add_option("--uatemplate_rate", dest="uatemplate_rate", default=1e-6, type=float, help="Poisson rate (per-second) for unassociated template prior (1e-6)")
def register_svgraph_signal_cmdline(parser):
    """Register signal-loading options: sampling rate, smoothing,
    channels/bands, the time window (explicit or dataset-relative),
    and how events are initialized (LEB, evid list, or synthetic)."""
    parser.add_option("--hz", dest="hz", default=5, type=float, help="downsample signals to a given sampling rate, in hz (5)")
    parser.add_option("--smooth", dest="smooth", default=None, type=int, help="perform the given level of smoothing")
    parser.add_option("--chans", dest="chans", default="auto", type="str",
                      help="comma-separated list of channel names to use for inference (auto)")
    parser.add_option("--bands", dest="bands", default="freq_2.0_3.0", type="str",
                      help="comma-separated list of band names to use for inference (freq_2.0_3.0)")
    parser.add_option("--array_refsta_only", dest="refsta_only", default=True, action="store_false",
                      help="load only the reference station for each array site (True)")
    parser.add_option("--start_time", dest="start_time", default=None, type="float",
                      help="load signals beginning at this UNIX time (None)")
    parser.add_option("--end_time", dest="end_time", default=None, type="float",
                      help="load signals end at this UNIX time (None)")
    parser.add_option("--dataset", dest="dataset", default="training", type="str",
                      help="if start_time and end_time not specified, load signals from the time period of the specified dataset (training)")
    parser.add_option("--hour", dest="hour", default=0, type="float",
                      help="start at a particular hour of the given dataset (0)")
    parser.add_option("--len_hours", dest="len_hours", default=1, type="float",
                      help="load this many hours from the given dateset")
    parser.add_option("--initialize_leb", dest="initialize_leb", default="no", type="str",
                      help="use LEB events to set the intial state. options are 'no', 'yes', 'perturb' to initialize with locations randomly perturbed by ~5 degrees, or 'count' to initialize with a set of completely random events, having the same count as the LEB events ")
    parser.add_option("--initialize_evids", dest="initialize_evids", default=None, type="str",
                      help="initialize with a specified list of LEB evids")
    parser.add_option("--synth", dest="synth", default=False, action="store_true")
def register_svgraph_event_based_signal_cmdline(parser):
    """Register options for loading signals around a single known event
    (selected by --evid) rather than a free time window."""
    parser.add_option("-e", "--evid", dest="evid", default=None, type="int", help="event ID to locate")
    parser.add_option("--hz", dest="hz", default=5, type=float, help="downsample signals to a given sampling rate, in hz (5)")
    parser.add_option("--smooth", dest="smooth", default=None, type=int, help="perform the given level of smoothing")
    parser.add_option("--chans", dest="chans", default="auto", type="str",
                      help="comma-separated list of channel names to use for inference (auto)")
    parser.add_option("--bands", dest="bands", default="freq_2.0_3.0", type="str",
                      help="comma-separated list of band names to use for inference (freq_2.0_3.0)")
    parser.add_option("--array_refsta_only", dest="refsta_only", default=True, action="store_false",
                      help="load only the reference station for each array site (True)")
def setup_svgraph_from_cmdline(options, args):
    """Construct a SigvisaGraph from parsed command-line options.

    Resolves the training runid(s) (latest iteration of --run_name when
    --runid is absent), expands the "param" shorthand for template model
    types, and parses the phase list.

    :param options: optparse values as registered by register_svgraph_cmdline
    :param args: positional args (not referenced in the visible body)
    :return: a configured SigvisaGraph
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    if options.runid is None:
        # No explicit runid: take the most recent iteration of the named run.
        run_name = options.run_name
        iters = np.array(sorted(list(read_fitting_run_iterations(cursor, run_name))))
        run_iter, runid = iters[-1, :]
        runids = (runid,)
    else:
        runids = tuple(int(ss) for ss in options.runid.split(","))
    tm_type_str = options.tm_types
    if tm_type_str == "param":
        # Shorthand: expand to the standard per-parameter model mapping.
        tm_type_str = "tt_residual:constant_laplacian,peak_offset:param_linear_mb,amp_transfer:param_sin1,coda_decay:param_linear_distmb,peak_decay:param_linear_distmb,mult_wiggle_std:constant_beta"
    tm_types = {}
    if ',' in tm_type_str:
        for p in tm_type_str.split(','):
            (param, model_type) = p.strip().split(':')
            tm_types[param] = model_type
    else:
        # A comma-free string (e.g. "dummy") is passed through unparsed.
        # NOTE(review): a single "param:model" pair without a comma is
        # NOT parsed into a dict here -- confirm this is intended.
        tm_types = tm_type_str
    if options.phases in ("auto", "leb"):
        phases = options.phases
    else:
        phases = options.phases.split(',')
    cursor.close()
    sg = SigvisaGraph(template_shape = options.template_shape, template_model_type = tm_types,
                      wiggle_family = options.wiggle_family, wiggle_model_type = options.wm_type,
                      dummy_fallback = options.dummy_fallback,
                      runids=runids, phases=phases, gpmodel_build_trees=False, arrays_joint=options.arrays_joint,
                      absorb_n_phases=options.absorb_n_phases, uatemplate_rate=options.uatemplate_rate)
    return sg
def load_signals_from_cmdline(sg, options, args):
    """Load waveform segments into the graph and optionally initialize events.

    Loads signals for the requested stations/bands/channels over the
    requested time window, adds a wave node for each channel that has a
    trained amp_transfer model, and initializes events (LEB, explicit
    evid list, or synthetic) according to the options.

    :param sg: SigvisaGraph to populate
    :param options: optparse values from register_svgraph_signal_cmdline
    :param args: positional args (not referenced in the visible body)
    :return: the list of events used for initialization
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    sites = options.sites.split(',')
    stas = s.sites_to_stas(sites, refsta_only=options.refsta_only)
    if options.start_time is not None and options.end_time is not None:
        stime = options.start_time
        etime = options.end_time
    else:
        # Fall back to the dataset's time range, offset by --hour and
        # limited to --len_hours.
        print "loading signals from dataset %s" % options.dataset
        (stime, etime) = read_timerange(cursor, options.dataset, hours=None, skip=0)
        stime += options.hour * 3600
        etime = stime + options.len_hours*3600.0
    print "loading signals from stime %.1f through etime %.1f" % (stime, etime)
    if options.bands == "all":
        bands = s.bands
    else:
        bands = options.bands.split(',')
    if options.chans == "all":
        chans = s.chans
    else:
        chans = options.chans.split(',')
    segments = load_segments(cursor, stas, stime, etime, chans = chans)
    # Envelope + resampling filter applied uniformly to every segment.
    segments = [seg.with_filter('env;hz_%.3f' % options.hz) for seg in segments]
    n_waves = 0
    for seg in segments:
        for band in bands:
            filtered_seg = seg.with_filter(band)
            if options.smooth is not None:
                filtered_seg = filtered_seg.with_filter("smooth_%d" % options.smooth)
            for chan in filtered_seg.get_chans():
                # Only add channels for which an amp_transfer model was
                # trained; the graph could not score the others.
                try:
                    modelid = get_param_model_id(sg.runids, seg['sta'], 'P', sg._tm_type('amp_transfer', site=seg['sta']), 'amp_transfer', options.template_shape, chan=chan, band=band)
                except ModelNotFoundError as e:
                    print "couldn't find amp_transfer model for %s,%s,%s, so not adding to graph." % (seg['sta'], chan, band), e
                    continue
                wave = filtered_seg[chan]
                wn = sg.add_wave(wave)
                n_waves += 1
    assert(n_waves > 0)
    if options.initialize_evids is not None:
        evids = [int(evid) for evid in options.initialize_evids.split(",")]
        evs = [get_event(evid=evid) for evid in evids]
    else:
        evs = get_leb_events(sg, cursor)
    if options.initialize_leb != "no" or options.initialize_evids is not None or options.synth:
        if options.initialize_leb == "yes" or options.initialize_evids is not None or options.synth:
            for ev in evs:
                print "initializing with event", ev
                sg.add_event(ev, fixed=options.synth)
        elif options.initialize_leb=="perturb":
            raise NotImplementedError("not implemented!")
        elif options.initialize_leb=="count":
            # NOTE(review): st, et, and events are not defined anywhere in
            # this function -- this branch looks like it would raise
            # NameError if taken; confirm before relying on "count" mode.
            evs = sg.prior_sample_events(stime=st, etime=et, n_events=len(events))
        else:
            raise Exception("unrecognized argument initialize_leb=%s" % options.initialize_leb)
    if options.synth:
        # Synthetic mode: sample signals from the prior given the fixed
        # events, then re-fix the sampled waves as observations.
        for (sta, wns) in sg.station_waves.items():
            for wn in wns:
                wn.unfix_value()
        sg.parent_sample_all()
        for (sta, wns) in sg.station_waves.items():
            for wn in wns:
                wn.fix_value()
        eids = sg.evnodes.keys()
        for eid in eids:
            if options.initialize_leb=="no":
                sg.remove_event(eid)
            else:
                for evnode in sg.evnodes[eid].values():
                    evnode.unfix_value()
    cursor.close()
    return evs
def get_leb_events(sg, cursor):
    """Fetch LEB events overlapping the graph's time window.

    Reads LEB origins between sg.start_time and sg.end_time, keeps only
    those with mb > 2, and returns Event objects with eids assigned
    sequentially starting from 1.
    """
    window_start = sg.start_time
    window_end = sg.end_time
    raw_events, _orid2num = read_events(cursor, window_start, window_end, 'leb')
    strong = [arr for arr in raw_events if arr[EV_MB_COL] > 2]
    result = []
    for idx, arr in enumerate(strong):
        event = get_event(evid=arr[EV_EVID_COL])
        event.eid = idx + 1
        result.append(event)
    return result
def load_event_based_signals_from_cmdline(sg, options, args):
    """Load signals around a single known event and add it to the graph.

    :param sg: SigvisaGraph to populate
    :param options: optparse values (requires --evid)
    :param args: positional args (not referenced in the visible body)
    :return: the event nodes created by sg.add_event
    """
    s = Sigvisa()
    cursor = s.dbconn.cursor()
    evid = options.evid
    ev_true = get_event(evid=evid)
    sites = options.sites.split(',')
    stas = s.sites_to_stas(sites, options.refsta_only)
    if options.bands == "all":
        bands = s.bands
    else:
        bands = options.bands.split(',')
    if options.chans == "all":
        chans = s.chans
    else:
        chans = options.chans.split(',')
    # inference is based on segments from all specified stations,
    # starting at the min predicted arrival time (for the true event)
    # minus 60s, and ending at the max predicted arrival time plus
    # 240s
    # NOTE(review): neither tt_predict nor itertools appears in this
    # file's visible import block -- confirm they are imported elsewhere,
    # otherwise this line raises NameError.
    statimes = [ev_true.time + tt_predict(event=ev_true, sta=sta, phase=phase) for (sta, phase) in itertools.product(sites, s.phases)]
    stime = np.min(statimes) - 60
    etime = np.max(statimes) + 240
    segments = load_segments(cursor, stas, stime, etime, chans = chans)
    segments = [seg.with_filter('env;hz_%.3f' % options.hz) for seg in segments]
    for seg in segments:
        for band in bands:
            filtered_seg = seg.with_filter(band)
            if options.smooth is not None:
                filtered_seg = filtered_seg.with_filter("smooth_%d" % options.smooth)
            for chan in filtered_seg.get_chans():
                wave = filtered_seg[chan]
                sg.add_wave(wave)
    evnodes = sg.add_event(ev_true)
    cursor.close()
    return evnodes
|
en
| 0.616662
|
#wave = load_event_station_chan(fit[1], fit[2], fit[3], cursor=cursor, exclude_other_evs=True).filter('%s;env;smooth_%d;hz_%.2f' % (fit[4], fit[6], fit[5])) # inference is based on segments from all specified stations, # starting at the min predicted arrival time (for the true event) # minus 60s, and ending at the max predicted arrival time plus # 240s
| 2.09215
| 2
|
application.py
|
Jason-Gew/BLE-Beacon-Tracking-System
| 19
|
6629114
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Bluetooth LE Beacon Tracking Client Main Entry
# Support Multiple Beacon Scan Mode, Frequency
__author__ = 'Jason/GeW'
__version__ = '2.0.0'
import json
import time
import queue
import threading
import command_parser
import beacon_scanner
from app_logger import Logger
from mqtt_util import MqttUtil
from mqtt_util import MqttMsg
from configuration import AppConfig
from typing import Optional, Callable, Any, Iterable, Mapping
# -------- Static Variables Start --------
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S %z"
CONFIG_PATH = 'config/app-config.ini'
COMMAND_PATH = 'config/commands.json'
COMMAND_KEY = 'command'
TRACE_KEY = 'trace'
APP_CONFIG = None
SHUTDOWN = False
# Max Beacon Scan Period (Unit: Second)
MAX_SCAN_PERIOD = 60
# Max Single Beacon Scan Duration (Unit: Second)
MAX_SCAN_DURATION = 10
# -------- Static Variables End --------
log = Logger('logs/app.log').logger
def get_timestamp():
    """Return the current local time formatted per DATETIME_FORMAT."""
    now = time.localtime()
    return time.strftime(DATETIME_FORMAT, now)
def welcome():
    """Print the startup banner, then pause briefly so it is readable."""
    banner_lines = (
        "\n+---------------------------------------------------------------------+\n",
        "+ Bluetooth LE Beacon Tracking Client +",
        "\n+--------------------------- Version {} ---------------------------+\n".format(__version__),
    )
    for line in banner_lines:
        print(line)
    time.sleep(0.5)
class CommandListener(threading.Thread):
    """
    CommandListener for cmd process from MQTT Subscription

    Polls the shared message queue for JSON commands received over MQTT
    and dispatches them: status, shutdown, beacon-scan/scan,
    change-scan-mode, and show-config. Runs until stop is set or a valid
    shutdown command arrives.
    """
    # Shared queue of raw MQTT messages (filled elsewhere by MqttUtil).
    msg_queue = None
    # MqttUtil instance used to publish responses.
    mqtt_client = None
    # Cooperative stop flag checked by the run() loop.
    stop = False

    def __init__(self, thread_name, msg_queue, mqtt_client) -> None:
        """Bind the message queue and a validated MqttUtil to this thread."""
        self.msg_queue = msg_queue
        if mqtt_client is None or not isinstance(mqtt_client, MqttUtil):
            raise TypeError("Invalid MqttUtil")
        else:
            self.mqtt_client = mqtt_client
        threading.Thread.__init__(self, name=thread_name)

    def run(self) -> None:
        """Poll the queue twice per second and process each JSON command."""
        global APP_CONFIG, SHUTDOWN
        while not self.stop:
            if not self.msg_queue.empty():
                message = None
                try:
                    msg = dict(self.msg_queue.get())
                    message = msg.get('message')
                    cmd_dict = json.loads(message)
                    if COMMAND_KEY in cmd_dict.keys():
                        cmd = cmd_dict.get(COMMAND_KEY)
                        if 'status' == cmd:
                            # Publish current system status back to the broker.
                            system_status = json.dumps(command_parser.show_system_status())
                            self.mqtt_client.publish(system_status)
                            log.info("Published System Status: " + system_status)
                        elif 'shutdown' == cmd:
                            log.info("Received Shutdown Command: " + message)
                            # Require a trace id (> 3 chars) so shutdowns are auditable.
                            if cmd_dict.get(TRACE_KEY) is None or len(str(cmd_dict.get(TRACE_KEY))) <= 3:
                                log.warning("Shutdown Command Does Not Have Valid Trace")
                                continue
                            self.stop = True
                            client_message = MqttMsg('String', cmd, 'System Will Shutdown After 3 Seconds',
                                                     cmd_dict.get(TRACE_KEY))
                            self.mqtt_client.publish(client_message.to_json())
                            SHUTDOWN = True
                            time.sleep(3)
                            self.mqtt_client.disconnect()
                            break
                        elif ('beacon-scan' == cmd or 'scan' == cmd) and cmd_dict.get(TRACE_KEY) is not None:
                            scan_duration = cmd_dict.get('duration')
                            log.info("Received Manual Beacon-Scan Requirement, Duration={}".format(scan_duration))
                            # Use the requested duration only if it is sane;
                            # otherwise fall back to the scanner's default.
                            if scan_duration is not None and 1 <= scan_duration < 10:
                                beacon_data = beacon_scanner.scan(scan_duration)
                            else:
                                beacon_data = beacon_scanner.scan()
                            client_message = MqttMsg('JSONArray', cmd, 'OK', data=list(beacon_data))
                            self.mqtt_client.publish(client_message.to_json(), APP_CONFIG.data_topic)
                        elif 'change-scan-mode' == cmd and cmd_dict.get('scan-mode') is not None:
                            scan_mode = str(cmd_dict.get('scan-mode'))
                            if scan_mode.lower() != 'command' and scan_mode.lower() != 'auto':
                                log.warning("Received Invalid change-scan-mode Command: scan-mode=" + scan_mode)
                            else:
                                log.info("Received change-scan-mode Command, Change scan_mode [{}] -> [{}]"
                                         .format(APP_CONFIG.scan_mode, scan_mode))
                                APP_CONFIG.scan_mode = scan_mode
                                # Optionally update scan duration within limits.
                                if cmd_dict.get('duration') is not None and isinstance(cmd_dict.get('duration'), int):
                                    if 1 <= cmd_dict.get('duration') < MAX_SCAN_DURATION:
                                        log.info("change-scan-mode and set scan_duration [{}s] -> [{}s]"
                                                 .format(APP_CONFIG.scan_duration, cmd_dict.get('duration')))
                                        APP_CONFIG.scan_duration = int(cmd_dict.get('duration'))
                                # Optionally update scan period within limits.
                                if cmd_dict.get('period') is not None and isinstance(cmd_dict.get('period'), int):
                                    if 1 <= cmd_dict.get('period') <= MAX_SCAN_PERIOD:
                                        log.info("change-scan-mode and set scan_period [{}s] -> [{}s]"
                                                 .format(APP_CONFIG.scan_period, cmd_dict.get('period')))
                                        APP_CONFIG.scan_period = int(cmd_dict.get('period'))
                        elif 'show-config' == cmd and cmd_dict.get(TRACE_KEY) is not None:
                            log.info("Received Show App Config from Trace: " + cmd_dict.get(TRACE_KEY))
                            client_message = MqttMsg('JSONObject', cmd, 'OK', data=APP_CONFIG.to_dict())
                            self.mqtt_client.publish(client_message.to_json())
                        else:
                            log.info("Unknown Command: " + message)
                except json.decoder.JSONDecodeError as te:
                    log.error("Invalid JSON Command Type for Msg={} : {}".format(message, te))
                except RuntimeError as re:
                    log.error('Process Command Msg Failed: {}'.format(re))
            time.sleep(0.5)

    def join(self, timeout: Optional[float] = ...) -> None:
        """Signal the loop to stop, then wait for the thread to exit.

        NOTE(review): the timeout argument is accepted but not forwarded
        to Thread.join -- confirm this is intended.
        """
        self.stop = True
        super(CommandListener, self).join()
class ScannerThread(threading.Thread):
    """
    Beacon Scanner Thread, Automatic Scan and Publish Beacon Data

    When APP_CONFIG.scan_mode is 'auto', waits scan_period seconds,
    scans for beacons for scan_duration seconds, and publishes the
    result to the configured data topic; otherwise idles.
    """
    # MqttUtil instance used to publish scan results.
    mqtt_client = None
    # Cooperative stop flag checked by the run() loop.
    stop = False

    def __init__(self, thread_name, mqtt_client) -> None:
        """Bind a validated MqttUtil to this thread."""
        if mqtt_client is None or not isinstance(mqtt_client, MqttUtil):
            raise TypeError("Invalid MqttUtil")
        else:
            self.mqtt_client = mqtt_client
        threading.Thread.__init__(self, name=thread_name)

    def run(self) -> None:
        """Scan-and-publish loop driven by the global APP_CONFIG."""
        global APP_CONFIG
        while not self.stop:
            if APP_CONFIG.scan_mode.lower() == 'auto':
                # Sleep the full period first, then scan once.
                time.sleep(int(APP_CONFIG.scan_period))
                duration = int(APP_CONFIG.scan_duration)
                if SHUTDOWN:
                    break
                if 1 <= duration < MAX_SCAN_DURATION:
                    beacon_data = beacon_scanner.scan(duration)
                else:
                    # Out-of-range duration: use the scanner's default.
                    beacon_data = beacon_scanner.scan()
                client_message = MqttMsg('JSONArray', 'data', 'OK', data=list(beacon_data))
                log.info("Preparing Beacon Data = {}".format(beacon_data))
                self.mqtt_client.publish(client_message.to_json(), str(APP_CONFIG.data_topic))
            else:
                # Command mode: idle until the mode changes.
                time.sleep(1)
                pass

    def join(self, timeout: Optional[float] = ...) -> None:
        """Signal the loop to stop, then wait for the thread to exit.

        NOTE(review): the timeout argument is accepted but not forwarded
        to Thread.join -- confirm this is intended.
        """
        self.stop = True
        super(ScannerThread, self).join()
if __name__ == '__main__':
    # Application entry point: load config, connect MQTT, start the
    # command-listener and scanner threads, and serve until interrupted.
    welcome()
    dt = get_timestamp()
    configure = AppConfig(CONFIG_PATH)
    configure.load()
    log.info("Beacon Tracking Client Initializing @ [{}] => AppConfig:\n{}".format(dt, configure))
    APP_CONFIG = configure
    # Use a name distinct from the imported `queue` module: the original
    # `queue = queue.Queue(1024)` rebinding shadowed the module itself.
    msg_queue = queue.Queue(1024)
    MQTT_UTIL = MqttUtil(configure, msg_queue)
    MQTT_UTIL.connect()
    MQTT_UTIL.subscribe()
    cmdListener = CommandListener('Command-Listener', msg_queue, MQTT_UTIL)
    scanner = ScannerThread('Beacon-Scanner', MQTT_UTIL)
    try:
        cmdListener.start()
        scanner.start()
        # Blocks here servicing MQTT network traffic until disconnect/interrupt.
        MQTT_UTIL.client.loop_forever()
        log.info("\n\nThank You For Using, Please Refer to https://github.com/Jason-Gew/BLE-Beacon-Tracking-System "
                 "for Any Issues or Questions\n\n")
    except KeyboardInterrupt:
        SHUTDOWN = True
        log.info("User Terminate the Client...\n\n====== Thank You for Using BLE Beacon Tracking Client V{} ======\n\n"
                 .format(__version__))
    finally:
        MQTT_UTIL.disconnect()
        cmdListener.join()
        scanner.join()
        exit(0)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Bluetooth LE Beacon Tracking Client Main Entry
# Support Multiple Beacon Scan Mode, Frequency
__author__ = 'Jason/GeW'
__version__ = '2.0.0'
import json
import time
import queue
import threading
import command_parser
import beacon_scanner
from app_logger import Logger
from mqtt_util import MqttUtil
from mqtt_util import MqttMsg
from configuration import AppConfig
from typing import Optional, Callable, Any, Iterable, Mapping
# -------- Static Variables Start --------
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S %z"
CONFIG_PATH = 'config/app-config.ini'
COMMAND_PATH = 'config/commands.json'
COMMAND_KEY = 'command'
TRACE_KEY = 'trace'
APP_CONFIG = None
SHUTDOWN = False
# Max Beacon Scan Period (Unit: Second)
MAX_SCAN_PERIOD = 60
# Max Single Beacon Scan Duration (Unit: Second)
MAX_SCAN_DURATION = 10
# -------- Static Variables End --------
log = Logger('logs/app.log').logger
def get_timestamp():
    """Return the current local time rendered with DATETIME_FORMAT."""
    return time.strftime(DATETIME_FORMAT, time.localtime(time.time()))
def welcome():
    """Print the start-up banner, then pause briefly so it is readable."""
    border = "\n+---------------------------------------------------------------------+\n"
    title = "+ Bluetooth LE Beacon Tracking Client +"
    footer = "\n+--------------------------- Version {} ---------------------------+\n".format(__version__)
    for line in (border, title, footer):
        print(line)
    time.sleep(0.5)
class CommandListener(threading.Thread):
    """
    CommandListener for cmd process from MQTT Subscription
    """
    # Queue of raw MQTT messages fed by MqttUtil (set in __init__).
    msg_queue = None
    # MQTT helper used to publish replies (set in __init__).
    mqtt_client = None
    # Cooperative stop flag polled by run() and set by join()/'shutdown'.
    stop = False

    def __init__(self, thread_name, msg_queue, mqtt_client) -> None:
        self.msg_queue = msg_queue
        # Fail fast on an invalid MQTT helper rather than at publish time.
        if mqtt_client is None or not isinstance(mqtt_client, MqttUtil):
            raise TypeError("Invalid MqttUtil")
        else:
            self.mqtt_client = mqtt_client
        threading.Thread.__init__(self, name=thread_name)

    def run(self) -> None:
        # Poll the message queue and dispatch JSON commands:
        # status / shutdown / beacon-scan (scan) / change-scan-mode / show-config.
        global APP_CONFIG, SHUTDOWN
        while not self.stop:
            if not self.msg_queue.empty():
                message = None
                try:
                    msg = dict(self.msg_queue.get())
                    message = msg.get('message')
                    cmd_dict = json.loads(message)
                    if COMMAND_KEY in cmd_dict.keys():
                        cmd = cmd_dict.get(COMMAND_KEY)
                        if 'status' == cmd:
                            # Report host/system status back over MQTT.
                            system_status = json.dumps(command_parser.show_system_status())
                            self.mqtt_client.publish(system_status)
                            log.info("Published System Status: " + system_status)
                        elif 'shutdown' == cmd:
                            log.info("Received Shutdown Command: " + message)
                            # A shutdown must carry a trace id (> 3 chars) to be honored.
                            if cmd_dict.get(TRACE_KEY) is None or len(str(cmd_dict.get(TRACE_KEY))) <= 3:
                                log.warning("Shutdown Command Does Not Have Valid Trace")
                                continue
                            self.stop = True
                            client_message = MqttMsg('String', cmd, 'System Will Shutdown After 3 Seconds',
                                                     cmd_dict.get(TRACE_KEY))
                            self.mqtt_client.publish(client_message.to_json())
                            SHUTDOWN = True
                            # Grace period so the reply is delivered before disconnect.
                            time.sleep(3)
                            self.mqtt_client.disconnect()
                            break
                        elif ('beacon-scan' == cmd or 'scan' == cmd) and cmd_dict.get(TRACE_KEY) is not None:
                            # On-demand scan; optional 'duration' in [1, 10) seconds.
                            scan_duration = cmd_dict.get('duration')
                            log.info("Received Manual Beacon-Scan Requirement, Duration={}".format(scan_duration))
                            if scan_duration is not None and 1 <= scan_duration < 10:
                                beacon_data = beacon_scanner.scan(scan_duration)
                            else:
                                beacon_data = beacon_scanner.scan()
                            client_message = MqttMsg('JSONArray', cmd, 'OK', data=list(beacon_data))
                            self.mqtt_client.publish(client_message.to_json(), APP_CONFIG.data_topic)
                        elif 'change-scan-mode' == cmd and cmd_dict.get('scan-mode') is not None:
                            # Switch between 'auto' and 'command' scan modes; may also
                            # update scan duration/period within their allowed ranges.
                            scan_mode = str(cmd_dict.get('scan-mode'))
                            if scan_mode.lower() != 'command' and scan_mode.lower() != 'auto':
                                log.warning("Received Invalid change-scan-mode Command: scan-mode=" + scan_mode)
                            else:
                                log.info("Received change-scan-mode Command, Change scan_mode [{}] -> [{}]"
                                         .format(APP_CONFIG.scan_mode, scan_mode))
                                APP_CONFIG.scan_mode = scan_mode
                                if cmd_dict.get('duration') is not None and isinstance(cmd_dict.get('duration'), int):
                                    if 1 <= cmd_dict.get('duration') < MAX_SCAN_DURATION:
                                        log.info("change-scan-mode and set scan_duration [{}s] -> [{}s]"
                                                 .format(APP_CONFIG.scan_duration, cmd_dict.get('duration')))
                                        APP_CONFIG.scan_duration = int(cmd_dict.get('duration'))
                                if cmd_dict.get('period') is not None and isinstance(cmd_dict.get('period'), int):
                                    if 1 <= cmd_dict.get('period') <= MAX_SCAN_PERIOD:
                                        log.info("change-scan-mode and set scan_period [{}s] -> [{}s]"
                                                 .format(APP_CONFIG.scan_period, cmd_dict.get('period')))
                                        APP_CONFIG.scan_period = int(cmd_dict.get('period'))
                        elif 'show-config' == cmd and cmd_dict.get(TRACE_KEY) is not None:
                            log.info("Received Show App Config from Trace: " + cmd_dict.get(TRACE_KEY))
                            client_message = MqttMsg('JSONObject', cmd, 'OK', data=APP_CONFIG.to_dict())
                            self.mqtt_client.publish(client_message.to_json())
                    else:
                        log.info("Unknown Command: " + message)
                except json.decoder.JSONDecodeError as te:
                    log.error("Invalid JSON Command Type for Msg={} : {}".format(message, te))
                except RuntimeError as re:
                    log.error('Process Command Msg Failed: {}'.format(re))
            # Throttle the polling loop.
            time.sleep(0.5)

    def join(self, timeout: Optional[float] = ...) -> None:
        # Signal run() to stop, then wait for the thread to finish.
        # NOTE(review): `timeout` is accepted but not forwarded to Thread.join().
        self.stop = True
        super(CommandListener, self).join()
class ScannerThread(threading.Thread):
    """
    Beacon Scanner Thread, Automatic Scan and Publish Beacon Data
    """
    # MQTT helper used to publish scan results (set in __init__).
    mqtt_client = None
    # Cooperative stop flag polled by run() and set by join().
    stop = False

    def __init__(self, thread_name, mqtt_client) -> None:
        # Validate the MQTT helper before wiring it in.
        if mqtt_client is None or not isinstance(mqtt_client, MqttUtil):
            raise TypeError("Invalid MqttUtil")
        else:
            self.mqtt_client = mqtt_client
        threading.Thread.__init__(self, name=thread_name)

    def run(self) -> None:
        # Periodically scan for beacons while scan_mode is 'auto';
        # otherwise idle and re-check the mode once per second.
        global APP_CONFIG
        while not self.stop:
            if APP_CONFIG.scan_mode.lower() == 'auto':
                time.sleep(int(APP_CONFIG.scan_period))
                duration = int(APP_CONFIG.scan_duration)
                # Re-check the shutdown flag after the (possibly long) sleep.
                if SHUTDOWN:
                    break
                if 1 <= duration < MAX_SCAN_DURATION:
                    beacon_data = beacon_scanner.scan(duration)
                else:
                    # Out-of-range duration: fall back to the scanner default.
                    beacon_data = beacon_scanner.scan()
                client_message = MqttMsg('JSONArray', 'data', 'OK', data=list(beacon_data))
                log.info("Preparing Beacon Data = {}".format(beacon_data))
                self.mqtt_client.publish(client_message.to_json(), str(APP_CONFIG.data_topic))
            else:
                time.sleep(1)
                pass

    def join(self, timeout: Optional[float] = ...) -> None:
        # Signal run() to stop, then wait for the thread to finish.
        # NOTE(review): `timeout` is accepted but not forwarded to Thread.join().
        self.stop = True
        super(ScannerThread, self).join()
if __name__ == '__main__':
    # Application entry point: load config, connect MQTT, start the
    # command-listener and scanner threads, and serve until interrupted.
    welcome()
    dt = get_timestamp()
    configure = AppConfig(CONFIG_PATH)
    configure.load()
    log.info("Beacon Tracking Client Initializing @ [{}] => AppConfig:\n{}".format(dt, configure))
    APP_CONFIG = configure
    # Use a name distinct from the imported `queue` module: the original
    # `queue = queue.Queue(1024)` rebinding shadowed the module itself.
    msg_queue = queue.Queue(1024)
    MQTT_UTIL = MqttUtil(configure, msg_queue)
    MQTT_UTIL.connect()
    MQTT_UTIL.subscribe()
    cmdListener = CommandListener('Command-Listener', msg_queue, MQTT_UTIL)
    scanner = ScannerThread('Beacon-Scanner', MQTT_UTIL)
    try:
        cmdListener.start()
        scanner.start()
        # Blocks here servicing MQTT network traffic until disconnect/interrupt.
        MQTT_UTIL.client.loop_forever()
        log.info("\n\nThank You For Using, Please Refer to https://github.com/Jason-Gew/BLE-Beacon-Tracking-System "
                 "for Any Issues or Questions\n\n")
    except KeyboardInterrupt:
        SHUTDOWN = True
        log.info("User Terminate the Client...\n\n====== Thank You for Using BLE Beacon Tracking Client V{} ======\n\n"
                 .format(__version__))
    finally:
        MQTT_UTIL.disconnect()
        cmdListener.join()
        scanner.join()
        exit(0)
|
en
| 0.558243
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Bluetooth LE Beacon Tracking Client Main Entry # Support Multiple Beacon Scan Mode, Frequency # -------- Static Variables Start -------- # Max Beacon Scan Period (Unit: Second) # Max Single Beacon Scan Duration (Unit: Second) # -------- Static Variables End -------- CommandListener for cmd process from MQTT Subscription Beacon Scanner Thread, Automatic Scan and Publish Beacon Data
| 2.387289
| 2
|
sims/cb/cytofpy/attempt3/main.py
|
luiarthur/cytof5
| 1
|
6629115
|
<reponame>luiarthur/cytof5<gh_stars>1-10
import os
import torch
from readCB import readCB
from Cytof import Cytof
from simdata import simdata
from torch.distributions import Normal
import math
import matplotlib.pyplot as plt
import copy
import numpy as np
import pickle
def add_gridlines_Z(Z):
    """Overlay thin grey separators between the cells of a matrix plot.

    `Z` is any 2-D array-like with a `.shape` of (rows, columns).
    """
    n_rows, n_cols = Z.shape
    for row in range(n_rows):
        plt.axhline(y=row + .5, color='grey', linewidth=.5)
    for col in range(n_cols):
        plt.axvline(x=col + .5, color='grey', linewidth=.5)
# Fit the Cytof variational model to simulated (or real) CB data and plot
# posterior summaries and variational-parameter traces.
if __name__ == '__main__':
    sbt = torch.distributions.StickBreakingTransform()
    path_to_exp_results = 'results/test/'
    os.makedirs(path_to_exp_results, exist_ok=True)
    # Fix RNG seeds so runs are reproducible.
    torch.manual_seed(0)
    np.random.seed(0)
    SIMULATE_DATA = True
    # SIMULATE_DATA = False
    cm_greys = plt.cm.get_cmap('Greys')
    if not SIMULATE_DATA:
        # Real-data path: load, truncate to tmp_J markers, record the missing
        # mask, and fill missing values with noise (see FIXME below).
        CB_FILEPATH = '../../data/cb.txt'
        cb = readCB(CB_FILEPATH)
        cb['m'] = []
        tmp_J = 25
        for i in range(len(cb['y'])):
            cb['y'][i] = torch.tensor(cb['y'][i])[:, :tmp_J]
            cb['m'].append(torch.isnan(cb['y'][i]))
            # FIXME: missing values should be imputed
            cb['y'][i][cb['m'][i]] = torch.randn(cb['m'][i].sum()) * .5 - 5
    else:
        # data = simdata(N=[30000, 10000, 20000], L0=3, L1=3, J=12, K=4)
        # data = simdata(N=[3000, 3000, 3000], L0=3, L1=3, J=12, K=4)
        data = simdata(N=[30000, 10000, 20000], L0=1, L1=1, J=4, a_W=[300, 700])
        cb = data['data']
        # Save the ground-truth Z for later visual comparison.
        plt.imshow(data['params']['Z'], aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
        J, K = data['params']['Z'].shape
        add_gridlines_Z(data['params']['Z'])
        plt.savefig('{}/Z_true.pdf'.format(path_to_exp_results))
        plt.show()
    y = copy.deepcopy(cb['y'])
    # Quick marginal histograms of a few markers.
    plt.hist(y[0][:, 1], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    plt.hist(y[1][:, 3], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    plt.hist(y[2][:, -1], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    # Plot yi
    cm = plt.cm.get_cmap('bwr')
    cm.set_under(color='blue')
    cm.set_over(color='red')
    cm.set_bad(color='black')
    I = len(y)
    for i in range(I):
        plt.imshow(y[i], aspect='auto', vmin=-2, vmax=2, cmap=cm)
        plt.colorbar()
        plt.show()
    # Model setup and variational fit.
    K = 10
    L = [2, 2]
    model = Cytof(data=cb, K=K, L=L)
    priors = model.priors
    model = Cytof(data=cb, K=K, L=L, priors=priors)
    model.debug=0
    out = model.fit(data=cb, niters=5000, lr=1e-1, print_freq=10, eps=1e-6,
                    minibatch_info={'prop': .01},
                    nmc=1, seed=10)
    # Save output
    pickle.dump(out, open('{}/out.p'.format(path_to_exp_results), 'wb'))
    elbo = out['elbo']
    vp = out['vp']
    # out = pickle.load(open('{}/out.p'.format(path_to_exp_results), 'rb'))
    plt.plot(elbo)
    plt.ylabel('ELBO / NSUM')
    plt.show()
    # Collect the variational location parameters (`.m`, presumably the
    # variational means -- confirm against Cytof) and map them back to the
    # model's parameter space.
    real_param_mean = {}
    for key in vp:
        if key != 'Z':
            real_param_mean[key] = vp[key].m
        else:
            real_param_mean[key] = vp[key].logit_p
    params = model.to_param_space(real_param_mean)
    # print(params)
    # for key in vp: print('{} log_s: {}'.format(key, (vp[key].log_s)))
    # Posterior Inference
    B = 100
    post = [model.to_param_space(model.sample_real_params(vp)) for b in range(B)]
    # Plot mu
    mu0 = torch.stack([p['mu0'].cumsum(0) for p in post]).detach().numpy()
    mu1 = torch.stack([p['mu1'].cumsum(0) for p in post]).detach().numpy()
    mu = np.concatenate((-(model.iota + mu0), model.iota + mu1), 1)
    plt.boxplot(mu, showmeans=True, whis=[2.5, 97.5], showfliers=False)
    plt.ylabel('$\mu$', rotation=0)
    if SIMULATE_DATA:
        for yint in (data['params']['mu0'].tolist() + data['params']['mu1'].tolist()):
            plt.axhline(yint)
    plt.show()
    # plot W
    W = torch.stack([p['W'] for p in post]).detach().numpy()
    plt.figure()
    for i in range(model.I):
        plt.subplot(model.I + 1, 1, i + 1)
        plt.boxplot(W[:, i, :], showmeans=True, whis=[2.5, 97.5], showfliers=False)
        plt.ylabel('$W_{}$'.format(i+1), rotation=0, labelpad=15)
        if SIMULATE_DATA:
            for yint in data['params']['W'][i, :].tolist():
                plt.axhline(yint)
    # plot v
    v = torch.stack([p['v'] for p in post]).detach().numpy()
    plt.subplot(model.I + 1, 1, model.I + 1)
    plt.boxplot(v.cumprod(1), showmeans=True, whis=[2.5, 97.5], showfliers=False)
    plt.ylabel('$v$', rotation=0, labelpad=15)
    plt.tight_layout()
    plt.show()
    # plot sig
    for z in range(2):
        sig_key = 'sig' + str(z)
        sigz = torch.stack([p[sig_key] for p in post]).detach().numpy()
        plt.boxplot(sigz, showmeans=True, whis=[2.5, 97.5], showfliers=False)
        plt.xlabel(sig_key, fontsize=15)
        if SIMULATE_DATA:
            for yint in data['params']['sig'].tolist():
                plt.axhline(yint)
        plt.show()
    # plot alpha
    alpha = torch.stack([p['alpha'] for p in post]).detach().numpy().squeeze()
    plt.hist(alpha)
    plt.xlabel('alpha', fontsize=15)
    plt.show()
    # Plot Z
    # Z = torch.stack([p['Z'] for p in post]).detach().reshape((B, model.J, model.K)).numpy()
    H = torch.stack([p['H'] for p in post]).detach()
    v = torch.stack([p['v'] for p in post]).detach()
    Z = v.log().cumsum(1)[:, None, :] > Normal(0, 1).cdf(H).log()
    Z = Z.numpy()
    # plt.imshow(Z.mean(0) > .5, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    plt.imshow(Z.mean(0), aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    add_gridlines_Z(Z[0])
    plt.savefig('{}/Z.pdf'.format(path_to_exp_results))
    plt.show()
    # Plot VP Trace
    # Plot mu vp mean
    trace_len = len(out['trace'])
    mu0_m_trace = torch.stack([-model.iota - t['mu0'].m.exp().cumsum(0)
                               for t in out['trace']])
    mu1_m_trace = torch.stack([model.iota + t['mu1'].m.exp().cumsum(0)
                               for t in out['trace']])
    plt.plot(mu0_m_trace.detach().numpy())
    plt.plot(mu1_m_trace.detach().numpy())
    plt.title('trace plot for $\mu$ vp mean')
    plt.show()
    # Plot W vp mean
    W_m_trace = torch.stack([model.sbt(t['W'].m) for t in out['trace']])
    for i in range(model.I):
        plt.plot(W_m_trace.detach().numpy()[:, i, :])
        if SIMULATE_DATA:
            for k in range(data['params']['W'].size(1)):
                plt.axhline(data['params']['W'][i, k])
        plt.title('trace plot for W_{} mean'.format(i))
        plt.show()
    # Plot sig vp mean
    for z in range(2):
        sig_key = 'sig' + str(z)
        sigz_m_trace = torch.stack([t[sig_key].m.exp() for t in out['trace']])
        plt.plot(sigz_m_trace.detach().numpy())
        if SIMULATE_DATA:
            for i in range(model.I):
                plt.axhline(data['params']['sig'][i])
        plt.title('trace plot for {} vp mean'.format(sig_key))
        plt.show()
    # Plot v vp mean
    v_m_trace = torch.stack([t['v'].m.sigmoid().cumprod(0) for t in out['trace']])
    plt.plot(v_m_trace.detach().numpy())
    plt.title('trace plot for v vp mean')
    plt.show()
|
import os
import torch
from readCB import readCB
from Cytof import Cytof
from simdata import simdata
from torch.distributions import Normal
import math
import matplotlib.pyplot as plt
import copy
import numpy as np
import pickle
def add_gridlines_Z(Z):
    """Draw thin grey grid lines between the cells of a 2-D matrix plot."""
    # Z is row-by-column; lines are offset by .5 to fall between cells.
    J, K = Z.shape
    for j in range(J):
        plt.axhline(y=j+.5, color='grey', linewidth=.5)
    for k in range(K):
        plt.axvline(x=k+.5, color='grey', linewidth=.5)
# Fit the Cytof variational model to simulated (or real) CB data and plot
# posterior summaries and variational-parameter traces.
if __name__ == '__main__':
    sbt = torch.distributions.StickBreakingTransform()
    path_to_exp_results = 'results/test/'
    os.makedirs(path_to_exp_results, exist_ok=True)
    # Fix RNG seeds so runs are reproducible.
    torch.manual_seed(0)
    np.random.seed(0)
    SIMULATE_DATA = True
    # SIMULATE_DATA = False
    cm_greys = plt.cm.get_cmap('Greys')
    if not SIMULATE_DATA:
        # Real-data path: load, truncate to tmp_J markers, record the missing
        # mask, and fill missing values with noise (see FIXME below).
        CB_FILEPATH = '../../data/cb.txt'
        cb = readCB(CB_FILEPATH)
        cb['m'] = []
        tmp_J = 25
        for i in range(len(cb['y'])):
            cb['y'][i] = torch.tensor(cb['y'][i])[:, :tmp_J]
            cb['m'].append(torch.isnan(cb['y'][i]))
            # FIXME: missing values should be imputed
            cb['y'][i][cb['m'][i]] = torch.randn(cb['m'][i].sum()) * .5 - 5
    else:
        # data = simdata(N=[30000, 10000, 20000], L0=3, L1=3, J=12, K=4)
        # data = simdata(N=[3000, 3000, 3000], L0=3, L1=3, J=12, K=4)
        data = simdata(N=[30000, 10000, 20000], L0=1, L1=1, J=4, a_W=[300, 700])
        cb = data['data']
        # Save the ground-truth Z for later visual comparison.
        plt.imshow(data['params']['Z'], aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
        J, K = data['params']['Z'].shape
        add_gridlines_Z(data['params']['Z'])
        plt.savefig('{}/Z_true.pdf'.format(path_to_exp_results))
        plt.show()
    y = copy.deepcopy(cb['y'])
    # Quick marginal histograms of a few markers.
    plt.hist(y[0][:, 1], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    plt.hist(y[1][:, 3], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    plt.hist(y[2][:, -1], bins=100, density=True); plt.xlim(-20, 20); plt.show()
    # Plot yi
    cm = plt.cm.get_cmap('bwr')
    cm.set_under(color='blue')
    cm.set_over(color='red')
    cm.set_bad(color='black')
    I = len(y)
    for i in range(I):
        plt.imshow(y[i], aspect='auto', vmin=-2, vmax=2, cmap=cm)
        plt.colorbar()
        plt.show()
    # Model setup and variational fit.
    K = 10
    L = [2, 2]
    model = Cytof(data=cb, K=K, L=L)
    priors = model.priors
    model = Cytof(data=cb, K=K, L=L, priors=priors)
    model.debug=0
    out = model.fit(data=cb, niters=5000, lr=1e-1, print_freq=10, eps=1e-6,
                    minibatch_info={'prop': .01},
                    nmc=1, seed=10)
    # Save output
    pickle.dump(out, open('{}/out.p'.format(path_to_exp_results), 'wb'))
    elbo = out['elbo']
    vp = out['vp']
    # out = pickle.load(open('{}/out.p'.format(path_to_exp_results), 'rb'))
    plt.plot(elbo)
    plt.ylabel('ELBO / NSUM')
    plt.show()
    # Collect the variational location parameters (`.m`, presumably the
    # variational means -- confirm against Cytof) and map them back to the
    # model's parameter space.
    real_param_mean = {}
    for key in vp:
        if key != 'Z':
            real_param_mean[key] = vp[key].m
        else:
            real_param_mean[key] = vp[key].logit_p
    params = model.to_param_space(real_param_mean)
    # print(params)
    # for key in vp: print('{} log_s: {}'.format(key, (vp[key].log_s)))
    # Posterior Inference
    B = 100
    post = [model.to_param_space(model.sample_real_params(vp)) for b in range(B)]
    # Plot mu
    mu0 = torch.stack([p['mu0'].cumsum(0) for p in post]).detach().numpy()
    mu1 = torch.stack([p['mu1'].cumsum(0) for p in post]).detach().numpy()
    mu = np.concatenate((-(model.iota + mu0), model.iota + mu1), 1)
    plt.boxplot(mu, showmeans=True, whis=[2.5, 97.5], showfliers=False)
    plt.ylabel('$\mu$', rotation=0)
    if SIMULATE_DATA:
        for yint in (data['params']['mu0'].tolist() + data['params']['mu1'].tolist()):
            plt.axhline(yint)
    plt.show()
    # plot W
    W = torch.stack([p['W'] for p in post]).detach().numpy()
    plt.figure()
    for i in range(model.I):
        plt.subplot(model.I + 1, 1, i + 1)
        plt.boxplot(W[:, i, :], showmeans=True, whis=[2.5, 97.5], showfliers=False)
        plt.ylabel('$W_{}$'.format(i+1), rotation=0, labelpad=15)
        if SIMULATE_DATA:
            for yint in data['params']['W'][i, :].tolist():
                plt.axhline(yint)
    # plot v
    v = torch.stack([p['v'] for p in post]).detach().numpy()
    plt.subplot(model.I + 1, 1, model.I + 1)
    plt.boxplot(v.cumprod(1), showmeans=True, whis=[2.5, 97.5], showfliers=False)
    plt.ylabel('$v$', rotation=0, labelpad=15)
    plt.tight_layout()
    plt.show()
    # plot sig
    for z in range(2):
        sig_key = 'sig' + str(z)
        sigz = torch.stack([p[sig_key] for p in post]).detach().numpy()
        plt.boxplot(sigz, showmeans=True, whis=[2.5, 97.5], showfliers=False)
        plt.xlabel(sig_key, fontsize=15)
        if SIMULATE_DATA:
            for yint in data['params']['sig'].tolist():
                plt.axhline(yint)
        plt.show()
    # plot alpha
    alpha = torch.stack([p['alpha'] for p in post]).detach().numpy().squeeze()
    plt.hist(alpha)
    plt.xlabel('alpha', fontsize=15)
    plt.show()
    # Plot Z
    # Z = torch.stack([p['Z'] for p in post]).detach().reshape((B, model.J, model.K)).numpy()
    H = torch.stack([p['H'] for p in post]).detach()
    v = torch.stack([p['v'] for p in post]).detach()
    Z = v.log().cumsum(1)[:, None, :] > Normal(0, 1).cdf(H).log()
    Z = Z.numpy()
    # plt.imshow(Z.mean(0) > .5, aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    plt.imshow(Z.mean(0), aspect='auto', vmin=0, vmax=1, cmap=cm_greys)
    add_gridlines_Z(Z[0])
    plt.savefig('{}/Z.pdf'.format(path_to_exp_results))
    plt.show()
    # Plot VP Trace
    # Plot mu vp mean
    trace_len = len(out['trace'])
    mu0_m_trace = torch.stack([-model.iota - t['mu0'].m.exp().cumsum(0)
                               for t in out['trace']])
    mu1_m_trace = torch.stack([model.iota + t['mu1'].m.exp().cumsum(0)
                               for t in out['trace']])
    plt.plot(mu0_m_trace.detach().numpy())
    plt.plot(mu1_m_trace.detach().numpy())
    plt.title('trace plot for $\mu$ vp mean')
    plt.show()
    # Plot W vp mean
    W_m_trace = torch.stack([model.sbt(t['W'].m) for t in out['trace']])
    for i in range(model.I):
        plt.plot(W_m_trace.detach().numpy()[:, i, :])
        if SIMULATE_DATA:
            for k in range(data['params']['W'].size(1)):
                plt.axhline(data['params']['W'][i, k])
        plt.title('trace plot for W_{} mean'.format(i))
        plt.show()
    # Plot sig vp mean
    for z in range(2):
        sig_key = 'sig' + str(z)
        sigz_m_trace = torch.stack([t[sig_key].m.exp() for t in out['trace']])
        plt.plot(sigz_m_trace.detach().numpy())
        if SIMULATE_DATA:
            for i in range(model.I):
                plt.axhline(data['params']['sig'][i])
        plt.title('trace plot for {} vp mean'.format(sig_key))
        plt.show()
    # Plot v vp mean
    v_m_trace = torch.stack([t['v'].m.sigmoid().cumprod(0) for t in out['trace']])
    plt.plot(v_m_trace.detach().numpy())
    plt.title('trace plot for v vp mean')
    plt.show()
|
en
| 0.362639
|
# SIMULATE_DATA = False # FIXME: missing values should be imputed # data = simdata(N=[30000, 10000, 20000], L0=3, L1=3, J=12, K=4) # data = simdata(N=[3000, 3000, 3000], L0=3, L1=3, J=12, K=4) # Plot yi # Save output # out = pickle.load(open('{}/out.p'.format(path_to_exp_results), 'rb')) # print(params) # for key in vp: print('{} log_s: {}'.format(key, (vp[key].log_s))) # Posterior Inference # Plot mu # plot W # plot v # plot sig # plot alpha # Plot Z # Z = torch.stack([p['Z'] for p in post]).detach().reshape((B, model.J, model.K)).numpy() # plt.imshow(Z.mean(0) > .5, aspect='auto', vmin=0, vmax=1, cmap=cm_greys) # Plot VP Trace # Plot mu vp mean # Plot W vp mean # Plot sig vp mean # Plot v vp mean
| 2.099497
| 2
|
modelo.py
|
raquellecampos/Python
| 0
|
6629116
|
<filename>modelo.py
class Pessoa:
    """Domain model for a person, identified by CPF."""

    def __init__(self, cpf, nome, data_nascimento, usa_oculos):
        # Store the caller-supplied fields verbatim.
        self.cpf, self.nome = cpf, nome
        self.data_nascimento, self.usa_oculos = data_nascimento, usa_oculos
class Marca:
    """Domain model for a vehicle brand."""

    def __init__(self, nome, sigla):
        # id starts unset; it is assigned externally later (e.g. on persist).
        self.id = None
        self.nome, self.sigla = nome, sigla
class Veiculo:
    """Domain model for a vehicle with references to its owner and brand
    (presumably Pessoa and Marca instances)."""

    def __init__(self, placa, ano, cor, motor, proprietario, marca):
        # Physical attributes of the vehicle itself.
        self.placa, self.ano, self.cor, self.motor = placa, ano, cor, motor
        # Links to related domain objects.
        self.proprietario = proprietario
        self.marca = marca
|
<filename>modelo.py
class Pessoa:
    """Domain model for a person, identified by CPF."""
    def __init__(self, cpf, nome, data_nascimento, usa_oculos):
        # Attributes are stored exactly as supplied by the caller.
        self.cpf = cpf
        self.nome = nome
        self.data_nascimento = data_nascimento
        self.usa_oculos = usa_oculos
class Marca:
    """Domain model for a vehicle brand."""
    def __init__(self, nome, sigla):
        # id starts as None and is assigned externally later,
        # presumably by a persistence layer -- TODO confirm.
        self.id = None
        self.nome = nome
        self.sigla = sigla
class Veiculo:
    """Domain model for a vehicle with references to its owner and brand
    (presumably Pessoa and Marca instances)."""
    def __init__(self, placa, ano , cor , motor, proprietario, marca):
        self.placa = placa
        self.ano = ano
        self.cor = cor
        self.motor = motor
        self.proprietario = proprietario
        self.marca = marca
|
none
| 1
| 2.869056
| 3
|
|
df_17/ct/transaction.py
|
tivvit/crypto_trading_bot_devfest_2017
| 0
|
6629117
|
import json
from .trade_decider import TradeDirection
class Transaction(object):
    """A record of a single trade decision."""

    def __init__(self, direction: TradeDirection) -> None:
        # The direction chosen by the trade decider for this transaction.
        self.direction = direction

    def __repr__(self) -> str:
        # JSON form keeps logged/printed transactions uniform.
        payload = {"direction": self.direction}
        return json.dumps(payload)
|
import json
from .trade_decider import TradeDirection
class Transaction(object):
    """A record of a single trade decision."""
    def __init__(self, direction: TradeDirection) -> None:
        # Direction chosen by the trade decider (see trade_decider.TradeDirection).
        self.direction = direction
    def __repr__(self) -> str:
        # Serialize as JSON so transactions log/print uniformly.
        return json.dumps({
            "direction": self.direction,
        })
|
none
| 1
| 2.653033
| 3
|
|
leetcode/python/925_long_pressed_name.py
|
VVKot/leetcode-solutions
| 4
|
6629118
|
class Solution:
    def isLongPressedName(self, name: str, typed: str) -> bool:
        """Return True if `typed` could result from typing `name` with some
        keys held long enough to repeat (LeetCode 925)."""
        matched = 0
        target_len = len(name)
        previous = None
        for ch in typed:
            if matched < target_len and name[matched] == ch:
                # ch consumes the next character of `name`.
                matched += 1
            elif previous is None or ch != previous:
                # Not a long-press repeat of the previous typed character.
                return False
            previous = ch
        return matched == target_len
|
class Solution:
    def isLongPressedName(self, name: str, typed: str) -> bool:
        """Return True if `typed` could result from typing `name` with some
        keys held long enough to repeat (LeetCode 925)."""
        i = 0  # index of the next unmatched character in `name`
        N = len(name)
        T = len(typed)
        for j in range(T):
            if i < N and name[i] == typed[j]:
                i += 1
            elif not j or typed[j] != typed[j-1]:
                # Not a long-press repeat of the previous typed character.
                return False
        return i == N
|
none
| 1
| 3.33039
| 3
|
|
examples/3- minting tokens/main.py
|
0xOmarA/RadixLib
| 32
|
6629119
|
"""
This example follows off right where the previous example (2- creating a new mutable supply token)
left off.
The last thing which we had done in the previous example is that we created a new mutable supply
token. The created token had an rri of `<KEY>`.
Keep in mind that the token that you create will have a different RRI as the RRI is derived from the
symbol and the account public key.
In this example we will take a look at how the mutable supply token can be minted using the RadixLib
python package.
"""
import radixlib as radix
import os
def main() -> None:
    """Mint new supply of an existing mutable-supply token and send it to a
    recipient account on the configured Radix network (Stokenet)."""
    # Information on the person who we will be minting the tokens for.
    recipient_address: str = "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
    token_rri: str = "<KEY>"
    mint_amount: int = 10 * (10**18) # This will mint 10 tokens
    # Defining the network that we will be connecting to.
    network: radix.network.Network = radix.network.STOKENET
    # Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my
    # mnemonic phrase is stored in an environment variable under the name "MNEMONIC_PHRASE".
    # You might want to do the same or you could also just put your mnemonic phrase as a literal
    # string.
    mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
    # Creating a new wallet object using the mnemonic phrase above on the network defined.
    wallet: radix.Wallet = radix.Wallet(
        provider = radix.Provider(network),
        signer = radix.Signer.from_mnemonic(mnemonic_phrase)
    )
    print("Wallet address:", wallet.address)
    print("Wallet public key:", wallet.public_key)
    # Using the quick transactions capability of the wallet object to create a transaction for the
    # minting.
    tx_hash: str = wallet.build_sign_and_send_transaction(
        actions = (
            wallet.action_builder
            .mint_tokens(
                to_account_address = recipient_address,
                mint_amount = mint_amount,
                token_rri = token_rri
            )
        )
    )
    print("Tokens minted under transaction hash:", tx_hash)

if __name__ == "__main__":
    main()
|
"""
This example follows off right where the previous example (2- creating a new mutable supply token)
left off.
The last thing which we had done in the previous example is that we created a new mutable supply
token. The created token had an rri of `<KEY>`.
Keep in mind that the token that you create will have a different RRI as the RRI is derived from the
symbol and the account public key.
In this example we will take a look at how the mutable supply token can be minted using the RadixLib
python package.
"""
import radixlib as radix
import os
def main() -> None:
    """Mint new supply of an existing mutable-supply token and send it to a
    recipient account on the configured Radix network (Stokenet)."""
    # Information on the person who we will be minting the tokens for.
    recipient_address: str = "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
    token_rri: str = "<KEY>"
    mint_amount: int = 10 * (10**18) # This will mint 10 tokens
    # Defining the network that we will be connecting to.
    network: radix.network.Network = radix.network.STOKENET
    # Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my
    # mnemonic phrase is stored in an environment variable under the name "MNEMONIC_PHRASE".
    # You might want to do the same or you could also just put your mnemonic phrase as a literal
    # string.
    mnemonic_phrase: str = os.environ['MNEMONIC_PHRASE']
    # Creating a new wallet object using the mnemonic phrase above on the network defined.
    wallet: radix.Wallet = radix.Wallet(
        provider = radix.Provider(network),
        signer = radix.Signer.from_mnemonic(mnemonic_phrase)
    )
    print("Wallet address:", wallet.address)
    print("Wallet public key:", wallet.public_key)
    # Using the quick transactions capability of the wallet object to create a transaction for the
    # minting.
    tx_hash: str = wallet.build_sign_and_send_transaction(
        actions = (
            wallet.action_builder
            .mint_tokens(
                to_account_address = recipient_address,
                mint_amount = mint_amount,
                token_rri = token_rri
            )
        )
    )
    print("Tokens minted under transaction hash:", tx_hash)

if __name__ == "__main__":
    main()
|
en
| 0.884907
|
This example follows off right where the previous example (2- creating a new mutable supply token) left off. The last thing which we had done in the previous example is that we created a new mutable supply token. The created token had an rri of `<KEY>`. Keep in mind that the token that you create will have a different RRI as the RRI is derived from the symbol and the account public key. In this example we will take a look at how the mutable supply token can be minted using the RadixLib python package. # Information on the person who we will be minting the tokens for. # This will mint 10 tokens # Defining the network that we will be connecting to. # Getting the mnemonic phrase for the wallet that we will be connecting to. In this case, my # mnemonic phrase is stored in an envirnoment variable under the name "MNEMONIC_PHRASE". # You might want to do the same or you could also just put your mnemonic phrase as a literal # string. # Creating a new wallet object using the mnemonic phrase above on the network defined. # Using the quick transactions capability of the wallet object to create a transaction for the # minting.
| 3.495559
| 3
|
tests/hwsim/hwsim.py
|
bjackman/hostap
| 1,104
|
6629120
|
#
# HWSIM generic netlink controller code
# Copyright (c) 2014 Intel Corporation
#
# Author: <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import netlink, os
# constants
HWSIM_CMD_CREATE_RADIO = 4
HWSIM_CMD_DESTROY_RADIO = 5
HWSIM_ATTR_CHANNELS = 9
HWSIM_ATTR_RADIO_ID = 10
HWSIM_ATTR_SUPPORT_P2P_DEVICE = 14
HWSIM_ATTR_USE_CHANCTX = 15
# the controller class
class HWSimController(object):
    """Create/destroy mac80211_hwsim radios through the MAC80211_HWSIM
    generic-netlink family."""
    def __init__(self):
        # Generic-netlink socket plus the resolved numeric family id.
        self._conn = netlink.Connection(netlink.NETLINK_GENERIC)
        self._fid = netlink.genl_controller.get_family_id(b'MAC80211_HWSIM')
    def create_radio(self, n_channels=None, use_chanctx=False,
                     use_p2p_device=False):
        """Request a new simulated radio; returns the kernel reply code
        (consumed by HWSimRadio.__enter__ as the radio id, negative on error)."""
        attrs = []
        # Only attach optional attributes that were actually requested.
        if n_channels:
            attrs.append(netlink.U32Attr(HWSIM_ATTR_CHANNELS, n_channels))
        if use_chanctx:
            attrs.append(netlink.FlagAttr(HWSIM_ATTR_USE_CHANCTX))
        if use_p2p_device:
            attrs.append(netlink.FlagAttr(HWSIM_ATTR_SUPPORT_P2P_DEVICE))
        msg = netlink.GenlMessage(self._fid, HWSIM_CMD_CREATE_RADIO,
                                  flags=netlink.NLM_F_REQUEST |
                                        netlink.NLM_F_ACK,
                                  attrs=attrs)
        return msg.send_and_recv(self._conn).ret
    def destroy_radio(self, radio_id):
        """Destroy the simulated radio with the given id."""
        attrs = [netlink.U32Attr(HWSIM_ATTR_RADIO_ID, radio_id)]
        msg = netlink.GenlMessage(self._fid, HWSIM_CMD_DESTROY_RADIO,
                                  flags=netlink.NLM_F_REQUEST |
                                        netlink.NLM_F_ACK,
                                  attrs=attrs)
        msg.send_and_recv(self._conn)
class HWSimRadio(object):
    """Context manager that creates a mac80211_hwsim radio on entry and
    destroys it on exit.

    Yields a ``(radio_id, interface_name)`` tuple from ``__enter__``.
    """
    def __init__(self, n_channels=None, use_chanctx=False,
                 use_p2p_device=False):
        self._controller = HWSimController()
        self._n_channels = n_channels
        self._use_chanctx = use_chanctx
        self._use_p2p_dev = use_p2p_device

    def __enter__(self):
        """Create the radio and return (radio_id, interface_name)."""
        self._radio_id = self._controller.create_radio(
            n_channels=self._n_channels,
            use_chanctx=self._use_chanctx,
            use_p2p_device=self._use_p2p_dev)
        if self._radio_id < 0:
            raise Exception("Failed to create radio (err:%d)" % self._radio_id)
        try:
            # Resolve the net interface the kernel attached to this radio.
            iface = os.listdir('/sys/class/mac80211_hwsim/hwsim%d/net/' % self._radio_id)[0]
        except Exception:
            # Clean up the half-created radio, then re-raise the original
            # exception (bare `raise` instead of `raise e` keeps the
            # original traceback intact).
            self._controller.destroy_radio(self._radio_id)
            raise
        return self._radio_id, iface

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Renamed parameters so `type` no longer shadows the builtin;
        # __exit__ is invoked positionally so callers are unaffected.
        self._controller.destroy_radio(self._radio_id)
def create(args):
    """CLI handler: create a radio via the module-level controller `c`."""
    print('Created radio %d' % c.create_radio(n_channels=args.channels,
                                              use_chanctx=args.chanctx))
def destroy(args):
    """CLI handler: destroy the radio selected by `args.radio`."""
    print(c.destroy_radio(args.radio))
if __name__ == '__main__':
    import argparse
    # Shared controller used by the create()/destroy() sub-command handlers.
    c = HWSimController()
    parser = argparse.ArgumentParser(description='send hwsim control commands')
    subparsers = parser.add_subparsers(help="Commands", dest='command')
    parser_create = subparsers.add_parser('create', help='create a radio')
    parser_create.add_argument('--channels', metavar='<number_of_channels>', type=int,
                               default=0,
                               help='Number of concurrent channels supported ' +
                                    'by the radio. If not specified, the number ' +
                                    'of channels specified in the ' +
                                    'mac80211_hwsim.channels module parameter is ' +
                                    'used')
    parser_create.add_argument('--chanctx', action="store_true",
                               help='Use channel contexts, regardless of ' +
                                    'whether the number of channels is 1 or ' +
                                    'greater. By default channel contexts are ' +
                                    'only used if the number of channels is ' +
                                    'greater than 1.')
    parser_create.set_defaults(func=create)
    parser_destroy = subparsers.add_parser('destroy', help='destroy a radio')
    parser_destroy.add_argument('radio', metavar='<radio>', type=int,
                                default=0,
                                help='The number of the radio to be ' +
                                     'destroyed (i.e., 0 for phy0, 1 for phy1...)')
    parser_destroy.set_defaults(func=destroy)
    args = parser.parse_args()
    # Dispatch to the handler registered for the chosen sub-command.
    args.func(args)
|
#
# HWSIM generic netlink controller code
# Copyright (c) 2014 Intel Corporation
#
# Author: <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import netlink, os
# constants
# Generic-netlink command and attribute ids of the MAC80211_HWSIM family;
# the numeric values must match the kernel's mac80211_hwsim definitions.
HWSIM_CMD_CREATE_RADIO = 4
HWSIM_CMD_DESTROY_RADIO = 5
HWSIM_ATTR_CHANNELS = 9
HWSIM_ATTR_RADIO_ID = 10
HWSIM_ATTR_SUPPORT_P2P_DEVICE = 14
HWSIM_ATTR_USE_CHANCTX = 15
# the controller class
class HWSimController(object):
    """Generic-netlink client for the mac80211_hwsim kernel module.

    Talks to the MAC80211_HWSIM generic netlink family to create and
    destroy simulated radios.
    """

    def __init__(self):
        self._conn = netlink.Connection(netlink.NETLINK_GENERIC)
        self._fid = netlink.genl_controller.get_family_id(b'MAC80211_HWSIM')

    def create_radio(self, n_channels=None, use_chanctx=False,
                     use_p2p_device=False):
        """Create a new radio and return the kernel's reply code.

        Parameters
        ----------
        n_channels : int or None
            Number of concurrent channels; if falsy, the kernel module
            default (mac80211_hwsim.channels) is used.
        use_chanctx : bool
            Force use of channel contexts.
        use_p2p_device : bool
            Advertise P2P-device support.
        """
        attrs = []
        if n_channels:
            attrs.append(netlink.U32Attr(HWSIM_ATTR_CHANNELS, n_channels))
        if use_chanctx:
            attrs.append(netlink.FlagAttr(HWSIM_ATTR_USE_CHANCTX))
        if use_p2p_device:
            attrs.append(netlink.FlagAttr(HWSIM_ATTR_SUPPORT_P2P_DEVICE))
        msg = netlink.GenlMessage(self._fid, HWSIM_CMD_CREATE_RADIO,
                                  flags=netlink.NLM_F_REQUEST |
                                  netlink.NLM_F_ACK,
                                  attrs=attrs)
        return msg.send_and_recv(self._conn).ret

    def destroy_radio(self, radio_id):
        """Destroy radio *radio_id* and return the netlink ack code.

        The ack was previously discarded, so callers that printed the
        result (e.g. the 'destroy' CLI handler) always saw None; now the
        return value mirrors create_radio for consistency.
        """
        attrs = [netlink.U32Attr(HWSIM_ATTR_RADIO_ID, radio_id)]
        msg = netlink.GenlMessage(self._fid, HWSIM_CMD_DESTROY_RADIO,
                                  flags=netlink.NLM_F_REQUEST |
                                  netlink.NLM_F_ACK,
                                  attrs=attrs)
        return msg.send_and_recv(self._conn).ret
class HWSimRadio(object):
    """Context manager wrapping a temporary mac80211_hwsim radio.

    On entry a radio is created and ``(radio_id, interface_name)`` is
    returned; on exit the radio is destroyed again.
    """
    def __init__(self, n_channels=None, use_chanctx=False,
                 use_p2p_device=False):
        # Creation is deferred to __enter__ so construction stays cheap.
        self._controller = HWSimController()
        self._n_channels = n_channels
        self._use_chanctx = use_chanctx
        self._use_p2p_dev = use_p2p_device
    def __enter__(self):
        """Create the radio and return ``(radio_id, iface_name)``."""
        self._radio_id = self._controller.create_radio(
            n_channels=self._n_channels,
            use_chanctx=self._use_chanctx,
            use_p2p_device=self._use_p2p_dev)
        if self._radio_id < 0:
            raise Exception("Failed to create radio (err:%d)" % self._radio_id)
        try:
            # The kernel exposes the new netdev in sysfs; take the (single)
            # interface name it created for this radio.
            iface = os.listdir('/sys/class/mac80211_hwsim/hwsim%d/net/' % self._radio_id)[0]
        except Exception as e:
            # Don't leak the radio if the sysfs lookup fails.
            self._controller.destroy_radio(self._radio_id)
            raise e
        return self._radio_id, iface
    def __exit__(self, type, value, traceback):
        """Destroy the radio; exceptions from the with-body propagate."""
        self._controller.destroy_radio(self._radio_id)
def create(args):
    """CLI handler: create a hwsim radio with the parsed options and report its index."""
    radio_id = c.create_radio(n_channels=args.channels,
                              use_chanctx=args.chanctx)
    print('Created radio %d' % radio_id)
def destroy(args):
    """CLI handler: destroy the radio selected on the command line and print the result."""
    result = c.destroy_radio(args.radio)
    print(result)
if __name__ == '__main__':
    import argparse

    # Single controller instance shared by the 'create'/'destroy' handlers.
    c = HWSimController()

    parser = argparse.ArgumentParser(description='send hwsim control commands')
    subparsers = parser.add_subparsers(help="Commands", dest='command')

    parser_create = subparsers.add_parser('create', help='create a radio')
    parser_create.add_argument('--channels', metavar='<number_of_channels>', type=int,
                               default=0,
                               help='Number of concurrent channels supported ' +
                               'by the radio. If not specified, the number ' +
                               'of channels specified in the ' +
                               'mac80211_hwsim.channels module parameter is ' +
                               'used')
    parser_create.add_argument('--chanctx', action="store_true",
                               help='Use channel contexts, regardless of ' +
                               'whether the number of channels is 1 or ' +
                               'greater. By default channel contexts are ' +
                               'only used if the number of channels is ' +
                               'greater than 1.')
    parser_create.set_defaults(func=create)

    parser_destroy = subparsers.add_parser('destroy', help='destroy a radio')
    parser_destroy.add_argument('radio', metavar='<radio>', type=int,
                                default=0,
                                help='The number of the radio to be ' +
                                'destroyed (i.e., 0 for phy0, 1 for phy1...)')
    parser_destroy.set_defaults(func=destroy)

    # Dispatch to the handler registered for the chosen sub-command.
    args = parser.parse_args()
    args.func(args)
|
en
| 0.675432
|
# # HWSIM generic netlink controller code # Copyright (c) 2014 Intel Corporation # # Author: <NAME> <<EMAIL>> # # This software may be distributed under the terms of the BSD license. # See README for more details. # constants # the controller class
| 2.212451
| 2
|
ksz_power/parameters.py
|
adeliegorce/tools4reionisation
| 0
|
6629121
|
import numpy as np
from astropy import cosmology, units, constants

#######################################
########### System settings ###########
#######################################
n_threads = 1  # number of worker threads
folder = './'
outroot = folder+"/kSZ_power_spectrum" # root of all output files
debug = True  # debug mode shrinks the ell sampling (see n_ells_kSZ below)
late_time = True #if you want to compute the late-time component of the kSZ too

##########################
#### Cosmo parameters ####
##########################
h = 0.6774000  # dimensionless Hubble parameter
Om_0 = 0.309   # total matter density today
Ol_0 = 0.691   # dark-energy density today
Ob_0 = 0.049   # baryon density today
obh2 = Ob_0 * h**2           # physical baryon density
och2 = (Om_0 - Ob_0) * h**2  # physical CDM density (matter minus baryons)
A_s = 2.139e-9  # primordial scalar amplitude
n_s = 0.9677    # scalar spectral index
T_cmb = 2.7255
cos=cosmology.FlatLambdaCDM(H0=h*100,Tcmb0=T_cmb,Ob0=Ob_0,Om0=Om_0)
Yp = 0.2453  # primordial helium mass fraction
Xp = 1-Yp    # hydrogen mass fraction
mh = constants.m_n.value #kg
rhoc = cos.critical_density0.si.value #kg m-3
nh = Xp*Ob_0*rhoc/mh # m-3 (mean hydrogen number density today)
xe_recomb = 1.7e-4  # residual ionised fraction left after recombination
# NOTE(review): T_CMB (2.7260 K) differs slightly from the T_cmb = 2.7255 K
# used in the cosmology object above -- confirm which value is intended.
T_CMB=2.7260 #K
T_CMB_uK=T_CMB*1e6

###################
#### Constants ####
###################
s_T = constants.sigma_T.value # sigma_thomson in SI units [m^2]
c = constants.c.value # speed of light in SI units [m.s-1]
Mpcm = (1.0 * units.Mpc).to(units.m).value # one Mpc in [m]
Mpckm = Mpcm / 1e3  # one Mpc in [km]

#######################################
###### REIONISATION PARAMETERS ########
#######################################
# parameters for reionisation history
asym = True #asymmetric or tanh model for xe(z)
zend = 5.5     # redshift at which hydrogen reionisation ends
zre = 7.       # mid-point redshift of reionisation
z_early = 20.  # redshift at which reionisation starts
# reionisation of Helium
HeliumI = True    # include first helium reionisation
HeliumII = False  # include second helium reionisation
fH = 1.
if HeliumI:
    not4 = 3.9715 #eta (He/H mass ratio)
    fHe = Yp/(not4*(1-Yp))  # helium-to-hydrogen number ratio
    fH=1+fHe                # electrons per hydrogen atom once He is singly ionised
    helium_fullreion_redshift = 3.5
    helium_fullreion_start = 5.0
    helium_fullreion_deltaredshift = 0.5
# parameters for Pee (electron power spectrum model)
alpha0 = 3.7
kappa = 0.10

#########################################
#### Settings for C_ells computation ####
#########################################
### linear ell range for kSZ C_ells
ell_min_kSZ = 1.
ell_max_kSZ = 10000.
n_ells_kSZ = 60
if debug:
    n_ells_kSZ = 2  # keep debug runs fast

########################################
#### Integration/precision settings ####
########################################
### Settings for theta integration
num_th = 50
# open interval (0, pi): endpoints excluded to avoid the poles of the integrand
th_integ = np.linspace(0.000001,np.pi*0.999999,num_th)
mu = np.cos(th_integ)#cos(k.k')
### Settings for k' (=kp) integration
# k' array in [Mpc-1] - over which you integrate
min_logkp = -5.
max_logkp = 1.5
dlogkp = 0.05
# log-spaced grid with step dlogkp in log10(k'); the int(...) count relies on
# the range being an exact multiple of dlogkp
kp_integ = np.logspace(
    min_logkp,
    max_logkp,
    int((max_logkp - min_logkp) / dlogkp) + 1
)
### Settings for z integration
z_min = 0.0015
z_piv = 1.
z_max = 20.
dlogz = 0.05
dz = 0.05
### Setting for P(k) computation
kmin_pk = 10**min_logkp
kmax_pk = 10**max_logkp
nk_pk = 10001
### ell range for TT, EE, TE C_ells
ell_max_CMB = 2000
|
import numpy as np
from astropy import cosmology, units, constants

#######################################
########### System settings ###########
#######################################
n_threads = 1  # number of worker threads
folder = './'
outroot = folder+"/kSZ_power_spectrum" # root of all output files
debug = True  # debug mode shrinks the ell sampling (see n_ells_kSZ below)
late_time = True #if you want to compute the late-time component of the kSZ too

##########################
#### Cosmo parameters ####
##########################
h = 0.6774000  # dimensionless Hubble parameter
Om_0 = 0.309   # total matter density today
Ol_0 = 0.691   # dark-energy density today
Ob_0 = 0.049   # baryon density today
obh2 = Ob_0 * h**2           # physical baryon density
och2 = (Om_0 - Ob_0) * h**2  # physical CDM density (matter minus baryons)
A_s = 2.139e-9  # primordial scalar amplitude
n_s = 0.9677    # scalar spectral index
T_cmb = 2.7255
cos=cosmology.FlatLambdaCDM(H0=h*100,Tcmb0=T_cmb,Ob0=Ob_0,Om0=Om_0)
Yp = 0.2453  # primordial helium mass fraction
Xp = 1-Yp    # hydrogen mass fraction
mh = constants.m_n.value #kg
rhoc = cos.critical_density0.si.value #kg m-3
nh = Xp*Ob_0*rhoc/mh # m-3 (mean hydrogen number density today)
xe_recomb = 1.7e-4  # residual ionised fraction left after recombination
# NOTE(review): T_CMB (2.7260 K) differs slightly from the T_cmb = 2.7255 K
# used in the cosmology object above -- confirm which value is intended.
T_CMB=2.7260 #K
T_CMB_uK=T_CMB*1e6

###################
#### Constants ####
###################
s_T = constants.sigma_T.value # sigma_thomson in SI units [m^2]
c = constants.c.value # speed of light in SI units [m.s-1]
Mpcm = (1.0 * units.Mpc).to(units.m).value # one Mpc in [m]
Mpckm = Mpcm / 1e3  # one Mpc in [km]

#######################################
###### REIONISATION PARAMETERS ########
#######################################
# parameters for reionisation history
asym = True #asymmetric or tanh model for xe(z)
zend = 5.5     # redshift at which hydrogen reionisation ends
zre = 7.       # mid-point redshift of reionisation
z_early = 20.  # redshift at which reionisation starts
# reionisation of Helium
HeliumI = True    # include first helium reionisation
HeliumII = False  # include second helium reionisation
fH = 1.
if HeliumI:
    not4 = 3.9715 #eta (He/H mass ratio)
    fHe = Yp/(not4*(1-Yp))  # helium-to-hydrogen number ratio
    fH=1+fHe                # electrons per hydrogen atom once He is singly ionised
    helium_fullreion_redshift = 3.5
    helium_fullreion_start = 5.0
    helium_fullreion_deltaredshift = 0.5
# parameters for Pee (electron power spectrum model)
alpha0 = 3.7
kappa = 0.10

#########################################
#### Settings for C_ells computation ####
#########################################
### linear ell range for kSZ C_ells
ell_min_kSZ = 1.
ell_max_kSZ = 10000.
n_ells_kSZ = 60
if debug:
    n_ells_kSZ = 2  # keep debug runs fast

########################################
#### Integration/precision settings ####
########################################
### Settings for theta integration
num_th = 50
# open interval (0, pi): endpoints excluded to avoid the poles of the integrand
th_integ = np.linspace(0.000001,np.pi*0.999999,num_th)
mu = np.cos(th_integ)#cos(k.k')
### Settings for k' (=kp) integration
# k' array in [Mpc-1] - over which you integrate
min_logkp = -5.
max_logkp = 1.5
dlogkp = 0.05
# log-spaced grid with step dlogkp in log10(k'); the int(...) count relies on
# the range being an exact multiple of dlogkp
kp_integ = np.logspace(
    min_logkp,
    max_logkp,
    int((max_logkp - min_logkp) / dlogkp) + 1
)
### Settings for z integration
z_min = 0.0015
z_piv = 1.
z_max = 20.
dlogz = 0.05
dz = 0.05
### Setting for P(k) computation
kmin_pk = 10**min_logkp
kmax_pk = 10**max_logkp
nk_pk = 10001
### ell range for TT, EE, TE C_ells
ell_max_CMB = 2000
|
de
| 0.457448
|
####################################### ########### System settings ########### ####################################### # root of all output files #if you want to compute the late-time component of the kSZ too ########################## #### Cosmo parameters #### ########################## #kg #kg m-3 # m-3 #K ################### #### Constants #### ################### # sigma_thomson in SI units [m^2] # speed of light in SI units [m.s-1] # one Mpc in [m] ####################################### ###### REIONISATION PARAMETERS ######## ####################################### # parameters for reionisation history #asymmetric or tanh model for xe(z) # reionisation of Helium #eta # parameters for Pee ######################################### #### Settings for C_ells computation #### ######################################### ### linear ell range for kSZ C_ells ######################################## #### Integration/precision settings #### ######################################## ### Settings for theta integration #cos(k.k') ### Settings for k' (=kp) integration # k' array in [Mpc-1] - over which you integrate ### Settings for z integration ### Setting for P(k) computation ### ell range for TT, EE, TE C_ells
| 2.14729
| 2
|
tests/fullscale/eqinfo/test_eqinfo.py
|
cehanagan/pylith
| 93
|
6629122
|
#!/usr/bin/env nemesis
#
# ======================================================================
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
import unittest
from pylith.testing.FullTestApp import TestDriver
class TestApp(TestDriver):
    """Driver application for the eqinfo full-scale tests."""

    def __init__(self):
        """Constructor."""
        TestDriver.__init__(self)
        return

    def _suite(self):
        """Create test suite from the eqinfo test cases.

        Returns
        -------
        unittest.TestSuite
            Suite aggregating the line, tri, and quad eqinfo tests.
        """
        # unittest.makeSuite() is deprecated since Python 3.11 and removed
        # in 3.13; load the cases through a TestLoader instead.
        loader = unittest.defaultTestLoader
        suite = unittest.TestSuite()

        from TestEqInfoLine import TestEqInfoLine
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoLine))

        from TestEqInfoTri import TestEqInfoTri
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoTri))

        from TestEqInfoQuad import TestEqInfoQuad
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoQuad))

        return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
    # Run the full eqinfo test suite when executed as a script.
    TestApp().main()
# End of file
|
#!/usr/bin/env nemesis
#
# ======================================================================
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
import unittest
from pylith.testing.FullTestApp import TestDriver
class TestApp(TestDriver):
    """Driver application for the eqinfo full-scale tests."""

    def __init__(self):
        """Constructor."""
        TestDriver.__init__(self)
        return

    def _suite(self):
        """Create test suite from the eqinfo test cases.

        Returns
        -------
        unittest.TestSuite
            Suite aggregating the line, tri, and quad eqinfo tests.
        """
        # unittest.makeSuite() is deprecated since Python 3.11 and removed
        # in 3.13; load the cases through a TestLoader instead.
        loader = unittest.defaultTestLoader
        suite = unittest.TestSuite()

        from TestEqInfoLine import TestEqInfoLine
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoLine))

        from TestEqInfoTri import TestEqInfoTri
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoTri))

        from TestEqInfoQuad import TestEqInfoQuad
        suite.addTest(loader.loadTestsFromTestCase(TestEqInfoQuad))

        return suite
# ----------------------------------------------------------------------
if __name__ == '__main__':
    # Run the full eqinfo test suite when executed as a script.
    TestApp().main()
# End of file
|
en
| 0.641376
|
#!/usr/bin/env nemesis # # ====================================================================== # # <NAME>, U.S. Geological Survey # <NAME>, GNS Science # <NAME>, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ====================================================================== # Driver application for full-scale tests. Constructor. Create test suite. # ---------------------------------------------------------------------- # End of file
| 2.438053
| 2
|
napari/_qt/utils.py
|
MaksHess/napari
| 7
|
6629123
|
<reponame>MaksHess/napari
from __future__ import annotations
import re
import signal
import socket
import weakref
from contextlib import contextmanager
from functools import lru_cache, partial
from typing import Sequence, Union
import numpy as np
import qtpy
from qtpy.QtCore import (
QByteArray,
QObject,
QPoint,
QPropertyAnimation,
QSize,
QSocketNotifier,
Qt,
Signal,
)
from qtpy.QtGui import QColor, QCursor, QDrag, QImage, QPainter, QPen, QPixmap
from qtpy.QtWidgets import (
QGraphicsColorizeEffect,
QGraphicsOpacityEffect,
QHBoxLayout,
QListWidget,
QVBoxLayout,
QWidget,
)
from ..utils.colormaps.standardize_color import transform_color
from ..utils.events.custom_types import Array
from ..utils.misc import is_sequence
from ..utils.translations import trans
QBYTE_FLAG = "!QBYTE_"
RICH_TEXT_PATTERN = re.compile("<[^\n]+>")
def is_qbyte(string: str) -> bool:
    """Return True if *string* is a flagged QByteArray state string.

    Parameters
    ----------
    string : str
        Candidate state string.
    """
    if not isinstance(string, str):
        return False
    return string.startswith(QBYTE_FLAG)
def qbytearray_to_str(qbyte: QByteArray) -> str:
    """Serialize a QByteArray window state into a flagged base64 string.

    Used for restoring the state of the main window.

    Parameters
    ----------
    qbyte : QByteArray
        State array.
    """
    encoded = qbyte.toBase64().data().decode()
    return QBYTE_FLAG + encoded
def str_to_qbytearray(string: str) -> QByteArray:
    """Deserialize a flagged state string back into a QByteArray.

    Used for restoring the state of the main window.

    Parameters
    ----------
    string : str
        State string produced by ``qbytearray_to_str``.

    Raises
    ------
    ValueError
        If *string* does not carry the QByte flag prefix.
    """
    if not is_qbyte(string) or len(string) < len(QBYTE_FLAG):
        raise ValueError(
            trans._(
                "Invalid QByte string. QByte strings start with '{QBYTE_FLAG}'",
                QBYTE_FLAG=QBYTE_FLAG,
            )
        )
    payload = string[len(QBYTE_FLAG) :]
    return QByteArray.fromBase64(payload.encode())
def QImg2array(img):
    """Convert QImage to an array.

    Parameters
    ----------
    img : qtpy.QtGui.QImage
        QImage to be converted.

    Returns
    -------
    arr : array
        Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
        upper-left corner of the rendered region.
    """
    # Fix when image is provided in wrong format (ex. test on Azure pipelines)
    if img.format() != QImage.Format_ARGB32:
        img = img.convertToFormat(QImage.Format_ARGB32)
    b = img.constBits()
    h, w, c = img.height(), img.width(), 4

    # As vispy doesn't use qtpy we need to reconcile the differences
    # between the `QImage` API for `PySide2` and `PyQt5` on how to convert
    # a QImage to a numpy array.
    if qtpy.API_NAME == 'PySide2':
        # PySide2 exposes a buffer numpy can consume directly.
        arr = np.array(b).reshape(h, w, c)
    else:
        # PyQt5: the buffer's byte length must be set before numpy reads it.
        b.setsize(h * w * c)
        arr = np.frombuffer(b, np.uint8).reshape(h, w, c)

    # Format of QImage is ARGB32_Premultiplied, but color channels are
    # reversed.
    arr = arr[:, :, [2, 1, 0, 3]]
    return arr
@contextmanager
def qt_signals_blocked(obj):
    """Temporarily block Qt signals emitted by *obj*.

    The previous blocking state is restored on exit, even if the body
    raises.
    """
    prior_state = obj.blockSignals(True)
    try:
        yield
    finally:
        obj.blockSignals(prior_state)
@contextmanager
def event_hook_removed():
    """Context manager that temporarily removes the PyQt5 input hook.

    The hook is removed on entry (when the active binding provides it)
    and restored on exit, even if the body raises.
    """
    from qtpy import QtCore

    remover = getattr(QtCore, 'pyqtRemoveInputHook', None)
    if remover is not None:
        remover()
    try:
        yield
    finally:
        restorer = getattr(QtCore, 'pyqtRestoreInputHook', None)
        if restorer is not None:
            restorer()
def disable_with_opacity(obj, widget_list, enabled):
    """Set enabled state on a list of widgets; dim the ones that are disabled.

    Parameters
    ----------
    obj : QObject
        Object whose attributes name the widgets to update.
    widget_list : iterable of str
        Attribute names of the widgets on *obj*.
    enabled : bool
        Target enabled state; disabled widgets are drawn at 50% opacity.
    """
    opacity = 1 if enabled else 0.5
    for widget_name in widget_list:
        target = getattr(obj, widget_name)
        target.setEnabled(enabled)
        effect = QGraphicsOpacityEffect(obj)
        effect.setOpacity(opacity)
        target.setGraphicsEffect(effect)
@lru_cache(maxsize=64)
def square_pixmap(size):
    """Create a white/black hollow square pixmap. For use as labels cursor.

    Parameters
    ----------
    size : int
        Side length of the pixmap in pixels (clamped to >= 1).

    Returns
    -------
    QPixmap
        Transparent pixmap with a white outer and black inner square outline.
    """
    size = max(int(size), 1)
    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)
    # White outline, then a black outline one pixel inside it.
    painter.setPen(Qt.white)
    painter.drawRect(0, 0, size - 1, size - 1)
    painter.setPen(Qt.black)
    painter.drawRect(1, 1, size - 3, size - 3)
    painter.end()
    return pixmap
@lru_cache(maxsize=64)
def crosshair_pixmap():
    """Create a cross cursor with white/black hollow square pixmap in the middle.

    For use as points cursor.

    Returns
    -------
    QPixmap
        25x25 transparent pixmap with the crosshair drawn in white with
        black accents.
    """
    size = 25

    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)

    # Base measures
    width = 1
    center = 3  # Must be odd!
    rect_size = center + 2 * width
    square = rect_size + width * 4

    pen = QPen(Qt.white, 1)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # Horizontal rectangle
    painter.drawRect(0, (size - rect_size) // 2, size - 1, rect_size - 1)

    # Vertical rectangle
    painter.drawRect((size - rect_size) // 2, 0, rect_size - 1, size - 1)

    # Square
    painter.drawRect(
        (size - square) // 2, (size - square) // 2, square - 1, square - 1
    )

    pen = QPen(Qt.black, 2)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # Square
    painter.drawRect(
        (size - square) // 2 + 2,
        (size - square) // 2 + 2,
        square - 4,
        square - 4,
    )

    pen = QPen(Qt.black, 3)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # # Horizontal lines
    mid_vpoint = QPoint(2, size // 2)
    painter.drawLine(
        mid_vpoint, QPoint(((size - center) // 2) - center + 1, size // 2)
    )
    mid_vpoint = QPoint(size - 3, size // 2)
    painter.drawLine(
        mid_vpoint, QPoint(((size - center) // 2) + center + 1, size // 2)
    )

    # # # Vertical lines
    mid_hpoint = QPoint(size // 2, 2)
    painter.drawLine(
        QPoint(size // 2, ((size - center) // 2) - center + 1), mid_hpoint
    )
    mid_hpoint = QPoint(size // 2, size - 3)
    painter.drawLine(
        QPoint(size // 2, ((size - center) // 2) + center + 1), mid_hpoint
    )

    painter.end()
    return pixmap
@lru_cache(maxsize=64)
def circle_pixmap(size: int):
    """Create a white/black hollow circle pixmap. For use as labels cursor.

    Parameters
    ----------
    size : int
        Diameter of the pixmap in pixels (clamped to >= 1).

    Returns
    -------
    QPixmap
        Transparent pixmap with a white outer and black inner circle outline.
    """
    size = max(int(size), 1)
    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)
    # White outline, then a black outline one pixel inside it.
    painter.setPen(Qt.white)
    painter.drawEllipse(0, 0, size - 1, size - 1)
    painter.setPen(Qt.black)
    painter.drawEllipse(1, 1, size - 3, size - 3)
    painter.end()
    return pixmap
def drag_with_pixmap(list_widget: QListWidget) -> QDrag:
    """Create a QDrag object with a pixmap of the currently selected list item.

    This method is useful when you have a QListWidget that displays custom
    widgets for each QListWidgetItem instance in the list (usually by calling
    ``QListWidget.setItemWidget(item, widget)``). When used in a
    ``QListWidget.startDrag`` method, this function creates a QDrag object that
    shows an image of the item being dragged (rather than an empty rectangle).

    Parameters
    ----------
    list_widget : QListWidget
        The QListWidget for which to create a QDrag object.

    Returns
    -------
    QDrag
        A QDrag instance with a pixmap of the currently selected item.

    Examples
    --------
    >>> class QListWidget:
    ...     def startDrag(self, supportedActions):
    ...         drag = drag_with_pixmap(self)
    ...         drag.exec_(supportedActions, Qt.MoveAction)
    """
    drag = QDrag(list_widget)
    drag.setMimeData(list_widget.mimeData(list_widget.selectedItems()))
    # Render each selected row into a transparent pixmap sized to the
    # visible viewport so the drag image shows the actual item widgets.
    size = list_widget.viewport().visibleRegion().boundingRect().size()
    pixmap = QPixmap(size)
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)
    for index in list_widget.selectedIndexes():
        rect = list_widget.visualRect(index)
        painter.drawPixmap(rect, list_widget.viewport().grab(rect))
    painter.end()
    drag.setPixmap(pixmap)
    # Anchor the image under the mouse cursor position within the viewport.
    drag.setHotSpot(list_widget.viewport().mapFromGlobal(QCursor.pos()))
    return drag
def combine_widgets(
    widgets: Union[QWidget, Sequence[QWidget]], vertical: bool = False
) -> QWidget:
    """Combine a list of widgets into a single QWidget with Layout.

    Parameters
    ----------
    widgets : QWidget or sequence of QWidget
        A widget or a list of widgets to combine.
    vertical : bool, optional
        Whether the layout should be QVBoxLayout or not, by default
        QHBoxLayout is used

    Returns
    -------
    QWidget
        If ``widgets`` is a sequence, returns combined QWidget with `.layout`
        property, otherwise returns the original widget.

    Raises
    ------
    TypeError
        If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``.
    """
    # magicgui v0.2.0 compatibility: like vispy, the backend Qt widget
    # lives at ``widget.native`` rather than being a QWidget itself.
    if isinstance(getattr(widgets, 'native', None), QWidget):
        return widgets.native  # type: ignore
    if isinstance(widgets, QWidget):
        return widgets
    if is_sequence(widgets):
        # Unwrap any magicgui-style wrappers in the sequence as well.
        unwrapped = [
            w.native if isinstance(getattr(w, 'native', None), QWidget) else w
            for w in widgets
        ]
        if all(isinstance(w, QWidget) for w in unwrapped):
            container = QWidget()
            container.setLayout(QVBoxLayout() if vertical else QHBoxLayout())
            for w in unwrapped:
                container.layout().addWidget(w)
            return container
    raise TypeError(
        trans._('"widget" must be a QWidget or a sequence of QWidgets')
    )
def add_flash_animation(
    widget: QWidget, duration: int = 300, color: Array = (0.5, 0.5, 0.5, 0.5)
):
    """Add flash animation to widget to highlight certain action (e.g. taking a screenshot).

    Parameters
    ----------
    widget : QWidget
        Any Qt widget.
    duration : int
        Duration of the flash animation.
    color : Array
        Color of the flash animation. By default, we use light gray.
    """
    color = transform_color(color)[0]
    # Scale normalized RGBA floats to 0-255 ints for QColor.
    color = (255 * color).astype("int")

    effect = QGraphicsColorizeEffect(widget)
    widget.setGraphicsEffect(effect)
    widget._flash_animation = QPropertyAnimation(effect, b"color")
    # Start and end fully transparent; the visible flash comes from the
    # intermediate key value set below.
    widget._flash_animation.setStartValue(QColor(0, 0, 0, 0))
    widget._flash_animation.setEndValue(QColor(0, 0, 0, 0))
    widget._flash_animation.setLoopCount(1)

    # let's make sure to remove the animation from the widget because
    # if we don't, the widget will actually be black and white.
    widget._flash_animation.finished.connect(
        partial(remove_flash_animation, weakref.ref(widget))
    )

    widget._flash_animation.start()

    # now set an actual time for the flashing and an intermediate color
    widget._flash_animation.setDuration(duration)
    widget._flash_animation.setKeyValueAt(0.1, QColor(*color))
def remove_flash_animation(widget_ref: weakref.ref[QWidget]):
"""Remove flash animation from widget.
Parameters
----------
widget_ref : QWidget
Any Qt widget.
"""
if widget_ref() is None:
return
widget = widget_ref()
try:
widget.setGraphicsEffect(None)
del widget._flash_animation
except RuntimeError:
# RuntimeError: wrapped C/C++ object of type QtWidgetOverlay deleted
pass
@contextmanager
def _maybe_allow_interrupt(qapp):
    """
    This manager allows to terminate a plot by sending a SIGINT. It is
    necessary because the running Qt backend prevents Python interpreter to
    run and process signals (i.e., to raise KeyboardInterrupt exception). To
    solve this one needs to somehow wake up the interpreter and make it close
    the plot window. We do this by using the signal.set_wakeup_fd() function
    which organizes a write of the signal number into a socketpair connected
    to the QSocketNotifier (since it is part of the Qt backend, it can react
    to that write event). Afterwards, the Qt handler empties the socketpair
    by a recv() command to re-arm it (we need this if a signal different from
    SIGINT was caught by set_wakeup_fd() and we shall continue waiting). If
    the SIGINT was caught indeed, after exiting the on_signal() function the
    interpreter reacts to the SIGINT according to the handle() function which
    had been set up by a signal.signal() call: it causes the qt_object to
    exit by calling its quit() method. Finally, we call the old SIGINT
    handler with the same arguments that were given to our custom handle()
    handler.

    We do this only if the old handler for SIGINT was not None, which means
    that a non-python handler was installed, i.e. in Julia, and not SIG_IGN
    which means we should ignore the interrupts.

    code from https://github.com/matplotlib/matplotlib/pull/13306
    """
    old_sigint_handler = signal.getsignal(signal.SIGINT)
    handler_args = None
    skip = False
    if old_sigint_handler in (None, signal.SIG_IGN, signal.SIG_DFL):
        # No Python-level handler to forward to; leave signals untouched.
        skip = True
    else:
        wsock, rsock = socket.socketpair()
        wsock.setblocking(False)
        old_wakeup_fd = signal.set_wakeup_fd(wsock.fileno())
        sn = QSocketNotifier(rsock.fileno(), QSocketNotifier.Type.Read)

        # Clear the socket to re-arm the notifier.
        sn.activated.connect(lambda *args: rsock.recv(1))

        def handle(*args):
            nonlocal handler_args
            handler_args = args
            qapp.exit()

        signal.signal(signal.SIGINT, handle)
    try:
        yield
    finally:
        if not skip:
            # Tear down the wakeup plumbing, restore the original handler,
            # and re-deliver any SIGINT that arrived while we were active.
            wsock.close()
            rsock.close()
            sn.setEnabled(False)
            signal.set_wakeup_fd(old_wakeup_fd)
            signal.signal(signal.SIGINT, old_sigint_handler)
            if handler_args is not None:
                old_sigint_handler(*handler_args)
class Sentry(QObject):
    """Small object to trigger events across threads."""

    # Emitted each time alert() is called.
    alerted = Signal()

    def alert(self, *_):
        """Emit the ``alerted`` signal; positional arguments are ignored."""
        self.alerted.emit()
def qt_might_be_rich_text(text) -> bool:
    """
    Check if a text might be rich text in a cross-binding compatible way.
    """
    # The ``mightBeRichText`` helper lives in different modules depending on
    # the binding (QtGui for PySide2, QtCore otherwise).
    if qtpy.PYSIDE2:
        from qtpy.QtGui import Qt as _Qt
    else:
        from qtpy.QtCore import Qt as _Qt
    try:
        return _Qt.mightBeRichText(text)
    except Exception:
        # Binding does not expose the helper; fall back to a crude tag check.
        return bool(RICH_TEXT_PATTERN.search(text))
|
from __future__ import annotations
import re
import signal
import socket
import weakref
from contextlib import contextmanager
from functools import lru_cache, partial
from typing import Sequence, Union
import numpy as np
import qtpy
from qtpy.QtCore import (
QByteArray,
QObject,
QPoint,
QPropertyAnimation,
QSize,
QSocketNotifier,
Qt,
Signal,
)
from qtpy.QtGui import QColor, QCursor, QDrag, QImage, QPainter, QPen, QPixmap
from qtpy.QtWidgets import (
QGraphicsColorizeEffect,
QGraphicsOpacityEffect,
QHBoxLayout,
QListWidget,
QVBoxLayout,
QWidget,
)
from ..utils.colormaps.standardize_color import transform_color
from ..utils.events.custom_types import Array
from ..utils.misc import is_sequence
from ..utils.translations import trans
QBYTE_FLAG = "!QBYTE_"
RICH_TEXT_PATTERN = re.compile("<[^\n]+>")
def is_qbyte(string: str) -> bool:
    """Return True if *string* is a flagged QByteArray state string.

    Parameters
    ----------
    string : str
        Candidate state string.
    """
    if not isinstance(string, str):
        return False
    return string.startswith(QBYTE_FLAG)
def qbytearray_to_str(qbyte: QByteArray) -> str:
    """Serialize a QByteArray window state into a flagged base64 string.

    Used for restoring the state of the main window.

    Parameters
    ----------
    qbyte : QByteArray
        State array.
    """
    encoded = qbyte.toBase64().data().decode()
    return QBYTE_FLAG + encoded
def str_to_qbytearray(string: str) -> QByteArray:
    """Deserialize a flagged state string back into a QByteArray.

    Used for restoring the state of the main window.

    Parameters
    ----------
    string : str
        State string produced by ``qbytearray_to_str``.

    Raises
    ------
    ValueError
        If *string* does not carry the QByte flag prefix.
    """
    if not is_qbyte(string) or len(string) < len(QBYTE_FLAG):
        raise ValueError(
            trans._(
                "Invalid QByte string. QByte strings start with '{QBYTE_FLAG}'",
                QBYTE_FLAG=QBYTE_FLAG,
            )
        )
    payload = string[len(QBYTE_FLAG) :]
    return QByteArray.fromBase64(payload.encode())
def QImg2array(img):
    """Convert QImage to an array.

    Parameters
    ----------
    img : qtpy.QtGui.QImage
        QImage to be converted.

    Returns
    -------
    arr : array
        Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
        upper-left corner of the rendered region.
    """
    # Fix when image is provided in wrong format (ex. test on Azure pipelines)
    if img.format() != QImage.Format_ARGB32:
        img = img.convertToFormat(QImage.Format_ARGB32)
    b = img.constBits()
    h, w, c = img.height(), img.width(), 4

    # As vispy doesn't use qtpy we need to reconcile the differences
    # between the `QImage` API for `PySide2` and `PyQt5` on how to convert
    # a QImage to a numpy array.
    if qtpy.API_NAME == 'PySide2':
        # PySide2 exposes a buffer numpy can consume directly.
        arr = np.array(b).reshape(h, w, c)
    else:
        # PyQt5: the buffer's byte length must be set before numpy reads it.
        b.setsize(h * w * c)
        arr = np.frombuffer(b, np.uint8).reshape(h, w, c)

    # Format of QImage is ARGB32_Premultiplied, but color channels are
    # reversed.
    arr = arr[:, :, [2, 1, 0, 3]]
    return arr
@contextmanager
def qt_signals_blocked(obj):
    """Temporarily block Qt signals emitted by *obj*.

    The previous blocking state is restored on exit, even if the body
    raises.
    """
    prior_state = obj.blockSignals(True)
    try:
        yield
    finally:
        obj.blockSignals(prior_state)
@contextmanager
def event_hook_removed():
    """Context manager that temporarily removes the PyQt5 input hook.

    The hook is removed on entry (when the active binding provides it)
    and restored on exit, even if the body raises.
    """
    from qtpy import QtCore

    remover = getattr(QtCore, 'pyqtRemoveInputHook', None)
    if remover is not None:
        remover()
    try:
        yield
    finally:
        restorer = getattr(QtCore, 'pyqtRestoreInputHook', None)
        if restorer is not None:
            restorer()
def disable_with_opacity(obj, widget_list, enabled):
    """Set enabled state on a list of widgets; dim the ones that are disabled.

    Parameters
    ----------
    obj : QObject
        Object whose attributes name the widgets to update.
    widget_list : iterable of str
        Attribute names of the widgets on *obj*.
    enabled : bool
        Target enabled state; disabled widgets are drawn at 50% opacity.
    """
    opacity = 1 if enabled else 0.5
    for widget_name in widget_list:
        target = getattr(obj, widget_name)
        target.setEnabled(enabled)
        effect = QGraphicsOpacityEffect(obj)
        effect.setOpacity(opacity)
        target.setGraphicsEffect(effect)
@lru_cache(maxsize=64)
def square_pixmap(size):
    """Create a white/black hollow square pixmap. For use as labels cursor.

    Parameters
    ----------
    size : int
        Side length of the pixmap in pixels (clamped to >= 1).

    Returns
    -------
    QPixmap
        Transparent pixmap with a white outer and black inner square outline.
    """
    size = max(int(size), 1)
    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)
    # White outline, then a black outline one pixel inside it.
    painter.setPen(Qt.white)
    painter.drawRect(0, 0, size - 1, size - 1)
    painter.setPen(Qt.black)
    painter.drawRect(1, 1, size - 3, size - 3)
    painter.end()
    return pixmap
@lru_cache(maxsize=64)
def crosshair_pixmap():
    """Create a cross cursor with white/black hollow square pixmap in the middle.

    For use as points cursor.

    Returns
    -------
    QPixmap
        25x25 transparent pixmap with the crosshair drawn in white with
        black accents.
    """
    size = 25

    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)

    # Base measures
    width = 1
    center = 3  # Must be odd!
    rect_size = center + 2 * width
    square = rect_size + width * 4

    pen = QPen(Qt.white, 1)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # Horizontal rectangle
    painter.drawRect(0, (size - rect_size) // 2, size - 1, rect_size - 1)

    # Vertical rectangle
    painter.drawRect((size - rect_size) // 2, 0, rect_size - 1, size - 1)

    # Square
    painter.drawRect(
        (size - square) // 2, (size - square) // 2, square - 1, square - 1
    )

    pen = QPen(Qt.black, 2)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # Square
    painter.drawRect(
        (size - square) // 2 + 2,
        (size - square) // 2 + 2,
        square - 4,
        square - 4,
    )

    pen = QPen(Qt.black, 3)
    pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
    painter.setPen(pen)

    # # # Horizontal lines
    mid_vpoint = QPoint(2, size // 2)
    painter.drawLine(
        mid_vpoint, QPoint(((size - center) // 2) - center + 1, size // 2)
    )
    mid_vpoint = QPoint(size - 3, size // 2)
    painter.drawLine(
        mid_vpoint, QPoint(((size - center) // 2) + center + 1, size // 2)
    )

    # # # Vertical lines
    mid_hpoint = QPoint(size // 2, 2)
    painter.drawLine(
        QPoint(size // 2, ((size - center) // 2) - center + 1), mid_hpoint
    )
    mid_hpoint = QPoint(size // 2, size - 3)
    painter.drawLine(
        QPoint(size // 2, ((size - center) // 2) + center + 1), mid_hpoint
    )

    painter.end()
    return pixmap
@lru_cache(maxsize=64)
def circle_pixmap(size: int):
    """Create a white/black hollow circle pixmap. For use as labels cursor.

    Parameters
    ----------
    size : int
        Diameter of the pixmap in pixels (clamped to >= 1).

    Returns
    -------
    QPixmap
        Transparent pixmap with a white outer and black inner circle outline.
    """
    size = max(int(size), 1)
    pixmap = QPixmap(QSize(size, size))
    pixmap.fill(Qt.transparent)
    painter = QPainter(pixmap)
    # White outline, then a black outline one pixel inside it.
    painter.setPen(Qt.white)
    painter.drawEllipse(0, 0, size - 1, size - 1)
    painter.setPen(Qt.black)
    painter.drawEllipse(1, 1, size - 3, size - 3)
    painter.end()
    return pixmap
def drag_with_pixmap(list_widget: QListWidget) -> QDrag:
    """Create a QDrag object showing an image of the selected list items.

    Useful for a QListWidget that displays custom widgets for each
    QListWidgetItem (usually via ``QListWidget.setItemWidget(item, widget)``).
    When used inside a ``QListWidget.startDrag`` method, the returned drag
    shows a picture of the rows being dragged rather than an empty rectangle.

    Parameters
    ----------
    list_widget : QListWidget
        The QListWidget for which to create a QDrag object.

    Returns
    -------
    QDrag
        A QDrag instance with a pixmap of the currently selected item.

    Examples
    --------
    >>> class QListWidget:
    ...     def startDrag(self, supportedActions):
    ...         drag = drag_with_pixmap(self)
    ...         drag.exec_(supportedActions, Qt.MoveAction)
    """
    drag = QDrag(list_widget)
    drag.setMimeData(list_widget.mimeData(list_widget.selectedItems()))
    viewport = list_widget.viewport()
    canvas = QPixmap(viewport.visibleRegion().boundingRect().size())
    canvas.fill(Qt.transparent)
    painter = QPainter(canvas)
    # Stamp an image of each selected row onto the transparent canvas at the
    # row's on-screen position.
    for index in list_widget.selectedIndexes():
        row_rect = list_widget.visualRect(index)
        painter.drawPixmap(row_rect, viewport.grab(row_rect))
    painter.end()
    drag.setPixmap(canvas)
    drag.setHotSpot(viewport.mapFromGlobal(QCursor.pos()))
    return drag
def combine_widgets(
    widgets: Union[QWidget, Sequence[QWidget]], vertical: bool = False
) -> QWidget:
    """Wrap one widget, or a sequence of widgets, in a single QWidget.

    Parameters
    ----------
    widgets : QWidget or sequence of QWidget
        A widget or a list of widgets to combine.
    vertical : bool, optional
        If True the container uses a QVBoxLayout; otherwise (the default) a
        QHBoxLayout.

    Returns
    -------
    QWidget
        The widget itself when a single widget is given; otherwise a new
        container QWidget whose layout holds all of them.

    Raises
    ------
    TypeError
        If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``.
    """
    # magicgui v0.2.0+ no longer exposes QWidgets directly; like vispy, the
    # backend widget lives at ``.native`` — unwrap it when present.
    native = getattr(widgets, 'native', None)
    if isinstance(native, QWidget):
        return native  # type: ignore
    if isinstance(widgets, QWidget):
        return widgets
    if is_sequence(widgets):
        # Same magicgui compatibility unwrapping, element-wise.
        unwrapped = []
        for item in widgets:
            inner = getattr(item, 'native', None)
            unwrapped.append(inner if isinstance(inner, QWidget) else item)
        if all(isinstance(item, QWidget) for item in unwrapped):
            container = QWidget()
            layout = QVBoxLayout() if vertical else QHBoxLayout()
            container.setLayout(layout)
            for item in unwrapped:
                layout.addWidget(item)
            return container
    raise TypeError(
        trans._('"widget" must be a QWidget or a sequence of QWidgets')
    )
def add_flash_animation(
    widget: QWidget, duration: int = 300, color: Array = (0.5, 0.5, 0.5, 0.5)
):
    """Add flash animation to widget to highlight certain action (e.g. taking a screenshot).

    Installs a colorize graphics effect on the widget and animates its color
    from transparent, briefly through ``color``, and back to transparent.
    The animation object is stored on ``widget._flash_animation`` and removed
    again by ``remove_flash_animation`` when it finishes.

    Parameters
    ----------
    widget : QWidget
        Any Qt widget.
    duration : int
        Duration of the flash animation (Qt animation duration, in ms).
    color : Array
        Color of the flash animation. By default, we use light gray.
    """
    # Normalize to a float RGBA color, then scale to 0-255 ints for QColor.
    color = transform_color(color)[0]
    color = (255 * color).astype("int")

    effect = QGraphicsColorizeEffect(widget)
    widget.setGraphicsEffect(effect)

    widget._flash_animation = QPropertyAnimation(effect, b"color")
    # Start and end fully transparent so the widget looks normal before and
    # after the flash.
    widget._flash_animation.setStartValue(QColor(0, 0, 0, 0))
    widget._flash_animation.setEndValue(QColor(0, 0, 0, 0))
    widget._flash_animation.setLoopCount(1)

    # let's make sure to remove the animation from the widget because
    # if we don't, the widget will actually be black and white.
    # A weak reference is used so this callback does not keep the widget alive.
    widget._flash_animation.finished.connect(
        partial(remove_flash_animation, weakref.ref(widget))
    )

    widget._flash_animation.start()

    # now set an actual time for the flashing and an intermediate color
    # (set after start() — this ordering is deliberate).
    widget._flash_animation.setDuration(duration)
    widget._flash_animation.setKeyValueAt(0.1, QColor(*color))
def remove_flash_animation(widget_ref: "weakref.ref[QWidget]"):
    """Remove the flash animation installed by ``add_flash_animation``.

    Clears the widget's colorize graphics effect and drops the stored
    animation object so the widget renders normally again.

    Parameters
    ----------
    widget_ref : weakref.ref of QWidget
        Weak reference to the widget (a weak reference is used so this
        finished-callback does not keep the widget alive).
    """
    # BUG FIX: dereference the weakref exactly once. The previous code called
    # ``widget_ref()`` twice; the widget could be garbage-collected between
    # the None check and the second call, leaving ``widget`` as None.
    widget = widget_ref()
    if widget is None:
        return
    try:
        widget.setGraphicsEffect(None)
        del widget._flash_animation
    except RuntimeError:
        # RuntimeError: wrapped C/C++ object of type QtWidgetOverlay deleted
        pass
@contextmanager
def _maybe_allow_interrupt(qapp):
    """
    This manager allows to terminate a plot by sending a SIGINT. It is
    necessary because the running Qt backend prevents the Python interpreter
    from running and processing signals (i.e., from raising
    KeyboardInterrupt). To solve this one needs to somehow wake up the
    interpreter and make it close the plot window.

    We do this by using the signal.set_wakeup_fd() function which organizes a
    write of the signal number into a socketpair connected to the
    QSocketNotifier (since it is part of the Qt backend, it can react to that
    write event). Afterwards, the Qt handler empties the socketpair by a
    recv() command to re-arm it (we need this if a signal different from
    SIGINT was caught by set_wakeup_fd() and we shall continue waiting). If
    the SIGINT was caught indeed, after exiting the on_signal() function the
    interpreter reacts to the SIGINT according to the handle() function which
    had been set up by a signal.signal() call: it causes the qapp to exit by
    calling its quit() method. Finally, we call the old SIGINT handler with
    the same arguments that were given to our custom handle() handler.

    We do this only if the old handler for SIGINT was not None, which means
    that a non-python handler was installed, i.e. in Julia, and not SIG_IGN
    which means we should ignore the interrupts.

    code from https://github.com/matplotlib/matplotlib/pull/13306
    """
    old_sigint_handler = signal.getsignal(signal.SIGINT)
    handler_args = None
    skip = False
    if old_sigint_handler in (None, signal.SIG_IGN, signal.SIG_DFL):
        # No custom Python-level handler is installed (or interrupts are
        # deliberately ignored): nothing to wire up.
        skip = True
    else:
        wsock, rsock = socket.socketpair()
        wsock.setblocking(False)
        # Route signal wakeups into the write end of the socket pair.
        old_wakeup_fd = signal.set_wakeup_fd(wsock.fileno())
        sn = QSocketNotifier(rsock.fileno(), QSocketNotifier.Type.Read)
        # Clear the socket to re-arm the notifier.
        sn.activated.connect(lambda *args: rsock.recv(1))

        def handle(*args):
            # Remember the handler arguments and stop the Qt event loop; the
            # original handler is invoked during cleanup below.
            nonlocal handler_args
            handler_args = args
            qapp.exit()

        signal.signal(signal.SIGINT, handle)
    try:
        yield
    finally:
        if not skip:
            # Restore everything in reverse order of installation.
            wsock.close()
            rsock.close()
            sn.setEnabled(False)
            signal.set_wakeup_fd(old_wakeup_fd)
            signal.signal(signal.SIGINT, old_sigint_handler)
            if handler_args is not None:
                # A SIGINT arrived while the loop ran: forward it to the
                # original handler now that state is restored.
                old_sigint_handler(*handler_args)
class Sentry(QObject):
    """Small object to trigger events across threads.

    ``alert`` may be called from any thread; listeners connect to the
    ``alerted`` signal (Qt signal delivery presumably runs the slot on the
    receiver's thread via a queued connection — confirm against usage).
    """

    # Emitted every time ``alert`` is called.
    alerted = Signal()

    def alert(self, *_):
        # Accepts and ignores any positional arguments so it can be used
        # directly as a callback with an arbitrary signature.
        self.alerted.emit()
def qt_might_be_rich_text(text) -> bool:
    """
    Check if a text might be rich text in a cross-binding compatible way.

    PySide2 exposes ``mightBeRichText`` on ``QtGui.Qt`` while the other
    bindings expose it on ``QtCore.Qt``; if the binding does not provide it
    at all, fall back to a regex heuristic (``RICH_TEXT_PATTERN``).
    """
    if qtpy.PYSIDE2:
        from qtpy.QtGui import Qt as _Qt
    else:
        from qtpy.QtCore import Qt as _Qt

    try:
        return _Qt.mightBeRichText(text)
    except Exception:
        # Binding lacks mightBeRichText (or it failed): use the regex
        # heuristic instead.
        return bool(RICH_TEXT_PATTERN.search(text))
|
en
| 0.752787
|
Check if a string is a QByteArray string. Parameters ---------- string : bool State string. Convert a window state to a string. Used for restoring the state of the main window. Parameters ---------- qbyte : QByteArray State array. Convert a string to a QbyteArray. Used for restoring the state of the main window. Parameters ---------- string : str State string. Convert QImage to an array. Parameters ---------- img : qtpy.QtGui.QImage QImage to be converted. Returns ------- arr : array Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the upper-left corner of the rendered region. # Fix when image is provided in wrong format (ex. test on Azure pipelines) # As vispy doesn't use qtpy we need to reconcile the differences # between the `QImage` API for `PySide2` and `PyQt5` on how to convert # a QImage to a numpy array. # Format of QImage is ARGB32_Premultiplied, but color channels are # reversed. Context manager to temporarily block signals from `obj` Context manager to temporarily remove the PyQt5 input hook Set enabled state on a list of widgets. If not enabled, decrease opacity. Create a white/black hollow square pixmap. For use as labels cursor. Create a cross cursor with white/black hollow square pixmap in the middle. For use as points cursor. # Base measures # Must be odd! # # Horizontal rectangle # Vertical rectangle # Square # # Square # # # Horizontal lines # # # Vertical lines Create a white/black hollow circle pixmap. For use as labels cursor. Create a QDrag object with a pixmap of the currently select list item. This method is useful when you have a QListWidget that displays custom widgets for each QListWidgetItem instance in the list (usually by calling ``QListWidget.setItemWidget(item, widget)``). When used in a ``QListWidget.startDrag`` method, this function creates a QDrag object that shows an image of the item being dragged (rather than an empty rectangle). 
Parameters ---------- list_widget : QListWidget The QListWidget for which to create a QDrag object. Returns ------- QDrag A QDrag instance with a pixmap of the currently selected item. Examples -------- >>> class QListWidget: ... def startDrag(self, supportedActions): ... drag = drag_with_pixmap(self) ... drag.exec_(supportedActions, Qt.MoveAction) Combine a list of widgets into a single QWidget with Layout. Parameters ---------- widgets : QWidget or sequence of QWidget A widget or a list of widgets to combine. vertical : bool, optional Whether the layout should be QVBoxLayout or not, by default QHBoxLayout is used Returns ------- QWidget If ``widgets`` is a sequence, returns combined QWidget with `.layout` property, otherwise returns the original widget. Raises ------ TypeError If ``widgets`` is neither a ``QWidget`` or a sequence of ``QWidgets``. # compatibility with magicgui v0.2.0 which no longer uses QWidgets # directly. Like vispy, the backend widget is at widget.native # type: ignore # the same as above, compatibility with magicgui v0.2.0 Add flash animation to widget to highlight certain action (e.g. taking a screenshot). Parameters ---------- widget : QWidget Any Qt widget. duration : int Duration of the flash animation. color : Array Color of the flash animation. By default, we use light gray. # let's make sure to remove the animation from the widget because # if we don't, the widget will actually be black and white. # now set an actual time for the flashing and an intermediate color Remove flash animation from widget. Parameters ---------- widget_ref : QWidget Any Qt widget. # RuntimeError: wrapped C/C++ object of type QtWidgetOverlay deleted This manager allows to terminate a plot by sending a SIGINT. It is necessary because the running Qt backend prevents Python interpreter to run and process signals (i.e., to raise KeyboardInterrupt exception). To solve this one needs to somehow wake up the interpreter and make it close the plot window. 
We do this by using the signal.set_wakeup_fd() function which organizes a write of the signal number into a socketpair connected to the QSocketNotifier (since it is part of the Qt backend, it can react to that write event). Afterwards, the Qt handler empties the socketpair by a recv() command to re-arm it (we need this if a signal different from SIGINT was caught by set_wakeup_fd() and we shall continue waiting). If the SIGINT was caught indeed, after exiting the on_signal() function the interpreter reacts to the SIGINT according to the handle() function which had been set up by a signal.signal() call: it causes the qt_object to exit by calling its quit() method. Finally, we call the old SIGINT handler with the same arguments that were given to our custom handle() handler. We do this only if the old handler for SIGINT was not None, which means that a non-python handler was installed, i.e. in Julia, and not SIG_IGN which means we should ignore the interrupts. code from https://github.com/matplotlib/matplotlib/pull/13306 # Clear the socket to re-arm the notifier. Small object to trigger events across threads. Check if a text might be rich text in a cross-binding compatible way.
| 1.999112
| 2
|
ortools/linear_solver/samples/integer_programming_example.py
|
klorel/or-tools
| 1
|
6629124
|
<reponame>klorel/or-tools
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small example to illustrate solving a MIP problem."""
# [START program]
from __future__ import print_function
# [START import]
from ortools.linear_solver import pywraplp
# [END import]
def IntegerProgrammingExample():
    """Integer programming sample.

    Maximizes 2*x + 2*y + 3*z over non-negative integer variables subject to
    three linear constraints, then prints the objective value and the value
    of each variable.
    """
    # [START solver]
    # Create the mip solver with the CBC backend.
    solver = pywraplp.Solver('IntegerProgrammingExample',
                             pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
    # [END solver]

    # [START variables]
    # x, y, and z are non-negative integer variables.
    x = solver.IntVar(0.0, solver.infinity(), 'x')
    y = solver.IntVar(0.0, solver.infinity(), 'y')
    z = solver.IntVar(0.0, solver.infinity(), 'z')
    # [END variables]

    # [START constraints]
    # 2*x + 7*y + 3*z <= 50
    constraint0 = solver.Constraint(-solver.infinity(), 50)
    constraint0.SetCoefficient(x, 2)
    constraint0.SetCoefficient(y, 7)
    constraint0.SetCoefficient(z, 3)

    # 3*x - 5*y + 7*z <= 45
    constraint1 = solver.Constraint(-solver.infinity(), 45)
    constraint1.SetCoefficient(x, 3)
    constraint1.SetCoefficient(y, -5)
    constraint1.SetCoefficient(z, 7)

    # 5*x + 2*y - 6*z <= 37
    constraint2 = solver.Constraint(-solver.infinity(), 37)
    constraint2.SetCoefficient(x, 5)
    constraint2.SetCoefficient(y, 2)
    constraint2.SetCoefficient(z, -6)
    # [END constraints]

    # [START objective]
    # Maximize 2*x + 2*y + 3*z
    objective = solver.Objective()
    objective.SetCoefficient(x, 2)
    objective.SetCoefficient(y, 2)
    objective.SetCoefficient(z, 3)
    objective.SetMaximization()
    # [END objective]

    # Solve the problem and print the solution.
    # [START print_solution]
    status = solver.Solve()
    # BUG FIX: the solve status was previously ignored, so an infeasible or
    # aborted solve would print meaningless values.
    if status != pywraplp.Solver.OPTIMAL:
        print('The problem does not have an optimal solution.')
        return
    # Print the objective value of the solution.
    print('Maximum objective function value = %d' % solver.Objective().Value())
    print()
    # Print the value of each variable in the solution.
    for variable in [x, y, z]:
        print('%s = %d' % (variable.name(), variable.solution_value()))
    # [END print_solution]


IntegerProgrammingExample()
# [END program]
|
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small example to illustrate solving a MIP problem."""
# [START program]
from __future__ import print_function
# [START import]
from ortools.linear_solver import pywraplp
# [END import]
def IntegerProgrammingExample():
    """Integer programming sample."""
    # [START solver]
    # Create the mip solver with the CBC backend.
    solver = pywraplp.Solver('IntegerProgrammingExample',
                             pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
    # [END solver]

    # [START variables]
    # x, y, and z are non-negative integer variables.
    x, y, z = (solver.IntVar(0.0, solver.infinity(), name)
               for name in ('x', 'y', 'z'))
    # [END variables]

    # [START constraints]
    # Each entry is ((coefficients of x, y, z), upper bound):
    #   2*x + 7*y + 3*z <= 50
    #   3*x - 5*y + 7*z <= 45
    #   5*x + 2*y - 6*z <= 37
    rows = (((2, 7, 3), 50), ((3, -5, 7), 45), ((5, 2, -6), 37))
    for coefficients, upper_bound in rows:
        constraint = solver.Constraint(-solver.infinity(), upper_bound)
        for variable, coefficient in zip((x, y, z), coefficients):
            constraint.SetCoefficient(variable, coefficient)
    # [END constraints]

    # [START objective]
    # Maximize 2*x + 2*y + 3*z
    objective = solver.Objective()
    for variable, coefficient in zip((x, y, z), (2, 2, 3)):
        objective.SetCoefficient(variable, coefficient)
    objective.SetMaximization()
    # [END objective]

    # Solve the problem and print the solution.
    # [START print_solution]
    solver.Solve()
    # Print the objective value of the solution.
    print('Maximum objective function value = %d' % solver.Objective().Value())
    print()
    # Print the value of each variable in the solution.
    for variable in (x, y, z):
        print('%s = %d' % (variable.name(), variable.solution_value()))
    # [END print_solution]


IntegerProgrammingExample()
# [END program]
|
en
| 0.792805
|
# Copyright 2010-2018 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Small example to illustrate solving a MIP problem. # [START program] # [START import] # [END import] Integer programming sample. # [START solver] # Create the mip solver with the CBC backend. # [END solver] # [START variables] # x, y, and z are non-negative integer variables. # [END variables] # [START constraints] # 2*x + 7*y + 3*z <= 50 # 3*x - 5*y + 7*z <= 45 # 5*x + 2*y - 6*z <= 37 # [END constraints] # [START objective] # Maximize 2*x + 2*y + 3*z # [END objective] # Solve the problem and print the solution. # [START print_solution] # Print the objective value of the solution. # Print the value of each variable in the solution. # [END print_solution] # [END program]
| 2.65247
| 3
|
easyga.py
|
eromid/easyGA-python
| 0
|
6629125
|
#!/usr/bin/env python
# encoding: utf-8
"""
EasyGA
Copyright © 2017 Eromid (Olly)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import random
from itertools import combinations
from multiprocessing import Pool
class Chromasome:
    """
    The genetic material of an individual.

    An individual is encoded as a fixed-length bit string whose fitness is
    computed lazily (and cached) by a user-supplied fitness function.
    """

    def __init__(self, n_bits, fitness_fnc):
        """
        Constructor for Chromasome objects.

        Args:
            n_bits (int): The length of the bitstring needed to encode an
                individual's characteristics.
            fitness_fnc (Callable): A callable (function or lambda) that takes
                the bitstring as a parameter and returns its fitness score.
        """
        self.n_bits = n_bits
        self.fitness_fnc = fitness_fnc
        self.fitness_val = None  # cached fitness; None means "not yet computed"
        self.bit_string = [0] * n_bits
        self.fitness_calculated = False  # kept for backward compatibility

    def randomise(self):
        """
        Randomise the bits in the bitstring. Useful for initialising in the
        first generation.

        Returns:
            :obj:`Chromasome`: Reference to self for method chaining.
        """
        self.bit_string = [random.choice([0, 1]) for _ in range(self.n_bits)]
        return self

    def mutate(self, rate=0.001):
        """
        Mutate the individual - randomly flip some of the bits in the bitstring.

        Args:
            rate (float): The probability each individual bit will be flipped.

        Returns:
            :obj:`Chromasome`: Reference to self for method chaining.
        """
        for i in range(len(self.bit_string)):
            if random.random() <= rate:
                self.bit_string[i] = int(not self.bit_string[i])
        return self

    def fitness(self):
        """
        Get the fitness for this individual. Checks for a cached fitness score
        before calling the (potentially expensive) fitness function.

        Returns:
            The fitness score of this individual, as returned by fitness_fnc.
        """
        if self.fitness_val is None:
            # Not cached yet: evaluate and remember the result.
            self.fitness_val = self.fitness_fnc(self.bit_string)
        return self.fitness_val

    def crossoverSinglePoint(self, mate, pivot):
        """
        Combine this individual with another to create an offspring individual
        with some traits from each parent.

        Args:
            mate (:obj:`Chromasome`): The individual we are combining with this one.
            pivot (int): Index to split the bitstrings at. The child takes this
                individual's bits left of `pivot` and `mate`'s bits from `pivot`
                onwards.

        Returns:
            :obj:`Chromasome`: The new child individual.
        """
        child = Chromasome(self.n_bits, self.fitness_fnc)
        child.bit_string = self.bit_string[:pivot] + mate.bit_string[pivot:]
        return child

    def crossoverUniform(self, mate):
        """
        Combine this individual with another to create an offspring individual,
        taking each bit from either parent with equal probability.

        Args:
            mate (:obj:`Chromasome`): The individual we are combining with this one.

        Returns:
            :obj:`Chromasome`: The new child individual.
        """
        parents = (self, mate)
        child = Chromasome(self.n_bits, self.fitness_fnc)
        # BUG FIX: this loop previously used Python 2's `xrange`, which raises
        # NameError on Python 3.
        for i in range(len(self.bit_string)):
            choice = int(random.random() > 0.5)
            child.bit_string[i] = parents[choice].bit_string[i]
        return child

    def asHex(self):
        """
        The hexadecimal representation of this individual's bit string.

        Returns:
            str: The hex representation (e.g. '0xb').
        """
        exponent = 0
        total = 0
        # Least significant bit is the last element of the bit string.
        for bit in reversed(self.bit_string):
            total += bit * (2 ** exponent)
            exponent += 1
        return hex(total)

    def __str__(self):
        """Binary (list) representation of the bit string."""
        return str(self.bit_string)
class GeneticAlgorithm:
    """
    An abstract general genetic algorithm. Your implementation should use this
    as its base class and override the following methods:

    `selection`: Defines how the "parents" are selected.
    `crossover`: Defines how the "parents" are combined to produce the next generation.
    `mutate`: Defines how the offspring are mutated to allow drift in the population.

    There are built in options for some of the above:
    `getBestPairs`, `getRoulettePairs` for `selection` method.
    `singlePointCrossover`, `uniformCrossover` for `crossover` method.
    """

    def __init__(self, population_size, bit_string_len, fitness_fnc):
        """
        Create a population and find the fitness of the first generation.

        Args:
            population_size (int): The number of individuals to maintain in the population.
            bit_string_len (int): The length of the bit string of each individual.
            fitness_fnc (Callable): Function to call on an individual's bit string to
                determine its fitness. Must be picklable since it is shipped to
                worker processes by `updateAllFitness` (lambdas will not work).
        """
        self.population_size = population_size
        self.fitness_fnc = fitness_fnc
        self.bit_string_len = bit_string_len
        self.population = [
            Chromasome(self.bit_string_len, self.fitness_fnc).randomise()
            for _ in range(population_size)
        ]
        self.updateAllFitness()

    def selection(self):
        """
        Must be overloaded in a subclass to determine how to perform selection.

        Returns:
            list of tuple of :obj:`Chromasome`: The pairs to be recombined into
            the next generation.
        """
        raise NotImplementedError("`Selection` method must be overloaded in a subclass")

    def crossover(self, parent_pairs):
        """
        Must be overloaded in a subclass to determine how to perform crossover.

        Args:
            parent_pairs (tuple of :obj:`Chromasome`): Parent pairs to be recombined.

        Returns:
            list of :obj:`Chromasome`: The new generation of individuals.
        """
        raise NotImplementedError("`Crossover` method must be overloaded in a subclass")

    def mutate(self):
        """
        Must be overloaded in a subclass to determine how to perform mutation.
        """
        raise NotImplementedError("`Mutate` method must be overloaded in a subclass")

    def nextGeneration(self):
        """
        Perform the steps necessary to advance the population:
          1. Calculate all fitnesses.
          2. Select parent pairs.
          3. Recombine individuals.
          4. Mutate the new generation.
        """
        self.updateAllFitness()
        parent_pairs = self.selection()
        self.population = self.crossover(parent_pairs)
        self.mutate()

    def updateAllFitness(self, n_workers=16):
        """
        Recompute the fitness of all individuals, parallelising the (potentially
        expensive) fitness calls across a process pool.

        Args:
            n_workers (int): The number of workers in the work pool.
        """
        work_pool = Pool(n_workers)
        try:
            updated_fitness = work_pool.map(
                self.fitness_fnc,
                [chromasome.bit_string for chromasome in self.population])
        finally:
            # Release the worker processes even if a fitness call raised.
            work_pool.close()
        # BUG FIX: this loop previously used Python 2's `xrange`, which raises
        # NameError on Python 3.
        for chromasome, fitness in zip(self.population, updated_fitness):
            chromasome.fitness_val = fitness

    def getBestPairs(self, n_pairs, n_elites):
        """
        A built in selection method to call in `selection`. Sorts the population
        by fitness and draws random unique pairings, with uniform probability,
        from the top `n_elites` individuals.

        Args:
            n_pairs (int): The number of pairs to generate.
            n_elites (int): Only the best n_elites are considered for inclusion
                in the pairings.

        Returns:
            list of tuple of :obj:`Chromasome`: The selected parent pairs.
        """
        self.population = sorted(
            self.population, key=lambda genotype: genotype.fitness(), reverse=True)
        # BUG FIX: the elite slice previously used `n_pairs` and the loop ran
        # until `population_size` pairs were collected, ignoring both
        # parameters' documented roles.
        elites = self.population[:n_elites]
        parent_pairs = []
        while len(parent_pairs) < n_pairs:
            p1 = random.choice(elites)
            p2 = random.choice(elites)
            if p1 is not p2 and (p1, p2) not in parent_pairs:
                parent_pairs.append((p1, p2))
        return parent_pairs

    def _rouletteSelect(self, total_fitness):
        """Pick one individual with probability proportional to its fitness."""
        r = random.uniform(0.0, total_fitness)
        cumulative_fitness = 0.0
        for individual in self.population:
            cumulative_fitness += individual.fitness()
            if cumulative_fitness >= r:
                return individual
        # Floating point rounding can leave the cumulative sum fractionally
        # below r; fall back to the last individual rather than returning None
        # (the previous code left the parent as None in that case).
        return self.population[-1]

    def getRoulettePairs(self, n_pairs):
        """
        A built in selection method to call in `selection`. Selects unique
        parent pairs by fitness-proportionate (roulette-wheel) sampling.

        Args:
            n_pairs (int): The number of pairs required to form the next
                generation. This typically depends on the crossover method
                being used.

        Returns:
            list of tuple of :obj:`Chromasomes`: The parent pairs for the next
            generation.
        """
        parent_pairs = []
        self.population = sorted(
            self.population, key=lambda genotype: genotype.fitness(), reverse=True)
        total_fitness = sum(individual.fitness() for individual in self.population)
        while len(parent_pairs) < n_pairs:
            left_parent = self._rouletteSelect(total_fitness)
            right_parent = self._rouletteSelect(total_fitness)
            # Add the parent tuple if they are not the same and not already present.
            if left_parent is not right_parent and (left_parent, right_parent) not in parent_pairs:
                parent_pairs.append((left_parent, right_parent))
        return parent_pairs

    def uniformCrossover(self, parent_pairs):
        """
        Built in method for recombining parent individuals. Each bit has an
        equal chance to be inherited from each parent; one child per pair.

        Args:
            parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of
                parents. The length of this should be equal to the population size.

        Returns:
            list of :obj:`Chromasome`: The new generation.
        """
        if len(parent_pairs) != self.population_size:
            raise ValueError("Uniform crossover expects {} parent pairs; given {}." \
                .format(self.population_size, len(parent_pairs)))
        return [p1.crossoverUniform(p2) for p1, p2 in parent_pairs]

    def singlePointCrossover(self, parent_pairs, pivot=None):
        """
        Built in method for producing a new generation. The bit strings of the
        parents are split at `pivot` and the "halves" swapped between the
        parents, producing two offspring for each parent pair.

        Args:
            parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of parents.
            pivot (int): The index at which to "chop" the bit strings. Defaults
                to the middle of the bit string.

        Returns:
            list of :obj:`Chromasome`: The new generation.
        """
        # BUG FIX: use integer division. On Python 3 `/` yields a float, and a
        # float default pivot made the slicing in crossoverSinglePoint raise
        # TypeError.
        if len(parent_pairs) != self.population_size // 2:
            raise ValueError("Single point crossover expects {} parent pairs; given {}." \
                .format(self.population_size // 2, len(parent_pairs)))
        if pivot is None:
            pivot = self.bit_string_len // 2
        new_generation = [p1.crossoverSinglePoint(p2, pivot) for p1, p2 in parent_pairs]
        new_generation += [p2.crossoverSinglePoint(p1, pivot) for p1, p2 in parent_pairs]
        return new_generation

    def mutateAll(self, rate):
        """
        Mutate all the individuals in the population.

        Args:
            rate (float): The probability that each individual bit will be
                flipped. Must be in [0, 1].

        Raises:
            ValueError: If `rate` lies outside [0, 1].
        """
        if (rate < 0.0) or (rate > 1.0):
            raise ValueError("Mutation rate must be between 0 and 1.")
        for individual in self.population:
            individual.mutate(rate)

    def avgFitness(self):
        """
        Get the mean fitness of the current population.

        Returns:
            float: The mean fitness of the population.
        """
        return sum(genotype.fitness() for genotype in self.population) / float(self.population_size)

    def maxFitness(self):
        """
        Get the fitness of the fittest individual in the current population.

        Returns:
            float: The fitness of the fittest individual in the population.
        """
        return max(chromasome.fitness() for chromasome in self.population)

    def minFitness(self):
        """
        Get the fitness of the least fit individual in the current population.

        Returns:
            float: The fitness of the least fit individual in the population.
        """
        return min(chromasome.fitness() for chromasome in self.population)

    def __str__(self):
        """
        String representation of the population: the hex string of each
        individual, one per line.

        Returns:
            str: String representation of the population.
        """
        return "\n".join(i.asHex() for i in self.population)

    def getBestIndividual(self):
        """
        Get the fittest individual in the current population.

        Returns:
            :obj:`Chromasome`: The fittest individual in the population.
        """
        return max(self.population, key=lambda chromasome: chromasome.fitness())
|
#!/usr/bin/env python
# encoding: utf-8
"""
EasyGA
Copyright © 2017 Eromid (Olly)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import random
from itertools import combinations
from multiprocessing import Pool
class Chromasome:
  """
  The genetic material of an individual: a fixed-length bit string scored by a
  user-supplied fitness function. The fitness value is cached after the first
  evaluation.
  """
  def __init__(self, n_bits, fitness_fnc):
    """
    Constructor for Chromasome objects.
    Args:
      n_bits (int): The length of the bitstring needed to encode an individual's characteristics.
      fitness_fnc (Callable): A callable (function or lambda) that takes the bitstring as a
                              parameter and returns its fitness score.
    """
    self.n_bits = n_bits
    self.fitness_fnc = fitness_fnc
    self.fitness_val = None  # cached fitness; None means "not yet evaluated"
    self.bit_string = [0] * n_bits
    self.fitness_calculated = False
  def randomise(self):
    """
    Randomise the bits in the bitstring. Useful for initialising in the first generation.
    Returns:
      :obj:`Chromasome`: Reference to self for method chaining.
    """
    self.bit_string = [random.choice([0, 1]) for _ in range(self.n_bits)]
    return self
  def mutate(self, rate=0.001):
    """
    Mutate the individual - randomly flip some of the bits in the bitstring.
    Args:
      rate (float): The probability each individual bit will be flipped.
    Returns:
      :obj:`Chromasome`: Reference to self for method chaining.
    """
    for i in range(len(self.bit_string)):
      if random.random() <= rate:
        self.bit_string[i] = int(not self.bit_string[i])
    return self
  def fitness(self):
    """
    Get the fitness for this individual. Checks for a cached fitness score before calling
    the (potentially expensive) fitness function.
    Returns:
      float: The fitness score of this individual.
    """
    if self.fitness_val is None:  # no cached value yet...
      self.fitness_val = self.fitness_fnc(self.bit_string)  # ...evaluate and cache it.
    return self.fitness_val
  def crossoverSinglePoint(self, mate, pivot):
    """
    Combine this individual with another to create an offspring individual with
    some traits from each parent.
    Args:
      mate (:obj:`Chromasome`): The individual we are combining with this one.
      pivot (int): index to split the bitstrings at. The child bitstring is equal to this
                   individual's left of `pivot`, and the `mate` individual right of `pivot`.
    Returns:
      :obj:`Chromasome`: The new child individual.
    """
    child = Chromasome(self.n_bits, self.fitness_fnc)
    child.bit_string = self.bit_string[:pivot] + mate.bit_string[pivot:]
    return child
  def crossoverUniform(self, mate):
    """
    Combine this individual with another to create an offspring individual with
    some traits from each parent. Takes each bit randomly from a parent with equal probability.
    Args:
      mate (:obj:`Chromasome`): The individual we are combining with this one.
    Returns:
      :obj:`Chromasome`: The new child individual.
    """
    parents = (self, mate)
    child = Chromasome(self.n_bits, self.fitness_fnc)
    # Bug fix: `xrange` is Python 2 only and raised NameError under Python 3.
    for i in range(len(self.bit_string)):
      choice = int(random.random() > 0.5)
      child.bit_string[i] = parents[choice].bit_string[i]
    return child
  def asHex(self):
    """
    The hexadecimal representation of this individual.
    Returns:
      str: The hex representation (MSB-first interpretation of the bit string).
    """
    value = 0
    for bit in self.bit_string:
      value = (value << 1) | bit  # same power-of-two weighting as summing 2**i MSB-first
    return hex(value)
  def __str__(self):
    """
    Binary representation of the bit string.
    """
    return str(self.bit_string)
class GeneticAlgorithm:
  """
  An abstract general genetic algorithm. Your implementation should use this as its base class
  and override the following methods:
  `selection`: Defines how the "parents" are selected.
  `crossover`: Defines how the "parents" are combined to produce the next generation.
  `mutate`: Defines how the offspring are mutated to allow drift in the population.
  There are built in options for some of the above:
  `getBestPairs`, `getRoulettePairs` for `selection` method.
  `singlePointCrossover`, `uniformCrossover` for `crossover` method.
  """
  def __init__(self, population_size, bit_string_len, fitness_fnc):
    """
    Create a population and find the fitness of the first generation.
    Args:
      population_size (int): The number of individuals to maintain in the population.
      bit_string_len (int): The length of the bit string of each individual.
      fitness_fnc (Callable): Function to call on an individual's bit string to determine its fitness.
        Must be picklable (it is shipped to multiprocessing workers).
    """
    self.population_size = population_size
    self.fitness_fnc = fitness_fnc
    self.bit_string_len = bit_string_len
    self.population = [Chromasome(self.bit_string_len, self.fitness_fnc).randomise()
                       for _ in range(population_size)]
    self.updateAllFitness()
  def selection(self):
    """
    Must be overloaded in a subclass to determine how to perform selection.
    Returns:
      list of tuple of :obj:`Chromasome`: The pairs to be recombined into the next generation.
    """
    raise NotImplementedError("`Selection` method must be overloaded in a subclass")
  def crossover(self, parent_pairs):
    """
    Must be overloaded in a subclass to determine how to perform crossover.
    Args:
      parent_pairs (tuple of :obj:`Chromasome`): Parent pairs to be recombined
    Returns:
      list of :obj:`Chromasome`: The new generation of individuals
    """
    raise NotImplementedError("`Crossover` method must be overloaded in a subclass")
  def mutate(self):
    """
    Must be overloaded in a subclass to determine how to perform mutation.
    """
    raise NotImplementedError("`Mutate` method must be overloaded in a subclass")
  def nextGeneration(self):
    """
    Perform the steps necessary to advance the population:
    1. Calculate all fitnesses
    2. Select parent pairs
    3. Recombine individuals
    4. Mutate the new generation
    """
    self.updateAllFitness()
    parent_pairs = self.selection()
    self.population = self.crossover(parent_pairs)
    self.mutate()
  def updateAllFitness(self, n_workers=16):
    """
    Recompute the fitness of all of the individuals. Uses a work pool to parallelize
    the fitness calculation.
    Args:
      n_workers (int): The number of workers in the work pool.
    """
    work_pool = Pool(n_workers)
    try:
      updated_fitness = work_pool.map(self.fitness_fnc,
                                      [chromasome.bit_string for chromasome in self.population])
    finally:
      work_pool.close()  # always release workers (previously leaked if map() raised)
    # Bug fix: `xrange` is Python 2 only; pair results back up with zip instead.
    for chromasome, new_fitness in zip(self.population, updated_fitness):
      chromasome.fitness_val = new_fitness
  def getBestPairs(self, n_pairs, n_elites):
    """
    A built in selection method to call in `selection`. Sorts the population by fitness
    and draws random unique pairings, with uniform probability, from the top `n_elites`
    individuals.
    Args:
      n_pairs (int): The number of pairs to generate.
      n_elites (int): Only the best n_elites are considered for inclusion in the pairings.
        Must be >= 2 so that distinct parents exist.
    Returns:
      list of tuple of :obj:`Chromasome`: The parent pairs.
    """
    self.population = sorted(self.population, key=lambda genotype: genotype.fitness(), reverse=True)
    # Bug fix: the elite pool is the top `n_elites` (previously sliced by n_pairs) and we
    # stop once `n_pairs` pairs exist (previously both parameters were ignored).
    elites = self.population[:n_elites]
    parent_pairs = []
    while len(parent_pairs) < n_pairs:
      p1 = random.choice(elites)
      p2 = random.choice(elites)
      if p1 is not p2 and (p1, p2) not in parent_pairs:
        parent_pairs.append((p1, p2))
    return parent_pairs
  def _spinRoulette(self, total_fitness):
    """
    Pick one individual by fitness-proportionate (roulette wheel) selection.
    Args:
      total_fitness (float): Sum of fitness over the current population.
    Returns:
      :obj:`Chromasome` or None: The selected individual; None only if the wheel
        overshoots every cumulative slot (empty population / float rounding).
    """
    r = random.uniform(0.0, total_fitness)
    cumulative_fitness = 0.0
    for individual in self.population:
      cumulative_fitness += individual.fitness()
      if cumulative_fitness >= r:
        return individual
    return None
  def getRoulettePairs(self, n_pairs):
    """
    A built in selection method to call in `selection`. Picks parent pairs by
    fitness-proportionate (roulette wheel) selection; a pair is kept only if the two
    parents are distinct and the pair is not already present.
    Args:
      n_pairs (int): The number of pairs required to form the next generation. This typically
                     depends on the crossover method being used.
    Returns:
      list of tuple of :obj:`Chromasomes`: The parent pairs for the next generation.
    """
    parent_pairs = []
    self.population = sorted(self.population, key=lambda genotype: genotype.fitness(), reverse=True)
    total_fitness = sum(individual.fitness() for individual in self.population)
    while len(parent_pairs) < n_pairs:
      left_parent = self._spinRoulette(total_fitness)
      right_parent = self._spinRoulette(total_fitness)
      # Bug fix: previously a failed (None) selection fell through and could be paired;
      # retry the draw instead.
      if left_parent is None or right_parent is None:
        continue
      if left_parent is not right_parent and (left_parent, right_parent) not in parent_pairs:
        parent_pairs.append((left_parent, right_parent))
    return parent_pairs
  def uniformCrossover(self, parent_pairs):
    """
    Built in method for recombining parent individuals. Each bit has an equal chance to be
    inherited from each parent.
    Args:
      parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of parents. The length of this
        should be equal to the population size.
    Returns:
      list of :obj:`Chromasome`: The new generation.
    Raises:
      ValueError: If the number of pairs does not match the population size.
    """
    if len(parent_pairs) != self.population_size:
      raise ValueError("Uniform crossover expects {} parent pairs; given {}." \
        .format(self.population_size, len(parent_pairs)))
    return [p1.crossoverUniform(p2) for p1, p2 in parent_pairs]
  def singlePointCrossover(self, parent_pairs, pivot=None):
    """
    Built in method for producing a new generation. The bit strings of the parents are split at
    the `pivot` and the "halves" swapped between the parents, producing two offspring for each
    parent pair.
    Args:
      parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of parents.
      pivot (int): The index at which to "chop" the bit strings.
    Returns:
      list of :obj:`Chromasome`: The new generation.
    Raises:
      ValueError: If the number of pairs is not population_size // 2.
    """
    expected_pairs = self.population_size // 2  # two children per pair
    if len(parent_pairs) != expected_pairs:
      raise ValueError("Single point crossover expects {} parent pairs; given {}." \
        .format(expected_pairs, len(parent_pairs)))
    if pivot is None:
      pivot = self.bit_string_len // 2  # bug fix: `/` produced a float slice index under Python 3
    new_generation = [p1.crossoverSinglePoint(p2, pivot) for p1, p2 in parent_pairs]
    new_generation += [p2.crossoverSinglePoint(p1, pivot) for p1, p2 in parent_pairs]
    return new_generation
  def mutateAll(self, rate):
    """
    Mutate all the individuals in the population.
    Args:
      rate (float): The probability that each individual bit will be flipped.
                    Must be [0..1].
    Raises:
      ValueError: If `rate` is outside [0, 1].
    """
    if (rate < 0.0) or (rate > 1.0):
      raise ValueError("Mutation rate must be between 0 and 1.")
    for individual in self.population:
      individual.mutate(rate)
  def avgFitness(self):
    """
    Get the mean fitness of the current population.
    Returns:
      float: The mean fitness of the population.
    """
    return sum(genotype.fitness() for genotype in self.population) / float(self.population_size)
  def maxFitness(self):
    """
    Get the fitness of the fittest individual in the current population.
    Returns:
      float: The fitness of the fittest individual in the population.
    """
    return max(chromasome.fitness() for chromasome in self.population)
  def minFitness(self):
    """
    Get the fitness of the least fit individual in the current population.
    Returns:
      float: The fitness of the least fit individual in the population.
    """
    return min(chromasome.fitness() for chromasome in self.population)
  def __str__(self):
    """
    String representation of the population; the hex string of each individual,
    one per line.
    Returns:
      str: String representation of the population.
    """
    return "\n".join(i.asHex() for i in self.population)
  def getBestIndividual(self):
    """
    Get the fittest individual in the current population.
    Returns:
      :obj:`Chromasome`: The fittest individual in the population.
    """
    return max(self.population, key=lambda chromasome: chromasome.fitness())
|
en
| 0.800952
|
#!/usr/bin/env python # encoding: utf-8 EasyGA Copyright © 2017 Eromid (Olly) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. The genetic material of an individual. Constructor for Chromasome objects. Args: n_bits (int): The length of the bitstring needed to encode an individual's characteristics. fitness_fnc (Callable): A callable (function or lambda) that takes the bitstring as a parameter and returns its fitness score. Randomise the bits in the bitstring. Useful for initialising in the first generation. Returns: :obj:`Chromasome`: Refernce to self for method chaining. Mutate the individual - randomly flip some of the bits in the bitstring. Args: rate (float): The probability each individual bit will be flipped. Returns: :obj:`Chromasome`: Refernce to self for method chaining. Get the fitness for this individual. Checks for cached fitness score before calling the (potentially expensive) fitness function Returns: :obj:`Chromasome`: Refernce to self for method chaining. 
# check for a cached solution to the fitness function... # otherwise evaluate it. Combine this individual with another to create an offspring individual with some traits from each parent. Args: mate (:obj:`Chromasome`): The individual we are combining with this one. pivot (int): index to split the bitstrings at. The child bitstring is equal to this individual's left `pivot`, and the `mate` individual right of `pivot`. Combine this individual with another to create an offspring individual with some traits from each parent. Takes each bit randomly from a parent with equal probability Args: mate (:obj:`Chromasome`): The individual we are combining with this one. Returns: :obj:`Chromasome`: The new child individual. The hexadecimal representation of this individual. Returns: str: The hex representation. Binary representation of the bit string A abstract general genetic algorithm. Your implementation should use this as its base class and override the following methods: `selection`: Defines how the "parents" are selected. `crossover`: Defines how the "parents" are combined to produce the next generation. `mutate`: Defines how the offspring are mutated to allow drift in the population. There are built in options for some of the above: `getBestPairs`, `getRoulettePairs` for `selection` method. `singlePointCrossover`, `uniformCrossover` for `crossover` method. Create a population and find the fitness of the first generation. Args: population_size (int): The number of individuals to maintain in the population. bit_string_len (int): The length of the bit string of each individual. fitness_fnc (Callable): Function to call on the individual to determine its fitness. # self.elite_cutoff = 16 #TODO: remove? Must be overloaded in a subclass to determine how to perform selection. Returns: list of tuple of :obj:`Chromasome`: The pairs to be recombined into the next generation. Must be overloaded in a subclass to determine how to perform crossover. 
Args: parent_pairs (tuple of :obj:`Chromasome`): Parent pairs to be recombined Returns: list of :obj:`Chromasome`: The new generation of individuals Must be overloaded in a subclass to determine how to perform crossover. Perform the steps necessary to advance the population: 1. Calculate all fitnesses 2. Select parent pairs 3. Recombine individuals 4. Mutate the new generation Recompute the fitness of all of the individuals. Uses a work pool to parallelize the fitness calculation. Args: n_workers (int): The number of workers in the work pool. A built in selection method to call in `selection`. Gets random pairs from the best individuals in the population. We sort the population and choose random unique pairings from the top n_elites. with uniform probability. Args: n_pairs (int): The number of pairs to generate. n_elites (int): Only the best n_elites are considered for inclusion in the pairings. A built in selection method to call in `selection`. Gets random pairs from the best pairs from the population. We sort the population and choose random unique pairings from the top 50% with uniform probability. Args: n_pairs (int): The number of pairs required to form the next generation. This typically depends on the crossover method being used. Returns: list of tuple of :obj:`Chromasomes`: The parent pairs for the next generation. # Choose a right parent. # Add the parent tuple if they are not the same and not already present. Built in method for recombining parent individuals. Each bit has an equal chance to be inherited from each parent. Args: parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of parents. The length of this should be equal to the population size. Returns: list of :obj:`Chromasome`: The new generation. Built in method for producing a new generation. The bit strings of the parents are split at the `pivot` and the "halves" swapped between the parents, producing two offspring for each parent pair. 
Args: parent_pairs (list of tuple of :obj:`Chromasome`): The pairs of parents. pivot (int): The index at which to "chop" the bit strings. Returns: list of :obj:`Chromasome`: The new generation. Mutate all the individuals in the population. Args: rate (int): The probability that each individual bit will be flipped. Must be [0..1]. Get the mean fitness of the current population. Returns: float: The mean fitness of the population. Get the fitness of the fittest individual in the current population. Returns: float: The fitness of the fittest individual in the population. Get the fitness of the least fit individual in the current population. Returns: float: The fitness of the least fit individual in the population. String representation of the population; the hex string of each individual no new lines. Returns: str: String representation of the population. Get the fittest individual in the current population. Returns: :obj:`Chromasome`: The fittest individual in the population.
| 2.09693
| 2
|
webex_bot/websockets/webex_websocket_client.py
|
jseynaev-cisco/webex_bot
| 0
|
6629126
|
import asyncio
import json
import logging
import socket
import uuid
import backoff
import requests
import websockets
from webexteamssdk import WebexTeamsAPI
# Base URL of the Webex Device Management (WDM) API used to register this client.
DEFAULT_DEVICE_URL = "https://wdm-a.wbx2.com/wdm/api/v1"
# Registration payload for this bot's WDM device record. The 'name' value is the
# key used to look up an existing device in _get_device_info().
DEVICE_DATA = {
    "deviceName": "pywebsocket-client",
    "deviceType": "DESKTOP",
    "localizedModel": "python",
    "model": "python",
    "name": "python-spark-client",
    "systemName": "python-spark-client",
    "systemVersion": "0.1"
}
class WebexWebsocketClient(object):
    """
    Maintains a websocket connection to Webex (via a WDM device registration)
    and dispatches incoming conversation activity to the `on_message` /
    `on_card_action` callbacks supplied by the caller.
    """
    def __init__(self,
                 access_token,
                 device_url=DEFAULT_DEVICE_URL,
                 on_message=None,
                 on_card_action=None):
        """
        :param access_token: Webex bot bearer token (used for both REST and websocket auth)
        :param device_url: base URL of the WDM device-registration API
        :param on_message: callback(webex_message, activity) invoked for 'post' activities
        :param on_card_action: callback(attachment_actions, activity) invoked for 'cardAction' activities
        """
        self.access_token = access_token
        self.teams = WebexTeamsAPI(access_token=access_token)
        self.device_url = device_url
        self.device_info = None  # populated lazily by _get_device_info()
        self.on_message = on_message
        self.on_card_action = on_card_action
        self.websocket = None  # set once the connection is opened in run()
    def _process_incoming_websocket_message(self, msg):
        """
        Handle websocket data.
        Dispatches 'post' activities to `on_message` and 'cardAction' activities
        to `on_card_action`; any other verb is only logged.
        :param msg: The raw websocket message (already JSON-decoded to a dict)
        """
        if msg['data']['eventType'] == 'conversation.activity':
            activity = msg['data']['activity']
            if activity['verb'] == 'post':
                logging.debug(f"activity={activity}")
                message_base_64_id = self._get_base64_message_id(activity)
                webex_message = self.teams.messages.get(message_base_64_id)
                logging.debug(f"webex_message from message_base_64_id: {webex_message}")
                if self.on_message:
                    # ack message first
                    self._ack_message(message_base_64_id)
                    # Now process it with the handler
                    self.on_message(webex_message, activity)
            elif activity['verb'] == 'cardAction':
                logging.debug(f"activity={activity}")
                message_base_64_id = self._get_base64_message_id(activity)
                attachment_actions = self.teams.attachment_actions.get(message_base_64_id)
                logging.info(f"attachment_actions from message_base_64_id: {attachment_actions}")
                if self.on_card_action:
                    # ack message first
                    self._ack_message(message_base_64_id)
                    # Now process it with the handler
                    self.on_card_action(attachment_actions, activity)
            else:
                logging.debug(f"activity verb is: {activity['verb']} ")
    def _get_base64_message_id(self, activity):
        """
        In order to geo-locate the correct DC to fetch the message from, you need to use the base64 Id of the
        message.
        @param activity: incoming websocket data
        @return: base 64 message id
        """
        activity_id = activity['id']
        logging.debug(f"activity verb=post. message id={activity_id}")
        conversation_url = activity['target']['url']
        conv_target_id = activity['target']['id']
        # 'post' activities resolve against the messages resource; card actions
        # against attachment/actions.
        verb = "messages" if activity['verb'] == "post" else "attachment/actions"
        conversation_message_url = conversation_url.replace(f"conversations/{conv_target_id}",
                                                            f"{verb}/{activity_id}")
        headers = {"Authorization": f"Bearer {self.access_token}"}
        conversation_message = requests.get(conversation_message_url,
                                            headers=headers).json()
        logging.debug(f"conversation_message={conversation_message}")
        return conversation_message['id']
    def _ack_message(self, message_id):
        """
        Ack that this message has been processed. This will prevent the
        message coming again.
        @param message_id: activity message 'id'
        """
        logging.debug(f"WebSocket ack message with id={message_id}")
        ack_message = {'type': 'ack',
                       'messageId': message_id}
        # NOTE(review): websockets' send() is a coroutine; it is called here from a
        # sync context without being awaited, so the ack may never actually be
        # transmitted — confirm against the websockets library version in use.
        self.websocket.send(json.dumps(ack_message))
        logging.info(f"WebSocket ack message with id={message_id}. Complete.")
    def _get_device_info(self):
        """
        Get device info from Webex Cloud.
        If it doesn't exist, one will be created.
        """
        logging.debug('Getting device list')
        try:
            resp = self.teams._session.get(f"{self.device_url}/devices")
            for device in resp['devices']:
                if device['name'] == DEVICE_DATA['name']:
                    self.device_info = device
                    logging.debug(f"device_info: {self.device_info}")
                    return device
        except Exception as wdmException:
            logging.warning(f"wdmException: {wdmException}")
        # No matching device found (or the lookup failed): register a new one.
        logging.info('Device does not exist, creating')
        resp = self.teams._session.post(f"{self.device_url}/devices", json=DEVICE_DATA)
        if resp is None:
            raise Exception("could not create WDM device")
        self.device_info = resp
        logging.debug(f"self.device_info: {self.device_info}")
        return resp
    def run(self):
        """
        Ensure a WDM device exists, then open the websocket and listen forever.
        Incoming frames are JSON-decoded and handed to
        `_process_incoming_websocket_message` on an executor thread so the receive
        loop is not blocked. Connection-closed and DNS errors are retried with
        exponential backoff; any other exception triggers one blanket reconnect.
        :raises Exception: if no WDM device info could be obtained.
        """
        if self.device_info is None:
            if self._get_device_info() is None:
                logging.error('could not get/create device info')
                raise Exception("No WDM device info")
        async def _websocket_recv():
            message = await self.websocket.recv()
            logging.debug("WebSocket Received Message(raw): %s\n" % message)
            try:
                msg = json.loads(message)
                loop = asyncio.get_event_loop()
                # Fire-and-forget: run the handler on a worker thread so the
                # receive loop keeps draining the socket.
                loop.run_in_executor(None, self._process_incoming_websocket_message, msg)
            except Exception as messageProcessingException:
                logging.warning(
                    f"An exception occurred while processing message. Ignoring. {messageProcessingException}")
        @backoff.on_exception(backoff.expo, websockets.exceptions.ConnectionClosedError)
        @backoff.on_exception(backoff.expo, socket.gaierror)
        async def _connect_and_listen():
            ws_url = self.device_info['webSocketUrl']
            logging.info(f"Opening websocket connection to {ws_url}")
            async with websockets.connect(ws_url) as _websocket:
                self.websocket = _websocket
                logging.info("WebSocket Opened.")
                # Authorize this connection before listening for events.
                msg = {'id': str(uuid.uuid4()),
                       'type': 'authorization',
                       'data': {'token': 'Bearer ' + self.access_token}}
                await self.websocket.send(json.dumps(msg))
                while True:
                    await _websocket_recv()
        try:
            asyncio.get_event_loop().run_until_complete(_connect_and_listen())
        except Exception as runException:
            logging.error(f"runException: {runException}")
            # trigger re-connect
            asyncio.get_event_loop().run_until_complete(_connect_and_listen())
|
import asyncio
import json
import logging
import socket
import uuid
import backoff
import requests
import websockets
from webexteamssdk import WebexTeamsAPI
# Base URL of the Webex Device Management (WDM) API used to register this client.
DEFAULT_DEVICE_URL = "https://wdm-a.wbx2.com/wdm/api/v1"
# Registration payload for this bot's WDM device record. The 'name' value is the
# key used to look up an existing device in _get_device_info().
DEVICE_DATA = {
    "deviceName": "pywebsocket-client",
    "deviceType": "DESKTOP",
    "localizedModel": "python",
    "model": "python",
    "name": "python-spark-client",
    "systemName": "python-spark-client",
    "systemVersion": "0.1"
}
class WebexWebsocketClient(object):
    """
    Maintains a websocket connection to Webex (via a WDM device registration)
    and dispatches incoming conversation activity to the `on_message` /
    `on_card_action` callbacks supplied by the caller.
    """
    def __init__(self,
                 access_token,
                 device_url=DEFAULT_DEVICE_URL,
                 on_message=None,
                 on_card_action=None):
        """
        :param access_token: Webex bot bearer token (used for both REST and websocket auth)
        :param device_url: base URL of the WDM device-registration API
        :param on_message: callback(webex_message, activity) invoked for 'post' activities
        :param on_card_action: callback(attachment_actions, activity) invoked for 'cardAction' activities
        """
        self.access_token = access_token
        self.teams = WebexTeamsAPI(access_token=access_token)
        self.device_url = device_url
        self.device_info = None  # populated lazily by _get_device_info()
        self.on_message = on_message
        self.on_card_action = on_card_action
        self.websocket = None  # set once the connection is opened in run()
    def _process_incoming_websocket_message(self, msg):
        """
        Handle websocket data.
        Dispatches 'post' activities to `on_message` and 'cardAction' activities
        to `on_card_action`; any other verb is only logged.
        :param msg: The raw websocket message (already JSON-decoded to a dict)
        """
        if msg['data']['eventType'] == 'conversation.activity':
            activity = msg['data']['activity']
            if activity['verb'] == 'post':
                logging.debug(f"activity={activity}")
                message_base_64_id = self._get_base64_message_id(activity)
                webex_message = self.teams.messages.get(message_base_64_id)
                logging.debug(f"webex_message from message_base_64_id: {webex_message}")
                if self.on_message:
                    # ack message first
                    self._ack_message(message_base_64_id)
                    # Now process it with the handler
                    self.on_message(webex_message, activity)
            elif activity['verb'] == 'cardAction':
                logging.debug(f"activity={activity}")
                message_base_64_id = self._get_base64_message_id(activity)
                attachment_actions = self.teams.attachment_actions.get(message_base_64_id)
                logging.info(f"attachment_actions from message_base_64_id: {attachment_actions}")
                if self.on_card_action:
                    # ack message first
                    self._ack_message(message_base_64_id)
                    # Now process it with the handler
                    self.on_card_action(attachment_actions, activity)
            else:
                logging.debug(f"activity verb is: {activity['verb']} ")
    def _get_base64_message_id(self, activity):
        """
        In order to geo-locate the correct DC to fetch the message from, you need to use the base64 Id of the
        message.
        @param activity: incoming websocket data
        @return: base 64 message id
        """
        activity_id = activity['id']
        logging.debug(f"activity verb=post. message id={activity_id}")
        conversation_url = activity['target']['url']
        conv_target_id = activity['target']['id']
        # 'post' activities resolve against the messages resource; card actions
        # against attachment/actions.
        verb = "messages" if activity['verb'] == "post" else "attachment/actions"
        conversation_message_url = conversation_url.replace(f"conversations/{conv_target_id}",
                                                            f"{verb}/{activity_id}")
        headers = {"Authorization": f"Bearer {self.access_token}"}
        conversation_message = requests.get(conversation_message_url,
                                            headers=headers).json()
        logging.debug(f"conversation_message={conversation_message}")
        return conversation_message['id']
    def _ack_message(self, message_id):
        """
        Ack that this message has been processed. This will prevent the
        message coming again.
        @param message_id: activity message 'id'
        """
        logging.debug(f"WebSocket ack message with id={message_id}")
        ack_message = {'type': 'ack',
                       'messageId': message_id}
        # NOTE(review): websockets' send() is a coroutine; it is called here from a
        # sync context without being awaited, so the ack may never actually be
        # transmitted — confirm against the websockets library version in use.
        self.websocket.send(json.dumps(ack_message))
        logging.info(f"WebSocket ack message with id={message_id}. Complete.")
    def _get_device_info(self):
        """
        Get device info from Webex Cloud.
        If it doesn't exist, one will be created.
        """
        logging.debug('Getting device list')
        try:
            resp = self.teams._session.get(f"{self.device_url}/devices")
            for device in resp['devices']:
                if device['name'] == DEVICE_DATA['name']:
                    self.device_info = device
                    logging.debug(f"device_info: {self.device_info}")
                    return device
        except Exception as wdmException:
            logging.warning(f"wdmException: {wdmException}")
        # No matching device found (or the lookup failed): register a new one.
        logging.info('Device does not exist, creating')
        resp = self.teams._session.post(f"{self.device_url}/devices", json=DEVICE_DATA)
        if resp is None:
            raise Exception("could not create WDM device")
        self.device_info = resp
        logging.debug(f"self.device_info: {self.device_info}")
        return resp
    def run(self):
        """
        Ensure a WDM device exists, then open the websocket and listen forever.
        Incoming frames are JSON-decoded and handed to
        `_process_incoming_websocket_message` on an executor thread so the receive
        loop is not blocked. Connection-closed and DNS errors are retried with
        exponential backoff; any other exception triggers one blanket reconnect.
        :raises Exception: if no WDM device info could be obtained.
        """
        if self.device_info is None:
            if self._get_device_info() is None:
                logging.error('could not get/create device info')
                raise Exception("No WDM device info")
        async def _websocket_recv():
            message = await self.websocket.recv()
            logging.debug("WebSocket Received Message(raw): %s\n" % message)
            try:
                msg = json.loads(message)
                loop = asyncio.get_event_loop()
                # Fire-and-forget: run the handler on a worker thread so the
                # receive loop keeps draining the socket.
                loop.run_in_executor(None, self._process_incoming_websocket_message, msg)
            except Exception as messageProcessingException:
                logging.warning(
                    f"An exception occurred while processing message. Ignoring. {messageProcessingException}")
        @backoff.on_exception(backoff.expo, websockets.exceptions.ConnectionClosedError)
        @backoff.on_exception(backoff.expo, socket.gaierror)
        async def _connect_and_listen():
            ws_url = self.device_info['webSocketUrl']
            logging.info(f"Opening websocket connection to {ws_url}")
            async with websockets.connect(ws_url) as _websocket:
                self.websocket = _websocket
                logging.info("WebSocket Opened.")
                # Authorize this connection before listening for events.
                msg = {'id': str(uuid.uuid4()),
                       'type': 'authorization',
                       'data': {'token': 'Bearer ' + self.access_token}}
                await self.websocket.send(json.dumps(msg))
                while True:
                    await _websocket_recv()
        try:
            asyncio.get_event_loop().run_until_complete(_connect_and_listen())
        except Exception as runException:
            logging.error(f"runException: {runException}")
            # trigger re-connect
            asyncio.get_event_loop().run_until_complete(_connect_and_listen())
|
en
| 0.677663
|
Handle websocket data. :param msg: The raw websocket message # ack message first # Now process it with the handler # ack message first # Now process it with the handler In order to geo-locate the correct DC to fetch the message from, you need to use the base64 Id of the message. @param activity: incoming websocket data @return: base 64 message id Ack that this message has been processed. This will prevent the message coming again. @param message_id: activity message 'id' Get device info from Webex Cloud. If it doesn't exist, one will be created. # trigger re-connect
| 2.47371
| 2
|
src/am_softmax.py
|
tenggyut/facenet
| 0
|
6629127
|
#encoding=utf8
import tensorflow as tf
def am_logits_compute(embeddings, label_batch, fc_dim, nrof_classes):
    '''
    loss head proposed in paper:<Additive Margin Softmax for Face Verification>
    link: https://arxiv.org/abs/1801.05599
    embeddings : normalized embedding layer of Facenet, it's normalized value of output of resface
    label_batch : ground truth label of current training batch
    fc_dim : dimensionality of the embedding (row count of the class-weight matrix)
    nrof_classes: number of classes
    Returns the adjusted logits: s * (cos_theta - m) at the ground-truth class,
    s * cos_theta elsewhere.
    '''
    m = 0.35  # additive cosine margin applied to the target class
    s = 30  # scale factor applied to all logits
    with tf.name_scope('AM_logits'):
        # Class-weight matrix, L2-normalized per column so the matmul below yields cosines.
        kernel = tf.Variable(tf.truncated_normal([fc_dim, nrof_classes]))
        kernel_norm = tf.nn.l2_normalize(kernel, 0, 1e-10, name='kernel_norm')
        cos_theta = tf.matmul(embeddings, kernel_norm)#(batch_size, nrof_classes): cosine of the angle between each feature and each class weight
        cos_theta = tf.clip_by_value(cos_theta, -1,1)
        phi = cos_theta - m  # margin-penalized cosine used for the ground-truth class
        label_onehot = tf.one_hot(label_batch, nrof_classes)
        # Apply the margin only where the one-hot label is 1, then scale by s.
        adjust_theta = s * tf.where(tf.equal(label_onehot,1), phi, cos_theta)
        return adjust_theta
|
#encoding=utf8
import tensorflow as tf
def am_logits_compute(embeddings, label_batch, fc_dim, nrof_classes):
    '''
    loss head proposed in paper:<Additive Margin Softmax for Face Verification>
    link: https://arxiv.org/abs/1801.05599
    embeddings : normalized embedding layer of Facenet, it's normalized value of output of resface
    label_batch : ground truth label of current training batch
    fc_dim : dimensionality of the embedding (row count of the class-weight matrix)
    nrof_classes: number of classes
    Returns the adjusted logits: s * (cos_theta - m) at the ground-truth class,
    s * cos_theta elsewhere.
    '''
    m = 0.35  # additive cosine margin applied to the target class
    s = 30  # scale factor applied to all logits
    with tf.name_scope('AM_logits'):
        # Class-weight matrix, L2-normalized per column so the matmul below yields cosines.
        kernel = tf.Variable(tf.truncated_normal([fc_dim, nrof_classes]))
        kernel_norm = tf.nn.l2_normalize(kernel, 0, 1e-10, name='kernel_norm')
        cos_theta = tf.matmul(embeddings, kernel_norm)#(batch_size, nrof_classes): cosine of the angle between each feature and each class weight
        cos_theta = tf.clip_by_value(cos_theta, -1,1)
        phi = cos_theta - m  # margin-penalized cosine used for the ground-truth class
        label_onehot = tf.one_hot(label_batch, nrof_classes)
        # Apply the margin only where the one-hot label is 1, then scale by s.
        adjust_theta = s * tf.where(tf.equal(label_onehot,1), phi, cos_theta)
        return adjust_theta
|
en
| 0.67594
|
#encoding=utf8 loss head proposed in paper:<Additive Margin Softmax for Face Verification> link: https://arxiv.org/abs/1801.05599 embeddings : normalized embedding layer of Facenet, it's normalized value of output of resface label_batch : ground truth label of current training batch args: arguments from cmd line nrof_classes: number of classes #(batch_size, nrof_classes) 表征了每个feature与对应权重的夹角
| 2.675555
| 3
|
alipay/aop/api/response/AlipayBossFncInvoiceMailinfoQueryResponse.py
|
antopen/alipay-sdk-python-all
| 213
|
6629128
|
<reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayBossFncInvoiceMailinfoQueryResponse(AlipayResponse):
    """Response model for the Alipay boss.fnc.invoice.mailinfo.query API.

    Holds the mailing information attached to an invoice (courier company,
    sender/recipient details, tracking number, timestamps).  Every API field
    is exposed as a plain read/write property backed by a private attribute.
    """

    # All response field names, used both to initialize the backing
    # attributes and to copy values out of the decoded response payload.
    # Adding a new API field only requires extending this tuple plus one
    # property pair below.
    _RESPONSE_FIELDS = (
        'creator',
        'express_company_name',
        'gmt_create',
        'gmt_modified',
        'invoice_id',
        'last_modifier',
        'mail_date',
        'mail_id',
        'reason',
        'recipients_address',
        'recipients_name',
        'recipients_tel',
        'sender_address',
        'sender_name',
        'sender_tel',
        'tnt_inst_id',
        'tracking_no',
    )

    def __init__(self):
        super(AlipayBossFncInvoiceMailinfoQueryResponse, self).__init__()
        # Start every field as None so the properties are safe to read
        # before a response has been parsed.
        for field in self._RESPONSE_FIELDS:
            setattr(self, '_' + field, None)

    @property
    def creator(self):
        return self._creator

    @creator.setter
    def creator(self, value):
        self._creator = value

    @property
    def express_company_name(self):
        return self._express_company_name

    @express_company_name.setter
    def express_company_name(self, value):
        self._express_company_name = value

    @property
    def gmt_create(self):
        return self._gmt_create

    @gmt_create.setter
    def gmt_create(self, value):
        self._gmt_create = value

    @property
    def gmt_modified(self):
        return self._gmt_modified

    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value

    @property
    def invoice_id(self):
        return self._invoice_id

    @invoice_id.setter
    def invoice_id(self, value):
        self._invoice_id = value

    @property
    def last_modifier(self):
        return self._last_modifier

    @last_modifier.setter
    def last_modifier(self, value):
        self._last_modifier = value

    @property
    def mail_date(self):
        return self._mail_date

    @mail_date.setter
    def mail_date(self, value):
        self._mail_date = value

    @property
    def mail_id(self):
        return self._mail_id

    @mail_id.setter
    def mail_id(self, value):
        self._mail_id = value

    @property
    def reason(self):
        return self._reason

    @reason.setter
    def reason(self, value):
        self._reason = value

    @property
    def recipients_address(self):
        return self._recipients_address

    @recipients_address.setter
    def recipients_address(self, value):
        self._recipients_address = value

    @property
    def recipients_name(self):
        return self._recipients_name

    @recipients_name.setter
    def recipients_name(self, value):
        self._recipients_name = value

    @property
    def recipients_tel(self):
        return self._recipients_tel

    @recipients_tel.setter
    def recipients_tel(self, value):
        self._recipients_tel = value

    @property
    def sender_address(self):
        return self._sender_address

    @sender_address.setter
    def sender_address(self, value):
        self._sender_address = value

    @property
    def sender_name(self):
        return self._sender_name

    @sender_name.setter
    def sender_name(self, value):
        self._sender_name = value

    @property
    def sender_tel(self):
        return self._sender_tel

    @sender_tel.setter
    def sender_tel(self, value):
        self._sender_tel = value

    @property
    def tnt_inst_id(self):
        return self._tnt_inst_id

    @tnt_inst_id.setter
    def tnt_inst_id(self, value):
        self._tnt_inst_id = value

    @property
    def tracking_no(self):
        return self._tracking_no

    @tracking_no.setter
    def tracking_no(self, value):
        self._tracking_no = value

    def parse_response_content(self, response_content):
        """Populate the fields from a raw response payload.

        Delegates decoding to the parent class, then copies every known
        field present in the decoded dict onto the matching property.
        """
        response = super(AlipayBossFncInvoiceMailinfoQueryResponse, self).parse_response_content(response_content)
        # Data-driven copy replaces 17 hand-written `if` blocks; assignment
        # goes through the property setters, exactly as before.
        for field in self._RESPONSE_FIELDS:
            if field in response:
                setattr(self, field, response[field])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
def _mail_info_property(field_name):
    """Build a read/write property proxying the private '_<field_name>' slot."""
    backing = '_' + field_name

    def _reader(self):
        return getattr(self, backing)

    def _writer(self, value):
        setattr(self, backing, value)

    return property(_reader, _writer)


class AlipayBossFncInvoiceMailinfoQueryResponse(AlipayResponse):
    """Response model for the Alipay boss.fnc.invoice.mailinfo.query API.

    Every API field is surfaced as a plain read/write property (generated
    below) backed by a private '_<name>' attribute.
    """

    # Response field names; drives attribute initialization, response
    # parsing, and the generated accessors.
    _FIELD_NAMES = (
        'creator', 'express_company_name', 'gmt_create', 'gmt_modified',
        'invoice_id', 'last_modifier', 'mail_date', 'mail_id', 'reason',
        'recipients_address', 'recipients_name', 'recipients_tel',
        'sender_address', 'sender_name', 'sender_tel', 'tnt_inst_id',
        'tracking_no',
    )

    def __init__(self):
        super(AlipayBossFncInvoiceMailinfoQueryResponse, self).__init__()
        # Every backing slot starts as None, so reading a property before a
        # response has been parsed is safe.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    def parse_response_content(self, response_content):
        """Decode the payload via the parent class and copy known fields."""
        response = super(AlipayBossFncInvoiceMailinfoQueryResponse, self).parse_response_content(response_content)
        for name in self._FIELD_NAMES:
            if name in response:
                # Goes through the generated property setter.
                setattr(self, name, response[name])


# Attach one generated accessor per field after class creation; each behaves
# exactly like an explicit @property / @<name>.setter pair.
for _field in AlipayBossFncInvoiceMailinfoQueryResponse._FIELD_NAMES:
    setattr(AlipayBossFncInvoiceMailinfoQueryResponse, _field,
            _mail_info_property(_field))
del _field
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 1.940315
| 2
|
src/pretix/base/migrations/0022_merge.py
|
pajowu/pretix
| 1,248
|
6629129
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-23 09:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling the 0020 and 0021 branches.

    Carries no schema operations; it exists only so Django sees a single
    linear migration history again.
    """

    dependencies = [
        ('pretixbase', '0020_auto_20160421_1943'),
        ('pretixbase', '0021_auto_20160418_2117'),
    ]

    operations = []
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-23 09:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: joins the two parallel migration
    # branches (0020 and 0021) into one history.  It deliberately applies
    # no schema operations.

    dependencies = [
        ('pretixbase', '0020_auto_20160421_1943'),
        ('pretixbase', '0021_auto_20160418_2117'),
    ]

    operations = [
    ]
|
en
| 0.795388
|
# -*- coding: utf-8 -*- # Generated by Django 1.9.2 on 2016-04-23 09:44
| 1.302242
| 1
|
s100py/s102/utils.py
|
GlenRice-NOAA/s100py
| 0
|
6629130
|
""" Functions to create S102 data from other sources
"""
import logging
import sys
import warnings
import numpy
from osgeo import gdal, osr
from ..s1xx import s1xx_sequence
from .api import DEPTH, UNCERTAINTY, S102File, S102Exception
# @todo create a friendly name mapping to s102 nested location, then add s102 functions for "to dictionary" and "from dictionary" to api
# that would make these functions easily invertable
def _get_S102File(output_file):
    """Coerce *output_file* into an :any:`S102File`.

    Accepts an existing S102File (returned unchanged) or anything the
    S102File constructor accepts (path string, pathlib path, tempfile,
    io.BytesIO, ...).  On failure the original TypeError is re-raised as
    the same exception type with a clearer message, preserving traceback.
    """
    if isinstance(output_file, S102File):
        return output_file
    # Not already an S102File -- let the constructor try everything else.
    try:
        return S102File(output_file)
    except TypeError as type_err:
        message = "Failed to create S102File using {}".format(str(output_file))
        logging.error(message)
        raise type(type_err)(message).with_traceback(sys.exc_info()[2])
def create_s102(output_file, overwrite=True) -> S102File:
    """ Creates or updates an S102File object.
    Default values are set for any data that don't have options or are mandatory to be filled in the S102 spec.

    Parameters
    ----------
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    overwrite
        If updating an existing file then set this option to False in order to retain data (not sure this is needed).

    Returns
    -------
    S102File
        The object created or updated by this function.
    """
    data_file = _get_S102File(output_file)
    # @fixme @todo -- I think this will overwrite no matter what, need to look into that
    data_file.create_empty_metadata()  # init the root with a fully filled out empty metadata set
    root = data_file.root

    # Register the two bathymetry coverage layers (depth, then uncertainty);
    # code and name both take the layer identifier, remaining attributes
    # (units, fill value, bounds, ...) are auto-filled by the api.
    coverage_dataset = root.feature_information.bathymetry_coverage_dataset
    for layer_code in (DEPTH, UNCERTAINTY):
        layer_info = coverage_dataset.append_new_item()
        layer_info.initialize_properties(True, overwrite=overwrite)
        layer_info.code = layer_code
        layer_info.name = layer_code

    # Tracking-list columns required by the spec; every entry gets unit
    # "N/A" except originalValue, which sets no unit of measure.
    tracking_cov = root.feature_information.tracking_list_coverage
    for code, name, unit in (("X", "X", "N/A"),
                             ("Y", "Y", "N/A"),
                             ("originalValue", "Original Value", None),
                             ("trackCode", "Track Code", "N/A"),
                             ("listSeries", "List Series", "N/A")):
        entry = tracking_cov.append_new_item()
        entry.initialize_properties(True, overwrite=overwrite)
        entry.code = code
        entry.name = name
        if unit is not None:
            entry.unit_of_measure = unit

    coverage = root.bathymetry_coverage
    coverage.axis_names = numpy.array(["Longitude", "Latitude"])  # row major order means X/longitude first
    coverage.sequencing_rule_scan_direction = "Longitude, Latitude"
    coverage.common_point_rule = 1  # average
    # data_coding_format and dimension are left at their defaults.
    coverage.interpolation_type = 1  # nearest neighbor
    coverage.num_instances = 1  # how many Bathycoverages
    coverage.sequencing_rule_type = 1  # linear
    del coverage.time_uncertainty
    return data_file
def from_arrays(depth_grid: s1xx_sequence, uncert_grid: s1xx_sequence, output_file, nodata_value=None,
                flip_x: bool = False, flip_y: bool = False, overwrite: bool = True) -> S102File:  # num_array, or list of lists accepted
    """ Creates or updates an S102File object based on numpy array/h5py datasets.
    Calls :any:`create_s102` then fills in the HDF5 datasets with the supplied depth_grid and uncert_grid.
    Fills the number of points areas and any other appropriate places in the HDF5 file per the S102 spec.

    Raises an S102Exception if the shapes of the depth and uncertainty (if not None) grids are not equal.

    Parameters
    ----------
    depth_grid
        The depth dataset (2D) to embed in the object.
    uncert_grid
        The uncertainty dataset to embed in the object.
        If None then a numpy.zeros array will be created in the appropriate shape to be stored in the file.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    nodata_value
        Value used to denote an empty cell in the grid.  Used in finding the min/max and then converted to the S102 fillValue.
    flip_x
        boolean if the data should be mirrored on x coordinate (i.e. the original grid is right to left)
        Flips are done here so we can implement a chunked read/write to save memory
    flip_y
        boolean if the data should be mirrored on y coordinate (i.e. the original grid is top to bottom)
        Flips are done here so we can implement a chunked read/write to save memory
    overwrite
        If updating an existing file then set this option to False in order to retain data (not sure this is needed).

    Returns
    -------
    S102File
        The object created or updated by this function.
    """
    # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources
    data_file = create_s102(output_file)
    root = data_file.root
    try:
        # Reuse the first BathymetryCoverage instance if one already exists.
        bathy_01 = root.bathymetry_coverage.bathymetry_coverage[0]
    except IndexError:
        bathy_01 = root.bathymetry_coverage.bathymetry_coverage.append_new_item()
        bathy_01.initialize_properties(recursively_create_children=True, overwrite=overwrite)
    # Remove optional vertical/time attributes that this writer does not populate.
    del bathy_01.grid_spacing_vertical
    del bathy_01.grid_origin_vertical
    del bathy_01.number_of_times
    del bathy_01.time_record_interval
    del bathy_01.date_time_of_last_record
    del bathy_01.date_time_of_first_record
    bathy_01.num_grp = 1
    try:
        # Same reuse-or-create pattern for the bathymetry group.
        bathy_group_object = bathy_01.bathymetry_group[0]
    except IndexError:
        bathy_group_object = bathy_01.bathymetry_group.append_new_item()
    # bathy_group_object.initialize_properties()  # Not creating everything as I'm not sure if the grid attributes should be there
    # @todo @fixme fix here -- row/column order?
    # NOTE(review): nx is taken from the FIRST array axis and mapped to
    # "longitudinal" below -- confirm against the caller's array layout.
    nx, ny = depth_grid.shape
    if uncert_grid is None:
        # No uncertainty supplied: store a same-shaped grid of the nodata value.
        uncert_grid = numpy.full(depth_grid.shape, nodata_value, dtype=numpy.float32)
    if depth_grid.shape != uncert_grid.shape:
        raise S102Exception("Depth and Uncertainty grids have different shapes")
    bathy_01.num_points_latitudinal = ny
    bathy_01.num_points_longitudinal = nx
    bathy_01.start_sequence = "0,0"
    del bathy_01.num_points_vertical
    del bathy_01.vertical_extent_maximum_z
    del bathy_01.vertical_extent_minimum_z
    bathy_group_object.extent_create()
    bathy_group_object.extent.initialize_properties(True, overwrite=overwrite)
    # Extent is expressed in grid (cell index) coordinates, not world coordinates.
    bathy_group_object.extent.low.coord_values[0:2] = [0, 0]
    bathy_group_object.extent.high.coord_values[0:2] = [nx, ny]
    # Min/max statistics exclude cells equal to the nodata value.
    # NOTE(review): an all-nodata grid would make these selections empty and
    # .max()/.min() raise -- confirm callers never pass an empty grid.
    depth_max = depth_grid[depth_grid != nodata_value].max()
    depth_min = depth_grid[depth_grid != nodata_value].min()
    bathy_group_object.maximum_depth = depth_max
    bathy_group_object.minimum_depth = depth_min
    uncertainty_max = uncert_grid[uncert_grid != nodata_value].max()
    uncertainty_min = uncert_grid[uncert_grid != nodata_value].min()
    bathy_group_object.minimum_uncertainty = uncertainty_min
    bathy_group_object.maximum_uncertainty = uncertainty_max
    bathy_group_object.dimension = 2
    bathy_group_object.origin_create()
    bathy_group_object.origin.initialize_properties(True, overwrite=overwrite)
    bathy_group_object.origin.dimension = 2
    bathy_group_object.values_create()
    grid = bathy_group_object.values
    # @todo -- need to make sure nodata values are correct, especially if converting something other than bag which is supposed to have the same nodata value
    # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources
    # Mirror the grids here (rather than in the caller) so a future chunked
    # implementation can flip while streaming.
    if flip_x:
        depth_grid = numpy.fliplr(depth_grid)
        uncert_grid = numpy.fliplr(uncert_grid)
    if flip_y:
        depth_grid = numpy.flipud(depth_grid)
        uncert_grid = numpy.flipud(uncert_grid)
    if nodata_value != root.feature_information.bathymetry_coverage_dataset[0].fill_value:
        # Re-map the caller's nodata value onto the S102 fill values; copy
        # first so the caller's arrays are not modified in place.
        depth_grid = numpy.copy(depth_grid)
        depth_grid[depth_grid == nodata_value] = root.feature_information.bathymetry_coverage_dataset[0].fill_value
        uncert_grid = numpy.copy(uncert_grid)
        uncert_grid[uncert_grid == nodata_value] = root.feature_information.bathymetry_coverage_dataset[1].fill_value
    grid.depth = depth_grid
    grid.uncertainty = uncert_grid
    return data_file
def from_arrays_with_metadata(depth_grid: s1xx_sequence, uncert_grid: s1xx_sequence, metadata: dict, output_file, nodata_value=None,
                              overwrite: bool = True) -> S102File:  # raw arrays and metadata accepted
    """ Fills or creates an :any:`S102File` from the given arguments.

    Parameters
    ----------
    depth_grid
        a numpy or hdf5 dataset object of the rectangular grid of depths, lower left corner is the first point
    uncert_grid
        a numpy or hdf5 dataset object of the rectangular grid of uncertainties, lower left corner is the first point
    metadata
        a dictionary of metadata describing the grids passed in,
        metadata should have the following key/value pairs:

        - "origin": tuple of the position (x,y) or (lon, lat) for the reference corner node.
          Other corners are calulated from this corner using the resolution and size of the data array.
        - "res": tuple of the resolution (cell size) of each grid cell (x, y).
          If a resolution is negative then the grid will be flipped in that dimension and the origin adjusted accordingly.
        - "horizontalDatumReference": See :any:`S102Root` horizontal_datum_reference, ex: "EPSG".
          "EPSG" is the default value.
        - "horizontalDatumValue": The value for the horizontal data such as the EPSG code ex: 32611
        - "epoch":
        - "geographicIdentifier": Location of the data, ex: "Long Beach, CA, USA".
          An empty string ("") is the default.
        - "issueDate":
        - "metadataFile": File name for the associated discovery metatadata (xml)
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    nodata_value
        the "no data" value used in the grids
    overwrite
        if the output_file was an existing S102File then keep any attributes that might have

    Raises
    ------
    KeyError
        if the required "res" or "origin" keys are missing from metadata.
    ValueError
        if "horizontalDatumValue" is not one of the S102-accepted EPSG codes.

    Returns
    -------
    S102File
    """
    # @todo - add logic to see if the coordinate system is lower right, if not then need to mirror the arrays or add flags to do that in from_arrays
    # A negative resolution means the grid is mirrored along that axis;
    # from_arrays performs the actual flip, here we only record the flags.
    res_x, res_y = metadata["res"]
    flip_x = res_x < 0
    flip_y = res_y < 0
    nx, ny = depth_grid.shape
    corner_x, corner_y = metadata['origin']
    # S-102 is node based, so distance to far corner is res * (n - 1)
    opposite_corner_x = corner_x + res_x * (nx - 1)
    opposite_corner_y = corner_y + res_y * (ny - 1)
    minx = min((corner_x, opposite_corner_x))
    maxx = max((corner_x, opposite_corner_x))
    miny = min((corner_y, opposite_corner_y))
    maxy = max((corner_y, opposite_corner_y))
    data_file = from_arrays(depth_grid, uncert_grid, output_file, nodata_value=nodata_value, overwrite=overwrite, flip_x=flip_x, flip_y=flip_y)
    # now add the additional metadata
    root = data_file.root
    bathy_01 = root.bathymetry_coverage.bathymetry_coverage[0]
    bathy_group_object = bathy_01.bathymetry_group[0]
    # Geographic bounding box: west/south carry the minimums, east/north the
    # maximums.  (The original code assigned east=minx and west=maxx, which
    # inverted the longitude bounds of the file.)
    root.west_bound_longitude = minx
    root.east_bound_longitude = maxx
    root.south_bound_latitude = miny
    root.north_bound_latitude = maxy
    bathy_01.west_bound_longitude = minx
    bathy_01.east_bound_longitude = maxx
    bathy_01.south_bound_latitude = miny
    bathy_01.north_bound_latitude = maxy
    # Grid origin = the minimum (lower-left) corner after any flips.
    bathy_01.grid_origin_longitude = minx
    bathy_01.grid_origin_latitude = miny
    bathy_01.grid_spacing_longitudinal = abs(res_x)  # we adjust for negative resolution in the from_arrays
    bathy_01.grid_spacing_latitudinal = abs(res_y)
    bathy_group_object.origin.coordinate = numpy.array([minx, miny])
    # these names are taken from the S100/S102 attribute names
    # but are hard coded here to allow the S102 spec to change but not affect any tools built on these utility functions
    if "horizontalDatumReference" in metadata or overwrite:
        root.horizontal_datum_reference = metadata.get("horizontalDatumReference", "EPSG")
    if "horizontalDatumValue" in metadata or overwrite:
        source_epsg = int(metadata.get("horizontalDatumValue", 0))
        if source_epsg in get_valid_epsg():
            root.horizontal_datum_value = source_epsg
        else:
            raise ValueError(f'The provided EPSG code {source_epsg} is not within the S102 specified values.')
        # Axis naming follows the CRS: projected systems use Easting/Northing.
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(root.horizontal_datum_value)
        if srs.IsProjected():
            axes = ["Easting", "Northing"]
        else:
            axes = ["Longitude", "Latitude"]
        bathy_group_object.axis_names = numpy.array(axes)  # row major order means X/longitude first
        root.bathymetry_coverage.axis_names = numpy.array(axes)  # row major order means X/longitude first
        root.bathymetry_coverage.sequencing_rule_scan_direction = ", ".join(axes)
    if "epoch" in metadata or overwrite:
        root.epoch = metadata.get("epoch", "")  # e.g. "G1762" this is the 2013-10-16 WGS84 used by CRS
    if "geographicIdentifier" in metadata or overwrite:
        root.geographic_identifier = metadata.get("geographicIdentifier", "")
    if "issueDate" in metadata or overwrite:
        root.issue_date = metadata.get('issueDate', "")  # datetime.date.today().isoformat()
    if "metadataFile" in metadata or overwrite:
        root.metadata = metadata.get('metadataFile', "")  # datetime.date.today().isoformat()
    data_file.write()
    data_file.flush()
    return data_file
def from_gdal(input_raster, output_file, metadata: dict = None) -> S102File:  # gdal instance or filename accepted
    """ Fills or creates an :any:`S102File` from the given arguments.

    Parameters
    ----------
    input_raster
        Either a path to a raster file that GDAL can open or a gdal.Dataset object.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    metadata
        A dictionary of metadata describing the grids passed in.
        All the metadata used in :any:`from_arrays_with_metadata` can be specified and
        would override the values that would have been populated based on the GDAL data.
        horizontalDatumReference, horizontalDatumValue, origin, res will be determined from GDAL if not otherwise specified.
        Defaults to an empty dict; the caller's dict is never modified.

    Returns
    -------
    S102File
    """
    # The original signature used `metadata: dict = {}` -- a mutable default
    # that this function then mutated, leaking values between calls.  Use
    # None as the sentinel and work on a shallow copy instead.
    metadata = {} if metadata is None else dict(metadata)
    if isinstance(input_raster, gdal.Dataset):
        dataset = input_raster
    else:
        dataset = gdal.Open(input_raster)
    # @todo @fixme -- transform the coordinate system to a WGS84.  Strictly this may not end up being square, so how do we handle
    # transform = osr.CoordinateTransformation( src_srs, tgt_srs)
    # Until we have a working datum engine this module should not do datum transformations - GR 20200402
    if "horizontalDatumReference" not in metadata or "horizontalDatumValue" not in metadata:
        metadata["horizontalDatumReference"] = "EPSG"
        epsg = osr.SpatialReference(dataset.GetProjection()).GetAttrValue("AUTHORITY", 1)
        metadata["horizontalDatumValue"] = int(epsg)
    if "epoch" not in metadata:
        # @todo We should be able to pull this from the WKT
        pass
    raster_band = dataset.GetRasterBand(1)
    depth_nodata_value = raster_band.GetNoDataValue()
    uncertainty_band = dataset.GetRasterBand(2)
    ulx, dxx, dxy, uly, dyx, dyy = dataset.GetGeoTransform()
    if dxy != 0.0 or dyx != 0.0:
        raise S102Exception("raster is not north up but is rotated, this is not handled at this time")
    if "origin" not in metadata:
        # shift the gdal geotransform corner point to reference the node (pixel is center) rather than cell (pixel is area)
        metadata["origin"] = [ulx + dxx / 2, uly + dyy / 2]
    if "res" not in metadata:
        metadata["res"] = [dxx, dyy]
    s102_data_file = from_arrays_with_metadata(raster_band.ReadAsArray(), uncertainty_band.ReadAsArray(), metadata, output_file,
                                               nodata_value=depth_nodata_value)
    return s102_data_file
def from_bag(bagfile, output_file, metadata: dict = None) -> S102File:
    """ Fills or creates an :any:`S102File` from a BAG file.

    Parameters
    ----------
    bagfile
        Either a path to a BAG file that GDAL can open or a gdal.Dataset object.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    metadata
        Optional metadata dict, passed through to :any:`from_gdal`.
        "resample_resolution" selects the resolution used to resample a
        variable-resolution BAG; "issueDate" overrides the date extracted
        from the BAG XML metadata.  Defaults to an empty dict; the caller's
        dict is never modified.

    Returns
    -------
    S102File
    """
    # The original signature used `metadata: dict = {}` -- a mutable default
    # that this function then mutated ('issueDate'), leaking state between
    # calls.  Use None as the sentinel and work on a shallow copy instead.
    metadata = {} if metadata is None else dict(metadata)
    if isinstance(bagfile, gdal.Dataset):
        bag = bagfile
    else:
        bag = gdal.Open(bagfile)
    # check for and resample variable resolution BAG if able
    gdal_metadata = bag.GetMetadata()
    if 'HAS_SUPERGRIDS' in gdal_metadata and gdal_metadata['HAS_SUPERGRIDS'] == 'TRUE':
        bag_filename = bag.GetFileList()[0]
        if "resample_resolution" in metadata:
            res = metadata["resample_resolution"]
            bag = None  # release the original dataset before reopening resampled
            bag = gdal.OpenEx(bag_filename, open_options=['MODE=RESAMPLED_GRID', f'RESX={res}', f'RESY={res}'])
        else:
            warnings.warn(f'No resampling resolution provided for variable resolution bag {bag_filename}. Using overview resolution.', category=RuntimeWarning)
    # populate the issueDate if possible from a simple string search
    xml_str = bag.GetMetadata('xml:BAG')[0]
    if 'issueDate' not in metadata:
        date_key = '<gmd:dateStamp>\n <gco:Date>'
        date_idx = xml_str.find(date_key)
        if date_idx > 0:
            date_idx += len(date_key)
            # ISO date is exactly 10 characters (YYYY-MM-DD).
            date = xml_str[date_idx:date_idx + 10]
            metadata['issueDate'] = date
    s102_data_file = from_gdal(bag, output_file, metadata=metadata)
    return s102_data_file
def get_valid_epsg() -> list:
    """
    Create and return the list of valid EPSG codes for S-102 version 2.0.

    Returns WGS 84 (4326), UPS North/South (5041/5042), and all UTM zones
    north (32601-32660) and south (32701-32760) as plain ints.
    """
    # Plain ranges replace the original numpy.arange calls: no numpy needed
    # for a list of integers, and membership tests behave identically.
    valid_epsg = [4326, 5041, 5042]
    valid_epsg.extend(range(32601, 32660 + 1))
    valid_epsg.extend(range(32701, 32760 + 1))
    return valid_epsg
|
""" Functions to create S102 data from other sources
"""
import logging
import sys
import warnings
import numpy
from osgeo import gdal, osr
from ..s1xx import s1xx_sequence
from .api import DEPTH, UNCERTAINTY, S102File, S102Exception
# @todo create a friendly name mapping to s102 nested location, then add s102 functions for "to dictionary" and "from dictionary" to api
# that would make these functions easily invertable
def _get_S102File(output_file):
    """ Small helper function to convert the output_file parameter into a S102File, currently accepting file path as string or S102File instance.
    Could probably accept h5py.File or other things in the future"""
    if isinstance(output_file, S102File):
        # Already the right type -- use it as-is.
        data_file = output_file
    else:  # try everything else -- pathlib, str, tempfile, io.BytesIO
        try:
            data_file = S102File(output_file)
        except TypeError as typeerr:
            # Re-raise the same exception type with a clearer message while
            # preserving the original traceback.
            msg = "Failed to create S102File using {}".format(str(output_file))
            logging.error(msg)
            raise type(typeerr)(msg).with_traceback(sys.exc_info()[2])
    return data_file
def create_s102(output_file, overwrite=True) -> S102File:
    """ Creates or updates an S102File object.
    Default values are set for any data that don't have options or are mandatory to be filled in the S102 spec.

    Parameters
    ----------
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    overwrite
        If updating an existing file then set this option to False in order to retain data (not sure this is needed).

    Returns
    -------
    S102File
        The object created or updated by this function.
    """
    data_file = _get_S102File(output_file)
    # @fixme @todo -- I think this will overwrite no matter what, need to look into that
    data_file.create_empty_metadata()  # init the root with a fully filled out empty metadata set
    root = data_file.root
    # First coverage layer: depth.
    bathy_cov_dset = root.feature_information.bathymetry_coverage_dataset
    bathy_depth_info = bathy_cov_dset.append_new_item()  # bathy_cov_dset.append(bathy_cov_dset.metadata_type())
    bathy_depth_info.initialize_properties(True, overwrite=overwrite)
    bathy_depth_info.code = DEPTH
    bathy_depth_info.name = DEPTH
    # these are auto-filled by the api
    # bathy_depth_info.unit_of_measure="metres"
    # bathy_depth_info.fill_value=1000000.0
    # bathy_depth_info.datatype=H5T_NATIVE_FLOAT
    # bathy_depth_info.lower = -12000
    # bathy_depth_info.upper = 12000
    # bathy_depth_info.closure = "closedInterval"
    # Second coverage layer: uncertainty, same initialization pattern.
    bathy_uncertainty_info = bathy_cov_dset.append_new_item()
    bathy_uncertainty_info.initialize_properties(True, overwrite=overwrite)
    bathy_uncertainty_info.code = UNCERTAINTY
    bathy_uncertainty_info.name = UNCERTAINTY
    # I'm not sure what to put here, yet
    # Tracking-list columns: X, Y, originalValue, trackCode, listSeries.
    # NOTE(review): originalValue is the only entry with no unit_of_measure
    # assigned -- confirm that is intentional.
    tracking_cov = root.feature_information.tracking_list_coverage
    track_info = tracking_cov.append_new_item()  # append(tracking_cov.metadata_type())
    track_info.initialize_properties(True, overwrite=overwrite)
    track_info.code = "X"
    track_info.name = "X"
    track_info.unit_of_measure = "N/A"
    track_info = tracking_cov.append_new_item()
    track_info.initialize_properties(True, overwrite=overwrite)
    track_info.code = "Y"
    track_info.name = "Y"
    track_info.unit_of_measure = "N/A"
    track_info = tracking_cov.append_new_item()
    track_info.initialize_properties(True, overwrite=overwrite)
    track_info.code = "originalValue"
    track_info.name = "Original Value"
    track_info = tracking_cov.append_new_item()
    track_info.initialize_properties(True, overwrite=overwrite)
    track_info.code = "trackCode"
    track_info.name = "Track Code"
    track_info.unit_of_measure = "N/A"
    track_info = tracking_cov.append_new_item()
    track_info.initialize_properties(True, overwrite=overwrite)
    track_info.code = "listSeries"
    track_info.name = "List Series"
    track_info.unit_of_measure = "N/A"
    root.bathymetry_coverage.axis_names = numpy.array(["Longitude", "Latitude"])  # row major order means X/longitude first
    root.bathymetry_coverage.sequencing_rule_scan_direction = "Longitude, Latitude"
    root.bathymetry_coverage.common_point_rule = 1  # average
    # root.bathymetry_coverage.data_coding_format = 2 # default
    # root.bathymetry_coverage.dimension = 2 # default value
    root.bathymetry_coverage.interpolation_type = 1  # nearest neighbor
    root.bathymetry_coverage.num_instances = 1  # how many Bathycoverages
    root.bathymetry_coverage.sequencing_rule_type = 1  # linear
    del root.bathymetry_coverage.time_uncertainty
    return data_file
def from_arrays(depth_grid: s1xx_sequence, uncert_grid: s1xx_sequence, output_file, nodata_value=None,
                flip_x: bool = False, flip_y: bool = False, overwrite: bool = True) -> S102File:  # num_array, or list of lists accepted
    """ Creates or updates an S102File object based on numpy array/h5py datasets.
    Calls :any:`create_s102` then fills in the HDF5 datasets with the supplied depth_grid and uncert_grid.
    Fills the number of points areas and any other appropriate places in the HDF5 file per the S102 spec.
    Raises an S102Exception if the shapes of the depth and uncertainty (if not None) grids are not equal.

    Parameters
    ----------
    depth_grid
        The depth dataset to embed; must be a 2D rectangular grid (``.shape`` is unpacked into two values below).
    uncert_grid
        The uncertainty dataset to embed in the object.
        If None then a numpy.zeros array will be created in the appropriate shape to be stored in the file.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    nodata_value
        Value used to denote an empty cell in the grid.  Used in finding the min/max and then converted to the S102 fillValue.
    flip_x
        boolean if the data should be mirrored on x coordinate (i.e. the original grid is right to left)
        Flips are done here so we can implement a chunked read/write to save memory
    flip_y
        boolean if the data should be mirrored on y coordinate (i.e. the original grid is top to bottom)
        Flips are done here so we can implement a chunked read/write to save memory
    overwrite
        If updating an existing file then set this option to False in order to retain data (not sure this is needed).

    Returns
    -------
    S102File
        The object created or updated by this function.
    """
    # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources
    data_file = create_s102(output_file)
    root = data_file.root
    # Reuse the first BathymetryCoverage instance if one already exists, otherwise
    # create a new one (EAFP: index and fall back on IndexError).
    try:
        bathy_01 = root.bathymetry_coverage.bathymetry_coverage[0]
    except IndexError:
        bathy_01 = root.bathymetry_coverage.bathymetry_coverage.append_new_item()
        bathy_01.initialize_properties(recursively_create_children=True, overwrite=overwrite)
    # These attributes do not apply to a static (non time-series) dataset, so remove them.
    del bathy_01.grid_spacing_vertical
    del bathy_01.grid_origin_vertical
    del bathy_01.number_of_times
    del bathy_01.time_record_interval
    del bathy_01.date_time_of_last_record
    del bathy_01.date_time_of_first_record
    bathy_01.num_grp = 1  # a single values group under this coverage
    try:
        bathy_group_object = bathy_01.bathymetry_group[0]
    except IndexError:
        bathy_group_object = bathy_01.bathymetry_group.append_new_item()
    # bathy_group_object.initialize_properties()  # Not creating everything as I'm not sure if the grid attributes should be there
    # @todo @fixme fix here -- row/column order?
    nx, ny = depth_grid.shape
    if uncert_grid is None:
        # No uncertainty supplied: fill a same-shaped array with nodata so every cell reads as unknown.
        uncert_grid = numpy.full(depth_grid.shape, nodata_value, dtype=numpy.float32)
    if depth_grid.shape != uncert_grid.shape:
        raise S102Exception("Depth and Uncertainty grids have different shapes")
    bathy_01.num_points_latitudinal = ny
    bathy_01.num_points_longitudinal = nx
    bathy_01.start_sequence = "0,0"
    del bathy_01.num_points_vertical
    del bathy_01.vertical_extent_maximum_z
    del bathy_01.vertical_extent_minimum_z
    # Grid extent expressed in cell indices: low corner (0, 0), high corner (nx, ny).
    bathy_group_object.extent_create()
    bathy_group_object.extent.initialize_properties(True, overwrite=overwrite)
    bathy_group_object.extent.low.coord_values[0:2] = [0, 0]
    bathy_group_object.extent.high.coord_values[0:2] = [nx, ny]
    # Statistics are computed over valid cells only (nodata excluded).
    # NOTE(review): if every cell equals nodata_value these .max()/.min() calls
    # operate on an empty selection and raise -- confirm callers guarantee valid data.
    depth_max = depth_grid[depth_grid != nodata_value].max()
    depth_min = depth_grid[depth_grid != nodata_value].min()
    bathy_group_object.maximum_depth = depth_max
    bathy_group_object.minimum_depth = depth_min
    uncertainty_max = uncert_grid[uncert_grid != nodata_value].max()
    uncertainty_min = uncert_grid[uncert_grid != nodata_value].min()
    bathy_group_object.minimum_uncertainty = uncertainty_min
    bathy_group_object.maximum_uncertainty = uncertainty_max
    bathy_group_object.dimension = 2
    bathy_group_object.origin_create()
    bathy_group_object.origin.initialize_properties(True, overwrite=overwrite)
    bathy_group_object.origin.dimension = 2
    bathy_group_object.values_create()
    grid = bathy_group_object.values
    # @todo -- need to make sure nodata values are correct, especially if converting something other than bag which is supposed to have the same nodata value
    # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources
    if flip_x:
        depth_grid = numpy.fliplr(depth_grid)
        uncert_grid = numpy.fliplr(uncert_grid)
    if flip_y:
        depth_grid = numpy.flipud(depth_grid)
        uncert_grid = numpy.flipud(uncert_grid)
    # Translate the caller's nodata value into the S102 fill value declared in the
    # feature information table; copy first so the caller's arrays are not mutated.
    if nodata_value != root.feature_information.bathymetry_coverage_dataset[0].fill_value:
        depth_grid = numpy.copy(depth_grid)
        depth_grid[depth_grid == nodata_value] = root.feature_information.bathymetry_coverage_dataset[0].fill_value
        uncert_grid = numpy.copy(uncert_grid)
        uncert_grid[uncert_grid == nodata_value] = root.feature_information.bathymetry_coverage_dataset[1].fill_value
    grid.depth = depth_grid
    grid.uncertainty = uncert_grid
    return data_file
def from_arrays_with_metadata(depth_grid: s1xx_sequence, uncert_grid: s1xx_sequence, metadata: dict, output_file, nodata_value=None,
                              overwrite: bool = True) -> S102File:  # raw arrays and metadata accepted
    """ Fills or creates an :any:`S102File` from the given arguments.

    Parameters
    ----------
    depth_grid
        a numpy or hdf5 dataset object of the rectangular grid of depths, lower left corner is the first point
    uncert_grid
        a numpy or hdf5 dataset object of the rectangular grid of uncertainties, lower left corner is the first point
    metadata
        a dictionary of metadata describing the grids passed in,
        metadata should have the following key/value pairs:
            - "origin": tuple of the position (x,y) or (lon, lat) for the reference corner node.
              Other corners are calulated from this corner using the resolution and size of the data array.
            - "res": tuple of the resolution (cell size) of each grid cell (x, y).
              If a resolution is negative then the grid will be flipped in that dimension and the origin adjusted accordingly.
            - "horizontalDatumReference": See :any:`S102Root` horizontal_datum_reference, ex: "EPSG".
              "EPSG" is the default value.
            - "horizontalDatumValue": The value for the horizontal data such as the EPSG code ex: 32611
            - "epoch":
            - "geographicIdentifier": Location of the data, ex: "Long Beach, CA, USA".
              An empty string ("") is the default.
            - "issueDate":
            - "metadataFile": File name for the associated discovery metatadata (xml)
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    nodata_value
        the "no data" value used in the grids
    overwrite
        if the output_file was an existing S102File then keep any attributes that might have

    Returns
    -------
    S102File
    """
    # @todo - add logic to see if the coordinate system is lower right, if not then need to mirror the arrays or add flags to do that in from_arrays
    res_x, res_y = metadata["res"]
    # A negative resolution means the grid is stored backwards along that axis;
    # from_arrays mirrors the data so the origin ends up at the min corner.
    flip_x = True if res_x < 0 else False
    flip_y = True if res_y < 0 else False
    nx, ny = depth_grid.shape
    corner_x, corner_y = metadata['origin']
    # S-102 is node based, so distance to far corner is res * (n -1)
    opposite_corner_x = corner_x + res_x * (nx - 1)
    opposite_corner_y = corner_y + res_y * (ny - 1)
    minx = min((corner_x, opposite_corner_x))
    maxx = max((corner_x, opposite_corner_x))
    miny = min((corner_y, opposite_corner_y))
    maxy = max((corner_y, opposite_corner_y))
    data_file = from_arrays(depth_grid, uncert_grid, output_file, nodata_value=nodata_value, overwrite=overwrite, flip_x=flip_x, flip_y=flip_y)
    # now add the additional metadata
    root = data_file.root
    bathy_01 = root.bathymetry_coverage.bathymetry_coverage[0]
    bathy_group_object = bathy_01.bathymetry_group[0]
    # BUGFIX: the east/west bounds were swapped (east was assigned the minimum
    # longitude and west the maximum).  East is the maximum x, west the minimum.
    root.east_bound_longitude = maxx
    root.west_bound_longitude = minx
    root.south_bound_latitude = miny
    root.north_bound_latitude = maxy
    bathy_01.east_bound_longitude = maxx
    bathy_01.west_bound_longitude = minx
    bathy_01.south_bound_latitude = miny
    bathy_01.north_bound_latitude = maxy
    # (removed a duplicated grid_origin_latitude assignment that set miny twice)
    bathy_01.grid_origin_latitude = miny
    bathy_01.grid_origin_longitude = minx
    bathy_01.grid_spacing_longitudinal = abs(res_x)  # we adjust for negative resolution in the from_arrays
    bathy_01.grid_spacing_latitudinal = abs(res_y)
    bathy_group_object.origin.coordinate = numpy.array([minx, miny])
    # these names are taken from the S100/S102 attribute names
    # but are hard coded here to allow the S102 spec to change but not affect any tools built on these utility functions
    if "horizontalDatumReference" in metadata or overwrite:
        root.horizontal_datum_reference = metadata.get("horizontalDatumReference", "EPSG")
    if "horizontalDatumValue" in metadata or overwrite:
        source_epsg = int(metadata.get("horizontalDatumValue", 0))
        if source_epsg in get_valid_epsg():
            root.horizontal_datum_value = source_epsg
        else:
            raise ValueError(f'The provided EPSG code {source_epsg} is not within the S102 specified values.')
        # Axis naming depends on whether the CRS is projected (meters) or geographic (degrees).
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(root.horizontal_datum_value)
        if srs.IsProjected():
            axes = ["Easting", "Northing"]
        else:
            axes = ["Longitude", "Latitude"]
        bathy_group_object.axis_names = numpy.array(axes)  # row major order means X/longitude first
        root.bathymetry_coverage.axis_names = numpy.array(axes)  # row major order means X/longitude first
        root.bathymetry_coverage.sequencing_rule_scan_direction = ", ".join(axes)
    if "epoch" in metadata or overwrite:
        root.epoch = metadata.get("epoch", "")  # e.g. "G1762" this is the 2013-10-16 WGS84 used by CRS
    if "geographicIdentifier" in metadata or overwrite:
        root.geographic_identifier = metadata.get("geographicIdentifier", "")
    if "issueDate" in metadata or overwrite:
        root.issue_date = metadata.get('issueDate', "")  # datetime.date.today().isoformat()
    if "metadataFile" in metadata or overwrite:
        root.metadata = metadata.get('metadataFile', "")  # datetime.date.today().isoformat()
    data_file.write()
    data_file.flush()
    return data_file
def from_gdal(input_raster, output_file, metadata: dict = None) -> S102File:  # gdal instance or filename accepted
    """ Fills or creates an :any:`S102File` from the given arguments.

    Parameters
    ----------
    input_raster
        Either a path to a raster file that GDAL can open or a gdal.Dataset object.
        Band 1 is read as depth and band 2 as uncertainty.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    metadata
        A dictionary of metadata describing the grids passed in.
        All the metadata used in :any:`from_arrays_with_metadata` can be specified and
        would override the values that would have been populated based on the GDAL data.
        horizontalDatumReference, horizontalDatumValue, origin, res will be determined from GDAL if not otherwise specified.

    Returns
    -------
    S102File
    """
    # BUGFIX: the signature previously used a mutable default (``metadata: dict = {}``)
    # which this function mutates below -- every default-argument call shared and
    # polluted the same dict.  Copy the caller's dict too, so their argument is untouched.
    metadata = {} if metadata is None else dict(metadata)
    if isinstance(input_raster, gdal.Dataset):
        dataset = input_raster
    else:
        dataset = gdal.Open(input_raster)
    # @todo @fixme -- transform the coordinate system to a WGS84.  Strictly this may not end up being square, so how do we handle
    # transform = osr.CoordinateTransformation( src_srs, tgt_srs)
    # Until we have a working datum engine this module should not do datum transformations - GR 20200402
    if "horizontalDatumReference" not in metadata or "horizontalDatumValue" not in metadata:
        metadata["horizontalDatumReference"] = "EPSG"
        epsg = osr.SpatialReference(dataset.GetProjection()).GetAttrValue("AUTHORITY", 1)
        metadata["horizontalDatumValue"] = int(epsg)
    if "epoch" not in metadata:
        # @todo We should be able to pull this from the WKT
        pass
    raster_band = dataset.GetRasterBand(1)
    depth_nodata_value = raster_band.GetNoDataValue()
    uncertainty_band = dataset.GetRasterBand(2)
    ulx, dxx, dxy, uly, dyx, dyy = dataset.GetGeoTransform()
    if dxy != 0.0 or dyx != 0.0:
        raise S102Exception("raster is not north up but is rotated, this is not handled at this time")
    if "origin" not in metadata:
        # shift the gdal geotransform corner point to reference the node (pixel is center) rather than cell (pixel is area)
        metadata["origin"] = [ulx + dxx / 2, uly + dyy / 2]
    if "res" not in metadata:
        metadata["res"] = [dxx, dyy]
    s102_data_file = from_arrays_with_metadata(raster_band.ReadAsArray(), uncertainty_band.ReadAsArray(), metadata, output_file,
                                               nodata_value=depth_nodata_value)
    return s102_data_file
def from_bag(bagfile, output_file, metadata: dict = None) -> S102File:
    """ Fills or creates an :any:`S102File` from a BAG file (path or gdal.Dataset).

    Parameters
    ----------
    bagfile
        Either a path to a BAG file that GDAL can open or a gdal.Dataset object.
    output_file
        Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc.
    metadata
        Optional overrides passed through to :any:`from_gdal`; may also contain
        "resample_resolution" used when the BAG is variable resolution.

    Returns
    -------
    S102File
    """
    # @todo update method docstring for possible metadata fields
    # BUGFIX: the default was a mutable ``{}`` that this function mutates
    # ('issueDate' below); copy so neither the shared default nor the
    # caller's dict is modified.
    metadata = {} if metadata is None else dict(metadata)
    if isinstance(bagfile, gdal.Dataset):
        bag = bagfile
    else:
        bag = gdal.Open(bagfile)
    # check for and resample variable resolution BAG if able
    gdal_metadata = bag.GetMetadata()
    if 'HAS_SUPERGRIDS' in gdal_metadata and gdal_metadata['HAS_SUPERGRIDS'] == 'TRUE':
        bag_filename = bag.GetFileList()[0]
        if "resample_resolution" in metadata:
            res = metadata["resample_resolution"]
            bag = None  # release the original handle before reopening as a resampled grid
            bag = gdal.OpenEx(bag_filename, open_options=['MODE=RESAMPLED_GRID', f'RESX={res}', f'RESY={res}'])
        else:
            warnings.warn(f'No resampling resolution provided for variable resolution bag {bag_filename}. Using overview resolution.', category=RuntimeWarning)
    # populate the issueDate if possible from a simple string search of the BAG's XML metadata
    xml_str = bag.GetMetadata('xml:BAG')[0]
    if 'issueDate' not in metadata:
        date_key = '<gmd:dateStamp>\n                    <gco:Date>'
        date_idx = xml_str.find(date_key)
        if date_idx > 0:
            date_idx += len(date_key)
            date = xml_str[date_idx:date_idx + 10]  # ISO date "YYYY-MM-DD" is 10 characters
            metadata['issueDate'] = date
    s102_data_file = from_gdal(bag, output_file, metadata=metadata)
    return s102_data_file
def get_valid_epsg() -> list:
    """
    Create and return the list of valid EPSG codes for S-102 version 2.0.

    Returns
    -------
    list
        4326 (WGS84 geographic), 5041/5042 (UPS North/South) and the WGS84
        UTM zones: 32601-32660 (northern) and 32701-32760 (southern).
    """
    valid_epsg = [4326, 5041, 5042]
    # Use stdlib ``range`` instead of numpy.arange so the list holds plain
    # Python ints rather than numpy scalars (and needs no numpy at all).
    valid_epsg += list(range(32601, 32660 + 1))
    valid_epsg += list(range(32701, 32760 + 1))
    return valid_epsg
|
en
| 0.734771
|
Functions to create S102 data from other sources # @todo create a friendly name mapping to s102 nested location, then add s102 functions for "to dictionary" and "from dictionary" to api # that would make these functions easily invertable Small helper function to convert the output_file parameter into a S102File, currently accepting file path as string or S102File instance. Could propbably accept h5py.File or other things in the future # try everything else -- pathlib, str, tempfile, io.BytesIO Creates or updates an S102File object. Default values are set for any data that don't have options or are mandatory to be filled in the S102 spec. Parameters ---------- output_file Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc. overwrite If updating an existing file then set this option to False in order to retain data (not sure this is needed). Returns ------- S102File The object created or updated by this function. # @fixme @todo -- I think this will overwrite no matter what, need to look into that # init the root with a fully filled out empty metadata set # bathy_cov_dset.append(bathy_cov_dset.metadata_type()) # these are auto-filled by the api # bathy_depth_info.unit_of_measure="metres" # bathy_depth_info.fill_value=1000000.0 # bathy_depth_info.datatype=H5T_NATIVE_FLOAT # bathy_depth_info.lower = -12000 # bathy_depth_info.upper = 12000 # bathy_depth_info.closure = "closedInterval" # I'm not sure what to put here, yet # append(tracking_cov.metadata_type()) # row major order means X/longitude first # average # root.bathymetry_coverage.data_coding_format = 2 # default # root.bathymetry_coverage.dimension = 2 # default value # nearest neighbor # how many Bathycoverages # linear # num_array, or list of lists accepted Creates or updates an S102File object based on numpy array/h5py datasets. Calls :any:`create_s102` then fills in the HDF5 datasets with the supplied depth_grid and uncert_grid. 
Fills the number of points areas and any other appropriate places in the HDF5 file per the S102 spec. Raises an S102Exception if the shapes of the depth and uncertainty (if not None) grids are not equal. Parameters ---------- depth_grid uncert_grid The uncertainty dataset to embed in the object. If None then a numpy.zeros array will be created in the appropriate shape to be stored in the file. output_file Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc. nodata_value Value used to denote an empty cell in the grid. Used in finding the min/max and then converted to the S102 fillValue. flip_x boolean if the data should be mirrored on x coordinate (i.e. the original grid is right to left) Flips are done here so we can implement a chunked read/write to save memory flip_y boolean if the data should be mirrored on y coordinate (i.e. the original grid is top to bottom) Flips are done here so we can implement a chunked read/write to save memory overwrite If updating an existing file then set this option to False in order to retain data (not sure this is needed). Returns ------- S102File The object created or updated by this function. # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources # bathy_group_object.initialize_properties() # Not creating everything as I'm not sure if the grid attributes should be there # @todo @fixme fix here -- row/column order? # @todo -- need to make sure nodata values are correct, especially if converting something other than bag which is supposed to have the same nodata value # @todo -- Add logic that if the grids are gdal raster bands then read in blocks and use h5py slicing to write in blocks. Slower but saves resources # raw arrays and metadata accepted Fills or creates an :any:`S102File` from the given arguments. 
Parameters ---------- depth_grid a numpy or hdf5 dataset object of the rectangular grid of depths, lower left corner is the first point uncert_grid a numpy or hdf5 dataset object of the rectangular grid of uncertainties, lower left corner is the first point metadata a dictionary of metadata describing the grids passed in, metadata should have the following key/value pairs: - "origin": tuple of the position (x,y) or (lon, lat) for the reference corner node. Other corners are calulated from this corner using the resolution and size of the data array. - "res": tuple of the resolution (cell size) of each grid cell (x, y). If a resolution is negative then the grid will be flipped in that dimension and the origin adjusted accordingly. - "horizontalDatumReference": See :any:`S102Root` horizontal_datum_reference, ex: "EPSG". "EPSG" is the default value. - "horizontalDatumValue": The value for the horizontal data such as the EPSG code ex: 32611 - "epoch": - "geographicIdentifier": Location of the data, ex: "Long Beach, CA, USA". An empty string ("") is the default. - "issueDate": - "metadataFile": File name for the associated discovery metatadata (xml) output_file Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc. 
nodata_value the "no data" value used in the grids overwrite if the output_file was an existing S102File then keep any attributes that might have Returns ------- S102File # @todo - add logic to see if the coordinate system is lower right, if not then need to mirror the arrays or add flags to do that in from_arrays # S-102 is node based, so distance to far corner is res * (n -1) # now add the additional metadata # we adjust for negative resolution in the from_arrays # these names are taken from the S100/S102 attribute names # but are hard coded here to allow the S102 spec to change but not affect any tools built on these utility functions # row major order means X/longitude first # row major order means X/longitude first # e.g. "G1762" this is the 2013-10-16 WGS84 used by CRS # datetime.date.today().isoformat() # datetime.date.today().isoformat() # gdal instance or filename accepted Fills or creates an :any:`S102File` from the given arguments. Parameters ---------- input_raster Either a path to a raster file that GDAL can open or a gdal.Dataset object. output_file Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc. metadata A dictionary of metadata describing the grids passed in. All the metadata used in :any:`from_from_arrays_with_metadata` can be specified and would override the values that would have been populated based on the GDAL data. horizontalDatumReference, horizontalDatumValue, origin, res will be determined from GDAL if not otherwise specified. Returns ------- S102File # @todo @fixme -- transform the coordinate system to a WGS84. 
Strictly this may not end up being square, so how do we handle # transform = osr.CoordinateTransformation( src_srs, tgt_srs) # Until we have a working datum engine this module should not do datum transformations - GR 20200402 # @todo We should be able to pull this from the WKT # shift the gdal geotransform corner point to reference the node (pixel is center) rather than cell (pixel is area) Parameters ---------- bagfile output_file Can be an S102File object or anything the h5py.File would accept, e.g. string file path, tempfile obect, BytesIO etc. Returns ------- # @todo update method docstring for possible metadata fields # check for and resample variable resolution BAG if able # populate the issueDate if possible from a simple string search Create and return the list of valid EPSG codes for S-102 version 2.0.
| 3.065223
| 3
|
tests/test_models/test_vanilla_resnet/test_vanilla_resnet.py
|
rahulgupta9202/ColossalAI
| 1
|
6629131
|
<reponame>rahulgupta9202/ColossalAI
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torchvision.models as models
from colossalai.builder import build_model
NUM_CLS = 10
RESNET18 = dict(
type='VanillaResNet',
block_type='ResNetBasicBlock',
layers=[2, 2, 2, 2],
num_cls=NUM_CLS
)
RESNET34 = dict(
type='VanillaResNet',
block_type='ResNetBasicBlock',
layers=[3, 4, 6, 3],
num_cls=NUM_CLS
)
RESNET50 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 4, 6, 3],
num_cls=NUM_CLS
)
RESNET101 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 4, 23, 3],
num_cls=NUM_CLS
)
RESNET152 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 8, 36, 3],
num_cls=NUM_CLS
)
def compare_model(data, colossal_model, torchvision_model):
    """Feed *data* through both models and assert the output shapes agree.

    The Colossal-AI model returns a sequence whose first element is the
    logits tensor; the torchvision model returns the tensor directly.
    """
    colossal_output, torchvision_output = colossal_model(data), torchvision_model(data)
    shapes_match = colossal_output[0].shape == torchvision_output.shape
    assert shapes_match, f'{colossal_output[0].shape}, {torchvision_output.shape}'
@pytest.mark.cpu
def test_vanilla_resnet():
    """Compare colossal resnet with torchvision resnet"""
    # One random NCHW batch shared by every comparison below.
    x = torch.randn((2, 3, 224, 224))
    # Pair each Colossal-AI config with the matching torchvision factory,
    # in the same order the original checks ran (18, 34, 50, 101, 152).
    variants = (
        (RESNET18, models.resnet18),
        (RESNET34, models.resnet34),
        (RESNET50, models.resnet50),
        (RESNET101, models.resnet101),
        (RESNET152, models.resnet152),
    )
    for cfg, reference_factory in variants:
        colossal_net = build_model(cfg)
        colossal_net.build_from_cfg()
        reference_net = reference_factory(num_classes=NUM_CLS)
        compare_model(x, colossal_net, reference_net)


if __name__ == '__main__':
    test_vanilla_resnet()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torchvision.models as models
from colossalai.builder import build_model
NUM_CLS = 10
RESNET18 = dict(
type='VanillaResNet',
block_type='ResNetBasicBlock',
layers=[2, 2, 2, 2],
num_cls=NUM_CLS
)
RESNET34 = dict(
type='VanillaResNet',
block_type='ResNetBasicBlock',
layers=[3, 4, 6, 3],
num_cls=NUM_CLS
)
RESNET50 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 4, 6, 3],
num_cls=NUM_CLS
)
RESNET101 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 4, 23, 3],
num_cls=NUM_CLS
)
RESNET152 = dict(
type='VanillaResNet',
block_type='ResNetBottleneck',
layers=[3, 8, 36, 3],
num_cls=NUM_CLS
)
def compare_model(data, colossal_model, torchvision_model):
    """Feed *data* through both models and assert the output shapes agree.

    The Colossal-AI model returns a sequence whose first element is the
    logits tensor; the torchvision model returns the tensor directly.
    """
    colossal_output, torchvision_output = colossal_model(data), torchvision_model(data)
    shapes_match = colossal_output[0].shape == torchvision_output.shape
    assert shapes_match, f'{colossal_output[0].shape}, {torchvision_output.shape}'
@pytest.mark.cpu
def test_vanilla_resnet():
    """Compare colossal resnet with torchvision resnet"""
    # One random NCHW batch shared by every comparison below.
    x = torch.randn((2, 3, 224, 224))
    # Pair each Colossal-AI config with the matching torchvision factory,
    # in the same order the original checks ran (18, 34, 50, 101, 152).
    variants = (
        (RESNET18, models.resnet18),
        (RESNET34, models.resnet34),
        (RESNET50, models.resnet50),
        (RESNET101, models.resnet101),
        (RESNET152, models.resnet152),
    )
    for cfg, reference_factory in variants:
        colossal_net = build_model(cfg)
        colossal_net.build_from_cfg()
        reference_net = reference_factory(num_classes=NUM_CLS)
        compare_model(x, colossal_net, reference_net)


if __name__ == '__main__':
    test_vanilla_resnet()
|
en
| 0.587766
|
#!/usr/bin/env python # -*- encoding: utf-8 -*- Compare colossal resnet with torchvision resnet # data # resnet 18 # resnet 34 # resnet 50 # resnet 101 # # resnet 152
| 2.358459
| 2
|
src/profiles/urls.py
|
speedlight/rblmonitor
| 4
|
6629132
|
from django.conf.urls import url
from . import views
# URL namespace for this app; lets callers reverse as "profiles:show" etc.
app_name = 'profiles'

urlpatterns = [
    # The logged-in user's own profile page.
    url(r'^me$', views.ShowProfile.as_view(), name='show_self'),
    # Edit form for the logged-in user's profile.
    url(r'^me/edit$', views.EditProfile.as_view(), name='edit_self'),
    # Any user's profile, looked up by slug (word characters and hyphens).
    url(r'^(?P<slug>[\w\-]+)$', views.ShowProfile.as_view(),
        name='show'),
]
|
from django.conf.urls import url
from . import views
# URL namespace for this app; lets callers reverse as "profiles:show" etc.
app_name = 'profiles'

urlpatterns = [
    # The logged-in user's own profile page.
    url(r'^me$', views.ShowProfile.as_view(), name='show_self'),
    # Edit form for the logged-in user's profile.
    url(r'^me/edit$', views.EditProfile.as_view(), name='edit_self'),
    # Any user's profile, looked up by slug (word characters and hyphens).
    url(r'^(?P<slug>[\w\-]+)$', views.ShowProfile.as_view(),
        name='show'),
]
|
none
| 1
| 1.714605
| 2
|
|
parser/fase2/team14/CodigoIntermedio.py
|
Yosoyfr/tytus
| 0
|
6629133
|
import gramatica2 as g
from storageManager import jsonMode
from Expresion.variablesestaticas import variables
from tkinter import *
class CodigoIntermedio():
    """Intermediate-code runner: parses SQL-like input and executes the
    resulting instructions against an environment, after resetting the
    storage backend and the UI console."""

    def __init__(self, entorno):
        self.entorno = entorno
        # Start from a clean slate: drop every stored database and clear
        # the Tkinter console widget before any execution happens.
        jsonMode.dropAll()
        variables.consola.delete("1.0", "end")
        variables.consola.configure(state='normal')

    def ejecutarsql(self, stringinstr):
        """Parse *stringinstr* and execute the resulting instruction block.

        NOTE(review): the ``return`` inside the loop means only the FIRST
        parsed instruction ever runs -- confirm this is intentional.
        """
        instr = g.parse(stringinstr)
        for inst in instr:
            return inst.ejecutar(self.entorno)

    def getSym(self):
        """Append the environment's symbol table dump to the 'tsAux' file."""
        # BUGFIX: use a context manager so the file handle is closed even
        # if mostrarSimbolos() raises (the original left it open on error).
        with open('tsAux', 'a+') as f:
            f.write(self.entorno.mostrarSimbolos())
|
import gramatica2 as g
from storageManager import jsonMode
from Expresion.variablesestaticas import variables
from tkinter import *
class CodigoIntermedio():
    """Intermediate-code runner: parses SQL-like input and executes the
    resulting instructions against an environment, after resetting the
    storage backend and the UI console."""

    def __init__(self, entorno):
        self.entorno = entorno
        # Start from a clean slate: drop every stored database and clear
        # the Tkinter console widget before any execution happens.
        jsonMode.dropAll()
        variables.consola.delete("1.0", "end")
        variables.consola.configure(state='normal')

    def ejecutarsql(self, stringinstr):
        """Parse *stringinstr* and execute the resulting instruction block.

        NOTE(review): the ``return`` inside the loop means only the FIRST
        parsed instruction ever runs -- confirm this is intentional.
        """
        instr = g.parse(stringinstr)
        for inst in instr:
            return inst.ejecutar(self.entorno)

    def getSym(self):
        """Append the environment's symbol table dump to the 'tsAux' file."""
        # BUGFIX: use a context manager so the file handle is closed even
        # if mostrarSimbolos() raises (the original left it open on error).
        with open('tsAux', 'a+') as f:
            f.write(self.entorno.mostrarSimbolos())
|
none
| 1
| 2.865345
| 3
|
|
kstaretappipig/NTupleMaker_MagUp.py
|
Williams224/davinci-scripts
| 0
|
6629134
|
<gh_stars>0
from Gaudi.Configuration import *
from Configurables import DaVinci
#from Configurables import AlgTool
from Configurables import GaudiSequencer
MySequencer = GaudiSequencer('Sequence')
#For 2012 MC
DaVinci.DDDBtag='dddb-20130929-1'
DaVinci.CondDBtag='sim-20130522-1-vc-mu100'
#for 2011 MC
#DaVinci.DDDBtag='dddb-20130929'
#DaVinci.CondDBtag='sim-20130522-vc-mu100'
simulation=True
#################################################################
#Rerun with stripping21 applied
if simulation:
from Configurables import EventNodeKiller
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
event_node_killer=EventNodeKiller('StripKiller')
event_node_killer.Nodes=['Event/AllStreams','/Event/Strip']
from Configurables import PhysConf
PhysConf().CaloReProcessing=True
stripping="stripping21"
config=strippingConfiguration(stripping)
archive=strippingArchive(stripping)
streams=buildStreams(stripping=config,archive=archive)
MyStream= StrippingStream("MyStream")
MyLines= ["StrippingB2XEtaB2etapKstarLine"]
for stream in streams:
for line in stream.lines:
if line.name() in MyLines:
MyStream.appendLines( [ line ])
from Configurables import ProcStatusCheck
filterBadEvents=ProcStatusCheck()
sc=StrippingConf( Streams= [ MyStream ],
MaxCandidates = 2000,
AcceptBadEvents = False,
BadEventSelection = filterBadEvents)
DaVinci().appendToMainSequence([event_node_killer,sc.sequence()])
##################Creating NTuples#####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *
line = 'B2XEtaB2etapKstarLine'
tuple=DecayTreeTuple()
tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"
tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ gamma)]CC"}
tuple.Inputs=['/Event/Phys/{0}/Particles'.format(line)]
tuple.ToolList += [
"TupleToolGeometry"
, "TupleToolDira"
, "TupleToolAngles"
# , "TupleToolL0Calo"
, "TupleToolPid"
, "TupleToolKinematic"
, "TupleToolPropertime"
, "TupleToolPrimaries"
, "TupleToolEventInfo"
, "TupleToolTrackInfo"
, "TupleToolVtxIsoln"
, "TupleToolPhotonInfo"
, "TupleToolMCTruth"
, "TupleToolMCBackgroundInfo"
, "TupleToolCaloHypo"
, "TupleToolTrackIsolation"
]
tuple.addTool(TupleToolDecay,name="B0")
from Configurables import TupleToolDecayTreeFitter
#========================================REFIT WITH DAUGHTERS AND PV CONSTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/ConsAll')
tuple.B0.ConsAll.Verbose=True
tuple.B0.ConsAll.constrainToOriginVertex=True
tuple.B0.ConsAll.daughtersToConstrain = ["K*(892)0","eta_prime"]
#==============================REFIT WITH ONLY ETA AND PV CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFit')
tuple.B0.PVFit.Verbose=True
tuple.B0.PVFit.constrainToOriginVertex=True
tuple.B0.PVFit.daughtersToConstrain = ["eta_prime"]
#==============================REFIT WITH ONLY K* CONSTRAINED===================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/KStarOnly')
tuple.B0.KStarOnly.Verbose=True
tuple.B0.KStarOnly.constrainToOriginVertex=True
tuple.B0.KStarOnly.daughtersToConstrain = ["K*(892)0"]
#==============================REFIT WITH ONLY PV CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVOnly')
tuple.B0.PVOnly.Verbose=True
tuple.B0.PVOnly.constrainToOriginVertex=True
#========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Conskstar_etap')
tuple.B0.Conskstar_etap.Verbose=True
tuple.B0.Conskstar_etap.constrainToOriginVertex=False
tuple.B0.Conskstar_etap.daughtersToConstrain = ["K*(892)0","eta_prime"]
#========================================REFIT WITH NOTHING CONSTRAINED========================================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Consnothing')
tuple.B0.Consnothing.Verbose=True
tuple.B0.Consnothing.constrainToOriginVertex=False
#========================================LOKI FUBNCTOR VARIABLES========================================
tuple.addBranches({'Kstar' : '[B0 -> ^(K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ gamma)]CC',
'eta_prime' : '[B0 -> (K*(892)0 -> K+ pi-) ^(eta_prime -> pi- pi+ gamma)]CC',
'Kplus' : '[B0 -> (K*(892)0 -> ^K+ pi-) (eta_prime -> pi- pi+ gamma)]CC',
'piminus' : '[B0 -> (K*(892)0 -> K+ ^pi-) (eta_prime -> pi- pi+ gamma)]CC',
'piplus' : '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- ^pi+ gamma)]CC',
'piminus0' : '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> ^pi- pi+ gamma)]CC',
'gamma' : '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ ^gamma)]CC'})
from LoKiPhys.decorators import MAXTREE,MINTREE,ISBASIC,HASTRACK,SUMTREE,PT,ABSID,NINTREE,ETA,TRPCHI2
B0_hybrid=tuple.B0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_B0')
Kstar_hybrid=tuple.Kstar.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kstar')
eta_prime_hybrid=tuple.eta_prime.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_eta_prime')
Kplus_hybrid=tuple.Kplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kplus')
piminus_hybrid=tuple.piminus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus')
piplus_hybrid=tuple.piplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piplus')
piminus0_hybrid=tuple.piminus0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus0')
gamma_hybrid=tuple.gamma.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma')
preamble=[
'TRACK_MAX_PT= MAXTREE(PT, ISBASIC & HASTRACK, -666)',
'TRACK_MIN_PT= MINTREE(PT, ISBASIC & HASTRACK)',
'SUMTRACK_PT= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),PT)',
'SUM_PCHI2= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),TRPCHI2)'
]
B0_hybrid.Preambulo=preamble
B0_hybrid.Variables = {
'max_pt_track' : 'TRACK_MAX_PT',
'min_pt_track' : 'TRACK_MIN_PT',
'sum_track_pt' : 'SUMTRACK_PT',
'sum_pchi2' : 'SUM_PCHI2',
'n_highpt_tracks' : 'NINTREE(ISBASIC & HASTRACK & (PT>250.0*MeV))',
'eta' :'ETA'
}
Kstar_hybrid.Variables ={
'branch_mass':'MM',
'eta': 'ETA'
}
eta_prime_hybrid.Variables ={
'branch_mass':'MM',
'eta': 'ETA'
}
Kplus_hybrid.Variables ={
'eta': 'ETA'
}
piminus_hybrid.Variables ={
'eta': 'ETA'
}
piplus_hybrid.Variables ={
'eta': 'ETA'
}
piminus0_hybrid.Variables ={
'eta': 'ETA'
}
gamma_hybrid.Variables = {
'eta':'ETA'
}
#==============================MassSubs=====================================
from Configurables import TupleToolSubMass
tuple.B0.addTool(TupleToolSubMass)
tuple.B0.ToolList += ["TupleToolSubMass"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => K-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => pi+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => K+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => p~-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => pi0"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e-"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => mu-"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => mu+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["K+/pi- => pi+/K-"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => pi-/pi+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => mu+/mu-"]
#==============================TRIGGER DECISIONS==============================-
from Configurables import TupleToolTISTOS
tistos=tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
"L0ElectronDecision",
"Hlt1TrackPhotonDecision",
"Hlt1TrackAllL0Decision",
"Hlt1TrackMuonDecision",
"Hlt1TrackForwardPassThroughDecision",
"Hlt1TrackForwardPassThroughLooseDecision",
"Hlt1SingleElectronNoIPDecision",
"L0HadronDecision",
"L0LocalPi0Decision",
"L0GlobalPi0Decision",
"L0MuonDecision",
"Hlt2Topo2BodyBBDTDecision",
"Hlt2Topo3BodyBBDTDecision",
"Hlt2Topo4BodyBBDTDecision",
"Hlt2RadiativeTopoTrackTOSDecision",
"Hlt2RadiativeTopoPhotonL0Decision",
"Hlt2TopoRad2BodyBBDTDecision",
"Hlt2TopoRad2plus1BodyBBDTDecision",
"Hlt2Topo2BodySimpleDecision",
"Hlt2Topo3BodySimpleDecision",
"Hlt2Topo4BodySimpleDecision"]
from Configurables import TupleToolL0Calo
tuple.Kplus.addTool(TupleToolL0Calo,name="KplusL0Calo")
tuple.Kplus.ToolList += ["TupleToolL0Calo/KplusL0Calo"]
tuple.Kplus.KplusL0Calo.WhichCalo="HCAL"
tuple.piplus.addTool(TupleToolL0Calo,name="piplusL0Calo")
tuple.piplus.ToolList += ["TupleToolL0Calo/piplusL0Calo"]
tuple.piplus.piplusL0Calo.WhichCalo="HCAL"
tuple.piminus.addTool(TupleToolL0Calo,name="piminusL0Calo")
tuple.piminus.ToolList += ["TupleToolL0Calo/piminusL0Calo"]
tuple.piminus.piminusL0Calo.WhichCalo="HCAL"
tuple.piminus0.addTool(TupleToolL0Calo,name="piminus0L0Calo")
tuple.piminus0.ToolList += ["TupleToolL0Calo/piminus0L0Calo"]
tuple.piminus0.piminus0L0Calo.WhichCalo="HCAL"
etuple=EventTuple()
etuple.ToolList=["TupleToolEventInfo"]
from Configurables import MCDecayTreeTuple
mctuple=MCDecayTreeTuple("mctuple")
mctuple.ToolList+=["MCTupleToolKinematic","MCTupleToolReconstructed","MCTupleToolHierarchy","MCTupleToolDecayType","MCTupleToolPID"]
mctuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^rho(770)0 ^gamma)]CC"
MySequencer.Members.append(etuple)
MySequencer.Members.append(tuple)
MySequencer.Members.append(mctuple)
DaVinci().InputType='DST'
DaVinci().UserAlgorithms+=[MySequencer]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=simulation
#from GaudiConf import IOHelper
# Use the local input data
#IOHelper().inputFiles([
# './00038839_00000002_2.AllStreams.dst'
#], clear=True)
|
from Gaudi.Configuration import *
from Configurables import DaVinci
# from Configurables import AlgTool
from Configurables import GaudiSequencer

# DaVinci options file: rerun Stripping21 on simulated DSTs and write
# DecayTreeTuple/MCDecayTreeTuple ntuples for
# B0 -> K*(892)0 eta_prime, eta_prime -> pi- pi+ gamma.
# NOTE: block indentation was lost in the dumped copy; structure below is
# reconstructed from the control-flow keywords.
MySequencer = GaudiSequencer('Sequence')

# Database tags for 2012 MC.
DaVinci.DDDBtag = 'dddb-20130929-1'
DaVinci.CondDBtag = 'sim-20130522-1-vc-mu100'
# Tags for 2011 MC:
# DaVinci.DDDBtag = 'dddb-20130929'
# DaVinci.CondDBtag = 'sim-20130522-vc-mu100'
simulation = True

#################################################################
# Rerun with stripping21 applied.
if simulation:
    from Configurables import EventNodeKiller
    from StrippingConf.Configuration import StrippingConf, StrippingStream
    from StrippingSettings.Utils import strippingConfiguration
    from StrippingArchive.Utils import buildStreams
    from StrippingArchive import strippingArchive

    # Remove the stripping results already on the DST so the selection
    # can be rerun from scratch.
    event_node_killer = EventNodeKiller('StripKiller')
    event_node_killer.Nodes = ['Event/AllStreams', '/Event/Strip']
    from Configurables import PhysConf
    PhysConf().CaloReProcessing = True

    stripping = "stripping21"
    config = strippingConfiguration(stripping)
    archive = strippingArchive(stripping)
    streams = buildStreams(stripping=config, archive=archive)

    # Build a private stream containing only the line of interest.
    MyStream = StrippingStream("MyStream")
    MyLines = ["StrippingB2XEtaB2etapKstarLine"]
    for stream in streams:
        for line in stream.lines:
            if line.name() in MyLines:
                MyStream.appendLines([line])

    from Configurables import ProcStatusCheck
    filterBadEvents = ProcStatusCheck()
    sc = StrippingConf(Streams=[MyStream],
                       MaxCandidates=2000,
                       AcceptBadEvents=False,
                       BadEventSelection=filterBadEvents)
    DaVinci().appendToMainSequence([event_node_killer, sc.sequence()])

################## Creating NTuples #####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *

line = 'B2XEtaB2etapKstarLine'
# NOTE: `tuple` shadows the builtin; the name is kept because the rest of
# this options file (and its conventions) refer to it.
tuple = DecayTreeTuple()
tuple.Decay = "[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"
tuple.Branches = {"B0": "[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ gamma)]CC"}
tuple.Inputs = ['/Event/Phys/{0}/Particles'.format(line)]
tuple.ToolList += [
    "TupleToolGeometry",
    "TupleToolDira",
    "TupleToolAngles",
    # "TupleToolL0Calo",
    "TupleToolPid",
    "TupleToolKinematic",
    "TupleToolPropertime",
    "TupleToolPrimaries",
    "TupleToolEventInfo",
    "TupleToolTrackInfo",
    "TupleToolVtxIsoln",
    "TupleToolPhotonInfo",
    "TupleToolMCTruth",
    "TupleToolMCBackgroundInfo",
    "TupleToolCaloHypo",
    "TupleToolTrackIsolation",
]
tuple.addTool(TupleToolDecay, name="B0")

from Configurables import TupleToolDecayTreeFitter
# ===== Refit with daughters and PV constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/ConsAll')
tuple.B0.ConsAll.Verbose = True
tuple.B0.ConsAll.constrainToOriginVertex = True
tuple.B0.ConsAll.daughtersToConstrain = ["K*(892)0", "eta_prime"]
# ===== Refit with only eta_prime and PV constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFit')
tuple.B0.PVFit.Verbose = True
tuple.B0.PVFit.constrainToOriginVertex = True
tuple.B0.PVFit.daughtersToConstrain = ["eta_prime"]
# ===== Refit with only K* (and PV) constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/KStarOnly')
tuple.B0.KStarOnly.Verbose = True
tuple.B0.KStarOnly.constrainToOriginVertex = True
tuple.B0.KStarOnly.daughtersToConstrain = ["K*(892)0"]
# ===== Refit with only the PV constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVOnly')
tuple.B0.PVOnly.Verbose = True
tuple.B0.PVOnly.constrainToOriginVertex = True
# ===== Refit with just the daughters constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Conskstar_etap')
tuple.B0.Conskstar_etap.Verbose = True
tuple.B0.Conskstar_etap.constrainToOriginVertex = False
tuple.B0.Conskstar_etap.daughtersToConstrain = ["K*(892)0", "eta_prime"]
# ===== Refit with nothing constrained =====
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/Consnothing')
tuple.B0.Consnothing.Verbose = True
tuple.B0.Consnothing.constrainToOriginVertex = False

# ===== LoKi functor variables =====
tuple.addBranches({
    'Kstar': '[B0 -> ^(K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ gamma)]CC',
    'eta_prime': '[B0 -> (K*(892)0 -> K+ pi-) ^(eta_prime -> pi- pi+ gamma)]CC',
    'Kplus': '[B0 -> (K*(892)0 -> ^K+ pi-) (eta_prime -> pi- pi+ gamma)]CC',
    'piminus': '[B0 -> (K*(892)0 -> K+ ^pi-) (eta_prime -> pi- pi+ gamma)]CC',
    'piplus': '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- ^pi+ gamma)]CC',
    'piminus0': '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> ^pi- pi+ gamma)]CC',
    'gamma': '[B0 -> (K*(892)0 -> K+ pi-) (eta_prime -> pi- pi+ ^gamma)]CC',
})
from LoKiPhys.decorators import MAXTREE, MINTREE, ISBASIC, HASTRACK, SUMTREE, PT, ABSID, NINTREE, ETA, TRPCHI2
B0_hybrid = tuple.B0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_B0')
Kstar_hybrid = tuple.Kstar.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kstar')
eta_prime_hybrid = tuple.eta_prime.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_eta_prime')
Kplus_hybrid = tuple.Kplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_Kplus')
piminus_hybrid = tuple.piminus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus')
piplus_hybrid = tuple.piplus.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piplus')
piminus0_hybrid = tuple.piminus0.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_piminus0')
gamma_hybrid = tuple.gamma.addTupleTool('LoKi::Hybrid::TupleTool/LoKi_gamma')
# Preamble definitions are evaluated by LoKi, not by Python.
preamble = [
    'TRACK_MAX_PT= MAXTREE(PT, ISBASIC & HASTRACK, -666)',
    'TRACK_MIN_PT= MINTREE(PT, ISBASIC & HASTRACK)',
    'SUMTRACK_PT= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),PT)',
    'SUM_PCHI2= SUMTREE((211 == ABSID)|(-211 == ABSID)|(321 == ABSID)|(-321 == ABSID),TRPCHI2)',
]
B0_hybrid.Preambulo = preamble
B0_hybrid.Variables = {
    'max_pt_track': 'TRACK_MAX_PT',
    'min_pt_track': 'TRACK_MIN_PT',
    'sum_track_pt': 'SUMTRACK_PT',
    'sum_pchi2': 'SUM_PCHI2',
    'n_highpt_tracks': 'NINTREE(ISBASIC & HASTRACK & (PT>250.0*MeV))',
    'eta': 'ETA',
}
Kstar_hybrid.Variables = {
    'branch_mass': 'MM',
    'eta': 'ETA',
}
eta_prime_hybrid.Variables = {
    'branch_mass': 'MM',
    'eta': 'ETA',
}
Kplus_hybrid.Variables = {'eta': 'ETA'}
piminus_hybrid.Variables = {'eta': 'ETA'}
piplus_hybrid.Variables = {'eta': 'ETA'}
piminus0_hybrid.Variables = {'eta': 'ETA'}
gamma_hybrid.Variables = {'eta': 'ETA'}

# ===== Mass substitutions =====
from Configurables import TupleToolSubMass
tuple.B0.addTool(TupleToolSubMass)
tuple.B0.ToolList += ["TupleToolSubMass"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => K-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => pi+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => K+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => p~-"]
tuple.B0.TupleToolSubMass.Substitution += ["K+ => p+"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => pi0"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e-"]
tuple.B0.TupleToolSubMass.Substitution += ["gamma => e+"]
tuple.B0.TupleToolSubMass.Substitution += ["pi- => mu-"]
tuple.B0.TupleToolSubMass.Substitution += ["pi+ => mu+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["K+/pi- => pi+/K-"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => pi-/pi+"]
tuple.B0.TupleToolSubMass.DoubleSubstitution += ["pi+/pi- => mu+/mu-"]

# ===== Trigger decisions =====
from Configurables import TupleToolTISTOS
tistos = tuple.B0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0 = True
tistos.VerboseHlt1 = True
tistos.VerboseHlt2 = True
tistos.TriggerList = ["L0PhotonDecision",
                      "L0ElectronDecision",
                      "Hlt1TrackPhotonDecision",
                      "Hlt1TrackAllL0Decision",
                      "Hlt1TrackMuonDecision",
                      "Hlt1TrackForwardPassThroughDecision",
                      "Hlt1TrackForwardPassThroughLooseDecision",
                      "Hlt1SingleElectronNoIPDecision",
                      "L0HadronDecision",
                      "L0LocalPi0Decision",
                      "L0GlobalPi0Decision",
                      "L0MuonDecision",
                      "Hlt2Topo2BodyBBDTDecision",
                      "Hlt2Topo3BodyBBDTDecision",
                      "Hlt2Topo4BodyBBDTDecision",
                      "Hlt2RadiativeTopoTrackTOSDecision",
                      "Hlt2RadiativeTopoPhotonL0Decision",
                      "Hlt2TopoRad2BodyBBDTDecision",
                      "Hlt2TopoRad2plus1BodyBBDTDecision",
                      "Hlt2Topo2BodySimpleDecision",
                      "Hlt2Topo3BodySimpleDecision",
                      "Hlt2Topo4BodySimpleDecision"]

# ===== L0 calorimeter information for the charged daughters =====
from Configurables import TupleToolL0Calo
tuple.Kplus.addTool(TupleToolL0Calo, name="KplusL0Calo")
tuple.Kplus.ToolList += ["TupleToolL0Calo/KplusL0Calo"]
tuple.Kplus.KplusL0Calo.WhichCalo = "HCAL"
tuple.piplus.addTool(TupleToolL0Calo, name="piplusL0Calo")
tuple.piplus.ToolList += ["TupleToolL0Calo/piplusL0Calo"]
tuple.piplus.piplusL0Calo.WhichCalo = "HCAL"
tuple.piminus.addTool(TupleToolL0Calo, name="piminusL0Calo")
tuple.piminus.ToolList += ["TupleToolL0Calo/piminusL0Calo"]
tuple.piminus.piminusL0Calo.WhichCalo = "HCAL"
tuple.piminus0.addTool(TupleToolL0Calo, name="piminus0L0Calo")
tuple.piminus0.ToolList += ["TupleToolL0Calo/piminus0L0Calo"]
tuple.piminus0.piminus0L0Calo.WhichCalo = "HCAL"

# ===== Event-level and MC-truth tuples =====
etuple = EventTuple()
etuple.ToolList = ["TupleToolEventInfo"]
from Configurables import MCDecayTreeTuple
mctuple = MCDecayTreeTuple("mctuple")
mctuple.ToolList += ["MCTupleToolKinematic", "MCTupleToolReconstructed", "MCTupleToolHierarchy", "MCTupleToolDecayType", "MCTupleToolPID"]
mctuple.Decay = "[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta_prime -> ^rho(770)0 ^gamma)]CC"
MySequencer.Members.append(etuple)
MySequencer.Members.append(tuple)
MySequencer.Members.append(mctuple)

# ===== Top-level DaVinci job configuration =====
DaVinci().InputType = 'DST'
DaVinci().UserAlgorithms += [MySequencer]
DaVinci().TupleFile = "Output.root"
DaVinci().HistogramFile = "histos.root"
DaVinci().DataType = '2012'
DaVinci().EvtMax = -1
DaVinci().PrintFreq = 1000
DaVinci().MoniSequence = [tuple]
DaVinci().Simulation = simulation

# from GaudiConf import IOHelper
# Use the local input data
# IOHelper().inputFiles([
#     './00038839_00000002_2.AllStreams.dst'
# ], clear=True)
|
fr
| 0.409412
|
#from Configurables import AlgTool #For 2012 MC #for 2011 MC #DaVinci.DDDBtag='dddb-20130929' #DaVinci.CondDBtag='sim-20130522-vc-mu100' ################################################################# #Rerun with stripping21 applied ##################Creating NTuples##################################### # , "TupleToolL0Calo" #========================================REFIT WITH DAUGHTERS AND PV CONSTRAINED============================== #==============================REFIT WITH ONLY ETA AND PV CONTRAINED============================== #==============================REFIT WITH ONLY K* CONSTRAINED=================================== #==============================REFIT WITH ONLY PV CONTRAINED============================== #========================================REFIT WITH JUST DAUGHTERS CONSTRAINED================================ #========================================REFIT WITH NOTHING CONSTRAINED======================================== #========================================LOKI FUBNCTOR VARIABLES======================================== #==============================MassSubs===================================== #==============================TRIGGER DECISIONS==============================- #from GaudiConf import IOHelper # Use the local input data #IOHelper().inputFiles([ # './00038839_00000002_2.AllStreams.dst' #], clear=True)
| 2.145838
| 2
|
os_module/return_dir_subdir.py
|
marioymario/scripts
| 0
|
6629135
|
import os

# Walk the entries of the `scripts` directory and report whether each one
# is a subdirectory or a regular file.
base_dir = "scripts"  # renamed from `dir`, which shadowed the builtin
for name in os.listdir(base_dir):
    # os.path.join glues the directory onto each entry name, producing a
    # valid full path with the platform's separator.
    fullname = os.path.join(base_dir, name)
    if os.path.isdir(fullname):
        print("{} is a DIR".format(fullname))
    else:
        print("{} is a FILE".format(fullname))

# What's the purpose of the os.path.join function?
# It creates a string containing cross-platform concatenated directories.
|
import os
os.listdir("scripts") # file that we will be reading
dir = "scripts"
for name in os.listdir(dir):
fullname = os.path.join(dir, name)#using os.path.join, we join the directory to each of those file names and create a String with a valid full name.
if os.path.isdir(fullname):
print("{} is a DIR".format(fullname))
else:
print("{} is a FILE".format(fullname))
"""What's the purpose of the os.path.join function?
It creates a string containing cross-platform concatenated directories."""
|
en
| 0.821538
|
# file that we will be reading #using os.path.join, we join the directory to each of those file names and create a String with a valid full name. What's the purpose of the os.path.join function? It creates a string containing cross-platform concatenated directories.
| 4.137284
| 4
|
alephnull/data/benchmarks.py
|
flatM/AlephNull
| 234
|
6629136
|
<gh_stars>100-1000
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from datetime import datetime
import csv
from functools import partial
import requests
import pandas as pd
from . loader_utils import (
date_conversion,
source_to_records,
Mapping
)
# Lightweight record pairing a calendar date with that day's fractional return.
DailyReturn = collections.namedtuple('DailyReturn', ['date', 'returns'])
class BenchmarkDataNotFoundError(Exception):
    """Raised when the benchmark source returns no data for a date range."""
# Output field name -> (converter callable, source CSV column header).
_BENCHMARK_MAPPING = {
    # Need to add 'symbol'
    'volume': (int, 'Volume'),
    'open': (float, 'Open'),
    'close': (float, 'Close'),
    'high': (float, 'High'),
    'low': (float, 'Low'),
    'adj_close': (float, 'Adj Close'),
    'date': (partial(date_conversion, date_pattern='%Y-%m-%d'), 'Date')
}
def benchmark_mappings():
    """Return the CSV column mappings keyed by output field name.

    Each value is a ``Mapping`` built from ``_BENCHMARK_MAPPING``
    (a converter callable plus the source CSV column name).
    """
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only dict.iteritems(), which raises AttributeError on 3.x.
    return {key: Mapping(*value)
            for key, value in _BENCHMARK_MAPPING.items()}
def get_raw_benchmark_data(start_date, end_date, symbol):
    """Download daily prices for *symbol* and return a csv.DictReader.

    The query parameters follow the legacy Yahoo ichart CSV API:
    a/b/c are the start month (zero indexed), day and year; d/e/f the
    end month (zero indexed), day and year; g='d' requests daily rows.

    Raises BenchmarkDataNotFoundError when the HTTP request fails.
    """
    # create benchmark files
    # ^GSPC 19500103
    params = collections.OrderedDict((
        ('s', symbol),
        ('a', start_date.month - 1),   # start_date month, zero indexed
        ('b', start_date.day),         # start_date day
        ('c', start_date.year),        # start_date year
        ('d', end_date.month - 1),     # end_date month, zero indexed
        ('e', end_date.day),           # end_date day
        ('f', end_date.year),          # end_date year
        ('g', 'd'),                    # daily frequency
    ))
    res = requests.get('http://ichart.finance.yahoo.com/table.csv',
                       params=params, stream=True)
    if not res.ok:
        raise BenchmarkDataNotFoundError("""
No benchmark data found for date range.
start_date={start_date}, end_date={end_date}, url={url}""".strip().
            format(start_date=start_date,
                   end_date=end_date,
                   url=res.url))
    return csv.DictReader(res.iter_lines())
def get_benchmark_data(symbol, start_date=None, end_date=None):
    """
    Benchmarks from Yahoo.

    When omitted, *start_date* defaults to 1950-01-03 and *end_date*
    to the current UTC time.  Returns the records produced by
    ``source_to_records`` using the standard benchmark column mappings.
    """
    if start_date is None:
        start_date = datetime(year=1950, month=1, day=3)
    if end_date is None:
        end_date = datetime.utcnow()
    raw_benchmark_data = get_raw_benchmark_data(start_date, end_date, symbol)
    mappings = benchmark_mappings()
    return source_to_records(mappings, raw_benchmark_data)
def get_benchmark_returns(symbol, start_date=None, end_date=None):
    """
    Returns a list of DailyReturn records in chronological order.

    When omitted, *start_date* defaults to 1950-01-03 and *end_date*
    to the current UTC time.
    """
    if start_date is None:
        start_date = datetime(year=1950, month=1, day=3)
    if end_date is None:
        end_date = datetime.utcnow()
    # Get the benchmark data and convert it to a list in chronological
    # order (the upstream feed is assumed newest-first, hence the reverse).
    data_points = list(get_benchmark_data(symbol, start_date, end_date))
    data_points.reverse()
    # Calculate the return percentages.
    benchmark_returns = []
    for i, data_point in enumerate(data_points):
        if i == 0:
            # No previous close on the first day: use open-to-close.
            curr_open = data_point['open']
            returns = (data_point['close'] - curr_open) / curr_open
        else:
            prev_close = data_points[i - 1]['close']
            returns = (data_point['close'] - prev_close) / prev_close
        date = pd.tseries.tools.normalize_date(data_point['date'])
        daily_return = DailyReturn(date=date, returns=returns)
        benchmark_returns.append(daily_return)
    return benchmark_returns
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from datetime import datetime
import csv
from functools import partial
import requests
import pandas as pd
from . loader_utils import (
date_conversion,
source_to_records,
Mapping
)
DailyReturn = collections.namedtuple('DailyReturn', ['date', 'returns'])
class BenchmarkDataNotFoundError(Exception):
pass
_BENCHMARK_MAPPING = {
# Need to add 'symbol'
'volume': (int, 'Volume'),
'open': (float, 'Open'),
'close': (float, 'Close'),
'high': (float, 'High'),
'low': (float, 'Low'),
'adj_close': (float, 'Adj Close'),
'date': (partial(date_conversion, date_pattern='%Y-%m-%d'), 'Date')
}
def benchmark_mappings():
return {key: Mapping(*value)
for key, value
in _BENCHMARK_MAPPING.iteritems()}
def get_raw_benchmark_data(start_date, end_date, symbol):
# create benchmark files
# ^GSPC 19500103
params = collections.OrderedDict((
('s', symbol),
# start_date month, zero indexed
('a', start_date.month - 1),
# start_date day
('b', start_date.day),
# start_date year
('c', start_date.year),
# end_date month, zero indexed
('d', end_date.month - 1),
# end_date day str(int(todate[6:8])) #day
('e', end_date.day),
# end_date year str(int(todate[0:4]))
('f', end_date.year),
# daily frequency
('g', 'd'),
))
res = requests.get('http://ichart.finance.yahoo.com/table.csv',
params=params, stream=True)
if not res.ok:
raise BenchmarkDataNotFoundError("""
No benchmark data found for date range.
start_date={start_date}, end_date={end_date}, url={url}""".strip().
format(start_date=start_date,
end_date=end_date,
url=res.url))
return csv.DictReader(res.iter_lines())
def get_benchmark_data(symbol, start_date=None, end_date=None):
"""
Benchmarks from Yahoo.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
raw_benchmark_data = get_raw_benchmark_data(start_date, end_date, symbol)
mappings = benchmark_mappings()
return source_to_records(mappings, raw_benchmark_data)
def get_benchmark_returns(symbol, start_date=None, end_date=None):
"""
Returns a list of return percentages in chronological order.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
# Get the benchmark data and convert it to a list in chronological order.
data_points = list(get_benchmark_data(symbol, start_date, end_date))
data_points.reverse()
# Calculate the return percentages.
benchmark_returns = []
for i, data_point in enumerate(data_points):
if i == 0:
curr_open = data_points[i]['open']
returns = (data_points[i]['close'] - curr_open) / curr_open
else:
prev_close = data_points[i - 1]['close']
returns = (data_point['close'] - prev_close) / prev_close
date = pd.tseries.tools.normalize_date(data_point['date'])
daily_return = DailyReturn(date=date, returns=returns)
benchmark_returns.append(daily_return)
return benchmark_returns
|
en
| 0.701195
|
# # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Need to add 'symbol' # create benchmark files # ^GSPC 19500103 # start_date month, zero indexed # start_date day # start_date year # end_date month, zero indexed # end_date day str(int(todate[6:8])) #day # end_date year str(int(todate[0:4])) # daily frequency No benchmark data found for date range. start_date={start_date}, end_date={end_date}, url={url} Benchmarks from Yahoo. Returns a list of return percentages in chronological order. # Get the benchmark data and convert it to a list in chronological order. # Calculate the return percentages.
| 2.235787
| 2
|
StoneAgeGUI.py
|
TheGameBotYT/StoneAgeAI
| 8
|
6629137
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.core.window import Window
import numpy as np
from StoneAgeGame import StoneAgeGame
from StoneAgeAI import GLIEMonteCarloControl
import time
# Dark grey window background (RGBA).
Window.clearcolor = (0.3, 0.3, 0.3, 1)
# Policy loaded from 'Q2Mnewdowncast.p' (presumably a pickled Q table —
# TODO confirm against GLIEMonteCarloControl); drives the AI player.
policy = GLIEMonteCarloControl('Q2Mnewdowncast.p')
# Shared game: seat 1 is a human, seat 2 the AI.
game_instance = StoneAgeGame(policy=policy, player_types=['Player', 'AI'])
class StoneAgeGUI(BoxLayout):
    """Root widget: meeple board, scoreboard and the four action buttons.

    Indentation of this class was lost in the dumped copy; the structure
    below is reconstructed from the control-flow keywords.
    """

    def __init__(self, game):
        super(StoneAgeGUI, self).__init__()
        self.game = game
        self.meeple_image_ids = ['farm1', 'hut1', 'hut2', 'wood1', 'wood2', 'wood3',
                                 'wood4', 'wood5', 'wood6', 'wood7', 'food1', 'food2',
                                 'food3', 'food4', 'food5', 'food6', 'food7']
        self.orientation = 'vertical'
        food_space, wood_space, farm_space, hut_space = self.create_meeple_space()
        self.create_scoreboard()
        button_space = BoxLayout(id='static_space', orientation='horizontal', size_hint_y=0.1)
        self.add_widget(button_space)
        # Adding Meeple Image widgets: each id is routed to its board area
        # by name prefix and also exposed as an attribute on self.
        self.meeple_group = []
        for i, label in enumerate(self.meeple_image_ids):
            meeple = MeepleImage(id=label, allow_stretch=True)
            if 'farm' in label:
                farm_space.add_widget(meeple)
            elif 'hut' in label:
                hut_space.add_widget(meeple)
            elif 'food' in label:
                food_space.add_widget(meeple)
            elif 'wood' in label:
                wood_space.add_widget(meeple)
            else:
                raise FutureWarning
            setattr(self, label, meeple)
            self.meeple_group.append(meeple)
        # Adding button widgets (order matters: `update` maps the first
        # pressed button's position to the game choice index).
        for i, label in enumerate(['farm_button', 'hut_button', 'wood_button', 'food_button']):
            button = Action(id=str(label), text="{}".format(label))
            button_space.add_widget(button)
            setattr(self, label, button)

    def create_meeple_space(self):
        """Build the nested layouts that hold the meeple spots.

        Returns the (food, wood, farm, hut) grids so ``__init__`` can
        populate them with MeepleImage widgets.
        """
        meeple_space = BoxLayout(orientation='vertical')
        self.add_widget(meeple_space)
        food_wood_space = BoxLayout(orientation='horizontal', size_hint_y=0.7)
        farm_hut_space = BoxLayout(orientation='horizontal', size_hint_y=0.3)
        meeple_space.add_widget(food_wood_space)
        meeple_space.add_widget(farm_hut_space)
        food_space = GridLayout(rows=2)
        wood_space = GridLayout(rows=2)
        food_wood_space.add_widget(food_space)
        food_wood_space.add_widget(wood_space)
        farm_space = GridLayout(rows=1)
        hut_space = GridLayout(rows=1)
        farm_hut_space.add_widget(farm_space)
        farm_hut_space.add_widget(hut_space)
        return food_space, wood_space, farm_space, hut_space

    def create_scoreboard(self):
        """Create one Label per scoreboard entry and remember them for updates."""
        scoreboard = GridLayout(cols=2, size_hint_y=0.2)
        scoreboard_grp = []
        scoreboard_dict = self.get_scoreboard_values()
        for k, v in scoreboard_dict.items():
            item = Label(id=k, text=k + ' ' + v, font_size=30)
            scoreboard.add_widget(item)
            scoreboard_grp.append(item)
        self.add_widget(scoreboard)
        self.scoreboard_grp = scoreboard_grp

    def get_scoreboard_values(self):
        """Snapshot the game state as display strings keyed by label id."""
        scoreboard_vals = {'Round': str(self.game.round),
                           'Phase': str(self.game.phase),
                           'Score': ', '.join(map(str, self.game.points)),
                           'Food': ', '.join(map(str, self.game.food)),
                           'Farms': ', '.join(map(str, self.game.farms)),
                           'Meeples': ', '.join(map(str, self.game.meeples))}
        return scoreboard_vals

    def update(self, dt):
        """Clock callback: advance the game by one GUI tick.

        If the current player is the AI, play one policy-driven turn.
        If it is a human, poll the action buttons; when one was pressed,
        play that choice and clear the press flags.  Otherwise wait for
        the next tick.

        :param dt: time delta supplied by kivy's Clock (unused).
        """
        if self.game.player_types[self.game.current_player - 1] == 'AI':
            self.game.play()
            time.sleep(1)  # brief pause so the AI move is visible
            self.update_gui()
        elif self.game.player_types[self.game.current_player - 1] == 'Player':
            # NOTE(review): choice index comes from this button order
            # (farm=0, hut=1, wood=2, food=3), which disagrees with
            # Action.action_dict (food=2, wood=3) — confirm which mapping
            # the game expects.
            button_bools = [self.farm_button.touched, self.hut_button.touched,
                            self.wood_button.touched, self.food_button.touched]
            if any(button_bools):
                choice = [i for i, x in enumerate(button_bools) if x][0]
                self.game.play(**{'Choice': choice})
                self.reset_touched()
                self.update_gui()
            else:
                pass

    def update_gui(self):
        """Refresh meeple images and scoreboard labels from the game state."""
        # Hoisted out of the loop: the spot map does not change while we
        # repaint the meeples.
        label_spots = self.check_game_spots()
        for meeple in self.meeple_group:
            meeple.show(label_spots[meeple.id])
        scoreboard_vals = self.get_scoreboard_values()
        for label in self.scoreboard_grp:
            label.text = label.id + ' ' + scoreboard_vals[label.id]

    def reset_touched(self):
        """Clear the pressed flag on all four action buttons."""
        self.farm_button.touched = False
        self.hut_button.touched = False
        self.wood_button.touched = False
        self.food_button.touched = False

    def check_game_spots(self):
        """Flatten the game's nested spot arrays into a widget-id -> owner map."""
        game_spots = self.game.spots
        label_spots = {
            'farm1': game_spots[0][0],
            'hut1': game_spots[1][0],
            'hut2': game_spots[1][1],
            'wood1': game_spots[2][0],
            'wood2': game_spots[2][1],
            'wood3': game_spots[2][2],
            'wood4': game_spots[2][3],
            'wood5': game_spots[2][4],
            'wood6': game_spots[2][5],
            'wood7': game_spots[2][6],
            'food1': game_spots[3][0],
            'food2': game_spots[3][1],
            'food3': game_spots[3][2],
            'food4': game_spots[3][3],
            'food5': game_spots[3][4],
            'food6': game_spots[3][5],
            'food7': game_spots[3][6]
        }
        return label_spots
class MeepleImage(Image):
    """Image widget for a single meeple spot; transparent when unoccupied."""

    def __init__(self, **kwargs):
        super(MeepleImage, self).__init__(**kwargs)
        self.source = 'trans.png'  # start hidden (transparent placeholder)

    def show(self, source_int):
        """
        Functions which tells the meeple it should be visible or not and which colour.

        :param source_int: Integer relating to the player playing
                           (0 = empty, 1 = player one, 2 = player two;
                           other values leave the image unchanged).
        """
        if source_int == 0:
            self.source = 'trans.png'
        elif source_int == 1:
            self.source = 'green_meeple.png'
        elif source_int == 2:
            self.source = 'red_meeple.png'
class Action(Button):
    """Board action button; records that it was pressed via ``touched``."""

    def __init__(self, **kwargs):
        super(Action, self).__init__(**kwargs)
        self.font_size = 30
        self.color = [0, 1, 1, 0.5]
        self.touched = False
        # Maps button id -> game action index.  NOTE(review): StoneAgeGUI.update
        # derives the choice from button order (wood=2, food=3) instead of this
        # dict (food=2, wood=3) — confirm which mapping is authoritative.
        self.action_dict = {'farm_button': 0,
                            'hut_button': 1,
                            'food_button': 2,
                            'wood_button': 3}

    def on_touch_down(self, touch):
        # NOTE(review): neither calls super() nor returns a value, so the
        # default Button touch handling/propagation is suppressed — confirm
        # this is intended before changing it.
        if self.collide_point(*touch.pos):
            self.touched = True
class StoneAgeApp(App):
    """Kivy application wrapper wiring the GUI to the shared game instance."""

    def build(self):
        root = StoneAgeGUI(game_instance)
        # Poll game/GUI state twice a second.
        Clock.schedule_interval(root.update, 30.0 / 60.0)
        return root
# Instantiate and start the Kivy application; run() blocks until the
# window is closed.
app = StoneAgeApp()
app.run()
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.core.window import Window
import numpy as np
from StoneAgeGame import StoneAgeGame
from StoneAgeAI import GLIEMonteCarloControl
import time
# Dark-grey window background.
Window.clearcolor = (0.3, 0.3, 0.3, 1)
# Pre-trained Monte Carlo control policy used by the AI opponent; the file
# name is the pickled Q-table it loads.
policy = GLIEMonteCarloControl('Q2Mnewdowncast.p')
# Single shared game: one human ('Player') versus one AI player.
game_instance = StoneAgeGame(policy=policy, player_types=['Player', 'AI'])
class StoneAgeGUI(BoxLayout):
    """Root widget for the Stone Age board.

    Lays out the meeple spaces (farm/hut/wood/food), a scoreboard and the
    four action buttons, and polls the game engine on a Clock interval.
    """

    def __init__(self, game):
        super(StoneAgeGUI, self).__init__()
        # Game engine shared between the human player and the AI policy.
        self.game = game
        # One image id per meeple slot; the names mirror check_game_spots().
        self.meeple_image_ids = ['farm1', 'hut1', 'hut2', 'wood1', 'wood2', 'wood3',
                                 'wood4', 'wood5', 'wood6', 'wood7', 'food1', 'food2',
                                 'food3', 'food4', 'food5', 'food6', 'food7']
        self.orientation = 'vertical'
        food_space, wood_space, farm_space, hut_space = self.create_meeple_space()
        self.create_scoreboard()
        button_space = BoxLayout(id='static_space', orientation='horizontal', size_hint_y=0.1)
        self.add_widget(button_space)
        # Adding Meeple Image widgets: each slot image goes into the grid
        # matching its label prefix and is also stored as an attribute.
        self.meeple_group = []
        for i, label in enumerate(self.meeple_image_ids):
            meeple = MeepleImage(id=label, allow_stretch=True)
            if 'farm' in label:
                farm_space.add_widget(meeple)
            elif 'hut' in label:
                hut_space.add_widget(meeple)
            elif 'food' in label:
                food_space.add_widget(meeple)
            elif 'wood' in label:
                wood_space.add_widget(meeple)
            else:
                # Unreachable for the ids listed above.
                raise FutureWarning
            setattr(self, label, meeple)
            self.meeple_group.append(meeple)
        # Adding button widgets, one Action per choice.
        for i, label in enumerate(['farm_button', 'hut_button', 'wood_button', 'food_button']):
            button = Action(id=str(label), text="{}".format(label))
            button_space.add_widget(button)
            setattr(self, label, button)

    def create_meeple_space(self):
        """Build the board layout and return the four meeple grids.

        Food/wood occupy the large upper band; farm/hut the lower band.
        """
        meeple_space = BoxLayout(orientation='vertical')
        self.add_widget(meeple_space)
        food_wood_space = BoxLayout(orientation='horizontal', size_hint_y=0.7)
        farm_hut_space = BoxLayout(orientation='horizontal', size_hint_y=0.3)
        meeple_space.add_widget(food_wood_space)
        meeple_space.add_widget(farm_hut_space)
        food_space = GridLayout(rows=2)
        wood_space = GridLayout(rows=2)
        food_wood_space.add_widget(food_space)
        food_wood_space.add_widget(wood_space)
        farm_space = GridLayout(rows=1)
        hut_space = GridLayout(rows=1)
        farm_hut_space.add_widget(farm_space)
        farm_hut_space.add_widget(hut_space)
        return food_space, wood_space, farm_space, hut_space

    def create_scoreboard(self):
        """Build the two-column scoreboard and remember its labels."""
        scoreboard = GridLayout(cols=2, size_hint_y=0.2)
        scoreboard_grp = []
        scoreboard_dict = self.get_scoreboard_values()
        for k, v in scoreboard_dict.items():
            item = Label(id=k, text=k+' '+v, font_size=30)
            scoreboard.add_widget(item)
            scoreboard_grp.append(item)
        self.add_widget(scoreboard)
        self.scoreboard_grp = scoreboard_grp

    def get_scoreboard_values(self):
        """Collect the display string for every scoreboard label."""
        scoreboard_vals = {'Round': str(self.game.round),
                           'Phase': str(self.game.phase),
                           'Score': ', '.join(map(str, self.game.points)),
                           'Food': ', '.join(map(str, self.game.food)),
                           'Farms': ', '.join(map(str, self.game.farms)),
                           'Meeples': ', '.join(map(str, self.game.meeples))}
        return scoreboard_vals

    def update(self, dt):
        """Clock callback: advance the game one step and refresh the GUI.

        If the current player is the AI, play a turn using the policy.
        If it is the human player, resolve any pending button touch into a
        game choice; otherwise do nothing until a button is touched.

        :param dt: time delta supplied by kivy.clock (unused)
        """
        if self.game.player_types[self.game.current_player-1] == 'AI':
            self.game.play()
            # NOTE(review): sleeping inside a Clock callback blocks the UI
            # thread for a second; consider scheduling instead.
            time.sleep(1)
            self.update_gui()
        elif self.game.player_types[self.game.current_player-1] == 'Player':
            button_bools = [self.farm_button.touched, self.hut_button.touched,
                            self.wood_button.touched, self.food_button.touched]
            if any(button_bools):
                # First touched button wins; its index is the game choice.
                choice = [i for i, x in enumerate(button_bools) if x][0]
                self.game.play(**{'Choice': choice})
                self.reset_touched()
                self.update_gui()
            else:
                pass

    def update_gui(self):
        """Refresh meeple images and scoreboard labels from the game state."""
        for meeple in self.meeple_group:
            # NOTE(review): recomputed per meeple; could be hoisted out of
            # the loop — the mapping is the same for every iteration.
            label_spots = self.check_game_spots()
            meeple.show(label_spots[meeple.id])
        scoreboard_vals = self.get_scoreboard_values()
        for label in self.scoreboard_grp:
            label.text = label.id + ' ' + scoreboard_vals[label.id]

    def reset_touched(self):
        """Clear the touched flag on all four action buttons."""
        self.farm_button.touched = False
        self.hut_button.touched = False
        self.wood_button.touched = False
        self.food_button.touched = False

    def check_game_spots(self):
        """Map each meeple-image label to its occupant value in game.spots."""
        game_spots = self.game.spots
        label_spots = {
            'farm1': game_spots[0][0],
            'hut1': game_spots[1][0],
            'hut2': game_spots[1][1],
            'wood1': game_spots[2][0],
            'wood2': game_spots[2][1],
            'wood3': game_spots[2][2],
            'wood4': game_spots[2][3],
            'wood5': game_spots[2][4],
            'wood6': game_spots[2][5],
            'wood7': game_spots[2][6],
            'food1': game_spots[3][0],
            'food2': game_spots[3][1],
            'food3': game_spots[3][2],
            'food4': game_spots[3][3],
            'food5': game_spots[3][4],
            'food6': game_spots[3][5],
            'food7': game_spots[3][6]
        }
        return label_spots
class MeepleImage(Image):
    """Board-slot image: transparent when empty, coloured when occupied."""

    # Player index -> image file shown for that slot (0 means empty/hidden).
    _SOURCES = {0: 'trans.png', 1: 'green_meeple.png', 2: 'red_meeple.png'}

    def __init__(self, **kwargs):
        super(MeepleImage, self).__init__(**kwargs)
        self.source = 'trans.png'

    def show(self, source_int):
        """Show this meeple for player ``source_int`` (0 hides it).

        :param source_int: 0 = empty slot, 1 = green player, 2 = red player
        """
        try:
            self.source = self._SOURCES[source_int]
        except KeyError:
            # Unknown value: leave the current image untouched (matches the
            # original if/elif chain, which had no else branch).
            pass
class Action(Button):
    """Action button that records whether the player tapped it.

    The GUI polls the ``touched`` flag on each update tick and clears it
    again after acting on it.
    """

    def __init__(self, **kwargs):
        super(Action, self).__init__(**kwargs)
        self.font_size = 30
        self.color = [0, 1, 1, 0.5]
        # Set by on_touch_down; polled and reset by the owning widget.
        self.touched = False
        # Maps button ids to the integer choice understood by the game engine.
        self.action_dict = {'farm_button': 0,
                            'hut_button': 1,
                            'food_button': 2,
                            'wood_button': 3}

    def on_touch_down(self, touch):
        if self.collide_point(*touch.pos):
            self.touched = True
        # Bug fix: the original override returned None unconditionally and
        # never called the base class, swallowing the touch-event chain.
        # Delegate to Button so normal press behaviour and propagation to
        # other widgets keep working.
        return super(Action, self).on_touch_down(touch)
class StoneAgeApp(App):
    """Kivy application wrapper: builds the board GUI and schedules updates."""

    def build(self):
        root = StoneAgeGUI(game_instance)
        # Poll the game/GUI twice per second (30/60 s = 0.5 s interval).
        Clock.schedule_interval(root.update, 30.0 / 60.0)
        return root
# Instantiate and start the Kivy application; run() blocks until the
# window is closed.
app = StoneAgeApp()
app.run()
|
en
| 0.77723
|
# Adding Meeple Image widgets # Adding button widgets Skeleton for Stone Age GUI update widget.
On a GUI update call needs to check AI or player
If AI:
'Play a turn' using policy
Elif Player:
While no touch:
pass
If touch:
Resolve action based on touch input
Evolve state if one of above succeeded
Resolve GUI Widgets from new state
:param action: determines if action is given by pressing a button
:param dt:
:return: Functions which tells the meeple it should be visible or not and which colour
:param source_int: Integer relating to the player playing
| 2.655882
| 3
|
testing_support/gerrit_test_case.py
|
stdft112/depot_tools
| 2,151
|
6629138
|
<filename>testing_support/gerrit_test_case.py
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test framework for code that interacts with gerrit.
class GerritTestCase
--------------------------------------------------------------------------------
This class initializes and runs a gerrit instance on localhost.  To use the
framework, define a class that extends GerritTestCase, and then do standard
python unittest development as described here:
http://docs.python.org/2.7/library/unittest.html#basic-example
When your test code runs, the framework will:
- Download the latest stable(-ish) binary release of the gerrit code.
- Start up a live gerrit instance running in a temp directory on the localhost.
- Set up a single gerrit user account with admin privileges.
- Supply credential helpers for interacting with the gerrit instance via http
or ssh.
Refer to depot_tools/testing_support/gerrit-init.sh for details about how the
gerrit instance is set up, and refer to helper methods defined below
(createProject, cloneProject, uploadChange, etc.) for ways to interact with the
gerrit instance from your test methods.
class RepoTestCase
--------------------------------------------------------------------------------
This class extends GerritTestCase, and creates a set of project repositories
and a manifest repository that can be used in conjunction with the 'repo' tool.
Each test method will initialize and sync a brand-new repo working directory.
The 'repo' command may be invoked in a subprocess as part of your tests.
One gotcha: 'repo upload' will always attempt to use the ssh interface to talk
to gerrit.
"""
import collections
import errno
import netrc
import os
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import unittest
import urllib
import gerrit_util
# Root of the depot_tools checkout: two directory levels above this file.
DEPOT_TOOLS_DIR = os.path.normpath(os.path.join(
    os.path.realpath(__file__), '..', '..'))

# When debugging test code, it's sometimes helpful to leave the test gerrit
# instance intact and running after the test code exits.  Setting TEARDOWN
# to False will do that.
TEARDOWN = True
class GerritTestCase(unittest.TestCase):
  """Test class for tests that interact with a gerrit server.

  The class setup creates and launches a stand-alone gerrit instance running
  on localhost, for test methods to interact with.  Class teardown stops and
  deletes the gerrit instance.

  Note that there is a single gerrit instance for ALL test methods in a
  GerritTestCase sub-class.
  """

  # Matches the "commit <sha1>" header line of `git log` output.
  COMMIT_RE = re.compile(r'^commit ([0-9a-fA-F]{40})$')
  # Matches a Change-Id trailer line of a commit message.  Fix: made this a
  # raw string — the original non-raw literal relied on '\s'/'\S' passing
  # through unrecognised, which raises a DeprecationWarning (and eventually
  # a SyntaxError) on modern Pythons.
  CHANGEID_RE = re.compile(r'^\s+Change-Id:\s*(\S+)$')
  # Sink for subprocess output the tests don't care about.
  DEVNULL = open(os.devnull, 'w')
  TEST_USERNAME = 'test-username'
  TEST_EMAIL = '<EMAIL>'

  # Bundle of paths/ports describing one running test gerrit instance.
  GerritInstance = collections.namedtuple('GerritInstance', [
      'credential_file',
      'gerrit_dir',
      'gerrit_exe',
      'gerrit_host',
      'gerrit_pid',
      'gerrit_url',
      'git_dir',
      'git_host',
      'git_url',
      'http_port',
      'netrc_file',
      'ssh_ident',
      'ssh_port',
  ])
  @classmethod
  def check_call(cls, *args, **kwargs):
    """Run subprocess.check_call, discarding stdout/stderr by default."""
    kwargs.setdefault('stdout', cls.DEVNULL)
    kwargs.setdefault('stderr', cls.DEVNULL)
    subprocess.check_call(*args, **kwargs)
  @classmethod
  def check_output(cls, *args, **kwargs):
    """Run subprocess.check_output, discarding stderr by default."""
    kwargs.setdefault('stderr', cls.DEVNULL)
    return subprocess.check_output(*args, **kwargs)
  @classmethod
  def _create_gerrit_instance(cls, gerrit_dir):
    """Initialize and launch a gerrit server rooted at `gerrit_dir`.

    Returns:
      A GerritInstance namedtuple describing the running server.
    """
    gerrit_init_script = os.path.join(
        DEPOT_TOOLS_DIR, 'testing_support', 'gerrit-init.sh')
    # Pick two free TCP ports (http and ssh) by binding ephemeral sockets.
    http_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    http_sock.bind(('', 0))
    http_port = str(http_sock.getsockname()[1])
    ssh_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ssh_sock.bind(('', 0))
    ssh_port = str(ssh_sock.getsockname()[1])
    # NOTE: this is not completely safe.  These port numbers could be
    # re-assigned by the OS between the calls to socket.close() and gerrit
    # starting up.  The only safe way to do this would be to pass file
    # descriptors down to the gerrit process, which is not even remotely
    # supported.  Alas.
    http_sock.close()
    ssh_sock.close()
    cls.check_call(['bash', gerrit_init_script, '--http-port', http_port,
                    '--ssh-port', ssh_port, gerrit_dir])
    gerrit_exe = os.path.join(gerrit_dir, 'bin', 'gerrit.sh')
    cls.check_call(['bash', gerrit_exe, 'start'])
    # gerrit.sh records the server's pid here once it has started.
    with open(os.path.join(gerrit_dir, 'logs', 'gerrit.pid')) as fh:
      gerrit_pid = int(fh.read().rstrip())
    return cls.GerritInstance(
        credential_file=os.path.join(gerrit_dir, 'tmp', '.git-credentials'),
        gerrit_dir=gerrit_dir,
        gerrit_exe=gerrit_exe,
        gerrit_host='localhost:%s' % http_port,
        gerrit_pid=gerrit_pid,
        gerrit_url='http://localhost:%s' % http_port,
        git_dir=os.path.join(gerrit_dir, 'git'),
        git_host='%s/git' % gerrit_dir,
        git_url='file://%s/git' % gerrit_dir,
        http_port=http_port,
        netrc_file=os.path.join(gerrit_dir, 'tmp', '.netrc'),
        ssh_ident=os.path.join(gerrit_dir, 'tmp', 'id_rsa'),
        ssh_port=ssh_port,)
  @classmethod
  def setUpClass(cls):
    """Sets up the gerrit instances in a class-specific temp dir."""
    # Create gerrit instance.
    gerrit_dir = tempfile.mkdtemp()
    os.chmod(gerrit_dir, 0o700)
    gi = cls.gerrit_instance = cls._create_gerrit_instance(gerrit_dir)
    # Set netrc file for http authentication (restored in tearDownClass).
    cls.gerrit_util_netrc_orig = gerrit_util.NETRC
    gerrit_util.NETRC = netrc.netrc(gi.netrc_file)
    # gerrit_util.py defaults to using https, but for testing, it's much
    # simpler to use http connections.
    cls.gerrit_util_protocol_orig = gerrit_util.GERRIT_PROTOCOL
    gerrit_util.GERRIT_PROTOCOL = 'http'
    # Because we communicate with the test server via http, rather than https,
    # libcurl won't add authentication headers to raw git requests unless the
    # gerrit server returns 401.  That works for pushes, but for read
    # operations (like git-ls-remote), gerrit will simply omit any ref that
    # requires authentication.  By default gerrit doesn't permit anonymous
    # read access to refs/meta/config.  Override that behavior so tests can
    # access refs/meta/config if necessary.
    clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'All-Projects')
    cls._CloneProject('All-Projects', clone_path)
    project_config = os.path.join(clone_path, 'project.config')
    cls.check_call(['git', 'config', '--file', project_config, '--add',
                    'access.refs/meta/config.read', 'group Anonymous Users'])
    cls.check_call(['git', 'add', project_config], cwd=clone_path)
    cls.check_call(
        ['git', 'commit', '-m', 'Anonyous read for refs/meta/config'],
        cwd=clone_path)
    cls.check_call(['git', 'push', 'origin', 'HEAD:refs/meta/config'],
                   cwd=clone_path)
  def setUp(self):
    # Per-test scratch directory, readable only by the current user.
    self.tempdir = tempfile.mkdtemp()
    os.chmod(self.tempdir, 0o700)
  def tearDown(self):
    # Keep the scratch directory around when TEARDOWN is disabled for
    # debugging (see the module-level TEARDOWN flag).
    if TEARDOWN:
      shutil.rmtree(self.tempdir)
  @classmethod
  def createProject(cls, name, description='Test project', owners=None,
                    submit_type='CHERRY_PICK'):
    """Create a project on the test gerrit server via its REST API."""
    if owners is None:
      owners = ['Administrators']
    body = {
        'description': description,
        'submit_type': submit_type,
        'owners': owners,
    }
    path = 'projects/%s' % urllib.quote(name, '')
    conn = gerrit_util.CreateHttpConn(
        cls.gerrit_instance.gerrit_host, path, reqtype='PUT', body=body)
    jmsg = gerrit_util.ReadHttpJsonResponse(conn, accept_statuses=[200, 201])
    # Sanity-check that gerrit created the project we asked for.
    assert jmsg['name'] == name
  @classmethod
  def _post_clone_bookkeeping(cls, clone_path):
    """Point a fresh clone at the test identity and credential store."""
    config_path = os.path.join(clone_path, '.git', 'config')
    cls.check_call(
        ['git', 'config', '--file', config_path, 'user.email', cls.TEST_EMAIL])
    cls.check_call(
        ['git', 'config', '--file', config_path, 'credential.helper',
         'store --file=%s' % cls.gerrit_instance.credential_file])
  @classmethod
  def _CloneProject(cls, name, path):
    """Clone a project from the test gerrit server into `path`."""
    gi = cls.gerrit_instance
    parent_dir = os.path.dirname(path)
    if not os.path.exists(parent_dir):
      os.makedirs(parent_dir)
    url = '/'.join((gi.gerrit_url, name))
    cls.check_call(['git', 'clone', url, path])
    cls._post_clone_bookkeeping(path)
    # Install commit-msg hook to add Change-Id lines.
    hook_path = os.path.join(path, '.git', 'hooks', 'commit-msg')
    cls.check_call(['curl', '-o', hook_path,
                    '/'.join((gi.gerrit_url, 'tools/hooks/commit-msg'))])
    # The downloaded hook must be executable by the owner.
    os.chmod(hook_path, stat.S_IRWXU)
    return path
def cloneProject(self, name, path=None):
"""Clone a project from the test gerrit server."""
if path is None:
path = os.path.basename(name)
if path.endswith('.git'):
path = path[:-4]
path = os.path.join(self.tempdir, path)
return self._CloneProject(name, path)
  @classmethod
  def _CreateCommit(cls, clone_path, fn=None, msg=None, text=None):
    """Create a commit in the given git checkout.

    Args:
      clone_path: Absolute path of the checkout to commit in.
      fn: File to append to (defaults to 'test-file.txt').
      msg: Commit message (defaults to 'Test Message').
      text: Line of text appended to the file.
    Returns:
      (sha1, change_id) tuple for the new commit.
    """
    if not fn:
      fn = 'test-file.txt'
    if not msg:
      msg = 'Test Message'
    if not text:
      text = 'Another day, another dollar.'
    fpath = os.path.join(clone_path, fn)
    with open(fpath, 'a') as fh:
      fh.write('%s\n' % text)
    cls.check_call(['git', 'add', fn], cwd=clone_path)
    cls.check_call(['git', 'commit', '-m', msg], cwd=clone_path)
    return cls._GetCommit(clone_path)
def createCommit(self, clone_path, fn=None, msg=None, text=None):
"""Create a commit in the given git checkout."""
clone_path = os.path.join(self.tempdir, clone_path)
return self._CreateCommit(clone_path, fn, msg, text)
  @classmethod
  def _GetCommit(cls, clone_path, ref='HEAD'):
    """Get the sha1 and change-id for a ref in the git checkout."""
    log_proc = cls.check_output(['git', 'log', '-n', '1', ref], cwd=clone_path)
    sha1 = None
    change_id = None
    # Scan the single-commit log for the "commit <sha1>" header and the
    # Change-Id trailer added by the commit-msg hook.
    for line in log_proc.splitlines():
      match = cls.COMMIT_RE.match(line)
      if match:
        sha1 = match.group(1)
        continue
      match = cls.CHANGEID_RE.match(line)
      if match:
        change_id = match.group(1)
        continue
    assert sha1
    assert change_id
    return (sha1, change_id)
def getCommit(self, clone_path, ref='HEAD'):
"""Get the sha1 and change-id for a ref in the git checkout."""
clone_path = os.path.join(self.tempdir, clone_path)
return self._GetCommit(clone_path, ref)
  @classmethod
  def _UploadChange(cls, clone_path, branch='master', remote='origin'):
    """Create a gerrit CL from the HEAD of a git checkout."""
    # Pushing to refs/for/<branch> is gerrit's code-review upload mechanism.
    cls.check_call(
        ['git', 'push', remote, 'HEAD:refs/for/%s' % branch], cwd=clone_path)
def uploadChange(self, clone_path, branch='master', remote='origin'):
"""Create a gerrit CL from the HEAD of a git checkout."""
clone_path = os.path.join(self.tempdir, clone_path)
self._UploadChange(clone_path, branch, remote)
  @classmethod
  def _PushBranch(cls, clone_path, branch='master'):
    """Push a branch directly to gerrit, bypassing code review."""
    cls.check_call(
        ['git', 'push', 'origin', 'HEAD:refs/heads/%s' % branch],
        cwd=clone_path)
def pushBranch(self, clone_path, branch='master'):
"""Push a branch directly to gerrit, bypassing code review."""
clone_path = os.path.join(self.tempdir, clone_path)
self._PushBranch(clone_path, branch)
@classmethod
def createAccount(cls, name='Test User', email='<EMAIL>',
password=None, groups=None):
"""Create a new user account on gerrit."""
username = email.partition('@')[0]
gerrit_cmd = 'gerrit create-account %s --full-name "%s" --email %s' % (
username, name, email)
if password:
gerrit_cmd += ' --http-password "%s"' % password
if groups:
gerrit_cmd += ' '.join(['--group %s' % x for x in groups])
ssh_cmd = ['ssh', '-p', cls.gerrit_instance.ssh_port,
'-i', cls.gerrit_instance.ssh_ident,
'-o', 'NoHostAuthenticationForLocalhost=yes',
'-o', 'StrictHostKeyChecking=no',
'%s@localhost' % cls.TEST_USERNAME, gerrit_cmd]
cls.check_call(ssh_cmd)
@classmethod
def _stop_gerrit(cls, gerrit_instance):
"""Stops the running gerrit instance and deletes it."""
try:
# This should terminate the gerrit process.
cls.check_call(['bash', gerrit_instance.gerrit_exe, 'stop'])
finally:
try:
# cls.gerrit_pid should have already terminated. If it did, then
# os.waitpid will raise OSError.
os.waitpid(gerrit_instance.gerrit_pid, os.WNOHANG)
except OSError as e:
if e.errno == errno.ECHILD:
# If gerrit shut down cleanly, os.waitpid will land here.
# pylint: disable=lost-exception
return
# If we get here, the gerrit process is still alive. Send the process
# SIGKILL for good measure.
try:
os.kill(gerrit_instance.gerrit_pid, signal.SIGKILL)
except OSError:
if e.errno == errno.ESRCH:
# os.kill raised an error because the process doesn't exist. Maybe
# gerrit shut down cleanly after all.
# pylint: disable=lost-exception
return
# Announce that gerrit didn't shut down cleanly.
msg = 'Test gerrit server (pid=%d) did not shut down cleanly.' % (
gerrit_instance.gerrit_pid)
print >> sys.stderr, msg
  @classmethod
  def tearDownClass(cls):
    # Restore the gerrit_util globals patched in setUpClass.
    gerrit_util.NETRC = cls.gerrit_util_netrc_orig
    gerrit_util.GERRIT_PROTOCOL = cls.gerrit_util_protocol_orig
    # Leave the server running when TEARDOWN is disabled for debugging.
    if TEARDOWN:
      cls._stop_gerrit(cls.gerrit_instance)
      shutil.rmtree(cls.gerrit_instance.gerrit_dir)
class RepoTestCase(GerritTestCase):
  """Test class which runs in a repo checkout."""

  # Upstream source of the 'repo' tool itself.
  REPO_URL = 'https://chromium.googlesource.com/external/repo'
  # Name of the manifest project created on the test gerrit server.
  MANIFEST_PROJECT = 'remotepath/manifest'
  # Manifest template; %-interpolated with GerritInstance fields in
  # setUpClass before being committed to the manifest project.
  MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<manifest>
<remote name="remote1"
fetch="%(gerrit_url)s"
review="%(gerrit_host)s" />
<remote name="remote2"
fetch="%(gerrit_url)s"
review="%(gerrit_host)s" />
<default revision="refs/heads/master" remote="remote1" sync-j="1" />
<project remote="remote1" path="localpath/testproj1" name="remotepath/testproj1" />
<project remote="remote1" path="localpath/testproj2" name="remotepath/testproj2" />
<project remote="remote2" path="localpath/testproj3" name="remotepath/testproj3" />
<project remote="remote2" path="localpath/testproj4" name="remotepath/testproj4" />
</manifest>
"""
  @classmethod
  def setUpClass(cls):
    """Start gerrit, mirror the repo tool, and seed manifest + projects."""
    GerritTestCase.setUpClass()
    gi = cls.gerrit_instance
    # Create local mirror of repo tool repository.
    repo_mirror_path = os.path.join(gi.git_dir, 'repo.git')
    cls.check_call(
        ['git', 'clone', '--mirror', cls.REPO_URL, repo_mirror_path])
    # Check out the top-level repo script; it will be used for invocation.
    repo_clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'repo')
    cls.check_call(['git', 'clone', '-n', repo_mirror_path, repo_clone_path])
    cls.check_call(
        ['git', 'checkout', 'origin/stable', 'repo'], cwd=repo_clone_path)
    shutil.rmtree(os.path.join(repo_clone_path, '.git'))
    cls.repo_exe = os.path.join(repo_clone_path, 'repo')
    # Create manifest repository.
    cls.createProject(cls.MANIFEST_PROJECT)
    clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'manifest')
    cls._CloneProject(cls.MANIFEST_PROJECT, clone_path)
    manifest_path = os.path.join(clone_path, 'default.xml')
    # Interpolate the live gerrit host/url into the manifest template.
    # NOTE(review): relies on namedtuple exposing __dict__ on the Python 2
    # runtime this file targets — confirm before porting to Python 3.8+.
    with open(manifest_path, 'w') as fh:
      fh.write(cls.MANIFEST_TEMPLATE % gi.__dict__)
    cls.check_call(['git', 'add', 'default.xml'], cwd=clone_path)
    cls.check_call(['git', 'commit', '-m', 'Test manifest.'], cwd=clone_path)
    cls._PushBranch(clone_path)
    # Create project repositories, each with one seed commit.
    for i in xrange(1, 5):
      proj = 'testproj%d' % i
      cls.createProject('remotepath/%s' % proj)
      clone_path = os.path.join(gi.gerrit_dir, 'tmp', proj)
      cls._CloneProject('remotepath/%s' % proj, clone_path)
      cls._CreateCommit(clone_path)
      cls._PushBranch(clone_path, 'master')
  def setUp(self):
    super(RepoTestCase, self).setUp()
    manifest_url = '/'.join((self.gerrit_instance.gerrit_url,
                             self.MANIFEST_PROJECT))
    repo_url = '/'.join((self.gerrit_instance.gerrit_url, 'repo'))
    # Initialize and sync a brand-new repo working directory per test.
    self.check_call(
        [self.repo_exe, 'init', '-u', manifest_url, '--repo-url',
         repo_url, '--no-repo-verify'], cwd=self.tempdir)
    self.check_call([self.repo_exe, 'sync'], cwd=self.tempdir)
    for i in xrange(1, 5):
      clone_path = os.path.join(self.tempdir, 'localpath', 'testproj%d' % i)
      self._post_clone_bookkeeping(clone_path)
      # Tell 'repo upload' to upload this project without prompting.
      config_path = os.path.join(clone_path, '.git', 'config')
      self.check_call(
          ['git', 'config', '--file', config_path, 'review.%s.upload' %
           self.gerrit_instance.gerrit_host, 'true'])
@classmethod
def runRepo(cls, *args, **kwargs):
# Unfortunately, munging $HOME appears to be the only way to control the
# netrc file used by repo.
munged_home = os.path.join(cls.gerrit_instance.gerrit_dir, 'tmp')
if 'env' not in kwargs:
env = kwargs['env'] = os.environ.copy()
env['HOME'] = munged_home
else:
env.setdefault('HOME', munged_home)
args[0].insert(0, cls.repo_exe)
cls.check_call(*args, **kwargs)
  def uploadChange(self, clone_path, branch='master', remote='origin'):
    """Upload a CL over http instead of repo's ssh default.

    Builds the push URL from the remote's review host and project name
    recorded in the checkout's git config by `repo sync`.
    """
    review_host = self.check_output(
        ['git', 'config', 'remote.%s.review' % remote],
        cwd=clone_path).strip()
    assert(review_host)
    projectname = self.check_output(
        ['git', 'config', 'remote.%s.projectname' % remote],
        cwd=clone_path).strip()
    assert(projectname)
    GerritTestCase._UploadChange(
        clone_path, branch=branch, remote='%s://%s/%s' % (
            gerrit_util.GERRIT_PROTOCOL, review_host, projectname))
|
<filename>testing_support/gerrit_test_case.py
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test framework for code that interacts with gerrit.
class GerritTestCase
--------------------------------------------------------------------------------
This class initializes and runs a gerrit instance on localhost.  To use the
framework, define a class that extends GerritTestCase, and then do standard
python unittest development as described here:
http://docs.python.org/2.7/library/unittest.html#basic-example
When your test code runs, the framework will:
- Download the latest stable(-ish) binary release of the gerrit code.
- Start up a live gerrit instance running in a temp directory on the localhost.
- Set up a single gerrit user account with admin privileges.
- Supply credential helpers for interacting with the gerrit instance via http
or ssh.
Refer to depot_tools/testing_support/gerrit-init.sh for details about how the
gerrit instance is set up, and refer to helper methods defined below
(createProject, cloneProject, uploadChange, etc.) for ways to interact with the
gerrit instance from your test methods.
class RepoTestCase
--------------------------------------------------------------------------------
This class extends GerritTestCase, and creates a set of project repositories
and a manifest repository that can be used in conjunction with the 'repo' tool.
Each test method will initialize and sync a brand-new repo working directory.
The 'repo' command may be invoked in a subprocess as part of your tests.
One gotcha: 'repo upload' will always attempt to use the ssh interface to talk
to gerrit.
"""
import collections
import errno
import netrc
import os
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import unittest
import urllib
import gerrit_util
# Root of the depot_tools checkout: two directory levels above this file.
DEPOT_TOOLS_DIR = os.path.normpath(os.path.join(
    os.path.realpath(__file__), '..', '..'))

# When debugging test code, it's sometimes helpful to leave the test gerrit
# instance intact and running after the test code exits.  Setting TEARDOWN
# to False will do that.
TEARDOWN = True
class GerritTestCase(unittest.TestCase):
  """Test class for tests that interact with a gerrit server.

  The class setup creates and launches a stand-alone gerrit instance running
  on localhost, for test methods to interact with.  Class teardown stops and
  deletes the gerrit instance.

  Note that there is a single gerrit instance for ALL test methods in a
  GerritTestCase sub-class.
  """

  # Matches the "commit <sha1>" header line of `git log` output.
  COMMIT_RE = re.compile(r'^commit ([0-9a-fA-F]{40})$')
  # Matches a Change-Id trailer line of a commit message.  Fix: made this a
  # raw string — the original non-raw literal relied on '\s'/'\S' passing
  # through unrecognised, which raises a DeprecationWarning (and eventually
  # a SyntaxError) on modern Pythons.
  CHANGEID_RE = re.compile(r'^\s+Change-Id:\s*(\S+)$')
  # Sink for subprocess output the tests don't care about.
  DEVNULL = open(os.devnull, 'w')
  TEST_USERNAME = 'test-username'
  TEST_EMAIL = '<EMAIL>'

  # Bundle of paths/ports describing one running test gerrit instance.
  GerritInstance = collections.namedtuple('GerritInstance', [
      'credential_file',
      'gerrit_dir',
      'gerrit_exe',
      'gerrit_host',
      'gerrit_pid',
      'gerrit_url',
      'git_dir',
      'git_host',
      'git_url',
      'http_port',
      'netrc_file',
      'ssh_ident',
      'ssh_port',
  ])
@classmethod
def check_call(cls, *args, **kwargs):
kwargs.setdefault('stdout', cls.DEVNULL)
kwargs.setdefault('stderr', cls.DEVNULL)
subprocess.check_call(*args, **kwargs)
@classmethod
def check_output(cls, *args, **kwargs):
kwargs.setdefault('stderr', cls.DEVNULL)
return subprocess.check_output(*args, **kwargs)
@classmethod
def _create_gerrit_instance(cls, gerrit_dir):
gerrit_init_script = os.path.join(
DEPOT_TOOLS_DIR, 'testing_support', 'gerrit-init.sh')
http_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
http_sock.bind(('', 0))
http_port = str(http_sock.getsockname()[1])
ssh_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssh_sock.bind(('', 0))
ssh_port = str(ssh_sock.getsockname()[1])
# NOTE: this is not completely safe. These port numbers could be
# re-assigned by the OS between the calls to socket.close() and gerrit
# starting up. The only safe way to do this would be to pass file
# descriptors down to the gerrit process, which is not even remotely
# supported. Alas.
http_sock.close()
ssh_sock.close()
cls.check_call(['bash', gerrit_init_script, '--http-port', http_port,
'--ssh-port', ssh_port, gerrit_dir])
gerrit_exe = os.path.join(gerrit_dir, 'bin', 'gerrit.sh')
cls.check_call(['bash', gerrit_exe, 'start'])
with open(os.path.join(gerrit_dir, 'logs', 'gerrit.pid')) as fh:
gerrit_pid = int(fh.read().rstrip())
return cls.GerritInstance(
credential_file=os.path.join(gerrit_dir, 'tmp', '.git-credentials'),
gerrit_dir=gerrit_dir,
gerrit_exe=gerrit_exe,
gerrit_host='localhost:%s' % http_port,
gerrit_pid=gerrit_pid,
gerrit_url='http://localhost:%s' % http_port,
git_dir=os.path.join(gerrit_dir, 'git'),
git_host='%s/git' % gerrit_dir,
git_url='file://%s/git' % gerrit_dir,
http_port=http_port,
netrc_file=os.path.join(gerrit_dir, 'tmp', '.netrc'),
ssh_ident=os.path.join(gerrit_dir, 'tmp', 'id_rsa'),
ssh_port=ssh_port,)
@classmethod
def setUpClass(cls):
"""Sets up the gerrit instances in a class-specific temp dir."""
# Create gerrit instance.
gerrit_dir = tempfile.mkdtemp()
os.chmod(gerrit_dir, 0o700)
gi = cls.gerrit_instance = cls._create_gerrit_instance(gerrit_dir)
# Set netrc file for http authentication.
cls.gerrit_util_netrc_orig = gerrit_util.NETRC
gerrit_util.NETRC = netrc.netrc(gi.netrc_file)
# gerrit_util.py defaults to using https, but for testing, it's much
# simpler to use http connections.
cls.gerrit_util_protocol_orig = gerrit_util.GERRIT_PROTOCOL
gerrit_util.GERRIT_PROTOCOL = 'http'
# Because we communicate with the test server via http, rather than https,
# libcurl won't add authentication headers to raw git requests unless the
# gerrit server returns 401. That works for pushes, but for read operations
# (like git-ls-remote), gerrit will simply omit any ref that requires
# authentication. By default gerrit doesn't permit anonymous read access to
# refs/meta/config. Override that behavior so tests can access
# refs/meta/config if necessary.
clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'All-Projects')
cls._CloneProject('All-Projects', clone_path)
project_config = os.path.join(clone_path, 'project.config')
cls.check_call(['git', 'config', '--file', project_config, '--add',
'access.refs/meta/config.read', 'group Anonymous Users'])
cls.check_call(['git', 'add', project_config], cwd=clone_path)
cls.check_call(
['git', 'commit', '-m', 'Anonyous read for refs/meta/config'],
cwd=clone_path)
cls.check_call(['git', 'push', 'origin', 'HEAD:refs/meta/config'],
cwd=clone_path)
def setUp(self):
self.tempdir = tempfile.mkdtemp()
os.chmod(self.tempdir, 0o700)
def tearDown(self):
if TEARDOWN:
shutil.rmtree(self.tempdir)
@classmethod
def createProject(cls, name, description='Test project', owners=None,
submit_type='CHERRY_PICK'):
"""Create a project on the test gerrit server."""
if owners is None:
owners = ['Administrators']
body = {
'description': description,
'submit_type': submit_type,
'owners': owners,
}
path = 'projects/%s' % urllib.quote(name, '')
conn = gerrit_util.CreateHttpConn(
cls.gerrit_instance.gerrit_host, path, reqtype='PUT', body=body)
jmsg = gerrit_util.ReadHttpJsonResponse(conn, accept_statuses=[200, 201])
assert jmsg['name'] == name
@classmethod
def _post_clone_bookkeeping(cls, clone_path):
config_path = os.path.join(clone_path, '.git', 'config')
cls.check_call(
['git', 'config', '--file', config_path, 'user.email', cls.TEST_EMAIL])
cls.check_call(
['git', 'config', '--file', config_path, 'credential.helper',
'store --file=%s' % cls.gerrit_instance.credential_file])
@classmethod
def _CloneProject(cls, name, path):
"""Clone a project from the test gerrit server."""
gi = cls.gerrit_instance
parent_dir = os.path.dirname(path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
url = '/'.join((gi.gerrit_url, name))
cls.check_call(['git', 'clone', url, path])
cls._post_clone_bookkeeping(path)
# Install commit-msg hook to add Change-Id lines.
hook_path = os.path.join(path, '.git', 'hooks', 'commit-msg')
cls.check_call(['curl', '-o', hook_path,
'/'.join((gi.gerrit_url, 'tools/hooks/commit-msg'))])
os.chmod(hook_path, stat.S_IRWXU)
return path
def cloneProject(self, name, path=None):
"""Clone a project from the test gerrit server."""
if path is None:
path = os.path.basename(name)
if path.endswith('.git'):
path = path[:-4]
path = os.path.join(self.tempdir, path)
return self._CloneProject(name, path)
@classmethod
def _CreateCommit(cls, clone_path, fn=None, msg=None, text=None):
"""Create a commit in the given git checkout."""
if not fn:
fn = 'test-file.txt'
if not msg:
msg = 'Test Message'
if not text:
text = 'Another day, another dollar.'
fpath = os.path.join(clone_path, fn)
with open(fpath, 'a') as fh:
fh.write('%s\n' % text)
cls.check_call(['git', 'add', fn], cwd=clone_path)
cls.check_call(['git', 'commit', '-m', msg], cwd=clone_path)
return cls._GetCommit(clone_path)
def createCommit(self, clone_path, fn=None, msg=None, text=None):
    """Create a commit in a checkout given by a tempdir-relative path."""
    absolute_path = os.path.join(self.tempdir, clone_path)
    return self._CreateCommit(absolute_path, fn, msg, text)
@classmethod
def _GetCommit(cls, clone_path, ref='HEAD'):
    """Get the sha1 and change-id for a ref in the git checkout."""
    log_output = cls.check_output(
        ['git', 'log', '-n', '1', ref], cwd=clone_path)
    sha1 = change_id = None
    for log_line in log_output.splitlines():
        commit_match = cls.COMMIT_RE.match(log_line)
        if commit_match:
            sha1 = commit_match.group(1)
        else:
            changeid_match = cls.CHANGEID_RE.match(log_line)
            if changeid_match:
                change_id = changeid_match.group(1)
    assert sha1
    assert change_id
    return (sha1, change_id)
def getCommit(self, clone_path, ref='HEAD'):
    """Get (sha1, change-id) for a ref in a tempdir-relative checkout."""
    absolute_path = os.path.join(self.tempdir, clone_path)
    return self._GetCommit(absolute_path, ref)
@classmethod
def _UploadChange(cls, clone_path, branch='master', remote='origin'):
    """Create a gerrit CL from the HEAD of a git checkout."""
    refspec = 'HEAD:refs/for/%s' % branch
    cls.check_call(['git', 'push', remote, refspec], cwd=clone_path)
def uploadChange(self, clone_path, branch='master', remote='origin'):
    """Create a gerrit CL from the HEAD of a tempdir-relative checkout."""
    absolute_path = os.path.join(self.tempdir, clone_path)
    self._UploadChange(absolute_path, branch, remote)
@classmethod
def _PushBranch(cls, clone_path, branch='master'):
    """Push a branch directly to gerrit, bypassing code review."""
    refspec = 'HEAD:refs/heads/%s' % branch
    cls.check_call(['git', 'push', 'origin', refspec], cwd=clone_path)
def pushBranch(self, clone_path, branch='master'):
    """Push a branch from a tempdir-relative checkout, bypassing review."""
    absolute_path = os.path.join(self.tempdir, clone_path)
    self._PushBranch(absolute_path, branch)
@classmethod
def createAccount(cls, name='Test User', email='<EMAIL>',
                  password=None, groups=None):
    """Create a new user account on gerrit.

    Args:
      name: full name for the account.
      email: email address; the part before '@' becomes the username.
      password: optional HTTP password for the account.
      groups: optional iterable of gerrit group names to add the account to.
    """
    username = email.partition('@')[0]
    gerrit_cmd = 'gerrit create-account %s --full-name "%s" --email %s' % (
        username, name, email)
    if password:
        gerrit_cmd += ' --http-password "%s"' % password
    if groups:
        # BUG FIX: the original used ' '.join(...), which omitted the space
        # *before* the first --group flag (and between the password and the
        # flags), fusing tokens in the resulting shell command.  Prepend a
        # space to every flag instead.
        gerrit_cmd += ''.join([' --group %s' % x for x in groups])
    ssh_cmd = ['ssh', '-p', cls.gerrit_instance.ssh_port,
               '-i', cls.gerrit_instance.ssh_ident,
               '-o', 'NoHostAuthenticationForLocalhost=yes',
               '-o', 'StrictHostKeyChecking=no',
               '%s@localhost' % cls.TEST_USERNAME, gerrit_cmd]
    cls.check_call(ssh_cmd)
@classmethod
def _stop_gerrit(cls, gerrit_instance):
    """Stops the running gerrit instance and deletes it."""
    try:
        # This should terminate the gerrit process.
        cls.check_call(['bash', gerrit_instance.gerrit_exe, 'stop'])
    finally:
        try:
            # cls.gerrit_pid should have already terminated. If it did, then
            # os.waitpid will raise OSError.
            os.waitpid(gerrit_instance.gerrit_pid, os.WNOHANG)
        except OSError as e:
            if e.errno == errno.ECHILD:
                # If gerrit shut down cleanly, os.waitpid will land here.
                # pylint: disable=lost-exception
                return
        # If we get here, the gerrit process is still alive. Send the process
        # SIGKILL for good measure.
        try:
            os.kill(gerrit_instance.gerrit_pid, signal.SIGKILL)
        except OSError as e:
            # BUG FIX: the original wrote a bare 'except OSError:' but then
            # read e.errno, raising NameError whenever os.waitpid succeeded
            # and os.kill failed.  Bind the exception explicitly.
            if e.errno == errno.ESRCH:
                # os.kill raised an error because the process doesn't exist.
                # Maybe gerrit shut down cleanly after all.
                # pylint: disable=lost-exception
                return
        # Announce that gerrit didn't shut down cleanly.
        msg = 'Test gerrit server (pid=%d) did not shut down cleanly.' % (
            gerrit_instance.gerrit_pid)
        print >> sys.stderr, msg
@classmethod
def tearDownClass(cls):
    # Restore the gerrit_util module globals that setUpClass overrode for
    # the test run.
    gerrit_util.NETRC = cls.gerrit_util_netrc_orig
    gerrit_util.GERRIT_PROTOCOL = cls.gerrit_util_protocol_orig
    # TEARDOWN can be set to False while debugging to leave the test gerrit
    # server running after the tests exit.
    if TEARDOWN:
        cls._stop_gerrit(cls.gerrit_instance)
        shutil.rmtree(cls.gerrit_instance.gerrit_dir)
class RepoTestCase(GerritTestCase):
  """Test class which runs in a repo checkout."""

  REPO_URL = 'https://chromium.googlesource.com/external/repo'
  MANIFEST_PROJECT = 'remotepath/manifest'
  # %-substituted with the gerrit instance's __dict__ (gerrit_url,
  # gerrit_host) when the manifest repository is seeded.
  MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<manifest>
  <remote name="remote1"
          fetch="%(gerrit_url)s"
          review="%(gerrit_host)s" />
  <remote name="remote2"
          fetch="%(gerrit_url)s"
          review="%(gerrit_host)s" />
  <default revision="refs/heads/master" remote="remote1" sync-j="1" />
  <project remote="remote1" path="localpath/testproj1" name="remotepath/testproj1" />
  <project remote="remote1" path="localpath/testproj2" name="remotepath/testproj2" />
  <project remote="remote2" path="localpath/testproj3" name="remotepath/testproj3" />
  <project remote="remote2" path="localpath/testproj4" name="remotepath/testproj4" />
</manifest>
"""

  @classmethod
  def setUpClass(cls):
    """Start gerrit, then seed it with a manifest and four test projects."""
    GerritTestCase.setUpClass()
    gi = cls.gerrit_instance

    # Create local mirror of repo tool repository.
    repo_mirror_path = os.path.join(gi.git_dir, 'repo.git')
    cls.check_call(
        ['git', 'clone', '--mirror', cls.REPO_URL, repo_mirror_path])

    # Check out the top-level repo script; it will be used for invocation.
    repo_clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'repo')
    cls.check_call(['git', 'clone', '-n', repo_mirror_path, repo_clone_path])
    cls.check_call(
        ['git', 'checkout', 'origin/stable', 'repo'], cwd=repo_clone_path)
    shutil.rmtree(os.path.join(repo_clone_path, '.git'))
    cls.repo_exe = os.path.join(repo_clone_path, 'repo')

    # Create manifest repository.
    cls.createProject(cls.MANIFEST_PROJECT)
    clone_path = os.path.join(gi.gerrit_dir, 'tmp', 'manifest')
    cls._CloneProject(cls.MANIFEST_PROJECT, clone_path)
    manifest_path = os.path.join(clone_path, 'default.xml')
    with open(manifest_path, 'w') as fh:
      fh.write(cls.MANIFEST_TEMPLATE % gi.__dict__)
    cls.check_call(['git', 'add', 'default.xml'], cwd=clone_path)
    cls.check_call(['git', 'commit', '-m', 'Test manifest.'], cwd=clone_path)
    cls._PushBranch(clone_path)

    # Create project repositories.
    for i in xrange(1, 5):
      proj = 'testproj%d' % i
      cls.createProject('remotepath/%s' % proj)
      clone_path = os.path.join(gi.gerrit_dir, 'tmp', proj)
      cls._CloneProject('remotepath/%s' % proj, clone_path)
      cls._CreateCommit(clone_path)
      cls._PushBranch(clone_path, 'master')

  def setUp(self):
    """Initialize and sync a brand-new repo working directory per test."""
    super(RepoTestCase, self).setUp()
    manifest_url = '/'.join((self.gerrit_instance.gerrit_url,
                             self.MANIFEST_PROJECT))
    repo_url = '/'.join((self.gerrit_instance.gerrit_url, 'repo'))
    self.check_call(
        [self.repo_exe, 'init', '-u', manifest_url, '--repo-url',
         repo_url, '--no-repo-verify'], cwd=self.tempdir)
    self.check_call([self.repo_exe, 'sync'], cwd=self.tempdir)
    for i in xrange(1, 5):
      clone_path = os.path.join(self.tempdir, 'localpath', 'testproj%d' % i)
      self._post_clone_bookkeeping(clone_path)
      # Tell 'repo upload' to upload this project without prompting.
      config_path = os.path.join(clone_path, '.git', 'config')
      self.check_call(
          ['git', 'config', '--file', config_path, 'review.%s.upload' %
           self.gerrit_instance.gerrit_host, 'true'])

  @classmethod
  def runRepo(cls, *args, **kwargs):
    """Run the repo tool with $HOME pointed at the test credentials.

    args[0] must be a list of repo arguments; cls.repo_exe is inserted at
    its front (in place).  kwargs are forwarded to check_call.
    """
    # Unfortunately, munging $HOME appears to be the only way to control the
    # netrc file used by repo.
    munged_home = os.path.join(cls.gerrit_instance.gerrit_dir, 'tmp')
    if 'env' not in kwargs:
      env = kwargs['env'] = os.environ.copy()
      env['HOME'] = munged_home
    else:
      # BUG FIX: the original referenced an unbound local 'env' in this
      # branch, raising NameError whenever a caller supplied its own env=
      # mapping.  Read it from kwargs instead.
      kwargs['env'].setdefault('HOME', munged_home)
    args[0].insert(0, cls.repo_exe)
    cls.check_call(*args, **kwargs)

  def uploadChange(self, clone_path, branch='master', remote='origin'):
    """Create a gerrit CL using the review remote configured by repo."""
    review_host = self.check_output(
        ['git', 'config', 'remote.%s.review' % remote],
        cwd=clone_path).strip()
    assert(review_host)
    projectname = self.check_output(
        ['git', 'config', 'remote.%s.projectname' % remote],
        cwd=clone_path).strip()
    assert(projectname)
    GerritTestCase._UploadChange(
        clone_path, branch=branch, remote='%s://%s/%s' % (
            gerrit_util.GERRIT_PROTOCOL, review_host, projectname))
|
en
| 0.762643
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Test framework for code that interacts with gerrit. class GerritTestCase -------------------------------------------------------------------------------- This class initializes and runs an a gerrit instance on localhost. To use the framework, define a class that extends GerritTestCase, and then do standard python unittest development as described here: http://docs.python.org/2.7/library/unittest.html#basic-example When your test code runs, the framework will: - Download the latest stable(-ish) binary release of the gerrit code. - Start up a live gerrit instance running in a temp directory on the localhost. - Set up a single gerrit user account with admin priveleges. - Supply credential helpers for interacting with the gerrit instance via http or ssh. Refer to depot_tools/testing_support/gerrit-init.sh for details about how the gerrit instance is set up, and refer to helper methods defined below (createProject, cloneProject, uploadChange, etc.) for ways to interact with the gerrit instance from your test methods. class RepoTestCase -------------------------------------------------------------------------------- This class extends GerritTestCase, and creates a set of project repositories and a manifest repository that can be used in conjunction with the 'repo' tool. Each test method will initialize and sync a brand-new repo working directory. The 'repo' command may be invoked in a subprocess as part of your tests. One gotcha: 'repo upload' will always attempt to use the ssh interface to talk to gerrit. # When debugging test code, it's sometimes helpful to leave the test gerrit # instance intact and running after the test code exits. Setting TEARDOWN # to False will do that. Test class for tests that interact with a gerrit server. 
The class setup creates and launches a stand-alone gerrit instance running on localhost, for test methods to interact with. Class teardown stops and deletes the gerrit instance. Note that there is a single gerrit instance for ALL test methods in a GerritTestCase sub-class. # NOTE: this is not completely safe. These port numbers could be # re-assigned by the OS between the calls to socket.close() and gerrit # starting up. The only safe way to do this would be to pass file # descriptors down to the gerrit process, which is not even remotely # supported. Alas. Sets up the gerrit instances in a class-specific temp dir. # Create gerrit instance. # Set netrc file for http authentication. # gerrit_util.py defaults to using https, but for testing, it's much # simpler to use http connections. # Because we communicate with the test server via http, rather than https, # libcurl won't add authentication headers to raw git requests unless the # gerrit server returns 401. That works for pushes, but for read operations # (like git-ls-remote), gerrit will simply omit any ref that requires # authentication. By default gerrit doesn't permit anonymous read access to # refs/meta/config. Override that behavior so tests can access # refs/meta/config if necessary. Create a project on the test gerrit server. Clone a project from the test gerrit server. # Install commit-msg hook to add Change-Id lines. Clone a project from the test gerrit server. Create a commit in the given git checkout. Create a commit in the given git checkout. Get the sha1 and change-id for a ref in the git checkout. Get the sha1 and change-id for a ref in the git checkout. Create a gerrit CL from the HEAD of a git checkout. Create a gerrit CL from the HEAD of a git checkout. Push a branch directly to gerrit, bypassing code review. Push a branch directly to gerrit, bypassing code review. Create a new user account on gerrit. Stops the running gerrit instance and deletes it. # This should terminate the gerrit process. 
# cls.gerrit_pid should have already terminated. If it did, then # os.waitpid will raise OSError. # If gerrit shut down cleanly, os.waitpid will land here. # pylint: disable=lost-exception # If we get here, the gerrit process is still alive. Send the process # SIGKILL for good measure. # os.kill raised an error because the process doesn't exist. Maybe # gerrit shut down cleanly after all. # pylint: disable=lost-exception # Announce that gerrit didn't shut down cleanly. Test class which runs in a repo checkout. <?xml version="1.0" encoding="UTF-8"?> <manifest> <remote name="remote1" fetch="%(gerrit_url)s" review="%(gerrit_host)s" /> <remote name="remote2" fetch="%(gerrit_url)s" review="%(gerrit_host)s" /> <default revision="refs/heads/master" remote="remote1" sync-j="1" /> <project remote="remote1" path="localpath/testproj1" name="remotepath/testproj1" /> <project remote="remote1" path="localpath/testproj2" name="remotepath/testproj2" /> <project remote="remote2" path="localpath/testproj3" name="remotepath/testproj3" /> <project remote="remote2" path="localpath/testproj4" name="remotepath/testproj4" /> </manifest> # Create local mirror of repo tool repository. # Check out the top-level repo script; it will be used for invocation. # Create manifest repository. # Create project repositories. # Tell 'repo upload' to upload this project without prompting. # Unfortunately, munging $HOME appears to be the only way to control the # netrc file used by repo.
| 2.253011
| 2
|
ledgercomm/interfaces/tcp_client.py
|
LedgerHQ/ledgercomm
| 3
|
6629139
|
"""ledgercomm.interfaces.tcp_client module."""
import socket
from typing import Tuple
from ledgercomm.interfaces.comm import Comm
from ledgercomm.log import LOG
class TCPClient(Comm):
    """TCPClient class.

    Mainly used to connect to the TCP server of the Speculos emulator.

    Parameters
    ----------
    server : str
        IP address of the TCP server.
    port : int
        Port of the TCP server.

    Attributes
    ----------
    server : str
        IP address of the TCP server.
    port : int
        Port of the TCP server.
    socket : socket.socket
        TCP socket to communicate with the server.
    __opened : bool
        Whether the TCP socket is opened or not.

    """

    def __init__(self, server: str, port: int) -> None:
        """Init constructor of TCPClient."""
        self.server: str = server
        self.port: int = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__opened: bool = False

    def open(self) -> None:
        """Open connection to TCP socket with `self.server` and `self.port`.

        Returns
        -------
        None

        """
        if not self.__opened:
            self.socket.connect((self.server, self.port))
            self.__opened = True

    def _recv_exactly(self, size: int) -> bytes:
        """Read exactly `size` bytes from `self.socket`.

        BUG FIX: socket.recv(n) may legally return fewer than n bytes on a
        TCP stream; the original code assumed full reads, which corrupted
        the length-prefixed framing on short reads.  Loop until `size`
        bytes have arrived.

        Raises
        ------
        ConnectionError
            If the peer closes the connection before `size` bytes arrive.

        """
        buffer = bytearray()
        while len(buffer) < size:
            chunk = self.socket.recv(size - len(buffer))
            if not chunk:
                raise ConnectionError(
                    "Connection closed before receiving all expected data!")
            buffer.extend(chunk)
        return bytes(buffer)

    def send(self, data: bytes) -> int:
        """Send `data` through TCP socket `self.socket`.

        Parameters
        ----------
        data : bytes
            Bytes of data to send.

        Returns
        -------
        int
            Total length of data sent through TCP socket.

        """
        if not data:
            raise Exception("Can't send empty data!")
        LOG.debug("=> %s", data.hex())
        data_len: bytes = int.to_bytes(len(data), 4, byteorder="big")
        payload: bytes = data_len + data
        # BUG FIX: socket.send() may transmit only part of the buffer;
        # sendall() guarantees the whole framed payload is written.
        self.socket.sendall(payload)
        return len(payload)

    def recv(self) -> Tuple[int, bytes]:
        """Receive data through TCP socket `self.socket`.

        Blocking IO.  The wire format is: 4-byte big-endian length, then
        `length` bytes of response data, then a 2-byte big-endian status
        word.

        Returns
        -------
        Tuple[int, bytes]
            A pair (sw, rdata) containing the status word and response data.

        """
        length: int = int.from_bytes(self._recv_exactly(4), byteorder="big")
        rdata: bytes = self._recv_exactly(length)
        sw: int = int.from_bytes(self._recv_exactly(2), byteorder="big")
        LOG.debug("<= %s %s", rdata.hex(), hex(sw)[2:])
        return sw, rdata

    def exchange(self, data: bytes) -> Tuple[int, bytes]:
        """Exchange (send + receive) with `self.socket`.

        Parameters
        ----------
        data : bytes
            Bytes with `data` to send.

        Returns
        -------
        Tuple[int, bytes]
            A pair (sw, rdata) containing the status word and response data.

        """
        self.send(data)
        return self.recv()  # blocking IO

    def close(self) -> None:
        """Close connection to TCP socket `self.socket`.

        Returns
        -------
        None

        """
        if self.__opened:
            self.socket.close()
            self.__opened = False
|
"""ledgercomm.interfaces.tcp_client module."""
import socket
from typing import Tuple
from ledgercomm.interfaces.comm import Comm
from ledgercomm.log import LOG
class TCPClient(Comm):
"""TCPClient class.
Mainly used to connect to the TCP server of the Speculos emulator.
Parameters
----------
server : str
IP address of the TCP server.
port : int
Port of the TCP server.
Attributes
----------
server : str
IP address of the TCP server.
port : int
Port of the TCP server.
socket : socket.socket
TCP socket to communicate with the server.
__opened : bool
Whether the TCP socket is opened or not.
"""
def __init__(self, server: str, port: int) -> None:
"""Init constructor of TCPClient."""
self.server: str = server
self.port: int = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__opened: bool = False
def open(self) -> None:
"""Open connection to TCP socket with `self.server` and `self.port`.
Returns
-------
None
"""
if not self.__opened:
self.socket.connect((self.server, self.port))
self.__opened = True
def send(self, data: bytes) -> int:
"""Send `data` through TCP socket `self.socket`.
Parameters
----------
data : bytes
Bytes of data to send.
Returns
-------
int
Total lenght of data sent through TCP socket.
"""
if not data:
raise Exception("Can't send empty data!")
LOG.debug("=> %s", data.hex())
data_len: bytes = int.to_bytes(len(data), 4, byteorder="big")
return self.socket.send(data_len + data)
def recv(self) -> Tuple[int, bytes]:
"""Receive data through TCP socket `self.socket`.
Blocking IO.
Returns
-------
Tuple[int, bytes]
A pair (sw, rdata) containing the status word and response data.
"""
length: int = int.from_bytes(self.socket.recv(4), byteorder="big")
rdata: bytes = self.socket.recv(length)
sw: int = int.from_bytes(self.socket.recv(2), byteorder="big")
LOG.debug("<= %s %s", rdata.hex(), hex(sw)[2:])
return sw, rdata
def exchange(self, data: bytes) -> Tuple[int, bytes]:
"""Exchange (send + receive) with `self.socket`.
Parameters
----------
data : bytes
Bytes with `data` to send.
Returns
-------
Tuple[int, bytes]
A pair (sw, rdata) containing the status word and response data.
"""
self.send(data)
return self.recv() # blocking IO
def close(self) -> None:
"""Close connection to TCP socket `self.socket`.
Returns
-------
None
"""
if self.__opened:
self.socket.close()
self.__opened = False
|
en
| 0.581498
|
ledgercomm.interfaces.tcp_client module. TCPClient class. Mainly used to connect to the TCP server of the Speculos emulator. Parameters ---------- server : str IP address of the TCP server. port : int Port of the TCP server. Attributes ---------- server : str IP address of the TCP server. port : int Port of the TCP server. socket : socket.socket TCP socket to communicate with the server. __opened : bool Whether the TCP socket is opened or not. Init constructor of TCPClient. Open connection to TCP socket with `self.server` and `self.port`. Returns ------- None Send `data` through TCP socket `self.socket`. Parameters ---------- data : bytes Bytes of data to send. Returns ------- int Total lenght of data sent through TCP socket. Receive data through TCP socket `self.socket`. Blocking IO. Returns ------- Tuple[int, bytes] A pair (sw, rdata) containing the status word and response data. Exchange (send + receive) with `self.socket`. Parameters ---------- data : bytes Bytes with `data` to send. Returns ------- Tuple[int, bytes] A pair (sw, rdata) containing the status word and response data. # blocking IO Close connection to TCP socket `self.socket`. Returns ------- None
| 3.272391
| 3
|
tools/yaml_to_json.py
|
RatkoD/avid-covider
| 23
|
6629140
|
<filename>tools/yaml_to_json.py
import os
import re
from pathlib import Path
from hashlib import md5
from slugify import slugify
import glob
import yaml
import json
import requests
PROJECT = 'avid-covider'
def calc_hash(s):
    """Return the first 10 hex characters of the MD5 digest of `s` (UTF-8)."""
    digest = md5(s.encode('utf8'))
    return digest.hexdigest()[:10]
# Matches any single Hebrew letter (aleph through tav).
HEB = re.compile('[א-ת]')


def has_hebrew(s):
    """Return True if `s` contains at least one Hebrew letter."""
    return HEB.search(s) is not None
def get_field(x, f):
    """Follow the dotted path `f` into nested dict `x`.

    Each missing key resolves to {} at that step, so a missing leaf
    yields {} rather than raising.
    """
    for part in f.split('.'):
        x = x.get(part, {})
    return x
def get_uid(x, stack, index=None):
    """Compute a stable 10-character uid for script step `x`.

    The uid hashes the step's identifying field values together with the
    uid components accumulated in `stack` and the step's positional
    `index`, so the same step at the same tree position always gets the
    same id.

    Raises AssertionError (via the hash key check) if `x` carries none of
    the identifying fields.
    """
    FIELDS = ('name', 'wait.variable', 'say', 'switch.arg', 'do.cmd',
              'do.variable', 'match', 'pattern', 'default', 'show')
    try:
        values = [get_field(x, f) for f in FIELDS]
        values = ','.join([str(v) for v in values if v is not None])
        assert len(values) > 0
        current_hash = ''.join(stack)
        key = '{}|{}|{}'.format(current_hash, values, index)
        return calc_hash(key)
    except Exception:
        # BUG FIX: this was a bare `except:`, which also intercepts
        # SystemExit/KeyboardInterrupt; narrowed to Exception.  The debug
        # print plus re-raise is preserved.
        print(x, stack)
        raise
def assign_ids(x, stack=None):
    """Recursively assign a stable 'uid' to every step of the script tree.

    Any dict that owns a 'steps' list receives a uid, and every step in
    that list receives a uid derived from its parent's uid and its index.

    Args:
        x: a script node (dict, list, or scalar; scalars are ignored).
        stack: list of uid components inherited from the ancestors.
    """
    # BUG FIX (idiom): the original used a mutable default argument
    # (stack=[]); use None and create a fresh list per call instead.
    if stack is None:
        stack = []
    if isinstance(x, dict):
        uid = None
        for k, v in x.items():
            if k == 'steps':
                uid = get_uid(x, stack)
                for i, s in enumerate(v):
                    new_stack = stack + [uid, str(i)]
                    s['uid'] = get_uid(s, new_stack, i)
                    assign_ids(s, new_stack)
            else:
                assign_ids(v, stack)
        # Assign the parent's uid only after the children, mirroring the
        # original traversal order.
        if uid is not None:
            x['uid'] = uid
    elif isinstance(x, list):
        for xx in x:
            assign_ids(xx, stack)
TRANSIFEX_TOKEN = os.environ.get('TRANSIFEX_TOKEN') or os.environ.get('TX_TOKEN')
LANGUAGES = ('ar', 'am', 'en', 'ru', 'es', 'fr')
def assign_translations(x, stack, parent=None, parentkey=None, translations=None, fields=(), field_in_key=False):
    """Walk the script tree, yielding (key, hebrew_text) pairs and splicing translations in place.

    For every Hebrew string stored under one of `fields`, a stable key is
    derived from the slug/uid path accumulated in `stack`; the pair is
    yielded (for pushing to Transifex) and, when the key already exists in
    `translations`, the string is replaced in its parent container by a
    {'.tx': {lang: text, ..., '_': original}} dict.

    NOTE(review): `translations` defaults to None but is used with `in`
    below, so callers must always pass a dict — confirm before relying on
    the default.
    """
    if isinstance(x, dict):
        key = None
        # Extend the key path with the node's slug (preferred) or slugified
        # name, then a 2-char prefix of its uid.  NOTE: this mutates the
        # caller's `stack` list — callers pass copies where isolation matters.
        if x.get('slug'):
            stack.append(x['slug'])
        elif x.get('name'):
            stack.append(slugify(x['name']))
        if 'uid' in x:
            stack.append(x['uid'][:2])
        for k, v in x.items():
            if k == 'steps':
                for s in v:
                    # Fresh copy per step so sibling steps don't see each
                    # other's path components.
                    new_stack = stack + []
                    yield from assign_translations(s, new_stack,
                        parent=None, parentkey=None,
                        translations=translations, fields=fields, field_in_key=field_in_key)
            else:
                yield from assign_translations(v, stack[:],
                    parent=x, parentkey=k,
                    translations=translations, fields=fields, field_in_key=field_in_key
                )
    elif isinstance(x, list):
        for index, xx in enumerate(x):
            # Give list items a short content-derived component so keys stay
            # stable when unrelated siblings are reordered; fall back to the
            # positional index.  NOTE(review): calc_hash(xx['show']) assumes
            # 'show' holds a string here — confirm against the script schema.
            if isinstance(xx, dict) and 'show' in xx:
                new_stack = stack + [calc_hash(xx['show'])[:2]]
            elif isinstance(xx, str):
                new_stack = stack + [calc_hash(xx)[:2]]
            else:
                new_stack = stack + [index]
            yield from assign_translations(xx, new_stack,
                parent=x, parentkey=index,
                translations=translations, fields=fields, field_in_key=field_in_key
            )
    elif isinstance(x, str):
        # Only leaf strings containing Hebrew text are translatable.
        if parent and parentkey is not None and has_hebrew(x):
            if isinstance(parentkey, str) and parentkey not in fields:
                return
            if field_in_key:
                key = '/'.join(str(s) for s in stack + [parentkey])
            else:
                key = '/'.join(str(s) for s in stack)
            yield key, x
            if key in translations:
                # Replace the raw string in place with its translations dict;
                # '_' always carries the original Hebrew text.
                parent[parentkey]={'.tx': dict(translations[key], _=x)}
            # else:
            #     print('KEY NOT IN TX %s'% key)
def transifex_session():
    """Return a requests session authenticated against the Transifex API."""
    session = requests.Session()
    session.auth = ('api', TRANSIFEX_TOKEN)
    return session
def transifex_slug(filename):
    """Derive a Transifex resource slug from a Path.

    Path parts are joined with '_' and any remaining dots replaced by '_'.
    """
    joined = '_'.join(filename.parts)
    return joined.replace('.', '_')
def push_translations(filename: Path, translations):
    """Upload the Hebrew source strings for `filename` to Transifex.

    If the resource already exists its content is replaced (PUT);
    otherwise a new YAML_GENERIC resource is created (POST).
    """
    payload = yaml.dump(dict(he=translations),
                        allow_unicode=True, indent=2, width=1000000)
    slug = transifex_slug(filename)
    session = transifex_session()
    existing = session.get(
        f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/')
    if existing.status_code == requests.codes.ok:
        print('Update file:')
        response = session.put(
            f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/content/',
            json=dict(content=payload)
        )
        print(response.status_code, response.content[:50])
    else:
        print('New file:', slug)
        response = session.post(
            f'https://www.transifex.com/api/2/project/{PROJECT}/resources/',
            json=dict(
                slug=slug,
                name=str(filename),
                accept_translations=True,
                i18n_type='YAML_GENERIC',
                content=payload,
            )
        )
        print(response.status_code, response.content[:100])
def pull_translations(lang, filename):
    """Download the `lang` translations of `filename` from Transifex.

    Returns a dict of key -> translated string with empty values dropped,
    or {} when the resource yields no parsable JSON.
    """
    session = transifex_session()
    slug = transifex_slug(filename)
    url = f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/translation/{lang}/'
    try:
        payload = session.get(url).json()
    except json.decoder.JSONDecodeError:
        print('No data from %s' % url)
        return {}
    # BaseLoader keeps every value a plain string; the file nests
    # everything under the 'he' source-language key.
    parsed = yaml.load(payload['content'], Loader=yaml.BaseLoader)['he']
    return {k: v for k, v in parsed.items() if v}
def write_ical(title, body, path):
    """Write the daily-reminder .ics calendar asset for one language.

    Args:
        title: translated event summary / alarm text.
        body: translated event description.
        path: language URL prefix ('' for the default language, 'en/' etc.),
            spliced into the dist output path.
    """
    path = 'dist/avid-covider/{}assets/corona_reminder.ics'.format(path)
    with open(path, 'w', encoding='utf8') as ics:
        # The ICS template below is emitted verbatim (the format is
        # line-oriented); only {title} and {body} are substituted.
        ics.write('''BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Microsoft Corporation//Outlook 12.0 MIMEDIR//EN
METHOD:PUBLISH
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:Asia/Jerusalem
TZURL:http://tzurl.org/zoneinfo-outlook/Asia/Jerusalem
X-LIC-LOCATION:Asia/Jerusalem
BEGIN:DAYLIGHT
TZOFFSETFROM:+0200
TZOFFSETTO:+0300
TZNAME:EEST
DTSTART:19700329T000000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+0300
TZOFFSETTO:+0200
TZNAME:EET
DTSTART:19701025T000000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20200328T131636Z
UID:corona-israel-0001
DTSTART;TZID=Asia/Jerusalem:20200328T080000
RRULE:FREQ=DAILY
DTEND;TZID=Asia/Jerusalem:20200328T081500
SUMMARY:{title}
URL:https://coronaisrael.org/?source=calendar
DESCRIPTION:{body}
LOCATION:https://coronaisrael.org/?source=calendar
TRANSP:TRANSPARENT
X-MICROSOFT-CDO-BUSYSTATUS:FREE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:{title}
TRIGGER:-PT0M
END:VALARM
END:VEVENT
END:VCALENDAR'''.format(title=title, body=body))
def create_assets(script):
    """Generate the per-language calendar reminder (.ics) assets.

    `script` is the asset-definition node whose 'keys' entries map a name
    to either a plain value or a {'.tx': {lang: text, ...}} dict.
    """
    translations = {}
    for entry in script['keys']:
        shown = entry['show']
        fallback = {'_': shown}
        translations[entry['name']] = (
            shown.get('.tx', fallback) if isinstance(shown, dict) else fallback)
    # One asset per language, plus the default ('_') at the root path.
    for lang, prefix in [(code, code + '/') for code in LANGUAGES] + [('_', '')]:
        title_map = translations['calendarTitle']
        cal_title = title_map.get(lang, title_map.get('_'))
        body_map = translations['calendarBody']
        cal_body = body_map.get(lang, body_map.get('_'))
        try:
            write_ical(cal_title, cal_body, prefix)
        except Exception as e:
            print('Failed to write ical %s' % e)
if __name__=='__main__':
    f_in = Path('scripts/script.yaml')
    # SECURITY NOTE: yaml.load without an explicit Loader can construct
    # arbitrary Python objects; the script file is trusted project input,
    # but consider yaml.safe_load here.
    # BUG FIX: read via a context manager — the original leaked the handle.
    with f_in.open() as fh:
        scripts = yaml.load(fh)
    assign_ids(scripts, [str(f_in)])
    if TRANSIFEX_TOKEN:
        # Merge the per-language translations pulled from Transifex into a
        # single key -> {lang: text} map.
        rx_translations = {}
        for lang in LANGUAGES:
            lang_translations = pull_translations(lang, f_in)
            for key, value in lang_translations.items():
                rx_translations.setdefault(key, {})[lang] = value
        # Collect the Hebrew source strings (splicing translations into the
        # script tree as a side effect) and push them back to Transifex.
        tx_translations = {}
        for script in scripts:
            for k, v in assign_translations(script, [], translations=rx_translations, fields=('show', 'say', 'placeholder')):
                assert tx_translations.get(k, v) == v, 'Duplicate key %s (v=%r, tx[k]==%r)' % (k, v, tx_translations[k])
                tx_translations[k] = v
                print(k, v)
        push_translations(f_in, tx_translations)
        create_assets(scripts[-1])
    scripts = dict(s=scripts)
    f_out = Path('src/app/script.ts')
    # BUG FIX: write via a context manager so the output file is flushed
    # and closed deterministically (the original never closed the handle).
    with f_out.open('w') as out:
        out.write('''
/* tslint:disable */
export const script = {};
'''.format(json.dumps(
            scripts, ensure_ascii=False, sort_keys=True, indent=2)))
|
<filename>tools/yaml_to_json.py
import os
import re
from pathlib import Path
from hashlib import md5
from slugify import slugify
import glob
import yaml
import json
import requests
PROJECT = 'avid-covider'
def calc_hash(s):
ret = md5(s.encode('utf8')).hexdigest()[:10]
return ret
HEB = re.compile('[א-ת]')
def has_hebrew(s):
return len(HEB.findall(s)) > 0
def get_field(x, f):
parts = f.split('.')
while len(parts) > 0:
p = parts.pop(0)
x = x.get(p, {})
return x
def get_uid(x, stack, index=None):
try:
FIELDS = ['name', 'wait.variable', 'say', 'switch.arg', 'do.cmd', 'do.variable', 'match', 'pattern', 'default', 'show']
values = [get_field(x, f) for f in FIELDS]
values = ','.join([str(v) for v in values if v is not None])
assert len(values) > 0
current_hash = ''.join(stack)
key = '{}|{}|{}'.format(current_hash, values, index)
ret = calc_hash(key)
return ret
except:
print(x, stack)
raise
def assign_ids(x, stack=[]):
if isinstance(x, dict):
uid = None
for k, v in x.items():
if k == 'steps':
uid = get_uid(x, stack)
for i, s in enumerate(v):
new_stack = stack + [uid, str(i)]
s['uid'] = get_uid(s, new_stack, i)
assign_ids(s, new_stack)
else:
assign_ids(v, stack)
if uid is not None:
x['uid'] = uid
elif isinstance(x, list):
for xx in x:
assign_ids(xx, stack)
else:
return
TRANSIFEX_TOKEN = os.environ.get('TRANSIFEX_TOKEN') or os.environ.get('TX_TOKEN')
LANGUAGES = ('ar', 'am', 'en', 'ru', 'es', 'fr')
def assign_translations(x, stack, parent=None, parentkey=None, translations=None, fields=(), field_in_key=False):
if isinstance(x, dict):
key = None
if x.get('slug'):
stack.append(x['slug'])
elif x.get('name'):
stack.append(slugify(x['name']))
if 'uid' in x:
stack.append(x['uid'][:2])
for k, v in x.items():
if k == 'steps':
for s in v:
new_stack = stack + []
yield from assign_translations(s, new_stack,
parent=None, parentkey=None,
translations=translations, fields=fields, field_in_key=field_in_key)
else:
yield from assign_translations(v, stack[:],
parent=x, parentkey=k,
translations=translations, fields=fields, field_in_key=field_in_key
)
elif isinstance(x, list):
for index, xx in enumerate(x):
if isinstance(xx, dict) and 'show' in xx:
new_stack = stack + [calc_hash(xx['show'])[:2]]
elif isinstance(xx, str):
new_stack = stack + [calc_hash(xx)[:2]]
else:
new_stack = stack + [index]
yield from assign_translations(xx, new_stack,
parent=x, parentkey=index,
translations=translations, fields=fields, field_in_key=field_in_key
)
elif isinstance(x, str):
if parent and parentkey is not None and has_hebrew(x):
if isinstance(parentkey, str) and parentkey not in fields:
return
if field_in_key:
key = '/'.join(str(s) for s in stack + [parentkey])
else:
key = '/'.join(str(s) for s in stack)
yield key, x
if key in translations:
parent[parentkey]={'.tx': dict(translations[key], _=x)}
# else:
# print('KEY NOT IN TX %s'% key)
def transifex_session():
s = requests.Session()
s.auth = ('api', TRANSIFEX_TOKEN)
return s
def transifex_slug(filename):
return '_'.join(filename.parts).replace('.', '_')
def push_translations(filename: Path, translations):
translations = dict(he=translations)
content = yaml.dump(translations, allow_unicode=True, indent=2, width=1000000)
slug = transifex_slug(filename)
s = transifex_session()
resp = s.get(f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/')
if resp.status_code == requests.codes.ok:
print('Update file:')
data = dict(
content=content,
)
resp = s.put(
f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/content/',
json=data
)
print(resp.status_code, resp.content[:50])
else:
print('New file:', slug)
data = dict(
slug=slug,
name=str(filename),
accept_translations=True,
i18n_type='YAML_GENERIC',
content=content,
)
resp = s.post(
f'https://www.transifex.com/api/2/project/{PROJECT}/resources/',
json=data
)
print(resp.status_code, resp.content[:100])
def pull_translations(lang, filename):
s = transifex_session()
slug = transifex_slug(filename)
url = f'https://www.transifex.com/api/2/project/{PROJECT}/resource/{slug}/translation/{lang}/'
try:
translations = s.get(url).json()
except json.decoder.JSONDecodeError:
print('No data from %s' % url)
return {}
translations = yaml.load(translations['content'], Loader=yaml.BaseLoader)['he']
translations = dict((k, v) for k, v in translations.items() if v)
return translations
def write_ical(title, body, path):
path = 'dist/avid-covider/{}assets/corona_reminder.ics'.format(path)
with open(path, 'w', encoding='utf8') as ics:
ics.write('''BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Microsoft Corporation//Outlook 12.0 MIMEDIR//EN
METHOD:PUBLISH
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:Asia/Jerusalem
TZURL:http://tzurl.org/zoneinfo-outlook/Asia/Jerusalem
X-LIC-LOCATION:Asia/Jerusalem
BEGIN:DAYLIGHT
TZOFFSETFROM:+0200
TZOFFSETTO:+0300
TZNAME:EEST
DTSTART:19700329T000000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:+0300
TZOFFSETTO:+0200
TZNAME:EET
DTSTART:19701025T000000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20200328T131636Z
UID:corona-israel-0001
DTSTART;TZID=Asia/Jerusalem:20200328T080000
RRULE:FREQ=DAILY
DTEND;TZID=Asia/Jerusalem:20200328T081500
SUMMARY:{title}
URL:https://coronaisrael.org/?source=calendar
DESCRIPTION:{body}
LOCATION:https://coronaisrael.org/?source=calendar
TRANSP:TRANSPARENT
X-MICROSOFT-CDO-BUSYSTATUS:FREE
BEGIN:VALARM
ACTION:DISPLAY
DESCRIPTION:{title}
TRIGGER:-PT0M
END:VALARM
END:VEVENT
END:VCALENDAR'''.format(title=title, body=body))
def create_assets(script):
translations = {}
for x in script['keys']:
default = {'_': x['show']}
translations[x['name']] = x['show'].get('.tx', default) if isinstance(x['show'], dict) else default
languages = [(x, x+'/') for x in LANGUAGES] + [('_', '')]
for lang, path in languages:
calendarTitle = translations['calendarTitle']
calendarTitle=calendarTitle.get(lang, calendarTitle.get('_'))
calendarBody = translations['calendarBody']
calendarBody=calendarBody.get(lang, calendarBody.get('_'))
try:
write_ical(calendarTitle, calendarBody, path)
except Exception as e:
print('Failed to write ical %s' % e)
if __name__=='__main__':
f_in = Path('scripts/script.yaml')
scripts = yaml.load(f_in.open())
assign_ids(scripts, [str(f_in)])
if TRANSIFEX_TOKEN:
rx_translations = {}
for lang in LANGUAGES:
lang_translations = pull_translations(lang, f_in)
for key, value in lang_translations.items():
rx_translations.setdefault(key, {})[lang] = value
tx_translations = {}
for script in scripts:
for k, v in assign_translations(script, [], translations=rx_translations, fields=('show', 'say', 'placeholder')):
assert tx_translations.get(k, v) == v, 'Duplicate key %s (v=%r, tx[k]==%r)' % (k, v, tx_translations[k])
tx_translations[k] = v
print(k, v)
push_translations(f_in, tx_translations)
create_assets(scripts[-1])
scripts = dict(s=scripts)
f_out = Path('src/app/script.ts')
f_out.open('w').write('''
/* tslint:disable */
export const script = {};
'''.format(json.dumps(
scripts, ensure_ascii=False, sort_keys=True, indent=2)
)
)
|
en
| 0.288607
|
# else: # print('KEY NOT IN TX %s'% key) BEGIN:VCALENDAR VERSION:2.0 PRODID:-//Microsoft Corporation//Outlook 12.0 MIMEDIR//EN METHOD:PUBLISH CALSCALE:GREGORIAN BEGIN:VTIMEZONE TZID:Asia/Jerusalem TZURL:http://tzurl.org/zoneinfo-outlook/Asia/Jerusalem X-LIC-LOCATION:Asia/Jerusalem BEGIN:DAYLIGHT TZOFFSETFROM:+0200 TZOFFSETTO:+0300 TZNAME:EEST DTSTART:19700329T000000 RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=-1SU END:DAYLIGHT BEGIN:STANDARD TZOFFSETFROM:+0300 TZOFFSETTO:+0200 TZNAME:EET DTSTART:19701025T000000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU END:STANDARD END:VTIMEZONE BEGIN:VEVENT DTSTAMP:20200328T131636Z UID:corona-israel-0001 DTSTART;TZID=Asia/Jerusalem:20200328T080000 RRULE:FREQ=DAILY DTEND;TZID=Asia/Jerusalem:20200328T081500 SUMMARY:{title} URL:https://coronaisrael.org/?source=calendar DESCRIPTION:{body} LOCATION:https://coronaisrael.org/?source=calendar TRANSP:TRANSPARENT X-MICROSOFT-CDO-BUSYSTATUS:FREE BEGIN:VALARM ACTION:DISPLAY DESCRIPTION:{title} TRIGGER:-PT0M END:VALARM END:VEVENT END:VCALENDAR /* tslint:disable */ export const script = {};
| 2.511051
| 3
|
scenes/website/control/scene_checkinlist.py
|
pretix/pretix-screenshots
| 14
|
6629141
|
import random
from datetime import timedelta
import faker
import pytest
from decimal import Decimal
from django.utils.timezone import now
from django.utils.translation import gettext as _
from pretix.base.models import Order
from ...utils import screenshot
@pytest.fixture
def items(event, tax_rule):
i1 = event.items.create(name=_('Business Ticket'), default_price=400, admission=True, tax_rule=tax_rule,
active=True, position=2)
i2 = event.items.create(name=_('VIP Ticket'), default_price=600, admission=True, tax_rule=tax_rule,
active=True, position=3)
q1 = event.quotas.create(name='Available', size=7)
q1.items.add(i1)
q2 = event.quotas.create(name='Available', size=3)
q2.items.add(i2)
return i1, i2
@pytest.fixture
def list_all(event, items):
return event.checkin_lists.create(
name=_('General admission'),
all_products=True,
)
@pytest.fixture
def list_vip(event, items):
l = event.checkin_lists.create(
name=_('VIP Lounge'),
all_products=False,
)
l.limit_products.add(items[1])
return l
@pytest.fixture
def data(event, items, list_all, list_vip):
for l in range(334):
d = now() - timedelta(days=5)
order = event.orders.create(
status=Order.STATUS_PAID,
email='<EMAIL>',
expires=now(),
datetime=d,
total=Decimal("23"),
locale='en'
)
if random.randrange(0, 10) < 3:
p = order.positions.create(
item=items[1], price=Decimal('600.00'),
)
if random.randrange(0, 10) < 5:
p.checkins.create(datetime=d, list=list_all)
if random.randrange(0, 10) < 3:
p.checkins.create(datetime=d, list=list_vip)
else:
p = order.positions.create(
item=items[0], price=Decimal('400.00')
)
if random.randrange(0, 10) < 6:
p.checkins.create(datetime=d, list=list_all)
@pytest.mark.django_db
def shot_waiting_list_admin(live_server, organizer, event, logged_in_client, data):
logged_in_client.get(live_server.url + '/control/event/{}/{}/checkinlists/'.format(
organizer.slug, event.slug
))
logged_in_client.find_element_by_css_selector(".table")
screenshot(logged_in_client, 'website/control/checkinlist_admin.png')
|
import random
from datetime import timedelta
import faker
import pytest
from decimal import Decimal
from django.utils.timezone import now
from django.utils.translation import gettext as _
from pretix.base.models import Order
from ...utils import screenshot
@pytest.fixture
def items(event, tax_rule):
i1 = event.items.create(name=_('Business Ticket'), default_price=400, admission=True, tax_rule=tax_rule,
active=True, position=2)
i2 = event.items.create(name=_('VIP Ticket'), default_price=600, admission=True, tax_rule=tax_rule,
active=True, position=3)
q1 = event.quotas.create(name='Available', size=7)
q1.items.add(i1)
q2 = event.quotas.create(name='Available', size=3)
q2.items.add(i2)
return i1, i2
@pytest.fixture
def list_all(event, items):
return event.checkin_lists.create(
name=_('General admission'),
all_products=True,
)
@pytest.fixture
def list_vip(event, items):
l = event.checkin_lists.create(
name=_('VIP Lounge'),
all_products=False,
)
l.limit_products.add(items[1])
return l
@pytest.fixture
def data(event, items, list_all, list_vip):
for l in range(334):
d = now() - timedelta(days=5)
order = event.orders.create(
status=Order.STATUS_PAID,
email='<EMAIL>',
expires=now(),
datetime=d,
total=Decimal("23"),
locale='en'
)
if random.randrange(0, 10) < 3:
p = order.positions.create(
item=items[1], price=Decimal('600.00'),
)
if random.randrange(0, 10) < 5:
p.checkins.create(datetime=d, list=list_all)
if random.randrange(0, 10) < 3:
p.checkins.create(datetime=d, list=list_vip)
else:
p = order.positions.create(
item=items[0], price=Decimal('400.00')
)
if random.randrange(0, 10) < 6:
p.checkins.create(datetime=d, list=list_all)
@pytest.mark.django_db
def shot_waiting_list_admin(live_server, organizer, event, logged_in_client, data):
logged_in_client.get(live_server.url + '/control/event/{}/{}/checkinlists/'.format(
organizer.slug, event.slug
))
logged_in_client.find_element_by_css_selector(".table")
screenshot(logged_in_client, 'website/control/checkinlist_admin.png')
|
none
| 1
| 1.91162
| 2
|
|
paip/examples/eliza/eliza.py
|
skynetshrugged/paip-python
| 0
|
6629142
|
<filename>paip/examples/eliza/eliza.py
import json
import sys
import eliza
rules = {
"?*x hello ?*y": [
"How do you do. Please state your problem."
],
"?*x computer ?*y": [
"Do computers worry you?",
"What do you think about machines?",
"Why do you mention computers?",
"What do you think machines have to do with your problem?",
],
"?*x name ?*y": [
"I am not interested in names",
],
"?*x sorry ?*y": [
"Please don't apologize",
"Apologies are not necessary",
"What feelings do you have when you apologize",
],
"?*x I remember ?*y": [
"Do you often think of ?y?",
"Does thinking of ?y bring anything else to mind?",
"What else do you remember?",
"Why do you recall ?y right now?",
"What in the present situation reminds you of ?y?",
"What is the connection between me and ?y?",
],
"?*x do you remember ?*y": [
"Did you think I would forget ?y?",
"Why do you think I should recall ?y now?",
"What about ?y?",
"You mentioned ?y",
],
"?*x I want ?*y": [
"What would it mean if you got ?y?",
"Why do you want ?y?",
"Suppose you got ?y soon."
],
"?*x if ?*y": [
"Do you really think it's likely that ?y?",
"Do you wish that ?y?",
"What do you think about ?y?",
"Really--if ?y?"
],
"?*x I dreamt ?*y": [
"How do you feel about ?y in reality?",
],
"?*x dream ?*y": [
"What does this dream suggest to you?",
"Do you dream often?",
"What persons appear in your dreams?",
"Don't you believe that dream has to do with your problem?",
],
"?*x my mother ?*y": [
"Who else in your family ?y?",
"Tell me more about your family",
],
"?*x my father ?*y": [
"Your father?",
"Does he influence you strongly?",
"What else comes to mind when you think of your father?",
],
"?*x I am glad ?*y": [
"How have I helped you to be ?y?",
"What makes you happy just now?",
"Can you explain why you are suddenly ?y?",
],
"?*x I am sad ?*y": [
"I am sorry to hear you are depressed",
"I'm sure it's not pleasant to be sad",
],
"?*x are like ?*y": [
"What resemblence do you see between ?x and ?y?",
],
"?*x is like ?*y": [
"In what way is it that ?x is like ?y?",
"What resemblence do you see?",
"Could there really be some connection?",
"How?",
],
"?*x alike ?*y": [
"In what way?",
"What similarities are there?",
],
"?* same ?*y": [
"What other connections do you see?",
],
"?*x no ?*y": [
"Why not?",
"You are being a bit negative.",
"Are you saying 'No' just to be negative?"
],
"?*x I was ?*y": [
"Were you really?",
"Perhaps I already knew you were ?y.",
"Why do you tell me you were ?y now?"
],
"?*x was I ?*y": [
"What if you were ?y?",
"Do you think you were ?y?",
"What would it mean if you were ?y?",
],
"?*x I am ?*y": [
"In what way are you ?y?",
"Do you want to be ?y?",
],
"?*x am I ?*y": [
"Do you believe you are ?y?",
"Would you want to be ?y?",
"You wish I would tell you you are ?y?",
"What would it mean if you were ?y?",
],
"?*x am ?*y": [
"Why do you say 'AM?'",
"I don't understand that"
],
"?*x are you ?*y": [
"Why are you interested in whether I am ?y or not?",
"Would you prefer if I weren't ?y?",
"Perhaps I am ?y in your fantasies",
],
"?*x you are ?*y": [
"What makes you think I am ?y?",
],
"?*x because ?*y": [
"Is that the real reason?",
"What other reasons might there be?",
"Does that reason seem to explain anything else?",
],
"?*x were you ?*y": [
"Perhaps I was ?y?",
"What do you think?",
"What if I had been ?y?",
],
"?*x I can't ?*y": [
"Maybe you could ?y now",
"What if you could ?y?",
],
"?*x I feel ?*y": [
"Do you often feel ?y?"
],
"?*x I felt ?*y": [
"What other feelings do you have?"
],
"?*x I ?*y you ?*z": [
"Perhaps in your fantasy we ?y each other",
],
"?*x why don't you ?*y": [
"Should you ?y yourself?",
"Do you believe I don't ?y?",
"Perhaps I will ?y in good time",
],
"?*x yes ?*y": [
"You seem quite positive",
"You are sure?",
"I understand",
],
"?*x someone ?*y": [
"Can you be more specific?",
],
"?*x everyone ?*y": [
"Surely not everyone",
"Can you think of anyone in particular?",
"Who, for example?",
"You are thinking of a special person",
],
"?*x always ?*y": [
"Can you think of a specific example?",
"When?",
"What incident are you thinking of?",
"Really--always?",
],
"?*x what ?*y": [
"Why do you ask?",
"Does that question interest you?",
"What is it you really want to know?",
"What do you think?",
"What comes to your mind when you ask that?",
],
"?*x perhaps ?*y": [
"You do not seem quite certain",
],
"?*x are ?*y": [
"Did you think they might not be ?y?",
"Possibly they are ?y",
],
}
default_responses = [
"Very interesting",
"I am not sure I understand you fully",
"What does that suggest to you?",
"Please continue",
"Go on",
"Do you feel strongly about discussing such things?",
]
def main():
# We need the rules in a list containing elements of the following form:
# `(input pattern, [output pattern 1, output pattern 2, ...]`
rules_list = []
for pattern, transforms in rules.items():
# Remove the punctuation from the pattern to simplify matching.
#pattern = eliza.translate(str(pattern.upper())) # kill unicode
transforms = [str(t).upper() for t in transforms]
rules_list.append((pattern, transforms))
#eliza.interact('ELIZA> ', rules_list, map(str.upper, default_responses))
if __name__ == '__main__':
main()
|
<filename>paip/examples/eliza/eliza.py
import json
import sys
import eliza
rules = {
"?*x hello ?*y": [
"How do you do. Please state your problem."
],
"?*x computer ?*y": [
"Do computers worry you?",
"What do you think about machines?",
"Why do you mention computers?",
"What do you think machines have to do with your problem?",
],
"?*x name ?*y": [
"I am not interested in names",
],
"?*x sorry ?*y": [
"Please don't apologize",
"Apologies are not necessary",
"What feelings do you have when you apologize",
],
"?*x I remember ?*y": [
"Do you often think of ?y?",
"Does thinking of ?y bring anything else to mind?",
"What else do you remember?",
"Why do you recall ?y right now?",
"What in the present situation reminds you of ?y?",
"What is the connection between me and ?y?",
],
"?*x do you remember ?*y": [
"Did you think I would forget ?y?",
"Why do you think I should recall ?y now?",
"What about ?y?",
"You mentioned ?y",
],
"?*x I want ?*y": [
"What would it mean if you got ?y?",
"Why do you want ?y?",
"Suppose you got ?y soon."
],
"?*x if ?*y": [
"Do you really think it's likely that ?y?",
"Do you wish that ?y?",
"What do you think about ?y?",
"Really--if ?y?"
],
"?*x I dreamt ?*y": [
"How do you feel about ?y in reality?",
],
"?*x dream ?*y": [
"What does this dream suggest to you?",
"Do you dream often?",
"What persons appear in your dreams?",
"Don't you believe that dream has to do with your problem?",
],
"?*x my mother ?*y": [
"Who else in your family ?y?",
"Tell me more about your family",
],
"?*x my father ?*y": [
"Your father?",
"Does he influence you strongly?",
"What else comes to mind when you think of your father?",
],
"?*x I am glad ?*y": [
"How have I helped you to be ?y?",
"What makes you happy just now?",
"Can you explain why you are suddenly ?y?",
],
"?*x I am sad ?*y": [
"I am sorry to hear you are depressed",
"I'm sure it's not pleasant to be sad",
],
"?*x are like ?*y": [
"What resemblence do you see between ?x and ?y?",
],
"?*x is like ?*y": [
"In what way is it that ?x is like ?y?",
"What resemblence do you see?",
"Could there really be some connection?",
"How?",
],
"?*x alike ?*y": [
"In what way?",
"What similarities are there?",
],
"?* same ?*y": [
"What other connections do you see?",
],
"?*x no ?*y": [
"Why not?",
"You are being a bit negative.",
"Are you saying 'No' just to be negative?"
],
"?*x I was ?*y": [
"Were you really?",
"Perhaps I already knew you were ?y.",
"Why do you tell me you were ?y now?"
],
"?*x was I ?*y": [
"What if you were ?y?",
"Do you think you were ?y?",
"What would it mean if you were ?y?",
],
"?*x I am ?*y": [
"In what way are you ?y?",
"Do you want to be ?y?",
],
"?*x am I ?*y": [
"Do you believe you are ?y?",
"Would you want to be ?y?",
"You wish I would tell you you are ?y?",
"What would it mean if you were ?y?",
],
"?*x am ?*y": [
"Why do you say 'AM?'",
"I don't understand that"
],
"?*x are you ?*y": [
"Why are you interested in whether I am ?y or not?",
"Would you prefer if I weren't ?y?",
"Perhaps I am ?y in your fantasies",
],
"?*x you are ?*y": [
"What makes you think I am ?y?",
],
"?*x because ?*y": [
"Is that the real reason?",
"What other reasons might there be?",
"Does that reason seem to explain anything else?",
],
"?*x were you ?*y": [
"Perhaps I was ?y?",
"What do you think?",
"What if I had been ?y?",
],
"?*x I can't ?*y": [
"Maybe you could ?y now",
"What if you could ?y?",
],
"?*x I feel ?*y": [
"Do you often feel ?y?"
],
"?*x I felt ?*y": [
"What other feelings do you have?"
],
"?*x I ?*y you ?*z": [
"Perhaps in your fantasy we ?y each other",
],
"?*x why don't you ?*y": [
"Should you ?y yourself?",
"Do you believe I don't ?y?",
"Perhaps I will ?y in good time",
],
"?*x yes ?*y": [
"You seem quite positive",
"You are sure?",
"I understand",
],
"?*x someone ?*y": [
"Can you be more specific?",
],
"?*x everyone ?*y": [
"Surely not everyone",
"Can you think of anyone in particular?",
"Who, for example?",
"You are thinking of a special person",
],
"?*x always ?*y": [
"Can you think of a specific example?",
"When?",
"What incident are you thinking of?",
"Really--always?",
],
"?*x what ?*y": [
"Why do you ask?",
"Does that question interest you?",
"What is it you really want to know?",
"What do you think?",
"What comes to your mind when you ask that?",
],
"?*x perhaps ?*y": [
"You do not seem quite certain",
],
"?*x are ?*y": [
"Did you think they might not be ?y?",
"Possibly they are ?y",
],
}
default_responses = [
"Very interesting",
"I am not sure I understand you fully",
"What does that suggest to you?",
"Please continue",
"Go on",
"Do you feel strongly about discussing such things?",
]
def main():
# We need the rules in a list containing elements of the following form:
# `(input pattern, [output pattern 1, output pattern 2, ...]`
rules_list = []
for pattern, transforms in rules.items():
# Remove the punctuation from the pattern to simplify matching.
#pattern = eliza.translate(str(pattern.upper())) # kill unicode
transforms = [str(t).upper() for t in transforms]
rules_list.append((pattern, transforms))
#eliza.interact('ELIZA> ', rules_list, map(str.upper, default_responses))
if __name__ == '__main__':
main()
|
en
| 0.510099
|
# We need the rules in a list containing elements of the following form: # `(input pattern, [output pattern 1, output pattern 2, ...]` # Remove the punctuation from the pattern to simplify matching. #pattern = eliza.translate(str(pattern.upper())) # kill unicode #eliza.interact('ELIZA> ', rules_list, map(str.upper, default_responses))
| 2.974891
| 3
|
adventofcode-python/adventofcode2019/day/09/09.py
|
salockhart/adventofcode2020
| 1
|
6629143
|
import fileinput
from collections import defaultdict
POSITION = 0
IMMEDIATE = 1
RELATIVE = 2
ADD = 1
MUL = 2
IN = 3
OUT = 4
JUMP_TRUE = 5
JUMP_FALSE = 6
LESS_THAN = 7
EQUALS = 8
ADD_RELATIVE_BASE = 9
HALT = 99
READ = 0
WRITE = 1
OPS = {
ADD: (READ, READ, WRITE),
MUL: (READ, READ, WRITE),
IN: (WRITE,),
OUT: (READ,),
JUMP_TRUE: (READ, READ),
JUMP_FALSE: (READ, READ),
LESS_THAN: (READ, READ, WRITE),
EQUALS: (READ, READ, WRITE),
ADD_RELATIVE_BASE: (READ,),
HALT: (),
}
def get_args(pc, base, codes, arg_kinds, modes):
args = [None] * 4
for i, kind in enumerate(arg_kinds):
a = codes[pc + 1 + i]
modes, mode = divmod(modes, 10)
if mode == RELATIVE:
a += base
if mode in (POSITION, RELATIVE):
if a < 0:
raise Exception(
f"Invalid access to negative memory index: {a}")
if kind == READ:
a = codes[a]
elif kind != WRITE:
raise Exception(f"Invalid arg kind: {kind}")
elif mode == IMMEDIATE:
if kind == WRITE:
raise Exception(f"Invalid arg mode for write arg: {mode}")
else:
raise Exception(f"Invalid arg mode: {mode}")
args[i] = a
return args
def execute(codes, inputs, initial_pc=0, initial_base=0):
pc = initial_pc
base = initial_base
while codes[pc] != 99:
modes, opcode = divmod(codes[pc], 100)
if opcode not in OPS:
raise Exception(f"Unknown opcode: {opcode}")
arg_kinds = OPS[opcode]
a, b, c, _ = get_args(pc, base, codes, arg_kinds, modes)
pc += 1 + len(arg_kinds)
if opcode == IN:
codes[a] = inputs.pop()
elif opcode == OUT:
print(a)
return pc, base, codes, a
elif opcode == ADD:
codes[c] = a + b
elif opcode == MUL:
codes[c] = a * b
elif opcode == LESS_THAN:
codes[c] = 1 if a < b else 0
elif opcode == EQUALS:
codes[c] = 1 if a == b else 0
elif opcode == JUMP_TRUE:
if a != 0:
pc = b
elif opcode == JUMP_FALSE:
if a == 0:
pc = b
elif opcode == ADD_RELATIVE_BASE:
base += a
else:
raise Exception(f"Unimplemented opcode: {opcode}")
return pc, base, codes, None
def part1(lines):
codes = defaultdict(int)
for i, code in enumerate(",".join(line.strip() for line in lines).split(",")):
codes[i] = int(code)
last_output = -1
idx = 0
base = 0
while last_output is not None:
idx, base, codes, last_output = execute(codes, [1], idx, base)
return ""
def part2(lines):
codes = defaultdict(int)
for i, code in enumerate(",".join(line.strip() for line in lines).split(",")):
codes[i] = int(code)
last_output = -1
idx = 0
base = 0
while last_output is not None:
idx, base, codes, last_output = execute(codes, [2], idx, base)
return ""
parts = (part1, part2)
if __name__ == '__main__':
lines = list(fileinput.input())
print("part 1: %s" % (part1(lines),))
print("part 2: %s" % (part2(lines),))
|
import fileinput
from collections import defaultdict
POSITION = 0
IMMEDIATE = 1
RELATIVE = 2
ADD = 1
MUL = 2
IN = 3
OUT = 4
JUMP_TRUE = 5
JUMP_FALSE = 6
LESS_THAN = 7
EQUALS = 8
ADD_RELATIVE_BASE = 9
HALT = 99
READ = 0
WRITE = 1
OPS = {
ADD: (READ, READ, WRITE),
MUL: (READ, READ, WRITE),
IN: (WRITE,),
OUT: (READ,),
JUMP_TRUE: (READ, READ),
JUMP_FALSE: (READ, READ),
LESS_THAN: (READ, READ, WRITE),
EQUALS: (READ, READ, WRITE),
ADD_RELATIVE_BASE: (READ,),
HALT: (),
}
def get_args(pc, base, codes, arg_kinds, modes):
args = [None] * 4
for i, kind in enumerate(arg_kinds):
a = codes[pc + 1 + i]
modes, mode = divmod(modes, 10)
if mode == RELATIVE:
a += base
if mode in (POSITION, RELATIVE):
if a < 0:
raise Exception(
f"Invalid access to negative memory index: {a}")
if kind == READ:
a = codes[a]
elif kind != WRITE:
raise Exception(f"Invalid arg kind: {kind}")
elif mode == IMMEDIATE:
if kind == WRITE:
raise Exception(f"Invalid arg mode for write arg: {mode}")
else:
raise Exception(f"Invalid arg mode: {mode}")
args[i] = a
return args
def execute(codes, inputs, initial_pc=0, initial_base=0):
pc = initial_pc
base = initial_base
while codes[pc] != 99:
modes, opcode = divmod(codes[pc], 100)
if opcode not in OPS:
raise Exception(f"Unknown opcode: {opcode}")
arg_kinds = OPS[opcode]
a, b, c, _ = get_args(pc, base, codes, arg_kinds, modes)
pc += 1 + len(arg_kinds)
if opcode == IN:
codes[a] = inputs.pop()
elif opcode == OUT:
print(a)
return pc, base, codes, a
elif opcode == ADD:
codes[c] = a + b
elif opcode == MUL:
codes[c] = a * b
elif opcode == LESS_THAN:
codes[c] = 1 if a < b else 0
elif opcode == EQUALS:
codes[c] = 1 if a == b else 0
elif opcode == JUMP_TRUE:
if a != 0:
pc = b
elif opcode == JUMP_FALSE:
if a == 0:
pc = b
elif opcode == ADD_RELATIVE_BASE:
base += a
else:
raise Exception(f"Unimplemented opcode: {opcode}")
return pc, base, codes, None
def part1(lines):
codes = defaultdict(int)
for i, code in enumerate(",".join(line.strip() for line in lines).split(",")):
codes[i] = int(code)
last_output = -1
idx = 0
base = 0
while last_output is not None:
idx, base, codes, last_output = execute(codes, [1], idx, base)
return ""
def part2(lines):
codes = defaultdict(int)
for i, code in enumerate(",".join(line.strip() for line in lines).split(",")):
codes[i] = int(code)
last_output = -1
idx = 0
base = 0
while last_output is not None:
idx, base, codes, last_output = execute(codes, [2], idx, base)
return ""
parts = (part1, part2)
if __name__ == '__main__':
lines = list(fileinput.input())
print("part 1: %s" % (part1(lines),))
print("part 2: %s" % (part2(lines),))
|
none
| 1
| 3.02779
| 3
|
|
pyrobolearn/states/robot_states/sensor_states.py
|
Pandinosaurus/pyrobolearn
| 2
|
6629144
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the various sensor states
This includes notably the camera, contact, IMU, force/torque sensors and others.
"""
import copy
from abc import ABCMeta
import collections
import numpy as np
# from pyrobolearn.states.robot_states.robot_states import RobotState
from pyrobolearn.states.state import State
from pyrobolearn.robots.legged_robot import LeggedRobot
from pyrobolearn.robots.sensors.sensor import Sensor
from pyrobolearn.robots.sensors.contact import ContactSensor
from pyrobolearn.robots.sensors.camera import CameraSensor
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class SensorState(State):
r"""Sensor state (abstract class)
"""
__metaclass__ = ABCMeta
def __init__(self, sensor, window_size=1, axis=None, ticks=1, update=False):
"""
Initialize the sensor state.
Args:
sensor (Sensor): sensor instance.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
(w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
update (bool): if we should update the sensor, or not. Note that this is normally carried out by the
`robot.step` method (which is itself called by `world.step`), so normally you shouldn't set it to True.
"""
super(SensorState, self).__init__(window_size=window_size, axis=axis, ticks=ticks)
# set the sensor instance
self.sensor = sensor
self._update = bool(update)
##############
# Properties #
##############
@property
def sensor(self):
"""Return the sensor instance."""
return self._sensor
@sensor.setter
def sensor(self, sensor):
"""Set the sensor instance."""
if not isinstance(sensor, Sensor):
raise TypeError("Expecting the given 'sensor' to be an instance of `Sensor`, instead got: "
"{}".format(type(sensor)))
self._sensor = sensor
###########
# Methods #
###########
def _read(self):
"""Read the sensor values."""
# update the sensor if specified (normally we don't need to do it as it is carried out by robot.step, or
# world.step)
if self._update:
self.sensor.sense(apply_noise=True)
# get the data from the sensor
self.data = self.sensor.data
#############
# Operators #
#############
def __copy__(self):
"""Return a shallow copy of the state. This can be overridden in the child class."""
return self.__class__(sensor=self.sensor, window_size=self.window_size, axis=self.axis, ticks=self.ticks)
def __deepcopy__(self, memo={}):
"""Return a deep copy of the state. This can be overridden in the child class.
Args:
memo (dict): memo dictionary of objects already copied during the current copying pass
"""
if self in memo:
return memo[self]
sensor = copy.deepcopy(self.sensor, memo)
state = self.__class__(sensor=sensor, window_size=self.window_size, axis=self.axis, ticks=self.ticks)
memo[self] = state
return state
class CameraState(SensorState):
r"""Camera state
"""
def __init__(self, camera, window_size=1, axis=None, ticks=1):
"""
Initialize the camera sensor state.
Args:
camera (CameraSensor): camera sensor(s).
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
(w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
"""
self.camera = camera
super(CameraState, self).__init__(sensor=camera, window_size=window_size, axis=axis, ticks=ticks)
def _read(self):
pass
class ContactState(SensorState):
r"""Contact state
Return the contact states between a link of the robot and an object in the world (including the floor).
"""
def __init__(self, contacts, window_size=1, axis=None, ticks=1):
"""Initialize the contact state.
Args:
contacts (ContactSensor, list of ContactSensor): list of contact sensor(s).
If None, it will check if the robot has some contact sensors. If there are no contact sensors, it
will check the contact with all the links.
window_size (int): window size of the state. This is the total number of states we should remember. That
is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`,
the window size is 2. By default, the :attr:`window_size` is one which means we only remember the
current state. The window size has to be bigger than 1. If it is below, it will be set automatically
to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states,
but is given some :attr:`data`.
axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with
shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting
state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack
the states in the specified axis. With the example, for axis=0, the resulting state has a shape of
(w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the
state is not a combination of states, but is given some :attr:`data`.
ticks (int): number of ticks to sleep before getting the next state data.
"""
if not isinstance(contacts, collections.Iterable):
contacts = [contacts]
for contact in contacts:
if not isinstance(contact, ContactSensor):
raise TypeError("Expecting the given 'contact' to be an instance of `ContactSensor`, instead got: "
"{}".format(type(contact)))
self.contacts = contacts
super(ContactState, self).__init__(sensor=contacts, window_size=window_size, axis=axis, ticks=ticks)
def _read(self):
contacts = np.array([int(contact.is_in_contact()) for contact in self.contacts])
self.data = contacts
class FeetContactState(ContactState):
    r"""Feet Contact State

    Return the contact states between the feet of the robot and objects in the world (including the floor).
    """

    def __init__(self, robot, contacts=None, window_size=1, axis=None, ticks=1):
        """
        Initialize the feet contact state.

        Args:
            robot (LeggedRobot): legged robot whose feet are checked for contact.
            contacts (ContactSensor, list of ContactSensor, None): contact sensor(s). If None, the robot's
                foot link ids (``robot.feet``) are used instead.
            window_size (int): window size of the state, i.e. the total number of states to remember.
                Values below 1 are clipped to 1. Only valid when the state is not a combination of states.
            axis (int, None): axis along which the states of the window are stacked. If None (default), the
                window states are concatenated into a flat array of shape (n*w,), where w is the window size.
            ticks (int): number of ticks to sleep before getting the next state data.

        Raises:
            TypeError: if `robot` is not a `LeggedRobot`, or if a foot entry is neither an int nor iterable.
            ValueError: if the robot has no feet.
        """
        # check that the given robot is a legged robot with at least one foot
        if not isinstance(robot, LeggedRobot):
            raise TypeError("Expecting the robot to be an instance of `LeggedRobot`, instead got: "
                            "{}".format(type(robot)))
        if len(robot.feet) == 0:
            raise ValueError("The given robot has no feet; please set the `feet` attribute in the robot.")
        self.robot = robot
        # if no contact sensors were given, fall back on the robot's (possibly nested) foot link ids
        if contacts is None:
            # BUG FIX: `collections.Iterable` was removed in Python 3.10; `collections.abc.Iterable`
            # is the supported location on every Python 3 version.
            from collections.abc import Iterable
            feet_ids = []
            for foot in robot.feet:
                if isinstance(foot, int):
                    feet_ids.append(foot)
                elif isinstance(foot, Iterable):
                    feet_ids.extend(foot)
                else:
                    raise TypeError("Expecting the list of feet ids to be a list of integers.")
            # NOTE(review): ContactState.__init__ rejects plain link ids (it requires ContactSensor
            # instances), so this fallback looks unfinished -- confirm against the project's ContactState.
            contacts = feet_ids
        super(FeetContactState, self).__init__(contacts, window_size=window_size, axis=axis, ticks=ticks)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the various sensor states
This includes notably the camera, contact, IMU, force/torque sensors and others.
"""
import copy
from abc import ABCMeta
import collections
import numpy as np
# from pyrobolearn.states.robot_states.robot_states import RobotState
from pyrobolearn.states.state import State
from pyrobolearn.robots.legged_robot import LeggedRobot
from pyrobolearn.robots.sensors.sensor import Sensor
from pyrobolearn.robots.sensors.contact import ContactSensor
from pyrobolearn.robots.sensors.camera import CameraSensor
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class SensorState(State):
    r"""Sensor state (abstract class)

    State whose data is read from a robot `Sensor` instance.
    """
    # NOTE(review): `__metaclass__` is Python-2 syntax and has no effect on Python 3; if abstractness
    # is required, use `class SensorState(State, metaclass=ABCMeta)` -- confirm State's metaclass first.
    __metaclass__ = ABCMeta

    def __init__(self, sensor, window_size=1, axis=None, ticks=1, update=False):
        """
        Initialize the sensor state.

        Args:
            sensor (Sensor): sensor instance the state data is read from.
            window_size (int): window size of the state, i.e. the total number of states to remember.
                Values below 1 are clipped to 1. Only valid when the state is not a combination of states.
            axis (int, None): axis along which the states of the window are stacked. If None (default), the
                window states are concatenated into a flat array of shape (n*w,), where w is the window size.
            ticks (int): number of ticks to sleep before getting the next state data.
            update (bool): if True, `_read` actively triggers `sensor.sense()` before reading its data.
                Normally sensing is already carried out by `robot.step` (itself called by `world.step`),
                so this should usually stay False.
        """
        super(SensorState, self).__init__(window_size=window_size, axis=axis, ticks=ticks)
        # set the sensor instance (validated by the property setter below)
        self.sensor = sensor
        self._update = bool(update)

    ##############
    # Properties #
    ##############

    @property
    def sensor(self):
        """Return the sensor instance."""
        return self._sensor

    @sensor.setter
    def sensor(self, sensor):
        """Set the sensor instance, checking its type."""
        if not isinstance(sensor, Sensor):
            raise TypeError("Expecting the given 'sensor' to be an instance of `Sensor`, instead got: "
                            "{}".format(type(sensor)))
        self._sensor = sensor

    ###########
    # Methods #
    ###########

    def _read(self):
        """Read the sensor values and store them as the state data."""
        # update the sensor if specified (normally we don't need to do it as it is carried out by
        # robot.step, or world.step)
        if self._update:
            self.sensor.sense(apply_noise=True)
        # get the data from the sensor
        self.data = self.sensor.data

    #############
    # Operators #
    #############

    def __copy__(self):
        """Return a shallow copy of the state. This can be overridden in the child class.

        NOTE(review): the `update` flag is not forwarded, and child classes whose `__init__` does not
        accept a `sensor` keyword (e.g. CameraState, ContactState) cannot be copied through this
        implementation -- confirm before relying on copies of subclasses.
        """
        return self.__class__(sensor=self.sensor, window_size=self.window_size, axis=self.axis, ticks=self.ticks)

    def __deepcopy__(self, memo=None):
        """Return a deep copy of the state. This can be overridden in the child class.

        Args:
            memo (dict, None): memo dictionary of objects already copied during the current copying pass.

        Returns:
            SensorState: the deep copy of this state.
        """
        # BUG FIX: `memo` used to be a mutable default argument that was written to via
        # `memo[self] = state`, so entries leaked across independent no-argument calls and stale
        # cached copies were returned; create a fresh dictionary per call instead.
        if memo is None:
            memo = {}
        if self in memo:
            return memo[self]
        sensor = copy.deepcopy(self.sensor, memo)
        state = self.__class__(sensor=sensor, window_size=self.window_size, axis=self.axis, ticks=self.ticks)
        memo[self] = state
        return state
class CameraState(SensorState):
    r"""Camera state

    Sensor state backed by a camera sensor.
    """

    def __init__(self, camera, window_size=1, axis=None, ticks=1):
        """
        Initialize the camera sensor state.

        Args:
            camera (CameraSensor): camera sensor(s).
            window_size (int): window size of the state, i.e. the total number of states to remember.
                Values below 1 are clipped to 1. Only valid when the state is not a combination of states.
            axis (int, None): axis along which the states of the window are stacked. If None (default), the
                window states are concatenated into a flat array of shape (n*w,), where w is the window size.
            ticks (int): number of ticks to sleep before getting the next state data.
        """
        # keep a direct handle on the camera as well (aliases `self.sensor`)
        self.camera = camera
        super(CameraState, self).__init__(sensor=camera, window_size=window_size, axis=axis, ticks=ticks)

    def _read(self):
        # Intentionally left as a no-op.
        # NOTE(review): this overrides SensorState._read, so `self.data` is never refreshed from the
        # camera -- confirm whether that is on purpose.
        pass
class ContactState(SensorState):
    r"""Contact state

    Return the contact states (1 = contact, 0 = no contact) between links of the robot and objects in the
    world (including the floor).
    """

    def __init__(self, contacts, window_size=1, axis=None, ticks=1):
        """Initialize the contact state.

        Args:
            contacts (ContactSensor, list of ContactSensor): contact sensor(s) the binary contact flags
                are read from.
            window_size (int): window size of the state, i.e. the total number of states to remember.
                Values below 1 are clipped to 1. Only valid when the state is not a combination of states.
            axis (int, None): axis along which the states of the window are stacked. If None (default), the
                window states are concatenated into a flat array of shape (n*w,), where w is the window size.
            ticks (int): number of ticks to sleep before getting the next state data.

        Raises:
            TypeError: if one of the given `contacts` is not a `ContactSensor` instance.
        """
        # BUG FIX: `collections.Iterable` was removed in Python 3.10; `collections.abc.Iterable`
        # is the supported location on every Python 3 version.
        from collections.abc import Iterable
        if not isinstance(contacts, Iterable):
            contacts = [contacts]
        for contact in contacts:
            if not isinstance(contact, ContactSensor):
                raise TypeError("Expecting the given 'contact' to be an instance of `ContactSensor`, instead got: "
                                "{}".format(type(contact)))
        self.contacts = contacts
        # NOTE(review): `sensor=contacts` passes a *list* to SensorState, whose setter checks for a
        # single `Sensor` instance -- confirm the setter accepts sequences.
        super(ContactState, self).__init__(sensor=contacts, window_size=window_size, axis=axis, ticks=ticks)

    def _read(self):
        """Read every contact sensor and store the binary contact flags as the state data."""
        contacts = np.array([int(contact.is_in_contact()) for contact in self.contacts])
        self.data = contacts
class FeetContactState(ContactState):
    r"""Feet Contact State

    Return the contact states between the feet of the robot and objects in the world (including the floor).
    """

    def __init__(self, robot, contacts=None, window_size=1, axis=None, ticks=1):
        """
        Initialize the feet contact state.

        Args:
            robot (LeggedRobot): legged robot whose feet are checked for contact.
            contacts (ContactSensor, list of ContactSensor, None): contact sensor(s). If None, the robot's
                foot link ids (``robot.feet``) are used instead.
            window_size (int): window size of the state, i.e. the total number of states to remember.
                Values below 1 are clipped to 1. Only valid when the state is not a combination of states.
            axis (int, None): axis along which the states of the window are stacked. If None (default), the
                window states are concatenated into a flat array of shape (n*w,), where w is the window size.
            ticks (int): number of ticks to sleep before getting the next state data.

        Raises:
            TypeError: if `robot` is not a `LeggedRobot`, or if a foot entry is neither an int nor iterable.
            ValueError: if the robot has no feet.
        """
        # check that the given robot is a legged robot with at least one foot
        if not isinstance(robot, LeggedRobot):
            raise TypeError("Expecting the robot to be an instance of `LeggedRobot`, instead got: "
                            "{}".format(type(robot)))
        if len(robot.feet) == 0:
            raise ValueError("The given robot has no feet; please set the `feet` attribute in the robot.")
        self.robot = robot
        # if no contact sensors were given, fall back on the robot's (possibly nested) foot link ids
        if contacts is None:
            # BUG FIX: `collections.Iterable` was removed in Python 3.10; `collections.abc.Iterable`
            # is the supported location on every Python 3 version.
            from collections.abc import Iterable
            feet_ids = []
            for foot in robot.feet:
                if isinstance(foot, int):
                    feet_ids.append(foot)
                elif isinstance(foot, Iterable):
                    feet_ids.extend(foot)
                else:
                    raise TypeError("Expecting the list of feet ids to be a list of integers.")
            # NOTE(review): ContactState.__init__ rejects plain link ids (it requires ContactSensor
            # instances), so this fallback looks unfinished -- confirm against ContactState above.
            contacts = feet_ids
        super(FeetContactState, self).__init__(contacts, window_size=window_size, axis=axis, ticks=ticks)
|
en
| 0.864068
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Define the various sensor states This includes notably the camera, contact, IMU, force/torque sensors and others. # from pyrobolearn.states.robot_states.robot_states import RobotState Sensor state (abstract class) Initialize the sensor state. Args: sensor (Sensor): sensor instance. window_size (int): window size of the state. This is the total number of states we should remember. That is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`, the window size is 2. By default, the :attr:`window_size` is one which means we only remember the current state. The window size has to be bigger than 1. If it is below, it will be set automatically to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states, but is given some :attr:`data`. axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack the states in the specified axis. With the example, for axis=0, the resulting state has a shape of (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the state is not a combination of states, but is given some :attr:`data`. ticks (int): number of ticks to sleep before getting the next state data. update (bool): if we should update the sensor, or not. Note that this is normally carried out by the `robot.step` method (which is itself called by `world.step`), so normally you shouldn't set it to True. # set the sensor instance ############## # Properties # ############## Return the sensor instance. Set the sensor instance. ########### # Methods # ########### Read the sensor values. 
# update the sensor if specified (normally we don't need to do it as it is carried out by robot.step, or # world.step) # get the data from the sensor ############# # Operators # ############# Return a shallow copy of the state. This can be overridden in the child class. Return a deep copy of the state. This can be overridden in the child class. Args: memo (dict): memo dictionary of objects already copied during the current copying pass Camera state Initialize the camera sensor state. Args: camera (CameraSensor): camera sensor(s). window_size (int): window size of the state. This is the total number of states we should remember. That is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`, the window size is 2. By default, the :attr:`window_size` is one which means we only remember the current state. The window size has to be bigger than 1. If it is below, it will be set automatically to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states, but is given some :attr:`data`. axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack the states in the specified axis. With the example, for axis=0, the resulting state has a shape of (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the state is not a combination of states, but is given some :attr:`data`. ticks (int): number of ticks to sleep before getting the next state data. Contact state Return the contact states between a link of the robot and an object in the world (including the floor). Initialize the contact state. Args: contacts (ContactSensor, list of ContactSensor): list of contact sensor(s). 
If None, it will check if the robot has some contact sensors. If there are no contact sensors, it will check the contact with all the links. window_size (int): window size of the state. This is the total number of states we should remember. That is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`, the window size is 2. By default, the :attr:`window_size` is one which means we only remember the current state. The window size has to be bigger than 1. If it is below, it will be set automatically to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states, but is given some :attr:`data`. axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack the states in the specified axis. With the example, for axis=0, the resulting state has a shape of (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the state is not a combination of states, but is given some :attr:`data`. ticks (int): number of ticks to sleep before getting the next state data. Feet Contact State Return the contact states between the foot of the robot and an object in the world (including the floor). Initialize the feet contact state. Args: robot (LeggedRobot): legged robot. contacts (ContactSensor, list of ContactSensor, None): list of contact sensors. window_size (int): window size of the state. This is the total number of states we should remember. That is, if the user wants to remember the current state :math:`s_t` and the previous state :math:`s_{t-1}`, the window size is 2. By default, the :attr:`window_size` is one which means we only remember the current state. The window size has to be bigger than 1. 
If it is below, it will be set automatically to 1. The :attr:`window_size` attribute is only valid when the state is not a combination of states, but is given some :attr:`data`. axis (int, None): axis to concatenate or stack the states in the current window. If you have a state with shape (n,), then if the axis is None (by default), it will just concatenate it such that resulting state has a shape (n*w,) where w is the window size. If the axis is an integer, then it will just stack the states in the specified axis. With the example, for axis=0, the resulting state has a shape of (w,n), and for axis=-1 or 1, it will have a shape of (n,w). The :attr:`axis` attribute is only when the state is not a combination of states, but is given some :attr:`data`. ticks (int): number of ticks to sleep before getting the next state data. # check if the robot has feet # check if the contact sensors or link ids are valid
| 2.728798
| 3
|
dizoo/league_demo/league_demo_ppo_config.py
|
jayyoung0802/DI-engine
| 1
|
6629145
|
<filename>dizoo/league_demo/league_demo_ppo_config.py<gh_stars>1-10
from easydict import EasyDict
from torch.nn.modules.activation import Threshold
# League-demo PPO configuration for the two-player prisoner's-dilemma environment.
league_demo_ppo_config = {
    'exp_name': "league_demo_ppo",
    # environment settings
    'env': {
        'collector_env_num': 8,
        'evaluator_env_num': 10,
        'n_evaluator_episode': 100,
        'env_type': 'prisoner_dilemma',  # one of ['zero_sum', 'prisoner_dilemma']
        'stop_value': [-10.1, -5.05],  # stop values for the prisoner_dilemma setting
    },
    # PPO policy settings
    'policy': {
        'cuda': False,
        'action_space': 'discrete',
        'model': {
            'obs_shape': 2,
            'action_shape': 2,
            'action_space': 'discrete',
            'encoder_hidden_size_list': [32, 32],
            'critic_head_hidden_size': 32,
            'actor_head_hidden_size': 32,
            'share_encoder': False,
        },
        'learn': {
            'update_per_collect': 3,
            'batch_size': 32,
            'learning_rate': 0.00001,
            'entropy_weight': 0.0,
            'learner': {'log_policy': False},
        },
        'collect': {
            'n_episode': 128,
            'unroll_len': 1,
            'discount_factor': 1.0,
            'gae_lambda': 1.0,
            'collector': {'get_train_sample': True},
        },
        'other': {
            # self-play league settings
            'league': {
                'player_category': ['default'],
                'path_policy': "league_demo_ppo/policy",
                'active_players': {
                    'main_player': 1,
                    'main_exploiter': 1,
                    'league_exploiter': 1,
                },
                'main_player': {
                    'one_phase_step': 200,
                    'branch_probs': {'pfsp': 0.5, 'sp': 0.5},
                    'strong_win_rate': 0.7,
                },
                'main_exploiter': {
                    'one_phase_step': 200,
                    'branch_probs': {'main_players': 1.0},
                    'strong_win_rate': 0.7,
                    'min_valid_win_rate': 0.3,
                },
                'league_exploiter': {
                    'one_phase_step': 200,
                    'branch_probs': {'pfsp': 1.0},
                    'strong_win_rate': 0.7,
                    'mutate_prob': 0.5,
                },
                'use_pretrain': False,
                'use_pretrain_init_historical': False,
                'payoff': {
                    'type': 'battle',
                    'decay': 0.99,
                    'min_win_rate_games': 8,
                },
                # rating metric parameters (TrueSkill-like mu/sigma/beta/tau -- verify against the league code)
                'metric': {
                    'mu': 0,
                    'sigma': 25 / 3,
                    'beta': 25 / 3 / 2,
                    'tau': 0.0,
                    'draw_probability': 0.02,
                },
            },
        },
    },
}
league_demo_ppo_config = EasyDict(league_demo_ppo_config)
# This config file can be executed by `dizoo/league_demo/league_demo_ppo_main.py`
|
<filename>dizoo/league_demo/league_demo_ppo_config.py<gh_stars>1-10
from easydict import EasyDict
from torch.nn.modules.activation import Threshold
# Environment: two-agent prisoner's dilemma used for the league demo.
_env_cfg = dict(
    collector_env_num=8,
    evaluator_env_num=10,
    n_evaluator_episode=100,
    env_type='prisoner_dilemma',  # choose from ['zero_sum', 'prisoner_dilemma']
    stop_value=[-10.1, -5.05],  # stop values for the prisoner_dilemma setting
)
# Network architecture for the PPO actor/critic.
_model_cfg = dict(
    obs_shape=2,
    action_shape=2,
    action_space='discrete',
    encoder_hidden_size_list=[32, 32],
    critic_head_hidden_size=32,
    actor_head_hidden_size=32,
    share_encoder=False,
)
# Self-play league configuration (active players, branching and payoff bookkeeping).
_league_cfg = dict(
    player_category=['default'],
    path_policy="league_demo_ppo/policy",
    active_players=dict(
        main_player=1,
        main_exploiter=1,
        league_exploiter=1,
    ),
    main_player=dict(
        one_phase_step=200,
        branch_probs=dict(pfsp=0.5, sp=0.5),
        strong_win_rate=0.7,
    ),
    main_exploiter=dict(
        one_phase_step=200,
        branch_probs=dict(main_players=1.0),
        strong_win_rate=0.7,
        min_valid_win_rate=0.3,
    ),
    league_exploiter=dict(
        one_phase_step=200,
        branch_probs=dict(pfsp=1.0),
        strong_win_rate=0.7,
        mutate_prob=0.5,
    ),
    use_pretrain=False,
    use_pretrain_init_historical=False,
    payoff=dict(
        type='battle',
        decay=0.99,
        min_win_rate_games=8,
    ),
    metric=dict(
        mu=0,
        sigma=25 / 3,
        beta=25 / 3 / 2,
        tau=0.0,
        draw_probability=0.02,
    ),
)
# PPO policy configuration composed from the pieces above.
_policy_cfg = dict(
    cuda=False,
    action_space='discrete',
    model=_model_cfg,
    learn=dict(
        update_per_collect=3,
        batch_size=32,
        learning_rate=0.00001,
        entropy_weight=0.0,
        learner=dict(log_policy=False),
    ),
    collect=dict(
        n_episode=128,
        unroll_len=1,
        discount_factor=1.0,
        gae_lambda=1.0,
        collector=dict(get_train_sample=True),
    ),
    other=dict(league=_league_cfg),
)
league_demo_ppo_config = EasyDict(dict(exp_name="league_demo_ppo", env=_env_cfg, policy=_policy_cfg))
# This config file can be executed by `dizoo/league_demo/league_demo_ppo_main.py`
|
en
| 0.649144
|
# ['zero_sum', 'prisoner_dilemma'] # prisoner_dilemma # This config file can be executed by `dizoo/league_demo/league_demo_ppo_main.py`
| 1.738853
| 2
|
src/interledger/adapter/indy.py
|
SOFIE-project/interledger-asset-transfer
| 4
|
6629146
|
import json
import pprint
from indy import pool, ledger, wallet, did
from indy.error import IndyError
from .interfaces import Initiator
from ..transfer import Transfer
def print_log(value_color="", value_noncolor=""):
    """Echo *value_color* highlighted in terminal green, immediately followed by *value_noncolor*."""
    start, stop = '\033[92m', '\033[0m'
    message = start + value_color + stop + str(value_noncolor)
    print(message)
class IndyInitializer:
    """This provides the proper Hyperledger Indy client wrapper.

    It lazily sets up everything an Indy client needs: a local pool ledger configuration, a pool
    handle, a secure wallet, and a client DID/verkey pair.
    """
    def __init__(self, pool_name, protocol_version, genesis_file_path, \
        wallet_id, wallet_key):
        """Store the connection parameters; no ledger interaction happens until `create_handlers`.

        Args:
            pool_name (str): name of the local pool ledger configuration.
            protocol_version (int): Indy protocol version passed to `pool.set_protocol_version`.
            genesis_file_path: path to the pool genesis transaction file (stringified into the config).
            wallet_id (str): identifier of the wallet to create/open.
            wallet_key (str): key used to protect the wallet.
        """
        self.ready = False  # initial status is False before all handlers are ready
        self.pool_name = pool_name
        self.protocol_version = protocol_version
        self.pool_config = json.dumps({'genesis_txn': str(genesis_file_path)})
        self.wallet_config = json.dumps({"id": wallet_id})
        self.wallet_credentials = json.dumps({"key": wallet_key})
        # handles and identity fields below are filled in by `create_handlers`
        self.pool_handle = None
        self.wallet_handle = None
        self.client_did = None
        self.client_verkey = None
    async def create_handlers(self) -> bool:
        """Create the pool ledger config, open pool and wallet handles, and create the client DID.

        Existing pool/wallet configurations with the same name are deleted and recreated.

        Returns:
            bool: True if all handlers are ready, False if an `IndyError` occurred.
        """
        try:
            await pool.set_protocol_version(self.protocol_version)
            print_log('Creates a new local pool ledger configuration that is used '
                'later when connecting to ledger.\n')
            # recreate the pool ledger config from scratch if one with this name already exists
            try:
                await pool.create_pool_ledger_config(config_name=self.pool_name, config=self.pool_config)
            except IndyError:
                await pool.delete_pool_ledger_config(config_name=self.pool_name)
                await pool.create_pool_ledger_config(config_name=self.pool_name, config=self.pool_config)
            print_log('\nOpen pool ledger and get handle from libindy\n')
            self.pool_handle = await pool.open_pool_ledger(config_name=self.pool_name, config=None)
            print_log('\nCreating new secure wallet\n')
            # same recreate-on-conflict strategy for the wallet
            try:
                await wallet.create_wallet(self.wallet_config, self.wallet_credentials)
            except IndyError:
                await wallet.delete_wallet(self.wallet_config, self.wallet_credentials)
                await wallet.create_wallet(self.wallet_config, self.wallet_credentials)
            print_log('\nOpen wallet and get handle from libindy\n')
            self.wallet_handle = await wallet.open_wallet(self.wallet_config, self.wallet_credentials)
            print_log('\nGenerating and storing DID and verkey representing a Client '
                'that wants to obtain Trust Anchor Verkey\n')
            self.client_did, self.client_verkey = await did.create_and_store_my_did(self.wallet_handle, "{}")
            print_log('Client DID: ', self.client_did)
            print_log('Client Verkey: ', self.client_verkey)
            self.ready = True
        except IndyError as e:
            print('Error occurred: %s' % e)
            self.ready = False
        return self.ready
class IndyInitiator(IndyInitializer, Initiator):
    """Indy implementation of the Initiator.

    Polls the ledger for changes to the verkey of `target_did` and turns every observed change
    into a `Transfer`.
    """
    def __init__(self, target_did, pool_name, protocol_version, genesis_file_path, \
        wallet_id, wallet_key):
        """Initialize the Indy initiator.

        Args:
            target_did (str): DID whose verkey is monitored on the ledger.
            pool_name (str): name of the local pool ledger configuration.
            protocol_version (int): Indy protocol version.
            genesis_file_path: path to the pool genesis transaction file.
            wallet_id (str): identifier of the wallet to create/open.
            wallet_key (str): key used to protect the wallet.
        """
        IndyInitializer.__init__(self, pool_name, protocol_version, genesis_file_path, \
            wallet_id, wallet_key)
        self.target_did = target_did
        self.verkey = None  # last verkey observed for `target_did`
        self.entries = []  # (verkey, GET_NYM response) pairs of the latest poll
    async def listen_for_events(self) -> list:
        """Listen for entries of changes on the Hyperledger Indy ledger and generate transfers accordingly.

        :returns: The event transfer lists
        :rtype: list
        """
        if not self.ready:
            # BUG FIX: `create_handlers` is an instance method; it used to be called on the class
            # (`IndyInitializer.create_handlers()`), which raised a TypeError since `self` was missing.
            res = await self.create_handlers()
            if not res:
                # `exit()` only exists when the `site` module is loaded; raising SystemExit directly
                # behaves the same and also works under `python -S` or embedded interpreters.
                raise SystemExit(1)
        # get entries from GET_NYM responses
        get_nym_request = await ledger.build_get_nym_request(submitter_did=self.client_did,
                                                             target_did=self.target_did)
        get_nym_response_json = await ledger.submit_request(pool_handle=self.pool_handle,
                                                            request_json=get_nym_request)
        get_nym_response = json.loads(get_nym_response_json)
        print_log('GET_NYM response: ')
        pprint.pprint(get_nym_response)
        # only report an entry when the verkey actually changed since the last poll
        verkey_from_ledger = json.loads(get_nym_response['result']['data'])['verkey']
        if verkey_from_ledger != self.verkey:
            self.verkey = verkey_from_ledger
            self.entries = [(verkey_from_ledger, get_nym_response)]
        else:
            self.entries = []
        # convert to transfers and return
        return self._buffer_data(self.entries)
    # Helper function
    def _buffer_data(self, entries: list):
        """Helper function to create a list of Transfer objects from a list of indy event entries.

        Args:
            entries (list): (verkey, GET_NYM response) tuples.

        Returns:
            list: one `Transfer` per entry (empty when there are no entries).
        """
        transfers = []
        for entry in entries:
            transfer = Transfer()
            transfer.payload = {'id': entry[0], 'data': entry[1]}  # id will be string inside interledger
            transfers.append(transfer)
        # BUG FIX: the debug print used to reference the loop variable unconditionally, which raised a
        # NameError whenever `entries` was empty; only print when something was actually buffered.
        if transfers:
            print("*********** buffer data *************")
            print(f"{transfers[-1].payload}")
            print("*************************************")
        return transfers
    async def commit_sending(self, id: str) -> dict:
        """Initiate the commit operation to the connected HyperLedger Indy network.

        :param str id: the identifier in the originating ledger for a data item
        :rtype: dict {
            'commit_status': bool,
            'commit_tx_hash': str,
            'exception': object,        # only with errors
            'commit_error_code': Enum,  # only with errors
            'commit_message': str       # only with errors
        }
        """
        # placeholder implementation: always reports success with a fake transaction hash
        return {"commit_status": True,
                "commit_tx_hash": "0xfake_tx_hash"}
    async def abort_sending(self, id: str, reason: int):
        """Initiate the abort operation to the connected HyperLedger Indy network.

        :param str id: the identifier in the originating ledger for a data item
        :param int reason: the code to signal the predefined reasons
        :rtype: dict {
            'abort_status': bool,
            'abort_tx_hash': str,
            'exception': object,       # only with errors
            'abort_error_code': Enum,  # only with errors
            'abort_message': str       # only with errors
        }
        """
        # placeholder implementation: always reports success with a fake transaction hash
        return {"abort_status": True,
                "abort_tx_hash": "0xfake_tx_hash"}
|
import json
import pprint
from indy import pool, ledger, wallet, did
from indy.error import IndyError
from .interfaces import Initiator
from ..transfer import Transfer
def print_log(value_color="", value_noncolor=""):
    """Print ``value_color`` in terminal green, immediately followed by ``value_noncolor`` uncolored."""
    green = '\033[92m'
    reset = '\033[0m'
    print(green + value_color + reset + str(value_noncolor))
class IndyInitializer:
    """This provides the proper Hyperledger Indy client wrapper.

    Sets up the pieces an Indy client needs: a local pool ledger configuration, a pool handle,
    a secure wallet, and a client DID/verkey pair.
    """
    def __init__(self, pool_name, protocol_version, genesis_file_path, \
        wallet_id, wallet_key):
        """Store the connection parameters; the actual setup happens in `create_handlers`.

        Args:
            pool_name (str): name of the local pool ledger configuration.
            protocol_version (int): Indy protocol version passed to `pool.set_protocol_version`.
            genesis_file_path: path to the pool genesis transaction file (stringified into the config).
            wallet_id (str): identifier of the wallet to create/open.
            wallet_key (str): key used to protect the wallet.
        """
        self.ready = False  # initial status is False before all handlers are ready
        self.pool_name = pool_name
        self.protocol_version = protocol_version
        self.pool_config = json.dumps({'genesis_txn': str(genesis_file_path)})
        self.wallet_config = json.dumps({"id": wallet_id})
        self.wallet_credentials = json.dumps({"key": wallet_key})
        # handles and identity fields below are populated by `create_handlers`
        self.pool_handle = None
        self.wallet_handle = None
        self.client_did = None
        self.client_verkey = None
    async def create_handlers(self) -> bool:
        """Create the pool ledger config, open pool and wallet handles, and create the client DID.

        Existing pool/wallet configurations with the same name are deleted and recreated.

        Returns:
            bool: True if all handlers are ready, False if an `IndyError` occurred.
        """
        try:
            await pool.set_protocol_version(self.protocol_version)
            print_log('Creates a new local pool ledger configuration that is used '
                'later when connecting to ledger.\n')
            # recreate the pool ledger config from scratch if one with this name already exists
            try:
                await pool.create_pool_ledger_config(config_name=self.pool_name, config=self.pool_config)
            except IndyError:
                await pool.delete_pool_ledger_config(config_name=self.pool_name)
                await pool.create_pool_ledger_config(config_name=self.pool_name, config=self.pool_config)
            print_log('\nOpen pool ledger and get handle from libindy\n')
            self.pool_handle = await pool.open_pool_ledger(config_name=self.pool_name, config=None)
            print_log('\nCreating new secure wallet\n')
            # same recreate-on-conflict strategy for the wallet
            try:
                await wallet.create_wallet(self.wallet_config, self.wallet_credentials)
            except IndyError:
                await wallet.delete_wallet(self.wallet_config, self.wallet_credentials)
                await wallet.create_wallet(self.wallet_config, self.wallet_credentials)
            print_log('\nOpen wallet and get handle from libindy\n')
            self.wallet_handle = await wallet.open_wallet(self.wallet_config, self.wallet_credentials)
            print_log('\nGenerating and storing DID and verkey representing a Client '
                'that wants to obtain Trust Anchor Verkey\n')
            self.client_did, self.client_verkey = await did.create_and_store_my_did(self.wallet_handle, "{}")
            print_log('Client DID: ', self.client_did)
            print_log('Client Verkey: ', self.client_verkey)
            self.ready = True
        except IndyError as e:
            print('Error occurred: %s' % e)
            self.ready = False
        return self.ready
class IndyInitiator(IndyInitializer, Initiator):
    """Indy implementation of the Initiator.

    Polls the ledger for changes to the verkey of `target_did` and turns every observed change
    into a `Transfer`.
    """
    def __init__(self, target_did, pool_name, protocol_version, genesis_file_path, \
        wallet_id, wallet_key):
        """Initialize the Indy initiator.

        Args:
            target_did (str): DID whose verkey is monitored on the ledger.
            pool_name (str): name of the local pool ledger configuration.
            protocol_version (int): Indy protocol version.
            genesis_file_path: path to the pool genesis transaction file.
            wallet_id (str): identifier of the wallet to create/open.
            wallet_key (str): key used to protect the wallet.
        """
        IndyInitializer.__init__(self, pool_name, protocol_version, genesis_file_path, \
            wallet_id, wallet_key)
        self.target_did = target_did
        self.verkey = None  # last verkey observed for `target_did`
        self.entries = []  # (verkey, GET_NYM response) pairs of the latest poll
    async def listen_for_events(self) -> list:
        """Listen for entries of changes on the Hyperledger Indy ledger and generate transfers accordingly.

        :returns: The event transfer lists
        :rtype: list
        """
        if not self.ready:
            # BUG FIX: `create_handlers` is an instance method; it used to be called on the class
            # (`IndyInitializer.create_handlers()`), which raised a TypeError since `self` was missing.
            res = await self.create_handlers()
            if not res:
                # `exit()` only exists when the `site` module is loaded; raising SystemExit directly
                # behaves the same and also works under `python -S` or embedded interpreters.
                raise SystemExit(1)
        # get entries from GET_NYM responses
        get_nym_request = await ledger.build_get_nym_request(submitter_did=self.client_did,
                                                             target_did=self.target_did)
        get_nym_response_json = await ledger.submit_request(pool_handle=self.pool_handle,
                                                            request_json=get_nym_request)
        get_nym_response = json.loads(get_nym_response_json)
        print_log('GET_NYM response: ')
        pprint.pprint(get_nym_response)
        # only report an entry when the verkey actually changed since the last poll
        verkey_from_ledger = json.loads(get_nym_response['result']['data'])['verkey']
        if verkey_from_ledger != self.verkey:
            self.verkey = verkey_from_ledger
            self.entries = [(verkey_from_ledger, get_nym_response)]
        else:
            self.entries = []
        # convert to transfers and return
        return self._buffer_data(self.entries)
    # Helper function
    def _buffer_data(self, entries: list):
        """Helper function to create a list of Transfer objects from a list of indy event entries.

        Args:
            entries (list): (verkey, GET_NYM response) tuples.

        Returns:
            list: one `Transfer` per entry (empty when there are no entries).
        """
        transfers = []
        for entry in entries:
            transfer = Transfer()
            transfer.payload = {'id': entry[0], 'data': entry[1]}  # id will be string inside interledger
            transfers.append(transfer)
        # BUG FIX: the debug print used to reference the loop variable unconditionally, which raised a
        # NameError whenever `entries` was empty; only print when something was actually buffered.
        if transfers:
            print("*********** buffer data *************")
            print(f"{transfers[-1].payload}")
            print("*************************************")
        return transfers
    async def commit_sending(self, id: str) -> dict:
        """Initiate the commit operation to the connected HyperLedger Indy network.

        :param str id: the identifier in the originating ledger for a data item
        :rtype: dict {
            'commit_status': bool,
            'commit_tx_hash': str,
            'exception': object,        # only with errors
            'commit_error_code': Enum,  # only with errors
            'commit_message': str       # only with errors
        }
        """
        # placeholder implementation: always reports success with a fake transaction hash
        return {"commit_status": True,
                "commit_tx_hash": "0xfake_tx_hash"}
    async def abort_sending(self, id: str, reason: int):
        """Initiate the abort operation to the connected HyperLedger Indy network.

        :param str id: the identifier in the originating ledger for a data item
        :param int reason: the code to signal the predefined reasons
        :rtype: dict {
            'abort_status': bool,
            'abort_tx_hash': str,
            'exception': object,       # only with errors
            'abort_error_code': Enum,  # only with errors
            'abort_message': str       # only with errors
        }
        """
        # placeholder implementation: always reports success with a fake transaction hash
        return {"abort_status": True,
                "abort_tx_hash": "0xfake_tx_hash"}
|
en
| 0.636962
|
set the colors for text. This provides the proper Hyperledger Indy client wrapper # intial status is False before all handlers are ready Indy implementation of the Initiator. Listen for entries of changes on the Hyperledger Indy ledger and generate transfers accordingly. :returns: The event transfer lists :rtype: list # get entries from GET_NYM responses # check verkey # convert to transfers and return # Helper function Helper function to create a list of Transfer object from a list of indy entries of event # id will be string inside interledger Initiate the commit operation to the connected HyperLedger Indy network. :param str id: the identifier in the originating ledger for a data item :rtype: dict { 'commit_status': bool, 'commit_tx_hash': str, 'exception': object,# only with errors 'commit_error_code': Enum, # only with errors 'commit_message': str # only with errors } Initiate the abort operation to the connected HyperLedger Fabric. :param str id: the identifier in the originating ledger for a data item :param int reason: the code to signal the predefined reasons :rtype: dict { 'abort_status': bool, 'abort_tx_hash': str, 'exception': object,# only with errors 'abort_error_code': Enum, # only with errors 'abort_message': str # only with errors }
| 2.169768
| 2
|
src/helper/gpa.py
|
JosephSalomon/GN-Core
| 1
|
6629147
|
<gh_stars>1-10
###***********************************###
'''
Grade Notifier
File: gpa.py
Author: <NAME>
Core Maintainers: <NAME>, <NAME>,
<NAME>
Copyright: Copyright 2019, <NAME>
License: MIT
'''
###***********************************###
class GPA():
    """Container for a student's term and cumulative GPA values, with
    helpers to convert numeric GPAs to letter grades and letter grades
    to percentage-range strings."""

    # Class-level defaults; instances shadow these in __init__.
    _term_gpa = 0
    _cumulative_gpa = 0

    def __init__(self, term_gpa=0, cumulative_gpa=0):
        self._term_gpa = term_gpa
        self._cumulative_gpa = cumulative_gpa

    def get_cumulative_gpa(self):
        """Return the cumulative (overall) GPA as a number."""
        return self._cumulative_gpa

    def get_term_gpa(self):
        """Return the current term's GPA as a number."""
        return self._term_gpa

    @staticmethod
    def get_letter_grade(gpa):
        """Return the letter grades for a ``GPA`` instance.

        :param gpa: a ``GPA`` instance
        :returns: dict with 'term_gpa' and 'cumulative_gpa' letter grades
        """
        return {
            'term_gpa': GPA.convert_float(gpa.get_term_gpa()),
            'cumulative_gpa': GPA.convert_float(gpa.get_cumulative_gpa())
        }

    @staticmethod
    def get_number_grade(gpa):
        """Return the percentage ranges for a ``GPA`` instance.

        :param gpa: a ``GPA`` instance
        :returns: dict with 'term_gpa' and 'cumulative_gpa' percentage ranges
        """
        return {
            'term_gpa':
                GPA.convert_letter(GPA.convert_float(gpa.get_term_gpa())),
            'cumulative_gpa':
                GPA.convert_letter(GPA.convert_float(gpa.get_cumulative_gpa()))
        }

    @staticmethod
    def convert_float(f):
        """Convert a numeric GPA (0.0-4.0 scale) to a letter grade.

        Bug fix: negative values previously fell through every range test
        and incorrectly returned 'A'; they now map to 'F'. Values of 4.0
        and above map to 'A' as before.
        """
        if f < 0:
            # Invalid (negative) GPA: report the lowest grade instead of
            # falling through to the 'A' catch-all.
            return 'F'
        if 0 <= f < 1:
            return 'F'
        elif 1 <= f < 1.3:
            return 'D'
        elif 1.3 <= f < 1.7:
            return 'D+'
        elif 1.7 <= f < 2:
            return 'C-'
        elif 2 <= f < 2.3:
            return 'C'
        elif 2.3 <= f < 2.7:
            return 'C+'
        elif 2.7 <= f < 3:
            return 'B-'
        elif 3 <= f < 3.3:
            return 'B'
        elif 3.3 <= f < 3.7:
            return 'B+'
        elif 3.7 <= f < 4:
            return 'A-'
        else:
            return 'A'

    @staticmethod
    def convert_letter(l):
        """Convert a letter grade to its percentage-range string.

        :raises KeyError: if ``l`` is not a recognized letter grade
        """
        scale = {
            'A+': '97 - 100',
            'A': '93 - 96',
            'A-': '90 - 92',
            'B+': '87 - 89',
            'B': '83 - 86',
            'B-': '80 - 82',
            'C+': '77 - 79',
            'C': '73 - 76',
            'C-': '70 - 72',
            'D+': '67 - 69',
            'D': '65 - 66',
            'F': '0'
        }
        return scale[l]
|
###***********************************###
'''
Grade Notifier
File: gpa.py
Author: <NAME>
Core Maintainers: <NAME>, <NAME>,
<NAME>
Copyright: Copyright 2019, <NAME>
License: MIT
'''
###***********************************###
class GPA():
    """A pair of GPA figures (current term and cumulative) plus static
    helpers that translate numeric GPAs into letter grades and letter
    grades into percentage-range strings."""

    # Defaults live at class level; __init__ shadows them per instance.
    _term_gpa = 0
    _cumulative_gpa = 0

    def __init__(self, term_gpa=0, cumulative_gpa=0):
        self._term_gpa = term_gpa
        self._cumulative_gpa = cumulative_gpa

    def get_term_gpa(self):
        """The GPA for the current term."""
        return self._term_gpa

    def get_cumulative_gpa(self):
        """The overall (cumulative) GPA."""
        return self._cumulative_gpa

    @staticmethod
    def get_letter_grade(gpa):
        """Letter-grade view of a ``GPA`` instance's two figures."""
        term = GPA.convert_float(gpa.get_term_gpa())
        overall = GPA.convert_float(gpa.get_cumulative_gpa())
        return {'term_gpa': term, 'cumulative_gpa': overall}

    @staticmethod
    def get_number_grade(gpa):
        """Percentage-range view of a ``GPA`` instance's two figures."""
        letters = GPA.get_letter_grade(gpa)
        return {
            'term_gpa': GPA.convert_letter(letters['term_gpa']),
            'cumulative_gpa': GPA.convert_letter(letters['cumulative_gpa'])
        }

    @staticmethod
    def convert_float(f):
        """Map a numeric GPA to its letter grade.

        Values below 0 and at/above 4.0 both map to 'A', matching the
        original chain of range checks.
        """
        if f < 0:
            # Preserved quirk: negative input skipped every range check in
            # the original and landed on the 'A' catch-all.
            return 'A'
        cutoffs = (
            (1, 'F'), (1.3, 'D'), (1.7, 'D+'), (2, 'C-'), (2.3, 'C'),
            (2.7, 'C+'), (3, 'B-'), (3.3, 'B'), (3.7, 'B+'), (4, 'A-'),
        )
        for upper, letter in cutoffs:
            if f < upper:
                return letter
        return 'A'

    @staticmethod
    def convert_letter(l):
        """Map a letter grade to its percentage-range string.

        :raises KeyError: if ``l`` is not a recognized letter grade
        """
        ranges = {
            'A+': '97 - 100',
            'A': '93 - 96',
            'A-': '90 - 92',
            'B+': '87 - 89',
            'B': '83 - 86',
            'B-': '80 - 82',
            'C+': '77 - 79',
            'C': '73 - 76',
            'C-': '70 - 72',
            'D+': '67 - 69',
            'D': '65 - 66',
            'F': '0'
        }
        return ranges[l]
|
en
| 0.414637
|
###***********************************### Grade Notifier File: gpa.py Author: <NAME> Core Maintainers: <NAME>, <NAME>, <NAME> Copyright: Copyright 2019, <NAME> License: MIT ###***********************************###
| 3.1737
| 3
|
dendropy/datamodel/treecollectionmodel.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
| 0
|
6629148
|
#! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
This module handles the core definition of classes that model collections of
trees.
"""
import collections
import math
import copy
import sys
from dendropy.utility import container
from dendropy.utility import error
from dendropy.utility import bitprocessing
from dendropy.utility import deprecate
from dendropy.utility import constants
from dendropy.calculate import statistics
from dendropy.datamodel import basemodel
from dendropy.datamodel import taxonmodel
from dendropy.datamodel import treemodel
from dendropy import dataio
##############################################################################
### TreeList
class TreeList(
taxonmodel.TaxonNamespaceAssociated,
basemodel.Annotable,
basemodel.Deserializable,
basemodel.MultiReadable,
basemodel.Serializable,
basemodel.DataObject):
"""
A collection of |Tree| objects, all referencing the same "universe" of
opeational taxonomic unit concepts through the same |TaxonNamespace|
object reference.
"""
    def _parse_and_create_from_stream(cls,
            stream,
            schema,
            collection_offset=None,
            tree_offset=None,
            **kwargs):
        """
        Constructs a new |TreeList| object and populates it with trees from
        file-like object ``stream``.

        Notes
        -----
        *All* operational taxonomic unit concepts in the data source will be included
        in the |TaxonNamespace| object associated with the new
        |TreeList| object and its contained |Tree| objects, even those
        not associated with trees or the particular trees being retrieved.

        Parameters
        ----------
        stream : file or file-like object
            Source of data.
        schema : string
            Identifier of format of data in ``stream``
        collection_offset : integer or None
            0-based index indicating collection of trees to parse. If |None|,
            then all tree collections are retrieved, with each distinct
            collection parsed into a separate |TreeList| object. If the
            tree collection offset index is equal or greater than the number of
            tree collections in the data source, then IndexError is raised.
            Negative offsets work like negative list indexes; e.g., a
            ``collection_offset`` of -1 means to read the last collection of
            trees in the data source. For data formats that do not support the
            concept of distinct tree collections (e.g. NEWICK) are considered
            single-collection data source (i.e., the only acceptable
            ``collection_offset`` values are -1 or 0).
        tree_offset : integer or None
            0-based index indicating particular tree within a particular
            collection of trees at which to begin reading. If not specified or
            |None| (default), then all trees are parsed. Otherwise, must be an
            integer value up the length of the collection minus 1. A positive
            offset indicates the number of trees in the collection to skip;
            e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the
            collection. Negative offsets work like negative list indexes;
            e.g., a ``tree_offset`` value of -10 means to retrieve the last 10
            trees in the collection. If the tree offset index is equal or
            greater than the number of trees in the collection, then IndexError
            is raised. Requires that a particular tree collection has been
            identified using the ``tree_collection_offset`` parameter: if
            ``tree_collection_offset`` is not specified, a TypeError is raised.
        \*\*kwargs : keyword arguments
            Arguments to customize parsing, instantiation, processing, and
            accession of |Tree| objects read from the data source, including
            schema- or format-specific handling.

            The following optional keyword arguments are recognized and handled
            by this function:

                * ``label`` Specifies the label or description of the new
                  |TreeList|.
                * ``taxon_namespace`` specifies the |TaxonNamespace|
                  object to be attached to the new |TreeList| object.
                  Note that *all* operational taxonomic unit concepts in the
                  data source will be accessioned into the specified
                  |TaxonNamespace| instance. This includes the
                  operation taxonomic unit definitions associated with all
                  tree collections and character matrices in the data source.
                * ``tree_list`` : **SPECIAL** If passed a |TreeList| using
                  this keyword, then this instance is populated and returned
                  (instead of a new instance being created).

            All other keyword arguments are passed directly to |TreeList|.read()`.
            Other keyword arguments may be available, depending on the implementation
            of the reader specialized to handle ``schema`` formats.

        Notes
        -----
        Note that in most cases, even if ``collection_offset`` and ``tree_offset``
        are specified to restrict the trees returned, the *entire* data source
        is still parsed and processed. So this is not more efficient than
        reading all the trees and then manually-extracting them later; just
        more convenient. If you need just a single subset of trees from a data
        source, there is no gain in efficiency. If you need multiple trees or
        subsets of trees from the same data source, it would be much more
        efficient to read the entire data source, and extract trees as needed.

        Returns
        -------
        A |TreeList| object.

        """
        # these must be pulled before passing the kwargs
        # down to the reader
        tree_list = kwargs.pop("tree_list", None)
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
        label = kwargs.pop("label", None)
        # get the reader
        reader = dataio.get_reader(schema, **kwargs)
        # Accommodate an existing TreeList object being passed
        if tree_list is None:
            tree_list = cls(label=label, taxon_namespace=taxon_namespace)
        if collection_offset is None and tree_offset is not None:
            # A tree offset without a collection offset implicitly targets
            # the first collection.
            collection_offset = 0
        if collection_offset is None:
            # if tree_offset is not None:
            #     raise TypeError("Cannot specify ``tree_offset`` without specifying ``collection_offset``")
            # coerce all tree products into this list
            # (the pseudofactories route every namespace/collection the
            # reader creates back into ``tree_list`` itself)
            reader.read_tree_lists(
                    stream=stream,
                    taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                    tree_list_factory=tree_list._tree_list_pseudofactory,
                    global_annotations_target=None)
        else:
            # Parse every collection into its own TreeList, then copy the
            # requested collection (and tree subset) into ``tree_list``.
            tree_lists = reader.read_tree_lists(
                    stream=stream,
                    taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                    tree_list_factory=tree_list.__class__,
                    global_annotations_target=None)
            # if collection_offset < 0:
            #     raise IndexError("Collection offset out of range: {} (minimum valid tree offset = 0)".format(collection_offset))
            if collection_offset >= len(tree_lists):
                raise IndexError("Collection offset out of range: {} (number of collections = {}, maximum valid collection offset = {})".format(collection_offset, len(tree_lists), len(tree_lists)-1))
            target_tree_list = tree_lists[collection_offset]
            tree_list.copy_annotations_from(target_tree_list)
            if tree_offset is not None:
                # if tree_offset < 0:
                #     raise IndexError("Tree offset out of range: {} (minimum offset = 0)".format(tree_offset))
                if tree_offset >= len(target_tree_list):
                    raise IndexError("Tree offset out of range: {} (number of trees in source = {}, maximum valid tree offset = {})".format(tree_offset, len(target_tree_list), len(target_tree_list)-1))
                for tree in target_tree_list[tree_offset:]:
                    tree_list._trees.append(tree)
            else:
                for tree in target_tree_list:
                    tree_list._trees.append(tree)
        return tree_list
        # taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
        # label = kwargs.pop("label", None)
        # tree_list = cls(label=label,
        #         taxon_namespace=taxon_namespace)
        # tree_list.read(
        #         stream=stream,
        #         schema=schema,
        #         collection_offset=collection_offset,
        #         tree_offset=tree_offset,
        #         **kwargs)
        # return tree_list
    # Legacy-style classmethod wrapping (instead of the @classmethod
    # decorator) so the function object itself can be re-used by name.
    _parse_and_create_from_stream = classmethod(_parse_and_create_from_stream)
@classmethod
def get(cls, **kwargs):
"""
Instantiate and return a *new* |TreeList| object from a data source.
**Mandatory Source-Specification Keyword Argument (Exactly One Required):**
- **file** (*file*) -- File or file-like object of data opened for reading.
- **path** (*str*) -- Path to file of data.
- **url** (*str*) -- URL of data.
- **data** (*str*) -- Data given directly.
**Mandatory Schema-Specification Keyword Argument:**
- **schema** (*str*) -- Identifier of format of data given by the
"``file``", "``path``", "``data``", or "``url``" argument
specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
</schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
"|Schemas|" for more details.
**Optional General Keyword Arguments:**
- **label** (*str*) -- Name or identifier to be assigned to the new
object; if not given, will be assigned the one specified in the
data source, or |None| otherwise.
- **taxon_namespace** (|TaxonNamespace|) -- The |TaxonNamespace|
instance to use to :doc:`manage the taxon names </primer/taxa>`.
If not specified, a new one will be created.
- **collection_offset** (*int*) -- 0-based index of tree block or
collection in source to be parsed. If not specified then the
first collection (offset = 0) is assumed.
- **tree_offset** (*int*) -- 0-based index of first tree within the
collection specified by ``collection_offset`` to be parsed (i.e.,
skipping the first ``tree_offset`` trees). If not
specified, then the first tree (offset = 0) is assumed (i.e., no
trees within the specified collection will be skipped). Use this
to specify, e.g. a burn-in.
- **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
then unsupported or unrecognized keyword arguments will not
result in an error. Default is |False|: unsupported keyword
arguments will result in an error.
**Optional Schema-Specific Keyword Arguments:**
These provide control over how the data is interpreted and
processed, and supported argument names and values depend on
the schema as specified by the value passed as the "``schema``"
argument. See "|Schemas|" for more details.
**Examples:**
::
tlst1 = dendropy.TreeList.get(
file=open('treefile.tre', 'rU'),
schema="newick")
tlst2 = dendropy.TreeList.get(
path='sometrees.nexus',
schema="nexus",
collection_offset=2,
tree_offset=100)
tlst3 = dendropy.TreeList.get(
data="((A,B),(C,D));((A,C),(B,D));",
schema="newick")
tree4 = dendropy.dendropy.TreeList.get(
url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
schema="nexus")
"""
return cls._get_from(**kwargs)
DEFAULT_TREE_TYPE = treemodel.Tree
def tree_factory(cls, *args, **kwargs):
"""
Creates and returns a |Tree| of a type that this list understands how to
manage.
Deriving classes can override this to provide for custom Tree-type
object lists. You can simple override the class-level variable
`DEFAULT_TREE_TYPE` in your derived class if the constructor signature
of the alternate tree type is the same as |Tree|.
If you want to have a TreeList *instance* that generates
custom trees (i.e., as opposed to a TreeList-ish *class* of instances),
set the ``tree_type`` attribute of the TreeList instance.
Parameters
----------
\*args : positional arguments
Passed directly to constructor of |Tree|.
\*\*kwargs : keyword arguments
Passed directly to constructor of |Tree|.
Returns
-------
A |Tree| object.
"""
tree = cls.DEFAULT_TREE_TYPE(*args, **kwargs)
return tree
tree_factory = classmethod(tree_factory)
###########################################################################
### Lifecycle and Identity
    def __init__(self, *args, **kwargs):
        """
        Constructs a new |TreeList| object, populating it with any iterable
        container with Tree object members passed as unnamed argument, or from
        a data source if ``stream`` and ``schema`` are passed.

        If passed an iterable container, the objects in that container must be
        of type |Tree| (or derived). If the container is of type |TreeList|,
        then, because each |Tree| object must have the same |TaxonNamespace|
        reference as the containing |TreeList|, the trees in the container
        passed as an initialization argument will be **deep**-copied (except
        for associated |TaxonNamespace| and |Taxon| objects, which will
        be shallow-copied). If the container is any other type of
        iterable, then the |Tree| objects will be **shallow**-copied.

        |TreeList| objects can directly thus be instantiated in the
        following ways::

            # /usr/bin/env python

            from dendropy import TaxonNamespace, Tree, TreeList

            # instantiate an empty tree
            tlst1 = TreeList()

            # TreeList objects can be instantiated from an external data source
            # using the 'get()' factory class method
            tlst2 = TreeList.get(file=open('treefile.tre', 'rU'), schema="newick")
            tlst3 = TreeList.get(path='sometrees.nexus', schema="nexus")
            tlst4 = TreeList.get(data="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # can also call `read()` on a TreeList object; each read adds
            # (appends) the tree(s) found to the TreeList
            tlst5 = TreeList()
            tlst5.read(file=open('boot1.tre', 'rU'), schema="newick")
            tlst5.read(path="boot3.tre", schema="newick")
            tlst5.read(value="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # populated from list of Tree objects
            tlist6_1 = Tree.get(
                    data="((A,B),(C,D))",
                    schema="newick")
            tlist6_2 = Tree.get(
                    data="((A,C),(B,D))",
                    schema="newick")
            tlist6 = TreeList([tlist6_1, tlist6_2])

            # passing keywords to underlying tree parser
            tlst8 = TreeList.get(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick",
                    taxon_namespace=tlst3.taxon_namespace,
                    rooting="force-rooted",
                    extract_comment_metadata=True,
                    store_tree_weights=False,
                    preserve_underscores=True)

            # Subsets of trees can be read. Note that in most cases, the entire
            # data source is parsed, so this is not more efficient than reading
            # all the trees and then manually-extracting them later; just more
            # convenient

            # skip the *first* 100 trees in the *first* (offset=0) collection of trees
            trees = TreeList.get(
                    path="mcmc.tre",
                    schema="newick",
                    collection_offset=0,
                    tree_offset=100)

            # get the *last* 10 trees in the *second* (offset=1) collection of trees
            trees = TreeList.get(
                    path="mcmc.tre",
                    schema="newick",
                    collection_offset=1,
                    tree_offset=-10)

            # get the last 10 trees in the second-to-last collection of trees
            trees = TreeList.get(
                    path="mcmc.tre",
                    schema="newick",
                    collection_offset=-2,
                    tree_offset=100)

            # Slices give shallow-copy: trees are references
            tlst4copy0a = tlst4[:]
            assert tlst4copy0a[0] is tlst4[0]
            tlst4copy0b = tlst4[:4]
            assert tlst4copy0b[0] is tlst4[0]

            # 'Taxon-namespace-scoped' copy:
            # I.e., Deep-copied objects but taxa and taxon namespace
            # are copied as references
            tlst4copy1a = TreeList(tlst4)
            tlst4copy1b = TreeList([Tree(t) for t in tlst4])
            assert tlst4copy1a[0] is not tlst4[0] # True
            assert tlst4copy1a.taxon_namespace is tlst4.taxon_namespace # True
            assert tlst4copy1b[0] is not tlst4[0] # True
            assert tlst4copy1b.taxon_namespace is tlst4.taxon_namespace # True

        """
        if len(args) > 1:
            # only allow 1 positional argument
            raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)
        elif len(args) == 1 and isinstance(args[0], TreeList):
            # Copy-construction from another TreeList: taxon-namespace-scoped
            # deep copy (trees duplicated, taxa shared/remapped).
            self._clone_from(args[0], kwargs)
        else:
            # Fresh construction; base-class initializers consume ("pop")
            # their keywords from ``kwargs`` in order, so any keywords left
            # over at the end are genuinely unrecognized.
            basemodel.DataObject.__init__(self, label=kwargs.pop("label", None))
            taxonmodel.TaxonNamespaceAssociated.__init__(self,
                    taxon_namespace=taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None))
            self.tree_type = kwargs.pop("tree_type", self.__class__.DEFAULT_TREE_TYPE)
            self._trees = []
            self.comments = []
            if len(args) == 1:
                # Generic iterable: shallow-copy (reference) each tree,
                # enforcing the managed tree type.
                for aidx, a in enumerate(args[0]):
                    if not isinstance(a, self.tree_type):
                        raise ValueError("Cannot add object not of 'Tree' type to 'TreeList'")
                    self.append(a)
        if kwargs:
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))
    def __hash__(self):
        # Identity-based hash: a TreeList is mutable, so hashing by contents
        # would break once the list changes. Note this intentionally differs
        # from the value-based __eq__ below.
        return id(self)
def __eq__(self, other):
return (
isinstance(other, TreeList)
and (self.taxon_namespace is other.taxon_namespace)
and (self._trees == other._trees)
)
    def _clone_from(self, tree_list, kwargs_dict):
        """Initialize ``self`` as a taxon-namespace-scoped copy of ``tree_list``.

        Trees are deep-copied, but |Taxon| and |TaxonNamespace| objects are
        mapped through the deepcopy ``memo`` onto either ``tree_list``'s own
        namespace or a caller-supplied one, rather than being duplicated.
        """
        memo = {}
        # memo[id(tree)] = self
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs_dict, tree_list.taxon_namespace)
        memo[id(tree_list.taxon_namespace)] = taxon_namespace
        if taxon_namespace is not tree_list.taxon_namespace:
            # Different target namespace: remap each taxon by label, creating
            # new Taxon objects where no matching label exists.
            for t1 in tree_list.taxon_namespace:
                t2 = taxon_namespace.require_taxon(label=t1.label)
                memo[id(t1)] = t2
        else:
            # Same namespace: pre-seed the memo so deepcopy reuses the
            # existing Taxon objects instead of cloning them.
            for t1 in tree_list.taxon_namespace:
                memo[id(t1)] = t1
        t = copy.deepcopy(tree_list, memo)
        # Adopt the copy's state wholesale, then restore/override the label.
        self.__dict__ = t.__dict__
        self.label = kwargs_dict.pop("label", tree_list.label)
        return self
def __copy__(self):
other = TreeList(label=self.label, taxon_namespace=self.taxon_namespace)
other._trees = list(self._trees)
memo = {}
memo[id(self)] = other
other.deep_copy_annotations_from(self, memo)
return other
def taxon_namespace_scoped_copy(self, memo=None):
if memo is None:
memo = {}
# this populates ``memo`` with references to the
# the TaxonNamespace and Taxon objects
self.taxon_namespace.populate_memo_for_taxon_namespace_scoped_copy(memo)
return self.__deepcopy__(memo=memo)
    def __deepcopy__(self, memo=None):
        # Delegate to Annotable so annotations are cloned consistently; a
        # pre-seeded ``memo`` (as built by taxon_namespace_scoped_copy)
        # controls which objects are shared instead of copied.
        return basemodel.Annotable.__deepcopy__(self, memo=memo)
###########################################################################
### Representation
def __str__(self):
return "<TreeList {} '{}': [{}]>".format(hex(id(self)), self.label, ", ".join(repr(i) for i in self._trees))
###########################################################################
### Data I/O
def _taxon_namespace_pseudofactory(self, **kwargs):
"""
Dummy factory to coerce all |TaxonNamespace| objects required when
parsing a data source to reference ``self.taxon_namespace``.
"""
if "label" in kwargs and kwargs["label"] is not None and self.taxon_namespace.label is None:
self.taxon_namespace.label = kwargs["label"]
return self.taxon_namespace
def _tree_list_pseudofactory(self, **kwargs):
"""
Dummy factory to coerce all |TreeList| objects required when
parsing a data source to reference ``self``.
"""
if "label" in kwargs and kwargs["label"] is not None and self.label is None:
self.label = kwargs["label"]
return self
    def _parse_and_add_from_stream(self,
            stream,
            schema,
            collection_offset=None,
            tree_offset=None,
            **kwargs):
        """
        Parses |Tree| objects from data source and adds to this collection.

        Notes
        -----
        *All* operational taxonomic unit concepts in the data source will be included
        in the |TaxonNamespace| object associated with the new
        |TreeList| object and its contained |Tree| objects, even those
        not associated with trees or the particular trees being retrieved.

        Parameters
        ----------
        stream : file or file-like object
            Source of data.
        schema : string
            Identifier of format of data in ``stream``.
        collection_offset : integer or None
            0-based index indicating collection of trees to parse. If |None|,
            then all tree collections are retrieved, with each distinct
            collection parsed into a separate |TreeList| object. If the
            tree collection offset index is equal or greater than the number of
            tree collections in the data source, then IndexError is raised.
            Negative offsets work like negative list indexes; e.g., a
            ``collection_offset`` of -1 means to read the last collection of
            trees in the data source. For data formats that do not support the
            concept of distinct tree collections (e.g. NEWICK) are considered
            single-collection data source (i.e., the only acceptable
            ``collection_offset`` values are -1 or 0).
        tree_offset : integer or None
            0-based index indicating particular tree within a particular
            collection of trees at which to begin reading. If not specified or
            |None| (default), then all trees are parsed. Otherwise, must be an
            integer value up the length of the collection minus 1. A positive
            offset indicates the number of trees in the collection to skip;
            e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the
            collection. Negative offsets work like negative list indexes;
            e.g., a ``tree_offset`` value of -10 means to retrieve the last 10
            trees in the collection. If the tree offset index is equal or
            greater than the number of trees in the collection, then IndexError
            is raised. Requires that a particular tree collection has been
            identified using the ``tree_collection_offset`` parameter: if
            ``tree_collection_offset`` is not specified, a TypeError is raised.
        \*\*kwargs : keyword arguments
            Arguments to customize parsing, instantiation, processing, and
            accession of |Tree| objects read from the data source, including
            schema- or format-specific handling. These will be passed to the
            underlying schema-specific reader for handling.

            General (schema-agnostic) keyword arguments are:

                * ``rooted`` specifies the default rooting interpretation of the tree.
                * ``edge_length_type`` specifies the type of the edge lengths (int or
                  float; defaults to 'float')

            Other keyword arguments are available depending on the schema. See
            specific schema handlers (e.g., `NewickReader`, `NexusReader`,
            `NexmlReader`) for more details.

        Notes
        -----
        Note that in most cases, even if ``collection_offset`` and ``tree_offset``
        are specified to restrict the trees read, the *entire* data source
        is still parsed and processed. So this is not more efficient than
        reading all the trees and then manually-extracting them later; just
        more convenient. If you need just a single subset of trees from a data
        source, there is no gain in efficiency. If you need multiple trees or
        subsets of trees from the same data source, it would be much more
        efficient to read the entire data source, and extract trees as needed.

        Returns
        -------
        n : ``int``
            The number of |Tree| objects read.

        """
        if "taxon_namespace" in kwargs and kwargs['taxon_namespace'] is not self.taxon_namespace:
            raise TypeError("Cannot change ``taxon_namespace`` when reading into an existing TreeList")
        kwargs["taxon_namespace"] = self.taxon_namespace
        # Passing ``tree_list=self`` makes the class-level parser populate
        # this instance in place rather than creating a new TreeList.
        kwargs["tree_list"] = self
        cur_size = len(self._trees)
        TreeList._parse_and_create_from_stream(
                stream=stream,
                schema=schema,
                collection_offset=collection_offset,
                tree_offset=tree_offset,
                **kwargs)
        # Report how many trees the read added.
        new_size = len(self._trees)
        return new_size - cur_size
def read(self, **kwargs):
"""
Add |Tree| objects to existing |TreeList| from data source providing
one or more collections of trees.
**Mandatory Source-Specification Keyword Argument (Exactly One Required):**
- **file** (*file*) -- File or file-like object of data opened for reading.
- **path** (*str*) -- Path to file of data.
- **url** (*str*) -- URL of data.
- **data** (*str*) -- Data given directly.
**Mandatory Schema-Specification Keyword Argument:**
- **schema** (*str*) -- Identifier of format of data given by the
"``file``", "``path``", "``data``", or "``url``" argument
specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
</schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
"|Schemas|" for more details.
**Optional General Keyword Arguments:**
- **collection_offset** (*int*) -- 0-based index of tree block or
collection in source to be parsed. If not specified then the
first collection (offset = 0) is assumed.
- **tree_offset** (*int*) -- 0-based index of first tree within the
collection specified by ``collection_offset`` to be parsed (i.e.,
skipping the first ``tree_offset`` trees). If not
specified, then the first tree (offset = 0) is assumed (i.e., no
trees within the specified collection will be skipped). Use this
to specify, e.g. a burn-in.
- **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
then unsupported or unrecognized keyword arguments will not
result in an error. Default is |False|: unsupported keyword
arguments will result in an error.
**Optional Schema-Specific Keyword Arguments:**
These provide control over how the data is interpreted and
processed, and supported argument names and values depend on
the schema as specified by the value passed as the "``schema``"
argument. See "|Schemas|" for more details.
**Examples:**
::
tlist = dendropy.TreeList()
tlist.read(
file=open('treefile.tre', 'rU'),
schema="newick",
tree_offset=100)
tlist.read(
path='sometrees.nexus',
schema="nexus",
collection_offset=2,
tree_offset=100)
tlist.read(
data="((A,B),(C,D));((A,C),(B,D));",
schema="newick")
tlist.read(
url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
schema="nexus")
"""
return basemodel.MultiReadable._read_from(self, **kwargs)
def _format_and_write_to_stream(self, stream, schema, **kwargs):
"""
Writes out ``self`` in ``schema`` format to a destination given by
file-like object ``stream``.
Parameters
----------
stream : file or file-like object
Destination for data.
schema : string
Must be a recognized and tree file schema, such as "nexus",
"newick", etc, for which a specialized tree list writer is
available. If this is not implemented for the schema specified, then
a UnsupportedSchemaError is raised.
\*\*kwargs : keyword arguments, optional
Keyword arguments will be passed directly to the writer for the
specified schema. See documentation for details on keyword
arguments supported by writers of various schemas.
"""
writer = dataio.get_writer(schema, **kwargs)
writer.write_tree_list(self, stream)
###########################################################################
### List Interface
def _import_tree_to_taxon_namespace(self,
tree,
taxon_import_strategy="migrate",
**kwargs):
if tree.taxon_namespace is not self.taxon_namespace:
if taxon_import_strategy == "migrate":
tree.migrate_taxon_namespace(taxon_namespace=self.taxon_namespace,
**kwargs)
elif taxon_import_strategy == "add":
tree._taxon_namespace = self.taxon_namespace
tree.update_taxon_namespace()
else:
raise ValueError("Unrecognized taxon import strategy: '{}'".format(taxon_import_strategy))
# assert tree.taxon_namespace is self.taxon_namespace
return tree
def insert(self,
index,
tree,
taxon_import_strategy="migrate",
**kwargs):
"""
Inserts a |Tree| object, ``tree``, into the collection before
``index``.
The |TaxonNamespace| reference of ``tree`` will be set to that of
``self``. Any |Taxon| objects associated with nodes in ``tree``
that are not already in ``self.taxon_namespace`` will be handled
according to ``taxon_import_strategy``:
- 'migrate'
|Taxon| objects associated with ``tree`` that are not already
in ``self.taxon_nameaspace`` will be remapped based on their
labels, with new :class|Taxon| objects being reconstructed if
none with matching labels are found. Specifically,
:meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
will be called on ``tree``, where ``kwargs`` is as passed to
this function.
- 'add'
|Taxon| objects associated with ``tree`` that are not already
in ``self.taxon_namespace`` will be added. Note that this might
result in |Taxon| objects with duplicate labels as no
attempt at mapping to existing |Taxon| objects based on
label-matching is done.
Parameters
----------
index : integer
Position before which to insert ``tree``.
tree : A |Tree| instance
The |Tree| object to be added.
taxon_import_strategy : string
If ``tree`` is associated with a different |TaxonNamespace|,
this argument determines how new |Taxon| objects in ``tree``
are handled: 'migrate' or 'add'. See above for details.
\*\*kwargs : keyword arguments
These arguments will be passed directly to
'migrate_taxon_namespace()' method call on ``tree``.
See Also
--------
:meth:`Tree.migrate_taxon_namespace`
"""
self._import_tree_to_taxon_namespace(
tree=tree,
taxon_import_strategy=taxon_import_strategy,
**kwargs)
self._trees.insert(index, tree)
def append(self,
tree,
taxon_import_strategy="migrate",
**kwargs):
"""
Adds a |Tree| object, ``tree``, to the collection.
The |TaxonNamespace| reference of ``tree`` will be set to that of
``self``. Any |Taxon| objects associated with nodes in ``tree``
that are not already in ``self.taxon_namespace`` will be handled
according to ``taxon_import_strategy``:
- 'migrate'
|Taxon| objects associated with ``tree`` that are not already
in ``self.taxon_nameaspace`` will be remapped based on their
labels, with new :class|Taxon| objects being reconstructed if
none with matching labels are found. Specifically,
:meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
will be called on ``tree``, where ``kwargs`` is as passed to this
function.
- 'add'
|Taxon| objects associated with ``tree`` that are not already
in ``self.taxon_namespace`` will be added. Note that this might
result in |Taxon| objects with duplicate labels as no
attempt at mapping to existing |Taxon| objects based on
label-matching is done.
Parameters
----------
tree : A |Tree| instance
The |Tree| object to be added.
taxon_import_strategy : string
If ``tree`` is associated with a different |TaxonNamespace|,
this argument determines how new |Taxon| objects in ``tree``
are handled: 'migrate' or 'add'. See above for details.
\*\*kwargs : keyword arguments
These arguments will be passed directly to
'migrate_taxon_namespace()' method call on ``tree``.
See Also
--------
:meth:`Tree.migrate_taxon_namespace`
"""
self._import_tree_to_taxon_namespace(
tree=tree,
taxon_import_strategy=taxon_import_strategy,
**kwargs)
self._trees.append(tree)
def extend(self, other):
"""
In-place addition of |Tree| objects in ``other`` to ``self``.
If ``other`` is a |TreeList|, then the trees are *copied*
and migrated into ``self.taxon_namespace``; otherwise, the original
objects are migrated into ``self.taxon_namespace`` and added directly.
Parameters
----------
other : iterable of |Tree| objects
Returns
-------
``self`` : |TreeList|
"""
if isinstance(other, TreeList):
for t0 in other:
t1 = self.tree_type(t0, taxon_namespace=self.taxon_namespace)
self._trees.append(t1)
else:
for t0 in other:
self.append(t0)
return self
    def __iadd__(self, other):
        """
        In-place addition of |Tree| objects in ``other`` to ``self``.
        Delegates to :meth:`TreeList.extend`: if ``other`` is a |TreeList|,
        then the trees are *copied* and migrated into
        ``self.taxon_namespace``; otherwise, the original objects are
        migrated into ``self.taxon_namespace`` and added directly.
        Parameters
        ----------
        other : iterable of |Tree| objects
        Returns
        -------
        ``self`` : |TreeList|
        """
        return self.extend(other)
def __add__(self, other):
"""
Creates and returns new |TreeList| with clones of all trees in ``self``
as well as all |Tree| objects in ``other``. If ``other`` is a
|TreeList|, then the trees are *cloned* and migrated into
``self.taxon_namespace``; otherwise, the original objects are migrated into
``self.taxon_namespace`` and added directly.
Parameters
----------
other : iterable of |Tree| objects
Returns
-------
tlist : |TreeList| object
|TreeList| object containing clones of |Tree| objects
in ``self`` and ``other``.
"""
tlist = TreeList(taxon_namespace=self.taxon_namespace)
tlist += self
tlist += other
return tlist
    def __contains__(self, tree):
        # Membership test against the underlying list of trees.
        return tree in self._trees
    def __delitem__(self, tree):
        # ``tree`` here is an index or slice, as for list deletion.
        del self._trees[tree]
    def __iter__(self):
        # Iterate over the managed |Tree| objects in order.
        return iter(self._trees)
    def __reversed__(self):
        # Reverse-order iteration over the managed |Tree| objects.
        return reversed(self._trees)
    def __len__(self):
        # Number of trees in the collection.
        return len(self._trees)
def __getitem__(self, index):
"""
If ``index`` is an integer, then |Tree| object at position ``index``
is returned. If ``index`` is a slice, then a |TreeList| is returned
with references (i.e., not copies or clones, but the actual original
instances themselves) to |Tree| objects in the positions given
by the slice. The |TaxonNamespace| is the same as ``self``.
Parameters
----------
index : integer or slice
Index or slice.
Returns
-------
t : |Tree| object or |TreeList| object
"""
if isinstance(index, slice):
r = self._trees[index]
return TreeList(r,
taxon_namespace=self.taxon_namespace)
else:
return self._trees[index]
    def __setitem__(self, index, value):
        # Replace tree(s) at ``index``. Slice assignment from a |TreeList|
        # *copies* the incoming trees into this collection's taxon
        # namespace; any other iterable has its trees migrated in place.
        if isinstance(index, slice):
            if isinstance(value, TreeList):
                tt = []
                for t0 in value:
                    # Copy-construct so the source TreeList is left intact.
                    t1 = self.tree_type(t0,
                            taxon_namespace=self.taxon_namespace)
                    tt.append(t1)
                value = tt
            else:
                # NOTE(review): assumes ``value`` is re-iterable (not a
                # one-shot generator) since it is traversed here and then
                # assigned below -- confirm with callers.
                for t in value:
                    self._import_tree_to_taxon_namespace(t)
            self._trees[index] = value
        else:
            self._trees[index] = self._import_tree_to_taxon_namespace(value)
    def clear(self):
        # list.clear() only with 3.4 or so ...
        self._trees = []
    def index(self, tree):
        # Position of ``tree`` in the collection; raises ValueError if absent.
        return self._trees.index(tree)
    def pop(self, index=-1):
        # Remove and return the tree at ``index`` (last tree by default).
        return self._trees.pop(index)
    def remove(self, tree):
        # Remove first occurrence of ``tree``; raises ValueError if absent.
        self._trees.remove(tree)
    def reverse(self):
        # In-place reversal of tree order.
        self._trees.reverse()
    def sort(self, key=None, reverse=False):
        # In-place sort with list.sort() semantics.
        self._trees.sort(key=key, reverse=reverse)
def new_tree(self, *args, **kwargs):
tns = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, self.taxon_namespace)
if tns is not self.taxon_namespace:
raise TypeError("Cannot create new Tree with different TaxonNamespace")
kwargs["taxon_namespace"] = self.taxon_namespace
if self.tree_type is not None:
tree = self.tree_type(*args, **kwargs)
else:
tree = self.tree_factory(*args, **kwargs)
self._trees.append(tree)
return tree
##############################################################################
## Taxon Handling
def reconstruct_taxon_namespace(self,
unify_taxa_by_label=True,
taxon_mapping_memo=None):
if taxon_mapping_memo is None:
taxon_mapping_memo = {}
for tree in self._trees:
tree._taxon_namespace = self.taxon_namespace
tree.reconstruct_taxon_namespace(
unify_taxa_by_label=unify_taxa_by_label,
taxon_mapping_memo=taxon_mapping_memo,
)
def update_taxon_namespace(self):
for tree in self._trees:
tree._taxon_namespace = self.taxon_namespace
tree.update_taxon_namespace()
def poll_taxa(self, taxa=None):
"""
Returns a set populated with all of |Taxon| instances associated
with ``self``.
Parameters
----------
taxa : set()
Set to populate. If not specified, a new one will be created.
Returns
-------
taxa : set[|Taxon|]
Set of taxa associated with ``self``.
"""
if taxa is None:
taxa = set()
for tree in self:
tree.poll_taxa(taxa)
return taxa
def reindex_subcomponent_taxa():
raise NotImplementedError()
##############################################################################
## Special Calculations and Operations on Entire Collection
def _get_tree_array(self,
kwargs_dict,
):
"""
Return TreeArray containing information of trees currently
in self. Processes ``kwargs_dict`` intelligently: removing
and passing on keyword arguments pertaining to TreeArray
construction, and leaving everything else.
"""
# TODO: maybe ignore_node_ages defaults to |False| but ``ultrametricity_precision`` defaults to 0?
ta = TreeArray.from_tree_list(
trees=self,
# taxon_namespace=self.taxon_namespace,
is_rooted_trees=kwargs_dict.pop("is_rooted_trees", None),
ignore_edge_lengths=kwargs_dict.pop("ignore_edge_lengths", False),
ignore_node_ages=kwargs_dict.pop("ignore_node_ages", True),
use_tree_weights=kwargs_dict.pop("use_tree_weights", True),
ultrametricity_precision=kwargs_dict.pop("ultrametricity_precision", constants.DEFAULT_ULTRAMETRICITY_PRECISION),
is_force_max_age=kwargs_dict.pop("is_force_max_age", None),
taxon_label_age_map=kwargs_dict.pop("taxon_label_age_map", None),
is_bipartitions_updated=kwargs_dict.pop("is_bipartitions_updated", False)
)
return ta
def split_distribution(self,
is_bipartitions_updated=False,
default_edge_length_value=None,
**kwargs):
"""
Return `SplitDistribution` collecting information on splits in
contained trees. Keyword arguments get passed directly to
`SplitDistribution` constructor.
"""
assert "taxon_namespace" not in kwargs or kwargs["taxon_namespace"] is self.taxon_namespace
kwargs["taxon_namespace"] = self.taxon_namespace
sd = SplitDistribution(**kwargs)
for tree in self:
sd.count_splits_on_tree(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
default_edge_length_value=default_edge_length_value)
return sd
def as_tree_array(self, **kwargs):
"""
Return |TreeArray| collecting information on splits in contained
trees. Keyword arguments get passed directly to |TreeArray|
constructor.
"""
ta = TreeArray.from_tree_list(
trees=self,
**kwargs)
return ta
def consensus(self,
min_freq=constants.GREATER_THAN_HALF,
is_bipartitions_updated=False,
summarize_splits=True,
**kwargs):
"""
Returns a consensus tree of all trees in self, with minumum frequency
of bipartition to be added to the consensus tree given by ``min_freq``.
"""
ta = self._get_tree_array(kwargs)
return ta.consensus_tree(min_freq=min_freq,
summarize_splits=summarize_splits,
**kwargs)
def maximum_product_of_split_support_tree(
self,
include_external_splits=False,
score_attr="log_product_of_split_support"):
"""
Return the tree with that maximizes the product of split supports, also
known as the "Maximum Clade Credibility Tree" or MCCT.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mcct_tree : Tree
Tree that maximizes the product of split supports.
"""
ta = self._get_tree_array({})
scores, max_score_tree_idx = ta.calculate_log_product_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self[max_score_tree_idx]
if score_attr is not None:
setattr(tree, score_attr, scores[max_score_tree_idx])
return tree
def maximum_sum_of_split_support_tree(
self,
include_external_splits=False,
score_attr="sum_of_split_support"):
"""
Return the tree with that maximizes the *sum* of split supports.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mcct_tree : Tree
Tree that maximizes the sum of split supports.
"""
ta = self._get_tree_array({})
scores, max_score_tree_idx = ta.calculate_sum_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self[max_score_tree_idx]
if score_attr is not None:
setattr(tree, score_attr, scores[max_score_tree_idx])
return tree
    def frequency_of_bipartition(self, **kwargs):
        """
        Given a bipartition specified as:
            - a |Bipartition| instance given the keyword 'bipartition'
            - a split bitmask given the keyword 'split_bitmask'
            - a list of |Taxon| objects given with the keyword ``taxa``
            - a list of taxon labels given with the keyword ``labels``
        this function returns the proportion of trees in self
        in which the split is found.
        If the tree(s) in the collection are unrooted, then the bipartition
        will be normalized for the comparison.
        An additional keyword, ``is_bipartitions_updated``, may be given: if
        true, trees whose bipartition encoding is already populated are not
        re-encoded.
        """
        split = None
        is_bipartitions_updated = kwargs.pop("is_bipartitions_updated", False)
        if "split_bitmask" in kwargs:
            split = kwargs["split_bitmask"]
        elif "bipartition" in kwargs:
            split = kwargs["bipartition"].split_bitmask
        elif "taxa" in kwargs or "labels" in kwargs:
            split = self.taxon_namespace.taxa_bitmask(**kwargs)
            # Sanity check: every requested taxon/label must have set a bit.
            if "taxa" in kwargs:
                k = len(kwargs["taxa"])
            else:
                k = len(kwargs["labels"])
            if bitprocessing.num_set_bits(split) != k:
                raise IndexError('Not all taxa could be mapped to bipartition (%s): %s' \
                    % (self.taxon_namespace.bitmask_as_bitstring(split), k))
        else:
            raise TypeError("Need to specify one of the following keyword arguments: 'split_bitmask', 'bipartition', 'taxa', or 'labels'")
        # Unrooted trees are compared against the rotation-normalized form;
        # rooted trees against the bitmask as given.
        unnormalized_split = split
        normalized_split = treemodel.Bipartition.normalize_bitmask(
                bitmask=split,
                fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
                lowest_relevant_bit=1)
        found = 0
        total = 0
        for tree in self:
            if not is_bipartitions_updated or not tree.bipartition_encoding:
                tree.encode_bipartitions()
            bipartition_encoding = set(b.split_bitmask for b in tree.bipartition_encoding)
            total += 1
            if tree.is_unrooted and (normalized_split in bipartition_encoding):
                found += 1
            elif (not tree.is_unrooted) and (unnormalized_split in bipartition_encoding):
                found += 1
        try:
            return float(found)/total
        except ZeroDivisionError:
            # Empty collection: report frequency 0 rather than raising.
            return 0
def frequency_of_split(self, **kwargs):
"""
DEPRECATED: use 'frequency_of_bipartition()' instead.
"""
deprecate.dendropy_deprecation_warning(
message="Deprecated since DendroPy 4: Instead of 'frequency_of_split()' use 'frequency_of_bipartition()'",
stacklevel=4,
)
return self.frequency_of_bipartition(**kwargs)
###############################################################################
### SplitDistribution
class SplitDistribution(taxonmodel.TaxonNamespaceAssociated):
    """
    Collects information regarding splits over multiple trees.
    """
    # Field names used when reporting summary statistics of edge lengths
    # and node ages.
    SUMMARY_STATS_FIELDNAMES = ('mean', 'median', 'sd', 'hpd95', 'quant_5_95', 'range')
    def __init__(self,
            taxon_namespace=None,
            ignore_edge_lengths=False,
            ignore_node_ages=True,
            use_tree_weights=True,
            ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
            is_force_max_age=False,
            taxon_label_age_map=None):
        # Taxon Namespace
        taxonmodel.TaxonNamespaceAssociated.__init__(self,
                taxon_namespace=taxon_namespace)
        # configuration
        self.ignore_edge_lengths = ignore_edge_lengths
        self.ignore_node_ages = ignore_node_ages
        self.use_tree_weights = use_tree_weights
        self.ultrametricity_precision = ultrametricity_precision
        # storage/function: per-split tallies keyed by split bitmask;
        # defaultdicts so unseen splits start at 0.0 / [].
        self.total_trees_counted = 0
        self.sum_of_tree_weights = 0.0
        self.tree_rooting_types_counted = set()
        self.split_counts = collections.defaultdict(float)
        self.split_edge_lengths = collections.defaultdict(list)
        self.split_node_ages = collections.defaultdict(list)
        self.is_force_max_age = is_force_max_age
        self.is_force_min_age = False
        self.taxon_label_age_map = taxon_label_age_map
        # secondary/derived/generated/collected data (caches invalidated
        # when more trees are counted)
        self._is_rooted = False
        self._split_freqs = None
        self._trees_counted_for_freqs = 0
        self._split_edge_length_summaries = None
        self._split_node_age_summaries = None
        self._trees_counted_for_summaries = 0
        # services
        self.tree_decorator = None
###########################################################################
### Utility
def normalize_bitmask(self, bitmask):
"""
"Normalizes" split, by ensuring that the least-significant bit is
always 1 (used on unrooted trees to establish split identity
independent of rotation).
Parameters
----------
bitmask : integer
Split bitmask hash to be normalized.
Returns
-------
h : integer
Normalized split bitmask.
"""
return treemodel.Bipartition.normalize_bitmask(
bitmask=bitmask,
fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
lowest_relevant_bit=1)
###########################################################################
### Configuration
    def _is_rooted_deprecation_warning(self):
        # Shared warning emitted by all the legacy is_rooted/is_unrooted
        # accessors below.
        deprecate.dendropy_deprecation_warning(
                message="Deprecated since DendroPy 4: 'SplitDistribution.is_rooted' and 'SplitDistribution.is_unrooted' are no longer valid attributes; rooting state tracking and management is now the responsibility of client code.",
                stacklevel=4,
                )
    def _get_is_rooted(self):
        self._is_rooted_deprecation_warning()
        return self._is_rooted
    def _set_is_rooted(self, val):
        self._is_rooted_deprecation_warning()
        self._is_rooted = val
    # DEPRECATED property: reads/writes still work but warn.
    is_rooted = property(_get_is_rooted, _set_is_rooted)
    def _get_is_unrooted(self):
        self._is_rooted_deprecation_warning()
        return not self._is_rooted
    def _set_is_unrooted(self, val):
        self._is_rooted_deprecation_warning()
        self._is_rooted = not val
    # DEPRECATED property: logical negation of ``is_rooted``.
    is_unrooted = property(_get_is_unrooted, _set_is_unrooted)
###########################################################################
### Split Counting and Book-Keeping
    def add_split_count(self, split, count=1):
        # Directly add ``count`` observations of ``split`` to the tally.
        self.split_counts[split] += count
    def count_splits_on_tree(self,
            tree,
            is_bipartitions_updated=False,
            default_edge_length_value=None):
        """
        Counts splits in this tree and add to totals. ``tree`` must be decorated
        with splits, and no attempt is made to normalize taxa.
        Parameters
        ----------
        tree : a |Tree| object.
            The tree on which to count the splits.
        is_bipartitions_updated : bool
            If |False| [default], then the tree will have its splits encoded or
            updated. Otherwise, if |True|, then the tree is assumed to have its
            splits already encoded and updated.
        default_edge_length_value : numeric or None
            Value recorded for edges with no length set (only relevant when
            edge lengths are being tracked).
        Returns
        --------
        s : iterable of splits
            A list of split bitmasks from ``tree``.
        e :
            A list of edge length values from ``tree``.
        a :
            A list of node age values from ``tree``.
        """
        assert tree.taxon_namespace is self.taxon_namespace
        self.total_trees_counted += 1
        if not self.ignore_node_ages:
            # Node ages must be computed before iterating edges below.
            if self.taxon_label_age_map:
                set_node_age_fn = self._set_node_age
            else:
                set_node_age_fn = None
            tree.calc_node_ages(
                    ultrametricity_precision=self.ultrametricity_precision,
                    is_force_max_age=self.is_force_max_age,
                    is_force_min_age=self.is_force_min_age,
                    set_node_age_fn=set_node_age_fn,
                    )
        # Each split observation is weighted by the tree weight (if weights
        # are being used), otherwise by 1.0.
        if tree.weight is not None and self.use_tree_weights:
            weight_to_use = float(tree.weight)
        else:
            weight_to_use = 1.0
        self.sum_of_tree_weights += weight_to_use
        # Track whether rooted and/or unrooted trees have been counted.
        if tree.is_rooted:
            self.tree_rooting_types_counted.add(True)
        else:
            self.tree_rooting_types_counted.add(False)
        if not is_bipartitions_updated:
            tree.encode_bipartitions()
        splits = []
        edge_lengths = []
        node_ages = []
        for bipartition in tree.bipartition_encoding:
            split = bipartition.split_bitmask
            ## if edge is stored as an attribute, might be faster to:
            # edge = bipartition.edge
            edge = tree.bipartition_edge_map[bipartition]
            splits.append(split)
            self.split_counts[split] += weight_to_use
            if not self.ignore_edge_lengths:
                # Record this edge's length (or the default) for this split.
                sel = self.split_edge_lengths.setdefault(split,[])
                if edge.length is None:
                    elen = default_edge_length_value
                else:
                    elen = edge.length
                sel.append(elen)
                edge_lengths.append(elen)
            else:
                sel = None
            if not self.ignore_node_ages:
                # Record the age of the edge's head node for this split.
                sna = self.split_node_ages.setdefault(split, [])
                if edge.head_node is not None:
                    nage = edge.head_node.age
                else:
                    nage = None
                sna.append(nage)
                node_ages.append(nage)
            else:
                sna = None
        return splits, edge_lengths, node_ages
def splits_considered(self):
"""
Returns 4 values:
total number of splits counted
total *weighted* number of unique splits counted
total number of non-trivial splits counted
total *weighted* number of unique non-trivial splits counted
"""
if not self.split_counts:
return 0, 0, 0, 0
num_splits = 0
num_unique_splits = 0
num_nt_splits = 0
num_nt_unique_splits = 0
taxa_mask = self.taxon_namespace.all_taxa_bitmask()
for s in self.split_counts:
num_unique_splits += 1
num_splits += self.split_counts[s]
if not treemodel.Bipartition.is_trivial_bitmask(s, taxa_mask):
num_nt_unique_splits += 1
num_nt_splits += self.split_counts[s]
return num_splits, num_unique_splits, num_nt_splits, num_nt_unique_splits
def calc_freqs(self):
"Forces recalculation of frequencies."
self._split_freqs = {}
if self.total_trees_counted == 0:
for split in self.split_counts:
self._split_freqs[split] = 1.0
else:
normalization_weight = self.calc_normalization_weight()
for split in self.split_counts:
count = self.split_counts[split]
self._split_freqs[split] = float(self.split_counts[split]) / normalization_weight
self._trees_counted_for_freqs = self.total_trees_counted
self._split_edge_length_summaries = None
self._split_node_age_summaries = None
return self._split_freqs
def calc_normalization_weight(self):
if not self.sum_of_tree_weights:
return self.total_trees_counted
else:
return float(self.sum_of_tree_weights)
def update(self, split_dist):
self.total_trees_counted += split_dist.total_trees_counted
self.sum_of_tree_weights += split_dist.sum_of_tree_weights
self._split_edge_length_summaries = None
self._split_node_age_summaries = None
self._trees_counted_for_summaries = 0
self.tree_rooting_types_counted.update(split_dist.tree_rooting_types_counted)
for split in split_dist.split_counts:
self.split_counts[split] += split_dist.split_counts[split]
self.split_edge_lengths[split] += split_dist.split_edge_lengths[split]
self.split_node_ages[split] += split_dist.split_node_ages[split]
###########################################################################
### Basic Information Access
    def __len__(self):
        # Number of distinct splits recorded.
        return len(self.split_counts)
    def __iter__(self):
        # Iterate over the recorded split bitmasks.
        for s in self.split_counts:
            yield s
    def __getitem__(self, split_bitmask):
        """
        Returns frequency of ``split_bitmask``.
        """
        # Unknown splits have frequency 0.0 rather than raising KeyError.
        return self._get_split_frequencies().get(split_bitmask, 0.0)
    def _get_split_frequencies(self):
        # Lazily (re)compute frequencies when stale, i.e. when more trees
        # have been counted since the last computation.
        if self._split_freqs is None or self._trees_counted_for_freqs != self.total_trees_counted:
            self.calc_freqs()
        return self._split_freqs
    split_frequencies = property(_get_split_frequencies)
    def is_mixed_rootings_counted(self):
        # True if both rooted and unrooted/unknown trees have been counted.
        return ( (True in self.tree_rooting_types_counted)
                and (False in self.tree_rooting_types_counted or None in self.tree_rooting_types_counted) )
    def is_all_counted_trees_rooted(self):
        # True if every counted tree was explicitly rooted.
        return (True in self.tree_rooting_types_counted) and (len(self.tree_rooting_types_counted) == 1)
    def is_all_counted_trees_strictly_unrooted(self):
        # True if every counted tree was explicitly unrooted.
        return (False in self.tree_rooting_types_counted) and (len(self.tree_rooting_types_counted) == 1)
    def is_all_counted_trees_treated_as_unrooted(self):
        # True if no rooted tree has been counted (unrooted/unknown only).
        return True not in self.tree_rooting_types_counted
###########################################################################
### Summarization
def split_support_iter(self,
tree,
is_bipartitions_updated=False,
include_external_splits=False,
traversal_strategy="preorder",
node_support_attr_name=None,
edge_support_attr_name=None,
):
"""
Returns iterator over support values for the splits of a given tree,
where the support value is given by the proportional frequency of the
split in the current split distribution.
Parameters
----------
tree : |Tree|
The |Tree| which will be scored.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
include_external_splits : bool
If |True|, then non-internal split posteriors will be included.
If |False|, then these are skipped. This should only make a
difference when dealing with splits collected from trees of
different leaf sets.
traversal_strategy : str
One of: "preorder" or "postorder". Specfies order in which splits
are visited.
Returns
-------
s : list of floats
List of values for splits in the tree corresponding to the
proportional frequency that the split is found in the current
distribution.
"""
if traversal_strategy == "preorder":
if include_external_splits:
iter_fn = tree.preorder_node_iter
else:
iter_fn = tree.preorder_internal_node_iter
elif traversal_strategy == "postorder":
if include_external_splits:
iter_fn = tree.postorder_node_iter
else:
iter_fn = tree.postorder_internal_node_iter
else:
raise ValueError("Traversal strategy not supported: '{}'".format(traversal_strategy))
if not is_bipartitions_updated:
tree.encode_bipartitions()
split_frequencies = self._get_split_frequencies()
for nd in iter_fn():
split = nd.edge.split_bitmask
support = split_frequencies.get(split, 0.0)
yield support
def calc_split_edge_length_summaries(self):
self._split_edge_length_summaries = {}
for split, elens in self.split_edge_lengths.items():
if not elens:
continue
try:
self._split_edge_length_summaries[split] = statistics.summarize(elens)
except ValueError:
pass
return self._split_edge_length_summaries
def calc_split_node_age_summaries(self):
self._split_node_age_summaries = {}
for split, ages in self.split_node_ages.items():
if not ages:
continue
try:
self._split_node_age_summaries[split] = statistics.summarize(ages)
except ValueError:
pass
return self._split_node_age_summaries
def _set_node_age(self, nd):
if nd.taxon is None or nd._child_nodes:
return None
else:
return self.taxon_label_age_map.get(nd.taxon.label, 0.0)
    def _get_split_edge_length_summaries(self):
        # Lazily (re)compute edge length summaries when absent or stale.
        if self._split_edge_length_summaries is None \
                or self._trees_counted_for_summaries != self.total_trees_counted:
            self.calc_split_edge_length_summaries()
        return self._split_edge_length_summaries
    split_edge_length_summaries = property(_get_split_edge_length_summaries)
    def _get_split_node_age_summaries(self):
        # Lazily (re)compute node age summaries when absent or stale.
        if self._split_node_age_summaries is None \
                or self._trees_counted_for_summaries != self.total_trees_counted:
            self.calc_split_node_age_summaries()
        return self._split_node_age_summaries
    split_node_age_summaries = property(_get_split_node_age_summaries)
def log_product_of_split_support_on_tree(self,
tree,
is_bipartitions_updated=False,
include_external_splits=False,
):
"""
Calculates the (log) product of the support of the splits of the
tree, where the support is given by the proportional frequency of the
split in the current split distribution.
The tree that has the highest product of split support out of a sample
of trees corresponds to the "maximum credibility tree" for that sample.
This can also be referred to as the "maximum clade credibility tree",
though this latter term is sometimes use for the tree that has the
highest *sum* of split support (see
:meth:`SplitDistribution.sum_of_split_support_on_tree()`).
Parameters
----------
tree : |Tree|
The tree for which the score should be calculated.
is_bipartitions_updated : bool
If |True|, then the splits are assumed to have already been encoded
and will not be updated on the trees.
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : numeric
The log product of the support of the splits of the tree.
"""
log_product_of_split_support = 0.0
for split_support in self.split_support_iter(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
include_external_splits=include_external_splits,
traversal_strategy="preorder",
):
if split_support:
log_product_of_split_support += math.log(split_support)
return log_product_of_split_support
def sum_of_split_support_on_tree(self,
tree,
is_bipartitions_updated=False,
include_external_splits=False,
):
"""
Calculates the sum of the support of the splits of the tree, where the
support is given by the proportional frequency of the split in the
current distribtion.
Parameters
----------
tree : |Tree|
The tree for which the score should be calculated.
is_bipartitions_updated : bool
If |True|, then the splits are assumed to have already been encoded
and will not be updated on the trees.
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : numeric
The sum of the support of the splits of the tree.
"""
sum_of_split_support = 0.0
for split_support in self.split_support_iter(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
include_external_splits=include_external_splits,
traversal_strategy="preorder",
):
sum_of_split_support += split_support
return sum_of_split_support
def collapse_edges_with_less_than_minimum_support(self,
tree,
min_freq=constants.GREATER_THAN_HALF,
):
"""
Collapse edges on tree that have support less than indicated by
``min_freq``.
"""
if not tree.is_rooted and self.is_all_counted_trees_rooted():
raise ValueError("Tree is interpreted as unrooted, but split support is based on rooted trees")
elif tree.is_rooted and self.is_all_counted_trees_treated_as_unrooted():
raise ValueError("Tree is interpreted as rooted, but split support is based on unrooted trees")
tree.encode_bipartitions()
split_frequencies = self._get_split_frequencies()
to_collapse = []
for nd in tree.postorder_node_iter():
s = nd.edge.bipartition.split_bitmask
if s not in split_frequencies:
to_collapse.append(nd)
elif split_frequencies[s] < min_freq:
to_collapse.append(nd)
for nd in to_collapse:
nd.edge.collapse(adjust_collapsed_head_children_edge_lengths=True)
    def consensus_tree(self,
            min_freq=constants.GREATER_THAN_HALF,
            is_rooted=None,
            summarize_splits=True,
            **split_summarization_kwargs
            ):
        """
        Returns a consensus tree from splits in ``self``.
        Parameters
        ----------
        min_freq : real
            The minimum frequency of a split in this distribution for it to be
            added to the tree.
        is_rooted : bool
            Should tree be rooted or not? If *all* trees counted for splits are
            explicitly rooted or unrooted, then this will default to |True| or
            |False|, respectively. Otherwise it defaults to |None|.
        \*\*split_summarization_kwargs : keyword arguments
            These will be passed directly to the underlying
            `SplitDistributionSummarizer` object. See
            :meth:`SplitDistributionSummarizer.configure` for options.
        Returns
        -------
        t : consensus tree
        """
        if is_rooted is None:
            # Infer the rooting state when all counted trees agree.
            if self.is_all_counted_trees_rooted():
                is_rooted = True
            elif self.is_all_counted_trees_strictly_unrooted():
                is_rooted = False
        split_frequencies = self._get_split_frequencies()
        to_try_to_add = []
        # Tolerance for treating a frequency as exactly 1.0 (guards against
        # float round-off when min_freq itself is ~1.0).
        _almost_one = lambda x: abs(x - 1.0) <= 0.0000001
        for s in split_frequencies:
            freq = split_frequencies[s]
            if (min_freq is None) or (freq >= min_freq) or (_almost_one(min_freq) and _almost_one(freq)):
                to_try_to_add.append((freq, s))
        # Sort descending by frequency (ties broken by split bitmask) so
        # the best-supported splits are attempted first.
        to_try_to_add.sort(reverse=True)
        splits_for_tree = [i[1] for i in to_try_to_add]
        con_tree = treemodel.Tree.from_split_bitmasks(
                split_bitmasks=splits_for_tree,
                taxon_namespace=self.taxon_namespace,
                is_rooted=is_rooted)
        if summarize_splits:
            self.summarize_splits_on_tree(
                tree=con_tree,
                is_bipartitions_updated=False,
                **split_summarization_kwargs
                )
        return con_tree
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**split_summarization_kwargs
):
"""
Summarizes support of splits/edges/node on tree.
Parameters
----------
tree: |Tree| instance
Tree to be decorated with support values.
is_bipartitions_updated: bool
If |True|, then bipartitions will not be recalculated.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
"""
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
if self.tree_decorator is None:
self.tree_decorator = SplitDistributionSummarizer()
self.tree_decorator.configure(**split_summarization_kwargs)
self.tree_decorator.summarize_splits_on_tree(
split_distribution=self,
tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
return tree
###########################################################################
### legacy
def _get_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
return self.taxon_namespace
def _set_taxon_set(self, v):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
self.taxon_namespace = v
def _del_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
taxon_set = property(_get_taxon_set, _set_taxon_set, _del_taxon_set)
###############################################################################
### SplitDistributionSummarizer
class SplitDistributionSummarizer(object):
    """
    Decorates nodes and edges of a target tree with summaries (support
    values and, optionally, node-age and edge-length statistics) drawn
    from a split distribution.
    """

    def __init__(self, **kwargs):
        """
        See :meth:`SplitDistributionSummarizer.configure` for configuration
        options.
        """
        self.configure(**kwargs)

    def configure(self, **kwargs):
        """
        Configure rendition/mark-up.

        Parameters
        ----------
        set_edge_lengths : string
            For each edge, set the length based on:
            - "support": use support values split corresponding to edge
            - "mean-length": mean of edge lengths for split
            - "median-length": median of edge lengths for split
            - "mean-age": such that split age is equal to mean of ages
            - "median-age": such that split age is equal to median of ages
            - |None|: do not set edge lengths
        add_support_as_node_attribute: bool
            Adds each node's support value as an attribute of the node,
            "``support``".
        add_support_as_node_annotation: bool
            Adds support as a metadata annotation, "``support``". If
            ``add_support_as_node_attribute`` is |True|, then the value will be
            dynamically-bound to the value of the node's "``support``" attribute.
        set_support_as_node_label : bool
            Sets the ``label`` attribute of each node to the support value.
        add_node_age_summaries_as_node_attributes: bool
            Summarizes the distribution of the ages of each node in the
            following attributes:
            - ``age_mean``
            - ``age_median``
            - ``age_sd``
            - ``age_hpd95``
            - ``age_range``
        add_node_age_summaries_as_node_annotations: bool
            Summarizes the distribution of the ages of each node in the
            following metadata annotations:
            - ``age_mean``
            - ``age_median``
            - ``age_sd``
            - ``age_hpd95``
            - ``age_range``
            If ``add_node_age_summaries_as_node_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding node
            attributes.
        add_edge_length_summaries_as_edge_attributes: bool
            Summarizes the distribution of the lengths of each edge in the
            following attributes:
            - ``length_mean``
            - ``length_median``
            - ``length_sd``
            - ``length_hpd95``
            - ``length_range``
        add_edge_length_summaries_as_edge_annotations: bool
            Summarizes the distribution of the lengths of each edge in the
            following metadata annotations:
            - ``length_mean``
            - ``length_median``
            - ``length_sd``
            - ``length_hpd95``
            - ``length_range``
            If ``add_edge_length_summaries_as_edge_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding edge
            attributes.
        support_label_decimals: int
            Number of decimal places to express when rendering the support
            value as a string for the node label.
        support_as_percentages: bool
            Whether or not to express the support value as percentages (default
            is probability or proportion).
        minimum_edge_length : numeric
            All edge lengths calculated to have a value less than this will be
            set to this.
        error_on_negative_edge_lengths : bool
            If |True|, an inferred edge length that is less than 0 will result
            in a ValueError.

        Raises
        ------
        TypeError
            If any unrecognized keyword arguments are passed.
        """
        self.set_edge_lengths = kwargs.pop("set_edge_lengths", None)
        self.add_support_as_node_attribute = kwargs.pop("add_support_as_node_attribute", True)
        self.add_support_as_node_annotation = kwargs.pop("add_support_as_node_annotation", True)
        self.set_support_as_node_label = kwargs.pop("set_support_as_node_label", None)
        self.add_node_age_summaries_as_node_attributes = kwargs.pop("add_node_age_summaries_as_node_attributes", True)
        self.add_node_age_summaries_as_node_annotations = kwargs.pop("add_node_age_summaries_as_node_annotations", True)
        self.add_edge_length_summaries_as_edge_attributes = kwargs.pop("add_edge_length_summaries_as_edge_attributes", True)
        self.add_edge_length_summaries_as_edge_annotations = kwargs.pop("add_edge_length_summaries_as_edge_annotations", True)
        self.support_label_decimals = kwargs.pop("support_label_decimals", 4)
        self.support_as_percentages = kwargs.pop("support_as_percentages", False)
        self.support_label_compose_fn = kwargs.pop("support_label_compose_fn", None)
        self.primary_fieldnames = ["support",]
        self.summary_stats_fieldnames = SplitDistribution.SUMMARY_STATS_FIELDNAMES
        # Fallback values used when a split has no recorded summary;
        # list-valued stats get an empty list, everything else 0.0.
        self.no_data_values = {
                'hpd95': [],
                'quant_5_95': [],
                'range': [],
                }
        self.node_age_summaries_fieldnames = list("age_{}".format(f) for f in self.summary_stats_fieldnames)
        self.edge_length_summaries_fieldnames = list("length_{}".format(f) for f in self.summary_stats_fieldnames)
        self.fieldnames = self.primary_fieldnames + self.node_age_summaries_fieldnames + self.edge_length_summaries_fieldnames
        for fieldname in self.fieldnames:
            setattr(self, "{}_attr_name".format(fieldname), kwargs.pop("{}_attr_name".format(fieldname), fieldname))
            setattr(self, "{}_annotation_name".format(fieldname), kwargs.pop("{}_annotation_name".format(fieldname), fieldname))
            setattr(self, "is_{}_annotation_dynamic".format(fieldname), kwargs.pop("is_{}_annotation_dynamic".format(fieldname), True))
        self.minimum_edge_length = kwargs.pop("minimum_edge_length", None)
        self.error_on_negative_edge_lengths = kwargs.pop("error_on_negative_edge_lengths", False)
        if kwargs:
            # Bug fix: the TypeError was previously constructed but never
            # raised, silently accepting bogus keyword arguments.
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))

    def _decorate(self,
            target,
            fieldname,
            value,
            set_attribute,
            set_annotation,
            ):
        """
        Sets ``value`` on ``target`` (a node or edge) as an attribute and/or
        metadata annotation under the configured names for ``fieldname``.
        """
        attr_name = getattr(self, "{}_attr_name".format(fieldname))
        annotation_name = getattr(self, "{}_annotation_name".format(fieldname))
        if set_attribute:
            setattr(target, attr_name, value)
            if set_annotation:
                # Drop any stale annotation before re-adding, so repeated
                # summarizations do not accumulate duplicates.
                target.annotations.drop(name=annotation_name)
                if getattr(self, "is_{}_annotation_dynamic".format(fieldname)):
                    # Dynamic binding: annotation tracks the attribute value.
                    target.annotations.add_bound_attribute(
                        attr_name=attr_name,
                        annotation_name=annotation_name,
                        )
                else:
                    target.annotations.add_new(
                            name=annotation_name,
                            value=value,
                            )
        elif set_annotation:
            target.annotations.drop(name=annotation_name)
            target.annotations.add_new(
                    name=annotation_name,
                    value=value,
                    )

    def summarize_splits_on_tree(self,
            split_distribution,
            tree,
            is_bipartitions_updated=False):
        """
        Decorates ``tree`` with the summaries recorded in
        ``split_distribution``, according to the current configuration.
        Returns the decorated ``tree``.
        """
        if split_distribution.taxon_namespace is not tree.taxon_namespace:
            raise error.TaxonNamespaceIdentityError(split_distribution, tree)
        if not is_bipartitions_updated:
            tree.encode_bipartitions()
        if self.support_label_compose_fn is not None:
            support_label_fn = lambda freq: self.support_label_compose_fn(freq)
        else:
            support_label_fn = lambda freq: "{:.{places}f}".format(freq, places=self.support_label_decimals)
        node_age_summaries = split_distribution.split_node_age_summaries
        edge_length_summaries = split_distribution.split_edge_length_summaries
        split_freqs = split_distribution.split_frequencies
        assert len(self.node_age_summaries_fieldnames) == len(self.summary_stats_fieldnames)
        for node in tree:
            split_bitmask = node.edge.bipartition.split_bitmask
            # Splits never seen in the distribution get support 0.0.
            split_support = split_freqs.get(split_bitmask, 0.0)
            if self.support_as_percentages:
                split_support = split_support * 100
            self._decorate(
                target=node,
                fieldname="support",
                value=split_support,
                set_attribute=self.add_support_as_node_attribute,
                set_annotation=self.add_support_as_node_annotation,
                )
            if self.set_support_as_node_label:
                node.label = support_label_fn(split_support)
            if (self.add_node_age_summaries_as_node_attributes or self.add_node_age_summaries_as_node_annotations) and node_age_summaries:
                for fieldname, stats_fieldname in zip(self.node_age_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not node_age_summaries or split_bitmask not in node_age_summaries:
                        value = no_data_value
                    else:
                        value = node_age_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_node_age_summaries_as_node_attributes,
                        set_annotation=self.add_node_age_summaries_as_node_annotations,
                        )
            if (self.add_edge_length_summaries_as_edge_attributes or self.add_edge_length_summaries_as_edge_annotations) and edge_length_summaries:
                for fieldname, stats_fieldname in zip(self.edge_length_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not edge_length_summaries or split_bitmask not in edge_length_summaries:
                        value = no_data_value
                    else:
                        value = edge_length_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node.edge,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_edge_length_summaries_as_edge_attributes,
                        set_annotation=self.add_edge_length_summaries_as_edge_annotations,
                        )
            if self.set_edge_lengths is None:
                pass
            elif self.set_edge_lengths == "keep":
                pass
            elif self.set_edge_lengths == "support":
                node.edge.length = split_support
            elif self.set_edge_lengths == "clear":
                # Bug fix: previously assigned to undefined name 'edge'
                # (NameError); the intent is to clear this node's edge.
                node.edge.length = None
            elif self.set_edge_lengths in ("mean-age", "median-age"):
                if not node_age_summaries:
                    raise ValueError("Node ages not available")
                if self.set_edge_lengths == "mean-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.age = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.age = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
            elif self.set_edge_lengths in ("mean-length", "median-length"):
                if not edge_length_summaries:
                    raise ValueError("Edge lengths not available")
                if self.set_edge_lengths == "mean-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
                # Bug fix: previously referenced undefined name 'edge'
                # (NameError); clamp this node's freshly set edge length.
                if self.minimum_edge_length is not None and node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
            else:
                raise ValueError(self.set_edge_lengths)
        if self.set_edge_lengths in ("mean-age", "median-age"):
            # Ages were set above; derive edge lengths from them in one pass.
            tree.set_edge_lengths_from_node_ages(
                    minimum_edge_length=self.minimum_edge_length,
                    error_on_negative_edge_lengths=self.error_on_negative_edge_lengths)
        elif self.set_edge_lengths not in ("keep", "clear", None) and self.minimum_edge_length is not None:
            for node in tree:
                if node.edge.length is None:
                    node.edge.length = self.minimum_edge_length
                elif node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
        return tree
###############################################################################
### TreeArray
class TreeArray(
taxonmodel.TaxonNamespaceAssociated,
basemodel.MultiReadable,
):
"""
High-performance collection of tree structures.
    Storage of minimal tree structural information as represented by topology
and edge lengths, minimizing memory and processing time.
This class stores trees as collections of splits and edge lengths. All
other information, such as labels, metadata annotations, etc. will be
discarded. A full |Tree| instance can be reconstructed as needed
from the structural information stored by this class, at the cost of
computation time.
"""
    class IncompatibleTreeArrayUpdate(Exception):
        # Base class for errors raised when merging TreeArray instances
        # whose configurations do not match (see TreeArray.update()).
        pass
    class IncompatibleRootingTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when 'is_rooted_trees' differs between the two arrays.
        pass
    class IncompatibleEdgeLengthsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when 'ignore_edge_lengths' differs between the two arrays.
        pass
    class IncompatibleNodeAgesTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when 'ignore_node_ages' differs between the two arrays.
        pass
    class IncompatibleTreeWeightsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when 'use_tree_weights' differs between the two arrays.
        pass
##############################################################################
## Factory Function
@classmethod
def from_tree_list(cls,
trees,
is_rooted_trees=None,
ignore_edge_lengths=False,
ignore_node_ages=True,
use_tree_weights=True,
ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
is_force_max_age=None,
taxon_label_age_map=None,
is_bipartitions_updated=False,
):
taxon_namespace = trees.taxon_namespace
ta = cls(
taxon_namespace=taxon_namespace,
is_rooted_trees=is_rooted_trees,
ignore_edge_lengths=ignore_edge_lengths,
ignore_node_ages=ignore_node_ages,
use_tree_weights=use_tree_weights,
ultrametricity_precision=ultrametricity_precision,
is_force_max_age=is_force_max_age,
taxon_label_age_map=taxon_label_age_map,
)
ta.add_trees(
trees=trees,
is_bipartitions_updated=is_bipartitions_updated)
return ta
##############################################################################
## Life-Cycle
    def __init__(self,
            taxon_namespace=None,
            is_rooted_trees=None,
            ignore_edge_lengths=False,
            ignore_node_ages=True,
            use_tree_weights=True,
            ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
            is_force_max_age=None,
            taxon_label_age_map=None,
            ):
        """
        Parameters
        ----------
        taxon_namespace : |TaxonNamespace|
            The operational taxonomic unit concept namespace to manage taxon
            references.
        is_rooted_trees : bool
            If not set, then it will be set based on the rooting state of the
            first tree added. If |True|, then trying to add an unrooted tree
            will result in an error. If |False|, then trying to add a rooted
            tree will result in an error.
        ignore_edge_lengths : bool
            If |True|, then edge lengths of splits will not be stored. If
            |False|, then edge lengths will be stored.
        ignore_node_ages : bool
            If |True|, then node ages of splits will not be stored. If
            |False|, then node ages will be stored.
        use_tree_weights : bool
            If |False|, then tree weights will not be used to weight splits.
        ultrametricity_precision : numeric
            Passed through to the underlying |SplitDistribution|.
        is_force_max_age : bool
            Passed through to the underlying |SplitDistribution|.
        taxon_label_age_map : dict
            Passed through to the underlying |SplitDistribution|.
        """
        taxonmodel.TaxonNamespaceAssociated.__init__(self,
                taxon_namespace=taxon_namespace)
        # Configuration
        self._is_rooted_trees = is_rooted_trees
        self.ignore_edge_lengths = ignore_edge_lengths
        self.ignore_node_ages = ignore_node_ages
        self.use_tree_weights = use_tree_weights
        self.default_edge_length_value = 0 # edge.length of |None| gets this value
        self.tree_type = treemodel.Tree
        self.taxon_label_age_map = taxon_label_age_map
        # Storage: parallel lists, one entry per accessioned tree.
        self._tree_split_bitmasks = []
        self._tree_edge_lengths = []
        self._tree_leafset_bitmasks = []
        self._tree_weights = []
        self._split_distribution = SplitDistribution(
                taxon_namespace=self.taxon_namespace,
                ignore_edge_lengths=self.ignore_edge_lengths,
                ignore_node_ages=self.ignore_node_ages,
                ultrametricity_precision=ultrametricity_precision,
                is_force_max_age=is_force_max_age,
                taxon_label_age_map=self.taxon_label_age_map,
                )
##############################################################################
## Book-Keeping
    def _get_is_rooted_trees(self):
        # Read-only: the rooting state is established by the first tree
        # added (see validate_rooting()) and fixed thereafter.
        return self._is_rooted_trees
    is_rooted_trees = property(_get_is_rooted_trees)
    def _get_split_distribution(self):
        # Read-only accessor for the underlying SplitDistribution.
        return self._split_distribution
    split_distribution = property(_get_split_distribution)
def validate_rooting(self, rooting_of_other):
if self._is_rooted_trees is None:
self._is_rooted_trees = rooting_of_other
elif self._is_rooted_trees != rooting_of_other:
if self._is_rooted_trees:
ta = "rooted"
t = "unrooted"
else:
ta = "unrooted"
t = "rooted"
raise error.MixedRootingError("Cannot add {tree_rooting} tree to TreeArray with {tree_array_rooting} trees".format(
tree_rooting=t,
tree_array_rooting=ta))
##############################################################################
## Updating from Another TreeArray
def update(self, other):
if len(self) > 0:
# self.validate_rooting(other._is_rooted_trees)
if self._is_rooted_trees is not other._is_rooted_trees:
raise TreeArray.IncompatibleRootingTreeArrayUpdate("Updating from incompatible TreeArray: 'is_rooted_trees' should be '{}', but is instead '{}'".format(other._is_rooted_trees, self._is_rooted_trees, ))
if self.ignore_edge_lengths is not other.ignore_edge_lengths:
raise TreeArray.IncompatibleEdgeLengthsTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_edge_lengths' is not: {} ".format(other.ignore_edge_lengths, self.ignore_edge_lengths, ))
if self.ignore_node_ages is not other.ignore_node_ages:
raise TreeArray.IncompatibleNodeAgesTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_node_ages' should be '{}', but is instead '{}'".format(other.ignore_node_ages, self.ignore_node_ages))
if self.use_tree_weights is not other.use_tree_weights:
raise TreeArray.IncompatibleTreeWeightsTreeArrayUpdate("Updating from incompatible TreeArray: 'use_tree_weights' should be '{}', but is instead '{}'".format(other.use_tree_weights, self.use_tree_weights))
else:
self._is_rooted_trees = other._is_rooted_trees
self.ignore_edge_lengths = other.ignore_edge_lengths
self.ignore_node_ages = other.ignore_node_ages
self.use_tree_weights = other.use_tree_weights
self._tree_split_bitmasks.extend(other._tree_split_bitmasks)
self._tree_edge_lengths.extend(other._tree_edge_lengths)
self._tree_leafset_bitmasks.extend(other._tree_leafset_bitmasks)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(other._split_distribution)
##############################################################################
## Fundamental Tree Accession
    def add_tree(self,
            tree,
            is_bipartitions_updated=False,
            index=None):
        """
        Adds the structure represented by a |Tree| instance to the
        collection.

        Parameters
        ----------
        tree : |Tree|
            A |Tree| instance. This must have the same rooting state as
            all the other trees accessioned into this collection as well as
            that of ``self.is_rooted_trees``.
        is_bipartitions_updated : bool
            If |False| [default], then the tree will have its splits encoded or
            updated. Otherwise, if |True|, then the tree is assumed to have its
            splits already encoded and updated.
        index : integer
            Insert before index.

        Returns
        -------
        index : int
            The index of the accession.
        s : iterable of splits
            A list of split bitmasks from ``tree``.
        e :
            A list of edge length values from ``tree``.
        """
        if self.taxon_namespace is not tree.taxon_namespace:
            raise error.TaxonNamespaceIdentityError(self, tree)
        self.validate_rooting(tree.is_rooted)
        # Count the tree's splits into the shared SplitDistribution; this
        # also yields the per-split edge lengths and node ages.
        splits, edge_lengths, node_ages = self._split_distribution.count_splits_on_tree(
            tree=tree,
            is_bipartitions_updated=is_bipartitions_updated,
            default_edge_length_value=self.default_edge_length_value)
        # pre-process splits
        splits = tuple(splits)
        # pre-process edge lengths
        if self.ignore_edge_lengths:
            # Preserve the parallel-list invariant: one (None) entry per split.
            # edge_lengths = tuple( [None] * len(splits) )
            edge_lengths = tuple( None for x in range(len(splits)) )
        else:
            assert len(splits) == len(edge_lengths), "Unequal vectors:\n Splits: {}\n Edges: {}\n".format(splits, edge_lengths)
            edge_lengths = tuple(edge_lengths)
        # pre-process weights
        if tree.weight is not None and self.use_tree_weights:
            weight_to_use = float(tree.weight)
        else:
            weight_to_use = 1.0
        # accession info
        if index is None:
            # Append to the end of all parallel storage lists.
            index = len(self._tree_split_bitmasks)
            self._tree_split_bitmasks.append(splits)
            self._tree_leafset_bitmasks.append(tree.seed_node.edge.bipartition.leafset_bitmask)
            self._tree_edge_lengths.append(edge_lengths)
            self._tree_weights.append(weight_to_use)
        else:
            # Insert before the given index in all parallel storage lists.
            self._tree_split_bitmasks.insert(index, splits)
            self._tree_leafset_bitmasks.insert(index,
                    tree.seed_node.edge.bipartition.leafset_bitmask)
            self._tree_edge_lengths.insert(index, edge_lengths)
            self._tree_weights.insert(index, weight_to_use)
        return index, splits, edge_lengths, weight_to_use
def add_trees(self, trees, is_bipartitions_updated=False):
"""
Adds multiple structures represneted by an iterator over or iterable of
|Tree| instances to the collection.
Parameters
----------
trees : iterator over or iterable of |Tree| instances
An iterator over or iterable of |Tree| instances. Thess must
have the same rooting state as all the other trees accessioned into
this collection as well as that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
"""
for tree in trees:
self.add_tree(tree,
is_bipartitions_updated=is_bipartitions_updated)
##############################################################################
## I/O
    def read_from_files(self,
            files,
            schema,
            **kwargs):
        """
        Adds multiple structures from one or more external file sources to the
        collection.

        Parameters
        ----------
        files : iterable of strings and/or file objects
            A list or some other iterable of file paths or file-like objects
            (string elements will be assumed to be paths to files, while all
            other types of elements will be assumed to be file-like
            objects opened for reading).
        schema : string
            The data format of the source. E.g., "nexus", "newick", "nexml".
        \*\*kwargs : keyword arguments
            These will be passed directly to the underlying schema-specific
            reader implementation. A ``tree_offset`` keyword, if given, is
            consumed here and applied per source file (see below).
        """
        if "taxon_namespace" in kwargs:
            if kwargs["taxon_namespace"] is not self.taxon_namespace:
                raise ValueError("TaxonNamespace object passed as keyword argument is not the same as self's TaxonNamespace reference")
            kwargs.pop("taxon_namespace")
        # 'tree_offset' skips the first N trees of *each* source file
        # (e.g., to discard a burn-in).
        target_tree_offset = kwargs.pop("tree_offset", 0)
        tree_yielder = self.tree_type.yield_from_files(
                files=files,
                schema=schema,
                taxon_namespace=self.taxon_namespace,
                **kwargs)
        current_source_index = None
        current_tree_offset = None
        for tree_idx, tree in enumerate(tree_yielder):
            current_yielder_index = tree_yielder.current_file_index
            if current_source_index != current_yielder_index:
                # Crossed into a new source file: reset the per-file offset.
                current_source_index = current_yielder_index
                current_tree_offset = 0
            if current_tree_offset >= target_tree_offset:
                self.add_tree(tree=tree, is_bipartitions_updated=False)
            current_tree_offset += 1
def _parse_and_add_from_stream(self,
stream,
schema,
**kwargs):
cur_size = len(self._tree_split_bitmasks)
self.read_from_files(files=[stream], schema=schema, **kwargs)
new_size = len(self._tree_split_bitmasks)
return new_size - cur_size
    def read(self, **kwargs):
        """
        Add |Tree| objects to existing |TreeList| from data source providing
        one or more collections of trees.

        **Mandatory Source-Specification Keyword Argument (Exactly One Required):**

            - **file** (*file*) -- File or file-like object of data opened for reading.
            - **path** (*str*) -- Path to file of data.
            - **url** (*str*) -- URL of data.
            - **data** (*str*) -- Data given directly.

        **Mandatory Schema-Specification Keyword Argument:**

            - **schema** (*str*) -- Identifier of format of data given by the
              "``file``", "``path``", "``data``", or "``url``" argument
              specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
              </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
              "|Schemas|" for more details.

        **Optional General Keyword Arguments:**

            - **collection_offset** (*int*) -- 0-based index of tree block or
              collection in source to be parsed. If not specified then the
              first collection (offset = 0) is assumed.
            - **tree_offset** (*int*) -- 0-based index of first tree within the
              collection specified by ``collection_offset`` to be parsed (i.e.,
              skipping the first ``tree_offset`` trees). If not
              specified, then the first tree (offset = 0) is assumed (i.e., no
              trees within the specified collection will be skipped). Use this
              to specify, e.g. a burn-in.
            - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
              then unsupported or unrecognized keyword arguments will not
              result in an error. Default is |False|: unsupported keyword
              arguments will result in an error.

        **Optional Schema-Specific Keyword Arguments:**

            These provide control over how the data is interpreted and
            processed, and supported argument names and values depend on
            the schema as specified by the value passed as the "``schema``"
            argument. See "|Schemas|" for more details.

        **Examples:**

        ::

            tree_array = dendropy.TreeArray()
            tree_array.read(
                    file=open('treefile.tre', 'rU'),
                    schema="newick",
                    tree_offset=100)
            tree_array.read(
                    path='sometrees.nexus',
                    schema="nexus",
                    collection_offset=2,
                    tree_offset=100)
            tree_array.read(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick")
            tree_array.read(
                    url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                    schema="nexus")
        """
        # Delegates source dispatch (file/path/url/data) to the shared
        # MultiReadable machinery, which calls back into
        # _parse_and_add_from_stream() above.
        return basemodel.MultiReadable._read_from(self, **kwargs)
##############################################################################
## Container (List) Interface
def append(tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
def insert(index, tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
index : integer
Insert before index.
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
Returns
-------
index : int
The index of the accession.
s : iterable of splits
A list of split bitmasks from ``tree``.
e :
A list of edge length values ``tree``.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
index=index)
def extend(self, tree_array):
"""
Accession of data from ``tree_array`` to self.
Parameters
----------
tree_array : |TreeArray|
A |TreeArray| instance from which to add data.
"""
assert self.taxon_namespace is tree_array.taxon_namespace
assert self._is_rooted_trees is tree_array._is_rooted_trees
assert self.ignore_edge_lengths is tree_array.ignore_edge_lengths
assert self.ignore_node_ages is tree_array.ignore_node_ages
assert self.use_tree_weights is tree_array.use_tree_weights
self._tree_split_bitmasks.extend(tree_array._tree_split_bitmasks)
self._tree_edge_lengths.extend(tree_array._tree_edge_lengths)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(tree_array._split_distribution)
return self
def __iadd__(self, tree_array):
"""
Accession of data from ``tree_array`` to self.
Parameters
----------
tree_array : |TreeArray|
A |TreeArray| instance from which to add data.
"""
return self.extend(tree_array)
def __add__(self, other):
"""
Creates and returns new |TreeArray|.
Parameters
----------
other : iterable of |Tree| objects
Returns
-------
tlist : |TreeArray| object
|TreeArray| object containing clones of |Tree| objects
in ``self`` and ``other``.
"""
ta = TreeArray(
taxon_namespace=self.taxon_namespace,
is_rooted_trees=self._is_rooted_trees,
ignore_edge_lengths=self.ignore_edge_lengths,
ignore_node_ages=self.ignore_node_ages,
use_tree_weights=self.use_tree_weights,
ultrametricity_precision=self._split_distribution.ultrametricity_precision,
)
ta.default_edge_length_value = self.default_edge_length_value
ta.tree_type = self.tree_type
ta += self
ta += other
return ta
def __contains__(self, splits):
# expensive!!
return tuple(splits) in self._tree_split_bitmasks
    def __delitem__(self, index):
        # Deletion is unsupported: removing a "tree" would also require
        # backing its splits out of the underlying SplitDistribution
        # (counts, weights, edge-length/age records) -- see the sketch
        # below for why this is non-trivial.
        raise NotImplementedError
        # expensive!!
        # tree_split_bitmasks = self._trees_splits[index]
        ### TODO: remove this "tree" from underlying splits distribution
        # for split in tree_split_bitmasks:
        #   self._split_distribution.split_counts[split] -= 1
        # etc.
        # becomes complicated because tree weights need to be updated etc.
        # del self._tree_split_bitmasks[index]
        # del self._tree_edge_lengths[index]
        # return
def __iter__(self):
"""
Yields pairs of (split, edge_length) from the store.
"""
for split, edge_length in zip(self._tree_split_bitmasks, self._tree_edge_lengths):
yield split, edge_length
    def __reversed__(self):
        # Reverse iteration is not supported.
        raise NotImplementedError
    def __len__(self):
        # Number of "trees" accessioned into the store.
        return len(self._tree_split_bitmasks)
    def __getitem__(self, index):
        # Direct indexing is not supported; use
        # get_split_bitmask_and_edge_tuple() instead.
        raise NotImplementedError
        # """
        # Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
        # to the "tree" at ``index``.
        # """
        # return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
    def __setitem__(self, index, value):
        # Item assignment is not supported.
        raise NotImplementedError
    def clear(self):
        # Not supported; the statements below are unreachable scaffolding
        # for a possible future implementation.
        raise NotImplementedError
        self._tree_split_bitmasks = []
        self._tree_edge_lengths = []
        self._tree_leafset_bitmasks = []
        self._split_distribution.clear()
    def index(self, splits):
        # Not supported; the statement below is unreachable scaffolding.
        raise NotImplementedError
        return self._tree_split_bitmasks.index(splits)
    def pop(self, index=-1):
        # Not supported.
        raise NotImplementedError
    def remove(self, tree):
        # Not supported.
        raise NotImplementedError
    def reverse(self):
        # Not supported.
        raise NotImplementedError
    def sort(self, key=None, reverse=False):
        # Not supported.
        raise NotImplementedError
##############################################################################
## Accessors/Settors
def get_split_bitmask_and_edge_tuple(self, index):
"""
Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
to the "tree" at ``index``.
"""
return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
##############################################################################
## Calculations
    def calculate_log_product_of_split_supports(self,
            include_external_splits=False,
            ):
        """
        Calculates the log product of split support for each of the trees in
        the collection.

        Parameters
        ----------
        include_external_splits : bool
            If |True|, then non-internal split posteriors will be included in
            the score. Defaults to |False|: these are skipped. This should only
            make a difference when dealing with splits collected from trees of
            different leaf sets.

        Returns
        -------
        s : tuple(list[numeric], integer)
            Returns a tuple, with the first element being the list of scores
            and the second being the index of the highest score. The element order
            corresponds to the trees accessioned in the collection.
        """
        # Parallel-list invariant: one leafset bitmask per split vector.
        assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
        scores = []
        max_score = None
        max_score_tree_idx = None
        split_frequencies = self._split_distribution.split_frequencies
        for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
            log_product_of_split_support = 0.0
            for split_bitmask in split_bitmasks:
                if (include_external_splits
                        or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
                        or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
                        ):
                    split_support = split_frequencies.get(split_bitmask, 0.0)
                    # Zero-support splits are skipped rather than
                    # contributing log(0) = -inf to the score.
                    if split_support:
                        log_product_of_split_support += math.log(split_support)
            # Strict '<' means ties resolve to the earliest-indexed tree.
            if max_score is None or max_score < log_product_of_split_support:
                max_score = log_product_of_split_support
                max_score_tree_idx = tree_idx
            scores.append(log_product_of_split_support)
        return scores, max_score_tree_idx
def maximum_product_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the product of split supports, also
known as the "Maximum Clade Credibility Tree" or MCCT.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mcct_tree : Tree
Tree that maximizes the product of split supports.
"""
scores, max_score_tree_idx = self.calculate_log_product_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs)
tree.log_product_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
def calculate_sum_of_split_supports(self,
include_external_splits=False,
):
"""
Calculates the *sum* of split support for all trees in the
collection.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : tuple(list[numeric], integer)
Returns a tuple, with the first element being the list of scores
and the second being the index of the highest score. The element order
corresponds to the trees accessioned in the collection.
"""
assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
scores = []
max_score = None
max_score_tree_idx = None
split_frequencies = self._split_distribution.split_frequencies
for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
sum_of_support = 0.0
for split_bitmask in split_bitmasks:
if (include_external_splits
or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
):
split_support = split_frequencies.get(split_bitmask, 0.0)
sum_of_support += split_support
if max_score is None or max_score < sum_of_support:
max_score = sum_of_support
max_score_tree_idx = tree_idx
scores.append(sum_of_support)
return scores, max_score_tree_idx
def maximum_sum_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the *sum* of split supports.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mst_tree : Tree
Tree that maximizes the sum of split supports.
"""
scores, max_score_tree_idx = self.calculate_sum_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs
)
tree.sum_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
def collapse_edges_with_less_than_minimum_support(self,
tree,
min_freq=constants.GREATER_THAN_HALF,
):
return self.split_distribution.collapse_edges_with_less_than_minimum_support(
tree=tree,
min_freq=min_freq)
def consensus_tree(self,
min_freq=constants.GREATER_THAN_HALF,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Returns a consensus tree from splits in ``self``.
Parameters
----------
min_freq : real
The minimum frequency of a split in this distribution for it to be
added to the tree.
is_rooted : bool
Should tree be rooted or not? If *all* trees counted for splits are
explicitly rooted or unrooted, then this will default to |True| or
|False|, respectively. Otherwise it defaults to |None|.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
Returns
-------
t : consensus tree
"""
tree = self._split_distribution.consensus_tree(
min_freq=min_freq,
is_rooted=self.is_rooted_trees,
summarize_splits=summarize_splits,
**split_summarization_kwargs
)
# return self._split_distribution.consensus_tree(*args, **kwargs)
return tree
##############################################################################
## Mapping of Split Support
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**kwargs):
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
**kwargs
)
##############################################################################
## Tree Reconstructions
def restore_tree(self,
index,
summarize_splits_on_tree=False,
**split_summarization_kwargs
):
split_bitmasks = self._tree_split_bitmasks[index]
if self.ignore_edge_lengths:
split_edge_lengths = None
else:
assert len(self._tree_split_bitmasks) == len(self._tree_edge_lengths)
edge_lengths = self._tree_edge_lengths[index]
split_edge_lengths = dict(zip(split_bitmasks, edge_lengths))
tree = self.tree_type.from_split_bitmasks(
split_bitmasks=split_bitmasks,
taxon_namespace=self.taxon_namespace,
is_rooted=self._is_rooted_trees,
split_edge_lengths=split_edge_lengths,
)
# if update_bipartitions:
# tree.encode_bipartitions()
if summarize_splits_on_tree:
split_summarization_kwargs["is_bipartitions_updated"] = True
self._split_distribution.summarize_splits_on_tree(
tree=tree,
**split_summarization_kwargs)
return tree
##############################################################################
## Topology Frequencies
def split_bitmask_set_frequencies(self):
"""
Returns a dictionary with keys being sets of split bitmasks and values
being the frequency of occurrence of trees represented by those split
bitmask sets in the collection.
"""
split_bitmask_set_count_map = collections.Counter()
assert len(self._tree_split_bitmasks) == len(self._tree_weights)
for split_bitmask_set, weight in zip(self._tree_split_bitmasks, self._tree_weights):
split_bitmask_set_count_map[frozenset(split_bitmask_set)] += (1.0 * weight)
split_bitmask_set_freqs = {}
normalization_weight = self._split_distribution.calc_normalization_weight()
# print("===> {}".format(normalization_weight))
for split_bitmask_set in split_bitmask_set_count_map:
split_bitmask_set_freqs[split_bitmask_set] = split_bitmask_set_count_map[split_bitmask_set] / normalization_weight
return split_bitmask_set_freqs
def bipartition_encoding_frequencies(self):
"""
Returns a dictionary with keys being bipartition encodings of trees
(as ``frozenset`` collections of |Bipartition| objects) and
values the frequency of occurrence of trees represented by that
encoding in the collection.
"""
# split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
# bipartition_encoding_freqs = {}
# for split_bitmask_set, freq in split_bitmask_set_freqs.items():
# bipartition_encoding = []
# inferred_leafset = max(split_bitmask_set)
# for split_bitmask in split_bitmask_set:
# bipartition = treemodel.Bipartition(
# bitmask=split_bitmask,
# tree_leafset_bitmask=inferred_leafset,
# is_rooted=self._is_rooted_trees,
# is_mutable=False,
# compile_bipartition=True,
# )
# bipartition_encoding.append(bipartition)
# bipartition_encoding_freqs[frozenset(bipartition_encoding)] = freq
# return bipartition_encoding_freqs
bipartition_encoding_freqs = {}
topologies = self.topologies()
for tree in topologies:
bipartition_encoding_freqs[ frozenset(tree.encode_bipartitions()) ] = tree.frequency
return bipartition_encoding_freqs
def topologies(self,
sort_descending=None,
frequency_attr_name="frequency",
frequency_annotation_name="frequency",
):
"""
Returns a |TreeList| instance containing the reconstructed tree
topologies (i.e. |Tree| instances with no edge weights) in the
collection, with the frequency added as an attributed.
Parameters
----------
sort_descending : bool
If |True|, then topologies will be sorted in *descending* frequency
order (i.e., topologies with the highest frequencies will be listed
first). If |False|, then they will be sorted in *ascending*
frequency. If |None| (default), then they will not be sorted.
frequency_attr_name : str
Name of attribute to add to each |Tree| representing
the frequency of that topology in the collection. If |None|
then the attribute will not be added.
frequency_annotation_name : str
Name of annotation to add to the annotations of each |Tree|,
representing the frequency of that topology in the collection. The
value of this annotation will be dynamically-bound to the attribute
specified by ``frequency_attr_name`` unless that is |None|. If
``frequency_annotation_name`` is |None| then the annotation will not
be added.
"""
if sort_descending is not None and frequency_attr_name is None:
raise ValueError("Attribute needs to be set on topologies to enable sorting")
split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
topologies = TreeList(taxon_namespace=self.taxon_namespace)
for split_bitmask_set, freq in split_bitmask_set_freqs.items():
tree = self.tree_type.from_split_bitmasks(
split_bitmasks=split_bitmask_set,
taxon_namespace=self.taxon_namespace,
is_rooted=self._is_rooted_trees,
)
if frequency_attr_name is not None:
setattr(tree, frequency_attr_name, freq)
if frequency_annotation_name is not None:
tree.annotations.add_bound_attribute(
attr_name=frequency_attr_name,
annotation_name=frequency_annotation_name,
)
else:
tree.annotations.add_new(
frequency_annotation_name,
freq,
)
topologies.append(tree)
if sort_descending is not None:
topologies.sort(key=lambda t: getattr(t, frequency_attr_name), reverse=sort_descending)
return topologies
|
#! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
This module handles the core definition of classes that model collections of
trees.
"""
import collections
import math
import copy
import sys
from dendropy.utility import container
from dendropy.utility import error
from dendropy.utility import bitprocessing
from dendropy.utility import deprecate
from dendropy.utility import constants
from dendropy.calculate import statistics
from dendropy.datamodel import basemodel
from dendropy.datamodel import taxonmodel
from dendropy.datamodel import treemodel
from dendropy import dataio
##############################################################################
### TreeList
class TreeList(
taxonmodel.TaxonNamespaceAssociated,
basemodel.Annotable,
basemodel.Deserializable,
basemodel.MultiReadable,
basemodel.Serializable,
basemodel.DataObject):
"""
A collection of |Tree| objects, all referencing the same "universe" of
    operational taxonomic unit concepts through the same |TaxonNamespace|
object reference.
"""
    def _parse_and_create_from_stream(cls,
            stream,
            schema,
            collection_offset=None,
            tree_offset=None,
            **kwargs):
        """
        Construct a new |TreeList| object and populate it with trees from
        the file-like object ``stream``.

        Notes
        -----
        *All* operational taxonomic unit concepts in the data source will be
        included in the |TaxonNamespace| object associated with the new
        |TreeList| object and its contained |Tree| objects, even those not
        associated with trees or the particular trees being retrieved.

        Note also that even when ``collection_offset`` and ``tree_offset``
        are specified to restrict the trees returned, the *entire* data
        source is still parsed and processed; the restriction is a
        convenience, not an efficiency gain.

        Parameters
        ----------
        stream : file or file-like object
            Source of data.
        schema : string
            Identifier of format of data in ``stream``.
        collection_offset : integer or None
            0-based index of the collection of trees to parse. If |None|,
            all tree collections are coerced into this single |TreeList|.
            Negative offsets work like negative list indexes (e.g., -1 is
            the last collection). Formats with no concept of multiple tree
            collections (e.g. NEWICK) are treated as single-collection
            sources (only -1 or 0 are acceptable). IndexError is raised if
            the offset is equal to or greater than the number of
            collections.
        tree_offset : integer or None
            0-based index of the first tree within the selected collection
            at which to begin reading (i.e., the number of leading trees to
            skip; use for e.g. a burn-in). Negative offsets work like
            negative list indexes (e.g., -10 retrieves the last 10 trees).
            IndexError is raised if the offset is equal to or greater than
            the number of trees in the collection. If |None| (default), all
            trees are retained.
        \\*\\*kwargs : keyword arguments
            Arguments to customize parsing and accession of |Tree| objects,
            including schema-specific handling. Recognized here:

                * ``label``: label of the new |TreeList|.
                * ``taxon_namespace``: |TaxonNamespace| to attach; *all*
                  operational taxonomic unit concepts in the data source are
                  accessioned into it.
                * ``tree_list``: **SPECIAL** -- an existing |TreeList| to
                  populate and return instead of creating a new instance.

            All other keyword arguments are passed to the schema-specific
            reader.

        Returns
        -------
        A |TreeList| object.
        """
        # these must be pulled before passing the kwargs
        # down to the reader
        tree_list = kwargs.pop("tree_list", None)
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None)
        label = kwargs.pop("label", None)
        # get the reader
        reader = dataio.get_reader(schema, **kwargs)
        # Accommodate an existing TreeList object being passed
        if tree_list is None:
            tree_list = cls(label=label, taxon_namespace=taxon_namespace)
        # a tree offset without a collection offset implies the first
        # collection
        if collection_offset is None and tree_offset is not None:
            collection_offset = 0
        if collection_offset is None:
            # coerce all tree products into this list
            reader.read_tree_lists(
                        stream=stream,
                        taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                        tree_list_factory=tree_list._tree_list_pseudofactory,
                        global_annotations_target=None)
        else:
            # parse each collection into its own TreeList, then extract the
            # requested one
            tree_lists = reader.read_tree_lists(
                        stream=stream,
                        taxon_namespace_factory=tree_list._taxon_namespace_pseudofactory,
                        tree_list_factory=tree_list.__class__,
                        global_annotations_target=None)
            # negative offsets are allowed and behave like negative list
            # indexes
            if collection_offset >= len(tree_lists):
                raise IndexError("Collection offset out of range: {} (number of collections = {}, maximum valid collection offset = {})".format(collection_offset, len(tree_lists), len(tree_lists)-1))
            target_tree_list = tree_lists[collection_offset]
            tree_list.copy_annotations_from(target_tree_list)
            if tree_offset is not None:
                if tree_offset >= len(target_tree_list):
                    raise IndexError("Tree offset out of range: {} (number of trees in source = {}, maximum valid tree offset = {})".format(tree_offset, len(target_tree_list), len(target_tree_list)-1))
                for tree in target_tree_list[tree_offset:]:
                    tree_list._trees.append(tree)
            else:
                for tree in target_tree_list:
                    tree_list._trees.append(tree)
        return tree_list
    # legacy (pre-decorator) idiom for declaring a classmethod
    _parse_and_create_from_stream = classmethod(_parse_and_create_from_stream)
    @classmethod
    def get(cls, **kwargs):
        """
        Instantiate and return a *new* |TreeList| object from a data source.

        **Mandatory Source-Specification Keyword Argument (Exactly One Required):**

            - **file** (*file*) -- File or file-like object of data opened for reading.
            - **path** (*str*) -- Path to file of data.
            - **url** (*str*) -- URL of data.
            - **data** (*str*) -- Data given directly.

        **Mandatory Schema-Specification Keyword Argument:**

            - **schema** (*str*) -- Identifier of format of data given by the
              "``file``", "``path``", "``data``", or "``url``" argument
              specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
              </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
              "|Schemas|" for more details.

        **Optional General Keyword Arguments:**

            - **label** (*str*) -- Name or identifier to be assigned to the new
              object; if not given, will be assigned the one specified in the
              data source, or |None| otherwise.
            - **taxon_namespace** (|TaxonNamespace|) -- The |TaxonNamespace|
              instance to use to :doc:`manage the taxon names </primer/taxa>`.
              If not specified, a new one will be created.
            - **collection_offset** (*int*) -- 0-based index of tree block or
              collection in source to be parsed. If not specified then the
              first collection (offset = 0) is assumed.
            - **tree_offset** (*int*) -- 0-based index of first tree within the
              collection specified by ``collection_offset`` to be parsed (i.e.,
              skipping the first ``tree_offset`` trees). If not
              specified, then the first tree (offset = 0) is assumed (i.e., no
              trees within the specified collection will be skipped). Use this
              to specify, e.g. a burn-in.
            - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
              then unsupported or unrecognized keyword arguments will not
              result in an error. Default is |False|: unsupported keyword
              arguments will result in an error.

        **Optional Schema-Specific Keyword Arguments:**

            These provide control over how the data is interpreted and
            processed, and supported argument names and values depend on
            the schema as specified by the value passed as the "``schema``"
            argument. See "|Schemas|" for more details.

        **Examples:**

        ::

            tlst1 = dendropy.TreeList.get(
                    file=open('treefile.tre', 'rU'),
                    schema="newick")
            tlst2 = dendropy.TreeList.get(
                    path='sometrees.nexus',
                    schema="nexus",
                    collection_offset=2,
                    tree_offset=100)
            tlst3 = dendropy.TreeList.get(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick")
            tree4 = dendropy.TreeList.get(
                    url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                    schema="nexus")

        """
        # All source/schema resolution is handled by the shared
        # Deserializable machinery.
        return cls._get_from(**kwargs)
DEFAULT_TREE_TYPE = treemodel.Tree
def tree_factory(cls, *args, **kwargs):
"""
Creates and returns a |Tree| of a type that this list understands how to
manage.
Deriving classes can override this to provide for custom Tree-type
object lists. You can simple override the class-level variable
`DEFAULT_TREE_TYPE` in your derived class if the constructor signature
of the alternate tree type is the same as |Tree|.
If you want to have a TreeList *instance* that generates
custom trees (i.e., as opposed to a TreeList-ish *class* of instances),
set the ``tree_type`` attribute of the TreeList instance.
Parameters
----------
\*args : positional arguments
Passed directly to constructor of |Tree|.
\*\*kwargs : keyword arguments
Passed directly to constructor of |Tree|.
Returns
-------
A |Tree| object.
"""
tree = cls.DEFAULT_TREE_TYPE(*args, **kwargs)
return tree
tree_factory = classmethod(tree_factory)
###########################################################################
### Lifecycle and Identity
    def __init__(self, *args, **kwargs):
        """
        Constructs a new |TreeList| object, populating it with any iterable
        container with Tree object members passed as unnamed argument, or
        from a data source if ``stream`` and ``schema`` are passed.

        If passed an iterable container, the objects in that container must
        be of type |Tree| (or derived). If the container is of type
        |TreeList|, then, because each |Tree| object must have the same
        |TaxonNamespace| reference as the containing |TreeList|, the trees
        in the container passed as an initialization argument will be
        **deep**-copied (except for associated |TaxonNamespace| and |Taxon|
        objects, which will be shallow-copied). If the container is any
        other type of iterable, then the |Tree| objects will be
        **shallow**-copied.

        |TreeList| objects can directly thus be instantiated in the
        following ways::

            from dendropy import TaxonNamespace, Tree, TreeList

            # instantiate an empty tree list
            tlst1 = TreeList()

            # TreeList objects can be instantiated from an external data
            # source using the 'get()' factory class method
            tlst2 = TreeList.get(file=open('treefile.tre', 'rU'), schema="newick")
            tlst3 = TreeList.get(path='sometrees.nexus', schema="nexus")
            tlst4 = TreeList.get(data="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # can also call `read()` on a TreeList object; each read adds
            # (appends) the tree(s) found to the TreeList
            tlst5 = TreeList()
            tlst5.read(file=open('boot1.tre', 'rU'), schema="newick")
            tlst5.read(path="boot3.tre", schema="newick")
            tlst5.read(value="((A,B),(C,D));((A,C),(B,D));", schema="newick")

            # populated from list of Tree objects
            tlist6_1 = Tree.get(
                    data="((A,B),(C,D))",
                    schema="newick")
            tlist6_2 = Tree.get(
                    data="((A,C),(B,D))",
                    schema="newick")
            tlist6 = TreeList([tlist6_1, tlist6_2])

            # passing keywords to underlying tree parser
            tlst8 = TreeList.get(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick",
                    taxon_namespace=tlst3.taxon_namespace,
                    rooting="force-rooted",
                    extract_comment_metadata=True,
                    store_tree_weights=False,
                    preserve_underscores=True)

            # Subsets of trees can be read; note that in most cases the
            # entire data source is still parsed, so this is a convenience
            # rather than an efficiency gain:

            # skip the *first* 100 trees in the *first* (offset=0) collection
            trees = TreeList.get(
                    path="mcmc.tre",
                    schema="newick",
                    collection_offset=0,
                    tree_offset=100)

            # get the *last* 10 trees in the *second* (offset=1) collection
            trees = TreeList.get(
                    path="mcmc.tre",
                    schema="newick",
                    collection_offset=1,
                    tree_offset=-10)

            # Slices give shallow-copy: trees are references
            tlst4copy0a = t4[:]
            assert tlst4copy0a[0] is t4[0]

            # 'Taxon-namespace-scoped' copy:
            # i.e., deep-copied objects, but taxa and taxon namespace
            # are copied as references
            tlst4copy1a = TreeList(t4)
            assert tlst4copy1a[0] is not tlst4[0]                           # True
            assert tlst4copy1a.taxon_namespace is tlst4.taxon_namespace     # True

        """
        if len(args) > 1:
            # only allow 1 positional argument
            raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)
        elif len(args) == 1 and isinstance(args[0], TreeList):
            # taxon-namespace-scoped clone of the source TreeList; all
            # relevant kwargs ("label", "taxon_namespace") are consumed there
            self._clone_from(args[0], kwargs)
        else:
            # fresh instance: initialize both bases explicitly, then
            # (optionally) shallow-copy trees from a plain iterable
            basemodel.DataObject.__init__(self, label=kwargs.pop("label", None))
            taxonmodel.TaxonNamespaceAssociated.__init__(self,
                    taxon_namespace=taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None))
            self.tree_type = kwargs.pop("tree_type", self.__class__.DEFAULT_TREE_TYPE)
            self._trees = []        # backing store for contained Tree objects
            self.comments = []      # free-form comment strings
            if len(args) == 1:
                for aidx, a in enumerate(args[0]):
                    if not isinstance(a, self.tree_type):
                        raise ValueError("Cannot add object not of 'Tree' type to 'TreeList'")
                    # append() handles taxon-namespace migration
                    self.append(a)
        # any kwargs not consumed above are errors
        if kwargs:
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))
    def __hash__(self):
        # Identity-based hash: TreeList is mutable, so hashing by contents
        # would be unstable. NOTE(review): this is inconsistent with __eq__
        # (two equal lists can hash differently) -- presumably intentional
        # for use as dict keys by identity; confirm.
        return id(self)
def __eq__(self, other):
return (
isinstance(other, TreeList)
and (self.taxon_namespace is other.taxon_namespace)
and (self._trees == other._trees)
)
    def _clone_from(self, tree_list, kwargs_dict):
        # Taxon-namespace-scoped clone: deep-copy ``tree_list`` into
        # ``self``, but pre-seed the deepcopy memo so that TaxonNamespace
        # and Taxon objects are shared (or mapped to counterparts in an
        # explicitly supplied namespace) rather than duplicated.
        memo = {}
        taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs_dict, tree_list.taxon_namespace)
        memo[id(tree_list.taxon_namespace)] = taxon_namespace
        if taxon_namespace is not tree_list.taxon_namespace:
            # different target namespace: map each source taxon to an
            # equivalently-labeled taxon there, created on demand
            for t1 in tree_list.taxon_namespace:
                t2 = taxon_namespace.require_taxon(label=t1.label)
                memo[id(t1)] = t2
        else:
            # same namespace: taxa are shared by reference
            for t1 in tree_list.taxon_namespace:
                memo[id(t1)] = t1
        t = copy.deepcopy(tree_list, memo)
        # adopt the copy's state wholesale
        self.__dict__ = t.__dict__
        self.label = kwargs_dict.pop("label", tree_list.label)
        return self
def __copy__(self):
other = TreeList(label=self.label, taxon_namespace=self.taxon_namespace)
other._trees = list(self._trees)
memo = {}
memo[id(self)] = other
other.deep_copy_annotations_from(self, memo)
return other
def taxon_namespace_scoped_copy(self, memo=None):
if memo is None:
memo = {}
# this populates ``memo`` with references to the
# the TaxonNamespace and Taxon objects
self.taxon_namespace.populate_memo_for_taxon_namespace_scoped_copy(memo)
return self.__deepcopy__(memo=memo)
    def __deepcopy__(self, memo=None):
        # Delegate to Annotable so that annotations (and bound attributes)
        # are deep-copied consistently with the rest of the library.
        return basemodel.Annotable.__deepcopy__(self, memo=memo)
###########################################################################
### Representation
def __str__(self):
return "<TreeList {} '{}': [{}]>".format(hex(id(self)), self.label, ", ".join(repr(i) for i in self._trees))
###########################################################################
### Data I/O
def _taxon_namespace_pseudofactory(self, **kwargs):
"""
Dummy factory to coerce all |TaxonNamespace| objects required when
parsing a data source to reference ``self.taxon_namespace``.
"""
if "label" in kwargs and kwargs["label"] is not None and self.taxon_namespace.label is None:
self.taxon_namespace.label = kwargs["label"]
return self.taxon_namespace
def _tree_list_pseudofactory(self, **kwargs):
"""
Dummy factory to coerce all |TreeList| objects required when
parsing a data source to reference ``self``.
"""
if "label" in kwargs and kwargs["label"] is not None and self.label is None:
self.label = kwargs["label"]
return self
    def _parse_and_add_from_stream(self,
            stream,
            schema,
            collection_offset=None,
            tree_offset=None,
            **kwargs):
        """
        Parses |Tree| objects from a data source and adds them to this
        collection.

        Notes
        -----
        *All* operational taxonomic unit concepts in the data source will be
        included in the |TaxonNamespace| object associated with this
        |TreeList| object and its contained |Tree| objects, even those not
        associated with trees or the particular trees being retrieved.

        Note also that even when ``collection_offset`` and ``tree_offset``
        are specified to restrict the trees read, the *entire* data source
        is still parsed and processed; the restriction is a convenience,
        not an efficiency gain.

        Parameters
        ----------
        stream : file or file-like object
            Source of data.
        schema : string
            Identifier of format of data in ``stream``.
        collection_offset : integer or None
            0-based index of the collection of trees to parse. If |None|,
            all tree collections are added to this list. Negative offsets
            work like negative list indexes (e.g., -1 is the last
            collection). Formats with no concept of multiple tree
            collections (e.g. NEWICK) are treated as single-collection
            sources (only -1 or 0 are acceptable). IndexError is raised if
            the offset is equal to or greater than the number of
            collections.
        tree_offset : integer or None
            0-based index of the first tree within the selected collection
            at which to begin reading (i.e., the number of leading trees to
            skip; use for e.g. a burn-in). Negative offsets work like
            negative list indexes (e.g., -10 retrieves the last 10 trees).
            IndexError is raised if the offset is equal to or greater than
            the number of trees in the collection. If |None| (default), all
            trees are retained.
        \\*\\*kwargs : keyword arguments
            Arguments to customize parsing and accession of |Tree| objects;
            passed to the underlying schema-specific reader. General
            (schema-agnostic) keyword arguments include:

                * ``rooted``: default rooting interpretation of the trees.
                * ``edge_length_type``: type of the edge lengths (int or
                  float; defaults to 'float').

            Other keyword arguments are available depending on the schema;
            see the specific schema handlers (e.g., `NewickReader`,
            `NexusReader`, `NexmlReader`) for details.

        Returns
        -------
        n : ``int``
            The number of |Tree| objects read.
        """
        # reading into an existing list must keep its namespace
        if "taxon_namespace" in kwargs and kwargs['taxon_namespace'] is not self.taxon_namespace:
            raise TypeError("Cannot change ``taxon_namespace`` when reading into an existing TreeList")
        kwargs["taxon_namespace"] = self.taxon_namespace
        # "tree_list" directs the class-level parser to populate self
        # in place instead of creating a new instance
        kwargs["tree_list"] = self
        cur_size = len(self._trees)
        TreeList._parse_and_create_from_stream(
                stream=stream,
                schema=schema,
                collection_offset=collection_offset,
                tree_offset=tree_offset,
                **kwargs)
        new_size = len(self._trees)
        return new_size - cur_size
def read(self, **kwargs):
    """
    Add |Tree| objects to this |TreeList| from a data source providing
    one or more collections of trees.

    Exactly one source-specification keyword argument is mandatory:

    - **file** (*file*) -- File or file-like object opened for reading.
    - **path** (*str*) -- Path to a file of data.
    - **url** (*str*) -- URL of data.
    - **data** (*str*) -- Data given directly as a string.

    A schema-specification keyword argument is also mandatory:

    - **schema** (*str*) -- Identifier of the format of the data given by
      the "``file``", "``path``", "``data``", or "``url``" argument:
      ":doc:`newick </schemas/newick>`", ":doc:`nexus </schemas/nexus>`",
      or ":doc:`nexml </schemas/nexml>`". See "|Schemas|" for details.

    Optional general keyword arguments:

    - **collection_offset** (*int*) -- 0-based index of the tree block or
      collection in the source to be parsed (defaults to the first,
      offset = 0).
    - **tree_offset** (*int*) -- 0-based index of the first tree within
      the selected collection to be parsed, i.e. the number of leading
      trees to skip (e.g. a burn-in). Defaults to 0.
    - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
      unsupported or unrecognized keyword arguments do not raise an
      error. Default is |False|: they do.

    Optional schema-specific keyword arguments control how the data is
    interpreted and processed; supported names and values depend on the
    value passed as "``schema``". See "|Schemas|" for details.

    Examples
    --------
    ::

        tlist = dendropy.TreeList()
        tlist.read(
                file=open('treefile.tre', 'rU'),
                schema="newick",
                tree_offset=100)
        tlist.read(
                path='sometrees.nexus',
                schema="nexus",
                collection_offset=2,
                tree_offset=100)
        tlist.read(
                data="((A,B),(C,D));((A,C),(B,D));",
                schema="newick")
        tlist.read(
                url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                schema="nexus")

    Returns
    -------
    n : int
        The number of |Tree| objects read.
    """
    # Delegate all source handling to the shared multi-read machinery.
    return basemodel.MultiReadable._read_from(self, **kwargs)
def _format_and_write_to_stream(self, stream, schema, **kwargs):
    """
    Write ``self`` out in ``schema`` format to the file-like object
    ``stream``.

    Parameters
    ----------
    stream : file or file-like object
        Destination for the serialized data.
    schema : str
        A recognized tree file schema, such as "nexus" or "newick", for
        which a specialized tree-list writer is available. If no writer
        is implemented for the schema, an UnsupportedSchemaError is
        raised (by ``dataio.get_writer``).
    \*\*kwargs : keyword arguments, optional
        Passed directly to the schema-specific writer; see the writer
        documentation for the supported options.
    """
    # Look up the schema-specific writer and hand the whole list over.
    dataio.get_writer(schema, **kwargs).write_tree_list(self, stream)
###########################################################################
### List Interface
def _import_tree_to_taxon_namespace(self,
        tree,
        taxon_import_strategy="migrate",
        **kwargs):
    """
    Bring ``tree`` into this collection's |TaxonNamespace|.

    If ``tree`` already shares ``self.taxon_namespace`` it is returned
    unchanged. Otherwise the handling depends on
    ``taxon_import_strategy``: "migrate" remaps taxa via
    ``tree.migrate_taxon_namespace()`` (passing ``kwargs`` through),
    while "add" simply repoints the tree's namespace reference and calls
    ``tree.update_taxon_namespace()``. Any other value raises
    ``ValueError``. Returns ``tree``.
    """
    # Fast path: nothing to do when namespaces already agree.
    if tree.taxon_namespace is self.taxon_namespace:
        return tree
    if taxon_import_strategy == "migrate":
        # Remap by label, creating new Taxon objects as needed.
        tree.migrate_taxon_namespace(taxon_namespace=self.taxon_namespace,
                **kwargs)
    elif taxon_import_strategy == "add":
        # Blindly adopt the namespace; may produce duplicate labels.
        tree._taxon_namespace = self.taxon_namespace
        tree.update_taxon_namespace()
    else:
        raise ValueError("Unrecognized taxon import strategy: '{}'".format(taxon_import_strategy))
    return tree
def insert(self, index, tree, taxon_import_strategy="migrate", **kwargs):
    """
    Insert the |Tree| object ``tree`` into the collection before
    position ``index``.

    The |TaxonNamespace| reference of ``tree`` is set to that of
    ``self``. |Taxon| objects on ``tree`` that are not already in
    ``self.taxon_namespace`` are handled according to
    ``taxon_import_strategy``:

    - 'migrate': taxa are remapped by label (new |Taxon| objects are
      created where no label match exists), via
      :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
      called on ``tree`` with ``kwargs`` passed through.
    - 'add': missing |Taxon| objects are added as-is; no label matching
      is attempted, so duplicate labels may result.

    Parameters
    ----------
    index : int
        Position before which to insert ``tree``.
    tree : |Tree|
        The tree to be added.
    taxon_import_strategy : str
        'migrate' or 'add'; see above.
    \*\*kwargs : keyword arguments
        Passed directly to ``tree.migrate_taxon_namespace()``.

    See Also
    --------
    :meth:`Tree.migrate_taxon_namespace`
    """
    # Normalize the tree's taxon namespace first, then splice it in.
    self._import_tree_to_taxon_namespace(
            tree=tree,
            taxon_import_strategy=taxon_import_strategy,
            **kwargs)
    self._trees.insert(index, tree)
def append(self, tree, taxon_import_strategy="migrate", **kwargs):
    """
    Add the |Tree| object ``tree`` to the end of the collection.

    The |TaxonNamespace| reference of ``tree`` is set to that of
    ``self``. |Taxon| objects on ``tree`` that are not already in
    ``self.taxon_namespace`` are handled according to
    ``taxon_import_strategy``:

    - 'migrate': taxa are remapped by label (new |Taxon| objects are
      created where no label match exists), via
      :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()`
      called on ``tree`` with ``kwargs`` passed through.
    - 'add': missing |Taxon| objects are added as-is; no label matching
      is attempted, so duplicate labels may result.

    Parameters
    ----------
    tree : |Tree|
        The tree to be added.
    taxon_import_strategy : str
        'migrate' or 'add'; see above.
    \*\*kwargs : keyword arguments
        Passed directly to ``tree.migrate_taxon_namespace()``.

    See Also
    --------
    :meth:`Tree.migrate_taxon_namespace`
    """
    # Normalize the tree's taxon namespace first, then append it.
    self._import_tree_to_taxon_namespace(
            tree=tree,
            taxon_import_strategy=taxon_import_strategy,
            **kwargs)
    self._trees.append(tree)
def extend(self, other):
    """
    In-place addition of the |Tree| objects in ``other`` to ``self``.

    If ``other`` is a |TreeList|, the trees are *copied* (via
    ``self.tree_type``) into ``self.taxon_namespace``; otherwise the
    original objects are migrated into ``self.taxon_namespace`` and
    added directly.

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    ``self`` : |TreeList|
    """
    if isinstance(other, TreeList):
        # Copy each tree into this collection's taxon namespace.
        self._trees.extend(
                self.tree_type(src, taxon_namespace=self.taxon_namespace)
                for src in other)
    else:
        # Migrate and add the original objects themselves.
        for src in other:
            self.append(src)
    return self
def __iadd__(self, other):
    """
    In-place addition (``+=``) of the |Tree| objects in ``other`` to
    ``self``.

    If ``other`` is a |TreeList|, the trees are *copied* and migrated
    into ``self.taxon_namespace``; otherwise the original objects are
    migrated into ``self.taxon_namespace`` and added directly.

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    ``self`` : |TreeList|
    """
    # ``+=`` is just an alias for extend().
    return self.extend(other)
def __add__(self, other):
    """
    Create and return a new |TreeList| with clones of all trees in
    ``self`` as well as all |Tree| objects in ``other``.

    If ``other`` is a |TreeList|, its trees are *cloned* and migrated
    into ``self.taxon_namespace``; otherwise the original objects are
    migrated into ``self.taxon_namespace`` and added directly.

    Parameters
    ----------
    other : iterable of |Tree| objects

    Returns
    -------
    tlist : |TreeList|
        New |TreeList| containing clones of the |Tree| objects in
        ``self`` and ``other``.
    """
    result = TreeList(taxon_namespace=self.taxon_namespace)
    # extend() clones when its argument is a TreeList, so ``self``'s
    # trees are copied rather than shared.
    result.extend(self)
    result.extend(other)
    return result
def __contains__(self, tree):
    """Return |True| if ``tree`` is in the underlying tree list."""
    return tree in self._trees
def __delitem__(self, tree):
    # NOTE(review): despite the parameter name, ``tree`` is used as a
    # list index/slice here, not a Tree object.
    del self._trees[tree]
def __iter__(self):
    """Iterate over the member |Tree| objects in order."""
    return iter(self._trees)
def __reversed__(self):
    """Iterate over the member |Tree| objects in reverse order."""
    return reversed(self._trees)
def __len__(self):
    """Return the number of |Tree| objects in the collection."""
    return len(self._trees)
def __getitem__(self, index):
    """
    If ``index`` is an integer, return the |Tree| at position
    ``index``. If ``index`` is a slice, return a |TreeList| holding
    references (i.e. not copies or clones, but the actual original
    instances themselves) to the |Tree| objects in the sliced
    positions, sharing the same |TaxonNamespace| as ``self``.

    Parameters
    ----------
    index : int or slice

    Returns
    -------
    t : |Tree| or |TreeList|
    """
    # Plain integer indexing: hand back the member tree itself.
    if not isinstance(index, slice):
        return self._trees[index]
    # Slice: wrap the selected trees in a new TreeList that shares
    # this collection's taxon namespace.
    selected = self._trees[index]
    return TreeList(selected, taxon_namespace=self.taxon_namespace)
def __setitem__(self, index, value):
    """
    Replace the tree(s) at ``index``.

    For a slice: if ``value`` is a |TreeList|, its trees are copied
    (via ``self.tree_type``) into this collection's taxon namespace;
    otherwise each tree in ``value`` is migrated in place. For a plain
    index, the single tree is migrated in place.
    """
    if isinstance(index, slice):
        if isinstance(value, TreeList):
            # Copy each incoming tree into our taxon namespace.
            value = [
                    self.tree_type(src, taxon_namespace=self.taxon_namespace)
                    for src in value]
        else:
            # Migrate the original objects in place.
            for src in value:
                self._import_tree_to_taxon_namespace(src)
        self._trees[index] = value
    else:
        self._trees[index] = self._import_tree_to_taxon_namespace(value)
def clear(self):
    """Remove all trees from the collection."""
    # list.clear() only with 3.4 or so ...
    self._trees = []
def index(self, tree):
    """Return the position of ``tree`` in the collection (raises ValueError if absent)."""
    return self._trees.index(tree)
def pop(self, index=-1):
    """Remove and return the tree at ``index`` (last tree by default)."""
    return self._trees.pop(index)
def remove(self, tree):
    """Remove the first occurrence of ``tree`` (raises ValueError if absent)."""
    self._trees.remove(tree)
def reverse(self):
    """Reverse the order of trees in place."""
    self._trees.reverse()
def sort(self, key=None, reverse=False):
    """Sort the trees in place; ``key`` and ``reverse`` as for ``list.sort``."""
    self._trees.sort(key=key, reverse=reverse)
def new_tree(self, *args, **kwargs):
    """
    Construct a new |Tree| in this collection's |TaxonNamespace|,
    append it, and return it.

    Positional and keyword arguments are passed to ``self.tree_type``
    (or ``self.tree_factory`` when ``tree_type`` is |None|). A
    ``taxon_namespace`` keyword argument, if given, must be the same
    namespace as ``self``'s; otherwise ``TypeError`` is raised.
    """
    tns = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, self.taxon_namespace)
    if tns is not self.taxon_namespace:
        raise TypeError("Cannot create new Tree with different TaxonNamespace")
    kwargs["taxon_namespace"] = self.taxon_namespace
    # Prefer the concrete tree type; fall back to the factory.
    constructor = self.tree_type if self.tree_type is not None else self.tree_factory
    new_member = constructor(*args, **kwargs)
    self._trees.append(new_member)
    return new_member
##############################################################################
## Taxon Handling
def reconstruct_taxon_namespace(self,
        unify_taxa_by_label=True,
        taxon_mapping_memo=None):
    """
    Repoint every member tree at ``self.taxon_namespace`` and rebuild
    its taxon references.

    A single shared ``taxon_mapping_memo`` dict is threaded through all
    member trees so that equivalent taxa map to the same |Taxon|
    object across trees.
    """
    memo = {} if taxon_mapping_memo is None else taxon_mapping_memo
    for member_tree in self._trees:
        member_tree._taxon_namespace = self.taxon_namespace
        member_tree.reconstruct_taxon_namespace(
                unify_taxa_by_label=unify_taxa_by_label,
                taxon_mapping_memo=memo,
                )
def update_taxon_namespace(self):
    """
    Repoint every member tree at ``self.taxon_namespace`` and have it
    register its taxa there (no label-based remapping is performed).
    """
    for member_tree in self._trees:
        member_tree._taxon_namespace = self.taxon_namespace
        member_tree.update_taxon_namespace()
def poll_taxa(self, taxa=None):
    """
    Return a set populated with all |Taxon| instances associated with
    ``self``.

    Parameters
    ----------
    taxa : set
        Set to populate. If not specified, a new one is created.

    Returns
    -------
    taxa : set[|Taxon|]
        Set of taxa associated with ``self``.
    """
    collected = set() if taxa is None else taxa
    # Each member tree adds its own taxa into the shared set.
    for member_tree in self:
        member_tree.poll_taxa(collected)
    return collected
def reindex_subcomponent_taxa(self):
    """
    Not supported for |TreeList|.

    Raises
    ------
    NotImplementedError
        Always.
    """
    # Bug fix: the original definition was missing ``self``, so calling
    # this on an instance raised ``TypeError`` ("takes 0 positional
    # arguments") instead of the intended ``NotImplementedError``.
    raise NotImplementedError()
##############################################################################
## Special Calculations and Operations on Entire Collection
def _get_tree_array(self, kwargs_dict):
    """
    Return a TreeArray built from the trees currently in ``self``.

    Processes ``kwargs_dict`` intelligently: the keyword arguments that
    pertain to TreeArray construction are *removed* (popped) from the
    dict and passed on; everything else is left in place for the
    caller.
    """
    # TODO: maybe ignore_node_ages defaults to |False| but ``ultrametricity_precision`` defaults to 0?
    take = kwargs_dict.pop
    return TreeArray.from_tree_list(
            trees=self,
            # taxon_namespace=self.taxon_namespace,
            is_rooted_trees=take("is_rooted_trees", None),
            ignore_edge_lengths=take("ignore_edge_lengths", False),
            ignore_node_ages=take("ignore_node_ages", True),
            use_tree_weights=take("use_tree_weights", True),
            ultrametricity_precision=take("ultrametricity_precision", constants.DEFAULT_ULTRAMETRICITY_PRECISION),
            is_force_max_age=take("is_force_max_age", None),
            taxon_label_age_map=take("taxon_label_age_map", None),
            is_bipartitions_updated=take("is_bipartitions_updated", False),
            )
def split_distribution(self,
        is_bipartitions_updated=False,
        default_edge_length_value=None,
        **kwargs):
    """
    Return a `SplitDistribution` collecting information on the splits
    of the contained trees.

    Keyword arguments are passed directly to the `SplitDistribution`
    constructor; a ``taxon_namespace`` argument, if given, must be
    ``self``'s namespace.
    """
    assert "taxon_namespace" not in kwargs or kwargs["taxon_namespace"] is self.taxon_namespace
    kwargs["taxon_namespace"] = self.taxon_namespace
    dist = SplitDistribution(**kwargs)
    # Tally the splits of every member tree into the distribution.
    for member_tree in self:
        dist.count_splits_on_tree(
                tree=member_tree,
                is_bipartitions_updated=is_bipartitions_updated,
                default_edge_length_value=default_edge_length_value)
    return dist
def as_tree_array(self, **kwargs):
    """
    Return a |TreeArray| collecting information on the splits of the
    contained trees. Keyword arguments are passed directly to the
    |TreeArray| constructor.
    """
    return TreeArray.from_tree_list(trees=self, **kwargs)
def consensus(self,
        min_freq=constants.GREATER_THAN_HALF,
        is_bipartitions_updated=False,
        summarize_splits=True,
        **kwargs):
    """
    Return a consensus tree of all trees in self, with the minimum
    frequency of a bipartition to be added to the consensus tree given
    by ``min_freq``.

    Parameters
    ----------
    min_freq : real
        Minimum frequency for a bipartition to be included.
    is_bipartitions_updated : bool
        If |True|, the member trees are assumed to already have their
        bipartitions encoded and updated.
    summarize_splits : bool
        Passed through to ``TreeArray.consensus_tree()``.
    \*\*kwargs : keyword arguments
        TreeArray-construction arguments are consumed by
        ``_get_tree_array()``; the rest are passed to
        ``consensus_tree()``.
    """
    # Bug fix: ``is_bipartitions_updated`` was previously captured by
    # the named parameter and never forwarded, so passing |True| had no
    # effect. Re-inject it so _get_tree_array() can pop and honor it.
    kwargs["is_bipartitions_updated"] = is_bipartitions_updated
    ta = self._get_tree_array(kwargs)
    return ta.consensus_tree(min_freq=min_freq,
            summarize_splits=summarize_splits,
            **kwargs)
def maximum_product_of_split_support_tree(
        self,
        include_external_splits=False,
        score_attr="log_product_of_split_support"):
    """
    Return the member tree that maximizes the product of split
    supports, also known as the "Maximum Clade Credibility Tree" or
    MCCT.

    Parameters
    ----------
    include_external_splits : bool
        If |True|, non-internal split posteriors are included in the
        score. Defaults to |False| (skipped); this only makes a
        difference for splits collected from trees with different leaf
        sets.
    score_attr : str or None
        If not |None|, the winning tree gets its score stored under
        this attribute name.

    Returns
    -------
    mcct_tree : Tree
        The tree maximizing the product of split supports.
    """
    tree_array = self._get_tree_array({})
    scores, best_idx = tree_array.calculate_log_product_of_split_supports(
            include_external_splits=include_external_splits,
            )
    best_tree = self[best_idx]
    if score_attr is not None:
        # Record the winning (log) score on the tree itself.
        setattr(best_tree, score_attr, scores[best_idx])
    return best_tree
def maximum_sum_of_split_support_tree(
        self,
        include_external_splits=False,
        score_attr="sum_of_split_support"):
    """
    Return the member tree that maximizes the *sum* of split supports.

    Parameters
    ----------
    include_external_splits : bool
        If |True|, non-internal split posteriors are included in the
        score. Defaults to |False| (skipped); this only makes a
        difference for splits collected from trees with different leaf
        sets.
    score_attr : str or None
        If not |None|, the winning tree gets its score stored under
        this attribute name.

    Returns
    -------
    mcct_tree : Tree
        The tree maximizing the sum of split supports.
    """
    tree_array = self._get_tree_array({})
    scores, best_idx = tree_array.calculate_sum_of_split_supports(
            include_external_splits=include_external_splits,
            )
    best_tree = self[best_idx]
    if score_attr is not None:
        # Record the winning score on the tree itself.
        setattr(best_tree, score_attr, scores[best_idx])
    return best_tree
def frequency_of_bipartition(self, **kwargs):
    """
    Given a bipartition specified as:

        - a |Bipartition| instance given the keyword 'bipartition'
        - a split bitmask given the keyword 'split_bitmask'
        - a list of |Taxon| objects given with the keyword ``taxa``
        - a list of taxon labels given with the keyword ``labels``

    this function returns the proportion of trees in self
    in which the split is found.

    If the tree(s) in the collection are unrooted, then the bipartition
    will be normalized for the comparison.

    Raises
    ------
    IndexError
        If not all given taxa/labels could be mapped to the bitmask.
    TypeError
        If none of the recognized keyword arguments is given.
    """
    split = None
    # Pop this flag so it is not misread as a bipartition specification.
    is_bipartitions_updated = kwargs.pop("is_bipartitions_updated", False)
    if "split_bitmask" in kwargs:
        split = kwargs["split_bitmask"]
    elif "bipartition" in kwargs:
        split = kwargs["bipartition"].split_bitmask
    elif "taxa" in kwargs or "labels" in kwargs:
        split = self.taxon_namespace.taxa_bitmask(**kwargs)
        if "taxa" in kwargs:
            k = len(kwargs["taxa"])
        else:
            k = len(kwargs["labels"])
        # Every requested taxon must have contributed a bit; otherwise
        # some taxa/labels were not found in the namespace.
        if bitprocessing.num_set_bits(split) != k:
            raise IndexError('Not all taxa could be mapped to bipartition (%s): %s' \
                % (self.taxon_namespace.bitmask_as_bitstring(split), k))
    else:
        raise TypeError("Need to specify one of the following keyword arguments: 'split_bitmask', 'bipartition', 'taxa', or 'labels'")
    # Keep both forms: rooted trees are matched against the raw bitmask,
    # unrooted trees against the rotation-normalized one.
    unnormalized_split = split
    normalized_split = treemodel.Bipartition.normalize_bitmask(
            bitmask=split,
            fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
            lowest_relevant_bit=1)
    found = 0
    total = 0
    for tree in self:
        # (Re-)encode bipartitions unless the caller vouches they are
        # current AND the tree actually has an encoding.
        if not is_bipartitions_updated or not tree.bipartition_encoding:
            tree.encode_bipartitions()
        bipartition_encoding = set(b.split_bitmask for b in tree.bipartition_encoding)
        total += 1
        if tree.is_unrooted and (normalized_split in bipartition_encoding):
            found += 1
        elif (not tree.is_unrooted) and (unnormalized_split in bipartition_encoding):
            found += 1
    try:
        return float(found)/total
    except ZeroDivisionError:
        # Empty collection: report frequency 0 rather than failing.
        return 0
def frequency_of_split(self, **kwargs):
    """
    DEPRECATED: use :meth:`frequency_of_bipartition()` instead.

    Emits a deprecation warning, then delegates all keyword arguments
    unchanged.
    """
    deprecate.dendropy_deprecation_warning(
            message="Deprecated since DendroPy 4: Instead of 'frequency_of_split()' use 'frequency_of_bipartition()'",
            stacklevel=4,
            )
    return self.frequency_of_bipartition(**kwargs)
###############################################################################
### SplitDistribution
class SplitDistribution(taxonmodel.TaxonNamespaceAssociated):
    """
    Collects information regarding splits over multiple trees.

    Tracks, per split bitmask: (weighted) occurrence counts, observed
    edge lengths, and observed node ages, along with tree-level
    bookkeeping (number of trees counted, sum of tree weights, rooting
    states seen). Derived data such as split frequencies and summaries
    are computed lazily and invalidated when new trees are counted.
    """
    # Field names used for summary-statistics records of edge lengths / node ages.
    SUMMARY_STATS_FIELDNAMES = ('mean', 'median', 'sd', 'hpd95', 'quant_5_95', 'range')
def __init__(self,
        taxon_namespace=None,
        ignore_edge_lengths=False,
        ignore_node_ages=True,
        use_tree_weights=True,
        ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
        is_force_max_age=False,
        taxon_label_age_map=None):
    """
    Initialize an empty split distribution.

    Parameters
    ----------
    taxon_namespace : |TaxonNamespace|, optional
        Namespace to which all counted trees must belong.
    ignore_edge_lengths : bool
        If |True|, edge lengths are not recorded per split.
    ignore_node_ages : bool
        If |True| (default), node ages are not computed/recorded.
    use_tree_weights : bool
        If |True| (default), each tree's weight scales its contribution.
    ultrametricity_precision : numeric
        Passed to ``Tree.calc_node_ages()`` when ages are recorded.
    is_force_max_age : bool
        Passed to ``Tree.calc_node_ages()`` when ages are recorded.
    taxon_label_age_map : dict, optional
        Maps taxon labels to leaf ages; enables per-leaf age assignment
        during node-age calculation.
    """
    # Taxon Namespace
    taxonmodel.TaxonNamespaceAssociated.__init__(self,
            taxon_namespace=taxon_namespace)
    # configuration
    self.ignore_edge_lengths = ignore_edge_lengths
    self.ignore_node_ages = ignore_node_ages
    self.use_tree_weights = use_tree_weights
    self.ultrametricity_precision = ultrametricity_precision
    # storage/function
    self.total_trees_counted = 0
    self.sum_of_tree_weights = 0.0
    # Set of rooting states (True/False) seen among counted trees.
    self.tree_rooting_types_counted = set()
    # Per-split accumulators: weighted counts, edge lengths, node ages.
    self.split_counts = collections.defaultdict(float)
    self.split_edge_lengths = collections.defaultdict(list)
    self.split_node_ages = collections.defaultdict(list)
    self.is_force_max_age = is_force_max_age
    self.is_force_min_age = False
    self.taxon_label_age_map = taxon_label_age_map
    # secondary/derived/generated/collected data (lazily computed caches)
    self._is_rooted = False
    self._split_freqs = None
    self._trees_counted_for_freqs = 0
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    self._trees_counted_for_summaries = 0
    # services
    self.tree_decorator = None
###########################################################################
### Utility
def normalize_bitmask(self, bitmask):
    """
    "Normalize" a split bitmask by ensuring that its least-significant
    bit is always 1 (used on unrooted trees to establish split identity
    independent of rotation).

    Parameters
    ----------
    bitmask : int
        Split bitmask hash to be normalized.

    Returns
    -------
    h : int
        Normalized split bitmask.
    """
    # Delegates to the shared Bipartition normalization, filling
    # against this namespace's full taxa bitmask.
    return treemodel.Bipartition.normalize_bitmask(
            bitmask=bitmask,
            fill_bitmask=self.taxon_namespace.all_taxa_bitmask(),
            lowest_relevant_bit=1)
###########################################################################
### Configuration
def _is_rooted_deprecation_warning(self):
    # Shared warning for the deprecated is_rooted/is_unrooted accessors below.
    deprecate.dendropy_deprecation_warning(
            message="Deprecated since DendroPy 4: 'SplitDistribution.is_rooted' and 'SplitDistribution.is_unrooted' are no longer valid attributes; rooting state tracking and management is now the responsibility of client code.",
            stacklevel=4,
            )
def _get_is_rooted(self):
    # Deprecated accessor: warns, then reports the legacy flag.
    self._is_rooted_deprecation_warning()
    return self._is_rooted
def _set_is_rooted(self, val):
    # Deprecated mutator: warns, then sets the legacy flag.
    self._is_rooted_deprecation_warning()
    self._is_rooted = val
is_rooted = property(_get_is_rooted, _set_is_rooted)
def _get_is_unrooted(self):
    # Deprecated accessor: logical complement of is_rooted.
    self._is_rooted_deprecation_warning()
    return not self._is_rooted
def _set_is_unrooted(self, val):
    # Deprecated mutator: stores the complement in the legacy flag.
    self._is_rooted_deprecation_warning()
    self._is_rooted = not val
is_unrooted = property(_get_is_unrooted, _set_is_unrooted)
###########################################################################
### Split Counting and Book-Keeping
def add_split_count(self, split, count=1):
    """Increment the tally for ``split`` by ``count`` (default 1)."""
    self.split_counts[split] = self.split_counts[split] + count
def count_splits_on_tree(self,
        tree,
        is_bipartitions_updated=False,
        default_edge_length_value=None):
    """
    Counts splits in this tree and add to totals. ``tree`` must be decorated
    with splits, and no attempt is made to normalize taxa.

    Parameters
    ----------
    tree : a |Tree| object.
        The tree on which to count the splits.
    is_bipartitions_updated : bool
        If |False| [default], then the tree will have its splits encoded or
        updated. Otherwise, if |True|, then the tree is assumed to have its
        splits already encoded and updated.
    default_edge_length_value : numeric, optional
        Value recorded for edges whose length is |None| (when edge
        lengths are being tracked).

    Returns
    --------
    s : iterable of splits
        A list of split bitmasks from ``tree``.
    e :
        A list of edge length values from ``tree``.
    a :
        A list of node age values from ``tree``.
    """
    # The tree must share this distribution's taxon namespace.
    assert tree.taxon_namespace is self.taxon_namespace
    self.total_trees_counted += 1
    if not self.ignore_node_ages:
        # Compute node ages before walking the bipartitions; use the
        # per-leaf age map (if supplied) to seed leaf ages.
        if self.taxon_label_age_map:
            set_node_age_fn = self._set_node_age
        else:
            set_node_age_fn = None
        tree.calc_node_ages(
                ultrametricity_precision=self.ultrametricity_precision,
                is_force_max_age=self.is_force_max_age,
                is_force_min_age=self.is_force_min_age,
                set_node_age_fn=set_node_age_fn,
                )
    # Each split's count is incremented by the tree's weight (or 1.0
    # when weights are absent or disabled); the same weight is added to
    # the running weight total used for frequency normalization.
    if tree.weight is not None and self.use_tree_weights:
        weight_to_use = float(tree.weight)
    else:
        weight_to_use = 1.0
    self.sum_of_tree_weights += weight_to_use
    # Record which rooting states have been seen across counted trees.
    if tree.is_rooted:
        self.tree_rooting_types_counted.add(True)
    else:
        self.tree_rooting_types_counted.add(False)
    if not is_bipartitions_updated:
        tree.encode_bipartitions()
    splits = []
    edge_lengths = []
    node_ages = []
    for bipartition in tree.bipartition_encoding:
        split = bipartition.split_bitmask
        ## if edge is stored as an attribute, might be faster to:
        # edge = bipartition.edge
        edge = tree.bipartition_edge_map[bipartition]
        splits.append(split)
        self.split_counts[split] += weight_to_use
        if not self.ignore_edge_lengths:
            # Missing edge lengths are recorded as the caller-supplied
            # default (which may itself be None).
            sel = self.split_edge_lengths.setdefault(split,[])
            if edge.length is None:
                elen = default_edge_length_value
            else:
                elen = edge.length
            sel.append(elen)
            edge_lengths.append(elen)
        else:
            sel = None
        if not self.ignore_node_ages:
            sna = self.split_node_ages.setdefault(split, [])
            if edge.head_node is not None:
                nage = edge.head_node.age
            else:
                nage = None
            sna.append(nage)
            node_ages.append(nage)
        else:
            sna = None
    return splits, edge_lengths, node_ages
def splits_considered(self):
    """
    Returns 4 values:

        - total *weighted* number of splits counted
        - total number of unique splits counted
        - total *weighted* number of non-trivial splits counted
        - total number of unique non-trivial splits counted

    Returns all zeros if no splits have been counted.
    """
    if not self.split_counts:
        return 0, 0, 0, 0
    taxa_mask = self.taxon_namespace.all_taxa_bitmask()
    weighted_total = 0
    unique_total = 0
    weighted_nontrivial = 0
    unique_nontrivial = 0
    for split, weight in self.split_counts.items():
        unique_total += 1
        weighted_total += weight
        # Trivial splits (single taxon / all taxa) are excluded from
        # the non-trivial tallies.
        if not treemodel.Bipartition.is_trivial_bitmask(split, taxa_mask):
            unique_nontrivial += 1
            weighted_nontrivial += weight
    return weighted_total, unique_total, weighted_nontrivial, unique_nontrivial
def calc_freqs(self):
    """
    Force recalculation of the split-frequency map.

    Each recorded split's frequency is its (weighted) count divided by
    the normalization weight returned by
    ``calc_normalization_weight()``. If no trees have been counted,
    every recorded split is assigned frequency 1.0. Caches the result
    in ``self._split_freqs``, records the tree count the cache is valid
    for, and invalidates the edge-length / node-age summary caches.

    Returns
    -------
    dict
        Mapping of split bitmask to frequency.
    """
    self._split_freqs = {}
    if self.total_trees_counted == 0:
        # No trees counted: any recorded split is "always" present.
        for split in self.split_counts:
            self._split_freqs[split] = 1.0
    else:
        normalization_weight = self.calc_normalization_weight()
        for split in self.split_counts:
            # (Removed an unused local ``count`` binding here.)
            self._split_freqs[split] = float(self.split_counts[split]) / normalization_weight
    self._trees_counted_for_freqs = self.total_trees_counted
    # Frequencies changed, so dependent summaries must be recomputed.
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    return self._split_freqs
def calc_normalization_weight(self):
    """
    Return the denominator for split frequencies: the accumulated sum
    of tree weights when it is non-zero, otherwise the raw number of
    trees counted.
    """
    total_weight = self.sum_of_tree_weights
    if total_weight:
        return float(total_weight)
    return self.total_trees_counted
def update(self, split_dist):
    """
    Merge the tallies of another `SplitDistribution` into ``self``.

    Tree counts, weight sums, rooting states, per-split counts, edge
    lengths, and node ages from ``split_dist`` are all accumulated;
    cached summaries are invalidated.
    """
    other = split_dist
    self.total_trees_counted += other.total_trees_counted
    self.sum_of_tree_weights += other.sum_of_tree_weights
    # Merged data invalidates any previously computed summaries.
    self._split_edge_length_summaries = None
    self._split_node_age_summaries = None
    self._trees_counted_for_summaries = 0
    self.tree_rooting_types_counted.update(other.tree_rooting_types_counted)
    for split in other.split_counts:
        self.split_counts[split] += other.split_counts[split]
        self.split_edge_lengths[split] += other.split_edge_lengths[split]
        self.split_node_ages[split] += other.split_node_ages[split]
###########################################################################
### Basic Information Access
def __len__(self):
    """Return the number of distinct splits recorded."""
    return len(self.split_counts)
def __iter__(self):
    """Iterate over the recorded split bitmasks."""
    for s in self.split_counts:
        yield s
def __getitem__(self, split_bitmask):
    """
    Returns frequency of split_bitmask (0.0 if the split was never seen).
    """
    return self._get_split_frequencies().get(split_bitmask, 0.0)
def _get_split_frequencies(self):
    # Lazily (re)compute the frequency map whenever it is missing or
    # stale (i.e., trees were counted since the last calculation).
    if self._split_freqs is None or self._trees_counted_for_freqs != self.total_trees_counted:
        self.calc_freqs()
    return self._split_freqs
# Read-only view of the (lazily computed) split-frequency map.
split_frequencies = property(_get_split_frequencies)
def is_mixed_rootings_counted(self):
    """Return |True| if both rooted and unrooted/unspecified trees were counted."""
    counted = self.tree_rooting_types_counted
    if True not in counted:
        return False
    return (False in counted) or (None in counted)
def is_all_counted_trees_rooted(self):
    """Return |True| if every counted tree was rooted."""
    counted = self.tree_rooting_types_counted
    return (True in counted) and (len(counted) == 1)
def is_all_counted_trees_strictly_unrooted(self):
    """Return |True| if every counted tree was explicitly unrooted."""
    counted = self.tree_rooting_types_counted
    return (False in counted) and (len(counted) == 1)
def is_all_counted_trees_treated_as_unrooted(self):
    """Return |True| if no counted tree was rooted."""
    return True not in self.tree_rooting_types_counted
###########################################################################
### Summarization
def split_support_iter(self,
        tree,
        is_bipartitions_updated=False,
        include_external_splits=False,
        traversal_strategy="preorder",
        node_support_attr_name=None,
        edge_support_attr_name=None,
        ):
    """
    Iterate over support values for the splits of ``tree``, where the
    support value is the proportional frequency of the split in the
    current split distribution (0.0 for splits never seen).

    Parameters
    ----------
    tree : |Tree|
        The |Tree| to be scored.
    is_bipartitions_updated : bool
        If |False| [default], the tree has its splits encoded or
        updated first; if |True|, the encoding is assumed current.
    include_external_splits : bool
        If |True|, leaf (external) splits are included; if |False|,
        only internal splits are visited. This only makes a difference
        for splits collected from trees with different leaf sets.
    traversal_strategy : str
        "preorder" or "postorder"; the order in which splits are
        visited. Any other value raises ``ValueError``.
    node_support_attr_name, edge_support_attr_name :
        Accepted for interface compatibility; not used here.

    Yields
    ------
    float
        Support value for each visited split.
    """
    if traversal_strategy == "preorder":
        iter_fn = (tree.preorder_node_iter if include_external_splits
                else tree.preorder_internal_node_iter)
    elif traversal_strategy == "postorder":
        iter_fn = (tree.postorder_node_iter if include_external_splits
                else tree.postorder_internal_node_iter)
    else:
        raise ValueError("Traversal strategy not supported: '{}'".format(traversal_strategy))
    if not is_bipartitions_updated:
        tree.encode_bipartitions()
    frequencies = self._get_split_frequencies()
    for nd in iter_fn():
        # Unseen splits get support 0.0.
        yield frequencies.get(nd.edge.split_bitmask, 0.0)
def calc_split_edge_length_summaries(self):
    """
    Recompute and return the per-split edge-length summary statistics.

    Splits with no recorded edge lengths, or for which
    ``statistics.summarize`` raises ``ValueError`` (degenerate
    samples), are omitted from the result.
    """
    self._split_edge_length_summaries = {}
    for split, recorded_lengths in self.split_edge_lengths.items():
        if not recorded_lengths:
            continue
        try:
            self._split_edge_length_summaries[split] = statistics.summarize(recorded_lengths)
        except ValueError:
            # summarize() rejects degenerate samples; skip those splits.
            pass
    return self._split_edge_length_summaries
def calc_split_node_age_summaries(self):
    """
    Recompute and return the per-split node-age summary statistics.

    Splits with no recorded ages, or for which
    ``statistics.summarize`` raises ``ValueError`` (degenerate
    samples), are omitted from the result.
    """
    self._split_node_age_summaries = {}
    for split, recorded_ages in self.split_node_ages.items():
        if not recorded_ages:
            continue
        try:
            self._split_node_age_summaries[split] = statistics.summarize(recorded_ages)
        except ValueError:
            # summarize() rejects degenerate samples; skip those splits.
            pass
    return self._split_node_age_summaries
def _set_node_age(self, nd):
    """
    Return the mapped age for a leaf node, or |None| for internal or
    taxon-less nodes.

    Used as the ``set_node_age_fn`` hook of ``Tree.calc_node_ages()``
    when ``taxon_label_age_map`` is supplied; unmapped leaf labels
    default to age 0.0.
    """
    if nd.taxon is not None and not nd._child_nodes:
        return self.taxon_label_age_map.get(nd.taxon.label, 0.0)
    return None
def _get_split_edge_length_summaries(self):
    # Lazily (re)compute summaries when missing or stale (i.e., trees
    # were counted since the last summary calculation).
    if self._split_edge_length_summaries is None \
            or self._trees_counted_for_summaries != self.total_trees_counted:
        self.calc_split_edge_length_summaries()
    return self._split_edge_length_summaries
# Read-only view of the (lazily computed) per-split edge-length summaries.
split_edge_length_summaries = property(_get_split_edge_length_summaries)
def _get_split_node_age_summaries(self):
    # Lazily (re)compute summaries when missing or stale (i.e., trees
    # were counted since the last summary calculation).
    if self._split_node_age_summaries is None \
            or self._trees_counted_for_summaries != self.total_trees_counted:
        self.calc_split_node_age_summaries()
    return self._split_node_age_summaries
# Read-only view of the (lazily computed) per-split node-age summaries.
split_node_age_summaries = property(_get_split_node_age_summaries)
def log_product_of_split_support_on_tree(self,
        tree,
        is_bipartitions_updated=False,
        include_external_splits=False,
        ):
    """
    Calculate the (log) product of the support of the splits of
    ``tree``, where support is the proportional frequency of each split
    in the current split distribution.

    The tree with the highest product of split support in a sample is
    the "maximum credibility tree" for that sample (also sometimes
    called the "maximum clade credibility tree", though that term is
    also used for the tree with the highest *sum* of split support; see
    :meth:`SplitDistribution.sum_of_split_support_on_tree()`).

    Parameters
    ----------
    tree : |Tree|
        The tree to score.
    is_bipartitions_updated : bool
        If |True|, splits are assumed already encoded and are not
        updated on the tree.
    include_external_splits : bool
        If |True|, leaf split posteriors are included in the score.
        Defaults to |False|; this only makes a difference for splits
        collected from trees with different leaf sets.

    Returns
    -------
    s : numeric
        The log product of the support of the splits of the tree.
    """
    total = 0.0
    for support in self.split_support_iter(
            tree=tree,
            is_bipartitions_updated=is_bipartitions_updated,
            include_external_splits=include_external_splits,
            traversal_strategy="preorder",
            ):
        # Zero-support splits are skipped rather than driving the log
        # to -inf.
        if support:
            total += math.log(support)
    return total
def sum_of_split_support_on_tree(self,
        tree,
        is_bipartitions_updated=False,
        include_external_splits=False,
        ):
    """
    Calculate the sum of the support of the splits of ``tree``, where
    support is the proportional frequency of each split in the current
    distribution.

    Parameters
    ----------
    tree : |Tree|
        The tree to score.
    is_bipartitions_updated : bool
        If |True|, splits are assumed already encoded and are not
        updated on the tree.
    include_external_splits : bool
        If |True|, leaf split posteriors are included in the score.
        Defaults to |False|; this only makes a difference for splits
        collected from trees with different leaf sets.

    Returns
    -------
    s : numeric
        The sum of the support of the splits of the tree.
    """
    # Start at 0.0 so an empty iteration still yields a float.
    return sum(
            self.split_support_iter(
                tree=tree,
                is_bipartitions_updated=is_bipartitions_updated,
                include_external_splits=include_external_splits,
                traversal_strategy="preorder",
                ),
            0.0)
def collapse_edges_with_less_than_minimum_support(self,
tree,
min_freq=constants.GREATER_THAN_HALF,
):
"""
Collapse edges on tree that have support less than indicated by
``min_freq``.
"""
if not tree.is_rooted and self.is_all_counted_trees_rooted():
raise ValueError("Tree is interpreted as unrooted, but split support is based on rooted trees")
elif tree.is_rooted and self.is_all_counted_trees_treated_as_unrooted():
raise ValueError("Tree is interpreted as rooted, but split support is based on unrooted trees")
tree.encode_bipartitions()
split_frequencies = self._get_split_frequencies()
to_collapse = []
for nd in tree.postorder_node_iter():
s = nd.edge.bipartition.split_bitmask
if s not in split_frequencies:
to_collapse.append(nd)
elif split_frequencies[s] < min_freq:
to_collapse.append(nd)
for nd in to_collapse:
nd.edge.collapse(adjust_collapsed_head_children_edge_lengths=True)
def consensus_tree(self,
min_freq=constants.GREATER_THAN_HALF,
is_rooted=None,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Returns a consensus tree from splits in ``self``.
Parameters
----------
min_freq : real
The minimum frequency of a split in this distribution for it to be
added to the tree.
is_rooted : bool
Should tree be rooted or not? If *all* trees counted for splits are
explicitly rooted or unrooted, then this will default to |True| or
|False|, respectively. Otherwise it defaults to |None|.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
Returns
-------
t : consensus tree
"""
if is_rooted is None:
if self.is_all_counted_trees_rooted():
is_rooted = True
elif self.is_all_counted_trees_strictly_unrooted():
is_rooted = False
split_frequencies = self._get_split_frequencies()
to_try_to_add = []
_almost_one = lambda x: abs(x - 1.0) <= 0.0000001
for s in split_frequencies:
freq = split_frequencies[s]
if (min_freq is None) or (freq >= min_freq) or (_almost_one(min_freq) and _almost_one(freq)):
to_try_to_add.append((freq, s))
to_try_to_add.sort(reverse=True)
splits_for_tree = [i[1] for i in to_try_to_add]
con_tree = treemodel.Tree.from_split_bitmasks(
split_bitmasks=splits_for_tree,
taxon_namespace=self.taxon_namespace,
is_rooted=is_rooted)
if summarize_splits:
self.summarize_splits_on_tree(
tree=con_tree,
is_bipartitions_updated=False,
**split_summarization_kwargs
)
return con_tree
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**split_summarization_kwargs
):
"""
Summarizes support of splits/edges/node on tree.
Parameters
----------
tree: |Tree| instance
Tree to be decorated with support values.
is_bipartitions_updated: bool
If |True|, then bipartitions will not be recalculated.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
"""
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
if self.tree_decorator is None:
self.tree_decorator = SplitDistributionSummarizer()
self.tree_decorator.configure(**split_summarization_kwargs)
self.tree_decorator.summarize_splits_on_tree(
split_distribution=self,
tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
return tree
###########################################################################
### legacy
def _get_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
return self.taxon_namespace
def _set_taxon_set(self, v):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
self.taxon_namespace = v
def _del_taxon_set(self):
from dendropy import taxonmodel
taxon_model.taxon_set_deprecation_warning()
taxon_set = property(_get_taxon_set, _set_taxon_set, _del_taxon_set)
###############################################################################
### SplitDistributionSummarizer
class SplitDistributionSummarizer(object):
    """
    Applies the summary statistics of a split distribution (split support,
    node ages, edge lengths) to a target tree, as attributes, metadata
    annotations, node labels, and/or edge lengths.
    """

    def __init__(self, **kwargs):
        """
        See :meth:`SplitDistributionSummarizer.configure` for configuration
        options.
        """
        self.configure(**kwargs)

    def configure(self, **kwargs):
        """
        Configure rendition/mark-up.

        Parameters
        ----------
        set_edge_lengths : string
            For each edge, set the length based on:

                - "support": use support values split corresponding to edge
                - "mean-length": mean of edge lengths for split
                - "median-length": median of edge lengths for split
                - "mean-age": such that split age is equal to mean of ages
                - "median-age": such that split age is equal to median of ages
                - |None|: do not set edge lengths
        add_support_as_node_attribute: bool
            Adds each node's support value as an attribute of the node,
            "``support``".
        add_support_as_node_annotation: bool
            Adds support as a metadata annotation, "``support``". If
            ``add_support_as_node_attribute`` is |True|, then the value will be
            dynamically-bound to the value of the node's "``support``" attribute.
        set_support_as_node_label : bool
            Sets the ``label`` attribute of each node to the support value.
        add_node_age_summaries_as_node_attributes: bool
            Summarizes the distribution of the ages of each node in the
            following attributes:

                - ``age_mean``
                - ``age_median``
                - ``age_sd``
                - ``age_hpd95``
                - ``age_range``
        add_node_age_summaries_as_node_annotations: bool
            Summarizes the distribution of the ages of each node in the
            following metadata annotations:

                - ``age_mean``
                - ``age_median``
                - ``age_sd``
                - ``age_hpd95``
                - ``age_range``

            If ``add_node_age_summaries_as_node_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding node
            attributes.
        add_edge_length_summaries_as_edge_attributes: bool
            Summarizes the distribution of the lengths of each edge in the
            following attributes:

                - ``length_mean``
                - ``length_median``
                - ``length_sd``
                - ``length_hpd95``
                - ``length_range``
        add_edge_length_summaries_as_edge_annotations: bool
            Summarizes the distribution of the lengths of each edge in the
            following metadata annotations:

                - ``length_mean``
                - ``length_median``
                - ``length_sd``
                - ``length_hpd95``
                - ``length_range``

            If ``add_edge_length_summaries_as_edge_attributes`` is |True|, then the
            values will be dynamically-bound to the corresponding edge
            attributes.
        support_label_decimals: int
            Number of decimal places to express when rendering the support
            value as a string for the node label.
        support_as_percentages: bool
            Whether or not to express the support value as percentages (default
            is probability or proportion).
        minimum_edge_length : numeric
            All edge lengths calculated to have a value less than this will be
            set to this.
        error_on_negative_edge_lengths : bool
            If |True|, an inferred edge length that is less than 0 will result
            in a ValueError.
        """
        self.set_edge_lengths = kwargs.pop("set_edge_lengths", None)
        self.add_support_as_node_attribute = kwargs.pop("add_support_as_node_attribute", True)
        self.add_support_as_node_annotation = kwargs.pop("add_support_as_node_annotation", True)
        self.set_support_as_node_label = kwargs.pop("set_support_as_node_label", None)
        self.add_node_age_summaries_as_node_attributes = kwargs.pop("add_node_age_summaries_as_node_attributes", True)
        self.add_node_age_summaries_as_node_annotations = kwargs.pop("add_node_age_summaries_as_node_annotations", True)
        self.add_edge_length_summaries_as_edge_attributes = kwargs.pop("add_edge_length_summaries_as_edge_attributes", True)
        self.add_edge_length_summaries_as_edge_annotations = kwargs.pop("add_edge_length_summaries_as_edge_annotations", True)
        self.support_label_decimals = kwargs.pop("support_label_decimals", 4)
        self.support_as_percentages = kwargs.pop("support_as_percentages", False)
        self.support_label_compose_fn = kwargs.pop("support_label_compose_fn", None)
        self.primary_fieldnames = ["support",]
        self.summary_stats_fieldnames = SplitDistribution.SUMMARY_STATS_FIELDNAMES
        # Fallback values used when a split has no sampled data for a given
        # summary statistic; list-valued statistics default to empty lists,
        # everything else to 0.0 (via ``.get(..., 0.0)`` at the use sites).
        self.no_data_values = {
            'hpd95': [],
            'quant_5_95': [],
            'range': [],
        }
        self.node_age_summaries_fieldnames = list("age_{}".format(f) for f in self.summary_stats_fieldnames)
        self.edge_length_summaries_fieldnames = list("length_{}".format(f) for f in self.summary_stats_fieldnames)
        self.fieldnames = self.primary_fieldnames + self.node_age_summaries_fieldnames + self.edge_length_summaries_fieldnames
        for fieldname in self.fieldnames:
            setattr(self, "{}_attr_name".format(fieldname), kwargs.pop("{}_attr_name".format(fieldname), fieldname))
            setattr(self, "{}_annotation_name".format(fieldname), kwargs.pop("{}_annotation_name".format(fieldname), fieldname))
            setattr(self, "is_{}_annotation_dynamic".format(fieldname), kwargs.pop("is_{}_annotation_dynamic".format(fieldname), True))
        self.minimum_edge_length = kwargs.pop("minimum_edge_length", None)
        self.error_on_negative_edge_lengths = kwargs.pop("error_on_negative_edge_lengths", False)
        if kwargs:
            # BUG FIX: the TypeError was previously constructed but never
            # raised, silently accepting unrecognized arguments.
            raise TypeError("Unrecognized or unsupported arguments: {}".format(kwargs))

    def _decorate(self,
            target,
            fieldname,
            value,
            set_attribute,
            set_annotation,
            ):
        """
        Sets ``value`` on ``target`` (a node or an edge) as an attribute
        and/or a metadata annotation, honoring the per-field attribute and
        annotation names and the dynamic-binding setting.
        """
        attr_name = getattr(self, "{}_attr_name".format(fieldname))
        annotation_name = getattr(self, "{}_annotation_name".format(fieldname))
        if set_attribute:
            setattr(target, attr_name, value)
            if set_annotation:
                # Drop any stale annotation of the same name before adding.
                target.annotations.drop(name=annotation_name)
                if getattr(self, "is_{}_annotation_dynamic".format(fieldname)):
                    # A dynamic annotation tracks the attribute's current
                    # value rather than capturing a snapshot.
                    target.annotations.add_bound_attribute(
                        attr_name=attr_name,
                        annotation_name=annotation_name,
                        )
                else:
                    target.annotations.add_new(
                        name=annotation_name,
                        value=value,
                        )
        elif set_annotation:
            target.annotations.drop(name=annotation_name)
            target.annotations.add_new(
                name=annotation_name,
                value=value,
                )

    def summarize_splits_on_tree(self,
            split_distribution,
            tree,
            is_bipartitions_updated=False):
        """
        Decorates ``tree`` in-place with the summary statistics stored in
        ``split_distribution``, per the current configuration, and returns
        the tree.
        """
        if split_distribution.taxon_namespace is not tree.taxon_namespace:
            raise error.TaxonNamespaceIdentityError(split_distribution, tree)
        if not is_bipartitions_updated:
            tree.encode_bipartitions()
        if self.support_label_compose_fn is not None:
            support_label_fn = lambda freq: self.support_label_compose_fn(freq)
        else:
            support_label_fn = lambda freq: "{:.{places}f}".format(freq, places=self.support_label_decimals)
        node_age_summaries = split_distribution.split_node_age_summaries
        edge_length_summaries = split_distribution.split_edge_length_summaries
        split_freqs = split_distribution.split_frequencies
        assert len(self.node_age_summaries_fieldnames) == len(self.summary_stats_fieldnames)
        for node in tree:
            split_bitmask = node.edge.bipartition.split_bitmask
            split_support = split_freqs.get(split_bitmask, 0.0)
            if self.support_as_percentages:
                split_support = split_support * 100
            self._decorate(
                target=node,
                fieldname="support",
                value=split_support,
                set_attribute=self.add_support_as_node_attribute,
                set_annotation=self.add_support_as_node_annotation,
                )
            if self.set_support_as_node_label:
                node.label = support_label_fn(split_support)
            if (self.add_node_age_summaries_as_node_attributes or self.add_node_age_summaries_as_node_annotations) and node_age_summaries:
                for fieldname, stats_fieldname in zip(self.node_age_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not node_age_summaries or split_bitmask not in node_age_summaries:
                        value = no_data_value
                    else:
                        value = node_age_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_node_age_summaries_as_node_attributes,
                        set_annotation=self.add_node_age_summaries_as_node_annotations,
                        )
            if (self.add_edge_length_summaries_as_edge_attributes or self.add_edge_length_summaries_as_edge_annotations) and edge_length_summaries:
                for fieldname, stats_fieldname in zip(self.edge_length_summaries_fieldnames, self.summary_stats_fieldnames):
                    no_data_value = self.no_data_values.get(stats_fieldname, 0.0)
                    if not edge_length_summaries or split_bitmask not in edge_length_summaries:
                        value = no_data_value
                    else:
                        value = edge_length_summaries[split_bitmask].get(stats_fieldname, no_data_value)
                    self._decorate(
                        target=node.edge,
                        fieldname=fieldname,
                        value=value,
                        set_attribute=self.add_edge_length_summaries_as_edge_attributes,
                        set_annotation=self.add_edge_length_summaries_as_edge_annotations,
                        )
            if self.set_edge_lengths is None:
                pass
            elif self.set_edge_lengths == "keep":
                pass
            elif self.set_edge_lengths == "support":
                node.edge.length = split_support
            elif self.set_edge_lengths == "clear":
                # BUG FIX: was ``edge.length = None`` -- ``edge`` is not
                # defined in this scope (NameError at runtime).
                node.edge.length = None
            elif self.set_edge_lengths in ("mean-age", "median-age"):
                if not node_age_summaries:
                    raise ValueError("Node ages not available")
                if self.set_edge_lengths == "mean-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.age = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-age":
                    try:
                        node.age = node_age_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.age = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
            elif self.set_edge_lengths in ("mean-length", "median-length"):
                if not edge_length_summaries:
                    raise ValueError("Edge lengths not available")
                if self.set_edge_lengths == "mean-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["mean"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("mean", 0.0)
                elif self.set_edge_lengths == "median-length":
                    try:
                        node.edge.length = edge_length_summaries[split_bitmask]["median"]
                    except KeyError:
                        node.edge.length = self.no_data_values.get("median", 0.0)
                else:
                    raise ValueError(self.set_edge_lengths)
                # BUG FIX: was ``edge.length`` -- ``edge`` is not defined in
                # this scope; clamp this node's edge length to the floor.
                if self.minimum_edge_length is not None and node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
            else:
                raise ValueError(self.set_edge_lengths)
        if self.set_edge_lengths in ("mean-age", "median-age"):
            # Node ages were assigned above; derive edge lengths from them.
            tree.set_edge_lengths_from_node_ages(
                minimum_edge_length=self.minimum_edge_length,
                error_on_negative_edge_lengths=self.error_on_negative_edge_lengths)
        elif self.set_edge_lengths not in ("keep", "clear", None) and self.minimum_edge_length is not None:
            # Enforce the edge-length floor for the direct-assignment modes.
            for node in tree:
                if node.edge.length is None:
                    node.edge.length = self.minimum_edge_length
                elif node.edge.length < self.minimum_edge_length:
                    node.edge.length = self.minimum_edge_length
        return tree
###############################################################################
### TreeArray
class TreeArray(
taxonmodel.TaxonNamespaceAssociated,
basemodel.MultiReadable,
):
"""
High-performance collection of tree structures.
Storage of minimal tree structural information as represented by toplogy
and edge lengths, minimizing memory and processing time.
This class stores trees as collections of splits and edge lengths. All
other information, such as labels, metadata annotations, etc. will be
discarded. A full |Tree| instance can be reconstructed as needed
from the structural information stored by this class, at the cost of
computation time.
"""
    class IncompatibleTreeArrayUpdate(Exception):
        # Base class for errors raised when merging data from another
        # TreeArray whose configuration conflicts with this one's.
        pass
    class IncompatibleRootingTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when the 'is_rooted_trees' settings differ.
        pass
    class IncompatibleEdgeLengthsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when the 'ignore_edge_lengths' settings differ.
        pass
    class IncompatibleNodeAgesTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when the 'ignore_node_ages' settings differ.
        pass
    class IncompatibleTreeWeightsTreeArrayUpdate(IncompatibleTreeArrayUpdate):
        # Raised when the 'use_tree_weights' settings differ.
        pass
##############################################################################
## Factory Function
@classmethod
def from_tree_list(cls,
trees,
is_rooted_trees=None,
ignore_edge_lengths=False,
ignore_node_ages=True,
use_tree_weights=True,
ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
is_force_max_age=None,
taxon_label_age_map=None,
is_bipartitions_updated=False,
):
taxon_namespace = trees.taxon_namespace
ta = cls(
taxon_namespace=taxon_namespace,
is_rooted_trees=is_rooted_trees,
ignore_edge_lengths=ignore_edge_lengths,
ignore_node_ages=ignore_node_ages,
use_tree_weights=use_tree_weights,
ultrametricity_precision=ultrametricity_precision,
is_force_max_age=is_force_max_age,
taxon_label_age_map=taxon_label_age_map,
)
ta.add_trees(
trees=trees,
is_bipartitions_updated=is_bipartitions_updated)
return ta
##############################################################################
## Life-Cycle
    def __init__(self,
            taxon_namespace=None,
            is_rooted_trees=None,
            ignore_edge_lengths=False,
            ignore_node_ages=True,
            use_tree_weights=True,
            ultrametricity_precision=constants.DEFAULT_ULTRAMETRICITY_PRECISION,
            is_force_max_age=None,
            taxon_label_age_map=None,
            ):
        """
        Parameters
        ----------
        taxon_namespace : |TaxonNamespace|
            The operational taxonomic unit concept namespace to manage taxon
            references.
        is_rooted_trees : bool
            If not set, then it will be set based on the rooting state of the
            first tree added. If |True|, then trying to add an unrooted tree
            will result in an error. If |False|, then trying to add a rooted
            tree will result in an error.
        ignore_edge_lengths : bool
            If |True|, then edge lengths of splits will not be stored. If
            |False|, then edge lengths will be stored.
        ignore_node_ages : bool
            If |True|, then node ages of splits will not be stored. If
            |False|, then node ages will be stored.
        use_tree_weights : bool
            If |False|, then tree weights will not be used to weight splits.
        ultrametricity_precision : numeric
            Passed through to the underlying |SplitDistribution|.
        is_force_max_age : bool
            Passed through to the underlying |SplitDistribution|.
        taxon_label_age_map : dict
            Passed through to the underlying |SplitDistribution|; presumably
            maps taxon labels to tip ages -- confirm against
            |SplitDistribution|'s documentation.
        """
        taxonmodel.TaxonNamespaceAssociated.__init__(self,
                taxon_namespace=taxon_namespace)
        # Configuration
        self._is_rooted_trees = is_rooted_trees
        self.ignore_edge_lengths = ignore_edge_lengths
        self.ignore_node_ages = ignore_node_ages
        self.use_tree_weights = use_tree_weights
        self.default_edge_length_value = 0 # edge.length of |None| gets this value
        self.tree_type = treemodel.Tree
        self.taxon_label_age_map = taxon_label_age_map
        # Storage: four parallel vectors, one entry per accessioned tree
        # (split bitmasks, edge lengths, leafset bitmask, weight).
        self._tree_split_bitmasks = []
        self._tree_edge_lengths = []
        self._tree_leafset_bitmasks = []
        self._tree_weights = []
        # All split counting and summarization is delegated to this instance.
        self._split_distribution = SplitDistribution(
                taxon_namespace=self.taxon_namespace,
                ignore_edge_lengths=self.ignore_edge_lengths,
                ignore_node_ages=self.ignore_node_ages,
                ultrametricity_precision=ultrametricity_precision,
                is_force_max_age=is_force_max_age,
                taxon_label_age_map=self.taxon_label_age_map,
                )
##############################################################################
## Book-Keeping
    def _get_is_rooted_trees(self):
        # Read-only: the rooting state is fixed by the constructor or the
        # first tree added (see ``validate_rooting()``).
        return self._is_rooted_trees
    is_rooted_trees = property(_get_is_rooted_trees)
    def _get_split_distribution(self):
        # Read-only access to the underlying SplitDistribution.
        return self._split_distribution
    split_distribution = property(_get_split_distribution)
def validate_rooting(self, rooting_of_other):
if self._is_rooted_trees is None:
self._is_rooted_trees = rooting_of_other
elif self._is_rooted_trees != rooting_of_other:
if self._is_rooted_trees:
ta = "rooted"
t = "unrooted"
else:
ta = "unrooted"
t = "rooted"
raise error.MixedRootingError("Cannot add {tree_rooting} tree to TreeArray with {tree_array_rooting} trees".format(
tree_rooting=t,
tree_array_rooting=ta))
##############################################################################
## Updating from Another TreeArray
def update(self, other):
if len(self) > 0:
# self.validate_rooting(other._is_rooted_trees)
if self._is_rooted_trees is not other._is_rooted_trees:
raise TreeArray.IncompatibleRootingTreeArrayUpdate("Updating from incompatible TreeArray: 'is_rooted_trees' should be '{}', but is instead '{}'".format(other._is_rooted_trees, self._is_rooted_trees, ))
if self.ignore_edge_lengths is not other.ignore_edge_lengths:
raise TreeArray.IncompatibleEdgeLengthsTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_edge_lengths' is not: {} ".format(other.ignore_edge_lengths, self.ignore_edge_lengths, ))
if self.ignore_node_ages is not other.ignore_node_ages:
raise TreeArray.IncompatibleNodeAgesTreeArrayUpdate("Updating from incompatible TreeArray: 'ignore_node_ages' should be '{}', but is instead '{}'".format(other.ignore_node_ages, self.ignore_node_ages))
if self.use_tree_weights is not other.use_tree_weights:
raise TreeArray.IncompatibleTreeWeightsTreeArrayUpdate("Updating from incompatible TreeArray: 'use_tree_weights' should be '{}', but is instead '{}'".format(other.use_tree_weights, self.use_tree_weights))
else:
self._is_rooted_trees = other._is_rooted_trees
self.ignore_edge_lengths = other.ignore_edge_lengths
self.ignore_node_ages = other.ignore_node_ages
self.use_tree_weights = other.use_tree_weights
self._tree_split_bitmasks.extend(other._tree_split_bitmasks)
self._tree_edge_lengths.extend(other._tree_edge_lengths)
self._tree_leafset_bitmasks.extend(other._tree_leafset_bitmasks)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(other._split_distribution)
##############################################################################
## Fundamental Tree Accession
def add_tree(self,
tree,
is_bipartitions_updated=False,
index=None):
"""
Adds the structure represented by a |Tree| instance to the
collection.
Parameters
----------
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
index : integer
Insert before index.
Returns
-------
index : int
The index of the accession.
s : iterable of splits
A list of split bitmasks from ``tree``.
e :
A list of edge length values from ``tree``.
"""
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
self.validate_rooting(tree.is_rooted)
splits, edge_lengths, node_ages = self._split_distribution.count_splits_on_tree(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
default_edge_length_value=self.default_edge_length_value)
# pre-process splits
splits = tuple(splits)
# pre-process edge lengths
if self.ignore_edge_lengths:
# edge_lengths = tuple( [None] * len(splits) )
edge_lengths = tuple( None for x in range(len(splits)) )
else:
assert len(splits) == len(edge_lengths), "Unequal vectors:\n Splits: {}\n Edges: {}\n".format(splits, edge_lengths)
edge_lengths = tuple(edge_lengths)
# pre-process weights
if tree.weight is not None and self.use_tree_weights:
weight_to_use = float(tree.weight)
else:
weight_to_use = 1.0
# accession info
if index is None:
index = len(self._tree_split_bitmasks)
self._tree_split_bitmasks.append(splits)
self._tree_leafset_bitmasks.append(tree.seed_node.edge.bipartition.leafset_bitmask)
self._tree_edge_lengths.append(edge_lengths)
self._tree_weights.append(weight_to_use)
else:
self._tree_split_bitmasks.insert(index, splits)
self._tree_leafset_bitmasks.insert(index,
tree.seed_node.edge.bipartition.leafset_bitmask)
self._tree_edge_lengths.insert(index, edge_lengths)
self._tree_weights.insert(index, weight_to_use)
return index, splits, edge_lengths, weight_to_use
def add_trees(self, trees, is_bipartitions_updated=False):
"""
Adds multiple structures represneted by an iterator over or iterable of
|Tree| instances to the collection.
Parameters
----------
trees : iterator over or iterable of |Tree| instances
An iterator over or iterable of |Tree| instances. Thess must
have the same rooting state as all the other trees accessioned into
this collection as well as that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
"""
for tree in trees:
self.add_tree(tree,
is_bipartitions_updated=is_bipartitions_updated)
##############################################################################
## I/O
def read_from_files(self,
files,
schema,
**kwargs):
"""
Adds multiple structures from one or more external file sources to the
collection.
Parameters
----------
files : iterable of strings and/or file objects
A list or some other iterable of file paths or file-like objects
(string elements will be assumed to be paths to files, while all
other types of elements will be assumed to be file-like
objects opened for reading).
schema : string
The data format of the source. E.g., "nexus", "newick", "nexml".
\*\*kwargs : keyword arguments
These will be passed directly to the underlying schema-specific
reader implementation.
"""
if "taxon_namespace" in kwargs:
if kwargs["taxon_namespace"] is not self.taxon_namespace:
raise ValueError("TaxonNamespace object passed as keyword argument is not the same as self's TaxonNamespace reference")
kwargs.pop("taxon_namespace")
target_tree_offset = kwargs.pop("tree_offset", 0)
tree_yielder = self.tree_type.yield_from_files(
files=files,
schema=schema,
taxon_namespace=self.taxon_namespace,
**kwargs)
current_source_index = None
current_tree_offset = None
for tree_idx, tree in enumerate(tree_yielder):
current_yielder_index = tree_yielder.current_file_index
if current_source_index != current_yielder_index:
current_source_index = current_yielder_index
current_tree_offset = 0
if current_tree_offset >= target_tree_offset:
self.add_tree(tree=tree, is_bipartitions_updated=False)
current_tree_offset += 1
def _parse_and_add_from_stream(self,
stream,
schema,
**kwargs):
cur_size = len(self._tree_split_bitmasks)
self.read_from_files(files=[stream], schema=schema, **kwargs)
new_size = len(self._tree_split_bitmasks)
return new_size - cur_size
    def read(self, **kwargs):
        """
        Add |Tree| objects to existing |TreeList| from data source providing
        one or more collections of trees.

        **Mandatory Source-Specification Keyword Argument (Exactly One Required):**

            - **file** (*file*) -- File or file-like object of data opened for reading.
            - **path** (*str*) -- Path to file of data.
            - **url** (*str*) -- URL of data.
            - **data** (*str*) -- Data given directly.

        **Mandatory Schema-Specification Keyword Argument:**

            - **schema** (*str*) -- Identifier of format of data given by the
              "``file``", "``path``", "``data``", or "``url``" argument
              specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus
              </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See
              "|Schemas|" for more details.

        **Optional General Keyword Arguments:**

            - **collection_offset** (*int*) -- 0-based index of tree block or
              collection in source to be parsed. If not specified then the
              first collection (offset = 0) is assumed.
            - **tree_offset** (*int*) -- 0-based index of first tree within the
              collection specified by ``collection_offset`` to be parsed (i.e.,
              skipping the first ``tree_offset`` trees). If not
              specified, then the first tree (offset = 0) is assumed (i.e., no
              trees within the specified collection will be skipped). Use this
              to specify, e.g. a burn-in.
            - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|,
              then unsupported or unrecognized keyword arguments will not
              result in an error. Default is |False|: unsupported keyword
              arguments will result in an error.

        **Optional Schema-Specific Keyword Arguments:**

            These provide control over how the data is interpreted and
            processed, and supported argument names and values depend on
            the schema as specified by the value passed as the "``schema``"
            argument. See "|Schemas|" for more details.

        **Examples:**

        ::

            tree_array = dendropy.TreeArray()
            tree_array.read(
                    file=open('treefile.tre', 'rU'),
                    schema="newick",
                    tree_offset=100)
            tree_array.read(
                    path='sometrees.nexus',
                    schema="nexus",
                    collection_offset=2,
                    tree_offset=100)
            tree_array.read(
                    data="((A,B),(C,D));((A,C),(B,D));",
                    schema="newick")
            tree_array.read(
                    url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex",
                    schema="nexus")

        """
        # Source resolution and parsing are delegated to the MultiReadable
        # mixin machinery.
        return basemodel.MultiReadable._read_from(self, **kwargs)
##############################################################################
## Container (List) Interface
def append(tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated)
def insert(index, tree, is_bipartitions_updated=False):
"""
Adds a |Tree| instance to the collection before position given
by ``index``.
Parameters
----------
index : integer
Insert before index.
tree : |Tree|
A |Tree| instance. This must have the same rooting state as
all the other trees accessioned into this collection as well as
that of ``self.is_rooted_trees``.
is_bipartitions_updated : bool
If |False| [default], then the tree will have its splits encoded or
updated. Otherwise, if |True|, then the tree is assumed to have its
splits already encoded and updated.
Returns
-------
index : int
The index of the accession.
s : iterable of splits
A list of split bitmasks from ``tree``.
e :
A list of edge length values ``tree``.
"""
return self.add_tree(tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
index=index)
def extend(self, tree_array):
"""
Accession of data from ``tree_array`` to self.
Parameters
----------
tree_array : |TreeArray|
A |TreeArray| instance from which to add data.
"""
assert self.taxon_namespace is tree_array.taxon_namespace
assert self._is_rooted_trees is tree_array._is_rooted_trees
assert self.ignore_edge_lengths is tree_array.ignore_edge_lengths
assert self.ignore_node_ages is tree_array.ignore_node_ages
assert self.use_tree_weights is tree_array.use_tree_weights
self._tree_split_bitmasks.extend(tree_array._tree_split_bitmasks)
self._tree_edge_lengths.extend(tree_array._tree_edge_lengths)
self._tree_weights.extend(other._tree_weights)
self._split_distribution.update(tree_array._split_distribution)
return self
    def __iadd__(self, tree_array):
        """
        Accession of data from ``tree_array`` to self.

        Parameters
        ----------
        tree_array : |TreeArray|
            A |TreeArray| instance from which to add data.
        """
        # In-place addition simply delegates to ``extend()``, which returns
        # ``self`` as required by the augmented-assignment protocol.
        return self.extend(tree_array)
def __add__(self, other):
"""
Creates and returns new |TreeArray|.
Parameters
----------
other : iterable of |Tree| objects
Returns
-------
tlist : |TreeArray| object
|TreeArray| object containing clones of |Tree| objects
in ``self`` and ``other``.
"""
ta = TreeArray(
taxon_namespace=self.taxon_namespace,
is_rooted_trees=self._is_rooted_trees,
ignore_edge_lengths=self.ignore_edge_lengths,
ignore_node_ages=self.ignore_node_ages,
use_tree_weights=self.use_tree_weights,
ultrametricity_precision=self._split_distribution.ultrametricity_precision,
)
ta.default_edge_length_value = self.default_edge_length_value
ta.tree_type = self.tree_type
ta += self
ta += other
return ta
def __contains__(self, splits):
# expensive!!
return tuple(splits) in self._tree_split_bitmasks
    def __delitem__(self, index):
        # Deletion is unsupported: removing a tree would require subtracting
        # its contribution from the aggregated split distribution (counts,
        # weights, etc.), as sketched in the scaffolding below.
        raise NotImplementedError
        # expensive!!
        # tree_split_bitmasks = self._trees_splits[index]
        ### TODO: remove this "tree" from underlying splits distribution
        # for split in tree_split_bitmasks:
        #     self._split_distribution.split_counts[split] -= 1
        # etc.
        # becomes complicated because tree weights need to be updated etc.
        # del self._tree_split_bitmasks[index]
        # del self._tree_edge_lengths[index]
        # return
def __iter__(self):
"""
Yields pairs of (split, edge_length) from the store.
"""
for split, edge_length in zip(self._tree_split_bitmasks, self._tree_edge_lengths):
yield split, edge_length
def __reversed__(self):
raise NotImplementedError
def __len__(self):
return len(self._tree_split_bitmasks)
def __getitem__(self, index):
raise NotImplementedError
# """
# Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
# to the "tree" at ``index``.
# """
# return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
def __setitem__(self, index, value):
raise NotImplementedError
def clear(self):
raise NotImplementedError
self._tree_split_bitmasks = []
self._tree_edge_lengths = []
self._tree_leafset_bitmasks = []
self._split_distribution.clear()
def index(self, splits):
raise NotImplementedError
return self._tree_split_bitmasks.index(splits)
    def pop(self, index=-1):
        # Removal of trees is deliberately unsupported; see ``__delitem__``
        # for why reversing a tree's contribution is non-trivial.
        raise NotImplementedError
    def remove(self, tree):
        # Removal of trees is deliberately unsupported; see ``__delitem__``.
        raise NotImplementedError
    def reverse(self):
        # In-place reordering is deliberately unsupported.
        raise NotImplementedError
    def sort(self, key=None, reverse=False):
        # In-place reordering is deliberately unsupported.
        raise NotImplementedError
##############################################################################
## Accessors/Settors
def get_split_bitmask_and_edge_tuple(self, index):
"""
Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding
to the "tree" at ``index``.
"""
return self._tree_split_bitmasks[index], self._tree_edge_lengths[index]
##############################################################################
## Calculations
def calculate_log_product_of_split_supports(self,
include_external_splits=False,
):
"""
Calculates the log product of split support for each of the trees in
the collection.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : tuple(list[numeric], integer)
Returns a tuple, with the first element being the list of scores
and the second being the index of the highest score. The element order
corresponds to the trees accessioned in the collection.
"""
assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
scores = []
max_score = None
max_score_tree_idx = None
split_frequencies = self._split_distribution.split_frequencies
for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
log_product_of_split_support = 0.0
for split_bitmask in split_bitmasks:
if (include_external_splits
or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
):
split_support = split_frequencies.get(split_bitmask, 0.0)
if split_support:
log_product_of_split_support += math.log(split_support)
if max_score is None or max_score < log_product_of_split_support:
max_score = log_product_of_split_support
max_score_tree_idx = tree_idx
scores.append(log_product_of_split_support)
return scores, max_score_tree_idx
def maximum_product_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the product of split supports, also
known as the "Maximum Clade Credibility Tree" or MCCT.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mcct_tree : Tree
Tree that maximizes the product of split supports.
"""
scores, max_score_tree_idx = self.calculate_log_product_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs)
tree.log_product_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
def calculate_sum_of_split_supports(self,
include_external_splits=False,
):
"""
Calculates the *sum* of split support for all trees in the
collection.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
s : tuple(list[numeric], integer)
Returns a tuple, with the first element being the list of scores
and the second being the index of the highest score. The element order
corresponds to the trees accessioned in the collection.
"""
assert len(self._tree_leafset_bitmasks) == len(self._tree_split_bitmasks)
scores = []
max_score = None
max_score_tree_idx = None
split_frequencies = self._split_distribution.split_frequencies
for tree_idx, (tree_leafset_bitmask, split_bitmasks) in enumerate(zip(self._tree_leafset_bitmasks, self._tree_split_bitmasks)):
sum_of_support = 0.0
for split_bitmask in split_bitmasks:
if (include_external_splits
or split_bitmask == tree_leafset_bitmask # count root edge (following BEAST)
or not treemodel.Bipartition.is_trivial_bitmask(split_bitmask, tree_leafset_bitmask)
):
split_support = split_frequencies.get(split_bitmask, 0.0)
sum_of_support += split_support
if max_score is None or max_score < sum_of_support:
max_score = sum_of_support
max_score_tree_idx = tree_idx
scores.append(sum_of_support)
return scores, max_score_tree_idx
def maximum_sum_of_split_support_tree(self,
include_external_splits=False,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Return the tree with that maximizes the *sum* of split supports.
Parameters
----------
include_external_splits : bool
If |True|, then non-internal split posteriors will be included in
the score. Defaults to |False|: these are skipped. This should only
make a difference when dealing with splits collected from trees of
different leaf sets.
Returns
-------
mst_tree : Tree
Tree that maximizes the sum of split supports.
"""
scores, max_score_tree_idx = self.calculate_sum_of_split_supports(
include_external_splits=include_external_splits,
)
tree = self.restore_tree(
index=max_score_tree_idx,
**split_summarization_kwargs
)
tree.sum_of_split_support = scores[max_score_tree_idx]
if summarize_splits:
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=True,
**split_summarization_kwargs
)
return tree
def collapse_edges_with_less_than_minimum_support(self,
tree,
min_freq=constants.GREATER_THAN_HALF,
):
return self.split_distribution.collapse_edges_with_less_than_minimum_support(
tree=tree,
min_freq=min_freq)
def consensus_tree(self,
min_freq=constants.GREATER_THAN_HALF,
summarize_splits=True,
**split_summarization_kwargs
):
"""
Returns a consensus tree from splits in ``self``.
Parameters
----------
min_freq : real
The minimum frequency of a split in this distribution for it to be
added to the tree.
is_rooted : bool
Should tree be rooted or not? If *all* trees counted for splits are
explicitly rooted or unrooted, then this will default to |True| or
|False|, respectively. Otherwise it defaults to |None|.
\*\*split_summarization_kwargs : keyword arguments
These will be passed directly to the underlying
`SplitDistributionSummarizer` object. See
:meth:`SplitDistributionSummarizer.configure` for options.
Returns
-------
t : consensus tree
"""
tree = self._split_distribution.consensus_tree(
min_freq=min_freq,
is_rooted=self.is_rooted_trees,
summarize_splits=summarize_splits,
**split_summarization_kwargs
)
# return self._split_distribution.consensus_tree(*args, **kwargs)
return tree
##############################################################################
## Mapping of Split Support
def summarize_splits_on_tree(self,
tree,
is_bipartitions_updated=False,
**kwargs):
if self.taxon_namespace is not tree.taxon_namespace:
raise error.TaxonNamespaceIdentityError(self, tree)
self._split_distribution.summarize_splits_on_tree(
tree=tree,
is_bipartitions_updated=is_bipartitions_updated,
**kwargs
)
##############################################################################
## Tree Reconstructions
def restore_tree(self,
index,
summarize_splits_on_tree=False,
**split_summarization_kwargs
):
split_bitmasks = self._tree_split_bitmasks[index]
if self.ignore_edge_lengths:
split_edge_lengths = None
else:
assert len(self._tree_split_bitmasks) == len(self._tree_edge_lengths)
edge_lengths = self._tree_edge_lengths[index]
split_edge_lengths = dict(zip(split_bitmasks, edge_lengths))
tree = self.tree_type.from_split_bitmasks(
split_bitmasks=split_bitmasks,
taxon_namespace=self.taxon_namespace,
is_rooted=self._is_rooted_trees,
split_edge_lengths=split_edge_lengths,
)
# if update_bipartitions:
# tree.encode_bipartitions()
if summarize_splits_on_tree:
split_summarization_kwargs["is_bipartitions_updated"] = True
self._split_distribution.summarize_splits_on_tree(
tree=tree,
**split_summarization_kwargs)
return tree
##############################################################################
## Topology Frequencies
def split_bitmask_set_frequencies(self):
"""
Returns a dictionary with keys being sets of split bitmasks and values
being the frequency of occurrence of trees represented by those split
bitmask sets in the collection.
"""
split_bitmask_set_count_map = collections.Counter()
assert len(self._tree_split_bitmasks) == len(self._tree_weights)
for split_bitmask_set, weight in zip(self._tree_split_bitmasks, self._tree_weights):
split_bitmask_set_count_map[frozenset(split_bitmask_set)] += (1.0 * weight)
split_bitmask_set_freqs = {}
normalization_weight = self._split_distribution.calc_normalization_weight()
# print("===> {}".format(normalization_weight))
for split_bitmask_set in split_bitmask_set_count_map:
split_bitmask_set_freqs[split_bitmask_set] = split_bitmask_set_count_map[split_bitmask_set] / normalization_weight
return split_bitmask_set_freqs
def bipartition_encoding_frequencies(self):
"""
Returns a dictionary with keys being bipartition encodings of trees
(as ``frozenset`` collections of |Bipartition| objects) and
values the frequency of occurrence of trees represented by that
encoding in the collection.
"""
# split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
# bipartition_encoding_freqs = {}
# for split_bitmask_set, freq in split_bitmask_set_freqs.items():
# bipartition_encoding = []
# inferred_leafset = max(split_bitmask_set)
# for split_bitmask in split_bitmask_set:
# bipartition = treemodel.Bipartition(
# bitmask=split_bitmask,
# tree_leafset_bitmask=inferred_leafset,
# is_rooted=self._is_rooted_trees,
# is_mutable=False,
# compile_bipartition=True,
# )
# bipartition_encoding.append(bipartition)
# bipartition_encoding_freqs[frozenset(bipartition_encoding)] = freq
# return bipartition_encoding_freqs
bipartition_encoding_freqs = {}
topologies = self.topologies()
for tree in topologies:
bipartition_encoding_freqs[ frozenset(tree.encode_bipartitions()) ] = tree.frequency
return bipartition_encoding_freqs
    def topologies(self,
            sort_descending=None,
            frequency_attr_name="frequency",
            frequency_annotation_name="frequency",
            ):
        """
        Returns a |TreeList| instance containing the reconstructed tree
        topologies (i.e. |Tree| instances with no edge weights) in the
        collection, with the frequency added as an attributed.

        Parameters
        ----------
        sort_descending : bool
            If |True|, then topologies will be sorted in *descending* frequency
            order (i.e., topologies with the highest frequencies will be listed
            first). If |False|, then they will be sorted in *ascending*
            frequency. If |None| (default), then they will not be sorted.
        frequency_attr_name : str
            Name of attribute to add to each |Tree| representing
            the frequency of that topology in the collection. If |None|
            then the attribute will not be added.
        frequency_annotation_name : str
            Name of annotation to add to the annotations of each |Tree|,
            representing the frequency of that topology in the collection. The
            value of this annotation will be dynamically-bound to the attribute
            specified by ``frequency_attr_name`` unless that is |None|. If
            ``frequency_annotation_name`` is |None| then the annotation will not
            be added.
        """
        # Sorting requires a frequency attribute to sort by.
        if sort_descending is not None and frequency_attr_name is None:
            raise ValueError("Attribute needs to be set on topologies to enable sorting")
        split_bitmask_set_freqs = self.split_bitmask_set_frequencies()
        topologies = TreeList(taxon_namespace=self.taxon_namespace)
        # Reconstruct one tree per distinct split-bitmask set (topology).
        for split_bitmask_set, freq in split_bitmask_set_freqs.items():
            tree = self.tree_type.from_split_bitmasks(
                split_bitmasks=split_bitmask_set,
                taxon_namespace=self.taxon_namespace,
                is_rooted=self._is_rooted_trees,
            )
            if frequency_attr_name is not None:
                setattr(tree, frequency_attr_name, freq)
                if frequency_annotation_name is not None:
                    # Annotation value is dynamically bound to the attribute.
                    tree.annotations.add_bound_attribute(
                        attr_name=frequency_attr_name,
                        annotation_name=frequency_annotation_name,
                    )
                else:
                    # NOTE(review): reached when ``frequency_annotation_name``
                    # is None, so ``add_new`` receives a None name — confirm
                    # this is intended (docstring says the annotation should
                    # then be omitted).
                    tree.annotations.add_new(
                        frequency_annotation_name,
                        freq,
                    )
            topologies.append(tree)
        if sort_descending is not None:
            topologies.sort(key=lambda t: getattr(t, frequency_attr_name), reverse=sort_descending)
        return topologies
|
en
| 0.686529
|
#! /usr/bin/env python ############################################################################## ## DendroPy Phylogenetic Computing Library. ## ## Copyright 2010-2015 <NAME> and <NAME>. ## All rights reserved. ## ## See "LICENSE.rst" for terms and conditions of usage. ## ## If you use this work or any portion thereof in published work, ## please cite it as: ## ## <NAME>. and <NAME>. 2010. DendroPy: a Python library ## for phylogenetic computing. Bioinformatics 26: 1569-1571. ## ############################################################################## This module handles the core definition of classes that model collections of trees. ############################################################################## ### TreeList A collection of |Tree| objects, all referencing the same "universe" of opeational taxonomic unit concepts through the same |TaxonNamespace| object reference. Constructs a new |TreeList| object and populates it with trees from file-like object ``stream``. Notes ----- *All* operational taxonomic unit concepts in the data source will be included in the |TaxonNamespace| object associated with the new |TreeList| object and its contained |Tree| objects, even those not associated with trees or the particular trees being retrieved. Parameters ---------- stream : file or file-like object Source of data. schema : string Identifier of format of data in ``stream`` collection_offset : integer or None 0-based index indicating collection of trees to parse. If |None|, then all tree collections are retrieved, with each distinct collection parsed into a separate |TreeList| object. If the tree colleciton offset index is equal or greater than the number of tree collections in the data source, then IndexError is raised. Negative offsets work like negative list indexes; e.g., a ``collection_offset`` of -1 means to read the last collection of trees in the data source. For data formats that do not support the concept of distinct tree collections (e.g. 
NEWICK) are considered single-collection data source (i.e, the only acceptable ``collection_offset`` values are -1 or 0). tree_offset : integer or None 0-based index indicating particular tree within a particular collection of trees at which to begin reading. If not specified or |None| (default), then all trees are parsed. Otherwise, must be an integer value up the length of the collection minus 1. A positive offset indicates the number of trees in the collection to skip; e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the collection. Negative offsets work like negative list indexes; e.g., a ``tree_offset`` value of -10 means to retrieve the last 10 trees in the collection. If the tree offset index is equal or greater than the number of trees in the collection, then IndexError is raised. Requires that a particular tree collection has been identified using the ``tree_collection_offset`` parameter: if ``tree_collection_offset`` is not specified, a TypeError is raised. \*\*kwargs : keyword arguments Arguments to customize parsing, instantiation, processing, and accession of |Tree| objects read from the data source, including schema- or format-specific handling. The following optional keyword arguments are recognized and handled by this function: * ``label`` Specifies the label or description of the new |TreeList|. * ``taxon_namespace`` specifies the |TaxonNamespace| object to be attached to the new |TreeList| object. Note that *all* operational taxonomic unit concepts in the data source will be accessioned into the specified |TaxonNamespace| instance. This includes the operation taxonomic unit definitions associated with all tree collections and character matrices in the data source. * ``tree_list`` : **SPECIAL** If passed a |TreeList| using this keyword, then this instance is populated and returned (instead of a new instance being created). All other keyword arguments are passed directly to |TreeList|.read()`. 
Other keyword arguments may be available, depending on the implementation of the reader specialized to handle ``schema`` formats. Notes ----- Note that in most cases, even if ``collection_offset`` and ``tree_offset`` are specified to restrict the trees returned, the *entire* data source is still parsed and processed. So this is not more efficient than reading all the trees and then manually-extracting them later; just more convenient. If you need just a single subset of trees from a data source, there is no gain in efficiency. If you need multiple trees or subsets of trees from the same data source, it would be much more efficient to read the entire data source, and extract trees as needed. Returns ------- A |TreeList| object. # these must be pulled before passing the kwargs # down to the reader # get the reader # Accommodate an existing TreeList object being passed # if tree_offset is not None: # raise TypeError("Cannot specify ``tree_offset`` without specifying ``collection_offset``") # coerce all tree products into this list # if collection_offset < 0: # raise IndexError("Collection offset out of range: {} (minimum valid tree offset = 0)".format(collection_offset)) # if tree_offset < 0: # raise IndexError("Tree offset out of range: {} (minimum offset = 0)".format(tree_offset)) # taxon_namespace = taxonmodel.process_kwargs_dict_for_taxon_namespace(kwargs, None) # label = kwargs.pop("label", None) # tree_list = cls(label=label, # taxon_namespace=taxon_namespace) # tree_list.read( # stream=stream, # schema=schema, # collection_offset=collection_offset, # tree_offset=tree_offset, # **kwargs) # return tree_list Instantiate and return a *new* |TreeList| object from a data source. **Mandatory Source-Specification Keyword Argument (Exactly One Required):** - **file** (*file*) -- File or file-like object of data opened for reading. - **path** (*str*) -- Path to file of data. - **url** (*str*) -- URL of data. - **data** (*str*) -- Data given directly. 
**Mandatory Schema-Specification Keyword Argument:** - **schema** (*str*) -- Identifier of format of data given by the "``file``", "``path``", "``data``", or "``url``" argument specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See "|Schemas|" for more details. **Optional General Keyword Arguments:** - **label** (*str*) -- Name or identifier to be assigned to the new object; if not given, will be assigned the one specified in the data source, or |None| otherwise. - **taxon_namespace** (|TaxonNamespace|) -- The |TaxonNamespace| instance to use to :doc:`manage the taxon names </primer/taxa>`. If not specified, a new one will be created. - **collection_offset** (*int*) -- 0-based index of tree block or collection in source to be parsed. If not specified then the first collection (offset = 0) is assumed. - **tree_offset** (*int*) -- 0-based index of first tree within the collection specified by ``collection_offset`` to be parsed (i.e., skipping the first ``tree_offset`` trees). If not specified, then the first tree (offset = 0) is assumed (i.e., no trees within the specified collection will be skipped). Use this to specify, e.g. a burn-in. - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|, then unsupported or unrecognized keyword arguments will not result in an error. Default is |False|: unsupported keyword arguments will result in an error. **Optional Schema-Specific Keyword Arguments:** These provide control over how the data is interpreted and processed, and supported argument names and values depend on the schema as specified by the value passed as the "``schema``" argument. See "|Schemas|" for more details. 
**Examples:** :: tlst1 = dendropy.TreeList.get( file=open('treefile.tre', 'rU'), schema="newick") tlst2 = dendropy.TreeList.get( path='sometrees.nexus', schema="nexus", collection_offset=2, tree_offset=100) tlst3 = dendropy.TreeList.get( data="((A,B),(C,D));((A,C),(B,D));", schema="newick") tree4 = dendropy.dendropy.TreeList.get( url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex", schema="nexus") Creates and returns a |Tree| of a type that this list understands how to manage. Deriving classes can override this to provide for custom Tree-type object lists. You can simple override the class-level variable `DEFAULT_TREE_TYPE` in your derived class if the constructor signature of the alternate tree type is the same as |Tree|. If you want to have a TreeList *instance* that generates custom trees (i.e., as opposed to a TreeList-ish *class* of instances), set the ``tree_type`` attribute of the TreeList instance. Parameters ---------- \*args : positional arguments Passed directly to constructor of |Tree|. \*\*kwargs : keyword arguments Passed directly to constructor of |Tree|. Returns ------- A |Tree| object. ########################################################################### ### Lifecycle and Identity Constructs a new |TreeList| object, populating it with any iterable container with Tree object members passed as unnamed argument, or from a data source if ``stream`` and ``schema`` are passed. If passed an iterable container, the objects in that container must be of type |Tree| (or derived). If the container is of type |TreeList|, then, because each |Tree| object must have the same |TaxonNamespace| reference as the containing |TreeList|, the trees in the container passed as an initialization argument will be **deep**-copied (except for associated |TaxonNamespace| and |Taxon| objects, which will be shallow-copied). If the container is any other type of iterable, then the |Tree| objects will be **shallow**-copied. 
|TreeList| objects can directly thus be instantiated in the following ways:: # /usr/bin/env python from dendropy import TaxonNamespace, Tree, TreeList # instantiate an empty tree tlst1 = TreeList() # TreeList objects can be instantiated from an external data source # using the 'get()' factory class method tlst2 = TreeList.get(file=open('treefile.tre', 'rU'), schema="newick") tlst3 = TreeList.get(path='sometrees.nexus', schema="nexus") tlst4 = TreeList.get(data="((A,B),(C,D));((A,C),(B,D));", schema="newick") # can also call `read()` on a TreeList object; each read adds # (appends) the tree(s) found to the TreeList tlst5 = TreeList() tlst5.read(file=open('boot1.tre', 'rU'), schema="newick") tlst5.read(path="boot3.tre", schema="newick") tlst5.read(value="((A,B),(C,D));((A,C),(B,D));", schema="newick") # populated from list of Tree objects tlist6_1 = Tree.get( data="((A,B),(C,D))", schema="newick") tlist6_2 = Tree.get( data="((A,C),(B,D))", schema="newick") tlist6 = TreeList([tlist5_1, tlist5_2]) # passing keywords to underlying tree parser tlst8 = TreeList.get( data="((A,B),(C,D));((A,C),(B,D));", schema="newick", taxon_namespace=tlst3.taxon_namespace, rooting="force-rooted", extract_comment_metadata=True, store_tree_weights=False, preserve_underscores=True) # Subsets of trees can be read. 
Note that in most cases, the entire # data source is parsed, so this is not more efficient than reading # all the trees and then manually-extracting them later; just more # convenient # skip the *first* 100 trees in the *first* (offset=0) collection of trees trees = TreeList.get( path="mcmc.tre", schema="newick", collection_offset=0, tree_offset=100) # get the *last* 10 trees in the *second* (offset=1) collection of trees trees = TreeList.get( path="mcmc.tre", schema="newick", collection_offset=1, tree_offset=-10) # get the last 10 trees in the second-to-last collection of trees trees = TreeList.get( path="mcmc.tre", schema="newick", collection_offset=-2, tree_offset=100) # Slices give shallow-copy: trees are references tlst4copy0a = t4[:] assert tlst4copy0a[0] is t4[0] tlst4copy0b = t4[:4] assert tlst4copy0b[0] is t4[0] # 'Taxon-namespace-scoped' copy: # I.e., Deep-copied objects but taxa and taxon namespace # are copied as references tlst4copy1a = TreeList(t4) tlst4copy1b = TreeList([Tree(t) for t in tlst5]) assert tlst4copy1a[0] is not tlst4[0] # True assert tlst4copy1a.taxon_namespace is tlst4.taxon_namespace # True assert tlst4copy1b[0] is not tlst4[0] # True assert tlst4copy1b.taxon_namespace is tlst4.taxon_namespace # True # only allow 1 positional argument # memo[id(tree)] = self # this populates ``memo`` with references to the # the TaxonNamespace and Taxon objects ########################################################################### ### Representation ########################################################################### ### Data I/O Dummy factory to coerce all |TaxonNamespace| objects required when parsing a data source to reference ``self.taxon_namespace``. Dummy factory to coerce all |TreeList| objects required when parsing a data source to reference ``self``. Parses |Tree| objects from data source and adds to this collection. 
Notes ----- *All* operational taxonomic unit concepts in the data source will be included in the |TaxonNamespace| object associated with the new |TreeList| object and its contained |Tree| objects, even those not associated with trees or the particular trees being retrieved. Parameters ---------- stream : file or file-like object Source of data. schema : string Identifier of format of data in ``stream``. collection_offset : integer or None 0-based index indicating collection of trees to parse. If |None|, then all tree collections are retrieved, with each distinct collection parsed into a separate |TreeList| object. If the tree colleciton offset index is equal or greater than the number of tree collections in the data source, then IndexError is raised. Negative offsets work like negative list indexes; e.g., a ``collection_offset`` of -1 means to read the last collection of trees in the data source. For data formats that do not support the concept of distinct tree collections (e.g. NEWICK) are considered single-collection data source (i.e, the only acceptable ``collection_offset`` values are -1 or 0). tree_offset : integer or None 0-based index indicating particular tree within a particular collection of trees at which to begin reading. If not specified or |None| (default), then all trees are parsed. Otherwise, must be an integer value up the length of the collection minus 1. A positive offset indicates the number of trees in the collection to skip; e.g. a ``tree_offset`` of 20 means to skip the first 20 trees in the collection. Negative offsets work like negative list indexes; e.g., a ``tree_offset`` value of -10 means to retrieve the last 10 trees in the collection. If the tree offset index is equal or greater than the number of trees in the collection, then IndexError is raised. Requires that a particular tree collection has been identified using the ``tree_collection_offset`` parameter: if ``tree_collection_offset`` is not specified, a TypeError is raised. 
\*\*kwargs : keyword arguments Arguments to customize parsing, instantiation, processing, and accession of |Tree| objects read from the data source, including schema- or format-specific handling. These will be passed to the underlying schema-specific reader for handling. General (schema-agnostic) keyword arguments are: * ``rooted`` specifies the default rooting interpretation of the tree. * ``edge_length_type`` specifies the type of the edge lengths (int or float; defaults to 'float') Other keyword arguments are available depending on the schema. See specific schema handlers (e.g., `NewickReader`, `NexusReader`, `NexmlReader`) for more details. Notes ----- Note that in most cases, even if ``collection_offset`` and ``tree_offset`` are specified to restrict the trees read, the *entire* data source is still parsed and processed. So this is not more efficient than reading all the trees and then manually-extracting them later; just more convenient. If you need just a single subset of trees from a data source, there is no gain in efficiency. If you need multiple trees or subsets of trees from the same data source, it would be much more efficient to read the entire data source, and extract trees as needed. Returns ------- n : ``int`` The number of |Tree| objects read. Add |Tree| objects to existing |TreeList| from data source providing one or more collections of trees. **Mandatory Source-Specification Keyword Argument (Exactly One Required):** - **file** (*file*) -- File or file-like object of data opened for reading. - **path** (*str*) -- Path to file of data. - **url** (*str*) -- URL of data. - **data** (*str*) -- Data given directly. **Mandatory Schema-Specification Keyword Argument:** - **schema** (*str*) -- Identifier of format of data given by the "``file``", "``path``", "``data``", or "``url``" argument specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See "|Schemas|" for more details. 
**Optional General Keyword Arguments:** - **collection_offset** (*int*) -- 0-based index of tree block or collection in source to be parsed. If not specified then the first collection (offset = 0) is assumed. - **tree_offset** (*int*) -- 0-based index of first tree within the collection specified by ``collection_offset`` to be parsed (i.e., skipping the first ``tree_offset`` trees). If not specified, then the first tree (offset = 0) is assumed (i.e., no trees within the specified collection will be skipped). Use this to specify, e.g. a burn-in. - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|, then unsupported or unrecognized keyword arguments will not result in an error. Default is |False|: unsupported keyword arguments will result in an error. **Optional Schema-Specific Keyword Arguments:** These provide control over how the data is interpreted and processed, and supported argument names and values depend on the schema as specified by the value passed as the "``schema``" argument. See "|Schemas|" for more details. **Examples:** :: tlist = dendropy.TreeList() tlist.read( file=open('treefile.tre', 'rU'), schema="newick", tree_offset=100) tlist.read( path='sometrees.nexus', schema="nexus", collection_offset=2, tree_offset=100) tlist.read( data="((A,B),(C,D));((A,C),(B,D));", schema="newick") tlist.read( url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex", schema="nexus") Writes out ``self`` in ``schema`` format to a destination given by file-like object ``stream``. Parameters ---------- stream : file or file-like object Destination for data. schema : string Must be a recognized and tree file schema, such as "nexus", "newick", etc, for which a specialized tree list writer is available. If this is not implemented for the schema specified, then a UnsupportedSchemaError is raised. \*\*kwargs : keyword arguments, optional Keyword arguments will be passed directly to the writer for the specified schema. 
See documentation for details on keyword arguments supported by writers of various schemas. ########################################################################### ### List Interface # assert tree.taxon_namespace is self.taxon_namespace Inserts a |Tree| object, ``tree``, into the collection before ``index``. The |TaxonNamespace| reference of ``tree`` will be set to that of ``self``. Any |Taxon| objects associated with nodes in ``tree`` that are not already in ``self.taxon_namespace`` will be handled according to ``taxon_import_strategy``: - 'migrate' |Taxon| objects associated with ``tree`` that are not already in ``self.taxon_nameaspace`` will be remapped based on their labels, with new :class|Taxon| objects being reconstructed if none with matching labels are found. Specifically, :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()` will be called on ``tree``, where ``kwargs`` is as passed to this function. - 'add' |Taxon| objects associated with ``tree`` that are not already in ``self.taxon_namespace`` will be added. Note that this might result in |Taxon| objects with duplicate labels as no attempt at mapping to existing |Taxon| objects based on label-matching is done. Parameters ---------- index : integer Position before which to insert ``tree``. tree : A |Tree| instance The |Tree| object to be added. taxon_import_strategy : string If ``tree`` is associated with a different |TaxonNamespace|, this argument determines how new |Taxon| objects in ``tree`` are handled: 'migrate' or 'add'. See above for details. \*\*kwargs : keyword arguments These arguments will be passed directly to 'migrate_taxon_namespace()' method call on ``tree``. See Also -------- :meth:`Tree.migrate_taxon_namespace` Adds a |Tree| object, ``tree``, to the collection. The |TaxonNamespace| reference of ``tree`` will be set to that of ``self``. 
Any |Taxon| objects associated with nodes in ``tree`` that are not already in ``self.taxon_namespace`` will be handled according to ``taxon_import_strategy``: - 'migrate' |Taxon| objects associated with ``tree`` that are not already in ``self.taxon_nameaspace`` will be remapped based on their labels, with new :class|Taxon| objects being reconstructed if none with matching labels are found. Specifically, :meth:`dendropy.datamodel.treemodel.Tree.migrate_taxon_namespace()` will be called on ``tree``, where ``kwargs`` is as passed to this function. - 'add' |Taxon| objects associated with ``tree`` that are not already in ``self.taxon_namespace`` will be added. Note that this might result in |Taxon| objects with duplicate labels as no attempt at mapping to existing |Taxon| objects based on label-matching is done. Parameters ---------- tree : A |Tree| instance The |Tree| object to be added. taxon_import_strategy : string If ``tree`` is associated with a different |TaxonNamespace|, this argument determines how new |Taxon| objects in ``tree`` are handled: 'migrate' or 'add'. See above for details. \*\*kwargs : keyword arguments These arguments will be passed directly to 'migrate_taxon_namespace()' method call on ``tree``. See Also -------- :meth:`Tree.migrate_taxon_namespace` In-place addition of |Tree| objects in ``other`` to ``self``. If ``other`` is a |TreeList|, then the trees are *copied* and migrated into ``self.taxon_namespace``; otherwise, the original objects are migrated into ``self.taxon_namespace`` and added directly. Parameters ---------- other : iterable of |Tree| objects Returns ------- ``self`` : |TreeList| In-place addition of |Tree| objects in ``other`` to ``self``. If ``other`` is a |TreeList|, then the trees are *copied* and migrated into ``self.taxon_namespace``; otherwise, the original objects are migrated into ``self.taxon_namespace`` and added directly. 
Parameters ---------- other : iterable of |Tree| objects Returns ------- ``self`` : |TreeList| Creates and returns new |TreeList| with clones of all trees in ``self`` as well as all |Tree| objects in ``other``. If ``other`` is a |TreeList|, then the trees are *cloned* and migrated into ``self.taxon_namespace``; otherwise, the original objects are migrated into ``self.taxon_namespace`` and added directly. Parameters ---------- other : iterable of |Tree| objects Returns ------- tlist : |TreeList| object |TreeList| object containing clones of |Tree| objects in ``self`` and ``other``. If ``index`` is an integer, then |Tree| object at position ``index`` is returned. If ``index`` is a slice, then a |TreeList| is returned with references (i.e., not copies or clones, but the actual original instances themselves) to |Tree| objects in the positions given by the slice. The |TaxonNamespace| is the same as ``self``. Parameters ---------- index : integer or slice Index or slice. Returns ------- t : |Tree| object or |TreeList| object # list.clear() only with 3.4 or so ... ############################################################################## ## Taxon Handling Returns a set populated with all of |Taxon| instances associated with ``self``. Parameters ---------- taxa : set() Set to populate. If not specified, a new one will be created. Returns ------- taxa : set[|Taxon|] Set of taxa associated with ``self``. ############################################################################## ## Special Calculations and Operations on Entire Collection Return TreeArray containing information of trees currently in self. Processes ``kwargs_dict`` intelligently: removing and passing on keyword arguments pertaining to TreeArray construction, and leaving everything else. # TODO: maybe ignore_node_ages defaults to |False| but ``ultrametricity_precision`` defaults to 0? # taxon_namespace=self.taxon_namespace, Return `SplitDistribution` collecting information on splits in contained trees. 
Keyword arguments get passed directly to `SplitDistribution` constructor. Return |TreeArray| collecting information on splits in contained trees. Keyword arguments get passed directly to |TreeArray| constructor. Returns a consensus tree of all trees in self, with minimum frequency of bipartition to be added to the consensus tree given by ``min_freq``. Return the tree that maximizes the product of split supports, also known as the "Maximum Clade Credibility Tree" or MCCT. Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- mcct_tree : Tree Tree that maximizes the product of split supports. Return the tree that maximizes the *sum* of split supports. Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- mcct_tree : Tree Tree that maximizes the sum of split supports. Given a bipartition specified as: - a |Bipartition| instance given the keyword 'bipartition' - a split bitmask given the keyword 'split_bitmask' - a list of |Taxon| objects given with the keyword ``taxa`` - a list of taxon labels given with the keyword ``labels`` this function returns the proportion of trees in self in which the split is found. If the tree(s) in the collection are unrooted, then the bipartition will be normalized for the comparison. DEPRECATED: use 'frequency_of_bipartition()' instead. ############################################################################### ### SplitDistribution Collects information regarding splits over multiple trees. 
# Taxon Namespace # configuration # storage/function # secondary/derived/generated/collected data # services ########################################################################### ### Utility "Normalizes" split, by ensuring that the least-significant bit is always 1 (used on unrooted trees to establish split identity independent of rotation). Parameters ---------- bitmask : integer Split bitmask hash to be normalized. Returns ------- h : integer Normalized split bitmask. ########################################################################### ### Configuration ########################################################################### ### Split Counting and Book-Keeping Counts splits in this tree and add to totals. ``tree`` must be decorated with splits, and no attempt is made to normalize taxa. Parameters ---------- tree : a |Tree| object. The tree on which to count the splits. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. Returns -------- s : iterable of splits A list of split bitmasks from ``tree``. e : A list of edge length values from ``tree``. a : A list of node age values from ``tree``. ## if edge is stored as an attribute, might be faster to: # edge = bipartition.edge Returns 4 values: total number of splits counted total *weighted* number of unique splits counted total number of non-trivial splits counted total *weighted* number of unique non-trivial splits counted ########################################################################### ### Basic Information Access Returns freqency of split_bitmask. ########################################################################### ### Summarization Returns iterator over support values for the splits of a given tree, where the support value is given by the proportional frequency of the split in the current split distribution. 
Parameters ---------- tree : |Tree| The |Tree| which will be scored. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. include_external_splits : bool If |True|, then non-internal split posteriors will be included. If |False|, then these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. traversal_strategy : str One of: "preorder" or "postorder". Specfies order in which splits are visited. Returns ------- s : list of floats List of values for splits in the tree corresponding to the proportional frequency that the split is found in the current distribution. Calculates the (log) product of the support of the splits of the tree, where the support is given by the proportional frequency of the split in the current split distribution. The tree that has the highest product of split support out of a sample of trees corresponds to the "maximum credibility tree" for that sample. This can also be referred to as the "maximum clade credibility tree", though this latter term is sometimes use for the tree that has the highest *sum* of split support (see :meth:`SplitDistribution.sum_of_split_support_on_tree()`). Parameters ---------- tree : |Tree| The tree for which the score should be calculated. is_bipartitions_updated : bool If |True|, then the splits are assumed to have already been encoded and will not be updated on the trees. include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- s : numeric The log product of the support of the splits of the tree. 
Calculates the sum of the support of the splits of the tree, where the support is given by the proportional frequency of the split in the current distribution. Parameters ---------- tree : |Tree| The tree for which the score should be calculated. is_bipartitions_updated : bool If |True|, then the splits are assumed to have already been encoded and will not be updated on the trees. include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- s : numeric The sum of the support of the splits of the tree. Collapse edges on tree that have support less than indicated by ``min_freq``. Returns a consensus tree from splits in ``self``. Parameters ---------- min_freq : real The minimum frequency of a split in this distribution for it to be added to the tree. is_rooted : bool Should tree be rooted or not? If *all* trees counted for splits are explicitly rooted or unrooted, then this will default to |True| or |False|, respectively. Otherwise it defaults to |None|. \*\*split_summarization_kwargs : keyword arguments These will be passed directly to the underlying `SplitDistributionSummarizer` object. See :meth:`SplitDistributionSummarizer.configure` for options. Returns ------- t : consensus tree Summarizes support of splits/edges/nodes on tree. Parameters ---------- tree: |Tree| instance Tree to be decorated with support values. is_bipartitions_updated: bool If |True|, then bipartitions will not be recalculated. \*\*split_summarization_kwargs : keyword arguments These will be passed directly to the underlying `SplitDistributionSummarizer` object. See :meth:`SplitDistributionSummarizer.configure` for options. 
########################################################################### ### legacy ############################################################################### ### SplitDistributionSummarizer See :meth:`SplitDistributionSummarizer.configure` for configuration options. Configure rendition/mark-up. Parameters ---------- set_edge_lengths : string For each edge, set the length based on: - "support": use support values split corresponding to edge - "mean-length": mean of edge lengths for split - "median-length": median of edge lengths for split - "mean-age": such that split age is equal to mean of ages - "median-age": such that split age is equal to mean of ages - |None|: do not set edge lengths add_support_as_node_attribute: bool Adds each node's support value as an attribute of the node, "``support``". add_support_as_node_annotation: bool Adds support as a metadata annotation, "``support``". If ``add_support_as_node_attribute`` is |True|, then the value will be dynamically-bound to the value of the node's "``support``" attribute. set_support_as_node_label : bool Sets the ``label`` attribute of each node to the support value. add_node_age_summaries_as_node_attributes: bool Summarizes the distribution of the ages of each node in the following attributes: - ``age_mean`` - ``age_median`` - ``age_sd`` - ``age_hpd95`` - ``age_range`` add_node_age_summaries_as_node_annotations: bool Summarizes the distribution of the ages of each node in the following metadata annotations: - ``age_mean`` - ``age_median`` - ``age_sd`` - ``age_hpd95`` - ``age_range`` If ``add_node_age_summaries_as_node_attributes`` is |True|, then the values will be dynamically-bound to the corresponding node attributes. 
add_edge_length_summaries_as_edge_attributes: bool Summarizes the distribution of the lengths of each edge in the following attribtutes: - ``length_mean`` - ``length_median`` - ``length_sd`` - ``length_hpd95`` - ``length_range`` add_edge_length_summaries_as_edge_annotations: bool Summarizes the distribution of the lengths of each edge in the following metadata annotations: - ``length_mean`` - ``length_median`` - ``length_sd`` - ``length_hpd95`` - ``length_range`` If ``add_edge_length_summaries_as_edge_attributes`` is |True|, then the values will be dynamically-bound to the corresponding edge attributes. support_label_decimals: int Number of decimal places to express when rendering the support value as a string for the node label. support_as_percentages: bool Whether or not to express the support value as percentages (default is probability or proportion). minimum_edge_length : numeric All edge lengths calculated to have a value less than this will be set to this. error_on_negative_edge_lengths : bool If |True|, an inferred edge length that is less than 0 will result in a ValueError. ############################################################################### ### TreeArray High-performance collection of tree structures. Storage of minimal tree structural information as represented by toplogy and edge lengths, minimizing memory and processing time. This class stores trees as collections of splits and edge lengths. All other information, such as labels, metadata annotations, etc. will be discarded. A full |Tree| instance can be reconstructed as needed from the structural information stored by this class, at the cost of computation time. ############################################################################## ## Factory Function ############################################################################## ## Life-Cycle Parameters ---------- taxon_namespace : |TaxonNamespace| The operational taxonomic unit concept namespace to manage taxon references. 
is_rooted_trees : bool If not set, then it will be set based on the rooting state of the first tree added. If |True|, then trying to add an unrooted tree will result in an error. If |False|, then trying to add a rooted tree will result in an error. ignore_edge_lengths : bool If |True|, then edge lengths of splits will not be stored. If |False|, then edge lengths will be stored. ignore_node_ages : bool If |True|, then node ages of splits will not be stored. If |False|, then node ages will be stored. use_tree_weights : bool If |False|, then tree weights will not be used to weight splits. # Configuration # edge.length of |None| gets this value # Storage ############################################################################## ## Book-Keeping ############################################################################## ## Updating from Another TreeArray # self.validate_rooting(other._is_rooted_trees) ############################################################################## ## Fundamental Tree Accession Adds the structure represented by a |Tree| instance to the collection. Parameters ---------- tree : |Tree| A |Tree| instance. This must have the same rooting state as all the other trees accessioned into this collection as well as that of ``self.is_rooted_trees``. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. index : integer Insert before index. Returns ------- index : int The index of the accession. s : iterable of splits A list of split bitmasks from ``tree``. e : A list of edge length values from ``tree``. # pre-process splits # pre-process edge lengths # edge_lengths = tuple( [None] * len(splits) ) # pre-process weights # accession info Adds multiple structures represneted by an iterator over or iterable of |Tree| instances to the collection. 
Parameters ---------- trees : iterator over or iterable of |Tree| instances An iterator over or iterable of |Tree| instances. Thess must have the same rooting state as all the other trees accessioned into this collection as well as that of ``self.is_rooted_trees``. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. ############################################################################## ## I/O Adds multiple structures from one or more external file sources to the collection. Parameters ---------- files : iterable of strings and/or file objects A list or some other iterable of file paths or file-like objects (string elements will be assumed to be paths to files, while all other types of elements will be assumed to be file-like objects opened for reading). schema : string The data format of the source. E.g., "nexus", "newick", "nexml". \*\*kwargs : keyword arguments These will be passed directly to the underlying schema-specific reader implementation. Add |Tree| objects to existing |TreeList| from data source providing one or more collections of trees. **Mandatory Source-Specification Keyword Argument (Exactly One Required):** - **file** (*file*) -- File or file-like object of data opened for reading. - **path** (*str*) -- Path to file of data. - **url** (*str*) -- URL of data. - **data** (*str*) -- Data given directly. **Mandatory Schema-Specification Keyword Argument:** - **schema** (*str*) -- Identifier of format of data given by the "``file``", "``path``", "``data``", or "``url``" argument specified above: ":doc:`newick </schemas/newick>`", ":doc:`nexus </schemas/nexus>`", or ":doc:`nexml </schemas/nexml>`". See "|Schemas|" for more details. **Optional General Keyword Arguments:** - **collection_offset** (*int*) -- 0-based index of tree block or collection in source to be parsed. 
If not specified then the first collection (offset = 0) is assumed. - **tree_offset** (*int*) -- 0-based index of first tree within the collection specified by ``collection_offset`` to be parsed (i.e., skipping the first ``tree_offset`` trees). If not specified, then the first tree (offset = 0) is assumed (i.e., no trees within the specified collection will be skipped). Use this to specify, e.g. a burn-in. - **ignore_unrecognized_keyword_arguments** (*bool*) -- If |True|, then unsupported or unrecognized keyword arguments will not result in an error. Default is |False|: unsupported keyword arguments will result in an error. **Optional Schema-Specific Keyword Arguments:** These provide control over how the data is interpreted and processed, and supported argument names and values depend on the schema as specified by the value passed as the "``schema``" argument. See "|Schemas|" for more details. **Examples:** :: tree_array = dendropy.TreeArray() tree_array.read( file=open('treefile.tre', 'rU'), schema="newick", tree_offset=100) tree_array.read( path='sometrees.nexus', schema="nexus", collection_offset=2, tree_offset=100) tree_array.read( data="((A,B),(C,D));((A,C),(B,D));", schema="newick") tree_array.read( url="http://api.opentreeoflife.org/v2/study/pg_1144/tree/tree2324.nex", schema="nexus") ############################################################################## ## Container (List) Interface Adds a |Tree| instance to the collection before position given by ``index``. Parameters ---------- tree : |Tree| A |Tree| instance. This must have the same rooting state as all the other trees accessioned into this collection as well as that of ``self.is_rooted_trees``. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. Adds a |Tree| instance to the collection before position given by ``index``. 
Parameters ---------- index : integer Insert before index. tree : |Tree| A |Tree| instance. This must have the same rooting state as all the other trees accessioned into this collection as well as that of ``self.is_rooted_trees``. is_bipartitions_updated : bool If |False| [default], then the tree will have its splits encoded or updated. Otherwise, if |True|, then the tree is assumed to have its splits already encoded and updated. Returns ------- index : int The index of the accession. s : iterable of splits A list of split bitmasks from ``tree``. e : A list of edge length values ``tree``. Accession of data from ``tree_array`` to self. Parameters ---------- tree_array : |TreeArray| A |TreeArray| instance from which to add data. Accession of data from ``tree_array`` to self. Parameters ---------- tree_array : |TreeArray| A |TreeArray| instance from which to add data. Creates and returns new |TreeArray|. Parameters ---------- other : iterable of |Tree| objects Returns ------- tlist : |TreeArray| object |TreeArray| object containing clones of |Tree| objects in ``self`` and ``other``. # expensive!! # expensive!! # tree_split_bitmasks = self._trees_splits[index] ### TODO: remove this "tree" from underlying splits distribution # for split in tree_split_bitmasks: # self._split_distribution.split_counts[split] -= 1 # etc. # becomes complicated because tree weights need to be updated etc. # del self._tree_split_bitmasks[index] # del self._tree_edge_lengths[index] # return Yields pairs of (split, edge_length) from the store. # """ # Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding # to the "tree" at ``index``. # """ # return self._tree_split_bitmasks[index], self._tree_edge_lengths[index] ############################################################################## ## Accessors/Settors Returns a pair of tuples, ( (splits...), (lengths...) ), corresponding to the "tree" at ``index``. 
############################################################################## ## Calculations Calculates the log product of split support for each of the trees in the collection. Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- s : tuple(list[numeric], integer) Returns a tuple, with the first element being the list of scores and the second being the index of the highest score. The element order corresponds to the trees accessioned in the collection. # count root edge (following BEAST) Return the tree with that maximizes the product of split supports, also known as the "Maximum Clade Credibility Tree" or MCCT. Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- mcct_tree : Tree Tree that maximizes the product of split supports. Calculates the *sum* of split support for all trees in the collection. Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- s : tuple(list[numeric], integer) Returns a tuple, with the first element being the list of scores and the second being the index of the highest score. The element order corresponds to the trees accessioned in the collection. # count root edge (following BEAST) Return the tree with that maximizes the *sum* of split supports. 
Parameters ---------- include_external_splits : bool If |True|, then non-internal split posteriors will be included in the score. Defaults to |False|: these are skipped. This should only make a difference when dealing with splits collected from trees of different leaf sets. Returns ------- mst_tree : Tree Tree that maximizes the sum of split supports. Returns a consensus tree from splits in ``self``. Parameters ---------- min_freq : real The minimum frequency of a split in this distribution for it to be added to the tree. is_rooted : bool Should tree be rooted or not? If *all* trees counted for splits are explicitly rooted or unrooted, then this will default to |True| or |False|, respectively. Otherwise it defaults to |None|. \*\*split_summarization_kwargs : keyword arguments These will be passed directly to the underlying `SplitDistributionSummarizer` object. See :meth:`SplitDistributionSummarizer.configure` for options. Returns ------- t : consensus tree # return self._split_distribution.consensus_tree(*args, **kwargs) ############################################################################## ## Mapping of Split Support ############################################################################## ## Tree Reconstructions # if update_bipartitions: # tree.encode_bipartitions() ############################################################################## ## Topology Frequencies Returns a dictionary with keys being sets of split bitmasks and values being the frequency of occurrence of trees represented by those split bitmask sets in the collection. # print("===> {}".format(normalization_weight)) Returns a dictionary with keys being bipartition encodings of trees (as ``frozenset`` collections of |Bipartition| objects) and values the frequency of occurrence of trees represented by that encoding in the collection. 
# split_bitmask_set_freqs = self.split_bitmask_set_frequencies() # bipartition_encoding_freqs = {} # for split_bitmask_set, freq in split_bitmask_set_freqs.items(): # bipartition_encoding = [] # inferred_leafset = max(split_bitmask_set) # for split_bitmask in split_bitmask_set: # bipartition = treemodel.Bipartition( # bitmask=split_bitmask, # tree_leafset_bitmask=inferred_leafset, # is_rooted=self._is_rooted_trees, # is_mutable=False, # compile_bipartition=True, # ) # bipartition_encoding.append(bipartition) # bipartition_encoding_freqs[frozenset(bipartition_encoding)] = freq # return bipartition_encoding_freqs Returns a |TreeList| instance containing the reconstructed tree topologies (i.e. |Tree| instances with no edge weights) in the collection, with the frequency added as an attributed. Parameters ---------- sort_descending : bool If |True|, then topologies will be sorted in *descending* frequency order (i.e., topologies with the highest frequencies will be listed first). If |False|, then they will be sorted in *ascending* frequency. If |None| (default), then they will not be sorted. frequency_attr_name : str Name of attribute to add to each |Tree| representing the frequency of that topology in the collection. If |None| then the attribute will not be added. frequency_annotation_name : str Name of annotation to add to the annotations of each |Tree|, representing the frequency of that topology in the collection. The value of this annotation will be dynamically-bound to the attribute specified by ``frequency_attr_name`` unless that is |None|. If ``frequency_annotation_name`` is |None| then the annotation will not be added.
| 2.118647
| 2
|
src/models/xgboost_test_model.py
|
pkiage/credit-risk-modelling-tool
| 1
|
6629149
|
<filename>src/models/xgboost_test_model.py
# Build the model-testing view for the XGBoost model.
# make_tests_view is a project helper (models.util_test); from this call site
# it appears to take (model key, human-readable description) — confirm there.
from models.util_test import make_tests_view
xgboost_test_model = make_tests_view(
    "XGBoost", "Gradient Boosted Tree with XGBoost")
|
<filename>src/models/xgboost_test_model.py
# Create the test/evaluation view bound to the XGBoost model.
# NOTE(review): make_tests_view is defined elsewhere in the project
# (models.util_test); the two arguments look like (short name, display
# description) — verify against its definition.
from models.util_test import make_tests_view
xgboost_test_model = make_tests_view(
    "XGBoost", "Gradient Boosted Tree with XGBoost")
|
none
| 1
| 1.509562
| 2
|
|
nginx-fast-multi-setup.py
|
liquidmotiondzn/nginx-fast-multisetup
| 0
|
6629150
|
<reponame>liquidmotiondzn/nginx-fast-multisetup<gh_stars>0
print("////// Nginx Setup //////")
print()
print("requirements: nginx, certbot already setup")
print("Input your Domain (ex. mydomain.com)")
domain = str(input())
def get_domain(domain):
    """Print step-by-step nginx + certbot setup commands for ``domain``.

    Everything is emitted to stdout as copy/paste shell instructions and a
    sample nginx config; nothing is executed and nothing is returned.
    """
    # One entry per output line; empty strings become blank lines.
    instructions = (
        "",
        "-----NGINX part-----",
        "",
        "sudo mkdir -p /var/www/" + domain + "/html",
        "sudo chown -R $USER:$USER /var/www/" + domain + "/html",
        "sudo chmod -R 755 /var/www/" + domain,
        "sudo nano /var/www/" + domain + "/html/index.html",
        "",
        "<html>",
        " <head>",
        " <title>Welcome to your_domain</title>",
        " </head>",
        " <body>",
        " <h1>Success! Your Nginx server is successfully configured for <em>your_domain</em>. </h1>",
        " <p>This is a sample page.</p>",
        " </body>",
        "</html>",
        "",
        "--- Control + O ---",
        "--- Control + X ---",
        "",
        "sudo nano /etc/nginx/sites-available/" + domain,
        "",
        "server {",
        " listen 80;",
        " listen [::]:80;",
        "",
        " root /var/www/" + domain + "/html;",
        " index index.html index.htm index.nginx-debian.html;",
        "",
        " server_name " + domain + " www." + domain + ";",
        "",
        " location / {",
        " try_files $uri $uri/ =404;",
        " }",
        "}",
        "",
        "--- Control + O ---",
        "--- Control + X ---",
        "",
        "sudo ln -s /etc/nginx/sites-available/" + domain + " /etc/nginx/sites-enabled/",
        "sudo nginx -t",
        "sudo systemctl restart nginx",
        "",
        "-----Certbot part-----",
        "",
        "sudo certbot --nginx -d " + domain + " -d www." + domain,
        "sudo certbot renew --dry-run",
        "-----DONE-----",
    )
    for line in instructions:
        print(line)
# Emit the full setup walkthrough for the domain entered by the user.
get_domain(domain)
|
print("////// Nginx Setup //////")
print()
print("requirements: nginx, certbot already setup")
print("Input your Domain (ex. mydomain.com)")
domain = str(input())
def get_domain(domain):
    """Write the nginx site + certbot setup instructions for ``domain`` to stdout."""
    # NGINX: webroot creation, sample index page, and site config.
    print()
    print("-----NGINX part-----")
    print()
    print(f"sudo mkdir -p /var/www/{domain}/html")
    print(f"sudo chown -R $USER:$USER /var/www/{domain}/html")
    print(f"sudo chmod -R 755 /var/www/{domain}")
    print(f"sudo nano /var/www/{domain}/html/index.html")
    print()
    print("<html>")
    print(" <head>")
    print(" <title>Welcome to your_domain</title>")
    print(" </head>")
    print(" <body>")
    print(" <h1>Success! Your Nginx server is successfully configured for <em>your_domain</em>. </h1>")
    print(" <p>This is a sample page.</p>")
    print(" </body>")
    print("</html>")
    print()
    print("--- Control + O ---")
    print("--- Control + X ---")
    print()
    print(f"sudo nano /etc/nginx/sites-available/{domain}")
    print()
    print("server {")
    print(" listen 80;")
    print(" listen [::]:80;")
    print()
    print(f" root /var/www/{domain}/html;")
    print(" index index.html index.htm index.nginx-debian.html;")
    print()
    print(f" server_name {domain} www.{domain};")
    print()
    print(" location / {")
    print(" try_files $uri $uri/ =404;")
    print(" }")
    print("}")
    print()
    print("--- Control + O ---")
    print("--- Control + X ---")
    print()
    # Enable the site, validate the config, then restart nginx.
    print(f"sudo ln -s /etc/nginx/sites-available/{domain} /etc/nginx/sites-enabled/")
    print("sudo nginx -t")
    print("sudo systemctl restart nginx")
    print()
    # Certbot: obtain the certificate and dry-run the renewal.
    print("-----Certbot part-----")
    print()
    print(f"sudo certbot --nginx -d {domain} -d www.{domain}")
    print("sudo certbot renew --dry-run")
    print("-----DONE-----")
# Print the full setup walkthrough for the domain read above.
get_domain(domain)
|
none
| 1
| 2.707941
| 3
|