gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import asynchat
import smtpd
from concurrent import futures
#from tls import TLSHandshaker
from .tls import TLSHandshaker
try:
from multiprocessing import cpu_count
except ImportError:
# some platforms don't have multiprocessing
def cpu_count():
return 1
import sys
try:
import ssl
except ImportError:
class ssl:
SSLSocket = NotImplemented
SSLWantReadError = NotImplemented
SSLWantWriteError = NotImplemented
# Module-level executor shared by FutureChannel instances; populated by
# bootstrap_futures() before the server starts accepting mail.
executor = None


def bootstrap_futures(pool=futures.ThreadPoolExecutor, max_workers=None):
    """Initialize the module-level ``executor`` used for background delivery.

    Any previously created executor is shut down first, so repeated calls
    do not leak worker threads or processes.

    Args:
        pool: Executor factory to use (defaults to ThreadPoolExecutor).
        max_workers: Worker count; defaults to ``cpu_count() * 5``, a
            common heuristic for I/O-bound workloads.
    """
    global executor
    if executor is not None:
        executor.shutdown()
    if max_workers is None:
        max_workers = cpu_count() * 5
    executor = pool(max_workers=max_workers)
# Reply strings pushed in response to the HELP command.  Keys ending in
# "_e" are the extended-SMTP (EHLO) variants selected by smtp_HELP.
# Fixes: RCPT syntax is "RCPT TO:" (RFC 5321), not "RCPT FROM:", and the
# optional-parameter placeholders were missing their closing ">".
SMTP_HELP = {
    'EHLO': '250 Syntax: EHLO hostname',
    'HELO': '250 Syntax: HELO hostname',
    'MAIL': '250 Syntax: MAIL FROM: <address>',
    'RCPT': '250 Syntax: RCPT TO: <address>',
    'MAIL_e': '250 Syntax: MAIL FROM: <address> [SP <mail-parameters>]',
    'RCPT_e': '250 Syntax: RCPT TO: <address> [SP <mail-parameters>]',
    'DATA': '250 Syntax: DATA',
    'NOOP': '250 Syntax: NOOP [SP String]',
    'QUIT': '250 Syntax: QUIT',
    'VRFY': '250 VRFY <address>',
    'HELP': '250 HELP [SP String]',
}
class ExtensionChannel(smtpd.SMTPChannel):
    """SMTPChannel subclass that advertises ESMTP extensions.

    Adds ENHANCEDSTATUSCODES, SIZE, 8BITMIME, SMTPUTF8 and HELP on top of
    the stdlib smtpd.SMTPChannel, and rewrites the basic verbs to reply
    with RFC 2034 enhanced status codes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-instance copy so subclasses can register extra commands
        # (e.g. STARTTLS) without mutating the shared module-level table.
        self.cmd_help = SMTP_HELP.copy()
        # "_e" keys are EHLO-only help variants, not commands themselves.
        self.cmd_list = sorted([
            e for e in self.cmd_help.keys() if not e.endswith('_e')])
        self.extensions = ['ENHANCEDSTATUSCODES']
        if self.data_size_limit:
            self.extensions.append('SIZE %s' % self.data_size_limit)
            # Leave room for the "SIZE=<number>" MAIL FROM parameter.
            self.command_size_limits['MAIL'] += 26
        if not self._decode_data:
            self.extensions.append('8BITMIME')
        if self.enable_SMTPUTF8:
            self.extensions.append('SMTPUTF8')
            # Leave room for the "SMTPUTF8" MAIL FROM parameter.
            self.command_size_limits['MAIL'] += 10
        self.extensions.append('HELP')
    __init__.__doc__ = smtpd.SMTPChannel.__init__.__doc__  # inherit base docs

    def smtp_HELO(self, arg):
        """Handle HELO: reset session state and record the greeting."""
        if not arg:
            self.push('501 5.5.4 Syntax: HELO hostname')
            return
        self._set_rset_state()
        self.seen_greeting = arg
        self.push('250 %s' % self.fqdn)

    def smtp_EHLO(self, arg):
        """Handle EHLO: like HELO, but also list supported extensions."""
        if not arg:
            self.push('501 5.5.4 Syntax: EHLO hostname')
            return
        self._set_rset_state()
        self.seen_greeting = arg
        self.extended_smtp = True
        self.push('250-%s' % self.fqdn)
        # All extension lines use "250-" except the last, which uses "250 "
        # to terminate the multiline reply.
        extensions = sorted(self.extensions)
        for extension in extensions[:-1]:
            self.push('250-%s' % extension)
        self.push('250 %s' % extensions[-1])

    def smtp_NOOP(self, arg):
        self.push('250 2.0.0 OK')

    def smtp_QUIT(self, arg):
        self.push('221 2.0.0 Bye. Have a nice day!')
        self.close_when_done()

    def smtp_RSET(self, arg):
        """Handle RSET: discard sender/recipients/data, keep the greeting."""
        if arg:
            self.push('501 5.5.4 Syntax: RSET')
            return
        self._set_rset_state()
        self.push('250 2.0.0 OK')

    def smtp_DATA(self, arg):
        """Handle DATA: switch to message-collection mode if RCPT was seen."""
        if arg:
            self.push('501 5.5.4 Syntax: DATA')
            return
        if not self.seen_greeting:
            self.push('503 5.5.0 Error: send HELO first');
            return
        if not self.rcpttos:
            self.push('503 5.5.0 Error: need RCPT command')
            return
        self.smtp_state = self.DATA
        # Message body ends with a lone "." line (RFC 5321 section 4.1.1.4).
        self.set_terminator(b'\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')

    def smtp_VRFY(self, arg):
        # 252 is the standard non-committal VRFY response; it avoids
        # confirming or denying address existence.
        if not arg:
            self.push('501 5.5.4 Syntax: VRFY <address>')
            return
        self.push('252 2.0.0 Cannot VRFY user, '
                  'but will accept message and attempt delivery')

    # Command that have not been implemented
    def smtp_EXPN(self, arg):
        self.push('502 5.5.0 EXPN not implemented')

    def smtp_HELP(self, arg):
        """Handle HELP: per-command help, or list all supported commands.

        When the client used EHLO, prefer the extended "_e" help string
        for the requested command if one exists.
        """
        if arg:
            lc_arg = arg.upper()
            # Unknown commands get a 501 with the full command list.
            command_help = self.cmd_help.get(
                lc_arg, '501 Supported commands: {}'.format(
                    ' '.join(self.cmd_list)))
            if self.extended_smtp:
                command_help = self.cmd_help.get(
                    lc_arg+'_e', command_help)
            self.push(command_help)
        else:
            self.push('250 Supported commands: {}'.format(
                ' '.join(self.cmd_list)))
class BackgroundChannel(ExtensionChannel):
    """Channel that can be taken out of the asyncore loop temporarily.

    While ``in_background`` is True the channel reports itself neither
    readable nor writable, so the event loop leaves its socket alone
    (e.g. while a TLS handshake or a worker job owns it).
    """

    in_background = False

    def writable(self):
        # Backgrounded channels never want to write.
        if self.in_background:
            return False
        return super().writable()

    def readable(self):
        # Backgrounded channels never want to read.
        if self.in_background:
            return False
        return super().readable()
class StartTLSChannel(BackgroundChannel):
    """ExtensionChannel supporting opportunistic TLS (RFC 3207 STARTTLS)."""

    in_background = False
    # Negotiated cipher tuple from SSLSocket.cipher(); None before STARTTLS.
    cipher = None

    def __init__(self, *args, **kwargs):
        ExtensionChannel.__init__(self, *args, **kwargs)
        self.cmd_help["STARTTLS"] = '250 Syntax: STARTTLS'
        self.cmd_list.append("STARTTLS")
        self.cmd_list = sorted(self.cmd_list)
        # Only advertise STARTTLS when the server was given an SSLContext.
        if self.smtp_server.ctx is not None:
            self.extensions.append("STARTTLS")

    def smtp_STARTTLS(self, arg):
        """Handle STARTTLS: wrap the socket and hand it to a TLSHandshaker."""
        if arg:
            self.push('501 5.5.4 Syntax: STARTTLS')
        elif self.smtp_server.ctx is not None and not isinstance(
                self.conn, ssl.SSLSocket):
            self.push('220 2.0.0 Ready to start TLS')
            # STARTTLS may only be issued once per connection.
            self.extensions.pop(self.extensions.index('STARTTLS'))
            # BUGFIX: the base class gates readable()/writable() on
            # in_background (and replace_connection() clears it), but this
            # method only ever set in_handshake, so the channel stayed
            # active during the handshake.  Set both; in_handshake is kept
            # in case external code (e.g. TLSHandshaker) inspects it.
            self.in_background = True
            self.in_handshake = True
            self.conn = self.smtp_server.ctx.wrap_socket(
                self.conn, server_side=True,
                do_handshake_on_connect=False)
            # The handshaker drives the non-blocking handshake and calls
            # replace_connection() on us when it completes.
            channel = TLSHandshaker(self.conn, self)
            # RFC 3207: all SMTP state must be reset after STARTTLS.
            self._set_rset_state()
            self.command_size_limits.clear()
        elif isinstance(self.conn, ssl.SSLSocket):
            self.push("503 5.5.1 Bad sequence of commands.")
        else:
            self.push(
                '454 4.7.0 STARTTLS not available due to temporary reason.')

    def handle_error(self):
        """Swallow SSL want-read/want-write; defer real errors to the base."""
        error = sys.exc_info()[1]
        if isinstance(error, (ssl.SSLWantReadError, ssl.SSLWantWriteError)):
            pass  # Non-blocking TLS flow control, not an actual error.
        else:
            super().handle_error()

    def replace_connection(self, conn):
        """Adopt the handshaken TLS socket and resume normal operation."""
        self.conn = conn
        self.in_background = False
        self.cipher = self.conn.cipher()
        asynchat.async_chat.__init__(self, self.conn)  # Reinitialize
class FutureChannel(BackgroundChannel):
    """Channel that delivers messages via the module-level executor.

    While ``process_message`` runs in a worker, the channel is taken out
    of the asyncore loop (``in_background``); the done-callback re-enables
    it and pushes the final SMTP reply.
    """

    # Implementation of base class abstract method
    def found_terminator(self):
        """Dispatch a complete command line, or a finished DATA payload."""
        line = self._emptystring.join(self.received_lines)
        self.received_lines = []
        if self.smtp_state == self.COMMAND:
            # num_bytes accumulates in collect_incoming_data; keep the size
            # for the per-command length check and reset the counter.
            sz, self.num_bytes = self.num_bytes, 0
            if not line:
                self.push('501 5.2.2 Error: Bad syntax.')
                return
            if not self._decode_data:
                line = str(line, 'utf-8')
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Extended SMTP has per-command limits (e.g. MAIL grows to fit
            # SIZE/SMTPUTF8 parameters); plain SMTP has a single limit.
            max_sz = (self.command_size_limits[command]
                      if self.extended_smtp else self.command_size_limit)
            if sz > max_sz:
                self.push('500 5.5.2 Error: line too long.')
                return
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push(
                    '500 5.5.1 Error: command "%s" not recognized' % command)
                return
            method(arg)
            return
        else:
            if self.smtp_state != self.DATA:
                self.push('451 4.5.0 Internal confusion')
                self.num_bytes = 0
                return
            if self.data_size_limit and self.num_bytes > self.data_size_limit:
                self.push('552 5.3.4 Error: Too much mail data')
                self.num_bytes = 0
                return
            # Remove extraneous carriage returns and de-transparency
            # according to RFC 5321, Section 4.5.2 (leading dot removal).
            data = []
            for text in line.split(self._linesep):
                if text and text[0] == self._dotsep:
                    data.append(text[1:])
                else:
                    data.append(text)
            self.received_data = self._newline.join(data)
            args = (self.peer, self.mailfrom, self.rcpttos,
                    self.received_data)
            kwargs = {}
            if not self._decode_data:
                kwargs = {
                    'mail_options': self.mail_options,
                    'rcpt_options': self.rcpt_options,
                }
            # EEH MODIFICATION: also forward the HELO/EHLO greeting, and
            # the TLS cipher when the channel has STARTTLS support.
            kwargs["greeting"] = self.seen_greeting
            if hasattr(self, "cipher"):
                kwargs["cipher"] = self.cipher
            self.sleep(self.wake_data, self.smtp_server.process_message,
                       args, kwargs)

    def wake_data(self, future):
        """Done-callback for DATA delivery: push the worker's SMTP reply.

        Re-enables the channel, then pushes the status returned by
        process_message (or '250 2.0.0 OK' for None).  Worker exceptions
        are logged when the server has a logger, otherwise the channel is
        closed via handle_error().
        """
        self.in_background = False
        self._set_post_data_state()
        try:
            status = future.result()
        except:  # noqa: E722 -- deliberately broad: worker may raise anything
            if hasattr(self.smtp_server, "logger"):
                self.smtp_server.logger.exception("Error in channel:")
            else:
                self.handle_error()  # Close connection, print to stdout
                return
            status = '554 5.5.0 Server error. Please contact admin.'
        if status is not None:
            self.push(status)
        else:
            self.push('250 2.0.0 OK')

    def sleep(self, continuation, fun, args, kwargs):
        """Run ``fun(*args, **kwargs)`` on the executor, backgrounding us.

        BUGFIX: the done-callback is now the supplied ``continuation``
        instead of a hard-coded ``self.wake_data``, so callers can pass a
        different continuation (the existing call site passes wake_data,
        so behavior there is unchanged).
        """
        self.in_background = True
        future = executor.submit(fun, *args, **kwargs)
        future.add_done_callback(continuation)
class StartTLSFutureChannel(FutureChannel, StartTLSChannel):
    """Channel combining STARTTLS support with executor-backed delivery.

    All behavior comes from the two bases; nothing to add here.
    """
| |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
# NOTE: This class looks auto-generated (AToM3/Himesis pattern compiler);
# edit the source model rather than this file where possible.
class HPP4_ConnectedLHS(HimesisPreConditionPatternLHS):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HPP4_ConnectedLHS.

        The pattern matches: State --exitPoints--> ExitPoint
        --outgoingTransitions--> Transition --type--> OUT2, with the
        additional equation that the State's isComposite attribute is true.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HPP4_ConnectedLHS, self).__init__(name='HPP4_ConnectedLHS', num_nodes=0, edges=[])

        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
#   returning True enables the rule to be applied,
#   returning False forbids the rule from being applied.
#===============================================================================

return True
"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'PP4')

        # Set the node attributes

        # match class State() node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return True
"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["MT_dirty__"] = False
        self.vs[0]["mm__"] = """MT_pre__State"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

        # match class ExitPoint() node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return True
"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["MT_dirty__"] = False
        self.vs[1]["mm__"] = """MT_pre__ExitPoint"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

        # match class Transition() node
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return True
"""
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["MT_dirty__"] = False
        self.vs[2]["mm__"] = """MT_pre__Transition"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

        # match class OUT2() node
        self.add_node()
        self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return True
"""
        self.vs[3]["MT_label__"] = """4"""
        self.vs[3]["MT_dirty__"] = False
        self.vs[3]["mm__"] = """MT_pre__OUT2"""
        self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')

        # Nodes that represent the edges of the property.

        # match association ExitPoint--outgoingTransitions-->Transition node
        self.add_node()
        self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return attr_value == "outgoingTransitions"
"""
        self.vs[4]["MT_label__"] = """5"""
        self.vs[4]["MT_subtypes__"] = []
        self.vs[4]["MT_dirty__"] = False
        self.vs[4]["mm__"] = """MT_pre__directLink_S"""
        self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')

        # match association Transition--type-->OUT2 node
        self.add_node()
        self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return attr_value == "type"
"""
        self.vs[5]["MT_label__"] = """6"""
        self.vs[5]["MT_subtypes__"] = []
        self.vs[5]["MT_dirty__"] = False
        self.vs[5]["mm__"] = """MT_pre__directLink_S"""
        self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')

        # match association State--exitPoints-->ExitPoint node
        self.add_node()
        self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================

return attr_value == "exitPoints"
"""
        self.vs[6]["MT_label__"] = """7"""
        self.vs[6]["MT_subtypes__"] = []
        self.vs[6]["MT_dirty__"] = False
        self.vs[6]["mm__"] = """MT_pre__directLink_S"""
        self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc6')

        # Add the edges
        self.add_edges([
            (1,4),  # match_class ExitPoint() -> association outgoingTransitions
            (4,2),  # association outgoingTransitions -> match_class Transition()
            (2,5),  # match_class Transition() -> association type
            (5,3),  # association type -> match_class OUT2()
            (0,6),  # match_class State() -> association exitPoints
            (6,1)   # association exitPoints -> match_class ExitPoint()
        ])

        # Add the attribute equations
        self["equations"] = [((0,'isComposite'),('constant','true')), ]

    def eval_attr11(self, attr_value, this):
        """Attribute constraint for label 1 (State): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return True

    def eval_attr12(self, attr_value, this):
        """Attribute constraint for label 2 (ExitPoint): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return True

    def eval_attr13(self, attr_value, this):
        """Attribute constraint for label 3 (Transition): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return True

    def eval_attr14(self, attr_value, this):
        """Attribute constraint for label 4 (OUT2): always matches."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return True

    def eval_attr15(self, attr_value, this):
        """Association constraint for label 5: link name must be 'outgoingTransitions'."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return attr_value == "outgoingTransitions"

    def eval_attr16(self, attr_value, this):
        """Association constraint for label 6: link name must be 'type'."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return attr_value == "type"

    def eval_attr17(self, attr_value, this):
        """Association constraint for label 7: link name must be 'exitPoints'."""
        #===============================================================================
        # This code is executed when evaluating if a node shall be matched by this rule.
        # You can access the value of the current node's attribute value by: attr_value.
        # You can access any attribute x of this node by: this['x'].
        # If the constraint relies on attribute values from other nodes,
        # use the LHS/NAC constraint instead.
        # The given constraint must evaluate to a boolean expression.
        #===============================================================================

        return attr_value == "exitPoints"

    def constraint(self, PreNode, graph):
        """
        Executable constraint code.
        @param PreNode: Function taking an integer as parameter
                        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the nodes in the LHS have been matched.
        # You can access a matched node labelled n by: PreNode('n').
        # To access attribute x of node n, use: PreNode('n')['x'].
        # The given constraint must evaluate to a boolean expression:
        #   returning True enables the rule to be applied,
        #   returning False forbids the rule from being applied.
        #===============================================================================

        return True
| |
"""Module to perform data cleaning functions on EIA860 data tables."""
import logging
import numpy as np
import pandas as pd
import pudl
from pudl.metadata.classes import DataSource
from pudl.metadata.codes import CODE_METADATA
from pudl.metadata.fields import apply_pudl_dtypes
logger = logging.getLogger(__name__)
def ownership(eia860_dfs, eia860_transformed_dfs):
    """
    Pull and transform the ownership table.

    Transformations include:

    * Replace . values with NA.
    * Convert pre-2012 ownership percentages to proportions to match post-2012
      reporting.

    Args:
        eia860_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a page from the EIA860 form, as reported in the Excel
            spreadsheets they distribute.
        eia860_transformed_dfs (dict): A dictionary of DataFrame objects in which pages
            from EIA860 form (keys) correspond to normalized DataFrames of values from
            that page (values).

    Returns:
        dict: eia860_transformed_dfs, a dictionary of DataFrame objects in which
        pages from EIA860 form (keys) correspond to normalized DataFrames of values
        from that page (values).
    """
    # Preliminary clean and get rid of unnecessary 'year' column
    own_df = (
        eia860_dfs['ownership'].copy()
        .pipe(pudl.helpers.fix_eia_na)
        .pipe(pudl.helpers.convert_to_date)
        .drop(columns=['year'])
    )
    # Fail loudly if the data is older than the earliest working partition --
    # the transforms below have only been validated for those years.
    if (min(own_df.report_date.dt.year)
            < min(DataSource.from_id('eia860').working_partitions['years'])):
        raise ValueError(
            f"EIA 860 transform step is only known to work for "
            f"year {min(DataSource.from_id('eia860').working_partitions['years'])} and later, "
            f"but found data from year {min(own_df.report_date.dt.year)}."
        )
    # Prior to 2012, ownership was reported as a percentage, rather than
    # as a proportion, so we need to divide those values by 100.
    own_df.loc[own_df.report_date.dt.year < 2012, 'fraction_owned'] = \
        own_df.loc[own_df.report_date.dt.year < 2012, 'fraction_owned'] / 100
    # This has to come before the fancy indexing below, otherwise the plant_id_eia
    # is still a float.
    own_df = apply_pudl_dtypes(own_df, group="eia")
    # A small number of generators are reported multiple times in the ownership
    # table due to the use of leading zeroes in their integer generator_id values
    # which are stored as strings (since some generators use strings). This
    # makes sure that we only keep a single copy of those duplicated records which
    # we've identified as falling into this category. We refrain from doing a wholesale
    # drop_duplicates() so that if duplicates are introduced by some other mechanism
    # we'll be notified.
    # The plant & generator ID values we know have duplicates to remove.
    known_dupes = (
        own_df.set_index(["plant_id_eia", "generator_id"])
        .loc[(56032, "1")]
    )
    # Index of own_df w/ duplicated records removed.
    without_known_dupes_idx = (
        own_df.set_index(["plant_id_eia", "generator_id"])
        .index.difference(known_dupes.index)
    )
    # own_df w/ duplicated records removed.
    without_known_dupes = (
        own_df.set_index(["plant_id_eia", "generator_id"])
        .loc[without_known_dupes_idx]
        .reset_index()
    )
    # Drop duplicates from the known dupes using the whole primary key
    own_pk = [
        'report_date',
        'plant_id_eia',
        'generator_id',
        'owner_utility_id_eia',
    ]
    deduped = known_dupes.reset_index().drop_duplicates(subset=own_pk)
    # Bring these two parts back together:
    own_df = pd.concat([without_known_dupes, deduped])
    # Check whether we have truly deduplicated the dataframe.
    remaining_dupes = own_df[own_df.duplicated(subset=own_pk, keep=False)]
    if not remaining_dupes.empty:
        raise ValueError(
            "Duplicate ownership slices found in ownership_eia860:"
            f"{remaining_dupes}"
        )
    # Remove a couple of records known to have (literal) "nan" values in the
    # generator_id column, which is part of the table's natural primary key.
    # These "nan" strings get converted to true pd.NA values when the column
    # datatypes are applied, which violates the primary key constraints.
    # See https://github.com/catalyst-cooperative/pudl/issues/1207
    mask = (
        (own_df.report_date.isin(["2018-01-01", "2019-01-01", "2020-01-01"]))
        & (own_df.plant_id_eia == 62844)
        & (own_df.owner_utility_id_eia == 62745)
        & (own_df.generator_id == "nan")
    )
    own_df = own_df[~mask]
    # In 2010 there are several hundred utilities that appear to be incorrectly
    # reporting the owner_utility_id_eia value *also* in the utility_id_eia
    # column. This results in duplicate operator IDs associated with a given
    # generator in a particular year, which should never happen. We identify
    # these values and set them to NA so they don't mess up the harvested
    # relationships between plants and utilities:
    # See https://github.com/catalyst-cooperative/pudl/issues/1116
    duplicate_operators = (
        own_df.groupby(["report_date", "plant_id_eia", "generator_id"])
        .utility_id_eia.transform(pd.Series.nunique)
    ) > 1
    own_df.loc[duplicate_operators, "utility_id_eia"] = pd.NA
    # The above fix won't catch owner_utility_id_eia values in the
    # utility_id_eia (operator) column when there's only a single
    # owner-operator. But also, when there's a single owner-operator they shouldn't
    # even be reporting in this table. So we can also drop those utility_id_eia
    # values without losing any valuable information here. The utility_id_eia
    # column here is only useful for entity harvesting & resolution purposes
    # since the (report_date, plant_id_eia) tuple fully defines the operator id.
    # See https://github.com/catalyst-cooperative/pudl/issues/1116
    single_owner_operator = (
        (own_df.utility_id_eia == own_df.owner_utility_id_eia)
        & (own_df.fraction_owned == 1.0)
    )
    own_df.loc[single_owner_operator, "utility_id_eia"] = pd.NA
    # Standardize coded values against the table's metadata encoders.
    own_df = (
        pudl.metadata.classes.Package.from_resource_ids()
        .get_resource("ownership_eia860")
        .encode(own_df)
    )
    eia860_transformed_dfs['ownership_eia860'] = own_df
    return eia860_transformed_dfs
def generators(eia860_dfs, eia860_transformed_dfs):
    """
    Pull and transform the generators table.

    There are three tabs that the generator records come from (proposed, existing,
    retired). Pre 2009, the existing and retired data are lumped together under a single
    generator file with one tab. We pull each tab into one dataframe and include an
    ``operational_status`` to indicate which tab the record came from. We use
    ``operational_status`` to parse the pre 2009 files as well.

    Transformations include:

    * Replace . values with NA.
    * Update ``operational_status_code`` to reflect plant status as either proposed,
      existing or retired.
    * Drop values with NA for plant and generator id.
    * Replace 0 values with NA where appropriate.
    * Convert Y/N/X values to boolean True/False.
    * Convert U/Unknown values to NA.
    * Map full spelling onto code values.
    * Create a fuel_type_code_pudl field that organizes fuel types into
      clean, distinguishable categories.

    Args:
        eia860_dfs (dict): Each entry in this
            dictionary of DataFrame objects corresponds to a page from the EIA860 form,
            as reported in the Excel spreadsheets they distribute.
        eia860_transformed_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA860 form (keys) correspond to a normalized DataFrame of
            values from that page (values).

    Returns:
        dict: eia860_transformed_dfs, a dictionary of DataFrame objects in which pages
        from EIA860 form (keys) correspond to normalized DataFrames of values from that
        page (values).
    """
    # Groupby objects were creating chained assignment warning that is N/A
    pd.options.mode.chained_assignment = None

    # There are three sets of generator data reported in the EIA860 table,
    # planned, existing, and retired generators. We're going to concatenate
    # them all together into a single big table, with a column that indicates
    # which one of these tables the data came from, since they all have almost
    # exactly the same structure
    gp_df = eia860_dfs['generator_proposed'].copy()
    ge_df = eia860_dfs['generator_existing'].copy()
    gr_df = eia860_dfs['generator_retired'].copy()
    g_df = eia860_dfs['generator'].copy()
    gp_df['operational_status'] = 'proposed'
    ge_df['operational_status'] = 'existing'
    gr_df['operational_status'] = 'retired'
    # Pre-2009 single-tab files only have a status code; map it onto the
    # same proposed/existing/retired categories as the modern tabs.
    g_df['operational_status'] = (
        g_df['operational_status_code']
        .replace({'OP': 'existing',  # could move this dict to codes...
                  'SB': 'existing',
                  'OA': 'existing',
                  'OS': 'existing',
                  'RE': 'retired'})
    )
    gens_df = (
        pd.concat([ge_df, gp_df, gr_df, g_df], sort=True)
        .dropna(subset=['generator_id', 'plant_id_eia'])
        .pipe(pudl.helpers.fix_eia_na)
    )

    # A subset of the columns have zero values, where NA is appropriate:
    columns_to_fix = [
        'planned_retirement_month',
        'planned_retirement_year',
        'planned_uprate_month',
        'planned_uprate_year',
        'other_modifications_month',
        'other_modifications_year',
        'planned_derate_month',
        'planned_derate_year',
        'planned_repower_month',
        'planned_repower_year',
        'planned_net_summer_capacity_derate_mw',
        'planned_net_summer_capacity_uprate_mw',
        'planned_net_winter_capacity_derate_mw',
        'planned_net_winter_capacity_uprate_mw',
        'planned_new_capacity_mw',
        'nameplate_power_factor',
        'minimum_load_mw',
        'winter_capacity_mw',
        'summer_capacity_mw'
    ]
    for column in columns_to_fix:
        gens_df[column] = gens_df[column].replace(
            to_replace=[" ", 0], value=np.nan)

    # A subset of the columns have "X" values, where other columns_to_fix
    # have "N" values. Replacing these values with "N" will make for uniform
    # values that can be converted to Boolean True and False pairs.
    gens_df.duct_burners = \
        gens_df.duct_burners.replace(to_replace='X', value='N')
    gens_df.bypass_heat_recovery = \
        gens_df.bypass_heat_recovery.replace(to_replace='X', value='N')
    # BUGFIX: this previously read from gens_df.bypass_heat_recovery (a
    # copy-paste error), overwriting the synchronized-grid column with
    # heat-recovery data.
    gens_df.syncronized_transmission_grid = \
        gens_df.syncronized_transmission_grid.replace(to_replace='X', value='N')

    # A subset of the columns have "U" values, presumably for "Unknown," which
    # must be set to NA in order to convert the columns to datatype Boolean.
    # BUGFIX: .replace(to_replace='U', value=None) triggers pandas'
    # method-based (pad/ffill) replacement rather than inserting missing
    # values; the dict form inserts NA as intended.
    gens_df.multiple_fuels = \
        gens_df.multiple_fuels.replace({'U': pd.NA})
    gens_df.switch_oil_gas = \
        gens_df.switch_oil_gas.replace({'U': pd.NA})

    boolean_columns_to_fix = [
        'duct_burners',
        'multiple_fuels',
        'deliver_power_transgrid',
        'syncronized_transmission_grid',
        'solid_fuel_gasification',
        'pulverized_coal_tech',
        'fluidized_bed_tech',
        'subcritical_tech',
        'supercritical_tech',
        'ultrasupercritical_tech',
        'carbon_capture',
        'stoker_tech',
        'other_combustion_tech',
        'cofire_fuels',
        'switch_oil_gas',
        'bypass_heat_recovery',
        'associated_combined_heat_power',
        'planned_modifications',
        'other_planned_modifications',
        'uprate_derate_during_year',
        'previously_canceled',
        'owned_by_non_utility',
        'summer_capacity_estimate',
        'winter_capacity_estimate',
        'distributed_generation',
        'ferc_cogen_status',
        'ferc_small_power_producer',
        'ferc_exempt_wholesale_generator'
    ]
    # Y/N -> True/False; anything missing becomes pd.NA (the fillna("NaN")
    # round-trip keeps existing NaNs from surviving the replace as floats).
    for column in boolean_columns_to_fix:
        gens_df[column] = (
            gens_df[column]
            .fillna("NaN")
            .replace(
                to_replace=["Y", "N", "NaN"],
                value=[True, False, pd.NA])
        )

    gens_df = (
        gens_df
        .pipe(pudl.helpers.month_year_to_date)
        .pipe(
            pudl.helpers.simplify_strings,
            columns=['rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id']
        )
        .pipe(pudl.helpers.convert_to_date)
    )
    # Standardize coded values against the table's metadata encoders.
    gens_df = (
        pudl.metadata.classes.Package.from_resource_ids()
        .get_resource("generators_eia860")
        .encode(gens_df)
    )
    # Collapse the detailed energy source codes into broad PUDL fuel types.
    gens_df["fuel_type_code_pudl"] = (
        gens_df.energy_source_code_1
        .str.upper()
        .map(
            pudl.helpers.label_map(
                CODE_METADATA["energy_sources_eia"]["df"],
                from_col="code",
                to_col="fuel_type_code_pudl",
                null_value=pd.NA,
            )
        )
    )
    eia860_transformed_dfs['generators_eia860'] = gens_df
    return eia860_transformed_dfs
def plants(eia860_dfs, eia860_transformed_dfs):
    """
    Pull and transform the plants table.

    Much of the static plant information is reported repeatedly, and scattered
    across several different pages of EIA 923. The data frame which this
    function uses is assembled from those many different pages, and passed in
    via the same dictionary of dataframes that all the other ingest functions
    use for uniformity.

    Transformations include:

    * Replace . values with NA.
    * Homogenize spelling of county names.
    * Convert Y/N/X values to boolean True/False.

    Args:
        eia860_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a page from the EIA860 form, as reported in the
            Excel spreadsheets they distribute.
        eia860_transformed_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA860 form (keys) correspond to normalized
            DataFrames of values from that page (values).

    Returns:
        dict: eia860_transformed_dfs, with a new 'plants_eia860' entry added.
    """
    # Populating the 'plants_eia860' table
    plants_df = (
        eia860_dfs['plant'].copy()
        .pipe(pudl.helpers.fix_eia_na)
        .astype({"zip_code": str})
        .drop("iso_rto", axis="columns")
    )
    # Spelling, punctuation, and capitalization of county names can vary from
    # year to year; homogenize them here to facilitate correct value
    # harvesting.
    plants_df['county'] = (
        plants_df.county
        .str.replace(r'[^a-z,A-Z]+', ' ', regex=True)
        .str.strip()
        .str.lower()
        .str.replace(r'\s+', ' ', regex=True)
        .str.title()
    )
    # Some columns use "X" where others use "N"; normalize them to "N" so a
    # single Y/N -> True/False mapping works below.
    for col in ("ash_impoundment_lined",
                "natural_gas_storage",
                "liquefied_natural_gas_storage"):
        plants_df[col] = plants_df[col].replace(to_replace='X', value='N')
    # Y/N columns become nullable booleans; unreported values become NA.
    boolean_columns_to_fix = [
        "ferc_cogen_status",
        "ferc_small_power_producer",
        "ferc_exempt_wholesale_generator",
        "ash_impoundment",
        "ash_impoundment_lined",
        "energy_storage",
        "natural_gas_storage",
        "liquefied_natural_gas_storage",
        "net_metering",
    ]
    for column in boolean_columns_to_fix:
        plants_df[column] = (
            plants_df[column]
            .fillna("NaN")
            .replace(
                to_replace=["Y", "N", "NaN"],
                value=[True, False, pd.NA])
        )
    plants_df = pudl.helpers.convert_to_date(plants_df)
    # Standardize coded values against the package metadata.
    plants_df = (
        pudl.metadata.classes.Package.from_resource_ids()
        .get_resource("plants_eia860")
        .encode(plants_df)
    )
    eia860_transformed_dfs['plants_eia860'] = plants_df
    return eia860_transformed_dfs
def boiler_generator_assn(eia860_dfs, eia860_transformed_dfs):
    """
    Pull and transform the boiler generator association table.

    Transformations include:

    * Drop non-data rows with EIA notes.
    * Drop duplicate rows.

    Args:
        eia860_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a page from the EIA860 form, as reported in the
            Excel spreadsheets they distribute.
        eia860_transformed_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA860 form (keys) correspond to normalized
            DataFrames of values from that page (values).

    Returns:
        dict: eia860_transformed_dfs, with 'boiler_generator_assn_eia860'
        added.
    """
    # Populating the 'generators_eia860' table
    assn_cols = ['report_year',
                 'utility_id_eia',
                 'plant_id_eia',
                 'boiler_id',
                 'generator_id']
    assn_df = eia860_dfs['boiler_generator_assn'].copy()[assn_cols]
    # Some years include note lines from EIA (non-data rows) which break the
    # import; their utility_id_eia is non-numeric, so cast to str and keep
    # only numeric IDs.
    assn_df['utility_id_eia'] = assn_df['utility_id_eia'].astype(str)
    assn_df = assn_df[assn_df.utility_id_eia.str.isnumeric()]
    # generator_id/boiler_id are sometimes heterogeneous int/str, which makes
    # drop_duplicates fail, so normalize them to str.
    assn_df = assn_df.astype({
        'plant_id_eia': int,
        'generator_id': str,
        'boiler_id': str,
    })
    # This drop_duplicates isn't removing all duplicates
    assn_df = assn_df.drop_duplicates().dropna()
    assn_df = pudl.helpers.convert_to_date(assn_df)
    eia860_transformed_dfs['boiler_generator_assn_eia860'] = assn_df
    return eia860_transformed_dfs
def utilities(eia860_dfs, eia860_transformed_dfs):
    """
    Pull and transform the utilities table.

    Transformations include:

    * Replace . values with NA.
    * Fix typos in state abbreviations, convert to uppercase.
    * Drop address_3 field (all NA).
    * Combine phone number columns into one field and set values that don't
      mimic real US phone numbers to NA.
    * Convert Y/N/X values to boolean True/False.
    * Map full spelling onto code values.

    Args:
        eia860_dfs (dict): Each entry in this dictionary of DataFrame objects
            corresponds to a page from the EIA860 form, as reported in the
            Excel spreadsheets they distribute.
        eia860_transformed_dfs (dict): A dictionary of DataFrame objects in
            which pages from EIA860 form (keys) correspond to normalized
            DataFrames of values from that page (values).

    Returns:
        dict: eia860_transformed_dfs, with a new 'utilities_eia860' entry.
    """
    # Populating the 'utilities_eia860' table
    util_df = eia860_dfs['utility'].copy()
    # Replace empty strings, whitespace, and '.' fields with real NA values.
    util_df = pudl.helpers.fix_eia_na(util_df)
    # Uppercase the state abbreviations, then repair known typos.
    util_df['state'] = util_df.state.str.upper().replace({
        'QB': 'QC',  # wrong abbreviation for Quebec
        'Y': 'NY',   # Typo
    })
    # Remove Address 3 column that is all NA.
    util_df = util_df.drop(['address_3'], axis=1)

    def _format_phone(area, mid, last):
        """Make and validate full phone number seperated by dashes."""
        assembled = (
            area.astype('string')
            + '-' + mid.astype('string')
            + '-' + last.astype('string')
        )
        # Turn anything that doesn't match a US phone number format to NA.
        # The noqa suppresses a false-positive flake8 warning that treats the
        # regex as a malformed f-string.
        return assembled.replace(regex=r'^(?!.*\d{3}-\d{3}-\d{4}).*$', value=pd.NA)  # noqa: FS003

    # Combine the three phone-number fragments into single columns.
    util_df = util_df.assign(
        phone_number=_format_phone(
            util_df.phone_number_first,
            util_df.phone_number_mid,
            util_df.phone_number_last),
        phone_number_2=_format_phone(
            util_df.phone_number_first_2,
            util_df.phone_number_mid_2,
            util_df.phone_number_last_2),
    )
    # Y/N columns become nullable booleans; unreported values become NA.
    boolean_columns_to_fix = [
        'plants_reported_owner',
        'plants_reported_operator',
        'plants_reported_asset_manager',
        'plants_reported_other_relationship'
    ]
    for column in boolean_columns_to_fix:
        util_df[column] = (
            util_df[column]
            .fillna("NaN")
            .replace(
                to_replace=["Y", "N", "NaN"],
                value=[True, False, pd.NA])
        )
    util_df = (
        util_df.astype({
            "utility_id_eia": "Int64"
        })
        .pipe(pudl.helpers.convert_to_date)
        .fillna({'entity_type': pd.NA})
    )
    eia860_transformed_dfs['utilities_eia860'] = util_df
    return eia860_transformed_dfs
def transform(eia860_raw_dfs, eia860_tables=DataSource.from_id('eia860').get_resource_ids()):
    """
    Transform EIA 860 DataFrames.

    Args:
        eia860_raw_dfs (dict): a dictionary of tab names (keys) and DataFrames
            (values). This can be generated by pudl.
        eia860_tables (tuple): A tuple containing the names of the EIA 860
            tables that can be pulled into PUDL.

    Returns:
        dict: A dictionary of DataFrame objects in which pages from EIA860
        form (keys) corresponds to a normalized DataFrame of values from that
        page (values).
    """
    # Map each output table to the transform function that builds it.
    table_transformers = {
        'ownership_eia860': ownership,
        'generators_eia860': generators,
        'plants_eia860': plants,
        'boiler_generator_assn_eia860': boiler_generator_assn,
        'utilities_eia860': utilities,
    }
    eia860_transformed_dfs = {}
    if not eia860_raw_dfs:
        logger.info("No raw EIA 860 dataframes found. "
                    "Not transforming EIA 860.")
        return eia860_transformed_dfs
    # Run the transform function for each requested table.
    for table, transform_func in table_transformers.items():
        if table not in eia860_tables:
            continue
        logger.info("Transforming raw EIA 860 DataFrames for %s "
                    "concatenated across all years.", table)
        transform_func(eia860_raw_dfs, eia860_transformed_dfs)
    return eia860_transformed_dfs
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger("dialogs")
import os.path
from .dialog_exceptions import UnknownVerb
from dialogs.helpers.helpers import colored_print
def singleton(cls):
    """Class decorator turning *cls* into a singleton: every call of the
    decorated name returns the same, lazily created, instance."""
    _instances = {}

    def getinstance():
        # Create the one instance on first use, then keep handing it back.
        try:
            return _instances[cls]
        except KeyError:
            instance = _instances[cls] = cls()
            return instance

    return getinstance
class DummyKnowledgeBase:
    """Stand-in for a real knowledge base.

    Every would-have-been interaction with the knowledge base is simply
    logged, so the dialog pipeline can run without a KB backend.
    """

    def __init__(self):
        self.log = logging.getLogger("dialogs.dummykb")

    def revise(self, stmts, options):
        self.log.info(colored_print("REVISE ({}): {}".format(options, stmts), 'cyan'))

    def findForAgent(self, model, var, query):
        self.log.info(colored_print("QUERY (in model <{}>): {}".format(model, query), 'cyan'))

    def lookupForAgent(self, model, query):
        self.log.info(colored_print("LOOKUP (in model <{}>): {}".format(model, query), 'cyan'))

    def __getitem__(self, query):
        self.log.info(colored_print("QUERY: {}".format(query), 'cyan'))

    def close(self):
        # Nothing to release for the dummy backend.
        pass
class ThematicRole(object):
    """One thematic role parsed from a line of the thematic_roles file.

    A role line looks like ``id [preposition] Class1,Class2`` and may be
    wrapped in square brackets to mark the whole role as optional.
    """

    def __init__(self, desc):
        # Square brackets around the whole description mark an optional role.
        self.optional = desc.startswith('[')
        if self.optional:
            desc = desc[1:-1]  # remove square brackets
        tokens = desc.strip().split()
        self.id = tokens[0]
        if tokens[1].startswith('['):
            # A bracketed second token is the introducing preposition.
            self.preposition = tokens[1][1:-1]
            self.actors_classes = tokens[2].split(',')
        else:
            self.preposition = None
            self.actors_classes = tokens[1].split(',')

    def __str__(self):
        text = " (optional)" if self.optional else ""
        text += " " + self.id
        if self.preposition:
            text += ' (introduced by "{}")'.format(self.preposition)
        text += " that expects {}".format(self.actors_classes)
        return text
class VerbEntry(object):
    """A verb together with its subject role and complement roles.

    *ref* is the canonical verb: it equals *name* unless this entry was
    registered as a synonym of another verb.
    """

    def __init__(self, name, ref, roles):
        self.name = name
        self.ref = ref
        # By convention roles[0] is the subject; the rest are complements.
        self.subject = roles[0]
        self.roles = roles[1:]
        self._role_pointer = 0

    def next_role(self):
        """Return the next positional complement role, or None when the
        declared roles are exhausted."""
        self._role_pointer += 1
        try:
            candidate = self.roles[self._role_pointer - 1]
        except IndexError:
            return None
        # Roles introduced by a preposition are matched explicitly via
        # get_role_for_preposition(), so skip them here.
        return self.next_role() if candidate.preposition else candidate

    def get_role_for_preposition(self, prep):
        """Return the complement role introduced by *prep*, or None."""
        for candidate in self.roles:
            if candidate.preposition == prep:
                return candidate
        return None

    def is_synonym(self):
        return self.name != self.ref

    def __str__(self):
        text = 'verb "' + self.name + '"'
        if self.ref != self.name:
            text += " (syn. of " + self.ref + ")"
        text += " that has roles:\n"
        for role in self.roles:
            text += str(role) + "\n"
        return text
@singleton
class ThematicRolesDict(object):
    """Registry of all verbs with their associated thematic roles, as listed
    in the data/dialog/thematic_roles file. Refer to that file for details
    regarding syntax.
    """

    def __init__(self):
        self.verbs = {}

    def get_ref(self, verb):
        """Return the canonical verb *verb* refers to (itself unless it was
        declared as a synonym in the thematic_roles file)."""
        try:
            return self.verbs[verb].ref
        except KeyError:
            raise UnknownVerb("Verb " + verb + " doesn't exist in the thematic role list.")

    def add_verb(self, desc):
        """Parse one block of the thematic_roles file and register the verb
        plus any synonyms declared next to it."""
        lines = desc.split("\n")
        header = lines[0].strip().split()
        # lines[0] is the verb description; the block ends with '}' plus a
        # trailing empty string, hence lines[1:-2] for the role lines.
        roles = [ThematicRole(role_line) for role_line in lines[1:-2]]
        entries = [VerbEntry(header[0], header[0], roles)]
        # A parenthesized, comma-separated list after the verb name declares
        # synonyms sharing the same roles.
        if header[1].startswith('('):
            for synonym in header[1][1:-1].split(','):
                entries.append(VerbEntry(synonym, header[0], roles))
        for entry in entries:
            self.verbs[entry.name] = entry

    def get_subject_role(self, verb, with_spaces=False):
        try:
            role_id = self.verbs[verb.lower()].subject.id
        except KeyError:
            # No specific thematic role for the subject is defined: fall back
            # to a generic "performedBy" predicate.
            role_id = "performedBy"
        return " " + role_id + " " if with_spaces else role_id

    def get_next_cmplt_role(self, verb, with_spaces=False):
        # Generic "involves" predicate when no specific role is defined.
        role_id = "involves"
        try:
            role_id = self.verbs[verb.lower()].next_role().id
        except KeyError:
            # Unknown verb: keep the generic predicate.
            pass
        except AttributeError:
            # next_role() returned None: more complements than declared
            # roles (e.g. "get me the bottle") -> keep the generic predicate.
            pass
        return " " + role_id + " " if with_spaces else role_id

    def get_cmplt_role_for_preposition(self, verb, preposition, with_spaces=False):
        try:
            role = self.verbs[verb.lower()].get_role_for_preposition(preposition)
        except KeyError:
            return None
        if role is None:
            return None
        return " " + role.id + " " if with_spaces else role.id

    def get_all_verbs(self):
        return list(self.verbs)

    def __str__(self):
        return "".join(str(entry) + "\n" for entry in self.verbs.values())
@singleton
class ResourcePool(object):
    """Singleton holding all the linguistic resources (word lists, verbs,
    thematic roles, ...) loaded from the Dialogs data files, plus the
    connection to the knowledge base.
    """
    # Name of the knowledge model used for the robot's own knowledge.
    default_model = "default"

    def split_list(self, word_list):
        """Split the token lists read from a resource file into sections.

        Sections are delimited by runs of lines whose first token starts
        with '#'.  Returns a list of sections, each a list of token lists.

        NOTE(review): the original indentation was mangled; this control-flow
        reconstruction assumes separator runs toggle *flag* — confirm against
        the data files.
        """
        # init
        flag = 0
        # NOTE(review): both names start out bound to the *same* empty list;
        # harmless here because '+' below always rebinds to a fresh list.
        list_list_word = our_list = []
        for i in word_list:
            if i:
                if i[0].startswith('#'):
                    if flag == 0:
                        # First '#' line of a separator run: flush the section
                        # collected so far (if any) and start a new one.
                        flag = 1
                        if our_list:
                            list_list_word = list_list_word + [our_list]
                        our_list = []
                    else:
                        flag = 0
                else:
                    # Regular content line; collected once the separator run
                    # is over (flag back to 0).
                    if flag == 0:
                        our_list = our_list + [i]
        # Flush the final section.
        list_list_word = list_list_word + [our_list]
        return list_list_word

    def __init__(self):
        """ Empty constructor for the singleton decorator.
        Real initialization must be manually triggered by calling ResourcePool.init().
        """
        self.ontology_server = None
        # adjective -> ontology category, filled from the "adjectives" file
        self.adjectives = {}
        self.irregular_verbs_past = []
        self.irregular_verbs_present = []
        self.preposition_verbs = []
        self.modal = []
        self.adjective_verb = []
        self.special_nouns = []
        self.pronouns = []
        self.numbers = []
        self.demonstrative_det = []
        self.adverbs = []
        self.adverbs_at_end = []
        self.proposals = []
        self.preposition_rdf_object_property = {}
        self.compelement_proposals = []
        self.capital_letters = []
        self.determinants = []
        self.nouns_end_s = []
        self.relatives = []
        self.subsentences = []
        self.det_quantifiers = []
        self.adjective_rules = []
        self.composed_nouns = []
        self.plural_nouns = []
        self.auxiliary = []
        self.direct_transitive = []
        self.indirect_transitive = []
        self.state = []
        self.complement_pronouns = []
        self.concatenate_proposals = []
        self.change_tuples = []
        self.adjective_numbers = []
        self.be_pronoun = []
        self.noun_not_composed = []
        self.adj_quantifiers = []
        self.verb_need_to = []
        self.prep_change_place = []
        self.replace_tuples = []
        self.adjective_numbers_digit = []
        self.days_list = []
        self.months_list = []
        self.time_adverbs = []
        self.location_adverbs = []
        self.unusable_words = []
        self.time_proposals = []
        self.action_verb_with_passive_behaviour = {}
        self.adjectives_ontology_classes = []
        self.special_verbs = []
        self.adv_sub = []
        """list of tokens that can start a sentence"""
        self.sentence_starts = []
        """
        list of verbs that express a goal - ie, that would translate to a
        [S desires O] statement.
        """
        self.goal_verbs = []
        """
        dictionnary of all verbs for which thematic roles are known.
        """
        self.thematic_roles = ThematicRolesDict()

    def init(self, kb_host="localhost", kb_port=6969, embeddedkb=False, defaultontology=None, data_path=None):
        """Load every resource file found under *data_path* and connect to
        the knowledge base (falling back to DummyKnowledgeBase on failure).

        The numeric section indices below must match the '#'-separated
        section order inside each resource file.
        """
        if not data_path:
            # try to guess the current prefix and then the data directory
            data_path = os.path.abspath(__file__).split('lib')[0].split('src')[0] + 'share/dialogs/'
            logger.debug("Assuming Dialogs data dir is <%s>" % data_path)
        try:
            from kb import KB, KbError
        except ImportError:
            # NOTE(review): execution continues after this error, so the
            # KB(...) call below would raise NameError when 'kb' is missing.
            logger.error("Python bindings to access the knowledge are not available." + \
                         "Please install 'pykb' and restart Dialogs.")
        try:
            self.ontology_server = KB(kb_host, kb_port, embeddedkb, defaultontology)
        except KbError:
            logger.error("Error while trying to connect to the knowledge base on %s:%s" % (kb_host, kb_port) + \
                         ". Continuing without knowledge base. Amongst others, resolution won't work.")
            self.ontology_server = DummyKnowledgeBase()
        # adjective -> category map; category defaults to "Feature".
        for line in open(os.path.join(data_path, "adjectives")):
            if line.startswith("#") or not line.strip():
                continue
            try:
                adj, cat = line.split()
            except ValueError:
                # for adjectives without category, set a generic "Feature" category
                adj = line.split()[0]
                cat = "Feature"
            self.adjectives[adj] = cat
        verbs = [list(line.split())
                 for line
                 in open(os.path.join(data_path, "verbs"))]
        verbs = self.split_list(verbs)
        self.irregular_verbs_past = verbs[0]
        self.irregular_verbs_present = verbs[1]
        self.preposition_verbs = verbs[2]
        self.modal = [k[0] for k in verbs[3]]
        self.adjective_verb = [k[0] for k in verbs[4]]
        self.auxiliary = [k[0] for k in verbs[5]]
        self.direct_transitive = [k[0] for k in verbs[6]]
        self.indirect_transitive = [k[0] for k in verbs[7]]
        self.state = [k[0] for k in verbs[8]]
        self.verb_need_to = [k[0] for k in verbs[9]]
        self.special_verbs = [k[0] for k in verbs[12]]
        # Action verbs such as 'see', 'hear' with no active behaviour
        self.action_verb_with_passive_behaviour = dict([(k[0], k[1]) for k in verbs[10]])
        self.goal_verbs = [k[0] for k in verbs[11]]
        self.sentence_starts = [tuple(line.split())
                                for line
                                in open(os.path.join(data_path, "sentence_starts"))]
        nouns = [list(line.split())
                 for line
                 in open(os.path.join(data_path, "nouns"))]
        nouns = self.split_list(nouns)
        self.special_nouns = [k[0] for k in nouns[0]]
        self.pronouns = [k[0] for k in nouns[1]]
        # A '1' in the second column flags a complement pronoun.
        for i in nouns[1]:
            if i[1] == '1':
                self.complement_pronouns = self.complement_pronouns + [i[0]]
        self.demonstrative_det = [k[0] for k in nouns[2]]
        self.determinants = [k[0] for k in nouns[3]]
        self.nouns_end_s = [k[0] for k in nouns[4]]
        self.relatives = [k[0] for k in nouns[5]]
        self.composed_nouns = [k[0] for k in nouns[6]]
        self.plural_nouns = nouns[7]
        self.noun_not_composed = [k[0] for k in nouns[8]]
        self.days_list = nouns[9]
        self.months_list = nouns[10]
        self.unusable_words = [k[0] for k in nouns[11]]
        # List of diection words, E.g: LEFT, RIGHT, TOP, etc ...
        self.direction_words = [k[0] for k in nouns[12]]
        self.compound_nouns = nouns[13]
        ###
        ### ADVERBIALS
        adverbials = [list(line.split())
                      for line
                      in open(os.path.join(data_path, "adverbial"))]
        adverbials = self.split_list(adverbials)
        self.adverbs = [k[0] for k in adverbials[0]]
        self.time_adverbs = adverbials[0]
        # NOTE(review): the assignment above is immediately overwritten by
        # the filtered list below.
        self.time_adverbs = [k[0] for k in adverbials[0] if k[1] in ["day", "hour"]]
        self.time_adverbs += [k[0] for k in adverbials[1] if k[1] in ["day", "hour"]]
        self.location_adverbs = [k[0] for k in adverbials[0] if k[1] == "location"]
        self.location_adverbs += [k[0] for k in adverbials[1] if k[1] == "location"]
        self.adverbs_at_end = [k[0] for k in adverbials[1]]
        for k in adverbials[2]:
            if k[1] == '1':
                self.compelement_proposals = self.compelement_proposals + [k[0]]
        self.proposals = [k[0] for k in adverbials[2]]
        #Preposition with an existing object_property
        # E.g: next+to => isNextTo
        self.preposition_rdf_object_property = dict([(k[0], k[3:]) for k in adverbials[2]])
        self.time_proposals = adverbials[2]
        self.subsentences = [k[0] for k in adverbials[3]]
        for k in adverbials[3]:
            if k[1] == '1':
                self.adv_sub = self.adv_sub + [k[0]]
        self.prep_change_place = [k[0] for k in adverbials[4]]
        grammatical_rules = [list(line.split())
                             for line
                             in open(os.path.join(data_path, "grammatical_rules"))]
        grammatical_rules = self.split_list(grammatical_rules)
        self.numbers = grammatical_rules[0]
        self.det_quantifiers = grammatical_rules[1]
        self.capital_letters = [k[0] for k in grammatical_rules[2]]
        self.adjective_rules = [k[0] for k in grammatical_rules[3]]
        self.concatenate_proposals = grammatical_rules[4]
        self.change_tuples = grammatical_rules[5]
        self.adjective_numbers_digit = grammatical_rules[6]
        self.adjective_numbers = [k[0] for k in grammatical_rules[6]]
        self.be_pronoun = [k[0] for k in grammatical_rules[7]]
        self.adj_quantifiers = [k[0] for k in grammatical_rules[8]]
        # word -> replacement token list
        for k in grammatical_rules[9]:
            self.replace_tuples = self.replace_tuples + [[k[0], k[1:]]]
        # Accumulate thematic_roles blocks line by line; a block is complete
        # when its closing '}' is reached.
        desc = ""
        for line in open(os.path.join(data_path, "thematic_roles")):
            if line.startswith("#") or not line.strip():
                continue
            desc += line
            if line.startswith("}"):  # end of block
                self.thematic_roles.add_verb(desc)
                desc = ""
        #Add action verbs to the ontology
        if self.ontology_server:
            stmts = [verb.capitalize() + " rdfs:subClassOf cyc:PurposefulAction" for verb in
                     list(self.thematic_roles.verbs.keys()) if not self.thematic_roles.verbs[verb].is_synonym()]
            self.ontology_server.revise(stmts, {"method": "add"})
        """
        List of ontology classes that are used in the adjectives list
        """
        self.adjectives_ontology_classes = [self.adjectives[adj].lower() for adj in self.adjectives]
        # Deduplicate while preserving order.
        adj_s = []
        for k in self.adjectives_ontology_classes:
            if not k in adj_s:
                adj_s.append(k)
        self.adjectives_ontology_classes = adj_s

    def get_model_mapping(self, agent):
        """ Returns the name of the knowledge model associated to a given agent.
        """
        if agent == "myself":
            return ResourcePool().default_model
        else:
            return agent + "_model"

    def mark_active(self, ids):
        """Tag the given concept id(s) as ActiveConcept in the default model
        for a short lifespan.  No-op without a knowledge base."""
        if not self.ontology_server:
            return
        if isinstance(ids, str):
            ids = [ids]
        self.ontology_server.revise([id + " rdf:type ActiveConcept" for id in ids],
                                    {"method": "add", "models": [ResourcePool().default_model], "lifespan": 10})

    def __del__(self):
        self.close()

    def close(self):
        """Close the knowledge base connection (idempotent)."""
        if self.ontology_server:
            self.ontology_server.close()
            self.ontology_server = None
if __name__ == '__main__':
    # Dump the loaded resources for a quick manual inspection.
    resources = ResourcePool()
    print("*** DIALOG Resource manager ***")
    print()
    print("List of loaded resources:")
    print()
    sections = [
        ("Adjectives:", resources.adjectives),
        ("special nouns:", resources.special_nouns),
        ("pronouns:", resources.pronouns),
        ("Preposition verbs:", resources.preposition_verbs),
        ("Sentence starts:", resources.sentence_starts),
        ("Goal verbs:", resources.goal_verbs),
        ("Thematic roles:", resources.thematic_roles),
    ]
    for index, (label, value) in enumerate(sections):
        # Blank separator line between sections (none after the last one).
        if index:
            print()
        print(label)
        print(str(value))
| |
import asyncio
import sys
import threading
from asyncio import QueueEmpty as AsyncQueueEmpty
from asyncio import QueueFull as AsyncQueueFull
from collections import deque
from heapq import heappop, heappush
from queue import Empty as SyncQueueEmpty
from queue import Full as SyncQueueFull
from typing import Any, Callable, Deque, Generic, List, Optional, Set, TypeVar
from typing_extensions import Protocol
__version__ = "1.0.0"
# Public API of the module.
__all__ = (
    "Queue",
    "PriorityQueue",
    "LifoQueue",
    "SyncQueue",
    "AsyncQueue",
    "BaseQueue",
)
# Generic type of the items held by the queues.
T = TypeVar("T")
# Optional timeout in seconds (None means "wait forever").
OptFloat = Optional[float]
class BaseQueue(Protocol[T]):
    """Protocol of the operations shared by the sync and async queue views."""

    @property
    def maxsize(self) -> int:
        """Maximum number of items the queue may hold."""
        ...

    @property
    def closed(self) -> bool:
        """True once the queue has been closed."""
        ...

    def task_done(self) -> None:
        """Mark one previously gotten item as processed."""
        ...

    def qsize(self) -> int:
        """Approximate number of items currently in the queue."""
        ...

    @property
    def unfinished_tasks(self) -> int:
        """Number of put items not yet acknowledged via task_done()."""
        ...

    def empty(self) -> bool:
        """True if the queue holds no items (not reliable under concurrency)."""
        ...

    def full(self) -> bool:
        """True if the queue is at maxsize (not reliable under concurrency)."""
        ...

    def put_nowait(self, item: T) -> None:
        """Enqueue *item* without blocking, or raise Full."""
        ...

    def get_nowait(self) -> T:
        """Dequeue an item without blocking, or raise Empty."""
        ...
class SyncQueue(BaseQueue[T], Protocol[T]):
    """Blocking (thread-side) queue interface.

    Restates the BaseQueue members so the protocol is self-contained, and
    adds blocking put/get with optional timeouts plus join().
    """

    @property
    def maxsize(self) -> int:
        ...

    @property
    def closed(self) -> bool:
        ...

    def task_done(self) -> None:
        ...

    def qsize(self) -> int:
        ...

    @property
    def unfinished_tasks(self) -> int:
        ...

    def empty(self) -> bool:
        ...

    def full(self) -> bool:
        ...

    def put_nowait(self, item: T) -> None:
        ...

    def get_nowait(self) -> T:
        ...

    def put(self, item: T, block: bool = True, timeout: OptFloat = None) -> None:
        """Enqueue *item*, optionally blocking up to *timeout* seconds."""
        ...

    def get(self, block: bool = True, timeout: OptFloat = None) -> T:
        """Dequeue an item, optionally blocking up to *timeout* seconds."""
        ...

    def join(self) -> None:
        """Block until every put item has been marked done via task_done()."""
        ...
class AsyncQueue(BaseQueue[T], Protocol[T]):
    """Awaitable (asyncio-side) queue interface."""

    async def put(self, item: T) -> None:
        """Enqueue *item*, waiting for a free slot if necessary."""
        ...

    async def get(self) -> T:
        """Dequeue an item, waiting if the queue is empty."""
        ...

    async def join(self) -> None:
        """Wait until all unfinished tasks are done (see task_done())."""
        ...
class Queue(Generic[T]):
    """A queue usable from both synchronous threads and asyncio code.

    One underlying buffer is exposed through two facades: ``sync_q``
    (blocking, thread-side API) and ``async_q`` (awaitable, loop-side API).
    Must be instantiated while an event loop is running.
    """

    def __init__(self, maxsize: int = 0) -> None:
        self._loop = asyncio.get_running_loop()
        self._maxsize = maxsize
        self._init(maxsize)  # create the underlying buffer (overridable)
        self._unfinished_tasks = 0
        # All sync-side conditions share a single mutex.
        self._sync_mutex = threading.Lock()
        self._sync_not_empty = threading.Condition(self._sync_mutex)
        self._sync_not_full = threading.Condition(self._sync_mutex)
        self._all_tasks_done = threading.Condition(self._sync_mutex)
        self._async_mutex = asyncio.Lock()
        if sys.version_info[:3] == (3, 10, 0):
            # Workaround for Python 3.10 bug, see #358:
            getattr(self._async_mutex, "_get_loop", lambda: None)()
        self._async_not_empty = asyncio.Condition(self._async_mutex)
        self._async_not_full = asyncio.Condition(self._async_mutex)
        # Set whenever there are no unfinished tasks.
        self._finished = asyncio.Event()
        self._finished.set()
        self._closing = False
        # Futures/tasks spawned for cross-side notifications; awaited by
        # wait_closed().
        self._pending = set()  # type: Set[asyncio.Future[Any]]

        def checked_call_soon_threadsafe(
            callback: Callable[..., None], *args: Any
        ) -> None:
            try:
                self._loop.call_soon_threadsafe(callback, *args)
            except RuntimeError:
                # swallowing agreed in #2
                pass

        self._call_soon_threadsafe = checked_call_soon_threadsafe

        def checked_call_soon(callback: Callable[..., None], *args: Any) -> None:
            if not self._loop.is_closed():
                self._loop.call_soon(callback, *args)

        self._call_soon = checked_call_soon

        self._sync_queue = _SyncQueueProxy(self)
        self._async_queue = _AsyncQueueProxy(self)

    def close(self) -> None:
        """Forbid further operations and cancel pending notification tasks."""
        with self._sync_mutex:
            self._closing = True
            for fut in self._pending:
                fut.cancel()
            self._finished.set()  # unblocks all async_q.join()
            self._all_tasks_done.notify_all()  # unblocks all sync_q.join()

    async def wait_closed(self) -> None:
        # should be called from loop after close().
        # Nobody should put/get at this point,
        # so lock acquiring is not required
        if not self._closing:
            raise RuntimeError("Waiting for non-closed queue")
        # give execution chances for the task-done callbacks
        # of async tasks created inside
        # _notify_async_not_empty, _notify_async_not_full
        # methods.
        await asyncio.sleep(0)
        if not self._pending:
            return
        await asyncio.wait(self._pending)

    @property
    def closed(self) -> bool:
        """True once close() was called and no notification tasks remain."""
        return self._closing and not self._pending

    @property
    def maxsize(self) -> int:
        return self._maxsize

    @property
    def sync_q(self) -> "_SyncQueueProxy[T]":
        """Thread-side (blocking) view of this queue."""
        return self._sync_queue

    @property
    def async_q(self) -> "_AsyncQueueProxy[T]":
        """Loop-side (awaitable) view of this queue."""
        return self._async_queue

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    def _init(self, maxsize: int) -> None:
        self._queue = deque()  # type: Deque[T]

    def _qsize(self) -> int:
        return len(self._queue)

    # Put a new item in the queue
    def _put(self, item: T) -> None:
        self._queue.append(item)

    # Get an item from the queue
    def _get(self) -> T:
        return self._queue.popleft()

    def _put_internal(self, item: T) -> None:
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()

    def _notify_sync_not_empty(self) -> None:
        # Wake a thread blocked in sync get(); must hop through an executor
        # thread because the condition requires its threading lock.
        def f() -> None:
            with self._sync_mutex:
                self._sync_not_empty.notify()

        self._loop.run_in_executor(None, f)

    def _notify_sync_not_full(self) -> None:
        # Same as above, but tracked in _pending so wait_closed() can await it.
        def f() -> None:
            with self._sync_mutex:
                self._sync_not_full.notify()

        fut = asyncio.ensure_future(self._loop.run_in_executor(None, f))
        fut.add_done_callback(self._pending.discard)
        self._pending.add(fut)

    def _notify_async_not_empty(self, *, threadsafe: bool) -> None:
        # Wake a task blocked in async get(); scheduled onto the loop,
        # thread-safely when invoked from sync code.
        async def f() -> None:
            async with self._async_mutex:
                self._async_not_empty.notify()

        def task_maker() -> None:
            task = self._loop.create_task(f())
            task.add_done_callback(self._pending.discard)
            self._pending.add(task)

        if threadsafe:
            self._call_soon_threadsafe(task_maker)
        else:
            self._call_soon(task_maker)

    def _notify_async_not_full(self, *, threadsafe: bool) -> None:
        async def f() -> None:
            async with self._async_mutex:
                self._async_not_full.notify()

        def task_maker() -> None:
            task = self._loop.create_task(f())
            task.add_done_callback(self._pending.discard)
            self._pending.add(task)

        if threadsafe:
            self._call_soon_threadsafe(task_maker)
        else:
            self._call_soon(task_maker)

    def _check_closing(self) -> None:
        if self._closing:
            raise RuntimeError("Operation on the closed queue is forbidden")
class _SyncQueueProxy(SyncQueue[T]):
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, parent: Queue[T]):
self._parent = parent
@property
def maxsize(self) -> int:
return self._parent._maxsize
@property
def closed(self) -> bool:
return self._parent.closed
def task_done(self) -> None:
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self._parent._check_closing()
with self._parent._all_tasks_done:
unfinished = self._parent._unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError("task_done() called too many times")
self._parent._all_tasks_done.notify_all()
self._parent._loop.call_soon_threadsafe(self._parent._finished.set)
self._parent._unfinished_tasks = unfinished
def join(self) -> None:
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self._parent._check_closing()
with self._parent._all_tasks_done:
while self._parent._unfinished_tasks:
self._parent._all_tasks_done.wait()
self._parent._check_closing()
def qsize(self) -> int:
"""Return the approximate size of the queue (not reliable!)."""
return self._parent._qsize()
@property
def unfinished_tasks(self) -> int:
"""Return the number of unfinished tasks."""
return self._parent._unfinished_tasks
def empty(self) -> bool:
"""Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
"""
return not self._parent._qsize()
def full(self) -> bool:
"""Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
"""
return 0 < self._parent._maxsize <= self._parent._qsize()
def put(self, item: T, block: bool = True, timeout: OptFloat = None) -> None:
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self._parent._check_closing()
with self._parent._sync_not_full:
if self._parent._maxsize > 0:
if not block:
if self._parent._qsize() >= self._parent._maxsize:
raise SyncQueueFull
elif timeout is None:
while self._parent._qsize() >= self._parent._maxsize:
self._parent._sync_not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
time = self._parent._loop.time
endtime = time() + timeout
while self._parent._qsize() >= self._parent._maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise SyncQueueFull
self._parent._sync_not_full.wait(remaining)
self._parent._put_internal(item)
self._parent._sync_not_empty.notify()
self._parent._notify_async_not_empty(threadsafe=True)
def get(self, block: bool = True, timeout: OptFloat = None) -> T:
    """Remove and return an item from the queue.

    If optional args 'block' is true and 'timeout' is None (the default),
    block if necessary until an item is available. If 'timeout' is
    a non-negative number, it blocks at most 'timeout' seconds and raises
    the Empty exception if no item was available within that time.
    Otherwise ('block' is false), return an item if one is immediately
    available, else raise the Empty exception ('timeout' is ignored
    in that case).
    """
    # Refuse to dequeue from a closed queue.
    self._parent._check_closing()
    with self._parent._sync_not_empty:
        if not block:
            if not self._parent._qsize():
                raise SyncQueueEmpty
        elif timeout is None:
            # Block indefinitely until a producer adds an item.
            while not self._parent._qsize():
                self._parent._sync_not_empty.wait()
        elif timeout < 0:
            raise ValueError("'timeout' must be a non-negative number")
        else:
            # Deadline-based wait using the event loop's clock so the
            # sync and async halves of the queue share one time source.
            time = self._parent._loop.time
            endtime = time() + timeout
            while not self._parent._qsize():
                remaining = endtime - time()
                if remaining <= 0.0:
                    raise SyncQueueEmpty
                self._parent._sync_not_empty.wait(remaining)
        item = self._parent._get()
        # Wake one blocked sync producer, then the async side; this sync
        # API may be called from any thread, hence threadsafe=True.
        self._parent._sync_not_full.notify()
        self._parent._notify_async_not_full(threadsafe=True)
        return item
def put_nowait(self, item: "T") -> None:
    """Put an item into the queue without blocking.

    Only enqueue the item if a free slot is immediately available.
    Otherwise raise the Full exception.
    """
    # Delegate to put() in non-blocking mode.
    self.put(item, block=False)
def get_nowait(self) -> "T":
    """Remove and return an item from the queue without blocking.

    Only get an item if one is immediately available.  Otherwise
    raise the Empty exception.
    """
    # Delegate to get() in non-blocking mode.
    item = self.get(block=False)
    return item
class _AsyncQueueProxy(AsyncQueue[T]):
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """

    def __init__(self, parent: Queue[T]):
        # All state lives on the parent Queue; this proxy only exposes the
        # asyncio-flavoured API on top of it.
        self._parent = parent

    @property
    def closed(self) -> bool:
        return self._parent.closed

    def qsize(self) -> int:
        """Number of items in the queue."""
        return self._parent._qsize()

    @property
    def unfinished_tasks(self) -> int:
        """Return the number of unfinished tasks."""
        return self._parent._unfinished_tasks

    @property
    def maxsize(self) -> int:
        """Number of items allowed in the queue."""
        return self._parent._maxsize

    def empty(self) -> bool:
        """Return True if the queue is empty, False otherwise."""
        return self.qsize() == 0

    def full(self) -> bool:
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._parent._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._parent._maxsize

    async def put(self, item: T) -> None:
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.

        This method is a coroutine.
        """
        self._parent._check_closing()
        async with self._parent._async_not_full:
            # The sync mutex guards the shared storage; 'locked' tracks
            # ownership so the finally clause releases it exactly once.
            self._parent._sync_mutex.acquire()
            locked = True
            try:
                if self._parent._maxsize > 0:
                    do_wait = True
                    while do_wait:
                        do_wait = self._parent._qsize() >= self._parent._maxsize
                        if do_wait:
                            # Release the sync mutex before awaiting so
                            # sync-side consumers can drain the queue,
                            # then re-acquire and re-check capacity.
                            locked = False
                            self._parent._sync_mutex.release()
                            await self._parent._async_not_full.wait()
                            self._parent._sync_mutex.acquire()
                            locked = True
                self._parent._put_internal(item)
                # Wake one async consumer and notify the sync side.
                self._parent._async_not_empty.notify()
                self._parent._notify_sync_not_empty()
            finally:
                if locked:
                    self._parent._sync_mutex.release()

    def put_nowait(self, item: T) -> None:
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        self._parent._check_closing()
        with self._parent._sync_mutex:
            if self._parent._maxsize > 0:
                if self._parent._qsize() >= self._parent._maxsize:
                    raise AsyncQueueFull
            self._parent._put_internal(item)
            # threadsafe=False: presumably this async-side API runs on the
            # event loop thread -- confirm before calling elsewhere.
            self._parent._notify_async_not_empty(threadsafe=False)
            self._parent._notify_sync_not_empty()

    async def get(self) -> T:
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.

        This method is a coroutine.
        """
        self._parent._check_closing()
        async with self._parent._async_not_empty:
            # Same mutex/ownership dance as put(): drop the sync mutex
            # while awaiting, re-acquire and re-check before reading.
            self._parent._sync_mutex.acquire()
            locked = True
            try:
                do_wait = True
                while do_wait:
                    do_wait = self._parent._qsize() == 0
                    if do_wait:
                        locked = False
                        self._parent._sync_mutex.release()
                        await self._parent._async_not_empty.wait()
                        self._parent._sync_mutex.acquire()
                        locked = True
                item = self._parent._get()
                # Wake one async producer and notify the sync side.
                self._parent._async_not_full.notify()
                self._parent._notify_sync_not_full()
                return item
            finally:
                if locked:
                    self._parent._sync_mutex.release()

    def get_nowait(self) -> T:
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        self._parent._check_closing()
        with self._parent._sync_mutex:
            if self._parent._qsize() == 0:
                raise AsyncQueueEmpty
            item = self._parent._get()
            # threadsafe=False: presumably this async-side API runs on the
            # event loop thread -- confirm before calling elsewhere.
            self._parent._notify_async_not_full(threadsafe=False)
            self._parent._notify_sync_not_full()
            return item

    def task_done(self) -> None:
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        self._parent._check_closing()
        with self._parent._all_tasks_done:
            if self._parent._unfinished_tasks <= 0:
                raise ValueError("task_done() called too many times")
            self._parent._unfinished_tasks -= 1
            if self._parent._unfinished_tasks == 0:
                # Unblock async join() waiters (see join() below) ...
                self._parent._finished.set()
            # ... and wake any sync-side join() waiters.
            self._parent._all_tasks_done.notify_all()

    async def join(self) -> None:
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        while True:
            # Check the counter under the sync mutex, but wait outside it
            # so task_done() callers can make progress; then loop to
            # re-check under the lock.
            with self._parent._sync_mutex:
                self._parent._check_closing()
                if self._parent._unfinished_tasks == 0:
                    break
            await self._parent._finished.wait()
class PriorityQueue(Queue[T]):
    """Queue variant that always yields the lowest-ordered entry first.

    Entries are typically tuples of the form: (priority number, data);
    the heap invariant is maintained by the heapq module.
    """

    def _init(self, maxsize: int) -> None:
        # Storage is a binary heap rather than a plain FIFO container.
        self._heap_queue = list()  # type: List[T]

    def _qsize(self) -> int:
        return len(self._heap_queue)

    def _put(self, item: T) -> None:
        # O(log n) insertion keeping the smallest item at index 0.
        heappush(self._heap_queue, item)

    def _get(self) -> T:
        # O(log n) removal of the smallest item.
        return heappop(self._heap_queue)
class LifoQueue(Queue[T]):
    """Variant of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize: int) -> None:
        # Define our own storage like PriorityQueue does, instead of
        # relying on the base class's _init(); a plain list gives O(1)
        # append()/pop() at the tail, which is all a LIFO queue needs.
        self._queue = []  # type: List[T]

    def _qsize(self) -> int:
        return len(self._queue)

    def _put(self, item: T) -> None:
        self._queue.append(item)

    def _get(self) -> T:
        # pop() without an index removes the most recently appended item.
        return self._queue.pop()
| |
#!/usr/bin/env python
# Import OE-Classic recipe data into the layer index database
#
# Copyright (C) 2013 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Licensed under the MIT license, see COPYING.MIT for details
import sys
import os.path
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..')))
import optparse
import logging
from datetime import datetime
import fnmatch
import re
import tempfile
import shutil
from distutils.version import LooseVersion
import utils
import recipeparse
logger = utils.logger_create('LayerIndexUpdate')
def update_recipe_file(data, path, recipe, layerdir_start, repodir):
    """Parse one OE-Classic recipe file and populate the recipe object.

    On a parse failure the error is logged and, if PN could not be
    determined, it is guessed from the file name; nothing except
    KeyboardInterrupt is propagated to the caller.

    :param data: bitbake configuration data to parse against
    :param path: directory containing the recipe file
    :param recipe: (Classic)Recipe model object to populate and save
    :param layerdir_start: unused in this body; apparently kept for
        interface compatibility -- confirm before removing
    :param repodir: unused in this body; same reason
    """
    fn = str(os.path.join(path, recipe.filename))
    try:
        logger.debug('Updating recipe %s' % fn)
        # NOTE(review): 'bb' is not imported in this module; presumably it
        # only becomes importable after recipeparse.init_parser() has set
        # up the bitbake paths -- confirm before calling this standalone.
        envdata = bb.cache.Cache.loadDataFull(fn, [], data)
        # Substitute dummy values for source-revision variables so that
        # recipes referencing them (e.g. in PV) parse without needing any
        # real revision information.
        envdata.setVar('SRCPV', 'X')
        envdata.setVar('SRCDATE', 'X')
        envdata.setVar('SRCREV', 'X')
        envdata.setVar('OPIE_SRCREV', 'X')
        recipe.pn = envdata.getVar("PN", True)
        recipe.pv = envdata.getVar("PV", True)
        recipe.summary = envdata.getVar("SUMMARY", True)
        recipe.description = envdata.getVar("DESCRIPTION", True)
        recipe.section = envdata.getVar("SECTION", True)
        recipe.license = envdata.getVar("LICENSE", True)
        recipe.homepage = envdata.getVar("HOMEPAGE", True)
        recipe.provides = envdata.getVar("PROVIDES", True) or ""
        recipe.bbclassextend = envdata.getVar("BBCLASSEXTEND", True) or ""
        recipe.save()
    except KeyboardInterrupt:
        # Let the user abort the whole import.
        raise
    except BaseException as e:
        if not recipe.pn:
            # Fall back to deriving PN from a "<pn>_<pv>.bb" file name.
            recipe.pn = recipe.filename[:-3].split('_')[0]
        logger.error("Unable to read %s: %s", fn, str(e))
def main():
    """Import OE-Classic recipe data into the layer index database."""
    # Command-line interface: two positional arguments (paths to a bitbake
    # checkout and an OE-Classic checkout) plus import options.
    parser = optparse.OptionParser(
        usage = """
%prog [options] <bitbakepath> <oeclassicpath>""")

    parser.add_option("-b", "--branch",
            help = "Specify branch to import into",
            action="store", dest="branch", default='oe-classic')
    parser.add_option("-l", "--layer",
            help = "Specify layer to import into",
            action="store", dest="layer", default='oe-classic')
    parser.add_option("-n", "--dry-run",
            help = "Don't write any data back to the database",
            action="store_true", dest="dryrun")
    parser.add_option("-d", "--debug",
            help = "Enable debug output",
            action="store_const", const=logging.DEBUG, dest="loglevel", default=logging.INFO)
    parser.add_option("-q", "--quiet",
            help = "Hide all output except error messages",
            action="store_const", const=logging.ERROR, dest="loglevel")

    # args[0] is the script name, hence the positional args are args[1:3].
    options, args = parser.parse_args(sys.argv)
    if len(args) < 3:
        logger.error('You must specify bitbakepath and oeclassicpath')
        parser.print_help()
        sys.exit(1)
    if len(args) > 3:
        logger.error('unexpected argument "%s"' % args[3])
        parser.print_help()
        sys.exit(1)

    # Django must be configured before the model imports below will work,
    # so these imports are deliberately deferred.
    utils.setup_django()
    import settings
    from layerindex.models import LayerItem, LayerBranch, Recipe, ClassicRecipe, Machine, BBAppend, BBClass
    from django.db import transaction

    logger.setLevel(options.loglevel)

    branch = utils.get_branch(options.branch)
    if not branch:
        logger.error("Specified branch %s is not valid" % options.branch)
        sys.exit(1)

    # Fetch the LayerItem record for the OE-Classic "layer", creating it
    # with fixed metadata if it does not exist yet.
    res = list(LayerItem.objects.filter(name=options.layer)[:1])
    if res:
        layer = res[0]
    else:
        layer = LayerItem()
        layer.name = options.layer
        layer.status = 'P'
        layer.layer_type = 'M'
        layer.summary = 'OE-Classic'
        layer.description = 'OpenEmbedded-Classic'
        layer.vcs_url = 'git://git.openembedded.org/openembedded'
        layer.vcs_web_url = 'http://cgit.openembedded.org/cgit.cgi/openembedded'
        layer.vcs_web_tree_base_url = 'http://cgit.openembedded.org/cgit.cgi/openembedded/tree/%path%'
        layer.vcs_web_file_base_url = 'http://cgit.openembedded.org/cgit.cgi/openembedded/tree/%path%'
        layer.classic = True
        layer.save()

    layerbranch = layer.get_layerbranch(options.branch)
    if not layerbranch:
        # LayerBranch doesn't exist for this branch, create it
        layerbranch = LayerBranch()
        layerbranch.layer = layer
        layerbranch.branch = branch
        layerbranch.save()

    fetchdir = settings.LAYER_FETCH_DIR
    if not fetchdir:
        logger.error("Please set LAYER_FETCH_DIR in settings.py")
        sys.exit(1)
    if not os.path.exists(fetchdir):
        os.makedirs(fetchdir)
    # NOTE(review): these two lists are never used in this script.
    fetchedrepos = []
    failedrepos = []

    bitbakepath = args[1]
    oeclassicpath = args[2]

    # Point BBPATH at the local oe-classic conf directory plus the checkout
    # so bitbake can locate its configuration files.
    confparentdir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../oe-classic'))
    os.environ['BBPATH'] = str("%s:%s" % (confparentdir, oeclassicpath))
    try:
        (tinfoil, tempdir) = recipeparse.init_parser(settings, branch, bitbakepath, nocheckout=True, classic=True, logger=logger)
    except recipeparse.RecipeParseError as e:
        logger.error(str(e))
        sys.exit(1)

    # Clear the default value of SUMMARY so that we can use DESCRIPTION instead if it hasn't been set
    tinfoil.config_data.setVar('SUMMARY', '')
    # Clear the default value of DESCRIPTION so that we can see where it's not set
    tinfoil.config_data.setVar('DESCRIPTION', '')
    # Clear the default value of HOMEPAGE ('unknown')
    tinfoil.config_data.setVar('HOMEPAGE', '')

    # NOTE(review): this is the pre-Django-1.6 manual transaction API; the
    # whole import runs in one transaction so a dry run can be rolled back.
    transaction.enter_transaction_management()
    transaction.managed(True)
    try:
        layerdir_start = os.path.normpath(oeclassicpath) + os.sep
        layerrecipes = Recipe.objects.filter(layerbranch=layerbranch)
        layermachines = Machine.objects.filter(layerbranch=layerbranch)
        layerappends = BBAppend.objects.filter(layerbranch=layerbranch)
        layerclasses = BBClass.objects.filter(layerbranch=layerbranch)

        try:
            config_data_copy = recipeparse.setup_layer(tinfoil.config_data, fetchdir, oeclassicpath, layer, layerbranch)
        except recipeparse.RecipeParseError as e:
            logger.error(str(e))
            transaction.rollback()
            sys.exit(1)

        # Delete all existing records for this layerbranch and re-import
        # everything from scratch.
        layerrecipes.delete()
        layermachines.delete()
        layerappends.delete()
        layerclasses.delete()
        for root, dirs, files in os.walk(oeclassicpath):
            if '.git' in dirs:
                # Don't descend into the git metadata directory.
                dirs.remove('.git')
            for f in files:
                fullpath = os.path.join(root, f)
                (typename, filepath, filename) = recipeparse.detect_file_type(fullpath, layerdir_start)
                if typename == 'recipe':
                    recipe = ClassicRecipe()
                    recipe.layerbranch = layerbranch
                    recipe.filename = filename
                    recipe.filepath = filepath
                    update_recipe_file(config_data_copy, root, recipe, layerdir_start, oeclassicpath)
                    recipe.save()

        layerbranch.vcs_last_fetch = datetime.now()
        layerbranch.save()

        if options.dryrun:
            transaction.rollback()
        else:
            transaction.commit()
    except:
        # Roll back on any error so a partial import is never committed;
        # the traceback is printed since this is a command-line tool.
        import traceback
        traceback.print_exc()
        transaction.rollback()
    finally:
        transaction.leave_transaction_management()

    # Clean up the temporary directory created by init_parser().
    shutil.rmtree(tempdir)
    sys.exit(0)
# Allow running this module directly as a command-line tool.
if __name__ == "__main__":
    main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
__all__ = [
"SinhArcsinh",
]
class SinhArcsinh(transformed_distribution.TransformedDistribution):
  """The SinhArcsinh transformation of a distribution on `(-inf, inf)`.

  This distribution models a random variable, making use of
  a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
  a rescaling, and a shift.

  The `SinhArcsinh` transformation of the Normal is described in great depth in
  [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
  Here we use a slightly different parameterization, in terms of `tailweight`
  and `skewness`.  Additionally we allow for distributions other than Normal,
  and control over `scale` as well as a "shift" parameter `loc`.

  #### Mathematical Details

  Given random variable `Z`, we define the SinhArcsinh
  transformation of `Z`, `Y`, parameterized by
  `(loc, scale, skewness, tailweight)`, via the relation:

  ```
  Y := loc + scale * F(Z) * (2 / F(2))
  F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
  C := 2 * scale / F(2)
  ```

  so that, equivalently, `Y = loc + C * F(Z)`.

  This distribution is similar to the location-scale transformation
  `L(Z) := loc + scale * Z` in the following ways:

  * If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
    `Y = L(Z)` exactly.
  * `loc` is used in both to shift the result by a constant factor.
  * Our definition of `C` ensures that
    `P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
    Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
    `loc + 2 * scale` are the same.

  This distribution is different than `loc + scale * Z` due to the
  reshaping done by `F`:

  * Positive (negative) `skewness` leads to positive (negative) skew.
    * positive skew means, the mode of `F(Z)` is "tilted" to the right.
    * positive skew means positive values of `F(Z)` become more likely, and
      negative values become less likely.
  * Larger (smaller) `tailweight` leads to fatter (thinner) tails.
    * Fatter tails mean larger values of `|F(Z)|` become more likely.
    * `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
      and a very steep drop-off in the tails.
    * `tailweight > 1` leads to a distribution more peaked at the mode with
      heavier tails.

  To see the argument about the tails, note that for `|Z| >> 1` and
  `|Z| >> (|skewness| * tailweight)**tailweight`, we have
  `Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.

  To see the argument about `C` and quantiles, note that

  ```
  P[(Y - loc) / scale <= 2] = P[F(Z) <= 2 * scale / C]
                            = P[Z <= F^{-1}(2 * scale / C)]
                            = P[Z <= 2].
  ```
  """

  # NOTE: the default `name` used to be "MultivariateNormalLinearOperator",
  # an apparent copy/paste slip from another distribution; ops created by
  # this class are now name-scoped consistently with the class itself.
  def __init__(self,
               loc,
               scale,
               skewness=None,
               tailweight=None,
               distribution=None,
               validate_args=False,
               allow_nan_stats=True,
               name="SinhArcsinh"):
    """Construct SinhArcsinh distribution on `(-inf, inf)`.

    Arguments `(loc, scale, skewness, tailweight)` must have broadcastable shape
    (indexing batch dimensions).  They must all have the same `dtype`.

    Args:
      loc: Floating-point `Tensor`.
      scale:  `Tensor` of same `dtype` as `loc`.
      skewness:  Skewness parameter.  Default is `0.0` (no skew).
      tailweight:  Tailweight parameter. Default is `1.0` (unchanged tailweight)
      distribution: `tf.Distribution`-like instance. Distribution that is
        transformed to produce this distribution.
        Default is `ds.Normal(0., 1.)`.
        Must be a scalar-batch, scalar-event distribution.  Typically
        `distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
        a function of non-trainable parameters. WARNING: If you backprop through
        a `SinhArcsinh` sample and `distribution` is not
        `FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
        the gradient will be incorrect!
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    parameters = locals()

    with ops.name_scope(name, values=[loc, scale, skewness, tailweight]):
      loc = ops.convert_to_tensor(loc, name="loc")
      dtype = loc.dtype
      scale = ops.convert_to_tensor(scale, name="scale", dtype=dtype)
      tailweight = 1. if tailweight is None else tailweight
      skewness = 0. if skewness is None else skewness
      tailweight = ops.convert_to_tensor(
          tailweight, name="tailweight", dtype=dtype)
      skewness = ops.convert_to_tensor(skewness, name="skewness", dtype=dtype)

      batch_shape = distribution_util.get_broadcast_shape(
          loc, scale, tailweight, skewness)

      # Recall, with Z a random variable,
      #   Y := loc + C * F(Z),
      #   F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
      #   C := 2 * scale / F(2)
      if distribution is None:
        distribution = normal.Normal(
            loc=array_ops.zeros([], dtype=dtype),
            scale=array_ops.ones([], dtype=dtype),
            allow_nan_stats=allow_nan_stats)
      else:
        asserts = distribution_util.maybe_check_scalar_distribution(
            distribution, dtype, validate_args)
        if asserts:
          loc = control_flow_ops.with_dependencies(asserts, loc)

      # Make the SAS bijector, 'F'.
      f = bijectors.SinhArcsinh(
          skewness=skewness, tailweight=tailweight, event_ndims=0)

      # Make the Affine bijector, Z --> loc + C * Z.
      c = 2 * scale / f.forward(ops.convert_to_tensor(2, dtype=dtype))
      affine = bijectors.Affine(
          shift=loc,
          scale_identity_multiplier=c,
          validate_args=validate_args,
          event_ndims=0)

      bijector = bijectors.Chain([affine, f])

      super(SinhArcsinh, self).__init__(
          distribution=distribution,
          bijector=bijector,
          batch_shape=batch_shape,
          validate_args=validate_args,
          name=name)
    self._parameters = parameters
    self._loc = loc
    self._scale = scale
    self._tailweight = tailweight
    self._skewness = skewness

  @property
  def loc(self):
    """The `loc` in `Y := loc + scale * F(Z) * (2 / F(2))`."""
    return self._loc

  @property
  def scale(self):
    """The `scale` `Tensor` in `Y := loc + scale * F(Z) * (2 / F(2))`."""
    return self._scale

  @property
  def tailweight(self):
    """Controls tail decay.  `tailweight > 1` means fatter tails than Normal."""
    return self._tailweight

  @property
  def skewness(self):
    """Controls the skewness.  `Skewness > 0` means right skew."""
    return self._skewness
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
import six
from nova.compute import power_state
from nova.conductor.tasks import base
from nova.conductor.tasks import migrate
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def should_do_migration_allocation(context):
    """Return True if every nova-compute service is new enough (service
    version >= 25) to support migration-based allocations."""
    minimum_version = objects.Service.get_minimum_version_multi(
        context, ['nova-compute'])
    return minimum_version >= 25
class LiveMigrationTask(base.TaskBase):
def __init__(self, context, instance, destination,
        block_migration, disk_over_commit, migration, compute_rpcapi,
        servicegroup_api, scheduler_client, request_spec=None):
    """Initialize the live migration task state.

    :param destination: requested target host, or None to let the
        scheduler pick one
    :param migration: Migration object tracking this operation
    :param request_spec: original RequestSpec if available; otherwise it
        is rebuilt from instance metadata later
    """
    super(LiveMigrationTask, self).__init__(context, instance)
    self.destination = destination
    self.block_migration = block_migration
    self.disk_over_commit = disk_over_commit
    self.migration = migration
    self.source = instance.host
    # Populated by _call_livem_checks_on_host() with the destination
    # compute's pre-check results.
    self.migrate_data = None

    self.compute_rpcapi = compute_rpcapi
    self.servicegroup_api = servicegroup_api
    self.scheduler_client = scheduler_client
    self.request_spec = request_spec
    # Set by migrate.replace_allocation_with_migration() in _execute()
    # when migration-based allocations are used; consumed by rollback().
    self._source_cn = None
    self._held_allocations = None
def _execute(self):
    """Run the live migration task.

    Validates the instance and source host, resolves (or verifies) the
    destination, records source/dest details on the Migration object and
    then kicks off the live migration via the source compute's RPC API.
    """
    self._check_instance_is_active()
    self._check_host_is_up(self.source)

    if should_do_migration_allocation(self.context):
        self._source_cn, self._held_allocations = (
            # NOTE(danms): This may raise various exceptions, which will
            # propagate to the API and cause a 500. This is what we
            # want, as it would indicate internal data structure corruption
            # (such as missing migrations, compute nodes, etc).
            migrate.replace_allocation_with_migration(self.context,
                                                      self.instance,
                                                      self.migration))

    if not self.destination:
        # Either no host was specified in the API request and the user
        # wants the scheduler to pick a destination host, or a host was
        # specified but is not forcing it, so they want the scheduler
        # filters to run on the specified host, like a scheduler hint.
        self.destination, dest_node = self._find_destination()
    else:
        # This is the case that the user specified the 'force' flag when
        # live migrating with a specific destination host so the scheduler
        # is bypassed. There are still some minimal checks performed here
        # though.
        source_node, dest_node = self._check_requested_destination()
        # Now that we're semi-confident in the force specified host, we
        # need to copy the source compute node allocations in Placement
        # to the destination compute node. Normally select_destinations()
        # in the scheduler would do this for us, but when forcing the
        # target host we don't call the scheduler.
        # TODO(mriedem): In Queens, call select_destinations() with a
        # skip_filters=True flag so the scheduler does the work of claiming
        # resources on the destination in Placement but still bypass the
        # scheduler filters, which honors the 'force' flag in the API.
        # This raises NoValidHost which will be handled in
        # ComputeTaskManager.
        scheduler_utils.claim_resources_on_destination(
            self.context, self.scheduler_client.reportclient,
            self.instance, source_node, dest_node,
            source_node_allocations=self._held_allocations)

        # dest_node is a ComputeNode object, so we need to get the actual
        # node name off it to set in the Migration object below.
        dest_node = dest_node.hypervisor_hostname

    # Record the chosen source/destination on the migration record before
    # handing off to the compute manager.
    self.migration.source_node = self.instance.node
    self.migration.dest_node = dest_node
    self.migration.dest_compute = self.destination
    self.migration.save()

    # TODO(johngarbutt) need to move complexity out of compute manager
    # TODO(johngarbutt) disk_over_commit?
    return self.compute_rpcapi.live_migration(self.context,
            host=self.source,
            instance=self.instance,
            dest=self.destination,
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=self.migrate_data)
def rollback(self):
    """Revert any allocation changes made by this task."""
    # TODO(johngarbutt) need to implement the clean up operation
    # but this will make sense only once we pull in the compute
    # calls, since this class currently makes no state changes,
    # except to call the compute method, that has no matching
    # rollback call right now.
    if self._held_allocations:
        # Move the allocations held by the migration record back to the
        # instance (the inverse of replace_allocation_with_migration()).
        migrate.revert_allocation_for_migration(self.context,
                                                self._source_cn,
                                                self.instance,
                                                self.migration,
                                                self._held_allocations)
def _check_instance_is_active(self):
    """Raise InstanceInvalidState unless the instance is RUNNING or PAUSED."""
    migratable_states = (power_state.RUNNING, power_state.PAUSED)
    if self.instance.power_state in migratable_states:
        return
    raise exception.InstanceInvalidState(
        instance_uuid=self.instance.uuid,
        attr='power_state',
        state=self.instance.power_state,
        method='live migrate')
def _check_host_is_up(self, host):
    """Raise ComputeServiceUnavailable if the compute service on host is down."""
    service = objects.Service.get_by_compute_host(self.context, host)
    is_up = self.servicegroup_api.service_is_up(service)
    if not is_up:
        raise exception.ComputeServiceUnavailable(host=host)
def _check_requested_destination(self):
    """Performs basic pre-live migration checks for the forced host.

    :returns: tuple of (source ComputeNode, destination ComputeNode)
    """
    self._check_destination_is_not_source()
    self._check_host_is_up(self.destination)
    self._check_destination_has_enough_memory()
    source_node, dest_node = self._check_compatible_with_source_hypervisor(
        self.destination)
    # Asks the destination compute whether migration can proceed; also
    # fills in self.migrate_data as a side effect.
    self._call_livem_checks_on_host(self.destination)
    # Make sure the forced destination host is in the same cell that the
    # instance currently lives in.
    # NOTE(mriedem): This can go away if/when the forced destination host
    # case calls select_destinations.
    source_cell_mapping = self._get_source_cell_mapping()
    dest_cell_mapping = self._get_destination_cell_mapping()
    if source_cell_mapping.uuid != dest_cell_mapping.uuid:
        raise exception.MigrationPreCheckError(
            reason=(_('Unable to force live migrate instance %s '
                      'across cells.') % self.instance.uuid))
    return source_node, dest_node
def _check_destination_is_not_source(self):
    """Raise UnableToMigrateToSelf if the destination equals the source."""
    if self.source == self.destination:
        raise exception.UnableToMigrateToSelf(
            instance_id=self.instance.uuid, host=self.destination)
def _check_destination_has_enough_memory(self):
    """Raise MigrationPreCheckError if the destination lacks enough RAM.

    Mimics the scheduler RAMFilter calculation using the destination
    ComputeNode's ram_allocation_ratio.
    """
    # TODO(mriedem): This method can be removed when the forced host
    # scenario is calling select_destinations() in the scheduler because
    # Placement will be used to filter allocation candidates by MEMORY_MB.
    # We likely can't remove it until the CachingScheduler is gone though
    # since the CachingScheduler does not use Placement.
    compute = self._get_compute_info(self.destination)
    free_ram_mb = compute.free_ram_mb
    total_ram_mb = compute.memory_mb
    mem_inst = self.instance.memory_mb
    # NOTE(sbauza): Now the ComputeNode object reports an allocation ratio
    # that can be provided by the compute_node if new or by the controller
    ram_ratio = compute.ram_allocation_ratio

    # NOTE(sbauza): Mimic the RAMFilter logic in order to have the same
    # ram validation
    avail = total_ram_mb * ram_ratio - (total_ram_mb - free_ram_mb)
    # A falsy instance memory_mb (0/None) is also treated as a failure.
    if not mem_inst or avail <= mem_inst:
        instance_uuid = self.instance.uuid
        dest = self.destination
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of memory(host:%(avail)s <= "
                   "instance:%(mem_inst)s)")
        raise exception.MigrationPreCheckError(reason=reason % dict(
                instance_uuid=instance_uuid, dest=dest, avail=avail,
                mem_inst=mem_inst))
def _get_compute_info(self, host):
    """Return the (first) ComputeNode record for the given host."""
    compute_node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
        self.context, host)
    return compute_node
def _check_compatible_with_source_hypervisor(self, destination):
    """Ensure the destination hypervisor matches the source's type and is
    not older than it.

    :returns: tuple of (source ComputeNode, destination ComputeNode)
    :raises: InvalidHypervisorType or DestinationHypervisorTooOld
    """
    source_info = self._get_compute_info(self.source)
    destination_info = self._get_compute_info(destination)

    if source_info.hypervisor_type != destination_info.hypervisor_type:
        raise exception.InvalidHypervisorType()

    if source_info.hypervisor_version > destination_info.hypervisor_version:
        raise exception.DestinationHypervisorTooOld()
    return source_info, destination_info
def _call_livem_checks_on_host(self, destination):
    """Run the destination compute's can-live-migrate pre-checks.

    Stores the resulting migrate data on self.migrate_data for later use
    by the live_migration RPC call.

    :raises: MigrationPreCheckError if the RPC call times out
    """
    try:
        self.migrate_data = self.compute_rpcapi.\
            check_can_live_migrate_destination(self.context, self.instance,
                destination, self.block_migration, self.disk_over_commit)
    except messaging.MessagingTimeout:
        msg = _("Timeout while checking if we can live migrate to host: "
                "%s") % destination
        raise exception.MigrationPreCheckError(msg)
def _get_source_cell_mapping(self):
    """Returns the CellMapping for the cell in which the instance lives

    :returns: nova.objects.CellMapping record for the cell where
        the instance currently lives.
    :raises: MigrationPreCheckError - in case a mapping is not found
    """
    try:
        instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
            self.context, self.instance.uuid)
    except exception.InstanceMappingNotFound:
        raise exception.MigrationPreCheckError(
            reason=(_('Unable to determine in which cell '
                      'instance %s lives.') % self.instance.uuid))
    return instance_mapping.cell_mapping
def _get_destination_cell_mapping(self):
    """Returns the CellMapping for the destination host

    :returns: nova.objects.CellMapping record for the cell where
        the destination host is mapped.
    :raises: MigrationPreCheckError - in case a mapping is not found
    """
    try:
        host_mapping = objects.HostMapping.get_by_host(
            self.context, self.destination)
    except exception.HostMappingNotFound:
        raise exception.MigrationPreCheckError(
            reason=(_('Unable to determine in which cell '
                      'destination host %s lives.') % self.destination))
    return host_mapping.cell_mapping
def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
    """Builds a RequestSpec that can be passed to select_destinations

    Used when calling the scheduler to pick a destination host for live
    migrating the instance.

    :param attempted_hosts: List of host names to ignore in the scheduler.
        This is generally at least seeded with the source host.
    :returns: nova.objects.RequestSpec object
    """
    if not self.request_spec:
        # NOTE(sbauza): We were unable to find an original RequestSpec
        # object - probably because the instance is old.
        # We need to mock that the old way
        image = utils.get_image_from_system_metadata(
            self.instance.system_metadata)
        filter_properties = {'ignore_hosts': attempted_hosts}
        request_spec = objects.RequestSpec.from_components(
            self.context, self.instance.uuid, image,
            self.instance.flavor, self.instance.numa_topology,
            self.instance.pci_requests,
            filter_properties, None, self.instance.availability_zone
        )
    else:
        request_spec = self.request_spec
        # NOTE(sbauza): Force_hosts/nodes needs to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        request_spec.reset_forced_destinations()
    scheduler_utils.setup_instance_group(self.context, request_spec)

    # We currently only support live migrating to hosts in the same
    # cell that the instance lives in, so we need to tell the scheduler
    # to limit the applicable hosts based on cell.
    cell_mapping = self._get_source_cell_mapping()
    LOG.debug('Requesting cell %(cell)s while live migrating',
              {'cell': cell_mapping.identity},
              instance=self.instance)
    # Reuse an existing requested_destination (so any host/node it carries
    # is preserved) or create a fresh one restricted to the source cell.
    if ('requested_destination' in request_spec and
            request_spec.requested_destination):
        request_spec.requested_destination.cell = cell_mapping
    else:
        request_spec.requested_destination = objects.Destination(
            cell=cell_mapping)

    request_spec.ensure_project_and_user_id(self.instance)
    return request_spec
    def _find_destination(self):
        """Ask the scheduler for a destination host/node for the migration.

        Repeatedly calls select_destinations, rejecting hosts that fail the
        hypervisor-compatibility or live-migration pre-checks, until a host
        passes or the retry budget is exhausted.

        :returns: (service_host, nodename) tuple of the accepted selection
        :raises: MigrationSchedulerRPCError on scheduler RPC failure;
            MaxRetriesExceeded when too many hosts have been attempted.
        """
        # TODO(johngarbutt) this retry loop should be shared
        attempted_hosts = [self.source]
        request_spec = self._get_request_spec_for_select_destinations(
            attempted_hosts)

        host = None
        while host is None:
            self._check_not_over_max_retries(attempted_hosts)
            # Every host we already tried (including the source) is excluded
            # from the next scheduling attempt.
            request_spec.ignore_hosts = attempted_hosts
            try:
                selection_lists = self.scheduler_client.select_destinations(
                    self.context, request_spec, [self.instance.uuid],
                    return_objects=True, return_alternates=False)
                # We only need the first item in the first list, as there is
                # only one instance, and we don't care about any alternates.
                selection = selection_lists[0][0]
                host = selection.service_host
            except messaging.RemoteError as ex:
                # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
                # scheduling algorithm is R-R, we can let other scheduler try.
                # Note(ShaoHe Feng) There are types of RemoteError, such as
                # NoSuchMethod, UnsupportedVersion, we can distinguish it by
                # ex.exc_type.
                raise exception.MigrationSchedulerRPCError(
                    reason=six.text_type(ex))
            try:
                self._check_compatible_with_source_hypervisor(host)
                self._call_livem_checks_on_host(host)
            except (exception.Invalid, exception.MigrationPreCheckError) as e:
                LOG.debug("Skipping host: %(host)s because: %(e)s",
                    {"host": host, "e": e})
                attempted_hosts.append(host)
                # The scheduler would have created allocations against the
                # selected destination host in Placement, so we need to remove
                # those before moving on.
                self._remove_host_allocations(host, selection.nodename)
                # Setting host back to None keeps the loop going.
                host = None
        # host is non-None here, so 'selection' is the accepted pick.
        return selection.service_host, selection.nodename
def _remove_host_allocations(self, host, node):
"""Removes instance allocations against the given host from Placement
:param host: The name of the host.
:param node: The name of the node.
"""
# Get the compute node object since we need the UUID.
# TODO(mriedem): If the result of select_destinations eventually
# returns the compute node uuid, we wouldn't need to look it
# up via host/node and we can save some time.
try:
compute_node = objects.ComputeNode.get_by_host_and_nodename(
self.context, host, node)
except exception.ComputeHostNotFound:
# This shouldn't happen, but we're being careful.
LOG.info('Unable to remove instance allocations from host %s '
'and node %s since it was not found.', host, node,
instance=self.instance)
return
# Calculate the resource class amounts to subtract from the allocations
# on the node based on the instance flavor.
resources = scheduler_utils.resources_from_flavor(
self.instance, self.instance.flavor)
# Now remove the allocations for our instance against that node.
# Note that this does not remove allocations against any other node
# or shared resource provider, it's just undoing what the scheduler
# allocated for the given (destination) node.
self.scheduler_client.reportclient.\
remove_provider_from_instance_allocation(
self.context, self.instance.uuid, compute_node.uuid,
self.instance.user_id, self.instance.project_id, resources)
def _check_not_over_max_retries(self, attempted_hosts):
if CONF.migrate_max_retries == -1:
return
retries = len(attempted_hosts) - 1
if retries > CONF.migrate_max_retries:
if self.migration:
self.migration.status = 'failed'
self.migration.save()
msg = (_('Exceeded max scheduling retries %(max_retries)d for '
'instance %(instance_uuid)s during live migration')
% {'max_retries': retries,
'instance_uuid': self.instance.uuid})
raise exception.MaxRetriesExceeded(reason=msg)
| |
#! /usr/bin/python
# Copyright (c) 2015 Dave McCoy (dave.mccoy@cospandesign.com)
#
# This file is part of Nysa.
# (http://wiki.cospandesign.com/index.php?title=Nysa.org)
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
from array import array as Array
from sdb_component import SDBComponent as sdbc
import sdb_component
from sdb_object_model import SOMBus
from sdb import SDBInfo
from sdb import SDBWarning
from sdb import SDBError
from sdb_component import SDB_INTERCONNECT_MAGIC
from sdb_component import SDB_ROM_RECORD_LENGTH as RECORD_LENGTH
#Public facing functions
def generate_rom_image(som):
    """
    Serialize a populated SOM into a flat SDB ROM image.

    Args:
        som (SDBObjectModel): A populated SOM

    Return:
        (Array): byte array containing the generated ROM image

    Raises:
        SDBError, error while parsing the SOM
    """
    image = Array('B')
    # Walk the object model starting at the root bus; _bus_to_rom appends
    # every record to 'image' and returns it.
    return _bus_to_rom(som.get_root(), image)
def get_total_number_of_records(som):
    """
    Given a populated SOM return the total number of
    elements in the ROM (this is used to calculate
    the size of the ROM within the FPGA).

    Args:
        som (SDBObjectModel): A populated SOM

    Return:
        (Integer): Total number of SDB records that will
            be within the ROM

    Raises:
        SDBError, error while parsing the SOM
    """
    rom = Array('B')
    root = som.get_root()
    _bus_to_rom(root, rom)
    # Each SDB record occupies RECORD_LENGTH bytes.  Use the named constant
    # instead of the previously hard-coded 64, and floor division so the
    # result stays an int on Python 3 (plain '/' would return a float).
    return len(rom) // RECORD_LENGTH
#Private functions (SDB -> ROM)
def _bus_to_rom(bus, rom, addr=None):
    """
    Serialize one bus into the ROM.

    Reserves a contiguous slice for this bus (one interconnect record, one
    record per child, one empty terminator record), then fills it in.  A
    nested bus gets a bridge record in its parent's slice and its own
    records appended at the end of the ROM.

    Args:
        bus (SOMBus): the bus to serialize
        rom (Array): byte array the records are written into
        addr (int): byte offset of this bus's slice (defaults to 0 for
            the root bus)

    Return:
        (Array): the same rom array, for chaining

    Note: removed the unused locals 'buses', 'bridge_address_offset' and
    the dead 'pos' counter from the original implementation.
    """
    if addr is None:
        addr = 0x00
    # Reserve a slice of the ROM that will contain the entire bus:
    #   +1 record for the initial interconnect
    #   +1 record for the empty terminator afterwards
    for i in range((len(bus) + 2) * RECORD_LENGTH):
        rom.append(0x00)
    # Put in a marker for an empty buffer (last byte of the slice).
    rom[len(rom) - 1] = 0xFF
    _generate_interconnect_rom(bus.get_component(), rom, addr)
    addr += RECORD_LENGTH
    for entity in bus:
        if isinstance(entity, SOMBus):
            # Nested bus: write a bridge record here pointing at the child
            # bus's records, which start at the current end of the ROM.
            _generate_bridge_rom(entity.get_component(), rom, addr)
            _bus_to_rom(entity, rom, len(rom))
        else:
            _generate_entity_rom(entity.get_component(), rom, addr)
        addr += RECORD_LENGTH
    return rom
def _generate_entity_rom(entity, rom, addr):
    """
    Dispatch a non-bus, non-interconnect component to the record writer
    that matches its type.  Components matching none of the known record
    types are silently skipped.
    """
    dispatch = (
        (entity.is_device, _generate_device_rom),
        (entity.is_integration_record, _generate_integration_rom),
        (entity.is_url_record, _generate_url_rom),
        (entity.is_synthesis_record, _generate_synthesis_rom),
    )
    # First matching predicate wins, mirroring the original if/elif chain.
    for predicate, writer in dispatch:
        if predicate():
            writer(entity, rom, addr)
            return
def _generate_bridge_rom(entity, rom, addr):
    """
    Write a bridge record: product and component fields plus the pointer
    to the child bus's records and the bridge record-type byte.
    """
    _generate_product_rom(entity, rom, addr)
    _generate_component_rom(entity, rom, addr)
    # The child bus's records are appended at the current end of the ROM;
    # store that location (in 8-byte units) as a 64-bit big-endian value
    # in bytes addr+0x00 .. addr+0x07.  Floor division keeps the offset an
    # int on Python 3 — the original 'len(rom) / 8' produced a float there,
    # which breaks the bit shifts below.
    offset = len(rom) // 8
    for i in range(8):
        rom[addr + i] = (offset >> (56 - 8 * i)) & 0xFF
    rom[addr + RECORD_LENGTH - 1] = sdb_component.SDB_RECORD_TYPE_BRIDGE
def _generate_interconnect_rom(entity, rom, addr):
    """Write the interconnect record that heads each bus in the ROM."""
    _generate_product_rom(entity, rom, addr)
    _generate_component_rom(entity, rom, addr)
    # Interconnect magic number, big endian, bytes 0x00-0x03.
    for i in range(4):
        rom[addr + i] = (SDB_INTERCONNECT_MAGIC >> (24 - 8 * i)) & 0xFF
    # Number of records on this bus, 16-bit big endian.
    record_count = entity.get_number_of_records_as_int()
    rom[addr + 0x04] = (record_count >> 8) & 0xFF
    rom[addr + 0x05] = record_count & 0xFF
    rom[addr + 0x06] = entity.get_version_as_int() & 0xFF
    rom[addr + 0x07] = entity.get_bus_type_as_int() & 0xFF
    rom[addr + RECORD_LENGTH - 1] = entity.get_module_record_type()
def _generate_device_rom(entity, rom, addr):
    """Write a device record: ABI identification plus bus-specific flags."""
    _generate_product_rom(entity, rom, addr)
    _generate_component_rom(entity, rom, addr)
    # ABI class: 16-bit big endian.
    abi_class = entity.get_abi_class_as_int()
    rom[addr + 0x00] = (abi_class >> 8) & 0xFF
    rom[addr + 0x01] = abi_class & 0xFF
    # ABI version, major then minor.
    rom[addr + 0x02] = entity.get_abi_version_major_as_int() & 0xFF
    rom[addr + 0x03] = entity.get_abi_version_minor_as_int() & 0xFF
    # Bus-specific info: endianness, bus width and access flags packed
    # into the final byte as endian|x|exec|write|read.
    endian = entity.get_endian_as_int()
    bus_width = entity._translate_buf_width_to_rom_version()
    executable = 1 if entity.is_executable() else 0
    writeable = 1 if entity.is_writeable() else 0
    readable = 1 if entity.is_readable() else 0
    rom[addr + 0x04] = 0
    rom[addr + 0x05] = 0
    rom[addr + 0x06] = bus_width
    rom[addr + 0x07] = (endian << 4 | executable << 2 | writeable << 1 | readable)
    rom[addr + RECORD_LENGTH - 1] = entity.get_module_record_type()
def _generate_integration_rom(entity, rom, addr):
    """Write an integration record: zeroed header, product info, type byte."""
    # NOTE(review): only 0x1F (31) bytes are cleared, not a full 0x20 —
    # behavior preserved from the original; confirm against the SDB spec.
    rom[addr:addr + 0x1F] = Array('B', [0x00] * 0x1F)
    _generate_product_rom(entity, rom, addr)
    rom[addr + RECORD_LENGTH - 1] = entity.get_module_record_type()
def _generate_url_rom(entity, rom, addr):
    """Write a URL record: the URL string followed by the record-type byte."""
    _string_to_rom(entity.get_url(), RECORD_LENGTH - 1, rom, addr)
    rom[addr + RECORD_LENGTH - 1] = entity.get_module_record_type()
def _generate_synthesis_rom(entity, rom, addr):
    """Write a synthesis record: tool metadata, synthesis date and user name."""
    _string_to_rom(entity.get_synthesis_name(), 16, rom, addr)
    _string_to_rom(entity.get_synthesis_commit_id(), 16, rom, addr + 0x10)
    _string_to_rom(entity.get_synthesis_tool_name(), 8, rom, addr + 0x20)
    _string_to_rom(entity.get_synthesis_tool_version(), 8, rom, addr + 0x28)
    #Date, stored as century and two-digit year, then month and day
    year, month, day = entity.get_date_as_int()
    rom[addr + 0x2C] = int(year / 100)
    rom[addr + 0x2D] = int(year % 100)
    rom[addr + 0x2E] = (month )
    rom[addr + 0x2F] = (day )
    #User name
    name = entity.get_name()
    if len(name) > 19:
        name = name[:19]
    # NOTE(review): name is truncated to 19 characters above but
    # _string_to_rom then writes at most 15 — confirm which limit is
    # intended; preserved as-is.
    _string_to_rom(name, 15, rom, addr + 0x30)
    rom[addr + RECORD_LENGTH - 1] = entity.get_module_record_type()
def _generate_product_rom(entity, rom, addr):
    """
    Write the product fields shared by most record types: vendor id,
    device id, core version, date and name.
    """
    # Vendor ID: 64-bit big endian at offsets 0x18-0x1F.
    vendor_id = entity.get_vendor_id_as_int()
    for i in range(8):
        rom[addr + 0x18 + i] = (vendor_id >> (56 - 8 * i)) & 0xFF
    # Device ID: 32-bit big endian at offsets 0x20-0x23.
    device_id = entity.get_device_id_as_int()
    for i in range(4):
        rom[addr + 0x20 + i] = (device_id >> (24 - 8 * i)) & 0xFF
    # Core version: 32-bit big endian at offsets 0x24-0x27.
    version = entity.get_core_version_as_int()
    for i in range(4):
        rom[addr + 0x24 + i] = (version >> (24 - 8 * i)) & 0xFF
    # Date, stored as century and two-digit year, then month and day.
    year, month, day = entity.get_date_as_int()
    rom[addr + 0x28] = int(year / 100)
    rom[addr + 0x29] = int(year % 100)
    rom[addr + 0x2A] = month
    rom[addr + 0x2B] = day
    # Name, truncated to 19 characters, at offset 0x2C.
    name = entity.get_name()
    _string_to_rom(name[:19], 19, rom, addr + 0x2C)
def _generate_component_rom(entity, rom, addr):
address_first = Array('B')
address_last = Array('B')
start_address = entity.get_start_address_as_int()
end_address = entity.get_end_address_as_int()
for i in range (0, RECORD_LENGTH, 8):
address_first.append((start_address >> (56 - i) & 0xFF))
address_last.append((end_address >> (56 - i) & 0xFF))
rom[addr + 0x08] = address_first[0]
rom[addr + 0x09] = address_first[1]
rom[addr + 0x0A] = address_first[2]
rom[addr + 0x0B] = address_first[3]
rom[addr + 0x0C] = address_first[4]
rom[addr + 0x0D] = address_first[5]
rom[addr + 0x0E] = address_first[6]
rom[addr + 0x0F] = address_first[7]
rom[addr + 0x10] = address_last[0]
rom[addr + 0x11] = address_last[1]
rom[addr + 0x12] = address_last[2]
rom[addr + 0x13] = address_last[3]
rom[addr + 0x14] = address_last[4]
rom[addr + 0x15] = address_last[5]
rom[addr + 0x16] = address_last[6]
rom[addr + 0x17] = address_last[7]
def _string_to_rom(s, max_length, rom, addr):
if len(s) > max_length:
s = s[:max_length]
s = Array('B', s)
for i in range(len(s)):
rom[addr + i] = s[i]
| |
from direct.directtools.DirectSelection import *
from direct.directtools.DirectUtil import ROUND_TO
from direct.directtools.DirectGeometry import LineNodePath
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from toontown.catalog import CatalogFurnitureItem
from toontown.catalog import CatalogItemTypes
from direct.showbase import PythonUtil
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
# Camera presets for furniture-arranging mode.  Each tuple is handed to
# base.localAvatar.setCameraSettings(); the Point3 entries are presumably
# camera/look-at positions and the trailing 1 a flag — TODO confirm
# against setCameraSettings.
camPos50 = (Point3(0.0, -10.0, 50.0),
 Point3(0.0, -9.66, 49.06),
 Point3(0.0, 1.5, 12.38),
 Point3(0.0, 1.5, -3.1),
 1)
camPos40 = (Point3(0.0, -15.0, 40.0),
 Point3(0.0, -14.5, 39.13),
 Point3(0.0, 1.5, 12.38),
 Point3(0.0, 1.5, -3.1),
 1)
camPos30 = (Point3(0.0, -20.0, 30.0),
 Point3(0.0, -19.29, 29.29),
 Point3(0.0, 1.5, 12.38),
 Point3(0.0, 1.5, -3.1),
 1)
camPos20 = (Point3(0.0, -20.0, 20.0),
 Point3(0.0, -19.13, 19.5),
 Point3(0.0, 1.5, 12.38),
 Point3(0.0, 1.5, -3.1),
 1)
# Presets ordered nearest to farthest; zoomCamIn/zoomCamOut step through
# this list by index.
camPosList = [camPos20,
 camPos30,
 camPos40,
 camPos50]
DEFAULT_CAM_INDEX = 2
# Frame colors for FurnitureItemPanel in its normal/disabled and
# delete-mode states.
NormalPickerPanelColor = (1, 0.9, 0.745, 1)
DisabledPickerPanelColor = (0.7, 0.65, 0.58, 1)
DeletePickerPanelColor = (1, 0.4, 0.4, 1)
DisabledDeletePickerPanelColor = (0.7, 0.3, 0.3, 1)
class FurnitureItemPanel(DirectButton):
    """One clickable panel in the furniture pickers (attic/in-room/trash).

    Shows a picture (or a text fallback) for a single catalog item and
    reports clicks back through *command* as (item, itemId).
    """

    def __init__(self, item, itemId, command = None, deleteMode = 0, withinFunc = None, helpCategory = None):
        # item: catalog item to display; itemId: key handed back to command.
        self.item = item
        self.itemId = itemId
        self.command = command
        self.origHelpCategory = helpCategory
        self.deleteMode = deleteMode
        # Red-tinted frame when the enclosing picker is in delete mode.
        if self.deleteMode:
            framePanelColor = DeletePickerPanelColor
        else:
            framePanelColor = NormalPickerPanelColor
        DirectButton.__init__(self, relief=DGG.RAISED, frameSize=(-0.25,
         0.25,
         -0.2,
         0.2), frameColor=framePanelColor, borderWidth=(0.02, 0.02), command=self.clicked)
        if self.deleteMode:
            helpCategory = 'FurnitureItemPanelDelete'
        self.bindHelpText(helpCategory)
        if withinFunc:
            # Report mouse-over of this panel with our item id.
            self.bind(DGG.WITHIN, lambda event: withinFunc(self.itemId))
        self.initialiseoptions(FurnitureItemPanel)
        self.load()

    def show(self):
        # Resume the item's animation interval (if any) while visible.
        DirectFrame.show(self)
        if self.ival:
            self.ival.resume()

    def hide(self):
        DirectFrame.hide(self)
        if self.ival:
            self.ival.pause()

    def load(self):
        """Build the picture (or text fallback) and the name label."""
        panelWidth = 7
        panelCenter = 0  # NOTE(review): unused
        self.picture, self.ival = self.item.getPicture(base.localAvatar)
        if self.picture:
            self.picture.reparentTo(self)
            self.picture.setScale(0.14)
            self.picture.setPos(0, 0, -0.02)
            text = self.item.getName()
            text_pos = (0, -0.1, 0)
        else:
            # No picture available: show "<type>: <name>" as text only.
            text = self.item.getTypeName() + ': ' + self.item.getName()
            text_pos = (0, -0.3, 0)
        if self.ival:
            # loop() then pause(): presumably parks the interval at its
            # first frame until show() resumes it — TODO confirm.
            self.ival.loop()
            self.ival.pause()
        self.nameLabel = DirectLabel(parent=self, relief=None, pos=(0, 0, 0.17), scale=0.45, text=text, text_scale=0.15, text_fg=(0, 0, 0, 1), text_pos=text_pos, text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=panelWidth)
        return

    def clicked(self):
        # Forward the click with both the item and its id.
        self.command(self.item, self.itemId)

    def unload(self):
        """Release the picture, label and interval owned by this panel."""
        if self.item.hasPicture:
            self.item.cleanupPicture()
        del self.item
        self.nameLabel.destroy()
        del self.nameLabel
        if self.ival:
            self.ival.finish()
        del self.ival
        del self.picture
        self.command = None
        return

    def destroy(self):
        self.unload()
        DirectButton.destroy(self)

    def bindHelpText(self, category):
        """(Re)bind hover help; None restores the original category."""
        self.unbind(DGG.ENTER)
        self.unbind(DGG.EXIT)
        if category is None:
            category = self.origHelpCategory
        self.bind(DGG.ENTER, base.cr.objectManager.showHelpText, extraArgs=[category, self.item.getName()])
        self.bind(DGG.EXIT, base.cr.objectManager.hideHelpText)
        return

    def setDeleteMode(self, deleteMode):
        self.deleteMode = deleteMode
        self.__updateAppearance()

    def enable(self, enabled):
        # Toggle the DirectGui state and recolor to match.
        if enabled:
            self['state'] = DGG.NORMAL
        else:
            self['state'] = DGG.DISABLED
        self.__updateAppearance()

    def __updateAppearance(self):
        """Pick the frame color for the current delete/disabled state."""
        color = NormalPickerPanelColor
        relief = DGG.RAISED
        if self.deleteMode:
            if self['state'] == DGG.DISABLED:
                color = DisabledDeletePickerPanelColor
                relief = DGG.SUNKEN
            else:
                color = DeletePickerPanelColor
                relief = DGG.RAISED
        elif self['state'] == DGG.DISABLED:
            color = DisabledPickerPanelColor
            relief = DGG.SUNKEN
        else:
            color = NormalPickerPanelColor
            relief = DGG.RAISED
        # NOTE(review): 'relief' is computed but never applied; only the
        # frame color is actually changed.
        self['frameColor'] = color
class MovableObject(NodePath, DirectObject):
    """Wraps a distributed furniture item so it can be dragged around.

    Measures the item's footprint, classifies it from its catalog flags
    (floor/wall/table/rug) and attaches the collision geometry used while
    dragging.
    """

    def __init__(self, dfitem, parent = render):
        NodePath.__init__(self)
        # Alias this NodePath onto the furniture item's node.
        self.assign(dfitem)
        self.dfitem = dfitem
        dfitem.transmitRelativeTo = dfitem.getParent()
        self.reparentTo(parent)
        # Tag so picking code can recognize movable furniture.
        self.setTag('movableObject', '1')
        # Stash the model's own collision nodes and shadows so they don't
        # pollute the tight-bounds measurement below.
        self.builtInCNodes = self.findAllMatches('**/+CollisionNode')
        self.numBuiltInNodes = self.builtInCNodes.getNumPaths()
        self.stashBuiltInCollisionNodes()
        shadows = self.findAllMatches('**/*shadow*')
        shadows.addPathsFrom(self.findAllMatches('**/*Shadow*'))
        shadows.stash()
        # Classify the item from its catalog flags.
        flags = self.dfitem.item.getFlags()
        if flags & CatalogFurnitureItem.FLPainting:
            self.setOnFloor(0)
            self.setOnWall(1)
        else:
            self.setOnFloor(1)
            self.setOnWall(0)
        if flags & CatalogFurnitureItem.FLOnTable:
            self.setOnTable(1)
        else:
            self.setOnTable(0)
        if flags & CatalogFurnitureItem.FLRug:
            self.setIsRug(1)
        else:
            self.setIsRug(0)
        if flags & CatalogFurnitureItem.FLIsTable:
            self.setIsTable(1)
        else:
            self.setIsTable(0)
        # Measure bounds with the item at the identity transform, then
        # restore the original transform afterwards.
        m = self.getTransform()
        self.iPosHpr()
        bMin, bMax = self.bounds = self.getTightBounds()
        # Pad the footprint slightly in X/Y.
        bMin -= Vec3(0.1, 0.1, 0)
        bMax += Vec3(0.1, 0.1, 0)
        # Footprint corners (counter-clockwise) slightly above the floor.
        self.c0 = Point3(bMin[0], bMin[1], 0.2)
        self.c1 = Point3(bMax[0], bMin[1], 0.2)
        self.c2 = Point3(bMax[0], bMax[1], 0.2)
        self.c3 = Point3(bMin[0], bMax[1], 0.2)
        self.center = (bMin + bMax) / 2.0
        # Drag handle: back-center for wall items, bottom-center otherwise.
        if flags & CatalogFurnitureItem.FLPainting:
            self.dragPoint = Vec3(self.center[0], bMax[1], self.center[2])
        else:
            self.dragPoint = Vec3(self.center[0], self.center[1], bMin[2])
        delta = self.dragPoint - self.c0
        self.radius = min(delta[0], delta[1])
        if self.getOnWall():
            self.setWallOffset(0.1)
        else:
            self.setWallOffset(self.radius + 0.1)
        self.makeCollisionBox()
        self.setTransform(m)
        self.unstashBuiltInCollisionNodes()
        shadows.unstash()

    def resetMovableObject(self):
        # Undo what __init__ added: drag collision box and the tag.
        self.unstashBuiltInCollisionNodes()
        self.collisionNodePath.removeNode()
        self.clearTag('movableObject')

    # Simple flag accessors (0/1 values set from the catalog flags above).
    def setOnFloor(self, fOnFloor):
        self.fOnFloor = fOnFloor

    def getOnFloor(self):
        return self.fOnFloor

    def setOnWall(self, fOnWall):
        self.fOnWall = fOnWall

    def getOnWall(self):
        return self.fOnWall

    def setOnTable(self, fOnTable):
        self.fOnTable = fOnTable

    def getOnTable(self):
        return self.fOnTable

    def setIsRug(self, fIsRug):
        self.fIsRug = fIsRug

    def getIsRug(self):
        return self.fIsRug

    def setIsTable(self, fIsTable):
        self.fIsTable = fIsTable

    def getIsTable(self):
        return self.fIsTable

    def setWallOffset(self, offset):
        self.wallOffset = offset

    def getWallOffset(self):
        return self.wallOffset

    def destroy(self):
        self.removeNode()

    def stashBuiltInCollisionNodes(self):
        self.builtInCNodes.stash()

    def unstashBuiltInCollisionNodes(self):
        self.builtInCNodes.unstash()

    def getFloorBitmask(self):
        # Items that sit on tables may also land on furniture tops.
        if self.getOnTable():
            return ToontownGlobals.FloorBitmask | ToontownGlobals.FurnitureTopBitmask
        else:
            return ToontownGlobals.FloorBitmask

    def getWallBitmask(self):
        # Rugs and wall items ignore other furniture's side collisions.
        if self.getIsRug() or self.getOnWall():
            return ToontownGlobals.WallBitmask
        else:
            return ToontownGlobals.WallBitmask | ToontownGlobals.FurnitureSideBitmask

    def makeCollisionBox(self):
        """Attach side (and, for tables, top) collision polygons."""
        self.collisionNodePath = self.attachNewNode('furnitureCollisionNode')
        # Rugs and wall items get no collision box at all.
        if self.getIsRug() or self.getOnWall():
            return
        mx = self.bounds[0][0] - 0.01
        Mx = self.bounds[1][0] + 0.01
        my = self.bounds[0][1] - 0.01
        My = self.bounds[1][1] + 0.01
        mz = self.bounds[0][2]
        Mz = self.bounds[1][2]
        # Four vertical side polygons around the padded bounds.
        cn = CollisionNode('sideCollisionNode')
        cn.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask)
        self.collisionNodePath.attachNewNode(cn)
        cp = CollisionPolygon(Point3(mx, My, mz), Point3(mx, my, mz), Point3(mx, my, Mz), Point3(mx, My, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(Mx, my, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz), Point3(Mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(mx, my, mz), Point3(Mx, my, mz), Point3(Mx, my, Mz), Point3(mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(Point3(Mx, My, mz), Point3(mx, My, mz), Point3(mx, My, Mz), Point3(Mx, My, Mz))
        cn.addSolid(cp)
        if self.getIsTable():
            # Horizontal top polygon so items can be placed on the table.
            cn = CollisionNode('topCollisionNode')
            cn.setIntoCollideMask(ToontownGlobals.FurnitureTopBitmask)
            self.collisionNodePath.attachNewNode(cn)
            cp = CollisionPolygon(Point3(mx, my, Mz), Point3(Mx, my, Mz), Point3(Mx, My, Mz), Point3(mx, My, Mz))
            cn.addSolid(cp)
class ObjectManager(NodePath, DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('ObjectManager')
    def __init__(self):
        """Set up picking rays, drag nodes, GUI and event hooks."""
        NodePath.__init__(self)
        self.assign(render.attachNewNode('objectManager'))
        # All movable furniture, keyed by node key; see loadObject().
        self.objectDict = {}
        self.selectedObject = None
        self.movingObject = 0
        self.deselectEvent = None
        # startPose remembers where a drag began so we can revert.
        self.startPose = render.attachNewNode('startPose')
        # Node chain used while dragging: mouse point -> grid snap ->
        # collision-response offset.
        self.dragPointNP = self.attachNewNode('dragPoint')
        self.gridSnapNP = self.dragPointNP.attachNewNode('gridSnap')
        self.collisionOffsetNP = self.gridSnapNP.attachNewNode('collisionResponse')
        # Pickers used for mouse selection and placement tests.
        self.iRay = SelectionRay()
        self.iSegment = SelectionSegment(numSegments=6)
        self.iSegment4 = SelectionSegment(numSegments=4)
        self.iSphere = SelectionSphere()
        self.houseExtents = None
        self.doorBlocker = None
        # Large horizontal plane the drag ray is cast against.
        cp = CollisionPolygon(Point3(-100, -100, 0), Point3(100, -100, 0), Point3(100, 100, 0), Point3(-100, 100, 0))
        cn = CollisionNode('dragCollisionNode')
        cn.addSolid(cp)
        cn.setIntoCollideMask(ToontownGlobals.FurnitureDragBitmask)
        self.collisionNP = NodePath(cn)
        # Selection outline drawn around the picked object.
        self.lnp = LineNodePath()
        self.fRecenter = 0
        self.gridSpacing = None
        self.firstTime = 0
        guiModels = loader.loadModel('phase_5.5/models/gui/house_design_gui')
        self.createSelectedObjectPanel(guiModels)
        self.createMainControls(guiModels)
        # Furniture-mode state; populated in enterFurnitureMode().
        self.furnitureManager = None
        self.atticPicker = None
        self.inRoomPicker = None
        self.inTrashPicker = None
        self.dialog = None
        self.deleteMode = 0
        self.nonDeletableItem = None
        self.verifyFrame = None
        self.deleteItemText = None
        self.okButton = None
        self.cancelButton = None
        self.itemIval = None
        self.itemPanel = None
        self.guiInterval = None
        self.accept('enterFurnitureMode', self.enterFurnitureMode)
        self.accept('exitFurnitureMode', self.exitFurnitureMode)
        return
    def enterFurnitureMode(self, furnitureManager, fDirector):
        """Switch into furniture-arranging mode for the given house.

        fDirector falsy means "leave" — exit any active session instead.
        """
        if not fDirector:
            if self.furnitureManager:
                self.exitFurnitureMode(self.furnitureManager)
            return
        if furnitureManager == self.furnitureManager:
            return
        # Entering for a different house: close the previous session first.
        if self.furnitureManager != None:
            self.exitFurnitureMode(self.furnitureManager)
        self.notify.info('enterFurnitureMode, fDirector = %s' % fDirector)
        self.furnitureManager = furnitureManager
        self.furnitureManager.d_avatarEnter()
        house = furnitureManager.getInteriorObject()
        house.hideExteriorWindows()
        self.setTargetNodePath(house.interior)
        self.createAtticPicker()
        # Wrap every existing furniture item so it can be dragged.
        self.initializeDistributedFurnitureItems(furnitureManager.dfitems)
        self.setCamPosIndex(DEFAULT_CAM_INDEX)
        base.localAvatar.controlManager.collisionsOff()
        taskMgr.remove('editModeTransition')
        # Snap the camera to the nearest 90-degree heading.
        self.orientCamH(base.localAvatar.getH(self.targetNodePath))
        self.accept('mouse1', self.moveObjectStart)
        self.accept('mouse1-up', self.moveObjectStop)
        self.furnitureGui.show()
        self.deleteMode = 0
        self.__updateDeleteButtons()
        self.showAtticPicker()
        base.localAvatar.laffMeter.stop()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 0)
        # Animate the furniture GUI into its editing position.
        if self.guiInterval:
            self.guiInterval.finish()
        self.guiInterval = self.furnitureGui.posHprScaleInterval(1.0, Point3(0.155, -0.6, -1.045), Vec3(0), Vec3(0.06), startPos=Point3(0.115, 0.0, -0.66), startHpr=Vec3(0), startScale=Vec3(0.04), blendType='easeInOut', name='lerpFurnitureButton')
        self.guiInterval.start()
        taskMgr.add(self.recenterButtonFrameTask, 'recenterButtonFrameTask', 10)
        messenger.send('wakeup')
        return
    def exitFurnitureMode(self, furnitureManager):
        """Tear down furniture-arranging mode and restore normal play."""
        # Ignore stale exit requests from a different session.
        if furnitureManager != self.furnitureManager:
            return
        self.notify.info('exitFurnitureMode')
        house = furnitureManager.getInteriorObject()
        if house:
            house.showExteriorWindows()
        self.furnitureManager.d_avatarExit()
        self.furnitureManager = None
        base.localAvatar.setCameraPositionByIndex(0)
        self.exitDeleteMode()
        self.houseExtents.detachNode()
        self.doorBlocker.detachNode()
        self.deselectObject()
        self.ignore('mouse1')
        self.ignore('mouse1-up')
        # Destroy whichever pickers were opened this session.
        if self.atticPicker:
            self.atticPicker.destroy()
            self.atticPicker = None
        if self.inRoomPicker:
            self.inRoomPicker.destroy()
            self.inRoomPicker = None
        if self.inTrashPicker:
            self.inTrashPicker.destroy()
            self.inTrashPicker = None
        self.__cleanupVerifyDelete()
        self.furnitureGui.hide()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 1)
        base.localAvatar.laffMeter.start()
        taskMgr.remove('recenterButtonFrameTask')
        self.cleanupDialog()
        taskMgr.remove('showHelpTextDoLater')
        messenger.send('wakeup')
        return
def initializeDistributedFurnitureItems(self, dfitems):
self.objectDict = {}
for item in dfitems:
mo = MovableObject(item, parent=self.targetNodePath)
self.objectDict[mo.get_key()] = mo
def setCamPosIndex(self, index):
self.camPosIndex = index
base.localAvatar.setCameraSettings(camPosList[index])
def zoomCamIn(self):
self.setCamPosIndex(max(0, self.camPosIndex - 1))
messenger.send('wakeup')
def zoomCamOut(self):
self.setCamPosIndex(min(len(camPosList) - 1, self.camPosIndex + 1))
messenger.send('wakeup')
def rotateCamCW(self):
self.orientCamH(base.localAvatar.getH(self.targetNodePath) - 90)
messenger.send('wakeup')
def rotateCamCCW(self):
self.orientCamH(base.localAvatar.getH(self.targetNodePath) + 90)
messenger.send('wakeup')
def orientCamH(self, toonH):
targetH = ROUND_TO(toonH, 90)
base.localAvatar.hprInterval(duration=1, hpr=Vec3(targetH, 0, 0), other=self.targetNodePath, blendType='easeInOut', name='editModeTransition').start()
def setTargetNodePath(self, nodePath):
self.targetNodePath = nodePath
if self.houseExtents:
self.houseExtents.removeNode()
if self.doorBlocker:
self.doorBlocker.removeNode()
self.makeHouseExtentsBox()
self.makeDoorBlocker()
self.collisionNP.reparentTo(self.targetNodePath)
def loadObject(self, filename):
mo = MovableObject(filename, parent=self.targetNodePath)
self.objectDict[mo.get_key()] = mo
self.selectObject(mo)
return mo
def pickObject(self):
self.iRay.setParentNP(base.cam)
entry = self.iRay.pickGeom(targetNodePath=self.targetNodePath, skipFlags=SKIP_ALL)
if entry:
nodePath = entry.getIntoNodePath()
if self.isMovableObject(nodePath):
self.selectObject(self.findObject(nodePath))
return
self.deselectObject()
def pickInRoom(self, objectId):
self.selectObject(self.objectDict.get(objectId))
    def selectObject(self, selectedObject):
        """Make selectedObject current: draw its outline and show controls."""
        messenger.send('wakeup')
        if self.selectedObject:
            self.deselectObject()
        if selectedObject:
            self.selectedObject = selectedObject
            # Auto-deselect if the distributed item gets disabled.
            self.deselectEvent = self.selectedObject.dfitem.uniqueName('disable')
            self.acceptOnce(self.deselectEvent, self.deselectObject)
            # Draw the selection outline around the object's footprint.
            self.lnp.reset()
            self.lnp.reparentTo(selectedObject)
            self.lnp.moveTo(selectedObject.c0)
            self.lnp.drawTo(selectedObject.c1)
            self.lnp.drawTo(selectedObject.c2)
            self.lnp.drawTo(selectedObject.c3)
            self.lnp.drawTo(selectedObject.c0)
            self.lnp.create()
            self.buttonFrame.show()
            self.enableButtonFrameTask()
            self.atticRoof.hide()
            # In case we dont want to move the Closet, Phone, Bank or Trunk to the attic
            if config.GetBool('want-permanent-interactables', False):
                if selectedObject.dfitem.item.getFlags() & CatalogFurnitureItem.FLCloset or \
                   selectedObject.dfitem.item.getFlags() & CatalogFurnitureItem.FLPhone or \
                   selectedObject.dfitem.item.getFlags() & CatalogFurnitureItem.FLBank or \
                   selectedObject.dfitem.item.getFlags() &CatalogFurnitureItem.FLTrunk:
                    self.sendToAtticButton.hide()
                    self.atticRoof.show()
                else:
                    self.sendToAtticButton.show()
            return
        # NOTE(review): reached only when selectedObject is falsy, yet it
        # shows the send-to-attic button — preserved as-is.
        self.sendToAtticButton.show()
def deselectObject(self):
self.moveObjectStop()
if self.deselectEvent:
self.ignore(self.deselectEvent)
self.deselectEvent = None
self.selectedObject = None
self.lnp.detachNode()
self.buttonFrame.hide()
self.disableButtonFrameTask()
self.sendToAtticButton.hide()
self.atticRoof.show()
return
def isMovableObject(self, nodePath):
return nodePath.hasNetTag('movableObject')
def findObject(self, nodePath):
np = nodePath.findNetTag('movableObject')
if np.isEmpty():
return None
else:
return self.objectDict.get(np.get_key(), None)
return None
    def moveObjectStop(self, *args):
        """End a drag: reattach the item to the room and restore collisions."""
        if self.movingObject:
            self.movingObject = 0
            taskMgr.remove('moveObjectTask')
            if self.selectedObject:
                # wrtReparentTo keeps the object's world transform while
                # handing it back to the room.
                self.selectedObject.wrtReparentTo(self.targetNodePath)
                self.selectedObject.collisionNodePath.unstash()
                # Presumably commits the final pos/hpr to the server —
                # TODO confirm against DistributedFurnitureItem.
                self.selectedObject.dfitem.stopAdjustPosHpr()
            # Other furniture becomes collidable again.
            for object in self.objectDict.values():
                object.unstashBuiltInCollisionNodes()
            # Restore the three-state (up/down/rollover) grab marker.
            self.centerMarker['image'] = [self.grabUp, self.grabDown, self.grabRollover]
            self.centerMarker.configure(text=['', TTLocalizer.HDMoveLabel], text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=0.3)
def moveObjectStart(self):
self.moveObjectStop()
self.pickObject()
self.moveObjectContinue()
    def moveObjectContinue(self, *args):
        """Start tracking the mouse with the currently selected object."""
        messenger.send('wakeup')
        if self.selectedObject:
            # While dragging, no furniture (including the dragged item)
            # should collide with the placement probes.
            for object in self.objectDict.values():
                object.stashBuiltInCollisionNodes()
            self.selectedObject.collisionNodePath.stash()
            # Presumably begins streaming pos/hpr updates to the server —
            # TODO confirm against DistributedFurnitureItem.
            self.selectedObject.dfitem.startAdjustPosHpr()
            # firstTime makes the next moveObjectTask run initialize the drag.
            self.firstTime = 1
            self.iPosHpr()
            self.startPoseValid = 0
            self.centerMarker['image'] = self.grabDown
            self.centerMarker.configure(text=TTLocalizer.HDMoveLabel, text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image_scale=0.3)
            taskMgr.add(self.moveObjectTask, 'moveObjectTask')
            self.movingObject = 1
def setLnpColor(self, r, g, b):
for i in range(5):
self.lnp.lineSegs.setVertexColor(i, r, g, b)
def markNewPosition(self, isValid):
if not isValid:
if self.startPoseValid:
self.collisionOffsetNP.setPosHpr(self.startPose, self.selectedObject.dragPoint, Vec3(0))
else:
self.startPoseValid = 1
    def moveObjectTask(self, state):
        """Per-frame drag task: follow the mouse, snap, align and validate."""
        so = self.selectedObject
        target = self.targetNodePath
        # Remember where the object is this frame so we can revert.
        self.startPose.iPosHpr(so)
        # Cast from the camera through the mouse onto the drag plane.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask(bitMask=ToontownGlobals.FurnitureDragBitmask, targetNodePath=target, skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            return Task.cont
        self.setPos(base.cam, entry.getSurfacePoint(base.cam))
        if self.firstTime:
            # First frame of the drag: set up the drag-node chain.
            self.firstTime = 0
            self.moveObjectInit()
        else:
            self.gridSnapNP.iPos()
            self.collisionOffsetNP.iPosHpr()
        # Optional grid snapping of the drag point.
        if self.gridSpacing:
            pos = self.dragPointNP.getPos(target)
            self.gridSnapNP.setPos(target, ROUND_TO(pos[0], self.gridSpacing), ROUND_TO(pos[1], self.gridSpacing), pos[2])
        # Probe toward the nearest wall and try to align against it.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask3D(bitMask=so.getWallBitmask(), targetNodePath=target, dir=Vec3(self.getNearProjectionPoint(self.gridSnapNP)), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        fWall = 0
        if not so.getOnTable():
            while entry:
                intoMask = entry.getIntoNodePath().node().getIntoCollideMask()
                # fClosest: the hit was furniture, not a real wall.
                fClosest = (intoMask & ToontownGlobals.WallBitmask).isZero()
                if self.alignObject(entry, target, fClosest=fClosest):
                    fWall = 1
                    break
                entry = self.iRay.findNextCollisionEntry(skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        # Wall-mounted items only need a wall hit; done for this frame.
        if so.getOnWall():
            self.markNewPosition(fWall)
            return Task.cont
        # Drop straight down to find the supporting floor (or table top).
        self.iRay.setParentNP(target)
        entry = self.iRay.pickBitMask3D(bitMask=so.getFloorBitmask(), targetNodePath=target, origin=Point3(self.gridSnapNP.getPos(target) + Vec3(0, 0, 10)), dir=Vec3(0, 0, -1), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            self.markNewPosition(0)
            return Task.cont
        nodePath = entry.getIntoNodePath()
        if self.isMovableObject(nodePath):
            self.gridSnapNP.setPos(target, Point3(entry.getSurfacePoint(target)))
        else:
            # Lift slightly above static floors to avoid z-fighting.
            self.gridSnapNP.setPos(target, Point3(entry.getSurfacePoint(target) + Vec3(0, 0, ToontownGlobals.FloorOffset)))
        if not fWall:
            # No wall alignment yet: sphere-probe nearby walls and align.
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=target, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if entry:
                self.alignObject(entry, target, fClosest=1)
        # Finally resolve furniture-to-furniture overlap.
        isValid = self.collisionTest()
        self.markNewPosition(isValid)
        return Task.cont
def collisionTest(self):
    """Try to resolve wall collisions for the dragged object.

    Strategy: (1) compute per-wall push-out offsets from the boundary
    segments and apply the two largest orthogonal components; (2) if still
    colliding, cast segments back toward the drag start pose and apply the
    largest escape offset found.

    Returns:
        1 if the object ends in a collision-free position, 0 otherwise.
    """
    so = self.selectedObject
    target = self.targetNodePath
    entry = self.segmentCollision()
    if not entry:
        return 1
    # Accumulate, per into-object, the largest push-out offset suggested
    # by any boundary segment hitting it.
    offsetDict = {}
    while entry:
        offset = self.computeSegmentOffset(entry)
        if offset:
            eid = entry.getInto()
            maxOffsetVec = offsetDict.get(eid, Vec3(0))
            if offset.length() > maxOffsetVec.length():
                maxOffsetVec.assign(offset)
            offsetDict[eid] = maxOffsetVec
        entry = self.iSegment.findNextCollisionEntry(skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
    if offsetDict:
        # BUG FIX: dict.keys() returns a non-subscriptable view in Python 3;
        # materialize it as a list before indexing/slicing.
        keys = list(offsetDict.keys())
        # Decompose the candidate offsets into two roughly orthogonal
        # directions (ortho1 = first offset's axis, ortho2 = perpendicular)
        # and keep the longest component along each.
        ortho1 = offsetDict[keys[0]]
        ortho2 = Vec3(0)
        v1 = Vec3(ortho1)
        v1.normalize()
        for key in keys[1:]:
            offset = offsetDict[key]
            v2 = Vec3(offset)
            v2.normalize()
            dp = v1.dot(v2)
            if abs(dp) > 0.95:
                # Nearly parallel to ortho1: keep the longer one.
                if offset.length() > ortho1.length():
                    ortho1.assign(offset)
            elif abs(dp) < 0.05:
                # Nearly perpendicular: candidate for ortho2.
                if offset.length() > ortho2.length():
                    ortho2.assign(offset)
            else:
                # Oblique: split into components parallel/perpendicular to ortho1.
                o1Len = ortho1.length()
                parallelVec = Vec3(ortho1 * offset.dot(ortho1) / (o1Len * o1Len))
                perpVec = Vec3(offset - parallelVec)
                if parallelVec.length() > o1Len:
                    ortho1.assign(parallelVec)
                if perpVec.length() > ortho2.length():
                    ortho2.assign(perpVec)
        totalOffset = ortho1 + ortho2
        self.collisionOffsetNP.setPos(self.collisionOffsetNP, totalOffset)
        if not self.segmentCollision():
            return 1
    # Push-out failed; try backing up toward where the drag started.
    m = self.startPose.getMat(so)
    deltaMove = Vec3(m.getRow3(3))
    if deltaMove.length() == 0:
        return 1
    self.iSegment4.setParentNP(so)
    entry = self.iSegment4.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=target, endPointList=[(so.c0, Point3(m.xformPoint(so.c0))),
                                                                                                        (so.c1, Point3(m.xformPoint(so.c1))),
                                                                                                        (so.c2, Point3(m.xformPoint(so.c2))),
                                                                                                        (so.c3, Point3(m.xformPoint(so.c3)))], skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
    maxLen = 0
    maxOffset = None
    while entry:
        offset = Vec3(entry.getSurfacePoint(entry.getFromNodePath()) - entry.getFrom().getPointA())
        offsetLen = Vec3(offset).length()
        if offsetLen > maxLen:
            maxLen = offsetLen
            maxOffset = offset
        entry = self.iSegment4.findNextCollisionEntry(skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
    if maxOffset:
        self.collisionOffsetNP.setPos(self.collisionOffsetNP, maxOffset)
        if not self.segmentCollision():
            return 1
    return 0
def segmentCollision(self):
    """Cast the selected object's four boundary edges plus both diagonals
    against wall geometry; return the first CollisionEntry hit (or None/empty)."""
    so = self.selectedObject
    self.iSegment.setParentNP(so)
    c0, c1, c2, c3 = so.c0, so.c1, so.c2, so.c3
    # Perimeter edges followed by the two diagonals of the footprint quad.
    segments = [
        (c0, c1),
        (c1, c2),
        (c2, c3),
        (c3, c0),
        (c0, c2),
        (c1, c3),
    ]
    return self.iSegment.pickBitMask(bitMask=so.getWallBitmask(),
                                     targetNodePath=self.targetNodePath,
                                     endPointList=segments,
                                     skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
def computeSegmentOffset(self, entry):
    """Compute a push-out vector for one segment collision entry.

    Projects the hit onto the surface normal and returns the (slightly
    inflated, *1.01) offset that would move the segment endpoint clear of
    the surface, or None when the entry has no normal or the hit faces
    away from the drag point.
    """
    fromNodePath = entry.getFromNodePath()
    if entry.hasSurfaceNormal():
        normal = entry.getSurfaceNormal(fromNodePath)
    else:
        # No usable normal on this surface; nothing to push against.
        return None
    hitPoint = entry.getSurfacePoint(fromNodePath)
    # Express hit point/normal in the drag start pose's space to decide
    # whether the surface faces the drag point.
    m = self.selectedObject.getMat(self.startPose)
    hp = Point3(m.xformPoint(hitPoint))
    hpn = Vec3(m.xformVec(normal))
    hitPointVec = Vec3(hp - self.selectedObject.dragPoint)
    if hitPointVec.dot(hpn) > 0:
        # Surface faces away from the drag point; ignore this contact.
        return None
    nLen = normal.length()
    # Project segment endpoint A onto the normal; prefer that offset when it
    # points along the normal, otherwise use endpoint B's projection.
    offsetVecA = hitPoint - entry.getFrom().getPointA()
    offsetA = normal * offsetVecA.dot(normal) / (nLen * nLen)
    if offsetA.dot(normal) > 0:
        return offsetA * 1.01
    else:
        offsetVecB = hitPoint - entry.getFrom().getPointB()
        offsetB = normal * offsetVecB.dot(normal) / (nLen * nLen)
        return offsetB * 1.01
    # NOTE: the original ended with an unreachable `return None` here
    # (both branches above return); removed as dead code.
def alignObject(self, entry, target, fClosest = 0, wallOffset = None):
    """Snap gridSnapNP flush against the wall surface hit by `entry`.

    Only acts on near-vertical surfaces (|normal.z| < 0.1). When fClosest
    is true the object keeps its nearest 90-degree heading relative to the
    wall; otherwise it faces the wall squarely. wallOffset defaults to the
    selected object's own wall offset.

    Returns:
        1 if the object was aligned to a wall, 0 otherwise.
    """
    if not entry.hasSurfaceNormal():
        return 0
    normal = entry.getSurfaceNormal(target)
    if abs(normal.dot(Vec3(0, 0, 1))) < 0.1:
        # Build a throwaway node oriented to face into the wall.
        tempNP = target.attachNewNode('temp')
        normal.setZ(0)
        normal.normalize()
        lookAtNormal = Point3(normal)
        lookAtNormal *= -1
        tempNP.lookAt(lookAtNormal)
        realAngle = ROUND_TO(self.gridSnapNP.getH(tempNP), 90.0)
        if fClosest:
            angle = realAngle
        else:
            angle = 0
        self.gridSnapNP.setHpr(tempNP, angle, 0, 0)
        hitPoint = entry.getSurfacePoint(target)
        tempNP.setPos(hitPoint)
        # IDIOM FIX: compare to None with `is`, not `==`.
        if wallOffset is None:
            wallOffset = self.selectedObject.getWallOffset()
        # Back the object off the wall by its wall offset.
        self.gridSnapNP.setPos(tempNP, 0, -wallOffset, 0)
        tempNP.removeNode()
        if realAngle == 180.0:
            self.gridSnapNP.setH(self.gridSnapNP.getH() + 180.0)
        return 1
    return 0
def rotateLeft(self):
    """Rotate the selected object counter-clockwise by one 22.5-degree step.

    Wall-mounted items spin about their roll axis; floor items change
    heading, but only when a surrounding-sphere probe finds no wall in the
    way, and then re-run the collision test.
    """
    if not self.selectedObject:
        return
    so = self.selectedObject
    so.dfitem.startAdjustPosHpr()
    self.iPosHpr(so)
    self.moveObjectInit()
    if so.getOnWall():
        startR = self.gridSnapNP.getR()
        newR = ROUND_TO(startR + 22.5, 22.5)
        self.gridSnapNP.setR(newR)
    else:
        startH = self.gridSnapNP.getH(self.targetNodePath)
        # CONSISTENCY FIX: normalize to [0, 360) exactly as rotateRight does;
        # HPR headings are equivalent modulo 360, so behavior is unchanged.
        newH = ROUND_TO(startH - 22.5, 22.5) % 360.0
        self.iSphere.setParentNP(self.gridSnapNP)
        self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
        entry = self.iSphere.pickBitMask(bitMask=so.getWallBitmask(), targetNodePath=self.targetNodePath, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            self.gridSnapNP.setHpr(self.targetNodePath, newH, 0, 0)
        self.collisionTest()
    so.wrtReparentTo(self.targetNodePath)
    self.disableButtonFrameTask()
    so.dfitem.stopAdjustPosHpr()
def rotateRight(self):
    """Rotate the selected object clockwise by one 22.5-degree step."""
    obj = self.selectedObject
    if not obj:
        return
    obj.dfitem.startAdjustPosHpr()
    self.iPosHpr(obj)
    self.moveObjectInit()
    if obj.getOnWall():
        # Wall items spin about their roll axis.
        self.gridSnapNP.setR(ROUND_TO(self.gridSnapNP.getR() - 22.5, 22.5))
    else:
        # Floor items change heading, but only when no wall is nearby.
        heading = ROUND_TO(self.gridSnapNP.getH(self.targetNodePath) + 22.5, 22.5) % 360.0
        self.iSphere.setParentNP(self.gridSnapNP)
        self.iSphere.setCenterRadius(0, Point3(0), obj.radius * 1.25)
        blocked = self.iSphere.pickBitMask(bitMask=obj.getWallBitmask(), targetNodePath=self.targetNodePath, skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        if not blocked:
            self.gridSnapNP.setHpr(self.targetNodePath, heading, 0, 0)
        self.collisionTest()
    obj.wrtReparentTo(self.targetNodePath)
    self.disableButtonFrameTask()
    obj.dfitem.stopAdjustPosHpr()
def moveObjectInit(self):
    """Reset the drag helper nodes and hand the selected object to the
    collision-offset node for the upcoming move."""
    obj = self.selectedObject
    self.dragPointNP.setPosHpr(obj, obj.dragPoint, Vec3(0))
    self.gridSnapNP.iPosHpr()
    self.collisionOffsetNP.iPosHpr()
    obj.wrtReparentTo(self.collisionOffsetNP)
def resetFurniture(self):
    """Return every tracked movable object to its idle state and clear
    the selection UI."""
    for movable in self.objectDict.values():
        movable.resetMovableObject()
    self.objectDict = {}
    self.deselectObject()
    self.buttonFrame.hide()
def destroy(self):
    """Full teardown: unhook events, stop the GUI interval, leave furniture
    mode if still active, and free every GUI/collision resource created
    by this designer. Order matters: furniture mode is exited before the
    GUI elements it uses are destroyed."""
    self.ignore('enterFurnitureMode')
    self.ignore('exitFurnitureMode')
    if self.guiInterval:
        self.guiInterval.finish()
    if self.furnitureManager:
        # Still in furniture mode -- leave cleanly before tearing down.
        self.exitFurnitureMode(self.furnitureManager)
    self.cleanupDialog()
    self.resetFurniture()
    self.buttonFrame.destroy()
    self.furnitureGui.destroy()
    if self.houseExtents:
        self.houseExtents.removeNode()
    if self.doorBlocker:
        self.doorBlocker.removeNode()
    self.removeNode()
    if self.verifyFrame:
        # The verify dialog was built lazily; release it and its children.
        self.verifyFrame.destroy()
        self.verifyFrame = None
        self.deleteItemText = None
        self.okButton = None
        self.cancelButton = None
    return
def createSelectedObjectPanel(self, guiModels):
    """Build the floating control frame shown over a selected object:
    a grab/drag hand button plus CCW/CW rotate arrows.

    Args:
        guiModels: loaded model tree containing the hand and arrow geometry.
    """
    self.buttonFrame = DirectFrame(scale=0.5)
    self.grabUp = guiModels.find('**/handup')
    self.grabDown = guiModels.find('**/handdown')
    self.grabRollover = guiModels.find('**/handrollover')
    # Center "hand" button: press starts the drag, release stops it.
    self.centerMarker = DirectButton(parent=self.buttonFrame, text=['', TTLocalizer.HDMoveLabel], text_pos=(0, 1), text_scale=0.7, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=[self.grabUp, self.grabDown, self.grabRollover], image_scale=0.3, relief=None, scale=0.12)
    self.centerMarker.bind(DGG.B1PRESS, self.moveObjectContinue)
    self.centerMarker.bind(DGG.B1RELEASE, self.moveObjectStop)
    guiCCWArrowUp = guiModels.find('**/LarrowUp')
    guiCCWArrowDown = guiModels.find('**/LarrowDown')
    guiCCWArrowRollover = guiModels.find('**/LarrowRollover')
    # Counter-clockwise rotate arrow.
    self.rotateLeftButton = DirectButton(parent=self.buttonFrame, relief=None, image=(guiCCWArrowUp,
                                                                                      guiCCWArrowDown,
                                                                                      guiCCWArrowRollover,
                                                                                      guiCCWArrowUp), image_pos=(0, 0, 0.1), image_scale=0.15, image3_color=Vec4(0.5, 0.5, 0.5, 0.75), text=('',
                                                                                                                                                                                              TTLocalizer.HDRotateCCWLabel,
                                                                                                                                                                                              TTLocalizer.HDRotateCCWLabel,
                                                                                                                                                                                              ''), text_pos=(0.135, -0.1), text_scale=0.1, text_align=TextNode.ARight, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(-.125, 0, -.2), scale=0.7, command=self.rotateLeft)
    # Re-enable frame tracking once the pointer leaves the button.
    self.rotateLeftButton.bind(DGG.EXIT, self.enableButtonFrameTask)
    guiCWArrowUp = guiModels.find('**/RarrowUp')
    guiCWArrowDown = guiModels.find('**/RarrowDown')
    guiCWArrowRollover = guiModels.find('**/RarrowRollover')
    # Clockwise rotate arrow.
    self.rotateRightButton = DirectButton(parent=self.buttonFrame, relief=None, image=(guiCWArrowUp,
                                                                                       guiCWArrowDown,
                                                                                       guiCWArrowRollover,
                                                                                       guiCWArrowUp), image_pos=(0, 0, 0.1), image_scale=0.15, image3_color=Vec4(0.5, 0.5, 0.5, 0.75), text=('',
                                                                                                                                                                                             TTLocalizer.HDRotateCWLabel,
                                                                                                                                                                                             TTLocalizer.HDRotateCWLabel,
                                                                                                                                                                                             ''), text_pos=(-0.135, -0.1), text_scale=0.1, text_align=TextNode.ALeft, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), pos=(0.125, 0, -0.2), scale=0.7, command=self.rotateRight)
    self.rotateRightButton.bind(DGG.EXIT, self.enableButtonFrameTask)
    # Hidden until an object is selected.
    self.buttonFrame.hide()
    return
def recenterButtonFrameTask(self, state):
    """Per-frame task: keep the selection button frame positioned over the
    currently selected object while tracking is enabled."""
    if self.fRecenter and self.selectedObject:
        self.buttonFrame.setPos(self.getSelectedObjectScreenXY())
    return Task.cont
def disableButtonFrameTask(self, event = None):
    """Stop the recenter task from tracking the selection (the DirectGui
    event argument is accepted but unused)."""
    self.fRecenter = 0
def enableButtonFrameTask(self, event = None):
    """Let the recenter task track the selection again (the DirectGui
    event argument is accepted but unused)."""
    self.fRecenter = 1
def getNearProjectionPoint(self, nodePath):
    """Project nodePath's camera-space position onto the near plane and
    return that point (camera-space)."""
    origin = nodePath.getPos(camera)
    depth = origin[1]
    if depth == 0.0:
        # Degenerate: the point lies in the camera plane.
        return Point3(0, base.camLens.getNear(), 0)
    return origin * (base.camLens.getNear() / depth)
def getSelectedObjectScreenXY(self):
    """Return the aspect2d-space position of the selected object's center,
    clamped so the button frame stays on screen."""
    probe = self.selectedObject.attachNewNode('temp')
    probe.setPos(self.selectedObject.center)
    nearVec = self.getNearProjectionPoint(probe)
    probe.removeNode()
    # Scale from the near plane out to the focal plane.
    nearVec *= base.camLens.getFocalLength() / base.camLens.getNear()
    filmSize = base.camLens.getFilmSize()
    render2dX = CLAMP(nearVec[0] / (filmSize[0] / 2.0), -.9, 0.9)
    aspect2dX = render2dX * base.getAspectRatio()
    aspect2dZ = CLAMP(nearVec[2] / (filmSize[1] / 2.0), -.8, 0.9)
    return Vec3(aspect2dX, 0, aspect2dZ)
def createMainControls(self, guiModels):
    """Build the main furniture-arranging GUI: the attic frame, the three
    picker tabs (attic/room/trash), camera controls, delete-mode trashcan
    buttons and the rollover help label.

    Args:
        guiModels: loaded model tree containing all the GUI geometry.
    """
    attic = guiModels.find('**/attic')
    self.furnitureGui = DirectFrame(relief=None, parent=base.a2dTopLeft, pos=(0.155, -0.6, -1.045), scale=0.04, image=attic)
    # "Done moving furniture" button at the top of the frame.
    bMoveStopUp = guiModels.find('**/bu_atticX/bu_attic_up')
    bMoveStopDown = guiModels.find('**/bu_atticX/bu_attic_down')
    bMoveStopRollover = guiModels.find('**/bu_atticX/bu_attic_rollover')
    self.bStopMoveFurniture = DirectButton(parent=self.furnitureGui, relief=None, image=[bMoveStopUp,
                                                                                        bMoveStopDown,
                                                                                        bMoveStopRollover,
                                                                                        bMoveStopUp], text=['', TTLocalizer.HDStopMoveFurnitureButton, TTLocalizer.HDStopMoveFurnitureButton], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_font=ToontownGlobals.getInterfaceFont(), pos=(-0.3, 0, 9.4), command=base.localAvatar.stopMoveFurniture)
    self.bindHelpText(self.bStopMoveFurniture, 'DoneMoving')
    # Decorative frame pieces.
    self.atticRoof = DirectLabel(parent=self.furnitureGui, relief=None, image=guiModels.find('**/rooftile'))
    self.itemBackgroundFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/item_backgroun'), image_pos=(0, 0, -22), image_scale=(1, 1, 5))
    self.scrollUpFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/scrollup'), pos=(0, 0, -0.58))
    self.camButtonFrame = DirectFrame(parent=self.furnitureGui, relief=None, image=guiModels.find('**/low'), pos=(0, 0, -11.69))
    # Three side-tab buttons that switch between the pickers.
    tagUp = guiModels.find('**/tag_up')
    tagDown = guiModels.find('**/tag_down')
    tagRollover = guiModels.find('**/tag_rollover')
    self.inAtticButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInAtticLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, 4), scale=0.8, command=self.showAtticPicker)
    self.bindHelpText(self.inAtticButton, 'Attic')
    self.inRoomButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInRoomLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, 1.1), scale=0.8, command=self.showInRoomPicker)
    self.bindHelpText(self.inRoomButton, 'Room')
    self.inTrashButton = DirectButton(parent=self.itemBackgroundFrame, relief=None, text=TTLocalizer.HDInTrashLabel, text_pos=(-0.1, -0.25), image=[tagUp, tagDown, tagRollover], pos=(2.85, 0, -1.8), scale=0.8, command=self.showInTrashPicker)
    self.bindHelpText(self.inTrashButton, 'Trash')
    # Rotate each tab's text components to run vertically along the tab.
    for i in range(4):
        self.inAtticButton.component('text%d' % i).setR(-90)
        self.inRoomButton.component('text%d' % i).setR(-90)
        self.inTrashButton.component('text%d' % i).setR(-90)
    # "Send selected item back to attic" button (hidden until relevant).
    backInAtticUp = guiModels.find('**/bu_backinattic_up1')
    backInAtticDown = guiModels.find('**/bu_backinattic_down1')
    backInAtticRollover = guiModels.find('**/bu_backinattic_rollover2')
    self.sendToAtticButton = DirectButton(parent=self.furnitureGui, relief=None, pos=(0.4, 0, 12.8), text=['', TTLocalizer.HDToAtticLabel], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_pos=(1.2, -0.3), image=[backInAtticUp, backInAtticDown, backInAtticRollover], command=self.sendItemToAttic)
    self.sendToAtticButton.hide()
    self.bindHelpText(self.sendToAtticButton, 'SendToAttic')
    # Camera zoom and orbit controls.
    zoomInUp = guiModels.find('**/bu_RzoomOut_up')
    zoomInDown = guiModels.find('**/bu_RzoomOut_down')
    zoomInRollover = guiModels.find('**/bu_RzoomOut_rollover')
    self.zoomInButton = DirectButton(parent=self.camButtonFrame, image=[zoomInUp, zoomInDown, zoomInRollover], relief=None, pos=(0.9, 0, -0.75), command=self.zoomCamIn)
    self.bindHelpText(self.zoomInButton, 'ZoomIn')
    zoomOutUp = guiModels.find('**/bu_LzoomIn_up')
    zoomOutDown = guiModels.find('**/bu_LzoomIn_down')
    zoomOutRollover = guiModels.find('**/buLzoomIn_rollover')
    self.zoomOutButton = DirectButton(parent=self.camButtonFrame, image=[zoomOutUp, zoomOutDown, zoomOutRollover], relief=None, pos=(-1.4, 0, -0.75), command=self.zoomCamOut)
    self.bindHelpText(self.zoomOutButton, 'ZoomOut')
    camCCWUp = guiModels.find('**/bu_Rarrow_up1')
    camCCWDown = guiModels.find('**/bu_Rarrow_down1')
    camCCWRollover = guiModels.find('**/bu_Rarrow_orllover')
    self.rotateCamLeftButton = DirectButton(parent=self.camButtonFrame, image=[camCCWUp, camCCWDown, camCCWRollover], relief=None, pos=(0.9, 0, -3.0), command=self.rotateCamCCW)
    self.bindHelpText(self.rotateCamLeftButton, 'RotateLeft')
    camCWUp = guiModels.find('**/bu_Larrow_up1')
    camCWDown = guiModels.find('**/bu_Larrow_down1')
    camCWRollover = guiModels.find('**/bu_Larrow_rollover2')
    self.rotateCamRightButton = DirectButton(parent=self.camButtonFrame, image=[camCWUp, camCWDown, camCWRollover], relief=None, pos=(-1.4, 0, -3.0), command=self.rotateCamCW)
    self.bindHelpText(self.rotateCamRightButton, 'RotateRight')
    # Delete-mode enter/exit buttons share the same trashcan art and position;
    # only one is visible at a time (see __updateDeleteButtons).
    trashcanGui = loader.loadModel('phase_3/models/gui/trashcan_gui')
    trashcanUp = trashcanGui.find('**/TrashCan_CLSD')
    trashcanDown = trashcanGui.find('**/TrashCan_OPEN')
    trashcanRollover = trashcanGui.find('**/TrashCan_RLVR')
    self.deleteEnterButton = DirectButton(parent=self.furnitureGui, image=(trashcanUp,
                                                                          trashcanDown,
                                                                          trashcanRollover,
                                                                          trashcanUp), text=['',
                                                                                             TTLocalizer.InventoryDelete,
                                                                                             TTLocalizer.InventoryDelete,
                                                                                             ''], text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_align=TextNode.ACenter, text_pos=(0, -0.12), text_font=ToontownGlobals.getInterfaceFont(), textMayChange=0, relief=None, pos=(3.7, 0.0, -13.8), scale=7.13, command=self.enterDeleteMode)
    self.bindHelpText(self.deleteEnterButton, 'DeleteEnter')
    self.deleteExitButton = DirectButton(parent=self.furnitureGui, image=(trashcanUp,
                                                                         trashcanDown,
                                                                         trashcanRollover,
                                                                         trashcanUp), text=('',
                                                                                            TTLocalizer.InventoryDone,
                                                                                            TTLocalizer.InventoryDone,
                                                                                            ''), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_align=TextNode.ACenter, text_pos=(0, -0.12), text_font=ToontownGlobals.getInterfaceFont(), textMayChange=0, relief=None, pos=(3.7, 0.0, -13.8), scale=7.13, command=self.exitDeleteMode)
    self.bindHelpText(self.deleteExitButton, 'DeleteExit')
    self.deleteExitButton.hide()
    self.trashcanBase = DirectLabel(parent=self.furnitureGui, image=guiModels.find('**/trashcan_base'), relief=None, pos=(0, 0, -11.64))
    self.furnitureGui.hide()
    # Rollover help label; filled in by bindHelpText handlers.
    self.helpText = DirectLabel(parent=self.furnitureGui, relief=DGG.SUNKEN, frameSize=(-0.5,
                                                                                        10,
                                                                                        -3,
                                                                                        0.9), frameColor=(0.2, 0.2, 0.2, 0.5), borderWidth=(0.01, 0.01), text='', text_wordwrap=12, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.8, pos=(3, 0.0, -7), scale=1, text_align=TextNode.ALeft)
    self.helpText.hide()
    return
def createAtticPicker(self):
    """Create one panel per attic furniture, wallpaper and window item,
    then build (or rebuild) the attic scrolled list from them."""
    self.atticItemPanels = [
        FurnitureItemPanel(item, index, command=self.bringItemFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
        for index, item in enumerate(self.furnitureManager.atticItems)]
    self.atticWallpaperPanels = [
        FurnitureItemPanel(item, index, command=self.bringWallpaperFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
        for index, item in enumerate(self.furnitureManager.atticWallpaper)]
    self.atticWindowPanels = [
        FurnitureItemPanel(item, index, command=self.bringWindowFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
        for index, item in enumerate(self.furnitureManager.atticWindows)]
    self.regenerateAtticPicker()
def regenerateAtticPicker(self):
    """Destroy and rebuild the attic scrolled list, preserving the scroll
    position and hiding it when another picker tab is active."""
    previousIndex = 0
    if self.atticPicker:
        previousIndex = self.atticPicker.getSelectedIndex()
        # Detach the panels first so destroying the list doesn't take them down.
        for panel in self.atticItemPanels + self.atticWallpaperPanels + self.atticWindowPanels:
            panel.detachNode()
        self.atticPicker.destroy()
        self.atticPicker = None
    itemList = self.atticItemPanels + self.atticWallpaperPanels + self.atticWindowPanels
    label = TTLocalizer.HDDeletePickerLabel if self.deleteMode else TTLocalizer.HDAtticPickerLabel
    self.atticPicker = self.createScrolledList(itemList, label, 'atticPicker', previousIndex)
    if self.inRoomPicker or self.inTrashPicker:
        self.atticPicker.hide()
    else:
        self.atticPicker.show()
def createInRoomPicker(self):
    """Create one panel per furniture item currently placed in the room,
    then build the in-room scrolled list."""
    self.inRoomPanels = []
    # IDIOM FIX: renamed loop variable from `object`, which shadowed the builtin.
    for objectId, movable in self.objectDict.items():
        panel = FurnitureItemPanel(movable.dfitem.item, objectId, command=self.requestReturnToAttic, deleteMode=self.deleteMode, withinFunc=self.pickInRoom, helpCategory='FurnitureItemPanelRoom')
        self.inRoomPanels.append(panel)
    self.regenerateInRoomPicker()
def regenerateInRoomPicker(self):
    """Destroy and rebuild the in-room scrolled list, preserving the
    current scroll position."""
    previousIndex = 0
    if self.inRoomPicker:
        previousIndex = self.inRoomPicker.getSelectedIndex()
        for panel in self.inRoomPanels:
            panel.detachNode()
        self.inRoomPicker.destroy()
        self.inRoomPicker = None
    label = TTLocalizer.HDDeletePickerLabel if self.deleteMode else TTLocalizer.HDInRoomPickerLabel
    self.inRoomPicker = self.createScrolledList(self.inRoomPanels, label, 'inRoomPicker', previousIndex)
def createInTrashPicker(self):
    """Create one panel per recently-deleted item, then build the trash
    scrolled list."""
    self.inTrashPanels = [
        FurnitureItemPanel(item, index, command=self.requestReturnToAtticFromTrash, helpCategory='FurnitureItemPanelTrash')
        for index, item in enumerate(self.furnitureManager.deletedItems)]
    self.regenerateInTrashPicker()
def regenerateInTrashPicker(self):
    """Destroy and rebuild the trash scrolled list, preserving the current
    scroll position."""
    previousIndex = 0
    if self.inTrashPicker:
        previousIndex = self.inTrashPicker.getSelectedIndex()
        for panel in self.inTrashPanels:
            panel.detachNode()
        self.inTrashPicker.destroy()
        self.inTrashPicker = None
    self.inTrashPicker = self.createScrolledList(self.inTrashPanels, TTLocalizer.HDInTrashPickerLabel, 'inTrashPicker', previousIndex)
def createScrolledList(self, itemList, text, name, selectedIndex):
    """Build a DirectScrolledList picker inside the furniture GUI.

    Args:
        itemList: FurnitureItemPanel instances to display.
        text: title label shown above the list.
        name: node name assigned to the list (identifies which picker it is).
        selectedIndex: index to scroll to, restoring the previous position.

    Returns:
        The configured DirectScrolledList.
    """
    gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
    # Both scroll buttons reuse the same arrow art; the inc button is
    # flipped vertically via its negative Z scale.
    picker = DirectScrolledList(parent=self.furnitureGui, pos=(-0.38, 0.0, 3), scale=7.125, relief=None, items=itemList, numItemsVisible=5, text=text, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_pos=(0, 0.4), decButton_image=(gui.find('**/FndsLst_ScrollUp'),
                                                                                                                                                                                                                               gui.find('**/FndsLst_ScrollDN'),
                                                                                                                                                                                                                               gui.find('**/FndsLst_ScrollUp_Rllvr'),
                                                                                                                                                                                                                               gui.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_scale=(1.5, 1.5, 1.5), decButton_pos=(0, 0, 0.3), decButton_image3_color=Vec4(1, 1, 1, 0.1), incButton_image=(gui.find('**/FndsLst_ScrollUp'),
                                                                                                                                                                                                                                                                                                                                                                                                          gui.find('**/FndsLst_ScrollDN'),
                                                                                                                                                                                                                                                                                                                                                                                                          gui.find('**/FndsLst_ScrollUp_Rllvr'),
                                                                                                                                                                                                                                                                                                                                                                                                          gui.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_scale=(1.5, 1.5, -1.5), incButton_pos=(0, 0, -1.878), incButton_image3_color=Vec4(1, 1, 1, 0.1))
    picker.setName(name)
    picker.scrollTo(selectedIndex)
    return picker
def reset(self):
    """Tear down this designer and its furniture menu.

    BUG FIX: the original signature was ``def reset():`` — it omitted
    ``self`` even though the body references it, so any call raised.

    NOTE(review): ``furnitureMenu`` is not defined anywhere in this scope;
    presumably a module-level global elsewhere in the file — confirm before
    relying on this method.
    """
    self.destroy()
    furnitureMenu.destroy()
def showAtticPicker(self):
    """Switch the picker pane to the attic tab: drop the other pickers,
    highlight the attic tab and re-enable delete-mode toggles."""
    if self.inRoomPicker:
        self.inRoomPicker.destroy()
        self.inRoomPicker = None
    if self.inTrashPicker:
        self.inTrashPicker.destroy()
        self.inTrashPicker = None
    self.atticPicker.show()
    # Active tab at full brightness, the others dimmed.
    self.inAtticButton['image_color'] = Vec4(1, 1, 1, 1)
    self.inRoomButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.inTrashButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.deleteExitButton['state'] = 'normal'
    self.deleteEnterButton['state'] = 'normal'
def showInRoomPicker(self):
    """Switch the picker pane to the in-room tab, building its picker on
    first use."""
    messenger.send('wakeup')
    if not self.inRoomPicker:
        self.createInRoomPicker()
    self.atticPicker.hide()
    if self.inTrashPicker:
        self.inTrashPicker.destroy()
        self.inTrashPicker = None
    # Active tab at full brightness, the others dimmed.
    self.inRoomButton['image_color'] = Vec4(1, 1, 1, 1)
    self.inAtticButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.inTrashButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.deleteExitButton['state'] = 'normal'
    self.deleteEnterButton['state'] = 'normal'
def showInTrashPicker(self):
    """Switch the picker pane to the trash tab, building its picker on
    first use. Delete mode is unavailable while viewing the trash."""
    messenger.send('wakeup')
    if not self.inTrashPicker:
        self.createInTrashPicker()
    self.atticPicker.hide()
    if self.inRoomPicker:
        self.inRoomPicker.destroy()
        self.inRoomPicker = None
    # Active tab at full brightness, the others dimmed.
    self.inTrashButton['image_color'] = Vec4(1, 1, 1, 1)
    self.inAtticButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.inRoomButton['image_color'] = Vec4(0.8, 0.8, 0.8, 1)
    self.deleteExitButton['state'] = 'disabled'
    self.deleteEnterButton['state'] = 'disabled'
def sendItemToAttic(self):
    """Ask the server to move the selected object back to the attic and
    clear the selection; the reply arrives via __sendItemToAtticCallback."""
    if config.GetBool('want-qa-regression', 0):
        self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic')
    messenger.send('wakeup')
    selected = self.selectedObject
    if not selected:
        return
    callback = PythonUtil.Functor(self.__sendItemToAtticCallback, selected.get_key())
    self.furnitureManager.moveItemToAttic(selected.dfitem, callback)
    self.deselectObject()
def __sendItemToAtticCallback(self, objectId, retcode, item):
    """Server reply for moveItemToAttic: drop the room-side object, add an
    attic panel for the item and refresh both pickers.

    Args:
        objectId: key of the object that was sent (bound via Functor).
        retcode: server result; negative means failure.
        item: the catalog item that was moved.
    """
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to send item %s to attic, reason %s.' % (item.getName(), retcode))
        return
    del self.objectDict[objectId]
    # IDIOM FIX: `is not None` instead of `!= None`.
    if self.selectedObject is not None and self.selectedObject.get_key() == objectId:
        self.selectedObject.detachNode()
        self.deselectObject()
    itemIndex = len(self.atticItemPanels)
    panel = FurnitureItemPanel(item, itemIndex, command=self.bringItemFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
    self.atticItemPanels.append(panel)
    self.regenerateAtticPicker()
    if self.inRoomPicker:
        # Remove the matching in-room panel; stop after the first match.
        for i in range(len(self.inRoomPanels)):
            if self.inRoomPanels[i].itemId == objectId:
                del self.inRoomPanels[i]
                self.regenerateInRoomPicker()
                return
    # (The original ended with a second, redundant bare return; removed.)
def cleanupDialog(self, buttonValue = None):
    """Dismiss the confirmation dialog if one is up and re-enable the item
    buttons (buttonValue is supplied by DirectGui callbacks but unused)."""
    dialog = self.dialog
    if dialog:
        dialog.cleanup()
        self.dialog = None
    self.__enableItemButtons(1)
def enterDeleteMode(self):
    """Turn delete mode on and refresh all delete-mode UI."""
    self.deleteMode = 1
    self.__updateDeleteMode()
def exitDeleteMode(self):
    """Turn delete mode off and refresh all delete-mode UI."""
    self.deleteMode = 0
    self.__updateDeleteMode()
def __updateDeleteMode(self):
    """Propagate the current deleteMode flag to both pickers: retitle them,
    flip every panel into/out of delete mode, rebind help text, and swap
    the trashcan enter/exit buttons."""
    if not self.atticPicker:
        # GUI not built yet; nothing to refresh.
        return
    self.notify.debug('__updateDeleteMode deleteMode=%s' % self.deleteMode)
    if self.deleteMode:
        # NOTE(review): framePanelColor is computed in both branches but
        # never used in this method — possibly leftover from an older version.
        framePanelColor = DeletePickerPanelColor
        atticText = TTLocalizer.HDDeletePickerLabel
        inRoomText = TTLocalizer.HDDeletePickerLabel
        helpCategory = 'FurnitureItemPanelDelete'
    else:
        framePanelColor = NormalPickerPanelColor
        atticText = TTLocalizer.HDAtticPickerLabel
        inRoomText = TTLocalizer.HDInRoomPickerLabel
        helpCategory = None
    if self.inRoomPicker:
        self.inRoomPicker['text'] = inRoomText
        for panel in self.inRoomPicker['items']:
            panel.setDeleteMode(self.deleteMode)
            panel.bindHelpText(helpCategory)
    if self.atticPicker:
        self.atticPicker['text'] = atticText
        for panel in self.atticPicker['items']:
            panel.setDeleteMode(self.deleteMode)
            panel.bindHelpText(helpCategory)
    self.__updateDeleteButtons()
    return
def __updateDeleteButtons(self):
    """Show exactly one of the two trashcan buttons: 'exit delete mode'
    while delete mode is active, 'enter delete mode' otherwise."""
    if self.deleteMode:
        visible, hidden = self.deleteExitButton, self.deleteEnterButton
    else:
        visible, hidden = self.deleteEnterButton, self.deleteExitButton
    visible.show()
    hidden.hide()
def deleteItemFromRoom(self, dfitem, objectId, itemIndex):
    """Ask the server to delete a placed furniture item; the reply is
    handled by __deleteItemFromRoomCallback with these ids bound in."""
    messenger.send('wakeup')
    done = PythonUtil.Functor(self.__deleteItemFromRoomCallback, objectId, itemIndex)
    self.furnitureManager.deleteItemFromRoom(dfitem, done)
def __deleteItemFromRoomCallback(self, objectId, itemIndex, retcode, item):
    """Server reply for deleteItemFromRoom: drop the tracked object, clear
    the selection if it was the deleted one, and refresh the room picker.

    Args:
        objectId, itemIndex: bound via Functor at request time.
        retcode: server result; negative means failure.
        item: the catalog item that was deleted.
    """
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to delete item %s from room, reason %s.' % (item.getName(), retcode))
        return
    del self.objectDict[objectId]
    # IDIOM FIX: `is not None` instead of `!= None`.
    if self.selectedObject is not None and self.selectedObject.get_key() == objectId:
        self.selectedObject.detachNode()
        self.deselectObject()
    if self.inRoomPicker and itemIndex is not None:
        del self.inRoomPanels[itemIndex]
        self.regenerateInRoomPicker()
def bringItemFromAttic(self, item, itemIndex):
    """Place an attic item into the room (or delete it in delete mode).

    Normal items land 2 units in front of the avatar; paintings are instead
    aligned to the wall the avatar is facing. The server reply is handled
    by __bringItemFromAtticCallback.

    Args:
        item: the catalog item being placed.
        itemIndex: its index within the attic item panels.
    """
    if config.GetBool('want-qa-regression', 0):
        self.notify.info('QA-REGRESSION: ESTATE: Place Item in Room')
    messenger.send('wakeup')
    self.__enableItemButtons(0)
    if self.deleteMode:
        # Delete mode: confirm-and-delete instead of placing.
        self.requestDelete(item, itemIndex, self.deleteItemFromAttic)
        return
    # Default placement: 2 units in front of the avatar, default heading.
    pos = self.targetNodePath.getRelativePoint(base.localAvatar, Point3(0, 2, 0))
    hpr = Point3(0, 0, 0)
    if abs(pos[0]) > 3000 or abs(pos[1]) > 3000 or abs(pos[2]) > 300:
        # Sanity log for wildly out-of-range placements.
        self.notify.warning('bringItemFromAttic extreme pos targetNodePath=%s avatar=%s %s' % (repr(self.targetNodePath.getPos(render)), repr(base.localAvatar.getPos(render)), repr(pos)))
    if item.getFlags() & CatalogFurnitureItem.FLPainting:
        # Paintings: ray-cast forward from the avatar to find a wall,
        # with all furniture collision temporarily stashed out of the way.
        for object in self.objectDict.values():
            object.stashBuiltInCollisionNodes()
        self.gridSnapNP.iPosHpr()
        target = self.targetNodePath
        self.iRay.setParentNP(base.localAvatar)
        entry = self.iRay.pickBitMask3D(bitMask=ToontownGlobals.WallBitmask, targetNodePath=target, origin=Point3(0, 0, 6), dir=Vec3(0, 1, 0), skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        for object in self.objectDict.values():
            object.unstashBuiltInCollisionNodes()
        if entry:
            self.alignObject(entry, target, fClosest=0, wallOffset=0.1)
            pos = self.gridSnapNP.getPos(target)
            hpr = self.gridSnapNP.getHpr(target)
        else:
            self.notify.warning('wall not found for painting')
    self.furnitureManager.moveItemFromAttic(itemIndex, (pos[0],
                                                        pos[1],
                                                        pos[2],
                                                        hpr[0],
                                                        hpr[1],
                                                        hpr[2]), self.__bringItemFromAtticCallback)
def __bringItemFromAtticCallback(self, retcode, dfitem, itemIndex):
    """Server reply for moveItemFromAttic: load the placed object, remove
    its attic panel (renumbering the rest) and refresh the pickers."""
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to bring furniture item %s into room, reason %s.' % (itemIndex, retcode))
        return
    mo = self.loadObject(dfitem)
    objectId = mo.get_key()
    self.atticItemPanels[itemIndex].destroy()
    del self.atticItemPanels[itemIndex]
    # Panels after the removed one shift down by one index.
    for i in range(itemIndex, len(self.atticItemPanels)):
        self.atticItemPanels[i].itemId -= 1
    self.regenerateAtticPicker()
    if self.inRoomPicker:
        panel = FurnitureItemPanel(dfitem.item, objectId, command=self.requestReturnToAttic, helpCategory='FurnitureItemPanelRoom')
        self.inRoomPanels.append(panel)
        self.regenerateInRoomPicker()
def deleteItemFromAttic(self, item, itemIndex):
    """Ask the server to permanently delete an attic furniture item;
    the reply is handled by __deleteItemFromAtticCallback."""
    messenger.send('wakeup')
    self.furnitureManager.deleteItemFromAttic(item, itemIndex, self.__deleteItemFromAtticCallback)
def __deleteItemFromAtticCallback(self, retcode, item, itemIndex):
    """Server reply for deleteItemFromAttic: drop the panel, renumber the
    remaining ones and rebuild the picker."""
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to delete furniture item %s, reason %s.' % (itemIndex, retcode))
        return
    self.atticItemPanels.pop(itemIndex).destroy()
    # Every panel after the removed one shifts down by one index.
    for panel in self.atticItemPanels[itemIndex:]:
        panel.itemId -= 1
    self.regenerateAtticPicker()
def bringWallpaperFromAttic(self, item, itemIndex):
    """Apply an attic wallpaper to whichever room the avatar stands in
    (or delete it in delete mode)."""
    messenger.send('wakeup')
    self.__enableItemButtons(0)
    if self.deleteMode:
        self.requestDelete(item, itemIndex, self.deleteWallpaperFromAttic)
        return
    # The avatar's Y position decides which of the two rooms gets the paper.
    room = 0 if base.localAvatar.getY() < 2.3 else 1
    self.furnitureManager.moveWallpaperFromAttic(itemIndex, room, self.__bringWallpaperFromAtticCallback)
def __bringWallpaperFromAtticCallback(self, retcode, itemIndex, room):
    """Server reply for moveWallpaperFromAttic: the attic slot now holds
    the swapped-out paper, so rebuild that slot's panel."""
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to bring wallpaper %s into room %s, reason %s.' % (itemIndex, room, retcode))
        return
    self.atticWallpaperPanels[itemIndex].destroy()
    swappedOut = self.furnitureManager.atticWallpaper[itemIndex]
    self.atticWallpaperPanels[itemIndex] = FurnitureItemPanel(swappedOut, itemIndex, command=self.bringWallpaperFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
    self.regenerateAtticPicker()
def deleteWallpaperFromAttic(self, item, itemIndex):
    """Ask the server to permanently delete an attic wallpaper;
    the reply is handled by __deleteWallpaperFromAtticCallback."""
    messenger.send('wakeup')
    self.furnitureManager.deleteWallpaperFromAttic(item, itemIndex, self.__deleteWallpaperFromAtticCallback)
def __deleteWallpaperFromAtticCallback(self, retcode, item, itemIndex):
    """Server reply for deleteWallpaperFromAttic: drop the panel, renumber
    the rest and rebuild the picker."""
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to delete wallpaper %s, reason %s.' % (itemIndex, retcode))
        return
    self.atticWallpaperPanels.pop(itemIndex).destroy()
    # Every panel after the removed one shifts down by one index.
    for panel in self.atticWallpaperPanels[itemIndex:]:
        panel.itemId -= 1
    self.regenerateAtticPicker()
def bringWindowFromAttic(self, item, itemIndex):
    """Install an attic window into the room the avatar stands in
    (or delete it in delete mode)."""
    messenger.send('wakeup')
    self.__enableItemButtons(0)
    if self.deleteMode:
        self.requestDelete(item, itemIndex, self.deleteWindowFromAttic)
        return
    # The avatar's Y position decides which window slot to use.
    slot = 2 if base.localAvatar.getY() < 2.3 else 4
    self.furnitureManager.moveWindowFromAttic(itemIndex, slot, self.__bringWindowFromAtticCallback)
def __bringWindowFromAtticCallback(self, retcode, itemIndex, slot):
    """Server reply for moveWindowFromAttic.

    On a swap the attic slot now holds the previous window, so its panel is
    rebuilt in place; otherwise the panel is removed and later panels are
    renumbered. Either way the attic picker is regenerated.
    """
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to bring window %s into slot %s, reason %s.' % (itemIndex, slot, retcode))
        return
    if retcode == ToontownGlobals.FM_SwappedItem:
        # A window already occupied the slot; it swapped into this attic index.
        self.atticWindowPanels[itemIndex].destroy()
        item = self.furnitureManager.atticWindows[itemIndex]
        panel = FurnitureItemPanel(item, itemIndex, command=self.bringWindowFromAttic, deleteMode=self.deleteMode, helpCategory='FurnitureItemPanelAttic')
        self.atticWindowPanels[itemIndex] = panel
    else:
        self.atticWindowPanels[itemIndex].destroy()
        del self.atticWindowPanels[itemIndex]
        # Panels after the removed one shift down by one index.
        for i in range(itemIndex, len(self.atticWindowPanels)):
            self.atticWindowPanels[i].itemId -= 1
    self.regenerateAtticPicker()
def deleteWindowFromAttic(self, item, itemIndex):
    """Ask the server to permanently delete an attic window;
    the reply is handled by __deleteWindowFromAtticCallback."""
    messenger.send('wakeup')
    self.furnitureManager.deleteWindowFromAttic(item, itemIndex, self.__deleteWindowFromAtticCallback)
def __deleteWindowFromAtticCallback(self, retcode, item, itemIndex):
    """Server reply for deleteWindowFromAttic: drop the panel, renumber
    the rest and rebuild the picker."""
    self.__enableItemButtons(1)
    if retcode < 0:
        self.notify.info('Unable to delete window %s, reason %s.' % (itemIndex, retcode))
        return
    self.atticWindowPanels.pop(itemIndex).destroy()
    # Every panel after the removed one shifts down by one index.
    for panel in self.atticWindowPanels[itemIndex:]:
        panel.itemId -= 1
    self.regenerateAtticPicker()
def setGridSpacingString(self, spacingStr):
    """Parse a grid-spacing literal (e.g. '0.5', '1', 'None') and apply it.

    SECURITY FIX: the original used eval(), which executes arbitrary code
    from the string. ast.literal_eval accepts the same numeric/None
    literals but nothing executable.
    """
    from ast import literal_eval
    self.setGridSpacing(literal_eval(spacingStr))
def setGridSpacing(self, gridSpacing):
    """Record the drag-snap grid spacing; a falsy value disables snapping
    (the drag task only snaps when self.gridSpacing is truthy)."""
    self.gridSpacing = gridSpacing
def makeHouseExtentsBox(self):
    """Wrap the house geometry's tight bounds in four vertical collision
    quads (ghost mask) so furniture cannot be dragged outside the house."""
    houseGeom = self.targetNodePath.findAllMatches('**/group*')
    lo, hi = houseGeom.getTightBounds()
    self.houseExtents = self.targetNodePath.attachNewNode('furnitureCollisionNode')
    cn = CollisionNode('extentsCollisionNode')
    cn.setIntoCollideMask(ToontownGlobals.GhostBitmask)
    self.houseExtents.attachNewNode(cn)
    mx, my, mz = lo[0], lo[1], lo[2]
    Mx, My, Mz = hi[0], hi[1], hi[2]
    # One inward-facing quad per side of the bounding box.
    cn.addSolid(CollisionPolygon(Point3(mx, my, mz), Point3(mx, My, mz), Point3(mx, My, Mz), Point3(mx, my, Mz)))
    cn.addSolid(CollisionPolygon(Point3(Mx, My, mz), Point3(Mx, my, mz), Point3(Mx, my, Mz), Point3(Mx, My, Mz)))
    cn.addSolid(CollisionPolygon(Point3(Mx, my, mz), Point3(mx, my, mz), Point3(mx, my, Mz), Point3(Mx, my, Mz)))
    cn.addSolid(CollisionPolygon(Point3(mx, My, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz), Point3(mx, My, Mz)))
def makeDoorBlocker(self):
    """Drop a collision sphere in front of the door so furniture cannot
    be dragged into the doorway."""
    self.doorBlocker = self.targetNodePath.attachNewNode('doorBlocker')
    blockerNode = CollisionNode('doorBlockerCollisionNode')
    blockerNode.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask)
    self.doorBlocker.attachNewNode(blockerNode)
    blockerNode.addSolid(CollisionSphere(Point3(-12, -33, 0), 7.5))
def createVerifyDialog(self, item, verifyText, okFunc, cancelFunc):
    """Show a confirm dialog for `item` with OK/Cancel callbacks.

    The frame, buttons and label are built lazily on first use; later
    calls only retarget the text, commands and item picture.
    """
    if self.verifyFrame == None:
        # First use: build the dialog widgets once.
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
        self.verifyFrame = DirectFrame(pos=(-0.4, 0.1, 0.3), scale=0.75, relief=None, image=DGG.getDefaultDialogGeom(), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.2, 1, 1.3), text='', text_wordwrap=19, text_scale=0.06, text_pos=(0, 0.5), textMayChange=1, sortOrder=NO_FADE_SORT_INDEX)
        self.okButton = DirectButton(parent=self.verifyFrame, image=okButtonImage, relief=None, text=OTPLocalizer.DialogOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(-0.22, 0.0, -0.5))
        self.cancelButton = DirectButton(parent=self.verifyFrame, image=cancelButtonImage, relief=None, text=OTPLocalizer.DialogCancel, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.22, 0.0, -0.5))
        self.deleteItemText = DirectLabel(parent=self.verifyFrame, relief=None, text='', text_wordwrap=16, pos=(0.0, 0.0, -0.4), scale=0.09)
    # Re-dress the dialog for this request.
    self.verifyFrame['text'] = verifyText
    self.deleteItemText['text'] = item.getName()
    self.okButton['command'] = okFunc
    self.cancelButton['command'] = cancelFunc
    self.verifyFrame.show()
    # Show the item's picture if it has one; the name label is moved to
    # fit whichever layout applies.
    self.itemPanel, self.itemIval = item.getPicture(base.localAvatar)
    if self.itemPanel:
        self.itemPanel.reparentTo(self.verifyFrame, -1)
        self.itemPanel.setPos(0, 0, 0.05)
        self.itemPanel.setScale(0.35)
        self.deleteItemText.setPos(0.0, 0.0, -0.4)
    else:
        self.deleteItemText.setPos(0, 0, 0.07)
    if self.itemIval:
        self.itemIval.loop()
    return
def __handleVerifyDeleteOK(self):
    """OK pressed on the delete-confirmation dialog: run the queued delete."""
    if config.GetBool('want-qa-regression', 0):
        self.notify.info('QA-REGRESSION: ESTATE: Send Item to Trash')
    # verifyItems was stashed as (function, arg1, arg2, ...); grab it
    # before cleanup resets it to None.
    queued = self.verifyItems
    self.__cleanupVerifyDelete()
    queued[0](*queued[1:])
def __cleanupVerifyDelete(self, *args):
    """Tear down any confirmation dialog state (dialog, picture, interval)."""
    if self.nonDeletableItem:
        self.nonDeletableItem.cleanup()
        self.nonDeletableItem = None
    if self.verifyFrame:
        self.verifyFrame.hide()
        # The picture interval/panel only exist while the frame does.
        if self.itemIval:
            self.itemIval.finish()
            self.itemIval = None
        if self.itemPanel:
            self.itemPanel.destroy()
            self.itemPanel = None
        self.verifyItems = None
    return
def __enableItemButtons(self, enabled):
    """Enable (enabled=1) or disable (0) the attic/room/trash buttons
    and every non-empty panel in the three pickers."""
    self.notify.debug('__enableItemButtons %d' % enabled)
    buttonState = DGG.NORMAL if enabled else DGG.DISABLED
    # Not all three buttons exist in every mode; guard each one.
    for buttonName in ('inAtticButton', 'inRoomButton', 'inTrashButton'):
        if hasattr(self, buttonName):
            getattr(self, buttonName)['state'] = buttonState
    for picker in (self.atticPicker, self.inRoomPicker, self.inTrashPicker):
        if picker:
            for panel in picker['items']:
                if not panel.isEmpty():
                    panel.enable(enabled)
def __resetAndCleanup(self, *args):
    """Cancel path for the confirm dialogs: re-enable the UI, then tear
    down any dialog state."""
    self.__enableItemButtons(1)
    self.__cleanupVerifyDelete()
def requestDelete(self, item, itemIndex, deleteFunction):
    """Ask for confirmation before deleting `item` via `deleteFunction`.

    Refuses (with an explanation dialog) when the avatar does not own
    the house or the item is not deletable.
    """
    self.__cleanupVerifyDelete()
    isOwner = self.furnitureManager.ownerId == base.localAvatar.doId
    if not isOwner or not item.isDeletable():
        self.warnNonDeletableItem(item)
        return
    self.createVerifyDialog(item, TTLocalizer.HDDeleteItem,
                            self.__handleVerifyDeleteOK,
                            self.__resetAndCleanup)
    # Queued for __handleVerifyDeleteOK as (func, *args).
    self.verifyItems = (deleteFunction, item, itemIndex)
def requestRoomDelete(self, dfitem, objectId, itemIndex):
    """Ask for confirmation before deleting a placed (distributed) item."""
    self.__cleanupVerifyDelete()
    item = dfitem.item
    isOwner = self.furnitureManager.ownerId == base.localAvatar.doId
    if not isOwner or not item.isDeletable():
        self.warnNonDeletableItem(item)
        return
    self.createVerifyDialog(item, TTLocalizer.HDDeleteItem,
                            self.__handleVerifyDeleteOK,
                            self.__resetAndCleanup)
    # Queued for __handleVerifyDeleteOK as (func, *args).
    self.verifyItems = (self.deleteItemFromRoom, dfitem, objectId, itemIndex)
def warnNonDeletableItem(self, item):
    """Pop an acknowledge dialog explaining why `item` cannot be deleted."""
    message = TTLocalizer.HDNonDeletableItem
    if self.furnitureManager.ownerId != base.localAvatar.doId:
        # Not our house: ownership trumps any item-specific reason.
        message = TTLocalizer.HDNonDeletableNotOwner % self.furnitureManager.ownerName
    elif not item.isDeletable():
        flags = item.getFlags()
        if flags & CatalogFurnitureItem.FLBank:
            message = TTLocalizer.HDNonDeletableBank
        elif flags & CatalogFurnitureItem.FLCloset:
            message = TTLocalizer.HDNonDeletableCloset
        elif flags & CatalogFurnitureItem.FLPhone:
            message = TTLocalizer.HDNonDeletablePhone
        elif flags & CatalogFurnitureItem.FLTrunk:
            message = TTLocalizer.HDNonDeletableTrunk
    self.nonDeletableItem = TTDialog.TTDialog(text=message, style=TTDialog.Acknowledge, fadeScreen=0, command=self.__resetAndCleanup)
    self.nonDeletableItem.show()
def requestReturnToAttic(self, item, objectId):
    """Confirm sending a placed item back to the attic.

    In delete mode this instead routes straight to the room-delete
    confirmation.
    """
    self.__cleanupVerifyDelete()
    itemIndex = None
    for index, panel in enumerate(self.inRoomPanels):
        if panel.itemId == objectId:
            itemIndex = index
            self.__enableItemButtons(0)
            break
    if self.deleteMode:
        dfitem = self.objectDict[objectId].dfitem
        self.requestRoomDelete(dfitem, objectId, itemIndex)
        return
    self.createVerifyDialog(item, TTLocalizer.HDReturnVerify,
                            self.__handleVerifyReturnOK,
                            self.__resetAndCleanup)
    self.verifyItems = (item, objectId)
    return
def __handleVerifyReturnOK(self):
    """OK pressed on the return-to-attic dialog: pick the item up and
    send it back to the attic."""
    # Read the queued (item, objectId) before cleanup clears it.
    item, objectId = self.verifyItems
    self.__cleanupVerifyDelete()
    self.pickInRoom(objectId)
    self.sendItemToAttic()
def requestReturnToAtticFromTrash(self, item, itemIndex):
    """Confirm recovering a trashed item back into the attic."""
    self.__cleanupVerifyDelete()
    self.__enableItemButtons(0)
    self.createVerifyDialog(item, TTLocalizer.HDReturnFromTrashVerify,
                            self.__handleVerifyReturnFromTrashOK,
                            self.__resetAndCleanup)
    self.verifyItems = (item, itemIndex)
def __handleVerifyReturnFromTrashOK(self):
    """OK pressed on the recover-from-trash dialog."""
    if config.GetBool('want-qa-regression', 0):
        self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic')
    # Read the queued (item, itemIndex) before cleanup clears it.
    item, itemIndex = self.verifyItems
    self.__cleanupVerifyDelete()
    self.recoverDeletedItem(item, itemIndex)
def recoverDeletedItem(self, item, itemIndex):
    """Ask the furniture manager to pull `item` back out of the trash;
    the reply is handled by __recoverDeletedItemCallback."""
    messenger.send('wakeup')
    self.furnitureManager.recoverDeletedItem(
        item, itemIndex, self.__recoverDeletedItemCallback)
def __recoverDeletedItemCallback(self, retcode, item, itemIndex):
    """Server reply for recoverDeletedItem: remove the trash panel and
    append a fresh panel to the matching attic picker.

    Fix: the four-way item-type dispatch was written twice (once to pick
    the index, once to pick the list); it is now evaluated once.
    """
    self.__cleanupVerifyDelete()
    if retcode < 0:
        if retcode == ToontownGlobals.FM_HouseFull:
            self.showHouseFullDialog()
        self.notify.info('Unable to recover deleted item %s, reason %s.' % (itemIndex, retcode))
        return
    self.__enableItemButtons(1)
    # Drop the panel from the trash picker and reindex the remainder.
    self.inTrashPanels[itemIndex].destroy()
    del self.inTrashPanels[itemIndex]
    for shifted in self.inTrashPanels[itemIndex:]:
        shifted.itemId -= 1
    self.regenerateInTrashPicker()
    # Surface coverings and windows live in their own attic panel lists;
    # everything else is generic furniture.
    surfaceTypes = (CatalogItemTypes.WALLPAPER_ITEM,
                    CatalogItemTypes.FLOORING_ITEM,
                    CatalogItemTypes.MOULDING_ITEM,
                    CatalogItemTypes.WAINSCOTING_ITEM)
    itemType = item.getTypeCode()
    if itemType in surfaceTypes:
        panels = self.atticWallpaperPanels
        bringCommand = self.bringWallpaperFromAttic
    elif itemType == CatalogItemTypes.WINDOW_ITEM:
        panels = self.atticWindowPanels
        bringCommand = self.bringWindowFromAttic
    else:
        panels = self.atticItemPanels
        bringCommand = self.bringItemFromAttic
    # The new panel's id is its position at the end of the chosen list.
    panels.append(FurnitureItemPanel(item, len(panels), command=bringCommand,
                                     deleteMode=self.deleteMode,
                                     helpCategory='FurnitureItemPanelAttic'))
    self.regenerateAtticPicker()
def showHouseFullDialog(self):
    """Tell the user the house already holds the maximum item count."""
    self.cleanupDialog()
    self.dialog = TTDialog.TTDialog(style=TTDialog.Acknowledge,
                                    text=TTLocalizer.HDHouseFull,
                                    text_wordwrap=15,
                                    command=self.cleanupDialog)
    self.dialog.show()
def bindHelpText(self, button, category):
    """Attach hover help to `button`: schedule the balloon on enter,
    cancel/hide it on exit."""
    button.bind(DGG.ENTER, self.showHelpText, extraArgs=[category, None])
    button.bind(DGG.EXIT, self.hideHelpText)
def showHelpText(self, category, itemName, xy):
    """Schedule the help balloon for a hovered GUI element.

    The text appears after a short hover delay; hideHelpText cancels the
    pending task when the pointer leaves the element.
    """
    def showIt(task):
        helpText = TTLocalizer.HDHelpDict.get(category)
        if helpText:
            if itemName:
                helpText = helpText % itemName
            self.helpText['text'] = helpText
            self.helpText.show()
        else:
            # Bug fix: the original printed the '%s' placeholder
            # literally -- the category was never interpolated.
            print('category: %s not found' % category)
    taskMgr.doMethodLater(0.75, showIt, 'showHelpTextDoLater')
def hideHelpText(self, xy):
    """Cancel any pending help balloon and blank the visible one."""
    taskMgr.remove('showHelpTextDoLater')
    self.helpText['text'] = ''
    self.helpText.hide()
| |
#!/usr/bin/env python3
# Snippets to read from the colmap database taken from:
# https://github.com/colmap/colmap/blob/ad7bd93f1a27af7533121aa043a167fe1490688c /
# scripts/python/export_to_bundler.py
# scripts/python/read_write_model.py
# License is that derived from those files.
import argparse
import logging
import math
import os
import sqlite3
from collections import defaultdict
from pathlib import Path
from struct import unpack
import cv2
import matplotlib.pyplot as pl
import numpy as np
import opensfm.actions.undistort as osfm_u
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from opensfm import dataset
from opensfm import features
from opensfm import pygeometry
from opensfm import pysfm
from opensfm import types
# Name of the directory (created next to the COLMAP database) that the
# OpenSfM dataset is exported into.
EXPORT_DIR_NAME = "opensfm_export"
logger = logging.getLogger(__name__)
# COLMAP camera-model table: model_id -> (model name, parameter count).
# Used to size the parameter reads when parsing cameras.bin.
camera_models = {
    0: ("SIMPLE_PINHOLE", 3),
    1: ("PINHOLE", 4),
    2: ("SIMPLE_RADIAL", 4),
    3: ("RADIAL", 5),
    4: ("OPENCV", 8),
    5: ("OPENCV_FISHEYE", 8),
    6: ("FULL_OPENCV", 12),
    7: ("FOV", 5),
    8: ("SIMPLE_RADIAL_FISHEYE", 4),
    9: ("RADIAL_FISHEYE", 5),
    10: ("THIN_PRISM_FISHEYE", 12),
}
def compute_and_save_undistorted_reconstruction(
    reconstruction, tracks_manager, data, udata
):
    """Undistort every shot of *reconstruction* and persist the results.

    Each supported camera model is replaced by its perspective
    equivalent; images, tracks and the reconstruction are saved to
    *udata* in undistorted form. Returns the undistorted reconstruction.
    """
    image_format = data.config["undistorted_image_format"]
    urec = types.Reconstruction()
    utracks_manager = pysfm.TracksManager()
    undistorted_shots = []
    for shot in reconstruction.shots.values():
        # Pick the undistortion conversion matching the source model.
        if shot.camera.projection_type == "perspective":
            ucamera = osfm_u.perspective_camera_from_perspective(shot.camera)
        elif shot.camera.projection_type == "brown":
            ucamera = osfm_u.perspective_camera_from_brown(shot.camera)
        elif shot.camera.projection_type == "fisheye":
            ucamera = osfm_u.perspective_camera_from_fisheye(shot.camera)
        else:
            raise ValueError
        urec.add_camera(ucamera)
        ushot = osfm_u.get_shot_with_different_camera(urec, shot, ucamera, image_format)
        if tracks_manager:
            osfm_u.add_subshot_tracks(tracks_manager, utracks_manager, shot, ushot)
        undistorted_shots.append(ushot)
        image = data.load_image(shot.id, unchanged=True, anydepth=True)
        if image is not None:
            max_size = data.config["undistorted_image_max_size"]
            undistorted = osfm_u.undistort_image(
                shot, undistorted_shots, image, cv2.INTER_AREA, max_size
            )
            for k, v in undistorted.items():
                udata.save_undistorted_image(k, v)
    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)
    return urec
def small_colorbar(ax, mappable=None):
    """Attach a slim (5%-wide) colorbar to the right-hand side of *ax*."""
    side_axes = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    pl.colorbar(cax=side_axes, mappable=mappable)
def depth_colormap(d, cmap=None, invalid_val=0, invalid_color=(0.5, 0.5, 0.5)):
    """
    Colormap *d*, painting entries equal to *invalid_val* with *invalid_color*.

    Returns (rgb, mappable): the RGB image without alpha, plus the
    ScalarMappable for drawing a matching colorbar.
    """
    mappable = cm.ScalarMappable(cmap=cm.get_cmap(cmap))
    mappable.set_array(d)
    rgb = mappable.to_rgba(d)[:, :, :3]
    rgb[d == invalid_val] = invalid_color
    return rgb, mappable
def import_cameras_images(db, data):
    """Import camera models and per-image EXIF stubs from the COLMAP db.

    Returns (cameras, images_map), where images_map maps a COLMAP
    image_id to (filename, camera_id).
    """
    cursor = db.cursor()
    cursor.execute(
        "SELECT camera_id, model, width, height, prior_focal_length, params FROM "
        "cameras;"
    )
    cameras = {}
    for row in cursor:
        camera_id, camera_model_id, width, height, prior_focal, params = row
        # np.fromstring on binary data is deprecated; frombuffer is the
        # supported equivalent for decoding the sqlite BLOB.
        params = np.frombuffer(params, dtype=np.double)
        cam = cam_from_colmap_params(
            camera_model_id, width, height, params, prior_focal
        )
        cam.id = str(camera_id)
        cameras[camera_id] = cam
    data.save_camera_models(cameras)
    images_map = {}
    cursor.execute("SELECT image_id, camera_id, name FROM images;")
    for row in cursor:
        image_id, camera_id, filename = int(row[0]), int(row[1]), row[2]
        images_map[image_id] = (filename, camera_id)
        cam = cameras[camera_id]
        focal_ratio = cam.focal_x if cam.projection_type == "brown" else cam.focal
        # The COLMAP database stores no capture metadata, so write a
        # minimal EXIF record with placeholder make/model/time.
        exif_data = {
            "make": "unknown",
            "model": "unknown",
            "width": cam.width,
            "height": cam.height,
            "projection_type": cam.projection_type,
            "focal_ratio": focal_ratio,
            "orientation": 1,
            "camera": "{}".format(camera_id),
            "skey": "TheSequence",
            "capture_time": 0.0,
            "gps": {},
        }
        data.save_exif(filename, exif_data)
    cursor.close()
    return cameras, images_map
def pair_id_to_image_ids(pair_id):
    """Invert COLMAP's pair encoding: pair_id = id1 * 2147483647 + id2."""
    return divmod(pair_id, 2147483647)
def get_scale_orientation_from_affine(arr):
    """Recover per-keypoint scale and orientation from affine keypoint
    rows laid out as (x, y, a11, a12, a21, a22).

    Scale is the mean of the two column norms of the 2x2 affine part;
    orientation is the angle of its first column.
    """
    a11, a12, a21, a22 = arr[:, 2], arr[:, 3], arr[:, 4], arr[:, 5]
    scale_x = np.sqrt(a11 * a11 + a21 * a21)
    scale_y = np.sqrt(a12 * a12 + a22 * a22)
    # shear = np.arctan2(-a12, a22) - orientation (unused)
    return (scale_x + scale_y) / 2, np.arctan2(a21, a11)
def import_features(db, data, image_map, camera_map):
    """Import keypoints, per-keypoint colors and descriptors from the
    COLMAP database into the OpenSfM dataset.

    Returns {image_id: (n, 4) array of (x, y, scale, orientation)} with
    x/y in OpenSfM normalized image coordinates.
    """
    cursor = db.cursor()
    cursor.execute("SELECT image_id, rows, cols, data FROM keypoints;")
    keypoints = {}
    colors = {}
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, camera_id = image_map[image_id]
        cam = camera_map[camera_id]
        # frombuffer replaces the deprecated binary-mode fromstring; the
        # copy is required because frombuffer returns a read-only view
        # and the normalized coordinates are written back in place below.
        arr = np.frombuffer(arr, dtype=np.float32).reshape((n_rows, n_cols)).copy()
        # Sample the pixel color under each keypoint (clipped to bounds).
        rgb = data.load_image(filename).astype(np.float32)
        xc = np.clip(arr[:, 1].astype(int), 0, rgb.shape[0] - 1)
        yc = np.clip(arr[:, 0].astype(int), 0, rgb.shape[1] - 1)
        colors[image_id] = rgb[xc, yc, :]
        arr[:, :2] = features.normalized_image_coordinates(
            arr[:, :2], cam.width, cam.height
        )
        # COLMAP keypoint rows: 4 = (x, y, scale, orientation),
        # 6 = (x, y, affine 2x2), 2 = (x, y) only.
        if n_cols == 4:
            x, y, s, o = arr[:, 0], arr[:, 1], arr[:, 2], arr[:, 3]
        elif n_cols == 6:
            x, y = arr[:, 0], arr[:, 1]
            s, o = get_scale_orientation_from_affine(arr)
        elif n_cols == 2:
            x, y = arr[:, 0], arr[:, 1]
            s = np.zeros_like(x)
            o = np.zeros_like(x)
        else:
            raise ValueError
        s = s / max(cam.width, cam.height)
        keypoints[image_id] = np.vstack((x, y, s, o)).T
    cursor.execute("SELECT image_id, rows, cols, data FROM descriptors;")
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, _ = image_map[image_id]
        # Read-only view is fine here: descriptors are only saved.
        descriptors = np.frombuffer(arr, dtype=np.uint8).reshape((n_rows, n_cols))
        kp = keypoints[image_id]
        data.save_features(filename, kp, descriptors, colors[image_id])
    cursor.close()
    return keypoints
def import_matches(db, data, image_map):
    """Import inlier matches from COLMAP's two_view_geometries table and
    save them grouped by first image name."""
    cursor = db.cursor()
    min_matches = 1
    cursor.execute(
        "SELECT pair_id, data FROM two_view_geometries WHERE rows>=?;", (min_matches,)
    )
    # Every image gets an entry even if it has no matches.
    matches_per_im1 = {m[0]: {} for m in image_map.values()}
    for row in cursor:
        pair_id = row[0]
        # frombuffer replaces the deprecated binary-mode fromstring;
        # the (n, 2) rows are (feature_id1, feature_id2) pairs.
        inlier_matches = np.frombuffer(row[1], dtype=np.uint32).reshape(-1, 2)
        image_id1, image_id2 = pair_id_to_image_ids(pair_id)
        image_name1 = image_map[image_id1][0]
        image_name2 = image_map[image_id2][0]
        matches_per_im1[image_name1][image_name2] = inlier_matches
    for image_name1, matches in matches_per_im1.items():
        data.save_matches(image_name1, matches)
    cursor.close()
def import_cameras_reconstruction(path_cameras, rec):
    """
    Imports cameras from a COLMAP reconstruction cameras.bin file
    """
    logger.info("Importing cameras from {}".format(path_cameras))
    with open(path_cameras, "rb") as f:
        (n_cameras,) = unpack("<Q", f.read(8))
        for _ in range(n_cameras):
            # Fixed header: camera id, model id, width, height, then a
            # model-dependent number of double parameters.
            camera_id, camera_model_id = unpack("<ii", f.read(8))
            width, height = unpack("<QQ", f.read(16))
            n_params = camera_models[camera_model_id][1]
            params = list(unpack("<{}d".format(n_params), f.read(8 * n_params)))
            cam = cam_from_colmap_params(camera_model_id, width, height, params)
            cam.id = str(camera_id)
            rec.add_camera(cam)
def cam_from_colmap_params(camera_model_id, width, height, params, prior_focal=1):
    """
    Helper function to map from colmap parameters to an OpenSfM camera
    """
    # Only PINHOLE, RADIAL and RADIAL_FISHEYE are convertible.
    mapping = {1: "pinhole", 3: "perspective", 9: "fisheye"}
    if camera_model_id not in mapping:
        raise ValueError("Not supported: " + camera_models[camera_model_id][0])
    projection_type = mapping[camera_model_id]
    # OpenSfM normalizes focal by the larger image dimension; without a
    # focal prior fall back to the 0.85 default.
    focal = params[0] / max(width, height) if prior_focal else 0.85
    if projection_type == "perspective":
        # params[3], params[4] are taken as the radial distortion
        # coefficients (5-parameter RADIAL model -- confirm ordering
        # against COLMAP's camera_models.h).
        cam = pygeometry.Camera.create_perspective(focal, params[3], params[4])
    elif projection_type == "pinhole":
        cam = pygeometry.Camera.create_perspective(focal, 0, 0)
    else:  # projection_type == 'fisheye'
        cam = pygeometry.Camera.create_fisheye(focal, params[3], 0)
    cam.width = width
    cam.height = height
    return cam
def import_points_reconstruction(path_points, rec):
    """Read COLMAP's points3D.bin into *rec*, dropping track data."""
    logger.info("Importing points from {}".format(path_points))
    with open(path_points, "rb") as f:
        (n_points,) = unpack("<Q", f.read(8))
        for _ in range(n_points):
            (pid,) = unpack("<Q", f.read(8))
            x, y, z = unpack("<3d", f.read(24))
            r, g, b = unpack("<3B", f.read(3))
            unpack("<d", f.read(8))  # reprojection error, unused
            (track_len,) = unpack("<Q", f.read(8))
            # Skip the (image_id, point2d_ix) track entries entirely.
            f.seek(8 * track_len, 1)
            point = rec.create_point(str(pid), (x, y, z))
            point.color = (r, g, b)
def read_colmap_ply(path_ply):
    """
    Reads the ply output from COLMAP.

    This is not a generic PLY reader: the header must match exactly the
    binary little-endian xyz + normal + RGB layout that COLMAP writes.

    Returns (points, normals, colors) as (n, 3) numpy arrays.
    """
    logging.getLogger(__name__).info("Reading fused pointcloud {}".format(path_ply))
    header_should_be = [
        "ply\n",
        "format binary_little_endian 1.0\n",
        "element vertex\n",
        "property float x\n",
        "property float y\n",
        "property float z\n",
        "property float nx\n",
        "property float ny\n",
        "property float nz\n",
        "property uchar red\n",
        "property uchar green\n",
        "property uchar blue\n",
        "end_header\n",
    ]
    properties = [
        ("x", "<f4"),
        ("y", "<f4"),
        ("z", "<f4"),
        ("nx", "<f4"),
        ("ny", "<f4"),
        ("nz", "<f4"),
        ("red", "<u1"),
        ("green", "<u1"),
        ("blue", "<u1"),
    ]
    n_vertices = 0
    with open(path_ply, "rb") as f:
        header = []
        for line in f:
            line = line.decode()
            if line.startswith("element vertex"):
                # The vertex count varies per file; blank it so the
                # header can be compared against the fixed template.
                n_vertices = int(line.strip().split()[-1])
                line = "element vertex\n"
            header.append(line)
            if line == header_should_be[-1]:
                break
        assert header == header_should_be
        data = np.fromfile(f, dtype=properties, count=n_vertices)
    # Field-wise extraction is vectorized; the previous per-row Python
    # loop was pure interpreter overhead for multi-million point clouds.
    points = np.column_stack([data["x"], data["y"], data["z"]])
    normals = np.column_stack([data["nx"], data["ny"], data["nz"]])
    colors = np.column_stack([data["red"], data["green"], data["blue"]])
    return points, normals, colors
def import_images_reconstruction(path_images, keypoints, rec):
    """
    Read images.bin, building shots and tracks graph

    Returns (tracks_manager, image_ix_to_shot_id) where the latter maps
    the image's ordinal position in the file to the created shot id.
    """
    logger.info("Importing images from {}".format(path_images))
    tracks_manager = pysfm.TracksManager()
    image_ix_to_shot_id = {}
    with open(path_images, "rb") as f:
        n_ims = unpack("<Q", f.read(8))[0]
        for image_ix in range(n_ims):
            # Per-image record: id, quaternion (w x y z), translation,
            # camera id, then a NUL-terminated filename.
            image_id = unpack("<I", f.read(4))[0]
            q0 = unpack("<d", f.read(8))[0]
            q1 = unpack("<d", f.read(8))[0]
            q2 = unpack("<d", f.read(8))[0]
            q3 = unpack("<d", f.read(8))[0]
            t0 = unpack("<d", f.read(8))[0]
            t1 = unpack("<d", f.read(8))[0]
            t2 = unpack("<d", f.read(8))[0]
            camera_id = unpack("<I", f.read(4))[0]
            filename = ""
            while True:
                c = f.read(1).decode()
                if c == "\0":
                    break
                filename += c
            # Normalize the rotation before converting to angle-axis.
            q = np.array([q0, q1, q2, q3])
            q /= np.linalg.norm(q)
            t = np.array([t0, t1, t2])
            pose = pygeometry.Pose(rotation=quaternion_to_angle_axis(q), translation=t)
            shot = rec.create_shot(filename, str(camera_id), pose)
            image_ix_to_shot_id[image_ix] = shot.id
            # 2D observations: only those linked to a 3D point (id !=
            # uint64 max sentinel) become track observations.
            n_points_2d = unpack("<Q", f.read(8))[0]
            for point2d_ix in range(n_points_2d):
                x = unpack("<d", f.read(8))[0]
                y = unpack("<d", f.read(8))[0]
                point3d_id = unpack("<Q", f.read(8))[0]
                if point3d_id != np.iinfo(np.uint64).max:
                    # Scale comes from the keypoint, color from the
                    # associated 3D point.
                    kp = keypoints[image_id][point2d_ix]
                    r, g, b = rec.points[str(point3d_id)].color
                    obs = pysfm.Observation(
                        x,
                        y,
                        kp[2],
                        int(r),
                        int(g),
                        int(b),
                        point2d_ix,
                    )
                    tracks_manager.add_observation(shot.id, str(point3d_id), obs)
    return tracks_manager, image_ix_to_shot_id
def read_vis(path_vis, image_ix_to_shot_id):
    """Parse COLMAP's fused .ply.vis visibility file.

    Returns {shot_id: [point indices seen by that shot]}.
    """
    logging.getLogger(__name__).info("Reading visibility file {}".format(path_vis))
    points_seen = defaultdict(list)
    with open(path_vis, "rb") as f:
        (n_points,) = unpack("<Q", f.read(8))
        for point_ix in range(n_points):
            (n_images,) = unpack("<I", f.read(4))
            for _ in range(n_images):
                (image_ix,) = unpack("<I", f.read(4))
                points_seen[image_ix_to_shot_id[image_ix]].append(point_ix)
    # Sanity check: no shot lists the same point twice.
    for seen in points_seen.values():
        assert len(seen) == len(set(seen))
    return points_seen
def import_depthmaps_from_fused_pointcloud(udata, urec, image_ix_to_shot_id, path_ply):
    """
    Imports the depthmaps by reprojecting the fused pointcloud
    """
    # Normals and colors are not needed for depth reprojection.
    points, _, _ = read_colmap_ply(path_ply)
    visibility = read_vis(path_ply.with_suffix(".ply.vis"), image_ix_to_shot_id)
    max_size = udata.config["depthmap_resolution"]
    # Project, per shot, only the points that shot actually sees.
    for shot_id, seen_ixs in visibility.items():
        logger.info("Projecting shot {}".format(shot_id))
        project_pointcloud_save_depth(udata, urec, points[seen_ixs], shot_id, max_size)
def project_pointcloud_save_depth(udata, urec, points, shot_id, max_sz):
    """Project 3D *points* into shot *shot_id* and save the result as an
    OpenSfM 'clean' depthmap plus a PNG visualization.

    Fix: drop the redundant local `import matplotlib.pyplot as plt`;
    the module already imports pyplot as `pl`.
    """
    shot = urec.shots[shot_id]
    # Clamp the output resolution to max_sz on the longest side while
    # preserving the aspect ratio.
    w, h = shot.camera.width, shot.camera.height
    if max(w, h) > max_sz:
        ar = w / h
        if w > h:
            w = max_sz
            h = int(w / ar)
        else:
            h = max_sz
            w = int(ar * h)
    points_2d = shot.project_many(points)
    pixel_coords = features.denormalized_image_coordinates(points_2d, w, h).astype(int)
    # Filter out points that project outside the image.
    # NOTE(review): the visibility file should already restrict points
    # to ones seen by this image -- confirm why out-of-frame projections
    # still occur.
    mask = np.ones(pixel_coords.shape[0], dtype=bool)
    mask[pixel_coords[:, 0] < 0] = 0
    mask[pixel_coords[:, 1] < 0] = 0
    mask[pixel_coords[:, 0] >= w] = 0
    mask[pixel_coords[:, 1] >= h] = 0
    pixel_coords = pixel_coords[mask]
    # Depth = range to the point scaled by the cosine of the viewing
    # angle (i.e. the component along the optical axis).
    distances = np.linalg.norm(points - shot.pose.get_origin(), axis=1)
    viewing_angles = np.arctan2(np.linalg.norm(points_2d, axis=1), shot.camera.focal)
    depths = distances * np.cos(viewing_angles)
    depths[depths > udata.config["depthmap_max_depth"]] = 0  # 0 marks invalid
    depth_image = np.zeros([h, w])
    depth_image[pixel_coords[:, 1], pixel_coords[:, 0]] = depths[mask]
    # Save in the npz layout OpenSfM's depthmap tooling expects.
    filepath = Path(udata.depthmap_file(shot_id, "clean.npz"))
    filepath.parent.mkdir(exist_ok=True, parents=True)
    np.savez_compressed(
        filepath, depth=depth_image, plane=np.zeros(1), score=np.zeros(1)
    )
    # Save a colormapped PNG for quick visual inspection.
    fig = pl.figure()
    rgb, sm = depth_colormap(depth_image)
    pl.imshow(rgb)
    small_colorbar(pl.gca(), mappable=sm)
    filepath = Path(udata.data_path) / "plot_depthmaps" / "{}.png".format(shot_id)
    filepath.parent.mkdir(exist_ok=True, parents=True)
    pl.savefig(filepath, dpi=300)
    pl.close(fig)
def quaternion_to_angle_axis(quaternion):
    """Convert a (w, x, y, z) quaternion to an angle-axis rotation vector.

    The quaternion is normalized first and w is clamped to [-1, 1], so
    float round-off can never push math.sqrt / math.acos out of their
    domain (the previous version raised ValueError for w < -1 and only
    normalized when w > 1).
    """
    q = np.asarray(quaternion, dtype=float)
    norm = np.linalg.norm(q)
    if norm > 0:
        q = q / norm
    qw, qx, qy, qz = q
    qw = max(-1.0, min(1.0, qw))
    # Keep the denominator away from zero near the identity rotation.
    s = max(0.001, math.sqrt(1 - qw * qw))
    angle = 2 * math.acos(qw)
    return [angle * qx / s, angle * qy / s, angle * qz / s]
def main():
    """Convert a COLMAP database (and, if present, its sparse/dense
    reconstruction files) into an OpenSfM dataset next to the database."""
    parser = argparse.ArgumentParser(
        description="Convert COLMAP database to OpenSfM dataset"
    )
    parser.add_argument("database", help="path to the database to be processed")
    parser.add_argument("images", help="path to the images")
    args = parser.parse_args()
    # NOTE(review): message direction looks inverted -- this converts
    # FROM COLMAP to OpenSfM; confirm intended wording.
    logger.info(f"Converting {args.database} to COLMAP format")
    p_db = Path(args.database)
    assert p_db.is_file()
    export_folder = p_db.parent / EXPORT_DIR_NAME
    export_folder.mkdir(exist_ok=True)
    # Link rather than copy the source images into the export tree.
    images_path = export_folder / "images"
    if not images_path.exists():
        os.symlink(os.path.abspath(args.images), images_path, target_is_directory=True)
    # Copy the config if this is an colmap export of an opensfm export
    if (
        p_db.parent.name == "colmap_export"
        and not (export_folder / "config.yaml").exists()
    ):
        os.symlink(p_db.parent.parent / "config.yaml", export_folder / "config.yaml")
    data = dataset.DataSet(export_folder)
    db = sqlite3.connect(p_db.as_posix())
    camera_map, image_map = import_cameras_images(db, data)
    # Create image_list.txt
    with open(export_folder / "image_list.txt", "w") as f:
        for _, (filename, _) in image_map.items():
            f.write("images/" + filename + "\n")
    data.load_image_list()
    keypoints = import_features(db, data, image_map, camera_map)
    import_matches(db, data, image_map)
    # The sparse reconstruction is optional; import it when all three
    # binary files are present.
    rec_cameras = p_db.parent / "cameras.bin"
    rec_points = p_db.parent / "points3D.bin"
    rec_images = p_db.parent / "images.bin"
    if rec_cameras.exists() and rec_images.exists() and rec_points.exists():
        reconstruction = types.Reconstruction()
        import_cameras_reconstruction(rec_cameras, reconstruction)
        import_points_reconstruction(rec_points, reconstruction)
        tracks_manager, _ = import_images_reconstruction(
            rec_images, keypoints, reconstruction
        )
        data.save_reconstruction([reconstruction])
        data.save_tracks_manager(tracks_manager)
        # Save undistorted reconstruction as well
        udata = dataset.UndistortedDataSet(data)
        urec = compute_and_save_undistorted_reconstruction(
            reconstruction, tracks_manager, data, udata
        )
        # Project colmap's fused pointcloud to save depths in opensfm format
        path_ply = p_db.parent / "dense/fused.ply"
        if path_ply.is_file():
            # The dense stage keeps its own copies of the sparse files.
            rec_cameras = p_db.parent / "dense/sparse/cameras.bin"
            rec_images = p_db.parent / "dense/sparse/images.bin"
            rec_points = p_db.parent / "points3D.bin"
            reconstruction = types.Reconstruction()
            import_cameras_reconstruction(rec_cameras, reconstruction)
            import_points_reconstruction(rec_points, reconstruction)
            _, image_ix_to_shot_id = import_images_reconstruction(
                rec_images, keypoints, reconstruction
            )
            logger.info(f"Projecting {path_ply} to depth images")
            import_depthmaps_from_fused_pointcloud(
                udata, urec, image_ix_to_shot_id, path_ply
            )
        else:
            logger.info(
                "Not importing dense reconstruction: Didn't find {}".format(path_ply)
            )
    else:
        logger.info(
            "Didn't find some of the reconstruction files at {}".format(p_db.parent)
        )
    db.close()
if __name__ == "__main__":
    main()
| |
# Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import copy
import mock
import netaddr
from oslo.config import cfg
from webob import exc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests.unit import test_agent_ext_plugin
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_api_v2_extension
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import testlib_plugin
LOG = logging.getLogger(__name__)
# Shorthands used throughout this test module.
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class L3TestExtensionManager(object):
    """Minimal extension manager exposing only the L3 router extension."""

    def get_resources(self):
        # The test setup never builds the full API router (which would
        # normally extend the global attribute map), so extend it here.
        attributes.RESOURCE_ATTRIBUTE_MAP.update(l3.RESOURCE_ATTRIBUTE_MAP)
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
class L3NatExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
    # Exercises the l3 extension's REST surface against a mocked plugin:
    # each test stubs the plugin's return value, issues an API request,
    # and verifies both the plugin invocation and the wire response.
    fmt = 'json'

    def setUp(self):
        super(L3NatExtensionTestCase, self).setUp()
        self._setUpExtension(
            'neutron.extensions.l3.RouterPluginBase', None,
            l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '',
            allow_pagination=True, allow_sorting=True,
            supported_extension_aliases=['router'],
            use_quota=True)

    def test_router_create(self):
        router_id = _uuid()
        data = {'router': {'name': 'router1', 'admin_state_up': True,
                           'tenant_id': _uuid(),
                           'external_gateway_info': None}}
        return_value = copy.deepcopy(data['router'])
        return_value.update({'status': "ACTIVE", 'id': router_id})
        instance = self.plugin.return_value
        instance.create_router.return_value = return_value
        # Quota enforcement queries the count before creating.
        instance.get_routers_count.return_value = 0
        res = self.api.post(_get_path('routers', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_router.assert_called_with(mock.ANY,
                                                  router=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], True)

    def test_router_list(self):
        router_id = _uuid()
        return_value = [{'name': 'router1', 'admin_state_up': True,
                         'tenant_id': _uuid(), 'id': router_id}]
        instance = self.plugin.return_value
        instance.get_routers.return_value = return_value
        res = self.api.get(_get_path('routers', fmt=self.fmt))
        # Pagination/sorting kwargs are passed because they were enabled
        # in setUp; their values are irrelevant here.
        instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY,
                                                filters=mock.ANY,
                                                sorts=mock.ANY,
                                                limit=mock.ANY,
                                                marker=mock.ANY,
                                                page_reverse=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('routers', res)
        self.assertEqual(1, len(res['routers']))
        self.assertEqual(router_id, res['routers'][0]['id'])

    def test_router_update(self):
        router_id = _uuid()
        update_data = {'router': {'admin_state_up': False}}
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE", 'id': router_id}
        instance = self.plugin.return_value
        instance.update_router.return_value = return_value
        res = self.api.put(_get_path('routers', id=router_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_router.assert_called_with(mock.ANY, router_id,
                                                  router=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], False)

    def test_router_get(self):
        router_id = _uuid()
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE", 'id': router_id}
        instance = self.plugin.return_value
        instance.get_router.return_value = return_value
        res = self.api.get(_get_path('routers', id=router_id,
                                     fmt=self.fmt))
        instance.get_router.assert_called_with(mock.ANY, router_id,
                                               fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], False)

    def test_router_delete(self):
        router_id = _uuid()
        res = self.api.delete(_get_path('routers', id=router_id))
        instance = self.plugin.return_value
        instance.delete_router.assert_called_with(mock.ANY, router_id)
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)

    def test_router_add_interface(self):
        router_id = _uuid()
        subnet_id = _uuid()
        port_id = _uuid()
        interface_data = {'subnet_id': subnet_id}
        return_value = copy.deepcopy(interface_data)
        return_value['port_id'] = port_id
        instance = self.plugin.return_value
        instance.add_router_interface.return_value = return_value
        # add_router_interface is exposed as a PUT member action.
        path = _get_path('routers', id=router_id,
                         action="add_router_interface",
                         fmt=self.fmt)
        res = self.api.put(path, self.serialize(interface_data))
        instance.add_router_interface.assert_called_with(mock.ANY, router_id,
                                                         interface_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('port_id', res)
        self.assertEqual(res['port_id'], port_id)
        self.assertEqual(res['subnet_id'], subnet_id)
class L3NatExtensionTestCaseXML(L3NatExtensionTestCase):
    """Re-run the L3 extension API tests using XML serialization."""
    fmt = 'xml'
# This base plugin class is for tests.
class TestL3NatBasePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          external_net_db.External_net_db_mixin):
    """Base core plugin for L3 tests: external-net aware, no L3 routing."""
    # Advertise native pagination/sorting so the API layer does not
    # emulate them in Python.
    __native_pagination_support = True
    __native_sorting_support = True
    def create_network(self, context, network):
        """Create a network, processing its external-net attribute in the
        same DB (sub)transaction as the base creation.
        """
        session = context.session
        with session.begin(subtransactions=True):
            net = super(TestL3NatBasePlugin, self).create_network(context,
                                                                  network)
            self._process_l3_create(context, net, network['network'])
        return net
    def update_network(self, context, id, network):
        """Update a network, processing external-net attribute changes
        in the same DB (sub)transaction.
        """
        session = context.session
        with session.begin(subtransactions=True):
            net = super(TestL3NatBasePlugin, self).update_network(context, id,
                                                                  network)
            self._process_l3_update(context, net, network['network'])
        return net
    def delete_network(self, context, id):
        """Delete a network after removing its external-net bookkeeping."""
        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, id)
            super(TestL3NatBasePlugin, self).delete_network(context, id)
    def delete_port(self, context, id, l3_port_check=True):
        """Delete a port, first letting the L3 service plugin (if one is
        registered) veto router-port deletion and detach floating IPs.
        """
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if plugin:
            if l3_port_check:
                plugin.prevent_l3_port_deletion(context, id)
            plugin.disassociate_floatingips(context, id)
        return super(TestL3NatBasePlugin, self).delete_port(context, id)
# This plugin class is for tests with plugin that integrates L3.
class TestL3NatIntPlugin(TestL3NatBasePlugin,
                         l3_db.L3_NAT_db_mixin):
    """Core plugin variant that integrates L3 routing directly."""
    supported_extension_aliases = ["external-net", "router"]
# This plugin class is for tests with plugin that integrates L3 and L3 agent
# scheduling.
class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin,
                                        l3_agentschedulers_db.
                                        L3AgentSchedulerDbMixin):
    """Integrated-L3 core plugin with L3 agent scheduling support."""
    supported_extension_aliases = ["external-net", "router",
                                   "l3_agent_scheduler"]
    # Scheduler driver comes from configuration (router_scheduler_driver).
    router_scheduler = importutils.import_object(
        cfg.CONF.router_scheduler_driver)
# This plugin class is for tests with plugin not supporting L3.
class TestNoL3NatPlugin(TestL3NatBasePlugin):
    """Core plugin variant with no L3 support (external-net only)."""
    # Advertise native pagination/sorting (name-mangled per class).
    __native_pagination_support = True
    __native_sorting_support = True
    supported_extension_aliases = ["external-net"]
# An L3 routing service plugin class for tests with plugins that
# delegate L3 routing functionality to a separate service plugin
class TestL3NatServicePlugin(common_db_mixin.CommonDbMixin,
                             l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                             l3_db.L3_NAT_db_mixin):
    """Standalone L3 routing service plugin used by delegating cores."""
    supported_extension_aliases = ["router"]
    def get_plugin_type(self):
        """Identify this service plugin as the L3 router/NAT service."""
        return service_constants.L3_ROUTER_NAT
    def get_plugin_description(self):
        """Return a human-readable description for the service catalog."""
        return "L3 Routing Service Plugin for testing"
# An L3 routing service plugin class with L3 agent scheduling, for tests
# with plugins that delegate L3 routing functionality to a service plugin
class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
                                            l3_agentschedulers_db.
                                            L3AgentSchedulerDbMixin):
    """L3 service plugin variant that adds L3 agent scheduling."""
    supported_extension_aliases = ["router", "l3_agent_scheduler"]
class L3NATdbonlyMixinTestCase(base.BaseTestCase):
    """Unit tests exercising L3_NAT_dbonly_mixin helpers directly."""
    def setUp(self):
        super(L3NATdbonlyMixinTestCase, self).setUp()
        self.mixin = l3_db.L3_NAT_dbonly_mixin()
    def test_build_routers_list_with_gw_port_mismatch(self):
        """A router whose gw_port_id has no matching port gets no gw_port."""
        router_dicts = [{'gw_port_id': 'foo_gw_port_id',
                         'id': 'foo_router_id'}]
        result = self.mixin._build_routers_list(mock.ANY, router_dicts, {})
        self.assertIsNone(result[0].get('gw_port'))
class L3NatTestCaseMixin(object):
    """Helper mixin providing router/floating-IP fixtures for L3 API tests.

    Helpers issue real HTTP requests against ``self.ext_api`` and rely on
    the host test case for ``new_create_request``, ``_update``, ``_show``,
    ``subnet``/``port`` fixtures, etc.
    """
    def _create_router(self, fmt, tenant_id, name=None,
                       admin_state_up=None, set_context=False,
                       arg_list=None, **kwargs):
        """POST a router create request and return the raw response."""
        data = {'router': {'tenant_id': tenant_id}}
        if name:
            data['router']['name'] = name
        if admin_state_up:
            data['router']['admin_state_up'] = admin_state_up
        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['router'][arg] = kwargs[arg]
        router_req = self.new_create_request('routers', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            router_req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        return router_req.get_response(self.ext_api)
    def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
                     external_gateway_info=None, set_context=False,
                     arg_list=None, **kwargs):
        """Create a router and return the deserialized response body."""
        if external_gateway_info:
            arg_list = ('external_gateway_info', ) + (arg_list or ())
        res = self._create_router(fmt, tenant_id, name,
                                  admin_state_up, set_context,
                                  arg_list=arg_list,
                                  external_gateway_info=external_gateway_info,
                                  **kwargs)
        return self.deserialize(fmt, res)
    def _add_external_gateway_to_router(self, router_id, network_id,
                                        expected_code=exc.HTTPOk.code,
                                        neutron_context=None):
        """Set the router's external gateway to the given network."""
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        {'network_id': network_id}}},
                            expected_code=expected_code,
                            neutron_context=neutron_context)
    def _remove_external_gateway_from_router(self, router_id, network_id,
                                             expected_code=exc.HTTPOk.code,
                                             external_gw_info=None):
        """Clear (or replace with external_gw_info) the router's gateway.

        NOTE(review): ``network_id`` is accepted for call-site symmetry
        with _add_external_gateway_to_router but is never used here.
        """
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        external_gw_info}},
                            expected_code=expected_code)
    def _router_interface_action(self, action, router_id, subnet_id, port_id,
                                 expected_code=exc.HTTPOk.code,
                                 expected_body=None,
                                 tenant_id=None):
        """PUT an add/remove_router_interface action and check the result."""
        interface_data = {}
        if subnet_id:
            interface_data.update({'subnet_id': subnet_id})
        # 'add' takes either subnet_id or port_id, not both; 'remove'
        # may carry both to exercise mismatch error paths.
        if port_id and (action != 'add' or not subnet_id):
            interface_data.update({'port_id': port_id})
        req = self.new_action_request('routers', interface_data, router_id,
                                      "%s_router_interface" % action)
        # if tenant_id was specified, create a tenant context for this request
        if tenant_id:
            req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
        response = self.deserialize(self.fmt, res)
        if expected_body:
            self.assertEqual(response, expected_body)
        return response
    @contextlib.contextmanager
    def router(self, name='router1', admin_state_up=True,
               fmt=None, tenant_id=_uuid(),
               external_gateway_info=None, set_context=False,
               **kwargs):
        """Context manager yielding a router, deleted on exit.

        NOTE(review): the ``tenant_id=_uuid()`` default is evaluated once
        at class-definition time, so every call that omits tenant_id shares
        the same tenant id — presumably intentional; confirm before changing.
        """
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        yield router
        self._delete('routers', router['router']['id'])
    def _set_net_external(self, net_id):
        """Mark the given network as router:external."""
        self._update('networks', net_id,
                     {'network': {external_net.EXTERNAL: True}})
    def _create_floatingip(self, fmt, network_id, port_id=None,
                           fixed_ip=None, set_context=False):
        """POST a floating IP create request and return the raw response."""
        data = {'floatingip': {'floating_network_id': network_id,
                               'tenant_id': self._tenant_id}}
        if port_id:
            data['floatingip']['port_id'] = port_id
        if fixed_ip:
            data['floatingip']['fixed_ip_address'] = fixed_ip
        floatingip_req = self.new_create_request('floatingips', data, fmt)
        if set_context and self._tenant_id:
            # create a specific auth context for this request
            floatingip_req.environ['neutron.context'] = context.Context(
                '', self._tenant_id)
        return floatingip_req.get_response(self.ext_api)
    def _make_floatingip(self, fmt, network_id, port_id=None,
                         fixed_ip=None, set_context=False):
        """Create a floating IP, assert 201, return the deserialized body."""
        res = self._create_floatingip(fmt, network_id, port_id,
                                      fixed_ip, set_context)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        return self.deserialize(fmt, res)
    def _validate_floating_ip(self, fip):
        """Assert fip is the only floating IP and is retrievable by id."""
        body = self._list('floatingips')
        self.assertEqual(len(body['floatingips']), 1)
        self.assertEqual(body['floatingips'][0]['id'],
                         fip['floatingip']['id'])
        body = self._show('floatingips', fip['floatingip']['id'])
        self.assertEqual(body['floatingip']['id'],
                         fip['floatingip']['id'])
    @contextlib.contextmanager
    def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
                              set_context=False):
        """Yield a floating IP associated with a port behind a router.

        Builds an external subnet, a router with that gateway, an internal
        port (created here unless port_id is given), wires the internal
        subnet to the router, then associates the floating IP; tears
        everything down in reverse order on exit.
        """
        with self.subnet(cidr='11.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            private_port = None
            if port_id:
                private_port = self._show('ports', port_id)
            with test_db_plugin.optional_ctx(private_port,
                                             self.port) as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    floatingip = None
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    floatingip = self._make_floatingip(
                        fmt or self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'],
                        fixed_ip=fixed_ip,
                        set_context=False)
                    yield floatingip
                    # Cleanup only if the body did not already delete it.
                    if floatingip:
                        self._delete('floatingips',
                                     floatingip['floatingip']['id'])
                    self._router_interface_action(
                        'remove', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
    @contextlib.contextmanager
    def floatingip_no_assoc_with_public_sub(
        self, private_sub, fmt=None, set_context=False, public_sub=None):
        """Yield (floatingip, router) with no port association, using the
        caller-supplied external subnet; tears down on exit.
        """
        self._set_net_external(public_sub['subnet']['network_id'])
        with self.router() as r:
            floatingip = None
            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self._router_interface_action('add', r['router']['id'],
                                          private_sub['subnet']['id'],
                                          None)
            floatingip = self._make_floatingip(
                fmt or self.fmt,
                public_sub['subnet']['network_id'],
                set_context=set_context)
            yield floatingip, r
            if floatingip:
                self._delete('floatingips',
                             floatingip['floatingip']['id'])
            self._router_interface_action('remove', r['router']['id'],
                                          private_sub['subnet']['id'],
                                          None)
            self._remove_external_gateway_from_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
    @contextlib.contextmanager
    def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False):
        """Yield an unassociated floating IP on a fresh external subnet."""
        with self.subnet(cidr='12.0.0.0/24') as public_sub:
            with self.floatingip_no_assoc_with_public_sub(
                private_sub, fmt, set_context, public_sub) as (f, r):
                # Yield only the floating ip object
                yield f
class ExtraAttributesMixinTestCase(base.BaseTestCase):
    """Exercise ExtraAttributesMixin._extend_extra_router_dict defaults."""
    def setUp(self):
        super(ExtraAttributesMixinTestCase, self).setUp()
        self.mixin = l3_attrs_db.ExtraAttributesMixin()
    def _check_extend(self, extra_attributes, attributes, expected):
        # Run the extension hook and verify the resulting router dict.
        self.mixin._extend_extra_router_dict(
            attributes, {'extra_attributes': extra_attributes})
        self.assertEqual(expected, attributes)
    def test__extend_extra_router_dict_string_default(self):
        """An extension value overrides a string default."""
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': 'foo_default'}]
        given = {'foo_key': 'my_fancy_value'}
        self._check_extend(given, {}, given)
    def test__extend_extra_router_dict_booleans_false_default(self):
        """A True extension value overrides a False default."""
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': False}]
        given = {'foo_key': True}
        self._check_extend(given, {}, given)
    def test__extend_extra_router_dict_booleans_true_default(self):
        """A False extension value overrides a True default."""
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': True}]
        # Test that the default is overridden
        given = {'foo_key': False}
        self._check_extend(given, {}, given)
    def test__extend_extra_router_dict_no_extension_attributes(self):
        """With no extension values, the declared default is applied."""
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': 'foo_value'}]
        self._check_extend({}, {}, {'foo_key': 'foo_value'})
    def test__extend_extra_router_dict_none_extension_attributes(self):
        """None extension attributes leave the router dict untouched."""
        self._check_extend(None, {}, {})
class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_create(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name='router1', admin_state_up=True,
tenant_id=tenant_id) as router:
for k, v in expected_value:
self.assertEqual(router['router'][k], v)
    def test_router_create_call_extensions(self):
        """Registered dict-extend callbacks run when a router is created."""
        self.extension_called = False
        def _extend_router_dict_test_attr(*args, **kwargs):
            self.extension_called = True
        # NOTE(review): the callback is registered globally and never
        # unregistered here.
        db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
            l3.ROUTERS, [_extend_router_dict_test_attr])
        self.assertFalse(self.extension_called)
        with self.router():
            self.assertTrue(self.extension_called)
    def test_router_create_with_gwinfo(self):
        """A router can be created with external_gateway_info in the body."""
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            data = {'router': {'tenant_id': _uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            self.assertEqual(
                s['subnet']['network_id'],
                router['router']['external_gateway_info']['network_id'])
            self._delete('routers', router['router']['id'])
    def test_router_list(self):
        """Listing routers returns all created routers."""
        with contextlib.nested(self.router(),
                               self.router(),
                               self.router()
                               ) as routers:
            self._test_list_resources('router', routers)
    def test_router_list_with_parameters(self):
        """Router list honors the name query filter, including no-match."""
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               ) as (router1, router2):
            query_params = 'name=router1'
            self._test_list_resources('router', [router1],
                                      query_params=query_params)
            query_params = 'name=router2'
            self._test_list_resources('router', [router2],
                                      query_params=query_params)
            query_params = 'name=router3'
            self._test_list_resources('router', [],
                                      query_params=query_params)
    def test_router_list_with_sort(self):
        """Router list honors descending sort by name."""
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_sort('router', (router3, router2, router1),
                                      [('name', 'desc')])
    def test_router_list_with_pagination(self):
        """Router list paginates (page size 2) sorted ascending by name."""
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_pagination('router',
                                            (router1, router2, router3),
                                            ('name', 'asc'), 2, 2)
    def test_router_list_with_pagination_reverse(self):
        """Router list supports reverse pagination (page size 2)."""
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_pagination_reverse('router',
                                                    (router1, router2,
                                                     router3),
                                                    ('name', 'asc'), 2, 2)
    def test_router_update(self):
        """Updating a router's name persists and is visible on show."""
        rname1 = "yourrouter"
        rname2 = "nachorouter"
        with self.router(name=rname1) as r:
            body = self._show('routers', r['router']['id'])
            self.assertEqual(body['router']['name'], rname1)
            body = self._update('routers', r['router']['id'],
                                {'router': {'name': rname2}})
            body = self._show('routers', r['router']['id'])
            self.assertEqual(body['router']['name'], rname2)
    def test_router_update_gateway(self):
        """The gateway can be moved between external networks and cleared."""
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet() as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s1['subnet']['network_id'])
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s2['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s2['subnet']['network_id'])
                    # Validate that we can clear the gateway with
                    # an empty dict, in any other case, we fall back
                    # on None as default value
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        s2['subnet']['network_id'],
                        external_gw_info={})
    def test_router_update_gateway_with_existed_floatingip(self):
        """Re-pointing the gateway while a floating IP exists yields 409."""
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.floatingip_with_assoc() as fip:
                self._add_external_gateway_to_router(
                    fip['floatingip']['router_id'],
                    subnet['subnet']['network_id'],
                    expected_code=exc.HTTPConflict.code)
    def test_router_update_gateway_to_empty_with_existed_floatingip(self):
        """Clearing the gateway while a floating IP exists yields 409."""
        with self.floatingip_with_assoc() as fip:
            self._remove_external_gateway_from_router(
                fip['floatingip']['router_id'], None,
                expected_code=exc.HTTPConflict.code)
    def test_router_add_interface_subnet(self):
        """Adding/removing a subnet interface creates/deletes the port and
        emits the expected notifications with a router_interface payload.
        """
        exp_notifications = ['router.create.start',
                             'router.create.end',
                             'network.create.start',
                             'network.create.end',
                             'subnet.create.start',
                             'subnet.create.end',
                             'router.interface.create',
                             'router.interface.delete']
        fake_notifier.reset()
        with self.router() as r:
            with self.subnet() as s:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s['subnet']['id'],
                                                     None)
                self.assertIn('port_id', body)
                # fetch port and confirm device_id
                r_port_id = body['port_id']
                body = self._show('ports', r_port_id)
                self.assertEqual(body['port']['device_id'], r['router']['id'])
                body = self._router_interface_action('remove',
                                                     r['router']['id'],
                                                     s['subnet']['id'],
                                                     None)
                # The interface port is gone once the interface is removed.
                body = self._show('ports', r_port_id,
                                  expected_code=exc.HTTPNotFound.code)
                self.assertEqual(
                    set(exp_notifications),
                    set(n['event_type'] for n in fake_notifier.NOTIFICATIONS))
                for n in fake_notifier.NOTIFICATIONS:
                    if n['event_type'].startswith('router.interface.'):
                        payload = n['payload']['router_interface']
                        self.assertIn('id', payload)
                        self.assertEqual(payload['id'], r['router']['id'])
                        self.assertIn('tenant_id', payload)
                        stid = s['subnet']['tenant_id']
                        # tolerate subnet tenant deliberately set to '' in
                        # the nsx metadata access case
                        self.assertIn(payload['tenant_id'], [stid, ''])
    def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
        """A foreign tenant cannot add/remove a subnet interface (404);
        the same operations succeed under the admin context.
        """
        with mock.patch('neutron.context.Context.to_dict') as tdict:
            tenant_id = _uuid()
            admin_context = {'roles': ['admin']}
            tenant_context = {'tenant_id': 'bad_tenant',
                              'roles': []}
            tdict.return_value = admin_context
            with self.router(tenant_id=tenant_id) as r:
                with self.network(tenant_id=tenant_id) as n:
                    with self.subnet(network=n) as s:
                        tdict.return_value = tenant_context
                        err_code = exc.HTTPNotFound.code
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      s['subnet']['id'],
                                                      None,
                                                      err_code)
                        tdict.return_value = admin_context
                        body = self._router_interface_action('add',
                                                             r['router']['id'],
                                                             s['subnet']['id'],
                                                             None)
                        self.assertIn('port_id', body)
                        tdict.return_value = tenant_context
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      s['subnet']['id'],
                                                      None,
                                                      err_code)
                        # Admin removal doubles as cleanup for the fixture.
                        tdict.return_value = admin_context
                        body = self._router_interface_action('remove',
                                                             r['router']['id'],
                                                             s['subnet']['id'],
                                                             None)
    def test_router_add_interface_subnet_with_port_from_other_tenant(self):
        """Interfaces can be added for subnets of different tenants (admin
        for the other tenant's subnet, tenant context for its own).
        """
        tenant_id = _uuid()
        other_tenant_id = _uuid()
        with contextlib.nested(
            self.router(tenant_id=tenant_id),
            self.network(tenant_id=tenant_id),
            self.network(tenant_id=other_tenant_id)) as (r, n1, n2):
            with contextlib.nested(
                self.subnet(network=n1, cidr='10.0.0.0/24'),
                self.subnet(network=n2, cidr='10.1.0.0/24')) as (s1, s2):
                body = self._router_interface_action(
                    'add',
                    r['router']['id'],
                    s2['subnet']['id'],
                    None)
                self.assertIn('port_id', body)
                self._router_interface_action(
                    'add',
                    r['router']['id'],
                    s1['subnet']['id'],
                    None,
                    tenant_id=tenant_id)
                self.assertIn('port_id', body)
                self._router_interface_action(
                    'remove',
                    r['router']['id'],
                    s1['subnet']['id'],
                    None,
                    tenant_id=tenant_id)
                body = self._router_interface_action(
                    'remove',
                    r['router']['id'],
                    s2['subnet']['id'],
                    None)
    def test_router_add_interface_port(self):
        """Adding an interface by port id binds the port to the router."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     None,
                                                     p['port']['id'])
                self.assertIn('port_id', body)
                self.assertEqual(body['port_id'], p['port']['id'])
                # fetch port and confirm device_id
                body = self._show('ports', p['port']['id'])
                self.assertEqual(body['port']['device_id'], r['router']['id'])
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])
    def test_router_add_interface_empty_port_and_subnet_ids(self):
        """Adding an interface with neither port nor subnet id yields 400."""
        with self.router() as r:
            self._router_interface_action('add', r['router']['id'],
                                          None, None,
                                          expected_code=exc.
                                          HTTPBadRequest.code)
    def test_router_add_interface_port_bad_tenant_returns_404(self):
        """A foreign tenant cannot add/remove a port interface (404);
        the same operations succeed under the admin context.
        """
        with mock.patch('neutron.context.Context.to_dict') as tdict:
            admin_context = {'roles': ['admin']}
            tenant_context = {'tenant_id': 'bad_tenant',
                              'roles': []}
            tdict.return_value = admin_context
            with self.router() as r:
                with self.port(do_delete=False) as p:
                    tdict.return_value = tenant_context
                    err_code = exc.HTTPNotFound.code
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'],
                                                  err_code)
                    tdict.return_value = admin_context
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    tdict.return_value = tenant_context
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'],
                                                  err_code)
                    tdict.return_value = admin_context
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_add_interface_dup_subnet1_returns_400(self):
        """Adding the same subnet interface twice yields 400."""
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              expected_code=exc.
                                              HTTPBadRequest.code)
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_add_interface_dup_subnet2_returns_400(self):
        """Adding a second port interface on the same subnet yields 400."""
        with self.router() as r:
            with self.subnet() as s:
                with self.port(subnet=s, do_delete=False) as p1:
                    with self.port(subnet=s) as p2:
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      None,
                                                      p1['port']['id'])
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      None,
                                                      p2['port']['id'],
                                                      expected_code=exc.
                                                      HTTPBadRequest.code)
                        # clean-up
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      None,
                                                      p1['port']['id'])
    def test_router_add_interface_overlapped_cidr_returns_400(self):
        """Adding an interface whose CIDR overlaps an existing one (equal
        or containing) yields 400.
        """
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s1:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s1['subnet']['id'],
                                              None)
                def try_overlapped_cidr(cidr):
                    # Helper: expect 400 when attaching a subnet with cidr.
                    with self.subnet(cidr=cidr) as s2:
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      s2['subnet']['id'],
                                                      None,
                                                      expected_code=exc.
                                                      HTTPBadRequest.code)
                # another subnet with same cidr
                try_overlapped_cidr('10.0.1.0/24')
                # another subnet with overlapped cidr including s1
                try_overlapped_cidr('10.0.0.0/16')
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s1['subnet']['id'],
                                              None)
    def test_router_add_interface_no_data_returns_400(self):
        """Adding an interface with an empty request body yields 400."""
        with self.router() as r:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          None,
                                          None,
                                          expected_code=exc.
                                          HTTPBadRequest.code)
    def test_router_add_gateway_dup_subnet1_returns_400(self):
        """Using an attached subnet's network as the gateway yields 400."""
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    expected_code=exc.HTTPBadRequest.code)
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_add_gateway_dup_subnet2_returns_400(self):
        """Attaching the gateway network's subnet as an interface yields 400."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              expected_code=exc.
                                              HTTPBadRequest.code)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
    def test_router_add_gateway(self):
        """Setting and clearing the gateway is reflected on router show."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)
    def test_router_add_gateway_tenant_ctx(self):
        """A non-admin tenant can set the gateway on its own router."""
        with self.router(tenant_id='noadmin',
                         set_context=True) as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                ctx = context.Context('', 'noadmin')
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    neutron_context=ctx)
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)
    def test_create_router_port_with_device_id_of_other_teants_router(self):
        """Creating a router-interface port whose device_id is another
        tenant's router yields 409.  (NOTE(review): "teants" typo is kept
        to preserve the test id.)
        """
        with self.router() as admin_router:
            with self.network(tenant_id='tenant_a',
                              set_context=True) as n:
                with self.subnet(network=n):
                    self._create_port(
                        self.fmt, n['network']['id'],
                        tenant_id='tenant_a',
                        device_id=admin_router['router']['id'],
                        device_owner='network:router_interface',
                        set_context=True,
                        expected_res_status=exc.HTTPConflict.code)
    def test_create_non_router_port_device_id_of_other_teants_router_update(
        self):
        # This tests that HTTPConflict is raised if we create a non-router
        # port that matches the device_id of another tenants router and then
        # we change the device_owner to be network:router_interface.
        with self.router() as admin_router:
            with self.network(tenant_id='tenant_a',
                              set_context=True) as n:
                with self.subnet(network=n):
                    port_res = self._create_port(
                        self.fmt, n['network']['id'],
                        tenant_id='tenant_a',
                        device_id=admin_router['router']['id'],
                        set_context=True)
                    port = self.deserialize(self.fmt, port_res)
                    neutron_context = context.Context('', 'tenant_a')
                    data = {'port': {'device_owner':
                                     'network:router_interface'}}
                    self._update('ports', port['port']['id'], data,
                                 neutron_context=neutron_context,
                                 expected_code=exc.HTTPConflict.code)
                    # clean up the port created above
                    self._delete('ports', port['port']['id'])
    def test_update_port_device_id_to_different_tenants_router(self):
        """Re-pointing an interface port's device_id at another tenant's
        router yields 409.
        """
        with self.router() as admin_router:
            with self.router(tenant_id='tenant_a',
                             set_context=True) as tenant_router:
                with self.network(tenant_id='tenant_a',
                                  set_context=True) as n:
                    with self.subnet(network=n) as s:
                        port = self._router_interface_action(
                            'add', tenant_router['router']['id'],
                            s['subnet']['id'], None, tenant_id='tenant_a')
                        neutron_context = context.Context('', 'tenant_a')
                        data = {'port':
                                {'device_id': admin_router['router']['id']}}
                        self._update('ports', port['port_id'], data,
                                     neutron_context=neutron_context,
                                     expected_code=exc.HTTPConflict.code)
                        # clean up the interface added above
                        self._router_interface_action(
                            'remove', tenant_router['router']['id'],
                            s['subnet']['id'], None, tenant_id='tenant_a')
def test_router_add_gateway_invalid_network_returns_404(self):
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
"foobar", expected_code=exc.HTTPNotFound.code)
    def test_router_add_gateway_net_not_external_returns_400(self):
        """Setting the gateway to a non-external network yields 400."""
        with self.router() as r:
            with self.subnet() as s:
                # intentionally do not set net as external
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    expected_code=exc.HTTPBadRequest.code)
    def test_router_add_gateway_no_subnet_returns_400(self):
        """Setting the gateway to an external net without subnets yields 400."""
        with self.router() as r:
            with self.network() as n:
                self._set_net_external(n['network']['id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'], expected_code=exc.HTTPBadRequest.code)
    def test_router_remove_interface_inuse_returns_409(self):
        """Deleting a router that still has an interface yields 409."""
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._delete('routers', r['router']['id'],
                             expected_code=exc.HTTPConflict.code)
                # remove interface so test can exit without errors
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_remove_interface_wrong_subnet_returns_400(self):
        """Removing an interface with a mismatched subnet/port pair yields
        400.
        """
        with self.router() as r:
            with self.subnet() as s:
                with self.port(do_delete=False) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  p['port']['id'],
                                                  exc.HTTPBadRequest.code)
                    #remove properly to clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_remove_interface_returns_200(self):
        """Removing an interface returns 200 with the same body as add."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     None,
                                                     p['port']['id'])
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'],
                                              expected_body=body)
    def test_router_remove_interface_wrong_port_returns_404(self):
        """Removing an interface with a port not on the router yields 404."""
        with self.router() as r:
            with self.subnet():
                with self.port(do_delete=False) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # create another port for testing failure case
                    res = self._create_port(self.fmt, p['port']['network_id'])
                    p2 = self.deserialize(self.fmt, res)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p2['port']['id'],
                                                  exc.HTTPNotFound.code)
                    # remove correct interface to cleanup
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # remove extra port created
                    self._delete('ports', p2['port']['id'])
    def test_router_delete(self):
        """After the router fixture is torn down, show returns 404."""
        with self.router() as router:
            router_id = router['router']['id']
        # The router() context manager deleted the router on exit above.
        req = self.new_show_request('router', router_id)
        res = req.get_response(self._api_for_resource('router'))
        self.assertEqual(res.status_int, 404)
    def test_router_delete_with_port_existed_returns_409(self):
        """Deleting a router with an attached interface yields 409 until
        the interface is removed.
        """
        with self.subnet() as subnet:
            res = self._create_router(self.fmt, _uuid())
            router = self.deserialize(self.fmt, res)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          subnet['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'],
                         exc.HTTPConflict.code)
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          subnet['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'])
    def test_router_delete_with_floatingip_existed_returns_409(self):
        """Deleting a router that still routes a floating IP yields 409."""
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.subnet(cidr='12.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                res = self._create_router(self.fmt, _uuid())
                r = self.deserialize(self.fmt, res)
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action('add', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
                res = self._create_floatingip(
                    self.fmt, public_sub['subnet']['network_id'],
                    port_id=p['port']['id'])
                self.assertEqual(res.status_int, exc.HTTPCreated.code)
                floatingip = self.deserialize(self.fmt, res)
                self._delete('routers', r['router']['id'],
                             expected_code=exc.HTTPConflict.code)
                # Cleanup
                self._delete('floatingips', floatingip['floatingip']['id'])
                self._router_interface_action('remove', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
                self._delete('routers', r['router']['id'])
    def test_router_show(self):
        """Showing a router returns the expected field values."""
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
        with self.router(name='router1', admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
                self.assertEqual(res['router'][k], v)
def test_network_update_external_failure(self):
    """A network serving as a router gateway cannot be made internal."""
    with self.router() as r:
        with self.subnet() as s1:
            self._set_net_external(s1['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            # Clearing the external flag must conflict while a router
            # gateway is attached to this network.
            self._update('networks', s1['subnet']['network_id'],
                         {'network': {external_net.EXTERNAL: False}},
                         expected_code=exc.HTTPConflict.code)
            self._remove_external_gateway_from_router(
                r['router']['id'],
                s1['subnet']['network_id'])
def test_network_update_external(self):
    """An external network with no gateways can be flipped to internal."""
    with self.router() as r:
        with self.network('test_net') as testnet:
            self._set_net_external(testnet['network']['id'])
            with self.subnet() as s1:
                self._set_net_external(s1['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])
                # testnet is external but is not the gateway network,
                # so this update must succeed.
                self._update('networks', testnet['network']['id'],
                             {'network': {external_net.EXTERNAL: False}})
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])
def test_floatingip_crd_ops(self):
    """Create/read/delete round trip for an associated floating IP."""
    with self.floatingip_with_assoc() as fip:
        self._validate_floating_ip(fip)

    # post-delete, check that it is really gone
    body = self._list('floatingips')
    self.assertEqual(len(body['floatingips']), 0)

    self._show('floatingips', fip['floatingip']['id'],
               expected_code=exc.HTTPNotFound.code)
def _test_floatingip_with_assoc_fails(self, plugin_method):
    """Patch plugin_method to fail and check fip creation rolls back.

    After the induced failure no orphaned floating-IP port may remain.
    """
    with self.subnet(cidr='200.0.0.0/24') as public_sub:
        self._set_net_external(public_sub['subnet']['network_id'])
        with self.port() as private_port:
            with self.router() as r:
                sid = private_port['port']['fixed_ips'][0]['subnet_id']
                private_sub = {'subnet': {'id': sid}}
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action('add', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
                with mock.patch(plugin_method) as pl:
                    # Force the association step to fail with a 400.
                    pl.side_effect = n_exc.BadRequest(
                        resource='floatingip',
                        msg='fake_error')
                    res = self._create_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'])
                    self.assertEqual(res.status_int, 400)
                    # The internal port created for the fip must have
                    # been cleaned up by the rollback.
                    for p in self._list('ports')['ports']:
                        if (p['device_owner'] ==
                                l3_constants.DEVICE_OWNER_FLOATINGIP):
                            self.fail('garbage port is not deleted')
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
def test_floatingip_with_assoc_fails(self):
    """Failure inside _check_and_get_fip_assoc must roll fip back."""
    plugin_method = ('neutron.db.l3_db.L3_NAT_db_mixin.'
                     '_check_and_get_fip_assoc')
    self._test_floatingip_with_assoc_fails(plugin_method)
def test_create_floatingip_with_assoc(
    self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
    """A fip created with a port association exposes its binding data."""
    with self.floatingip_with_assoc() as fip:
        body = self._show('floatingips', fip['floatingip']['id'])
        self.assertEqual(body['floatingip']['id'],
                         fip['floatingip']['id'])
        self.assertEqual(body['floatingip']['port_id'],
                         fip['floatingip']['port_id'])
        self.assertEqual(expected_status, body['floatingip']['status'])
        self.assertIsNotNone(body['floatingip']['fixed_ip_address'])
        self.assertIsNotNone(body['floatingip']['router_id'])
def test_floatingip_update(
    self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
    """Associating a port via update fills port_id and fixed_ip fields."""
    with self.port() as p:
        private_sub = {'subnet': {'id':
                                  p['port']['fixed_ips'][0]['subnet_id']}}
        with self.floatingip_no_assoc(private_sub) as fip:
            # Unassociated fip starts with empty binding fields.
            body = self._show('floatingips', fip['floatingip']['id'])
            self.assertIsNone(body['floatingip']['port_id'])
            self.assertIsNone(body['floatingip']['fixed_ip_address'])
            self.assertEqual(body['floatingip']['status'], expected_status)

            port_id = p['port']['id']
            ip_address = p['port']['fixed_ips'][0]['ip_address']
            body = self._update('floatingips', fip['floatingip']['id'],
                                {'floatingip': {'port_id': port_id}})
            self.assertEqual(body['floatingip']['port_id'], port_id)
            self.assertEqual(body['floatingip']['fixed_ip_address'],
                             ip_address)
def test_floatingip_create_different_fixed_ip_same_port(self):
    """Delete a port that carries multiple floating IPs.

    Each floating address is associated with a distinct fixed address
    of the same port; the port must still be deletable.
    """
    with self.router() as r:
        with self.subnet(cidr='11.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            with self.subnet() as private_sub:
                ip_range = list(netaddr.IPNetwork(
                    private_sub['subnet']['cidr']))
                # Two fixed IPs near the top of the subnet range.
                fixed_ips = [{'ip_address': str(ip_range[-3])},
                             {'ip_address': str(ip_range[-2])}]
                self._router_interface_action(
                    'add', r['router']['id'],
                    private_sub['subnet']['id'], None)
                with self.port(subnet=private_sub,
                               fixed_ips=fixed_ips) as p:
                    fip1 = self._make_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        p['port']['id'],
                        fixed_ip=str(ip_range[-2]))
                    fip2 = self._make_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        p['port']['id'],
                        fixed_ip=str(ip_range[-3]))

                # Test that floating ips are assigned successfully.
                body = self._show('floatingips',
                                  fip1['floatingip']['id'])
                self.assertEqual(
                    body['floatingip']['port_id'],
                    fip1['floatingip']['port_id'])

                body = self._show('floatingips',
                                  fip2['floatingip']['id'])
                self.assertEqual(
                    body['floatingip']['port_id'],
                    fip2['floatingip']['port_id'])

                # Test that port has been successfully deleted.
                body = self._show('ports', p['port']['id'],
                                  expected_code=exc.HTTPNotFound.code)

                for fip in [fip1, fip2]:
                    self._delete('floatingips',
                                 fip['floatingip']['id'])

                self._router_interface_action(
                    'remove', r['router']['id'],
                    private_sub['subnet']['id'], None)

            self._remove_external_gateway_from_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
def test_floatingip_update_different_fixed_ip_same_port(self):
    """A fip can be re-pointed to another fixed IP of the same port."""
    with self.subnet() as s:
        ip_range = list(netaddr.IPNetwork(s['subnet']['cidr']))
        fixed_ips = [{'ip_address': str(ip_range[-3])},
                     {'ip_address': str(ip_range[-2])}]
        with self.port(subnet=s, fixed_ips=fixed_ips) as p:
            with self.floatingip_with_assoc(
                port_id=p['port']['id'],
                fixed_ip=str(ip_range[-3])) as fip:
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertEqual(fip['floatingip']['id'],
                                 body['floatingip']['id'])
                self.assertEqual(fip['floatingip']['port_id'],
                                 body['floatingip']['port_id'])
                self.assertEqual(str(ip_range[-3]),
                                 body['floatingip']['fixed_ip_address'])
                self.assertIsNotNone(body['floatingip']['router_id'])
                # Same port, different fixed IP: the update must succeed
                # and report the new fixed address.
                body_2 = self._update(
                    'floatingips', fip['floatingip']['id'],
                    {'floatingip': {'port_id': p['port']['id'],
                                    'fixed_ip_address': str(ip_range[-2])}
                     })
                self.assertEqual(fip['floatingip']['port_id'],
                                 body_2['floatingip']['port_id'])
                self.assertEqual(str(ip_range[-2]),
                                 body_2['floatingip']['fixed_ip_address'])
def test_floatingip_update_different_router(self):
    """Re-associating a fip moves it between routers as ports change."""
    # Create subnet with different CIDRs to account for plugins which
    # do not support overlapping IPs
    with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                           self.subnet(cidr='10.0.1.0/24')) as (
            s1, s2):
        with contextlib.nested(self.port(subnet=s1),
                               self.port(subnet=s2)) as (p1, p2):
            private_sub1 = {'subnet':
                            {'id':
                             p1['port']['fixed_ips'][0]['subnet_id']}}
            private_sub2 = {'subnet':
                            {'id':
                             p2['port']['fixed_ips'][0]['subnet_id']}}
            with self.subnet(cidr='12.0.0.0/24') as public_sub:
                with contextlib.nested(
                        self.floatingip_no_assoc_with_public_sub(
                            private_sub1, public_sub=public_sub),
                        self.floatingip_no_assoc_with_public_sub(
                            private_sub2, public_sub=public_sub)) as (
                        (fip1, r1), (fip2, r2)):

                    def assert_no_assoc(fip):
                        # fip must start without any port binding
                        body = self._show('floatingips',
                                          fip['floatingip']['id'])
                        self.assertIsNone(body['floatingip']['port_id'])
                        self.assertIsNone(
                            body['floatingip']['fixed_ip_address'])

                    assert_no_assoc(fip1)
                    assert_no_assoc(fip2)

                    def associate_and_assert(fip, port):
                        # bind fip to port and return the resulting
                        # router id for the caller to verify
                        port_id = port['port']['id']
                        ip_address = (port['port']['fixed_ips']
                                      [0]['ip_address'])
                        body = self._update(
                            'floatingips', fip['floatingip']['id'],
                            {'floatingip': {'port_id': port_id}})
                        self.assertEqual(body['floatingip']['port_id'],
                                         port_id)
                        self.assertEqual(
                            body['floatingip']['fixed_ip_address'],
                            ip_address)
                        return body['floatingip']['router_id']

                    fip1_r1_res = associate_and_assert(fip1, p1)
                    self.assertEqual(fip1_r1_res, r1['router']['id'])
                    # The following operation will associate the floating
                    # ip to a different router
                    fip1_r2_res = associate_and_assert(fip1, p2)
                    self.assertEqual(fip1_r2_res, r2['router']['id'])
                    fip2_r1_res = associate_and_assert(fip2, p1)
                    self.assertEqual(fip2_r1_res, r1['router']['id'])
                    # disassociate fip1
                    self._update(
                        'floatingips', fip1['floatingip']['id'],
                        {'floatingip': {'port_id': None}})
                    fip2_r2_res = associate_and_assert(fip2, p2)
                    self.assertEqual(fip2_r2_res, r2['router']['id'])
def test_floatingip_port_delete(self):
    """Deleting the associated port clears the fip's binding fields."""
    with self.subnet() as private_sub:
        with self.floatingip_no_assoc(private_sub) as fip:
            with self.port(subnet=private_sub) as p:
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip':
                                     {'port_id': p['port']['id']}})
            # note: once this port goes out of scope, the port will be
            # deleted, which is what we want to test. We want to confirm
            # that the fields are set back to None
            body = self._show('floatingips', fip['floatingip']['id'])
            self.assertEqual(body['floatingip']['id'],
                             fip['floatingip']['id'])
            self.assertIsNone(body['floatingip']['port_id'])
            self.assertIsNone(body['floatingip']['fixed_ip_address'])
            self.assertIsNone(body['floatingip']['router_id'])
def test_two_fips_one_port_invalid_return_409(self):
    """A second fip on the same port/fixed IP must conflict."""
    with self.floatingip_with_assoc() as fip1:
        # Pass port_id by keyword for consistency with the other
        # _create_floatingip callers in this mixin (same argument slot).
        res = self._create_floatingip(
            self.fmt,
            fip1['floatingip']['floating_network_id'],
            port_id=fip1['floatingip']['port_id'])
        self.assertEqual(res.status_int, exc.HTTPConflict.code)
def test_floating_ip_direct_port_delete_returns_409(self):
    """The internal fip port cannot be deleted via the port API."""
    found = False
    with self.floatingip_with_assoc():
        for p in self._list('ports')['ports']:
            if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP:
                self._delete('ports', p['id'],
                             expected_code=exc.HTTPConflict.code)
                found = True
    # Guard against a vacuous pass if no fip port was listed at all.
    self.assertTrue(found)
def _test_floatingip_with_invalid_create_port(self, plugin_class):
    """fip creation returns 400 when create_port yields no fixed IPs."""
    with self.port() as p:
        private_sub = {'subnet': {'id':
                                  p['port']['fixed_ips'][0]['subnet_id']}}
        with self.subnet(cidr='12.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            res = self._create_router(self.fmt, _uuid())
            r = self.deserialize(self.fmt, res)

            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self._router_interface_action(
                'add', r['router']['id'],
                private_sub['subnet']['id'],
                None)

            with mock.patch(plugin_class + '.create_port') as createport:
                # Simulate a port created without any fixed IP address.
                createport.return_value = {'fixed_ips': []}
                res = self._create_floatingip(
                    self.fmt, public_sub['subnet']['network_id'],
                    port_id=p['port']['id'])
                self.assertEqual(res.status_int,
                                 exc.HTTPBadRequest.code)
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              private_sub
                                              ['subnet']['id'],
                                              None)
                self._delete('routers', r['router']['id'])
def test_floatingip_with_invalid_create_port(self):
    """create_port returning no fixed IPs must fail fip creation."""
    plugin_class = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
    self._test_floatingip_with_invalid_create_port(plugin_class)
def test_create_floatingip_no_ext_gateway_return_404(self):
    """Without a router gateway there is no path to the port: 404."""
    with self.subnet() as public_sub:
        self._set_net_external(public_sub['subnet']['network_id'])
        with self.port() as private_port:
            with self.router():
                res = self._create_floatingip(
                    self.fmt,
                    public_sub['subnet']['network_id'],
                    port_id=private_port['port']['id'])
                # this should be some kind of error
                self.assertEqual(res.status_int, exc.HTTPNotFound.code)
def test_create_floating_non_ext_network_returns_400(self):
    """A fip cannot be allocated from a non-external network."""
    with self.subnet() as public_sub:
        # normally we would set the network of public_sub to be
        # external, but the point of this test is to handle when
        # that is not the case
        with self.router():
            res = self._create_floatingip(
                self.fmt,
                public_sub['subnet']['network_id'])
            self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_floatingip_no_public_subnet_returns_400(self):
    """An external network without a subnet cannot supply a fip."""
    with self.network() as public_network:
        with self.port() as private_port:
            with self.router() as r:
                sid = private_port['port']['fixed_ips'][0]['subnet_id']
                private_sub = {'subnet': {'id': sid}}
                self._router_interface_action('add', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)

                # public_network has no subnet to allocate from
                res = self._create_floatingip(
                    self.fmt,
                    public_network['network']['id'],
                    port_id=private_port['port']['id'])
                self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
                # cleanup
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
def test_create_floatingip_invalid_floating_network_id_returns_400(self):
    """A malformed floating network id is rejected at the API layer."""
    # API-level test - no need to create all objects for l3 plugin
    response = self._create_floatingip(self.fmt, 'iamnotanuuid',
                                       uuidutils.generate_uuid(),
                                       '192.168.0.1')
    self.assertEqual(400, response.status_int)
def test_create_floatingip_invalid_floating_port_id_returns_400(self):
    """A malformed port id is rejected at the API layer."""
    # API-level test - no need to create all objects for l3 plugin
    response = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
                                       'iamnotanuuid', '192.168.0.1')
    self.assertEqual(400, response.status_int)
def test_create_floatingip_invalid_fixed_ip_address_returns_400(self):
    """A malformed fixed IP address is rejected at the API layer."""
    # API-level test - no need to create all objects for l3 plugin
    response = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
                                       uuidutils.generate_uuid(),
                                       'iamnotnanip')
    self.assertEqual(400, response.status_int)
def test_floatingip_list_with_sort(self):
    """Fip listing honours descending sort on floating_ip_address."""
    with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                           self.subnet(cidr="11.0.0.0/24"),
                           self.subnet(cidr="12.0.0.0/24")
                           ) as (s1, s2, s3):
        network_id1 = s1['subnet']['network_id']
        network_id2 = s2['subnet']['network_id']
        network_id3 = s3['subnet']['network_id']
        self._set_net_external(network_id1)
        self._set_net_external(network_id2)
        self._set_net_external(network_id3)
        fp1 = self._make_floatingip(self.fmt, network_id1)
        fp2 = self._make_floatingip(self.fmt, network_id2)
        fp3 = self._make_floatingip(self.fmt, network_id3)
        try:
            # Highest CIDR first when sorting by address descending.
            self._test_list_with_sort('floatingip', (fp3, fp2, fp1),
                                      [('floating_ip_address', 'desc')])
        finally:
            self._delete('floatingips', fp1['floatingip']['id'])
            self._delete('floatingips', fp2['floatingip']['id'])
            self._delete('floatingips', fp3['floatingip']['id'])
def test_floatingip_list_with_port_id(self):
    """Fips can be filtered by the port they are bound to."""
    with self.floatingip_with_assoc() as fip:
        bound_port = fip['floatingip']['port_id']
        matches = self._list('floatingips',
                             query_params="port_id=%s" % bound_port)
        self.assertEqual(1, len(matches['floatingips']))
        # A bogus port id must match nothing.
        misses = self._list('floatingips', query_params="port_id=aaa")
        self.assertEqual(0, len(misses['floatingips']))
def test_floatingip_list_with_pagination(self):
    """Fip listing pages forward correctly (page size 2, 2 pages)."""
    with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                           self.subnet(cidr="11.0.0.0/24"),
                           self.subnet(cidr="12.0.0.0/24")
                           ) as (s1, s2, s3):
        network_id1 = s1['subnet']['network_id']
        network_id2 = s2['subnet']['network_id']
        network_id3 = s3['subnet']['network_id']
        self._set_net_external(network_id1)
        self._set_net_external(network_id2)
        self._set_net_external(network_id3)
        fp1 = self._make_floatingip(self.fmt, network_id1)
        fp2 = self._make_floatingip(self.fmt, network_id2)
        fp3 = self._make_floatingip(self.fmt, network_id3)
        try:
            self._test_list_with_pagination(
                'floatingip', (fp1, fp2, fp3),
                ('floating_ip_address', 'asc'), 2, 2)
        finally:
            self._delete('floatingips', fp1['floatingip']['id'])
            self._delete('floatingips', fp2['floatingip']['id'])
            self._delete('floatingips', fp3['floatingip']['id'])
def test_floatingip_list_with_pagination_reverse(self):
    """Fip listing pages backward correctly (page size 2, 2 pages)."""
    with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                           self.subnet(cidr="11.0.0.0/24"),
                           self.subnet(cidr="12.0.0.0/24")
                           ) as (s1, s2, s3):
        network_id1 = s1['subnet']['network_id']
        network_id2 = s2['subnet']['network_id']
        network_id3 = s3['subnet']['network_id']
        self._set_net_external(network_id1)
        self._set_net_external(network_id2)
        self._set_net_external(network_id3)
        fp1 = self._make_floatingip(self.fmt, network_id1)
        fp2 = self._make_floatingip(self.fmt, network_id2)
        fp3 = self._make_floatingip(self.fmt, network_id3)
        try:
            self._test_list_with_pagination_reverse(
                'floatingip', (fp1, fp2, fp3),
                ('floating_ip_address', 'asc'), 2, 2)
        finally:
            self._delete('floatingips', fp1['floatingip']['id'])
            self._delete('floatingips', fp2['floatingip']['id'])
            self._delete('floatingips', fp3['floatingip']['id'])
def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
    """Removing (by subnet) a router interface a fip needs must 409."""
    found = False
    with self.floatingip_with_assoc():
        for p in self._list('ports')['ports']:
            if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                subnet_id = p['fixed_ips'][0]['subnet_id']
                router_id = p['device_id']
                self._router_interface_action(
                    'remove', router_id, subnet_id, None,
                    expected_code=exc.HTTPConflict.code)
                found = True
                break
    # Guard against a vacuous pass if no interface port was listed.
    self.assertTrue(found)
def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
    """Removing (by port) a router interface a fip needs must 409."""
    found = False
    with self.floatingip_with_assoc():
        for p in self._list('ports')['ports']:
            if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                router_id = p['device_id']
                self._router_interface_action(
                    'remove', router_id, None, p['id'],
                    expected_code=exc.HTTPConflict.code)
                found = True
                break
    # Guard against a vacuous pass if no interface port was listed.
    self.assertTrue(found)
def test_router_delete_subnet_inuse_returns_409(self):
    """A subnet attached to a router cannot be deleted."""
    with self.router() as r:
        with self.subnet() as s:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          s['subnet']['id'],
                                          None)
            # subnet cannot be deleted as it's attached to a router
            self._delete('subnets', s['subnet']['id'],
                         expected_code=exc.HTTPConflict.code)
            # remove interface so test can exit without errors
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          s['subnet']['id'],
                                          None)
def test_delete_ext_net_with_disassociated_floating_ips(self):
    """Exercise external-net teardown while an unbound fip exists.

    The subnet is created with do_delete=False so the fixture teardown
    drives the network deletion path; the fip created here is never
    associated with a port. (Assertion of the outcome is presumably
    handled by the fixture exit — TODO confirm against the test base.)
    """
    with self.network() as net:
        net_id = net['network']['id']
        self._set_net_external(net_id)
        with self.subnet(network=net, do_delete=False):
            self._make_floatingip(self.fmt, net_id)
class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
    """Unit tests for methods called by the L3 agent."""

    def test_l3_agent_routers_query_interfaces(self):
        """get_sync_data reports the router's interface ports."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])

                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), None)
                self.assertEqual(1, len(routers))
                interfaces = routers[0][l3_constants.INTERFACE_KEY]
                self.assertEqual(1, len(interfaces))
                subnet_id = interfaces[0]['subnet']['id']
                wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id']
                self.assertEqual(wanted_subnetid, subnet_id)
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])

    def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(self):
        """Interfaces with more than one fixed IP are skipped in sync data."""
        with self.router() as r:
            with self.subnet(cidr='9.0.1.0/24') as subnet:
                with self.port(subnet=subnet,
                               do_delete=False,
                               fixed_ips=[{'ip_address': '9.0.1.3'}]) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # Give the interface port two fixed IPs behind the
                    # plugin's back; the sync data must not report it.
                    port = {'port': {'fixed_ips':
                                     [{'ip_address': '9.0.1.4',
                                       'subnet_id': subnet['subnet']['id']},
                                      {'ip_address': '9.0.1.5',
                                       'subnet_id': subnet['subnet']['id']}]}}
                    ctx = context.get_admin_context()
                    self.core_plugin.update_port(ctx, p['port']['id'], port)
                    routers = self.plugin.get_sync_data(ctx, None)
                    self.assertEqual(1, len(routers))
                    interfaces = routers[0].get(l3_constants.INTERFACE_KEY, [])
                    self.assertEqual(1, len(interfaces))
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_l3_agent_routers_query_gateway(self):
        """get_sync_data reports the router's gateway port."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), [r['router']['id']])
                self.assertEqual(1, len(routers))
                gw_port = routers[0]['gw_port']
                self.assertEqual(s['subnet']['id'], gw_port['subnet']['id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])

    def test_l3_agent_routers_query_floatingips(self):
        """get_sync_data reports the router's floating IPs."""
        with self.floatingip_with_assoc() as fip:
            routers = self.plugin.get_sync_data(
                context.get_admin_context(), [fip['floatingip']['router_id']])
            self.assertEqual(1, len(routers))
            floatingips = routers[0][l3_constants.FLOATINGIP_KEY]
            self.assertEqual(1, len(floatingips))
            self.assertEqual(floatingips[0]['id'],
                             fip['floatingip']['id'])
            self.assertEqual(floatingips[0]['port_id'],
                             fip['floatingip']['port_id'])
            self.assertIsNotNone(floatingips[0]['fixed_ip_address'])
            self.assertIsNotNone(floatingips[0]['router_id'])

    def _test_notify_op_agent(self, target_func, *args):
        """Run target_func with a mocked L3 agent notifier appended.

        The real notifier is swapped out for the duration of the call
        and restored afterwards regardless of the outcome.
        """
        l3_rpc_agent_api_str = (
            'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI')
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        oldNotify = plugin.l3_rpc_notifier
        # try/finally replaces the original duplicated restore code in
        # except/else branches; list(args) replaces a manual copy loop.
        try:
            with mock.patch(l3_rpc_agent_api_str) as notifyApi:
                plugin.l3_rpc_notifier = notifyApi
                kargs = list(args)
                kargs.append(notifyApi)
                target_func(*kargs)
        finally:
            plugin.l3_rpc_notifier = oldNotify

    def _test_router_gateway_op_agent(self, notifyApi):
        """Gateway add/remove each trigger a routers_updated notification."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self.assertEqual(
                    2, notifyApi.routers_updated.call_count)

    def test_router_gateway_op_agent(self):
        self._test_notify_op_agent(self._test_router_gateway_op_agent)

    def _test_interfaces_op_agent(self, r, notifyApi):
        """Interface add/remove each trigger a routers_updated notification."""
        with self.port(do_delete=False) as p:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
            # clean-up
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
        self.assertEqual(2, notifyApi.routers_updated.call_count)

    def test_interfaces_op_agent(self):
        with self.router() as r:
            self._test_notify_op_agent(
                self._test_interfaces_op_agent, r)

    def _test_floatingips_op_agent(self, notifyApi):
        """The full fip lifecycle triggers six routers_updated calls."""
        with self.floatingip_with_assoc():
            pass
        # add gateway, add interface, associate, deletion of floatingip,
        # delete gateway, delete interface
        self.assertEqual(6, notifyApi.routers_updated.call_count)

    def test_floatingips_op_agent(self):
        self._test_notify_op_agent(self._test_floatingips_op_agent)
class L3BaseForIntTests(test_db_plugin.NeutronDbPluginV2TestCase,
                        testlib_plugin.NotificationSetupHelper):
    """Base for tests where the core plugin itself implements L3."""

    # Subclasses may clear this to keep real rescheduling behavior.
    mock_rescheduling = True

    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
        if not plugin:
            plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin'
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or L3TestExtensionManager()
        if self.mock_rescheduling:
            # Disable the rescheduling check so router updates do not
            # depend on agent scheduling state.
            mock.patch('%s._check_router_needs_rescheduling' % plugin,
                       new=lambda *a: False).start()
        super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)
        self.setup_notification_driver()
class L3BaseForSepTests(test_db_plugin.NeutronDbPluginV2TestCase,
                        testlib_plugin.NotificationSetupHelper):
    """Base for tests where L3 lives in a separate service plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        # the plugin without L3 support
        if not plugin:
            plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
                     'TestL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        if not ext_mgr:
            ext_mgr = L3TestExtensionManager()
        super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)
        self.setup_notification_driver()
class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests,
                                        L3NatTestCaseMixin,
                                        test_agent_ext_plugin.
                                        AgentDBTestMixIn):

    """Unit tests for core plugin with L3 routing and scheduling integrated."""

    def setUp(self, plugin='neutron.tests.unit.test_l3_plugin.'
                           'TestL3NatIntAgentSchedulingPlugin',
              ext_mgr=None, service_plugins=None):
        # Keep real rescheduling behavior: it is what these tests verify.
        self.mock_rescheduling = False
        super(L3NatDBIntAgentSchedulingTestCase, self).setUp(
            plugin, ext_mgr, service_plugins)
        self.adminContext = context.get_admin_context()

    def _assert_router_on_agent(self, router_id, agent_host):
        """Assert the router is hosted by exactly one agent on agent_host."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        agents = plugin.list_l3_agents_hosting_router(
            self.adminContext, router_id)['agents']
        self.assertEqual(len(agents), 1)
        self.assertEqual(agents[0]['host'], agent_host)

    def test_update_gateway_agent_exists_supporting_network(self):
        """Changing the gateway moves the router to the matching agent."""
        with contextlib.nested(self.router(),
                               self.subnet(),
                               self.subnet()) as (r, s1, s2):
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            # host1 serves s1's external net, host2 serves s2's.
            self._register_one_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            self._register_one_l3_agent(
                host='host2', internal_only=False,
                ext_net_id=s2['subnet']['network_id'])
            l3_rpc_cb.sync_routers(self.adminContext,
                                   host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

            self._remove_external_gateway_from_router(
                r['router']['id'],
                s2['subnet']['network_id'])

    def test_update_gateway_agent_exists_supporting_multiple_network(self):
        """A multi-network agent picks up the router on gateway change."""
        with contextlib.nested(self.router(),
                               self.subnet(),
                               self.subnet()) as (r, s1, s2):
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            # host2 has no dedicated external net and so can serve any.
            self._register_one_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            self._register_one_l3_agent(
                host='host2', internal_only=False,
                ext_net_id='', ext_bridge='')
            l3_rpc_cb.sync_routers(self.adminContext,
                                   host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

            self._remove_external_gateway_from_router(
                r['router']['id'],
                s2['subnet']['network_id'])

    def test_router_update_gateway_no_eligible_l3_agent(self):
        """Setting a gateway with no eligible agent is rejected with 400."""
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet() as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'],
                        expected_code=exc.HTTPBadRequest.code)
class L3RpcCallbackTestCase(base.BaseTestCase):
    """Unit tests for L3RpcCallback with its plugin properties mocked."""

    def setUp(self):
        super(L3RpcCallbackTestCase, self).setUp()
        self.mock_plugin = mock.patch.object(
            l3_rpc.L3RpcCallback,
            'plugin', new_callable=mock.PropertyMock).start()
        self.mock_l3plugin = mock.patch.object(
            l3_rpc.L3RpcCallback,
            'l3plugin', new_callable=mock.PropertyMock).start()
        self.l3_rpc_cb = l3_rpc.L3RpcCallback()

    def test__ensure_host_set_on_port_update_on_concurrent_delete(self):
        """A port deleted mid-update is logged at debug, not raised."""
        port_id = 'foo_port_id'
        port = {
            'id': port_id,
            'device_owner': 'compute:None',
            portbindings.HOST_ID: '',
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED
        }
        router_id = 'foo_router_id'
        # Simulate the port disappearing while the host is being set.
        self.l3_rpc_cb.plugin.update_port.side_effect = n_exc.PortNotFound(
            port_id=port_id)
        with mock.patch.object(l3_rpc.LOG, 'debug') as mock_log:
            self.l3_rpc_cb._ensure_host_set_on_port(
                mock.ANY, mock.ANY, port, router_id)
        self.l3_rpc_cb.plugin.update_port.assert_called_once_with(
            mock.ANY, port_id, {'port': {'binding:host_id': mock.ANY}})
        self.assertTrue(mock_log.call_count)
        expected_message = ('Port foo_port_id not found while updating '
                            'agent binding for router foo_router_id.')
        actual_message = mock_log.call_args[0][0]
        self.assertEqual(expected_message, actual_message)
class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for
    the case where core plugin implements L3 routing.
    """

    def setUp(self):
        super(L3AgentDbIntTestCase, self).setUp()
        # Core plugin and L3 plugin are one and the same here.
        self.core_plugin = TestL3NatIntPlugin()
        self.plugin = self.core_plugin
class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for the
    case where separate service plugin implements L3 routing.
    """

    def setUp(self):
        super(L3AgentDbSepTestCase, self).setUp()
        # Distinct core and L3 plugins, matching the Sep base fixture.
        self.core_plugin = TestNoL3NatPlugin()
        self.plugin = TestL3NatServicePlugin()
class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase):

    """Unit tests for core plugin with L3 routing integrated."""
    # All behavior is inherited from the two bases.
    pass
class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase):

    """Unit tests for a separate L3 routing service plugin."""
    # All behavior is inherited from the two bases.
    pass
class L3NatDBIntTestCaseXML(L3NatDBIntTestCase):
    """Same integrated-L3 tests run with the XML serialization format."""
    fmt = 'xml'
class L3NatDBSepTestCaseXML(L3NatDBSepTestCase):
    """Same separate-plugin L3 tests run with the XML serialization format."""
    fmt = 'xml'
| |
import shutil
import tempfile
from model_mommy import mommy
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.text import slugify
from rest_framework import status
from cities.models import City
from meupet import forms
from meupet.models import Kind, Pet, PetStatus, StatusGroup
from meupet.views import paginate_pets
from users.models import OwnerProfile
MEDIA_ROOT = tempfile.mkdtemp()
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class MeuPetTestCase(TestCase):
    """Base case providing an owner user, a test city and pet factories."""

    def setUp(self):
        self.admin = OwnerProfile.objects.create_user(
            username="admin", password="admin", facebook="http://www.facebook.com/owner_profile"
        )
        self.test_city = mommy.make(City, name="Testing City")

    def create_pet(self, status=None, kind=None, **kwargs):
        """Create one pet (or a batch via _quantity), optionally re-kinded."""
        if not status:
            status_group = mommy.make(StatusGroup)
            status = mommy.make(PetStatus, final=False, group=status_group)
        created = mommy.make(Pet, status=status, owner=self.admin, _create_files=True, **kwargs)
        if kind:
            kind_obj, _ = Kind.objects.get_or_create(kind=kind, slug=slugify(kind))
            # With _quantity mommy returns a list; normalize to iterate.
            batch = created if kwargs.get("_quantity", None) else [created]
            for animal in batch:
                animal.kind = kind_obj
                animal.save()
        return created

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary media directory created for uploads.
        shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()
class MeuPetTest(MeuPetTestCase):
def test_titleize_name(self):
    """Force the case of the pet's name to be titleized"""
    data = {"name": "TESTING NAME"}
    form = forms.PetForm(data=data)
    form.is_valid()
    # assertEqual: assertEquals is a deprecated alias (removed in
    # Python 3.12's unittest).
    self.assertEqual(form.cleaned_data["name"], "Testing Name")
def test_display_all_pets(self):
    """Display recently added pets in the index page"""
    first_pet = self.create_pet()
    second_pet = self.create_pet()

    home = self.client.get(reverse("meupet:index"))

    self.assertContains(home, first_pet.name)
    self.assertContains(home, second_pet.name)
def test_display_kinds_sidebar(self):
    """The side bar should show only kinds that have pets registered and active"""
    # A kind with no pets at all must not be listed.
    Kind.objects.get_or_create(kind="0 Pets")
    first_pet = self.create_pet(kind="Cat")
    second_pet = self.create_pet(kind="Dog")
    inactive_pet = self.create_pet(kind="Inactive", active=False)

    home = self.client.get(reverse("meupet:index"))

    self.assertContains(home, first_pet.kind.kind)
    self.assertContains(home, second_pet.kind.kind)
    self.assertNotContains(home, "0 Pets")
    self.assertNotContains(home, inactive_pet.kind.kind)
def test_display_only_pets_from_kind(self):
    """Only display the actives pets from the kind being shown"""
    first_cat = self.create_pet(kind="Cat")
    second_cat = self.create_pet(kind="Cat", status=first_cat.status)
    inactive_cat = self.create_pet(kind="Cat", active=False)
    dog = self.create_pet(kind="Dog")
    kind = Kind.objects.get(kind="Cat")

    content = self.client.get(reverse("meupet:pet_list", args=[first_cat.status.group.slug, kind.slug]))
    pets_count = Pet.objects.actives().filter(kind=kind).count()

    self.assertContains(content, first_cat.name)
    self.assertContains(content, second_cat.name)
    self.assertNotContains(content, inactive_cat.name)
    self.assertNotContains(content, dog.name)
    # Only the two active cats count towards the kind.
    self.assertEqual(2, pets_count)
def test_pet_list_should_return_404_on_group_not_exists(self):
    """Listing pets for an unknown status group must 404."""
    response = self.client.get(reverse("meupet:pet_list", args=["invalid", "group"]))

    self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_show_edit_button_for_own_if_logged_pet(self):
"""Show the edit button only if the owner is logged in"""
pet = self.create_pet()
self.client.login(username="admin", password="admin")
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, "Edit")
self.assertContains(response, reverse("meupet:edit", args=[pet.slug]))
def test_load_data_for_editing_pet(self):
"""Assert that the saved data is being preloaded in the edit page"""
pet = self.create_pet()
self.client.login(username="admin", password="admin")
response = self.client.get(reverse("meupet:edit", args=[pet.slug]))
self.assertTemplateUsed(response, "meupet/edit.html")
self.assertContains(response, pet.name)
self.assertContains(response, pet.description)
self.assertContains(response, "Save Changes")
def test_can_edit_pet(self):
"""Pet's owner can edit it's own pet"""
self.client.login(username="admin", password="admin")
pet = self.create_pet(kind="Cat")
response_post = self.client.post(
reverse("meupet:edit", args=[pet.slug]),
data={
"name": "Testing Fuzzy Boots",
"description": "My lovely cat",
"state": self.test_city.state.code,
"city": self.test_city.code,
"kind": pet.kind.id,
"status": pet.status.id,
"profile_picture": pet.profile_picture.url,
},
)
response_get = self.client.get(pet.get_absolute_url())
self.assertRedirects(response_post, pet.get_absolute_url())
self.assertContains(response_get, "Testing Fuzzy Boots")
def test_show_facebook_link(self):
"""Displays a link to the Facebook profile of the owner in the of the pet's details"""
pet = self.create_pet()
resp_with_facebook = self.client.get(pet.get_absolute_url())
self.assertContains(resp_with_facebook, "http://www.facebook.com/owner_profile")
def test_show_link_for_owner_profile(self):
"""Displays a link to the profile of the owner"""
pet = self.create_pet()
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, reverse("users:user_profile", args=[self.admin.id]))
def test_should_redirect_if_not_confirmed(self):
"""Don't allow users to register a pet if their info are not confirmed"""
self.client.login(username="admin", password="admin")
response = self.client.get(reverse("meupet:register"))
self.assertRedirects(response, "/user/profile/edit/")
def test_should_access_if_confirmed(self):
"""Allow user to access the pet register page if the information is confirmed"""
self.admin.is_information_confirmed = True
self.admin.save()
self.client.login(username="admin", password="admin")
response = self.client.get(reverse("meupet:register"))
self.assertTemplateUsed(response, "meupet/register_pet.html")
def test_only_owner_can_see_edit_page(self):
"""Do not show the edit page if the logged user is not the owner"""
OwnerProfile.objects.create_user(username="Other User", password="otherpass")
pet = self.create_pet()
self.client.login(username="Other User", password="otherpass")
response = self.client.get(reverse("meupet:edit", args=[pet.slug]))
self.assertRedirects(response, pet.get_absolute_url())
def test_display_status_on_pet_page(self):
"""Show the name of the pet and the readable status name"""
missing_pet = self.create_pet()
response_missing = self.client.get(missing_pet.get_absolute_url())
self.assertContains(
response_missing, "{0} - {1}".format(missing_pet.name, missing_pet.status.description)
)
def test_incorrect_form_submission_reload_page_with_values(self):
"""Incomplete form submission should reload the page
preserving the submitted information"""
self.client.login(username="admin", password="admin")
response = self.client.post(
reverse("meupet:register"), {"description": "Test Description"}, follow=True
)
self.assertContains(response, "Test Description")
def test_show_add_photo_button_in_pet_page_owner_logged_in(self):
"""Display button to add more photos to the pet profile"""
pet = self.create_pet()
self.client.login(username="admin", password="admin")
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, "Submit Image")
self.assertContains(response, "another_picture")
def test_show_city(self):
"""Display the name of the city where the pet belongs"""
pet = self.create_pet(city=self.test_city)
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, self.test_city)
def test_show_size(self):
"""Display the human readable size of the pet"""
pet = self.create_pet(size=Pet.SMALL)
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, "Small")
def test_search_without_filters(self):
"""Show message informing the user that she needs to use at least
one filter to be able to search"""
response = self.client.post(reverse("meupet:search"), {})
self.assertContains(response, "You must select at least one filter")
def test_search_with_filter(self):
"""Search by city should show the pet"""
pet = self.create_pet(city=self.test_city)
inactive_pet = self.create_pet(city=self.test_city, active=False)
response = self.client.post(reverse("meupet:search"), {"city": self.test_city.id}, follow=True)
self.assertContains(response, pet.name)
self.assertNotContains(response, inactive_pet.name)
self.assertContains(response, pet.city)
def test_show_pet_sex(self):
"""Display the human readable sex of the pet"""
pet = self.create_pet(sex=Pet.FEMALE)
response = self.client.get(pet.get_absolute_url())
self.assertContains(response, "Female")
def test_get_pets_unpublished(self):
"""Manager method should return pets not published on Facebook yet"""
pet = self.create_pet()
self.create_pet(published=True, _quantity=3)
pets = Pet.objects.get_unpublished_pets()
self.assertIn(pet, pets)
self.assertEqual(pets.count(), 1)
class PaginationListPetViewTest(MeuPetTestCase):
    """Tests for the paginated pet list view and the paginate_pets helper."""
    def setUp(self):
        super(PaginationListPetViewTest, self).setUp()
        self.status_group = mommy.make(StatusGroup)
        self.status = mommy.make(PetStatus, group=self.status_group)
        # A single pet is enough: the tests only check which page it lands on.
        self.pet = self.create_pet(kind="First Kind", status=self.status)
    def test_get_page_query_string(self):
        """Should return the page informed in the query string"""
        resp = self.client.get(
            reverse("meupet:pet_list", args=[self.status_group.slug, self.pet.kind.slug]), {"page": 1}
        )
        self.assertContains(resp, self.pet.name)
    def test_page_not_integer(self):
        """In case of a non-integer page, i.e., an incorrect URL, we should
        show the first page"""
        pets, _ = paginate_pets(Pet.objects.all(), "page")
        self.assertEqual(1, len(pets))
        self.assertEqual("First Kind", pets[0].kind.kind)
    def test_empty_page(self):
        """If the user inform an empty page we should present the last page with data"""
        pets, _ = paginate_pets(Pet.objects.all(), 42)
        self.assertEqual(1, len(pets))
        self.assertEqual("First Kind", pets[0].kind.kind)
| |
from kessel import combinators
from kessel import primitives
from kessel import test_utils
class TryTest(test_utils.TestCase):
    """Tests for the try_ combinator."""

    def test_parses(self):
        parser = combinators.try_(primitives.match(lambda c: c == "h"))
        self.assertParse(parser, "h", "ello", "hello")

    def test_fails_to_parse(self):
        failure = self.assertParseFailure(
            combinators.try_(primitives.error({"error!"})), "hello")
        self.assertEqual({"error!"}, failure.expected)

    def test_fails_to_parse_ll2(self):
        # try_ makes failure non-consuming even after two characters matched.
        parser = combinators.try_(combinators.sequence(
            primitives.any_, primitives.any_, primitives.error({"error!"})))
        failure = self.assertParseFailure(parser, "test")
        self.assertEqual({"error!"}, failure.expected)
class MapfTest(test_utils.TestCase):
    """Tests for the mapf and mapf_star combinators."""

    def test_parses(self):
        parser = combinators.mapf(primitives.unit(lambda: 1))(lambda x: x)
        self.assertParse(parser, 1, "test", "test")

    def test_parses_star(self):
        # mapf_star unpacks the parsed list into the function's arguments.
        parser = combinators.mapf_star(
            primitives.unit(lambda: [1, 2]))(lambda x, y: x + y)
        self.assertParse(parser, 3, "test", "test")
class ChoiceTest(test_utils.TestCase):
    """Tests for the choice combinator."""

    def test_parses(self):
        h_or_a = combinators.choice(primitives.match(lambda c: c == "h"),
                                    primitives.match(lambda c: c == "a"))
        self.assertParse(h_or_a, "h", "ello", "hello")

    def test_parses_second(self):
        h_or_y = combinators.choice(primitives.match(lambda c: c == "h"),
                                    primitives.match(lambda c: c == "y"))
        self.assertParse(h_or_y, "y", "ello", "yello")

    def test_fails_to_parse(self):
        # Both alternatives fail without consuming, so their expected sets
        # are merged in the reported failure.
        failure = self.assertParseFailure(
            combinators.choice(primitives.error({"error1!"}),
                               primitives.error({"error2!"})),
            "hello")
        self.assertEqual({"error1!", "error2!"}, failure.expected)

    def test_fails_to_parse_non_ll1(self):
        # The first alternative consumes "h" before failing, so choice does
        # not fall back to the second alternative.
        failure = self.assertSimpleParseFailure(
            combinators.choice(
                combinators.sequence(primitives.match(lambda c: c == "h"),
                                     primitives.match(lambda c: c == "a")),
                combinators.sequence(primitives.match(lambda c: c == "b"),
                                     primitives.match(lambda c: c == "e"))),
            "hello")
        self.assertEqual("e", failure.value)
        self.assertEqual(list("llo"), list(failure.it))
        self.assertEqual(set(), failure.expected)
class SequenceTest(test_utils.TestCase):
    """Tests for the sequence combinator."""

    def test_parses(self):
        two_chars = combinators.sequence(primitives.any_, primitives.any_)
        self.assertParse(two_chars, ["h", "e"], "llo", "hello")

    def test_fails_to_parse(self):
        failure = self.assertParseFailure(
            combinators.sequence(primitives.error({"error!"}),
                                 primitives.any_),
            "hello")
        self.assertEqual({"error!"}, failure.expected)

    def test_fails_to_parse_second(self):
        # The first parser consumes "h" before the second one fails.
        failure = self.assertSimpleParseFailure(
            combinators.sequence(primitives.any_,
                                 primitives.error({"error!"})),
            "hello")
        self.assertEqual("e", failure.value)
        self.assertEqual(list("llo"), list(failure.it))
        self.assertEqual({"error!"}, failure.expected)
class SequenceLTest(test_utils.TestCase):
    """Tests for sequence_l, which keeps only the left-hand result."""

    def test_parses(self):
        self.assertParse(
            combinators.sequence_l(primitives.any_, primitives.any_),
            "h", "llo", "hello")

    def test_fails_to_parse(self):
        # BUG FIX: the failure tests previously exercised
        # combinators.sequence (copy-paste from SequenceTest), leaving the
        # failure paths of sequence_l itself untested. The expected failure
        # values are identical for both combinators.
        e = self.assertParseFailure(
            combinators.sequence_l(primitives.error({"error!"}),
                                   primitives.any_),
            "hello")
        self.assertEqual({"error!"}, e.expected)

    def test_fails_to_parse_second(self):
        e = self.assertSimpleParseFailure(
            combinators.sequence_l(primitives.any_,
                                   primitives.error({"error!"})),
            "hello")
        self.assertEqual("e", e.value)
        self.assertEqual(list("llo"), list(e.it))
        self.assertEqual({"error!"}, e.expected)
class SequenceRTest(test_utils.TestCase):
    """Tests for sequence_r, which keeps only the right-hand result."""

    def test_parses(self):
        self.assertParse(
            combinators.sequence_r(primitives.any_, primitives.any_),
            "e", "llo", "hello")

    def test_fails_to_parse(self):
        # BUG FIX: the failure tests previously exercised
        # combinators.sequence (copy-paste from SequenceTest), leaving the
        # failure paths of sequence_r itself untested. The expected failure
        # values are identical for both combinators.
        e = self.assertParseFailure(
            combinators.sequence_r(primitives.error({"error!"}),
                                   primitives.any_),
            "hello")
        self.assertEqual({"error!"}, e.expected)

    def test_fails_to_parse_second(self):
        e = self.assertSimpleParseFailure(
            combinators.sequence_r(primitives.any_,
                                   primitives.error({"error!"})),
            "hello")
        self.assertEqual("e", e.value)
        self.assertEqual(list("llo"), list(e.it))
        self.assertEqual({"error!"}, e.expected)
class CountTest(test_utils.TestCase):
    """Tests for the count combinator."""

    def test_parses(self):
        five_chars = combinators.count(5, primitives.any_)
        self.assertParse(five_chars, list("hello"), "", "hello")

    def test_fails_to_parse(self):
        failure = self.assertParseFailure(
            combinators.count(1, primitives.error({"error!"})), "hello")
        self.assertEqual({"error!"}, failure.expected)

    def test_fails_to_parse_second(self):
        # The first "h" matches; the second character does not.
        failure = self.assertSimpleParseFailure(
            combinators.count(5, primitives.match(lambda c: c == "h",
                                                  {"error!"})),
            "hello")
        self.assertEqual("e", failure.value)
        self.assertEqual(list("llo"), list(failure.it))
        self.assertEqual({"error!"}, failure.expected)
class OptionTest(test_utils.TestCase):
    """Tests for the option combinator."""

    def test_parses(self):
        self.assertParse(combinators.option(primitives.any_),
                         "h", "ello", "hello")

    def test_parses_none(self):
        # A non-consuming failure makes option succeed with None.
        self.assertParse(combinators.option(primitives.error(set())),
                         None, "hello", "hello")

    def test_fails_to_parse_non_ll1(self):
        # Once the inner parser has consumed input, option propagates
        # the failure instead of yielding None.
        failure = self.assertSimpleParseFailure(
            combinators.option(
                combinators.sequence(primitives.match(lambda c: c == "h"),
                                     primitives.match(lambda c: c == "e"))),
            "hzllo")
        self.assertEqual("z", failure.value)
        self.assertEqual(list("llo"), list(failure.it))
        self.assertEqual(set(), failure.expected)
class ManyTest(test_utils.TestCase):
    """Tests for many, which matches zero or more occurrences."""

    def test_parses(self):
        self.assertParse(combinators.many(primitives.any_), list("hello"), "",
                         "hello")

    def test_parses_some(self):
        # BUG FIX: this called combinators.many1 (duplicated verbatim from
        # Many1Test.test_parses_some); this class tests many. The expected
        # parse result is the same for both combinators on this input.
        self.assertParse(
            combinators.many(primitives.match(lambda c: c == "h")), ["h"],
            "ello", "hello")

    def test_parses_none(self):
        self.assertParse(combinators.many(primitives.error(set())), [], "hello",
                         "hello")
class Many1Test(test_utils.TestCase):
    """Tests for many1, which requires at least one occurrence."""

    def test_parses(self):
        self.assertParse(combinators.many1(primitives.any_),
                         list("hello"), "", "hello")

    def test_parses_some(self):
        only_h = combinators.many1(primitives.match(lambda c: c == "h"))
        self.assertParse(only_h, ["h"], "ello", "hello")

    def test_fails_to_parse(self):
        failure = self.assertParseFailure(
            combinators.many1(primitives.error({"error!"})), "hello")
        self.assertEqual({"error!"}, failure.expected)
class BetweenTest(test_utils.TestCase):
    """Tests for the between combinator (open parser, close parser, body)."""

    def test_parses(self):
        parser = combinators.between(primitives.match(lambda c: c == "h"),
                                     primitives.match(lambda c: c == "l"),
                                     primitives.match(lambda c: c == "e"))
        self.assertParse(parser, "e", "lo", "hello")

    def test_fails_to_parse_front(self):
        failure = self.assertParseFailure(
            combinators.between(primitives.error({"error!"}),
                                primitives.match(lambda c: c == "l"),
                                primitives.match(lambda c: c == "e")),
            "hello")
        self.assertEqual({"error!"}, failure.expected)

    def test_fails_to_parse(self):
        # The opening "h" is consumed before the body parser fails.
        failure = self.assertSimpleParseFailure(
            combinators.between(primitives.match(lambda c: c == "h"),
                                primitives.match(lambda c: c == "l"),
                                primitives.error({"error!"})),
            "hello")
        self.assertEqual("e", failure.value)
        self.assertEqual(list("llo"), list(failure.it))
        self.assertEqual({"error!"}, failure.expected)

    def test_fails_to_parse_back(self):
        # "h" and "e" are consumed before the closing parser fails.
        failure = self.assertSimpleParseFailure(
            combinators.between(primitives.match(lambda c: c == "h"),
                                primitives.error({"error!"}),
                                primitives.match(lambda c: c == "e")),
            "hello")
        self.assertEqual("l", failure.value)
        self.assertEqual(list("lo"), list(failure.it))
        self.assertEqual({"error!"}, failure.expected)
class SepByTest(test_utils.TestCase):
    """Tests for sep_by, which matches zero or more separated items."""

    def test_parses(self):
        comma_hs = combinators.sep_by(primitives.match(lambda c: c == ","),
                                      primitives.match(lambda c: c == "h"))
        self.assertParse(comma_hs, ["h", "h", "h"], "ello", "h,h,hello")

    def test_parses_none(self):
        comma_hs = combinators.sep_by(primitives.match(lambda c: c == ","),
                                      primitives.match(lambda c: c == "h"))
        self.assertParse(comma_hs, [], "ello", "ello")

    def test_parse_some(self):
        # A failing separator simply ends the sequence after one item.
        self.assertParse(
            combinators.sep_by(primitives.error({"error!"}),
                               primitives.match(lambda c: c == "h")),
            ["h"], "ello", "hello")

    def test_parses_none_2(self):
        # A failing item parser yields an empty result without consuming.
        self.assertParse(
            combinators.sep_by(primitives.match(lambda c: c == ","),
                               primitives.error({"error!"})),
            [], "hello", "hello")
class SepBy1Test(test_utils.TestCase):
    """Tests for sep_by1, which requires at least one item."""

    def test_parses(self):
        comma_hs = combinators.sep_by1(primitives.match(lambda c: c == ","),
                                       primitives.match(lambda c: c == "h"))
        self.assertParse(comma_hs, ["h", "h", "h"], "ello", "h,h,hello")

    def test_fails_to_parse(self):
        failure = self.assertParseFailure(
            combinators.sep_by1(primitives.match(lambda c: c == ","),
                                primitives.match(lambda c: c == "h",
                                                 {"error!"})),
            "ello")
        self.assertEqual({"error!"}, failure.expected)

    def test_parse_some(self):
        failure_separator = primitives.error({"error!"})
        self.assertParse(
            combinators.sep_by1(failure_separator,
                                primitives.match(lambda c: c == "h")),
            ["h"], "ello", "hello")

    def test_fails_to_parse_2(self):
        failure = self.assertParseFailure(
            combinators.sep_by1(primitives.match(lambda c: c == ","),
                                primitives.error({"error!"})),
            "hello")
        self.assertEqual({"error!"}, failure.expected)
class NotFollowedByTest(test_utils.TestCase):
    """Tests for the not_followed_by negative-lookahead combinator."""

    def test_parses(self):
        no_h = combinators.not_followed_by(
            primitives.match(lambda c: c == "h"))
        self.assertParse(no_h, None, "ello", "ello")

    def test_parses_non_ll1(self):
        # The lookahead consumes nothing even when the inner parser would
        # have read more than one character before failing.
        self.assertParse(
            combinators.not_followed_by(combinators.sequence(
                primitives.match(lambda c: c == "h"),
                primitives.match(lambda c: c == "e"))),
            None, "hllo", "hllo")

    def test_fails_to_parse(self):
        failure = self.assertSimpleParseFailure(
            combinators.not_followed_by(primitives.match(lambda c: c == "h")),
            "hello")
        self.assertEqual(set(), failure.expected)
class EofTest(test_utils.TestCase):
    """Tests for the eof combinator and the EOF sentinel."""

    def test_parses(self):
        self.assertParse(combinators.eof, None, "", "")

    def test_fails_to_parse(self):
        failure = self.assertSimpleParseFailure(combinators.eof, "hello")
        self.assertEqual({primitives.EOF}, failure.expected)

    def test_eof_repr(self):
        self.assertEqual("end of file", repr(primitives.EOF))
| |
# -*- coding: utf-8 -*-
"""Parser for Linux UTMP files."""
import construct
import logging
import os
import socket
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class UtmpEvent(event.EventObject):
    """Convenience class for an UTMP event."""

    DATA_TYPE = u'linux:utmp:event'

    def __init__(
        self, timestamp, microsecond, user, computer_name,
        terminal, status, ip_address, structure):
        """Initializes the event object.

        Args:
          timestamp: Epoch when the terminal was started.
          microsecond: number of microseconds related with timestamp.
          user: active user name.
          computer_name: name of the computer.
          terminal: type of terminal.
          status: login status.
          ip_address: IP address from where the connection was made.
          structure: the parsed UTMP entry structure; its exit status (exit),
                     process ID (pid) and Inittab ID (terminal_id) attributes
                     are copied onto the event.
        """
        super(UtmpEvent, self).__init__()
        # Combine the POSIX seconds and microseconds into a single plaso
        # timestamp, flagged as a start time.
        self.timestamp = timelib.Timestamp.FromPosixTimeWithMicrosecond(
            timestamp, microsecond)
        self.timestamp_desc = eventdata.EventTimestamp.START_TIME
        self.user = user
        self.computer_name = computer_name
        self.terminal = terminal
        self.status = status
        self.ip_address = ip_address
        # Fields of interest copied straight from the parsed structure.
        self.exit = structure.exit
        self.pid = structure.pid
        self.terminal_id = structure.terminal_id
class UtmpParser(interface.SingleFileBaseParser):
    """Parser for Linux/Unix UTMP files."""

    _INITIAL_FILE_OFFSET = None

    NAME = u'utmp'
    DESCRIPTION = u'Parser for Linux/Unix UTMP files.'

    # Fixed-size on-disk layout of one Linux utmp record (384 bytes); all
    # integers are little-endian.
    LINUX_UTMP_ENTRY = construct.Struct(
        u'utmp_linux',
        construct.ULInt32(u'type'),
        construct.ULInt32(u'pid'),
        construct.String(u'terminal', 32),
        construct.ULInt32(u'terminal_id'),
        construct.String(u'username', 32),
        construct.String(u'hostname', 256),
        construct.ULInt16(u'termination'),
        construct.ULInt16(u'exit'),
        construct.ULInt32(u'session'),
        construct.ULInt32(u'timestamp'),
        construct.ULInt32(u'microsecond'),
        construct.ULInt32(u'address_a'),
        construct.ULInt32(u'address_b'),
        construct.ULInt32(u'address_c'),
        construct.ULInt32(u'address_d'),
        construct.Padding(20))

    LINUX_UTMP_ENTRY_SIZE = LINUX_UTMP_ENTRY.sizeof()

    # Mapping of the utmp "type" field to a human readable status.
    STATUS_TYPE = {
        0: u'EMPTY',
        1: u'RUN_LVL',
        2: u'BOOT_TIME',
        3: u'NEW_TIME',
        4: u'OLD_TIME',
        5: u'INIT_PROCESS',
        6: u'LOGIN_PROCESS',
        7: u'USER_PROCESS',
        8: u'DEAD_PROCESS',
        9: u'ACCOUNTING'}

    # Set a default test value for few fields, this is supposed to be a text
    # that is highly unlikely to be seen in a terminal field, or a username
    # field. It is important that this value does not show up in such fields,
    # but otherwise it can be a free flowing text field.
    _DEFAULT_TEST_VALUE = u'Ekki Fraedilegur Moguleiki, thetta er bull ! = + _<>'

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses an UTMP file-like object.

        Args:
          parser_mediator: A parser mediator object (instance of
                           ParserMediator).
          file_object: The file-like object to extract data from.

        Raises:
          UnableToParseFile: when the file cannot be parsed.
        """
        file_object.seek(0, os.SEEK_SET)
        # Parse only the first record here, to validate that this actually
        # looks like an UTMP file before producing any events.
        try:
            structure = self.LINUX_UTMP_ENTRY.parse_stream(file_object)
        except (IOError, construct.FieldError) as exception:
            raise errors.UnableToParseFile(
                u'Unable to parse UTMP Header with error: {0:s}'.format(exception))
        if structure.type not in self.STATUS_TYPE:
            raise errors.UnableToParseFile((
                u'Not an UTMP file, unknown type '
                u'[{0:d}].').format(structure.type))
        # Each text field must be a null-terminated, null-padded byte string.
        if not self._VerifyTextField(structure.terminal):
            raise errors.UnableToParseFile(
                u'Not an UTMP file, unknown terminal.')
        if not self._VerifyTextField(structure.username):
            raise errors.UnableToParseFile(
                u'Not an UTMP file, unknown username.')
        if not self._VerifyTextField(structure.hostname):
            raise errors.UnableToParseFile(
                u'Not an UTMP file, unknown hostname.')
        # Check few values. Passing _DEFAULT_TEST_VALUE as the default means
        # that getting it back signals the field contained no usable text.
        terminal = self._GetTextFromNullTerminatedString(
            structure.terminal, self._DEFAULT_TEST_VALUE)
        if terminal == self._DEFAULT_TEST_VALUE:
            raise errors.UnableToParseFile(
                u'Not an UTMP file, no terminal set.')
        username = self._GetTextFromNullTerminatedString(
            structure.username, self._DEFAULT_TEST_VALUE)
        if username == self._DEFAULT_TEST_VALUE:
            raise errors.UnableToParseFile(
                u'Not an UTMP file, no username set.')
        if not structure.timestamp:
            raise errors.UnableToParseFile(
                u'Not an UTMP file, no timestamp set in the first record.')
        # Rewind and produce one event per record until end of file.
        file_object.seek(0, os.SEEK_SET)
        event_object = self._ReadUtmpEvent(file_object)
        while event_object:
            # NOTE(review): tell() is called after the record has been read,
            # so offset points at the end of the record rather than its
            # start -- confirm whether this is intentional.
            event_object.offset = file_object.tell()
            parser_mediator.ProduceEvent(event_object)
            event_object = self._ReadUtmpEvent(file_object)

    def _VerifyTextField(self, text):
        """Check if a byte stream is a null terminated string.

        Args:
          text: a text field from the parsed structure, as raw bytes.

        Returns:
          True if it is a null terminated string, False otherwise.
        """
        _, _, null_chars = text.partition(b'\x00')
        if not null_chars:
            return False
        # Everything after the first null byte must also be null padding.
        return len(null_chars) == null_chars.count(b'\x00')

    def _ReadUtmpEvent(self, file_object):
        """Returns an UtmpEvent from a single UTMP entry.

        Args:
          file_object: a file-like object that points to an UTMP file.

        Returns:
          An event object constructed from a single UTMP record or None if we
          have reached the end of the file (or EOF).
        """
        offset = file_object.tell()
        data = file_object.read(self.LINUX_UTMP_ENTRY_SIZE)
        # A short read means end of file (or a truncated trailing record).
        if not data or len(data) != self.LINUX_UTMP_ENTRY_SIZE:
            return
        try:
            entry = self.LINUX_UTMP_ENTRY.parse(data)
        except (IOError, construct.FieldError):
            logging.warning((
                u'UTMP entry at 0x{:x} couldn\'t be parsed.').format(offset))
            # Skip the unparsable record and try the next one (recursion
            # bounded by the remaining number of records in the file).
            return self._ReadUtmpEvent(file_object)
        user = self._GetTextFromNullTerminatedString(entry.username)
        terminal = self._GetTextFromNullTerminatedString(entry.terminal)
        # "~" in the terminal field denotes a system boot record.
        if terminal == u'~':
            terminal = u'system boot'
        computer_name = self._GetTextFromNullTerminatedString(entry.hostname)
        if computer_name == u'N/A' or computer_name == u':0':
            computer_name = u'localhost'
        status = self.STATUS_TYPE.get(entry.type, u'N/A')
        # address_b being zero is used here to distinguish an IPv4 address
        # (stored entirely in address_a) from a four-part address.
        if not entry.address_b:
            try:
                ip_address = socket.inet_ntoa(
                    construct.ULInt32(u'int').build(entry.address_a))
                if ip_address == u'0.0.0.0':
                    ip_address = u'localhost'
            except (IOError, construct.FieldError, socket.error):
                ip_address = u'N/A'
        else:
            ip_address = u'{0:d}.{1:d}.{2:d}.{3:d}'.format(
                entry.address_a, entry.address_b, entry.address_c, entry.address_d)
        return UtmpEvent(
            entry.timestamp, entry.microsecond, user, computer_name, terminal,
            status, ip_address, entry)

    def _GetTextFromNullTerminatedString(
        self, null_terminated_string, default_string=u'N/A'):
        """Get a UTF-8 text from a raw null terminated string.

        Args:
          null_terminated_string: Raw string terminated with null character.
          default_string: The default string returned if the parser fails.

        Returns:
          A decoded UTF-8 string or if unable to decode, the supplied default
          string.
        """
        text, _, _ = null_terminated_string.partition(b'\x00')
        try:
            text = text.decode(u'utf-8')
        except UnicodeDecodeError:
            logging.warning(
                u'[UTMP] Decode UTF8 failed, the message string may be cut short.')
            # Fall back to a lossy decode rather than dropping the field.
            text = text.decode(u'utf-8', u'ignore')
        if not text:
            return default_string
        return text
# Register the parser with the plaso parser manager so it is discoverable.
manager.ParsersManager.RegisterParser(UtmpParser)
| |
from twisted.internet import defer
from twisted.trial import unittest
from twisted.python.failure import Failure
import trex
from trex import redis
from .mixins import REDIS_HOST, REDIS_PORT
class SortedSetsTests(unittest.TestCase):
    '''
    Tests for sorted sets (redis ZSET commands), run against a live redis
    server via trex.
    '''
    # Keys used by the tests; tearDown deletes all of them after each test.
    _KEYS = ['trex:testssets1', 'trex:testssets2',
             'trex:testssets3', 'trex:testssets4']
    # English words used by _to_words to build member names from digits.
    _NUMBERS = ["zero", "one", "two", "three", "four",
                "five", "six", "seven", "eight", "nine"]

    @defer.inlineCallbacks
    def test_zaddrem(self):
        key = self._getKey()
        t = self.assertEqual
        r = yield self.db.zadd(key, 1, "one")
        t(r, 1)
        r = yield self.db.zadd(key, 2, "two")
        t(r, 1)
        # Try adding multiple items
        r = yield self.db.zadd(key, 3, "three", 4, "four", 5, "five")
        t(r, 3)
        r = yield self.db.zcount(key, '-inf', '+inf')
        t(r, 5)
        # Try deleting one item
        r = yield self.db.zrem(key, "one")
        # Try deleting some items
        r = yield self.db.zrem(key, "two", "three")
        # Test if calling zadd with odd number of arguments errors out
        yield self.db.zadd(key, 1, "one", 2).addBoth(
            self._check_invaliddata_error, shouldError=True)
        # Now try doing it the right way
        yield self.db.zadd(key, 1, "one", 2, "two").addBoth(
            self._check_invaliddata_error)

    @defer.inlineCallbacks
    def test_zcard_zcount(self):
        key = self._getKey()
        t = self.assertEqual
        yield self._make_sorted_set(key)
        r = yield self.db.zcard(key)  # Check ZCARD
        t(r, 10)
        r = yield self.db.zcount(key)  # ZCOUNT with default args
        t(r, 10)
        r = yield self.db.zcount(key, 1, 5)  # ZCOUNT with args
        t(r, 5)
        r = yield self.db.zcount(key, '(1', 5)  # Exclude arg1
        t(r, 4)
        r = yield self.db.zcount(key, '(1', '(3')  # Exclude arg1 & arg2
        t(r, 1)

    @defer.inlineCallbacks
    def test_zincrby(self):
        key = self._getKey()
        t = self.assertEqual
        yield self._make_sorted_set(key, 1, 3)
        r = yield self.db.zincrby(key, 2, "one")
        t(r, 3)
        r = yield self.db.zrange(key, withscores=True)
        t(r, [('two', 2), ('one', 3)])
        # Also test zincr
        r = yield self.db.zincr(key, "one")
        t(r, 4)
        r = yield self.db.zrange(key, withscores=True)
        t(r, [('two', 2), ('one', 4)])
        # And zdecr
        r = yield self.db.zdecr(key, "one")
        t(r, 3)
        r = yield self.db.zrange(key, withscores=True)
        t(r, [('two', 2), ('one', 3)])

    # The forward/reverse range and rank tests share one parameterized helper.
    def test_zrange(self):
        return self._test_zrange(False)

    def test_zrevrange(self):
        return self._test_zrange(True)

    def test_zrank(self):
        return self._test_zrank(False)

    def test_zrevrank(self):
        return self._test_zrank(True)

    @defer.inlineCallbacks
    def test_zscore(self):
        key = self._getKey()
        r, l = yield self._make_sorted_set(key)
        for k, s in l:
            r = yield self.db.zscore(key, k)
            self.assertEqual(r, s)
        # Missing member and missing key both score as None.
        r = yield self.db.zscore(key, 'none')
        self.assertTrue(r is None)
        r = yield self.db.zscore('none', 'one')
        self.assertTrue(r is None)

    @defer.inlineCallbacks
    def test_zremrangebyrank(self):
        key = self._getKey()
        t = self.assertEqual
        r, l = yield self._make_sorted_set(key)
        r = yield self.db.zremrangebyrank(key)
        t(r, len(l))
        r = yield self.db.zrange(key)
        t(r, [])  # Check default args
        yield self._make_sorted_set(key, begin=1, end=4)
        r = yield self.db.zremrangebyrank(key, 0, 1)
        t(r, 2)
        r = yield self.db.zrange(key, withscores=True)
        t(r, [('three', 3)])

    @defer.inlineCallbacks
    def test_zremrangebyscore(self):
        key = self._getKey()
        t = self.assertEqual
        r, l = yield self._make_sorted_set(key, end=4)
        r = yield self.db.zremrangebyscore(key)
        t(r, len(l))
        r = yield self.db.zrange(key)
        t(r, [])  # Check default args
        yield self._make_sorted_set(key, begin=1, end=4)
        r = yield self.db.zremrangebyscore(key, '-inf', '(2')
        t(r, 1)
        r = yield self.db.zrange(key, withscores=True)
        t(r, [('two', 2), ('three', 3)])

    def test_zrangebyscore(self):
        return self._test_zrangebyscore(False)

    def test_zrevrangebyscore(self):
        return self._test_zrangebyscore(True)

    def test_zinterstore(self):
        # Maps aggregate function name -> ((accepted spellings), expected
        # zrange result per key1 weight in range(-1, 3)).
        agg_map = {
            'min': (('min', min), {
                -1: [('three', -3)],
                0: [(u'three', 0)],
                1: [(u'three', 3)],
                2: [(u'three', 3)],
            }),
            'max': (('max', max), {
                -1: [('three', 3)],
                0: [('three', 3)],
                1: [('three', 3)],
                2: [('three', 6)],
            }),
            'sum': (('sum', sum), {
                -1: [('three', 0)],
                0: [('three', 3)],
                1: [('three', 6)],
                2: [('three', 9)],
            })
        }
        return self._test_zunion_inter_store(agg_map)

    def test_zunionstore(self):
        # Same shape as in test_zinterstore, but the union keeps all members
        # of both source sets.
        agg_map = {
            'min': (('min', min), {
                -1: [('five', -5), ('four', -4), ('three', -3),
                     ('one', 1), ('two', 2)],
                0: [('five', 0), ('four', 0), ('three', 0),
                    ('one', 1), ('two', 2)],
                1: [('one', 1), ('two', 2), ('three', 3),
                    ('four', 4), ('five', 5)],
                2: [('one', 1), ('two', 2), ('three', 3),
                    ('four', 8), ('five', 10)]
            }),
            'max': (('max', max), {
                -1: [('five', -5), ('four', -4), ('one', 1),
                     ('two', 2), ('three', 3)],
                0: [('five', 0), ('four', 0), ('one', 1),
                    ('two', 2), ('three', 3)],
                1: [('one', 1), ('two', 2), ('three', 3),
                    ('four', 4), ('five', 5)],
                2: [('one', 1), ('two', 2), ('three', 6),
                    ('four', 8), ('five', 10)]
            }),
            'sum': (('sum', sum), {
                -1: [('five', -5), ('four', -4), ('three', 0),
                     ('one', 1), ('two', 2)],
                0: [('five', 0), ('four', 0), ('one', 1),
                    ('two', 2), ('three', 3)],
                1: [('one', 1), ('two', 2), ('four', 4),
                    ('five', 5), ('three', 6)],
                2: [('one', 1), ('two', 2), ('four', 8),
                    ('three', 9), ('five', 10)]
            })
        }
        return self._test_zunion_inter_store(agg_map, True)

    @defer.inlineCallbacks
    def _test_zunion_inter_store(self, agg_function_map, union=False):
        # Shared driver for ZUNIONSTORE/ZINTERSTORE: two overlapping source
        # sets, every aggregate spelling, and key1 weights from -1 to 2.
        if union:
            cmd = self.db.zunionstore
        else:
            cmd = self.db.zinterstore
        key = self._getKey()
        t = self.assertEqual
        key1 = self._getKey(1)
        destKey = self._getKey(2)
        r, l = yield self._make_sorted_set(key, begin=1, end=4)
        r1, l1 = yield self._make_sorted_set(key1, begin=3, end=6)
        for agg_fn_name in agg_function_map:
            for agg_fn in agg_function_map[agg_fn_name][0]:
                for key1_weight in range(-1, 3):
                    # Weight 1 also exercises the list form of the keys
                    # argument; other weights use the dict (key -> weight)
                    # form.
                    if key1_weight == 1:
                        keys = [key, key1]
                    else:
                        keys = {key: 1, key1: key1_weight}
                    r = yield cmd(destKey, keys, aggregate=agg_fn)
                    if union:
                        t(r, len(set(l + l1)))
                    else:
                        t(r, len(set(l) & set(l1)))
                    r = yield self.db.zrange(destKey, withscores=True)
                    t(r, agg_function_map[agg_fn_name][1][key1_weight])
                    yield self.db.delete(destKey)
        # Finally, test for invalid aggregate functions
        yield self.db.delete(key, key1)
        yield self._make_sorted_set(key, begin=1, end=4)
        yield self._make_sorted_set(key1, begin=3, end=6)
        yield cmd(destKey, [key, key1], aggregate='SIN').addBoth(
            self._check_invaliddata_error, shouldError=True)
        yield cmd(destKey, [key, key1], aggregate=lambda a, b: a + b).addBoth(
            self._check_invaliddata_error, shouldError=True)
        yield self.db.delete(destKey)

    @defer.inlineCallbacks
    def _test_zrangebyscore(self, reverse):
        # Shared driver for ZRANGEBYSCORE/ZREVRANGEBYSCORE, with and without
        # WITHSCORES.
        key = self._getKey()
        t = self.assertEqual
        if reverse:
            command = self.db.zrevrangebyscore
        else:
            command = self.db.zrangebyscore
        for ws in [True, False]:
            r, l = yield self._make_sorted_set(key, begin=1, end=4)
            if reverse:
                l.reverse()
            r = yield command(key, withscores=ws)
            if ws:
                t(r, l)
            else:
                t(r, [x[0] for x in l])
            r = yield command(key, withscores=ws, offset=1, count=1)
            if ws:
                t(r, [('two', 2)])
            else:
                t(r, ['two'])
            yield self.db.delete(key)
        # Test for invalid offset and count: both must be given together.
        yield self._make_sorted_set(key, begin=1, end=4)
        yield command(key, offset=1).addBoth(
            self._check_invaliddata_error, shouldError=True)
        yield command(key, count=1).addBoth(
            self._check_invaliddata_error, shouldError=True)

    @defer.inlineCallbacks
    def _test_zrank(self, reverse):
        # Shared driver for ZRANK/ZREVRANK.
        key = self._getKey()
        r, l = yield self._make_sorted_set(key)
        if reverse:
            command = self.db.zrevrank
            l.reverse()
        else:
            command = self.db.zrank
        for k, s in l:
            r = yield command(key, k)
            self.assertEqual(l[r][0], k)
        r = yield command(key, 'none')  # non-existent member
        self.assertTrue(r is None)
        r = yield command('none', 'one')
        self.assertTrue(r is None)

    @defer.inlineCallbacks
    def _test_zrange(self, reverse):
        # Shared driver for ZRANGE/ZREVRANGE.
        key = self._getKey()
        t = self.assertEqual
        r, l = yield self._make_sorted_set(key)
        if reverse:
            command = self.db.zrevrange
            l.reverse()
        else:
            command = self.db.zrange
        r = yield command(key)
        t(r, [x[0] for x in l])
        r = yield command(key, withscores=True)
        # Ensure that WITHSCORES returns tuples
        t(r, l)
        # Test with args
        r = yield command(key, start='5', end='8', withscores=True)
        t(r, l[5:9])
        # Test to ensure empty results return empty lists
        r = yield command(key, start=-20, end=-40, withscores=True)
        t(r, [])

    def _getKey(self, n=0):
        # Returns the n-th test key from _KEYS.
        return self._KEYS[n]

    def _to_words(self, n):
        # Spell out the digits of n, least significant first, e.g. 42 ->
        # "two four".
        l = []
        while True:
            n, r = divmod(n, 10)
            l.append(self._NUMBERS[r])
            if n == 0:
                break
        return ' '.join(l)

    def _sorted_set_check(self, r, l):
        # Callback: assert that ZADD reported len(l) added members, then pass
        # both through for the caller.
        self.assertEqual(r, len(l))
        return r, l

    def _make_sorted_set(self, key, begin=0, end=10):
        # Populate key with members _to_words(x) scored x for x in
        # [begin, end); fires with (added_count, [(member, score), ...]).
        l = []
        for x in range(begin, end):
            l.extend((x, self._to_words(x)))
        return self.db.zadd(key, *l).addCallback(
            self._sorted_set_check, zip(l[1::2], l[::2]))

    @defer.inlineCallbacks
    def setUp(self):
        self.db = yield redis.Connection(REDIS_HOST, REDIS_PORT,
                                         reconnect=False)

    def tearDown(self):
        # Remove every test key, then close the connection.
        return defer.gatherResults(
            [self.db.delete(x) for x in self._KEYS]).addCallback(
            lambda ign: self.db.disconnect())

    def _check_invaliddata_error(self, response, shouldError=False):
        # Asserts that the deferred result is (or is not) an InvalidData
        # failure, depending on shouldError.
        if shouldError:
            self.assertIsInstance(response, Failure)
            self.assertIsInstance(response.value, trex.exceptions.InvalidData)
        else:
            self.assertNotIsInstance(response, Failure)
| |
from __future__ import print_function
import datetime
from typing import Dict, List, Optional, Union
import collections
import os
import sys
import numpy as np
import time
from ray.util.annotations import PublicAPI, DeveloperAPI
from ray.util.queue import Queue
from ray.tune.callback import Callback
from ray.tune.logger import pretty_print, logger
from ray.tune.result import (
DEFAULT_METRIC,
EPISODE_REWARD_MEAN,
MEAN_ACCURACY,
MEAN_LOSS,
NODE_IP,
PID,
TRAINING_ITERATION,
TIME_TOTAL_S,
TIMESTEPS_TOTAL,
AUTO_RESULT_KEYS,
)
from ray.tune.trial import DEBUG_PRINT_INTERVAL, Trial, Location
from ray.tune.utils import unflattened_lookup
from ray.tune.utils.log import Verbosity, has_verbosity
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
try:
from tabulate import tabulate
except ImportError:
raise ImportError(
"ray.tune in ray > 0.7.5 requires 'tabulate'. "
"Please re-run 'pip install ray[tune]' or "
"'pip install ray[rllib]'."
)
# Detect an IPython/Jupyter-style environment: get_ipython() only exists
# inside such shells, and terminal IPython is deliberately excluded.
try:
    class_name = get_ipython().__class__.__name__
    IS_NOTEBOOK = "Terminal" not in class_name
except NameError:
    IS_NOTEBOOK = False
@PublicAPI
class ProgressReporter:
    """Abstract interface for experiment progress reporting.

    Tune calls :meth:`should_report` after trial state transitions,
    incoming training results, and so on, to decide whether
    :meth:`report` should run. Subclasses implement both methods.
    """

    def should_report(self, trials: List[Trial], done: bool = False):
        """Return whether progress should be reported now.

        Args:
            trials (list[Trial]): Trials to report on.
            done (bool): Whether this is the last progress report attempt.
        """
        raise NotImplementedError

    def report(self, trials: List[Trial], done: bool, *sys_info: Dict):
        """Emit a progress report covering ``trials``.

        Args:
            trials (list[Trial]): Trials to report on.
            done (bool): Whether this is the last progress report attempt.
            sys_info: System info.
        """
        raise NotImplementedError

    def set_search_properties(self, metric: Optional[str], mode: Optional[str]):
        # Default: accept whatever metric/mode the search algorithm supplies.
        return True

    def set_total_samples(self, total_samples: int):
        # Default: ignore; subclasses that display totals override this.
        pass
@DeveloperAPI
class TuneReporterBase(ProgressReporter):
    """Abstract base class for the default Tune reporters.

    If metric_columns is not overridden, Tune will attempt to automatically
    infer the metrics being outputted, up to 'infer_limit' number of
    metrics.

    Args:
        metric_columns (dict[str, str]|list[str]): Names of metrics to
            include in progress table. If this is a dict, the keys should
            be metric names and the values should be the displayed names.
            If this is a list, the metric name is used directly.
        parameter_columns (dict[str, str]|list[str]): Names of parameters to
            include in progress table. If this is a dict, the keys should
            be parameter names and the values should be the displayed names.
            If this is a list, the parameter name is used directly. If empty,
            defaults to all available parameters.
        max_progress_rows (int): Maximum number of rows to print
            in the progress table. The progress table describes the
            progress of each trial. Defaults to 20.
        max_error_rows (int): Maximum number of rows to print in the
            error table. The error table lists the error file, if any,
            corresponding to each trial. Defaults to 20.
        max_report_frequency (int): Maximum report frequency in seconds.
            Defaults to 5s.
        infer_limit (int): Maximum number of metrics to automatically infer
            from tune results.
        print_intermediate_tables (bool|None): Print intermediate result
            tables. If None (default), will be set to True for verbosity
            levels above 3, otherwise False. If True, intermediate tables
            will be printed with experiment progress. If False, tables
            will only be printed at the end of the tuning run for verbosity
            levels greater than 2.
        metric (str): Metric used to determine best current trial.
        mode (str): One of [min, max]. Determines whether objective is
            minimizing or maximizing the metric attribute.
        sort_by_metric (bool): Sort terminated trials by metric in the
            intermediate table. Defaults to False.
    """

    # Truncated representations of column names (to accommodate small screens).
    DEFAULT_COLUMNS = collections.OrderedDict(
        {
            MEAN_ACCURACY: "acc",
            MEAN_LOSS: "loss",
            TRAINING_ITERATION: "iter",
            TIME_TOTAL_S: "total time (s)",
            TIMESTEPS_TOTAL: "ts",
            EPISODE_REWARD_MEAN: "reward",
        }
    )
    # Only scalar-ish values qualify as auto-inferred metric columns.
    VALID_SUMMARY_TYPES = {
        int,
        float,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
        type(None),
    }

    def __init__(
        self,
        metric_columns: Union[None, List[str], Dict[str, str]] = None,
        parameter_columns: Union[None, List[str], Dict[str, str]] = None,
        total_samples: Optional[int] = None,
        max_progress_rows: int = 20,
        max_error_rows: int = 20,
        max_report_frequency: int = 5,
        infer_limit: int = 3,
        print_intermediate_tables: Optional[bool] = None,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        sort_by_metric: bool = False,
    ):
        self._total_samples = total_samples
        # Remember whether the user supplied explicit metric columns; if
        # not, columns are auto-inferred from incoming results.
        self._metrics_override = metric_columns is not None
        self._inferred_metrics = {}
        self._metric_columns = metric_columns or self.DEFAULT_COLUMNS.copy()
        self._parameter_columns = parameter_columns or []
        self._max_progress_rows = max_progress_rows
        self._max_error_rows = max_error_rows
        self._infer_limit = infer_limit

        if print_intermediate_tables is None:
            self._print_intermediate_tables = has_verbosity(Verbosity.V3_TRIAL_DETAILS)
        else:
            self._print_intermediate_tables = print_intermediate_tables

        # NOTE: the attribute keeps its historical misspelling ("freqency")
        # in case external code pokes at it.
        self._max_report_freqency = max_report_frequency
        self._last_report_time = 0

        self._start_time = time.time()

        self._metric = metric
        self._mode = mode

        # Sorting by metric is only meaningful when both metric and mode
        # are known.
        if metric is None or mode is None:
            self._sort_by_metric = False
        else:
            self._sort_by_metric = sort_by_metric

    def set_search_properties(self, metric: Optional[str], mode: Optional[str]):
        """Adopt metric/mode from the search algorithm.

        Returns False (rejecting the properties) if a conflicting value
        was already configured on this reporter.
        """
        if self._metric and metric:
            return False
        if self._mode and mode:
            return False

        if metric:
            self._metric = metric
        if mode:
            self._mode = mode

        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC

        return True

    def set_total_samples(self, total_samples: int):
        self._total_samples = total_samples

    def set_start_time(self, timestamp: Optional[float] = None):
        """Set the experiment start time used for the elapsed-time display.

        Args:
            timestamp: Start time as a UNIX timestamp. Defaults to "now"
                when omitted.
        """
        # BUG FIX: the branches were inverted — a supplied timestamp was
        # discarded (overwritten with time.time()) and omitting it set
        # self._start_time to None, breaking elapsed-time computation.
        if timestamp is not None:
            self._start_time = timestamp
        else:
            self._start_time = time.time()

    def should_report(self, trials: List[Trial], done: bool = False):
        # Rate-limit to one report per `max_report_frequency` seconds,
        # but always report on the final attempt.
        if time.time() - self._last_report_time > self._max_report_freqency:
            self._last_report_time = time.time()
            return True
        return done

    def add_metric_column(self, metric: str, representation: Optional[str] = None):
        """Adds a metric to the existing columns.

        Args:
            metric (str): Metric to add. This must be a metric being returned
                in training step results.
            representation (str): Representation to use in table. Defaults to
                `metric`.
        """
        self._metrics_override = True
        if metric in self._metric_columns:
            raise ValueError("Column {} already exists.".format(metric))

        if isinstance(self._metric_columns, MutableMapping):
            representation = representation or metric
            self._metric_columns[metric] = representation
        else:
            # List-style columns cannot carry a separate display name.
            if representation is not None and representation != metric:
                raise ValueError(
                    "`representation` cannot differ from `metric` "
                    "if this reporter was initialized with a list "
                    "of metric columns."
                )
            self._metric_columns.append(metric)

    def add_parameter_column(
        self, parameter: str, representation: Optional[str] = None
    ):
        """Adds a parameter to the existing columns.

        Args:
            parameter (str): Parameter to add. This must be a parameter
                specified in the configuration.
            representation (str): Representation to use in table. Defaults to
                `parameter`.
        """
        if parameter in self._parameter_columns:
            raise ValueError("Column {} already exists.".format(parameter))

        if isinstance(self._parameter_columns, MutableMapping):
            representation = representation or parameter
            self._parameter_columns[parameter] = representation
        else:
            # List-style columns cannot carry a separate display name.
            if representation is not None and representation != parameter:
                raise ValueError(
                    "`representation` cannot differ from `parameter` "
                    "if this reporter was initialized with a list "
                    "of metric columns."
                )
            self._parameter_columns.append(parameter)

    def _progress_str(
        self,
        trials: List[Trial],
        done: bool,
        *sys_info: Dict,
        fmt: str = "psql",
        delim: str = "\n",
    ):
        """Returns full progress string.

        This string contains a progress table and error table. The progress
        table describes the progress of each trial. The error table lists
        the error file, if any, corresponding to each trial. The latter only
        exists if errors have occurred.

        Args:
            trials (list[Trial]): Trials to report on.
            done (bool): Whether this is the last progress report attempt.
            fmt (str): Table format. See `tablefmt` in tabulate API.
            delim (str): Delimiter between messages.
        """
        if not self._metrics_override:
            user_metrics = self._infer_user_metrics(trials, self._infer_limit)
            self._metric_columns.update(user_metrics)
        messages = [
            "== Status ==",
            time_passed_str(self._start_time, time.time()),
            memory_debug_str(),
            *sys_info,
        ]
        if done:
            # Final report: show every row.
            max_progress = None
            max_error = None
        else:
            max_progress = self._max_progress_rows
            max_error = self._max_error_rows

        current_best_trial, metric = self._current_best_trial(trials)
        if current_best_trial:
            messages.append(
                best_trial_str(current_best_trial, metric, self._parameter_columns)
            )

        if has_verbosity(Verbosity.V1_EXPERIMENT):
            # Will filter the table in `trial_progress_str`
            messages.append(
                trial_progress_str(
                    trials,
                    metric_columns=self._metric_columns,
                    parameter_columns=self._parameter_columns,
                    total_samples=self._total_samples,
                    force_table=self._print_intermediate_tables,
                    fmt=fmt,
                    max_rows=max_progress,
                    done=done,
                    metric=self._metric,
                    mode=self._mode,
                    sort_by_metric=self._sort_by_metric,
                )
            )
            messages.append(trial_errors_str(trials, fmt=fmt, max_rows=max_error))

        return delim.join(messages) + delim

    def _infer_user_metrics(self, trials: List[Trial], limit: int = 4):
        """Try to infer the metrics to print out.

        Collects up to ``limit`` scalar-valued, non-automatic result keys
        seen across trials' last results.
        """
        if len(self._inferred_metrics) >= limit:
            return self._inferred_metrics
        self._inferred_metrics = {}
        for t in trials:
            if not t.last_result:
                continue
            for metric, value in t.last_result.items():
                if metric not in self.DEFAULT_COLUMNS:
                    if metric not in AUTO_RESULT_KEYS:
                        if type(value) in self.VALID_SUMMARY_TYPES:
                            self._inferred_metrics[metric] = metric

                if len(self._inferred_metrics) >= limit:
                    return self._inferred_metrics
        return self._inferred_metrics

    def _current_best_trial(self, trials: List[Trial]):
        """Return ``(best_trial, metric)`` under the configured metric/mode.

        Returns ``(None, metric)`` when no best trial can be determined.
        """
        if not trials:
            return None, None

        metric, mode = self._metric, self._mode
        # If no metric has been set, see if exactly one has been reported
        # and use that one. `mode` must still be set.
        if not metric:
            if len(self._inferred_metrics) == 1:
                metric = list(self._inferred_metrics.keys())[0]

        if not metric or not mode:
            return None, metric

        metric_op = 1.0 if mode == "max" else -1.0
        best_metric = float("-inf")
        best_trial = None
        for t in trials:
            if not t.last_result:
                continue
            if metric not in t.last_result:
                continue
            # BUG FIX: the previous `if not best_metric or ...` treated a
            # best value of exactly 0.0 as "unset", so a strictly worse
            # trial seen later could overwrite a legitimate best of 0.0.
            candidate = t.last_result[metric] * metric_op
            if best_trial is None or candidate > best_metric:
                best_metric = candidate
                best_trial = t
        return best_trial, metric
@PublicAPI
class JupyterNotebookReporter(TuneReporterBase):
    """Jupyter notebook-friendly reporter that can update its display in-place.

    Renders the progress report as HTML, optionally clearing the previous
    output first. All table/metric configuration arguments behave exactly
    as in :class:`TuneReporterBase`.

    Args:
        overwrite (bool): Flag for overwriting the last reported progress.
        metric_columns (dict[str, str]|list[str]): Names of metrics to
            include in the progress table (dict values are display names).
        parameter_columns (dict[str, str]|list[str]): Names of parameters
            to include in the progress table. If empty, defaults to all
            available parameters.
        total_samples (int): Total number of trials that will be generated.
        max_progress_rows (int): Maximum number of rows in the progress
            table. Defaults to 20.
        max_error_rows (int): Maximum number of rows in the error table.
            Defaults to 20.
        max_report_frequency (int): Maximum report frequency in seconds.
            Defaults to 5s.
        infer_limit (int): Maximum number of metrics to automatically infer
            from tune results.
        print_intermediate_tables (bool|None): Print intermediate result
            tables (see :class:`TuneReporterBase`).
        metric (str): Metric used to determine best current trial.
        mode (str): One of [min, max].
        sort_by_metric (bool): Sort terminated trials by metric in the
            intermediate table. Defaults to False.
    """

    def __init__(
        self,
        overwrite: bool,
        metric_columns: Union[None, List[str], Dict[str, str]] = None,
        parameter_columns: Union[None, List[str], Dict[str, str]] = None,
        total_samples: Optional[int] = None,
        max_progress_rows: int = 20,
        max_error_rows: int = 20,
        max_report_frequency: int = 5,
        infer_limit: int = 3,
        print_intermediate_tables: Optional[bool] = None,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        sort_by_metric: bool = False,
    ):
        super().__init__(
            metric_columns=metric_columns,
            parameter_columns=parameter_columns,
            total_samples=total_samples,
            max_progress_rows=max_progress_rows,
            max_error_rows=max_error_rows,
            max_report_frequency=max_report_frequency,
            infer_limit=infer_limit,
            print_intermediate_tables=print_intermediate_tables,
            metric=metric,
            mode=mode,
            sort_by_metric=sort_by_metric,
        )

        if not IS_NOTEBOOK:
            logger.warning(
                "You are using the `JupyterNotebookReporter`, but not "
                "IPython/Jupyter-compatible environment was detected. "
                "If this leads to unformatted output (e.g. like "
                "<IPython.core.display.HTML object>), consider passing "
                "a `CLIReporter` as the `progress_reporter` argument "
                "to `tune.run()` instead."
            )

        self._overwrite = overwrite
        self._output_queue = None

    def set_output_queue(self, queue: Queue):
        """Route rendered output through ``queue`` (e.g. for Ray client)."""
        self._output_queue = queue

    def report(self, trials: List[Trial], done: bool, *sys_info: Dict):
        """Render the current progress as HTML in the notebook."""
        overwrite = self._overwrite
        progress_str = self._progress_str(
            trials, done, *sys_info, fmt="html", delim="<br>"
        )

        def update_output():
            # Imported lazily so non-notebook processes never need IPython.
            from IPython.display import clear_output
            from IPython.core.display import display, HTML

            if overwrite:
                clear_output(wait=True)
            display(HTML(progress_str))

        if self._output_queue is None:
            # Render directly in this process.
            update_output()
        else:
            # Hand the callable to the driver for display (e.g. when using
            # Ray client).
            self._output_queue.put(update_output)
@PublicAPI
class CLIReporter(TuneReporterBase):
    """Command-line progress reporter.

    Prints the progress report to stdout. All configuration arguments
    behave exactly as in :class:`TuneReporterBase`.

    Args:
        metric_columns (dict[str, str]|list[str]): Names of metrics to
            include in the progress table (dict values are display names).
        parameter_columns (dict[str, str]|list[str]): Names of parameters
            to include in the progress table. If empty, defaults to all
            available parameters.
        total_samples (int): Total number of trials that will be generated.
        max_progress_rows (int): Maximum number of rows in the progress
            table. Defaults to 20.
        max_error_rows (int): Maximum number of rows in the error table.
            Defaults to 20.
        max_report_frequency (int): Maximum report frequency in seconds.
            Defaults to 5s.
        infer_limit (int): Maximum number of metrics to automatically infer
            from tune results.
        print_intermediate_tables (bool|None): Print intermediate result
            tables (see :class:`TuneReporterBase`).
        metric (str): Metric used to determine best current trial.
        mode (str): One of [min, max].
        sort_by_metric (bool): Sort terminated trials by metric in the
            intermediate table. Defaults to False.
    """

    def __init__(
        self,
        metric_columns: Union[None, List[str], Dict[str, str]] = None,
        parameter_columns: Union[None, List[str], Dict[str, str]] = None,
        total_samples: Optional[int] = None,
        max_progress_rows: int = 20,
        max_error_rows: int = 20,
        max_report_frequency: int = 5,
        infer_limit: int = 3,
        print_intermediate_tables: Optional[bool] = None,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        sort_by_metric: bool = False,
    ):
        super().__init__(
            metric_columns=metric_columns,
            parameter_columns=parameter_columns,
            total_samples=total_samples,
            max_progress_rows=max_progress_rows,
            max_error_rows=max_error_rows,
            max_report_frequency=max_report_frequency,
            infer_limit=infer_limit,
            print_intermediate_tables=print_intermediate_tables,
            metric=metric,
            mode=mode,
            sort_by_metric=sort_by_metric,
        )

    def report(self, trials: List[Trial], done: bool, *sys_info: Dict):
        # Everything is assembled by the base class; just print it.
        print(self._progress_str(trials, done, *sys_info))
def memory_debug_str():
    """Return a one-line summary of this node's memory usage.

    Warns when less than 10% of memory is available. Falls back to an
    installation hint when `ray`/`psutil` cannot be imported.
    """
    try:
        import ray  # noqa F401

        import psutil

        total_gb = psutil.virtual_memory().total / (1024 ** 3)
        used_gb = total_gb - psutil.virtual_memory().available / (1024 ** 3)
        if used_gb > total_gb * 0.9:
            warn = (
                ": ***LOW MEMORY*** less than 10% of the memory on "
                "this node is available for use. This can cause "
                "unexpected crashes. Consider "
                "reducing the memory used by your application "
                "or reducing the Ray object store size by setting "
                "`object_store_memory` when calling `ray.init`."
            )
        else:
            warn = ""
        return "Memory usage on this node: {}/{} GiB{}".format(
            round(used_gb, 1), round(total_gb, 1), warn
        )
    except ImportError:
        # BUG FIX: the message previously ended with a stray ")" left over
        # from a broken implicit string concatenation ("to resolve)").
        return "Unknown memory usage. Please run `pip install psutil` to resolve."
def time_passed_str(start_time: float, current_time: float):
    """Format the current wall-clock time plus the elapsed run time.

    Args:
        start_time (float): Experiment start as a UNIX timestamp.
        current_time (float): Current UNIX timestamp.

    Returns:
        A string like ``Current time: 2021-01-01 12:00:00 (running for
        1 days, 01:02:03.50)``.
    """
    current_time_dt = datetime.datetime.fromtimestamp(current_time)
    start_time_dt = datetime.datetime.fromtimestamp(start_time)
    delta: datetime.timedelta = current_time_dt - start_time_dt

    # Decompose the elapsed seconds into days / hours / minutes / seconds.
    days, rem = divmod(delta.total_seconds(), 60 * 60 * 24)
    hours, rem = divmod(rem, 60 * 60)
    minutes, seconds = divmod(rem, 60)

    running_for_str = f"{days:.0f} days, " if days > 0 else ""
    running_for_str += f"{hours:02.0f}:{minutes:02.0f}:{seconds:05.2f}"

    return (
        f"Current time: {current_time_dt:%Y-%m-%d %H:%M:%S} "
        f"(running for {running_for_str})"
    )
def _get_trials_by_state(trials: List[Trial]):
    """Group trials into a defaultdict keyed by their status string."""
    by_state = collections.defaultdict(list)
    for trial in trials:
        by_state[trial.status].append(trial)
    return by_state
def trial_progress_str(
    trials: List[Trial],
    metric_columns: Union[List[str], Dict[str, str]],
    parameter_columns: Union[None, List[str], Dict[str, str]] = None,
    total_samples: int = 0,
    force_table: bool = False,
    fmt: str = "psql",
    max_rows: Optional[int] = None,
    done: bool = False,
    metric: Optional[str] = None,
    mode: Optional[str] = None,
    sort_by_metric: bool = False,
):
    """Return a human-readable progress message for printing to the console.

    The message lists the result logdir(s), a per-state trial count, and —
    when forced or at the end of a sufficiently verbose run — a table with
    one row per trial showing its parameters and current metric values.

    Args:
        trials (list[Trial]): List of trials to get progress string for.
        metric_columns (dict[str, str]|list[str]): Names of metrics to
            include (dict values are display names).
        parameter_columns (dict[str, str]|list[str]): Names of parameters
            to include (dict values are display names). If empty, all
            parameters are used.
        total_samples (int): Total number of trials that will be generated.
        force_table (bool): Force printing a table. If False, a table will
            be printed only at the end of the training for verbosity levels
            above `Verbosity.V2_TRIAL_NORM`.
        fmt (str): Output format (see tablefmt in tabulate API).
        max_rows (int): Maximum number of rows in the trial table. Defaults
            to unlimited.
        done (bool): True indicates that the tuning run finished.
        metric (str): Metric used to sort trials.
        mode (str): One of [min, max].
        sort_by_metric (bool): Sort terminated trials by metric in the
            intermediate table. Defaults to False.
    """
    delim = "<br>" if fmt == "html" else "\n"
    if len(trials) < 1:
        return ""

    messages = []
    trials_by_state = _get_trials_by_state(trials)

    # One line per distinct result directory.
    for local_dir in sorted({t.local_dir for t in trials}):
        messages.append("Result logdir: {}".format(local_dir))

    state_counts = [
        "{} {}".format(len(trials_by_state[state]), state)
        for state in sorted(trials_by_state)
    ]
    if total_samples and total_samples >= sys.maxsize:
        # Unbounded search spaces report sys.maxsize planned samples.
        total_samples = "infinite"
    messages.append(
        "Number of trials: {}{} ({})".format(
            len(trials),
            f"/{total_samples}" if total_samples else "",
            ", ".join(state_counts),
        )
    )

    if force_table or (has_verbosity(Verbosity.V2_TRIAL_NORM) and done):
        messages += trial_progress_table(
            trials,
            metric_columns,
            parameter_columns,
            fmt,
            max_rows,
            metric,
            mode,
            sort_by_metric,
        )

    return delim.join(messages)
def trial_progress_table(
    trials: List[Trial],
    metric_columns: Union[List[str], Dict[str, str]],
    parameter_columns: Union[None, List[str], Dict[str, str]] = None,
    fmt: str = "psql",
    max_rows: Optional[int] = None,
    metric: Optional[str] = None,
    mode: Optional[str] = None,
    sort_by_metric: bool = False,
):
    """Build the per-trial progress table as a list of message strings.

    Trials are grouped by state, truncated fairly across states when they
    exceed ``max_rows``, and rendered with ``tabulate``. Returns the table
    string plus an optional overflow note.
    """
    messages = []
    num_trials = len(trials)
    trials_by_state = _get_trials_by_state(trials)

    # Sort terminated trials by metric and mode, descending if mode is "max"
    if sort_by_metric:
        trials_by_state[Trial.TERMINATED] = sorted(
            trials_by_state[Trial.TERMINATED],
            reverse=(mode == "max"),
            key=lambda t: t.last_result[metric],
        )

    # Display order of states in the rendered table.
    state_tbl_order = [
        Trial.RUNNING,
        Trial.PAUSED,
        Trial.PENDING,
        Trial.TERMINATED,
        Trial.ERROR,
    ]
    max_rows = max_rows or float("inf")
    if num_trials > max_rows:
        # TODO(ujvl): suggestion for users to view more rows.
        # Keep each state fairly represented within the row budget.
        trials_by_state_trunc = _fair_filter_trials(
            trials_by_state, max_rows, sort_by_metric
        )
        trials = []
        overflow_strs = []
        for state in state_tbl_order:
            if state not in trials_by_state:
                continue
            trials += trials_by_state_trunc[state]
            # Count how many trials of this state were cut.
            num = len(trials_by_state[state]) - len(trials_by_state_trunc[state])
            if num > 0:
                overflow_strs.append("{} {}".format(num, state))
        # Build overflow string.
        overflow = num_trials - max_rows
        overflow_str = ", ".join(overflow_strs)
    else:
        overflow = False
        overflow_str = ""
        trials = []
        for state in state_tbl_order:
            if state not in trials_by_state:
                continue
            trials += trials_by_state[state]

    # Pre-process trials to figure out what columns to show.
    if isinstance(metric_columns, Mapping):
        metric_keys = list(metric_columns.keys())
    else:
        metric_keys = metric_columns

    # Only keep metric columns that at least one trial has reported.
    metric_keys = [
        k
        for k in metric_keys
        if any(
            unflattened_lookup(k, t.last_result, default=None) is not None
            for t in trials
        )
    ]

    if not parameter_columns:
        # Default to the union of every parameter evaluated by any trial.
        parameter_keys = sorted(set().union(*[t.evaluated_params for t in trials]))
    elif isinstance(parameter_columns, Mapping):
        parameter_keys = list(parameter_columns.keys())
    else:
        parameter_keys = parameter_columns

    # Build trial rows.
    trial_table = [
        _get_trial_info(trial, parameter_keys, metric_keys) for trial in trials
    ]
    # Format column headings
    if isinstance(metric_columns, Mapping):
        formatted_metric_columns = [metric_columns[k] for k in metric_keys]
    else:
        formatted_metric_columns = metric_keys
    if isinstance(parameter_columns, Mapping):
        formatted_parameter_columns = [parameter_columns[k] for k in parameter_keys]
    else:
        formatted_parameter_columns = parameter_keys
    columns = (
        ["Trial name", "status", "loc"]
        + formatted_parameter_columns
        + formatted_metric_columns
    )
    # Tabulate.
    messages.append(
        tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False)
    )
    if overflow:
        messages.append(
            "... {} more trials not shown ({})".format(overflow, overflow_str)
        )
    return messages
def trial_errors_str(
    trials: List[Trial], fmt: str = "psql", max_rows: Optional[int] = None
):
    """Returns a readable message regarding trial errors.

    Produces an empty string when no trial has an error file.

    Args:
        trials (list[Trial]): List of trials to get progress string for.
        fmt (str): Output format (see tablefmt in tabulate API).
        max_rows (int): Maximum number of rows in the error table. Defaults to
            unlimited.
    """
    messages = []
    failed = [t for t in trials if t.error_file]
    num_failed = len(failed)
    if num_failed > 0:
        messages.append("Number of errored trials: {}".format(num_failed))
        if num_failed > (max_rows or float("inf")):
            messages.append(
                "Table truncated to {} rows ({} overflow)".format(
                    max_rows, num_failed - max_rows
                )
            )
        # One row per failed trial: name, failure count, error file path.
        error_rows = [
            [str(trial), trial.num_failures, trial.error_file]
            for trial in failed[:max_rows]
        ]
        headers = ["Trial name", "# failures", "error file"]
        messages.append(
            tabulate(error_rows, headers=headers, tablefmt=fmt, showindex=False)
        )
    delim = "<br>" if fmt == "html" else "\n"
    return delim.join(messages)
def best_trial_str(
    trial: Trial,
    metric: str,
    parameter_columns: Union[None, List[str], Dict[str, str]] = None,
):
    """Returns a readable message stating the current best trial."""
    val = trial.last_result[metric]
    config = trial.last_result.get("config", {})
    # Default to every config key; dict-style columns contribute their keys.
    if not parameter_columns:
        parameter_columns = list(config.keys())
    if isinstance(parameter_columns, Mapping):
        parameter_columns = parameter_columns.keys()
    params = {p: unflattened_lookup(p, config) for p in parameter_columns}
    return (
        f"Current best trial: {trial.trial_id} with {metric}={val} and "
        f"parameters={params}"
    )
def _fair_filter_trials(
    trials_by_state: Dict[str, List[Trial]],
    max_trials: int,
    sort_by_metric: bool = False,
):
    """Filters trials such that each state is represented fairly.

    The newest trials of each state are truncated if necessary.

    Args:
        trials_by_state (dict[str, list[Trial]]): Trials grouped by state.
        max_trials (int): Maximum total number of trials to return.
        sort_by_metric (bool): If True, keep the pre-sorted (by metric)
            order of TERMINATED trials instead of re-sorting by trial id.

    Returns:
        Dict mapping state to List of fairly represented trials.
    """
    quota = collections.defaultdict(int)
    # Round-robin one slot per state until the budget is spent or every
    # state is fully covered. (A started round always runs to completion.)
    progressed = True
    while max_trials > 0 and progressed:
        progressed = False
        for state in sorted(trials_by_state):
            if quota[state] < len(trials_by_state[state]):
                progressed = True
                quota[state] += 1
                max_trials -= 1

    ordered = dict()
    for state in sorted(trials_by_state):
        if state == Trial.TERMINATED and sort_by_metric:
            # Already sorted by metric upstream; keep that order.
            ordered[state] = trials_by_state[state]
        else:
            # Oldest first (trial ids increase monotonically).
            ordered[state] = sorted(
                trials_by_state[state], reverse=False, key=lambda t: t.trial_id
            )

    # Keep only each state's quota, dropping the trailing (newest) trials.
    return {
        state: ordered[state][: quota[state]]
        for state in sorted(trials_by_state)
    }
def _get_trial_location(trial: Trial, result: dict) -> Location:
    """Resolve the node ip / pid for ``trial``, preferring the last result.

    The location stored on the trial object is reset when the trial
    terminates, so the result dict is the more reliable source.
    """
    node_ip = result.get(NODE_IP, None)
    pid = result.get(PID, None)
    if node_ip and pid:
        return Location(node_ip, pid)
    # No report yet; fall back to the trial's own location.
    return trial.location
def _get_trial_info(trial: Trial, parameters: List[str], metrics: List[str]):
    """Returns one table row for a trial:

    name | status | loc | params... | metrics...

    Args:
        trial (Trial): Trial to get information for.
        parameters (list[str]): Names of trial parameters to include.
        metrics (list[str]): Names of metrics to include.
    """
    result = trial.last_result
    config = trial.config
    row = [str(trial), trial.status, str(_get_trial_location(trial, result))]
    row.extend(
        unflattened_lookup(param, config, default=None) for param in parameters
    )
    row.extend(
        unflattened_lookup(metric, result, default=None) for metric in metrics
    )
    return row
@DeveloperAPI
class TrialProgressCallback(Callback):
    """Reports (prints) intermediate trial progress.

    This callback is automatically added to the callback stack. When a
    result is obtained, this callback will print the results according to
    the specified verbosity level.

    For ``Verbosity.V3_TRIAL_DETAILS``, a full result list is printed.

    For ``Verbosity.V2_TRIAL_NORM``, only one line is printed per received
    result.

    All other verbosity levels do not print intermediate trial progress.

    Result printing is throttled on a per-trial basis. Per default, results are
    printed only once every 30 seconds. Results are always printed when a trial
    finished or errored.

    Args:
        metric (str): Metric used in one-line progress messages. Falls back
            to the anonymous ``_metric`` result key when unset.
    """

    def __init__(self, metric: Optional[str] = None):
        # Per-trial timestamp of the last printed result (for throttling).
        self._last_print = collections.defaultdict(float)
        # Trials for which a completion message was already printed.
        self._completed_trials = set()
        # Per-trial string form of the last printed result, used to avoid
        # printing a duplicate result line on completion.
        self._last_result_str = {}
        self._metric = metric

    def on_trial_result(
        self,
        iteration: int,
        trials: List["Trial"],
        trial: "Trial",
        result: Dict,
        **info,
    ):
        self.log_result(trial, result, error=False)

    def on_trial_error(
        self, iteration: int, trials: List["Trial"], trial: "Trial", **info
    ):
        self.log_result(trial, trial.last_result, error=True)

    def on_trial_complete(
        self, iteration: int, trials: List["Trial"], trial: "Trial", **info
    ):
        # Only log when we never logged that a trial was completed
        if trial not in self._completed_trials:
            self._completed_trials.add(trial)

            print_result_str = self._print_result(trial.last_result)
            last_result_str = self._last_result_str.get(trial, "")
            # If this is a new result, print full result string
            if print_result_str != last_result_str:
                self.log_result(trial, trial.last_result, error=False)
            else:
                print(f"Trial {trial} completed. " f"Last result: {print_result_str}")

    def log_result(self, trial: "Trial", result: Dict, error: bool = False):
        """Print ``result`` for ``trial`` subject to verbosity and throttling.

        Prints are throttled to once per DEBUG_PRINT_INTERVAL per trial,
        except when the trial is done or errored.
        """
        done = result.get("done", False) is True
        last_print = self._last_print[trial]
        if done and trial not in self._completed_trials:
            self._completed_trials.add(trial)
        if has_verbosity(Verbosity.V3_TRIAL_DETAILS) and (
            done or error or time.time() - last_print > DEBUG_PRINT_INTERVAL
        ):
            # Verbose mode: pretty-print the full (indented) result dict.
            print("Result for {}:".format(trial))
            print("  {}".format(pretty_print(result).replace("\n", "\n  ")))
            self._last_print[trial] = time.time()
        elif has_verbosity(Verbosity.V2_TRIAL_NORM) and (
            done or error or time.time() - last_print > DEBUG_PRINT_INTERVAL
        ):
            # Normal mode: one summary line per result.
            info = ""
            if done:
                info = " This trial completed."

            metric_name = self._metric or "_metric"
            metric_value = result.get(metric_name, -99.0)

            print_result_str = self._print_result(result)

            # Remember the printed form so on_trial_complete can skip
            # re-printing an identical result.
            self._last_result_str[trial] = print_result_str

            error_file = os.path.join(trial.logdir, "error.txt")

            if error:
                message = (
                    f"The trial {trial} errored with "
                    f"parameters={trial.config}. "
                    f"Error file: {error_file}"
                )
            elif self._metric:
                message = (
                    f"Trial {trial} reported "
                    f"{metric_name}={metric_value:.2f} "
                    f"with parameters={trial.config}.{info}"
                )
            else:
                message = (
                    f"Trial {trial} reported "
                    f"{print_result_str} "
                    f"with parameters={trial.config}.{info}"
                )

            print(message)
            self._last_print[trial] = time.time()

    def _print_result(self, result: Dict):
        # Strip bulky and auto-populated keys so the summary stays short.
        print_result = result.copy()
        print_result.pop("config", None)
        print_result.pop("hist_stats", None)
        print_result.pop("trial_id", None)
        print_result.pop("experiment_tag", None)
        print_result.pop("done", None)
        for auto_result in AUTO_RESULT_KEYS:
            print_result.pop(auto_result, None)

        print_result_str = ",".join([f"{k}={v}" for k, v in print_result.items()])
        return print_result_str
def detect_reporter(**kwargs) -> TuneReporterBase:
    """Pick a progress reporter class appropriate for the environment.

    Will return a :class:`JupyterNotebookReporter` if an IPython/Jupyter-like
    session was detected, and a :class:`CLIReporter` otherwise.

    Keyword arguments are passed on to the reporter class.
    """
    if not IS_NOTEBOOK:
        return CLIReporter(**kwargs)
    # In notebooks, overwrite in-place unless verbose per-trial output
    # would be erased by doing so.
    kwargs.setdefault("overwrite", not has_verbosity(Verbosity.V2_TRIAL_NORM))
    return JupyterNotebookReporter(**kwargs)
| |
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
from tcga_encoder.analyses.survival_functions import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test, multivariate_logrank_test
def main( data_location, results_location ):
    # Log-rank survival screening of a VAE latent space.
    #
    # For every tissue and every latent coordinate z_i, patients are ranked
    # along z_i, split into `split_nbr` quantile groups, and the groups are
    # compared with a multivariate log-rank test.  The same test on random
    # permutations gives a per-tissue null distribution.  P-value tables and
    # a summary histogram are written under <results>/survival_p_values.
    #
    # data_location / results_location: paths relative to HOME_DIR.
    data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
    results_path = os.path.join( HOME_DIR, results_location )
    data_filename = os.path.join( data_path, "data.h5")
    fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
    survival_dir = os.path.join( results_path, "survival_p_values" )
    check_and_mkdir(survival_dir)
    survival_curves_dir = os.path.join( survival_dir, "sig_curves" )
    check_and_mkdir(survival_curves_dir)
    print "HOME_DIR: ", HOME_DIR
    print "data_filename: ", data_filename
    print "fill_filename: ", fill_filename
    print "LOADING stores"
    data_store = pd.HDFStore( data_filename, "r" )
    fill_store = pd.HDFStore( fill_filename, "r" )
    # Latent means for train and validation patients, stacked into a single
    # DataFrame indexed by patient barcode.
    Z_train = fill_store["/Z/TRAIN/Z/mu"]
    Z_val = fill_store["/Z/VAL/Z/mu"]
    Z = np.vstack( (Z_train.values, Z_val.values) )
    n_z = Z.shape[1]
    z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
    Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
    # union1d sorts and de-duplicates; Z is re-ordered to match.
    barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
    Z=Z.loc[barcodes]
    tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
    #Overall Survival (OS) The event call is derived from "vital status" parameter. The time_to_event is in days, equals to days_to_death if patient deceased; in the case of a patient is still living, the time variable is the maximum(days_to_last_known_alive, days_to_last_followup). This pair of clinical parameters are called _EVENT and _TIME_TO_EVENT on the cancer browser.
    ALL_SURVIVAL = data_store["/CLINICAL/data"][["patient.days_to_last_followup","patient.days_to_death","patient.days_to_birth"]]
    # Clinical rows are indexed by (tissue, barcode) pairs; join them into the
    # "tissue_barcode" strings used by Z and the tissue table.
    tissue_barcodes = np.array( ALL_SURVIVAL.index.tolist(), dtype=str )
    surv_barcodes = np.array([ x+"_"+y for x,y in tissue_barcodes])
    NEW_SURVIVAL = pd.DataFrame( ALL_SURVIVAL.values, index =surv_barcodes, columns = ALL_SURVIVAL.columns )
    NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
    # days_to_birth is negative (days before diagnosis); exactly one of
    # days_to_last_followup / days_to_death is expected per patient, so their
    # NaN-filled sum is the time-to-event.  Event = 1 iff death recorded.
    Age = NEW_SURVIVAL[ "patient.days_to_birth" ].values.astype(int)
    Times = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)+NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)
    Events = (1-np.isnan( NEW_SURVIVAL[ "patient.days_to_death" ].astype(float)) ).astype(int)
    # Keep patients with days_to_birth < -10, i.e. born more than 10 days
    # before diagnosis -- presumably filters corrupt/zero ages; confirm.
    ok_age_query = Age<-10
    ok_age = pp.find(ok_age_query )
    tissues = tissues[ ok_age_query ]
    Age=-Age[ok_age]
    Times = Times[ok_age]
    Events = Events[ok_age]
    barcodes = barcodes[ok_age]
    NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
    # Drop patients whose recorded follow-up time is negative.
    bad_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)<0
    bad_followup = pp.find( bad_followup_query )
    ok_followup_query = 1-bad_followup_query
    ok_followup = pp.find( ok_followup_query )
    # NOTE(review): bad_death is computed but never used to filter -- confirm
    # whether negative days_to_death should also be excluded.
    bad_death_query = NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)<0
    bad_death = pp.find( bad_death_query )
    Age=Age[ok_followup]
    Times = Times[ok_followup]
    Events = Events[ok_followup]
    barcodes = barcodes[ok_followup]
    NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
    # Assemble the working frame: latent coords + event flag E, time T,
    # log-age and the argmax tissue index per patient.
    Z = Z.loc[barcodes]
    Z["E"] = Events
    Z["T"] = Times
    Z["Age"] = np.log(Age)
    tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
    tissue_names = tissues.columns
    tissue_idx = np.argmax( tissues.values, 1 )
    Z["Tissue"] = tissue_idx
    n_tissues = len(tissue_names)
    # Null distribution: n_random random orderings per tissue.
    n_random = 100
    random_names = ["r_%d"%(trial_idx) for trial_idx in range(n_random)]
    alpha=0.001
    split_nbrs = [2,4]
    nbr_to_plot = 5
    # One (tissue x latent) and one (tissue x random-trial) p-value table
    # per split count, initialised to NaN.
    split_p_values = {}
    split_p_values_random = {}
    for split_nbr in split_nbrs:
        split_p_values[ split_nbr ] = pd.DataFrame( np.nan*np.ones((n_tissues,n_z) ), index = tissue_names, columns=z_names )
        split_p_values_random[ split_nbr ] = pd.DataFrame( np.nan*np.ones((n_tissues,n_random) ), index = tissue_names, columns=random_names )
    for t_idx in range(n_tissues):
        t_ids = tissue_idx == t_idx
        tissue_name = tissue_names[t_idx]
        # gbm is excluded up front (and dropped from the tables below).
        if tissue_name == "gbm":
            print "skipping gbm"
            continue
        print "working %s"%(tissue_name)
        bcs = barcodes[t_ids]
        Z_tissue = Z.loc[ bcs ]
        events = Z_tissue["E"]
        times = Z_tissue["T"]
        Z_values = Z_tissue[z_names].values
        n_tissue = len(bcs)
        # Test every latent coordinate: sort patients along z, split into
        # groups and log-rank test the groups.
        print "  using z_values"
        for z_idx in range(n_z):
            z = Z_values[:,z_idx]
            I = np.argsort(z)
            for split_nbr in split_nbrs:
                I_splits = survival_splits( events, I, split_nbr )
                groups = groups_by_splits( n_tissue, I_splits )
                results = multivariate_logrank_test(times, groups=groups, event_observed=events )
                split_p_values[ split_nbr ]["z_%d"%(z_idx)].loc[tissue_name] = results.p_value
        # Same test on random patient orderings -> null distribution.
        print "  using random"
        for r_idx in range(n_random):
            I = np.random.permutation(n_tissue)
            for split_nbr in split_nbrs:
                I_splits = survival_splits( events, I, split_nbr )
                groups = groups_by_splits( n_tissue, I_splits )
                results = multivariate_logrank_test(times, groups=groups, event_observed=events )
                split_p_values_random[ split_nbr ]["r_%d"%(r_idx)].loc[tissue_name] = results.p_value
        # (A block that plotted Kaplan-Meier curves for the best latents per
        # tissue into survival_curves_dir was here; currently disabled.)
    # Persist the p-value tables and overlay histograms of real vs random
    # p-values (uniform histogram == no signal).
    f=pp.figure()
    for idx,split_nbr in zip( range(len(split_nbrs)), split_nbrs ):
        split_p_values_random[ split_nbr ].drop("gbm",inplace=True)
        split_p_values[ split_nbr ].drop("gbm",inplace=True)
        split_p_values_random[ split_nbr ].to_csv( survival_dir + "/p_values_q%d_random.csv"%(split_nbr) )
        split_p_values[ split_nbr ].to_csv( survival_dir + "/p_values_q%d.csv"%(split_nbr) )
        ax = f.add_subplot( 1,len(split_nbrs),idx+1 )
        # NOTE(review): `normed=True` is the pre-2.1 matplotlib density flag;
        # newer matplotlib expects `density=True`.
        ax.hist( split_p_values_random[split_nbr].values.flatten(), bins=np.linspace(0,1,11), histtype="step", normed=True, color="red", lw=2 )
        ax.hist( split_p_values[split_nbr].values.flatten(), bins=np.linspace(0,1,11), histtype="step", normed=True, color="blue", lw=2 )
        pp.title( "%d splits"%(split_nbr) )
        pp.legend(["random","z-space"])
    pp.savefig( survival_dir + "/p_values.png", format="png", dpi=300)
    data_store.close()
    fill_store.close()
    pp.close('all')
if __name__ == "__main__":
    # Usage: python <script> <data_location> <results_location>
    # Both paths are interpreted relative to HOME_DIR by main().
    # Fail with a usage message instead of an IndexError when args are missing.
    if len(sys.argv) < 3:
        sys.stderr.write("Usage: %s <data_location> <results_location>\n" % sys.argv[0])
        sys.exit(1)
    data_location = sys.argv[1]
    results_location = sys.argv[2]
    main( data_location, results_location )
| |
import pygame, random, basic, sys, Pause, Pregame, pygame.mixer,database, time, Pygame_main, Category
# Colour aliases taken from the shared `basic` module.
Black = basic.black
White = basic.white
Blue = basic.blue
Red = basic.red
Green = basic.green
Yellow = basic.yellow
Gold = basic.gold
# Die sprite size (px) and per-step pixel deltas for pawn movement:
# changeL/changeR move along x, changeU moves up (negative y).
Dice_resize = (200,200)
changeL = -28.5
changeR = 29
changeU = -42.5
#images
# Die face sprites: the blank face plus faces 1-6, all rescaled to Dice_resize.
dobbelNiks = pygame.image.load('DObelsteen niks.png')
dobbelImg1 = pygame.image.load('DObelsteen 1.png')
dobbelImg2 = pygame.image.load('DObelsteen 2.png')
dobbelImg3 = pygame.image.load('DObelsteen 3.png')
dobbelImg4 = pygame.image.load('DObelsteen 4.png')
dobbelImg5 = pygame.image.load('DObelsteen 5.png')
dobbelImg6 = pygame.image.load('DObelsteen 6.png')
#resize die faces to a common size
dobbelNiks = pygame.transform.scale(dobbelNiks, Dice_resize)
dobbelImg1 = pygame.transform.scale(dobbelImg1, Dice_resize)
dobbelImg2 = pygame.transform.scale(dobbelImg2, Dice_resize)
dobbelImg3 = pygame.transform.scale(dobbelImg3, Dice_resize)
dobbelImg4 = pygame.transform.scale(dobbelImg4, Dice_resize)
dobbelImg5 = pygame.transform.scale(dobbelImg5, Dice_resize)
dobbelImg6 = pygame.transform.scale(dobbelImg6, Dice_resize)
# Arrow-keys hint image shown on the board.
pijltoetsen = pygame.image.load("pijltoetsen.png")
def _draw_die(image, x, y):
    """Blit one die-face *image* onto the module-level screen at (x, y)."""
    screen.blit(image, (x, y))

def dobbelsteen(x,y):
    """Draw the blank die face."""
    _draw_die(dobbelNiks, x, y)

def dobbel1(x,y):
    """Draw die face 1."""
    _draw_die(dobbelImg1, x, y)

def dobbel2(x,y):
    """Draw die face 2."""
    _draw_die(dobbelImg2, x, y)

def dobbel3(x,y):
    """Draw die face 3."""
    _draw_die(dobbelImg3, x, y)

def dobbel4(x,y):
    """Draw die face 4."""
    _draw_die(dobbelImg4, x, y)

def dobbel5(x,y):
    """Draw die face 5."""
    _draw_die(dobbelImg5, x, y)

def dobbel6(x,y):
    """Draw die face 6."""
    _draw_die(dobbelImg6, x, y)
# One-time pygame initialisation, background and avatar sprites (25x34 px).
pygame.init()
pygame.display.set_caption("EUROMAST")
# NOTE(review): bare attribute access below has no effect -- presumably meant
# to ensure basic.screen exists; confirm and remove if unneeded.
basic.screen
bg = pygame.image.load("eurimast.jpg")
player_one = pygame.image.load("avatar1_standing.png")
player_one = pygame.transform.scale(player_one, (25,34))
player_two = pygame.image.load("avatar2_standing.png")
player_two = pygame.transform.scale(player_two, (25,34))
player_three = pygame.image.load("avatar3_standing.png")
player_three = pygame.transform.scale(player_three, (25,34))
player_four = pygame.image.load("avatar4_standing.png")
player_four = pygame.transform.scale(player_four, (25,34))
# Per-player turn counters (printed when a player wins).
player1_turn_count = 0
player2_turn_count = 0
player3_turn_count = 0
player4_turn_count = 0
# Size used for the medal-board avatars in Medals().
avatar_resize = (42,50)
#een classe voor spelers aanmaken
# Sprite class for the four player pawns.
class Player(pygame.sprite.Sprite):
    # Per-frame velocity; left at 0, movement happens via loop() instead.
    change_x = 0
    change_y = 0
    def __init__(self, x, y, image, point):
        """Create a pawn with sprite *image* and score *point*.

        NOTE(review): x/y are stored but self.rect is NOT positioned from
        them -- pawn positions are set by the reset in update(); confirm
        whether self.x/self.y are still used anywhere.
        """
        pygame.sprite.Sprite.__init__(self) #super(type(self), self).__init__()
        self.image = image
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.point = point
    def loop(self, x, y):
        # Move the pawn by one pixel delta (called once per die pip).
        self.rect.x += x
        self.rect.y += y
    def update(self):
        #move up and down
        self.rect.x += self.change_x
        self.rect.y += self.change_y
        # NOTE(review): when THIS pawn leaves the 0..700 band, ALL four global
        # pawns are snapped back to the start row.  That reads like a
        # whole-board reset, but the commented-out per-player check below
        # suggests it may once have been conditional -- confirm intent.
        if not (0 < self.rect.y < 700):
            player1.rect.x = 243
            player1.rect.y = 660
            # if not (0 < player2.rect.y < 450):
            player2.rect.x = 273
            player2.rect.y = 660
            player3.rect.x = 183
            player3.rect.y = 660
            player4.rect.x = 213
            player4.rect.y = 660
        # Clamp at the very top of the board.
        if self.rect.y <= 2:
            self.rect.y = 2
#spelers botsen tegen elkaar
# Resolve pawn collisions: the pawn NOT on turn is pushed down one step.
def collide(player_turn):
    """Push the inactive pawn down by 42.5 px when two pawns overlap.

    Pairs are checked in a fixed order and only the first overlapping pair
    is resolved, matching the original if/elif chain.
    """
    pawns = {1: player1, 2: player2, 3: player3, 4: player4}
    for a, b in ((1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)):
        if pawns[a].rect.colliderect(pawns[b]):
            if player_turn == a:
                pawns[b].rect.y += 42.5
            elif player_turn == b:
                pawns[a].rect.y += 42.5
            break
#verticale lijnen (walls) waarop de poppetjes bewegen
# Vertical track sprites the pawns climb along.
class Wall(pygame.sprite.Sprite):
    """A solid vertical bar at (x, y) with the given width/height."""
    def __init__(self, x, y, width, height):
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([width, height])
        surface.fill(Blue)  # default colour; callers may re-fill
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.x = x
        self.rect.y = y
#kijkt of spelers/poppetjes buiten hun assen gaan
def wall_detection(player_turn):
    """Wrap the active pawn horizontally and detect a finish at the top.

    X positions wrap between the leftmost track (183) and the rightmost
    (270/273); reaching y <= 17 pins the pawn at the top and calls
    winning().  Pawn collisions are resolved afterwards in every case.

    Fixes an inconsistency in the original: player 3 wrapped to x=180
    instead of 183 like every other player.
    """
    pawns = {1: player1, 2: player2, 3: player3, 4: player4}
    pawn = pawns.get(player_turn)
    if pawn is not None:
        # wrap around the horizontal track range
        if pawn.rect.x < 183:
            pawn.rect.x = 270
        if pawn.rect.x > 273:
            pawn.rect.x = 183
        # top of the tower reached -> this player wins
        if pawn.rect.y <= 17:
            pawn.rect.y = 15
            winning(player_turn)
    collide(player_turn)
#winnende functie
# Winning routine.
def winning(player):
    """Handle a finished game for *player* (1-4).

    Plays the victory sound, stores the player's score via the database
    module, shows the victory screen for 4 seconds and returns to the
    highscore menu.

    Fixes the original's swapped debug prints (player 2 printed
    player3_turn_count and player 3 printed player2_turn_count) and drops
    an unused local variable.
    """
    victoryscreen = pygame.image.load("victory.jpg")
    pawns = {1: player1, 2: player2, 3: player3, 4: player4}
    counters = {1: player1_turn_count, 2: player2_turn_count,
                3: player3_turn_count, 4: player4_turn_count}
    playerturn = 0
    if player in pawns:
        playerturn = pawns[player].point
        print(counters[player])
    Wsound = pygame.mixer.Sound("winner.wav")
    Wsound.play()
    higherscore = database.highscores("speler" + str(player), playerturn, None, None, None)
    database.highscores.highscores_updaten(higherscore)
    sprites2.draw(basic.screen)
    basic.screen.blit(victoryscreen, (100, 70))
    pygame.display.flip()
    pygame.time.delay(4000)
    Pygame_main.highscores()
#dobbelsteen
# Die animation.
def steen(moves):
    """Animate the die roll: blank face once, then the rolled face twice.

    Each frame is flushed and held for 750 ms by updatetime().
    """
    dobbelsteen(700,400)
    updatetime()
    faces = {1: dobbel1, 2: dobbel2, 3: dobbel3}
    draw_face = faces.get(moves)
    if draw_face is not None:
        for _ in range(2):
            draw_face(700,400)
            updatetime()
def updatetime():
    # Flush the current frame and pause 750 ms so each die face stays visible.
    pygame.display.update()
    pygame.time.delay(750)
# spelers rechts boven op het speelbord
# Player medal board at the top right of the game board.
def Medals():
    """Draw the four player medals, then their rescaled avatars."""
    medals = (
        (Pregame.Medal_speler_1, (610, 110)),
        (Pregame.Medal_speler_2, (810, 110)),
        (Pregame.Medal_speler_3, (810, 160)),
        (Pregame.Medal_speler_4, (610, 160)),
    )
    avatars = (
        (Pregame.ava_one, (560, 110)),
        (Pregame.ava_two, (1018, 110)),
        (Pregame.ava_three, (1018, 160)),
        (Pregame.ava_four, (560, 160)),
    )
    for medal, pos in medals:
        basic.screen.blit(medal, pos)
    for avatar, pos in avatars:
        basic.screen.blit(pygame.transform.scale(avatar, avatar_resize), pos)
#rechts boven wie er aan de beurt is
# Gold-medal highlight for the player whose turn it is (top right).
def _blit_gold(medal, pos):
    """Blit one gold medal image at *pos* on the shared screen."""
    basic.screen.blit(medal, pos)

def MedalGold1():
    _blit_gold(Pregame.MedalGold_speler_1, (610, 110))

def MedalGold2():
    _blit_gold(Pregame.MedalGold_speler_2, (810, 110))

def MedalGold3():
    _blit_gold(Pregame.MedalGold_speler_3, (810, 160))

def MedalGold4():
    _blit_gold(Pregame.MedalGold_speler_4, (610, 160))
# Window and sprite-group setup.
# NOTE(review): pygame.init() and the caption were already executed earlier
# in this module -- redundant but harmless; confirm and deduplicate.
pygame.init()
screen = pygame.display.set_mode([basic.width, basic.height])
pygame.display.set_caption("EUROMAST")
sprites = pygame.sprite.Group()   # static board sprites (walls)
sprites2 = pygame.sprite.Group()  # player pawns
#aanmaken en groeperen van walls
wall_list = pygame.sprite.Group()
wall_1 = Wall(190, 4, 10, 690)
wall_list.add(wall_1)
wall_1.image.fill(Blue)
sprites.add(wall_1)
wall_2 = Wall(220,4, 10, 690)
wall_list.add(wall_2)
wall_1.image.fill(Red)
sprites.add(wall_2)
wall_3 = Wall(250, 4, 10, 690)
wall_list.add(wall_3)
wall_3.image.fill(Yellow)
sprites.add(wall_3)
wall_4 = Wall(280, 4, 10, 690)
wall_list.add(wall_4)
wall_4.image.fill(Green)
sprites.add(wall_4)
#horizontale lijnen voor levels
pygame.draw.line(bg, Blue, (0, 560), (400, 560),2)
pygame.draw.line(bg, Blue, (0, 525), (400, 525),2)
pygame.draw.line(bg, Blue, (0, 490), (400, 490),2)
pygame.draw.line(bg, Blue, (0, 455), (400, 455),2)
pygame.draw.line(bg, Blue, (0, 420), (400, 420),2)
pygame.draw.line(bg, Blue, (0, 385), (400, 385),2)
pygame.draw.line(bg, Blue, (0, 350), (400, 350),2)
pygame.draw.line(bg, Blue, (0, 315), (400, 315),2)
pygame.draw.line(bg, Blue, (0, 280), (400, 280),2)
pygame.draw.line(bg, Blue, (0, 245), (400, 245),2)
pygame.draw.line(bg, Blue, (0, 210), (400, 210),2)
pygame.draw.line(bg, Blue, (0, 175), (400, 175),2)
pygame.draw.line(bg, Blue, (0, 140), (400, 140),2)
pygame.draw.line(bg, Blue, (0, 105), (400, 105),2)
pygame.draw.line(bg, Blue, (0, 70), (400, 70),2)
pygame.draw.line(bg, Gold, (0, 35), (400, 35),3)
pygame.draw.line(bg, Gold, (0, 0), (400, 0),3)
#speler groeperen
player1_list = pygame.sprite.Group()
player2_list = pygame.sprite.Group()
#aanmaken van 4 spelers
player1 = Player(150, 660, player_one, 0) #gele pop
player1_list.add(player1)
sprites2.add(player1)
player2 = Player(185, 660, player_two, 0) #groene pop
sprites2.add(player2)
player3 = Player(210, 660, player_three, 0) #rode pop
sprites2.add(player3)
player4 = Player(240, 660, player_four, 0) #blauwe pop
sprites2.add(player4)
font = pygame.font.SysFont('Arial', 18, False, False)
clock = pygame.time.Clock()
#player_moves wordt de dobbelsteen worp uitkomst
# Weighted die roll: the result becomes the number of steps a pawn moves.
def dice_roll():
    """Roll the weighted die (60% -> 1, 30% -> 2, 10% -> 3), animate it
    with steen() and return the number of moves."""
    roll = random.randint(1, 10)
    if roll <= 6:
        moves = 1
    elif roll <= 9:
        moves = 2
    else:
        moves = 3
    steen(moves)
    return moves
# Main-loop flags: `done` is currently never set; `gameover` gates key input.
done = False
gameover = False
#defineerd wie het eerst aan de beurt is
# Entry point that starts the game with whichever player goes first.
def player_game(first_player_re):
    """Enter the main game loop starting with *first_player_re*'s turn."""
    player_main_game(first_player_re)
#gaat naar een categorie
# Transition to a question category after a move.
def ga_category():
    # Redraw the board so the latest move is visible, hold the frame for
    # 1.5 s, then hand control to the category/question screen.
    blit_bg()
    sprites2.update()
    sprites2.draw(basic.screen)
    blit_allbutbg()
    pygame.display.flip()
    pygame.time.delay(1500)
    Category.typeQroll()
#het hele spel
# The main game loop.
def player_main_game(player_turn):
    """Run the main game loop starting at *player_turn* (1-4).

    Handles quitting, pausing ('p') and arrow-key moves.  As in the
    original, any non-'p' key press first rolls the die; only if the key is
    an arrow key does the active pawn actually move.  After a move the
    wall/finish/collision checks run, the turn passes 1->2->3->4->1 and a
    question category screen is shown.

    The original repeated identical move logic 12 times (3 keys x 4
    players); this version drives it from two small lookup tables.
    """
    global bg
    # pixel delta applied once per die pip, for each supported key
    key_deltas = {
        pygame.K_LEFT: (changeL, 0),
        pygame.K_RIGHT: (changeR, 0),
        pygame.K_UP: (0, changeU),
    }
    pawns = {1: player1, 2: player2, 3: player3, 4: player4}
    gold_medals = {1: MedalGold1, 2: MedalGold2, 3: MedalGold3, 4: MedalGold4}
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN and not gameover:
                if event.key == pygame.K_p:
                    Pause.pause_pressed()
                else:
                    # NOTE: the die is rolled for ANY non-'p' key, even one
                    # that does not move a pawn -- preserved from the original.
                    player_moves = dice_roll()
                    delta = key_deltas.get(event.key)
                    pawn = pawns.get(player_turn)
                    if delta is not None and pawn is not None:
                        pawn.point += 1
                        for _ in range(player_moves):
                            pawn.loop(delta[0], delta[1])
                        wall_detection(player_turn)
                        player_turn = 1 + (player_turn % 4)  # 1->2->3->4->1
                        ga_category()
        sprites.update()
        sprites2.update()
        blit_bg()
        sprites.update()
        Medals()
        # gold medal marks whose turn it is
        highlight = gold_medals.get(player_turn)
        if highlight is not None:
            highlight()
        blit_allbutbg()
        pygame.display.flip()
        clock.tick(60)
#tekent de level text
# Draw the sprites plus the level labels and the arrow-keys hint.
def blit_allbutbg():
    """Draw all sprites, the level labels and the arrow-key hint image."""
    labels = (
        ("level 1", Black, (30, 660)),
        ("level 5", Black, (30, 485)),
        ("level 10", Black, (30, 270)),
        ("level 15", Black, (30, 55)),
        ("Finish", Yellow, (30, 10)),
    )
    sprites.draw(basic.screen)
    sprites2.draw(basic.screen)
    for text, colour, pos in labels:
        basic.screen.blit(font.render(text, True, colour), pos)
    basic.screen.blit(pijltoetsen, (1000, 470))
#laat de background zien
# Draw the background image.
def blit_bg():
    """Blit the (lazily rescaled) background onto the screen.

    The original rescaled `bg` to 1280x800 on every call; the scale is only
    needed once, so it is skipped when the surface already has the target
    size.  Visual output is unchanged.
    """
    global bg
    if bg.get_size() != (1280, 800):
        bg = pygame.transform.scale(bg,(1280,800))
    basic.screen.blit(bg, (0,0))
| |
# -*- coding: utf-8 -*-
"""
flask_security.core
~~~~~~~~~~~~~~~~~~~
Flask-Security core module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app, render_template
from flask_login import AnonymousUserMixin, UserMixin as BaseUserMixin, \
LoginManager, current_user
from flask_principal import Principal, RoleNeed, UserNeed, Identity, \
identity_loaded
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from werkzeug.datastructures import ImmutableList
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from .utils import config_value as cv, get_config, md5, url_for_security, string_types
from .views import create_blueprint
from .forms import LoginForm, ConfirmRegisterForm, RegisterForm, \
ForgotPasswordForm, ChangePasswordForm, ResetPasswordForm, \
SendConfirmationForm, PasswordlessLoginForm
# Convenient references
# Proxy resolving to the Security state registered on the current Flask app.
_security = LocalProxy(lambda: current_app.extensions['security'])
#: Default Flask-Security configuration.
# Each key is read from app.config with a SECURITY_ prefix (see get_config);
# the values below are the fallbacks applied in _get_state().
_default_config = {
    'BLUEPRINT_NAME': 'security',
    'URL_PREFIX': None,
    'SUBDOMAIN': None,
    'FLASH_MESSAGES': True,
    # NOTE(review): 'plaintext' is the out-of-the-box hash -- deployments are
    # expected to override SECURITY_PASSWORD_HASH with a real scheme.
    'PASSWORD_HASH': 'plaintext',
    'PASSWORD_SALT': None,
    # View URLs
    'LOGIN_URL': '/login',
    'LOGOUT_URL': '/logout',
    'REGISTER_URL': '/register',
    'RESET_URL': '/reset',
    'CHANGE_URL': '/change',
    'CONFIRM_URL': '/confirm',
    # Post-action redirect targets (None -> framework default behaviour)
    'POST_LOGIN_VIEW': '/',
    'POST_LOGOUT_VIEW': '/',
    'CONFIRM_ERROR_VIEW': None,
    'POST_REGISTER_VIEW': None,
    'POST_CONFIRM_VIEW': None,
    'POST_RESET_VIEW': None,
    'POST_CHANGE_VIEW': None,
    'UNAUTHORIZED_VIEW': None,
    # Template names
    'FORGOT_PASSWORD_TEMPLATE': 'security/forgot_password.html',
    'LOGIN_USER_TEMPLATE': 'security/login_user.html',
    'REGISTER_USER_TEMPLATE': 'security/register_user.html',
    'RESET_PASSWORD_TEMPLATE': 'security/reset_password.html',
    'CHANGE_PASSWORD_TEMPLATE': 'security/change_password.html',
    'SEND_CONFIRMATION_TEMPLATE': 'security/send_confirmation.html',
    'SEND_LOGIN_TEMPLATE': 'security/send_login.html',
    # Feature flags
    'CONFIRMABLE': False,
    'REGISTERABLE': False,
    'RECOVERABLE': False,
    'TRACKABLE': False,
    'PASSWORDLESS': False,
    'CHANGEABLE': False,
    # Email behaviour
    'SEND_REGISTER_EMAIL': True,
    'SEND_PASSWORD_CHANGE_EMAIL': True,
    'SEND_PASSWORD_RESET_NOTICE_EMAIL': True,
    # Token lifetimes (parsed elsewhere as human-readable durations)
    'LOGIN_WITHIN': '1 days',
    'CONFIRM_EMAIL_WITHIN': '5 days',
    'RESET_PASSWORD_WITHIN': '5 days',
    'LOGIN_WITHOUT_CONFIRMATION': False,
    'EMAIL_SENDER': 'no-reply@localhost',
    'TOKEN_AUTHENTICATION_KEY': 'auth_token',
    'TOKEN_AUTHENTICATION_HEADER': 'Authentication-Token',
    'TOKEN_MAX_AGE': None,
    # Serializer salts (see _get_serializer)
    'CONFIRM_SALT': 'confirm-salt',
    'RESET_SALT': 'reset-salt',
    'LOGIN_SALT': 'login-salt',
    'CHANGE_SALT': 'change-salt',
    'REMEMBER_SALT': 'remember-salt',
    'DEFAULT_REMEMBER_ME': False,
    'DEFAULT_HTTP_AUTH_REALM': 'Login Required',
    # Email subjects
    'EMAIL_SUBJECT_REGISTER': 'Welcome',
    'EMAIL_SUBJECT_CONFIRM': 'Please confirm your email',
    'EMAIL_SUBJECT_PASSWORDLESS': 'Login instructions',
    'EMAIL_SUBJECT_PASSWORD_NOTICE': 'Your password has been reset',
    'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE': 'Your password has been changed',
    'EMAIL_SUBJECT_PASSWORD_RESET': 'Password reset instructions',
    'USER_IDENTITY_ATTRIBUTES': ['email'],
    # Passlib schemes accepted for verification
    'PASSWORD_SCHEMES': [
        'bcrypt',
        'des_crypt',
        'pbkdf2_sha256',
        'pbkdf2_sha512',
        'sha256_crypt',
        'sha512_crypt',
        # And always last one...
        'plaintext'
    ],
    'DEPRECATED_PASSWORD_SCHEMES': ['auto']
}
#: Default Flask-Security messages
_default_messages = {
'UNAUTHORIZED': (
'You do not have permission to view this resource.', 'error'),
'CONFIRM_REGISTRATION': (
'Thank you. Confirmation instructions have been sent to %(email)s.', 'success'),
'EMAIL_CONFIRMED': (
'Thank you. Your email has been confirmed.', 'success'),
'ALREADY_CONFIRMED': (
'Your email has already been confirmed.', 'info'),
'INVALID_CONFIRMATION_TOKEN': (
'Invalid confirmation token.', 'error'),
'EMAIL_ALREADY_ASSOCIATED': (
'%(email)s is already associated with an account.', 'error'),
'PASSWORD_MISMATCH': (
'Password does not match', 'error'),
'RETYPE_PASSWORD_MISMATCH': (
'Passwords do not match', 'error'),
'INVALID_REDIRECT': (
'Redirections outside the domain are forbidden', 'error'),
'PASSWORD_RESET_REQUEST': (
'Instructions to reset your password have been sent to %(email)s.', 'info'),
'PASSWORD_RESET_EXPIRED': (
'You did not reset your password within %(within)s. New instructions have been sent '
'to %(email)s.', 'error'),
'INVALID_RESET_PASSWORD_TOKEN': (
'Invalid reset password token.', 'error'),
'CONFIRMATION_REQUIRED': (
'Email requires confirmation.', 'error'),
'CONFIRMATION_REQUEST': (
'Confirmation instructions have been sent to %(email)s.', 'info'),
'CONFIRMATION_EXPIRED': (
'You did not confirm your email within %(within)s. New instructions to confirm your email '
'have been sent to %(email)s.', 'error'),
'LOGIN_EXPIRED': (
'You did not login within %(within)s. New instructions to login have been sent to '
'%(email)s.', 'error'),
'LOGIN_EMAIL_SENT': (
'Instructions to login have been sent to %(email)s.', 'success'),
'INVALID_LOGIN_TOKEN': (
'Invalid login token.', 'error'),
'DISABLED_ACCOUNT': (
'Account is disabled.', 'error'),
'EMAIL_NOT_PROVIDED': (
'Email not provided', 'error'),
'INVALID_EMAIL_ADDRESS': (
'Invalid email address', 'error'),
'PASSWORD_NOT_PROVIDED': (
'Password not provided', 'error'),
'PASSWORD_NOT_SET': (
'No password is set for this user', 'error'),
'PASSWORD_INVALID_LENGTH': (
'Password must be at least 6 characters', 'error'),
'USER_DOES_NOT_EXIST': (
'Specified user does not exist', 'error'),
'INVALID_PASSWORD': (
'Invalid password', 'error'),
'PASSWORDLESS_LOGIN_SUCCESSFUL': (
'You have successfuly logged in.', 'success'),
'PASSWORD_RESET': (
'You successfully reset your password and you have been logged in automatically.',
'success'),
'PASSWORD_IS_THE_SAME': (
'Your new password must be different than your previous password.', 'error'),
'PASSWORD_CHANGE': (
'You successfully changed your password.', 'success'),
'LOGIN': (
'Please log in to access this page.', 'info'),
'REFRESH': (
'Please reauthenticate to access this page.', 'info'),
}
_default_field_labels = {
'EMAIL': 'Email Address',
'PASSWORD': 'Password',
'REMEMBER_ME': 'Remember Me',
'LOGIN': 'Login',
'RETYPE_PASSWORD': 'Retype Password',
'REGISTER': 'Register',
'SEND_CONFIRMATION': 'Resend Confirmation Instructions',
'RECOVER_PASSWORD': 'Recover Password',
'RESET_PASSWORD': 'Reset Password',
'RETYPE_PASSWORD': 'Retype Password',
'NEW_PASSWORD': 'New Password',
'CHANGE_PASSWORD': 'Change Password',
'SEND_LOGIN_LINK': 'Send Login Link'
}
# Stock form classes used when the integrator does not override them in
# Security(...) / init_app(...); see the fallback loop in _get_state().
_default_forms = {
    'login_form': LoginForm,
    'confirm_register_form': ConfirmRegisterForm,
    'register_form': RegisterForm,
    'forgot_password_form': ForgotPasswordForm,
    'reset_password_form': ResetPasswordForm,
    'change_password_form': ChangePasswordForm,
    'send_confirmation_form': SendConfirmationForm,
    'passwordless_login_form': PasswordlessLoginForm,
}
def _user_loader(user_id):
    """Flask-Login user loader: fetch a user by id from the datastore."""
    return _security.datastore.find_user(id=user_id)
def _token_loader(token):
    """Flask-Login token loader: resolve a remember-token to a user.

    The token deserializes to ``[user_id, password_md5]``; the user is only
    returned when the embedded hash still matches the stored password.
    Returns the anonymous user for any invalid, expired or malformed token.
    """
    try:
        data = _security.remember_token_serializer.loads(token, max_age=_security.token_max_age)
        user = _security.datastore.find_user(id=data[0])
        if user and safe_str_cmp(md5(user.password), data[1]):
            return user
    except Exception:
        # Replaces the original bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  Any token problem means "anonymous".
        pass
    return _security.login_manager.anonymous_user()
def _identity_loader():
    # Flask-Principal identity loader: build an Identity from the logged-in
    # user; returns None for anonymous users so Principal keeps its default.
    if not isinstance(current_user._get_current_object(), AnonymousUserMixin):
        identity = Identity(current_user.id)
        return identity
def _on_identity_loaded(sender, identity):
    # Populate the freshly loaded identity with the current user's needs:
    # a UserNeed for the id plus one RoleNeed per role name.
    if hasattr(current_user, 'id'):
        identity.provides.add(UserNeed(current_user.id))
    for role in current_user.roles:
        identity.provides.add(RoleNeed(role.name))
    identity.user = current_user
def _get_login_manager(app, anonymous_user):
    """Create, configure and initialize the Flask-Login manager.

    Wires up the user/token loaders, the anonymous user class and the
    (optionally flashed) login/refresh messages, then binds it to *app*.
    """
    manager = LoginManager()
    manager.anonymous_user = anonymous_user or AnonymousUser
    manager.login_view = '%s.login' % cv('BLUEPRINT_NAME', app=app)
    manager.user_loader(_user_loader)
    manager.token_loader(_token_loader)
    if cv('FLASH_MESSAGES', app=app):
        manager.login_message, manager.login_message_category = cv('MSG_LOGIN', app=app)
        manager.needs_refresh_message, manager.needs_refresh_message_category = cv('MSG_REFRESH', app=app)
    else:
        manager.login_message = None
        manager.needs_refresh_message = None
    manager.init_app(app)
    return manager
def _get_principal(app):
    # Create the Flask-Principal instance (session storage disabled) and
    # register our identity loader.
    p = Principal(app, use_sessions=False)
    p.identity_loader(_identity_loader)
    return p
def _get_pwd_context(app):
    """Build the passlib ``CryptContext`` from the app's security settings.

    :raises ValueError: when the configured PASSWORD_HASH is not one of the
        allowed PASSWORD_SCHEMES.
    """
    default_scheme = cv('PASSWORD_HASH', app=app)
    schemes = cv('PASSWORD_SCHEMES', app=app)
    deprecated = cv('DEPRECATED_PASSWORD_SCHEMES', app=app)
    if default_scheme not in schemes:
        allowed = ', '.join(schemes[:-1]) + ' and ' + schemes[-1]
        raise ValueError("Invalid hash scheme %r. Allowed values are %s" % (default_scheme, allowed))
    return CryptContext(schemes=schemes, default=default_scheme, deprecated=deprecated)
def _get_serializer(app, name):
    """Build a timed serializer for *name* (e.g. 'confirm', 'reset').

    The salt is read from the SECURITY_<NAME>_SALT config key.
    """
    config = app.config
    return URLSafeTimedSerializer(
        secret_key=config.get('SECRET_KEY'),
        salt=config.get('SECURITY_%s_SALT' % name.upper()))
def _get_state(app, datastore, anonymous_user=None, **kwargs):
    """Assemble a :class:`_SecurityState` from config, helpers and overrides.

    Config values become lowercased attributes; explicit kwargs (e.g. custom
    form classes) win over config, and any form not supplied falls back to
    the stock class from ``_default_forms``.
    """
    for key, value in get_config(app).items():
        kwargs[key.lower()] = value
    kwargs.update(dict(
        app=app,
        datastore=datastore,
        login_manager=_get_login_manager(app, anonymous_user),
        principal=_get_principal(app),
        pwd_context=_get_pwd_context(app),
        remember_token_serializer=_get_serializer(app, 'remember'),
        login_serializer=_get_serializer(app, 'login'),
        reset_serializer=_get_serializer(app, 'reset'),
        confirm_serializer=_get_serializer(app, 'confirm'),
        _context_processors={},
        _send_mail_task=None,
        _unauthorized_callback=None
    ))
    # Fall back to the stock form classes for any form not overridden.
    for form_name, form_cls in _default_forms.items():
        if not kwargs.get(form_name):
            kwargs[form_name] = form_cls
    return _SecurityState(**kwargs)
def _context_processor():
    # Base template context injected into every security view.
    return dict(url_for_security=url_for_security, security=_security)
class RoleMixin(object):
    """Mixin for `Role` model definitions.

    Roles compare equal to their own name (a plain string) and to any
    object whose ``name`` attribute matches; they hash by name so they can
    be used in sets and as dict keys.
    """

    def __eq__(self, other):
        if self.name == other:
            return True
        return self.name == getattr(other, 'name', None)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)
class UserMixin(BaseUserMixin):
    """Mixin for `User` model definitions."""

    def is_active(self):
        """Returns `True` if the user is active."""
        return self.active

    def get_auth_token(self):
        """Returns the user's authentication token.

        The token embeds the user id and an md5 of the password so it is
        invalidated when the password changes.
        """
        data = [str(self.id), md5(self.password)]
        return _security.remember_token_serializer.dumps(data)

    def has_role(self, role):
        """Returns `True` if the user identifies with the specified role.

        :param role: A role name or `Role` instance
        """
        if isinstance(role, string_types):
            # compare by name (avoids the original's shadowed loop variable)
            return any(owned.name == role for owned in self.roles)
        return role in self.roles
class AnonymousUser(AnonymousUserMixin):
    """AnonymousUser definition"""

    def __init__(self):
        # Immutable so callers can never accidentally grant roles to the
        # anonymous user object.
        self.roles = ImmutableList()

    def has_role(self, *args):
        """Returns `False`"""
        return False
class _SecurityState(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key.lower(), value)
def _add_ctx_processor(self, endpoint, fn):
group = self._context_processors.setdefault(endpoint, [])
fn not in group and group.append(fn)
def _run_ctx_processor(self, endpoint):
rv = {}
for g in [None, endpoint]:
for fn in self._context_processors.setdefault(g, []):
rv.update(fn())
return rv
def context_processor(self, fn):
self._add_ctx_processor(None, fn)
def forgot_password_context_processor(self, fn):
self._add_ctx_processor('forgot_password', fn)
def login_context_processor(self, fn):
self._add_ctx_processor('login', fn)
def register_context_processor(self, fn):
self._add_ctx_processor('register', fn)
def reset_password_context_processor(self, fn):
self._add_ctx_processor('reset_password', fn)
def change_password_context_processor(self, fn):
self._add_ctx_processor('change_password', fn)
def send_confirmation_context_processor(self, fn):
self._add_ctx_processor('send_confirmation', fn)
def send_login_context_processor(self, fn):
self._add_ctx_processor('send_login', fn)
def mail_context_processor(self, fn):
self._add_ctx_processor('mail', fn)
def send_mail_task(self, fn):
self._send_mail_task = fn
def unauthorized_handler(self, fn):
self._unauthorized_callback = fn
class Security(object):
    """The :class:`Security` class initializes the Flask-Security extension.
    :param app: The application.
    :param datastore: An instance of a user datastore.
    """
    def __init__(self, app=None, datastore=None, **kwargs):
        self.app = app
        self.datastore = datastore
        # Support the app-factory pattern: initialization is deferred until
        # both the app and the datastore are available.
        if app is not None and datastore is not None:
            self._state = self.init_app(app, datastore, **kwargs)
    def init_app(self, app, datastore=None, register_blueprint=True,
                 login_form=None, confirm_register_form=None,
                 register_form=None, forgot_password_form=None,
                 reset_password_form=None, change_password_form=None,
                 send_confirmation_form=None, passwordless_login_form=None,
                 anonymous_user=None):
        """Initializes the Flask-Security extension for the specified
        application and datastore implementation.
        :param app: The application.
        :param datastore: An instance of a user datastore.
        :param register_blueprint: to register the Security blueprint or not.
        """
        datastore = datastore or self.datastore
        # Seed default configuration, flash messages and field labels so the
        # app only needs to override what it customizes.
        for key, value in _default_config.items():
            app.config.setdefault('SECURITY_' + key, value)
        for key, value in _default_messages.items():
            app.config.setdefault('SECURITY_MSG_' + key, value)
        for key, value in _default_field_labels.items():
            app.config.setdefault('SECURITY_LABEL_' + key, value)
        # Load roles onto each Flask-Principal identity.
        identity_loaded.connect_via(app)(_on_identity_loaded)
        state = _get_state(app, datastore,
                           login_form=login_form,
                           confirm_register_form=confirm_register_form,
                           register_form=register_form,
                           forgot_password_form=forgot_password_form,
                           reset_password_form=reset_password_form,
                           change_password_form=change_password_form,
                           send_confirmation_form=send_confirmation_form,
                           passwordless_login_form=passwordless_login_form,
                           anonymous_user=anonymous_user)
        if register_blueprint:
            app.register_blueprint(create_blueprint(state, __name__))
            app.context_processor(_context_processor)
        state.render_template = self.render_template
        app.extensions['security'] = state
        return state
    def render_template(self, *args, **kwargs):
        # Indirection point so subclasses can customize template rendering.
        return render_template(*args, **kwargs)
    def __getattr__(self, name):
        # NOTE(review): unknown attributes resolve to None instead of raising
        # AttributeError, which can mask typos — verify callers rely on this
        # before changing it.
        return getattr(self._state, name, None)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import dataclasses
from typing import Any, Dict, List, Optional
from superset.common.chart_data import ChartDataResultType
from superset.utils.core import AnnotationType, DTTM_ALIAS, TimeRangeEndpoint
# Base query-object fixture against the ``birth_names`` example dataset:
# top-100 names by sum__num over the last century, boys with non-null counts.
query_birth_names = {
    "extras": {
        "where": "",
        "time_range_endpoints": (
            TimeRangeEndpoint.INCLUSIVE,
            TimeRangeEndpoint.EXCLUSIVE,
        ),
        "time_grain_sqla": "P1D",
    },
    "columns": ["name"],
    "metrics": [{"label": "sum__num"}],
    "orderby": [("sum__num", False)],
    "row_limit": 100,
    "granularity": "ds",
    "time_range": "100 years ago : now",
    "timeseries_limit": 0,
    "timeseries_limit_metric": None,
    "order_desc": True,
    "filters": [
        {"col": "gender", "op": "==", "val": "boy"},
        {"col": "num", "op": "IS NOT NULL"},
        {"col": "name", "op": "NOT IN", "val": ["<NULL>", '"abc"']},
    ],
    "having": "",
    "having_filters": [],
    "where": "",
}
# Named query-object fixtures. Entries whose key carries a ``:suffix`` hold
# only the fields that override the parent (pre-colon) fixture.
QUERY_OBJECTS: Dict[str, Dict[str, object]] = {
    "birth_names": query_birth_names,
    # `:suffix` are overrides only
    "birth_names:include_time": {"groupby": [DTTM_ALIAS, "name"],},
    "birth_names:orderby_dup_alias": {
        "metrics": [
            {
                "expressionType": "SIMPLE",
                "column": {"column_name": "num_girls", "type": "BIGINT(20)"},
                "aggregate": "SUM",
                "label": "num_girls",
            },
            {
                "expressionType": "SIMPLE",
                "column": {"column_name": "num_boys", "type": "BIGINT(20)"},
                "aggregate": "SUM",
                "label": "num_boys",
            },
        ],
        "orderby": [
            [
                {
                    "expressionType": "SIMPLE",
                    "column": {"column_name": "num_girls", "type": "BIGINT(20)"},
                    "aggregate": "SUM",
                    # the same underlying expression, but different label
                    "label": "SUM(num_girls)",
                },
                False,
            ],
            # reference the ambiguous alias in SIMPLE metric
            [
                {
                    "expressionType": "SIMPLE",
                    "column": {"column_name": "num_boys", "type": "BIGINT(20)"},
                    "aggregate": "AVG",
                    "label": "AVG(num_boys)",
                },
                False,
            ],
            # reference the ambiguous alias in CUSTOM SQL metric
            [
                {
                    "expressionType": "SQL",
                    "sqlExpression": "MAX(CASE WHEN num_boys > 0 THEN 1 ELSE 0 END)",
                    "label": "MAX(CASE WHEN...",
                },
                True,
            ],
        ],
    },
    "birth_names:only_orderby_has_metric": {"metrics": [],},
}
# One representative annotation-layer fixture per annotation type.
ANNOTATION_LAYERS = {
    AnnotationType.FORMULA: {
        "annotationType": "FORMULA",
        "color": "#ff7f44",
        "hideLine": False,
        "name": "my formula",
        "opacity": "",
        "overrides": {"time_range": None},
        "show": True,
        "showMarkers": False,
        "sourceType": "",
        "style": "solid",
        "value": "3+x",
        "width": 5,
    },
    AnnotationType.EVENT: {
        "name": "my event",
        "annotationType": "EVENT",
        "sourceType": "NATIVE",
        "color": "#e04355",
        "opacity": "",
        "style": "solid",
        "width": 5,
        "showMarkers": False,
        "hideLine": False,
        "value": 1,
        "overrides": {"time_range": None},
        "show": True,
        "titleColumn": "",
        "descriptionColumns": [],
        "timeColumn": "",
        "intervalEndColumn": "",
    },
    AnnotationType.INTERVAL: {
        "name": "my interval",
        "annotationType": "INTERVAL",
        "sourceType": "NATIVE",
        "color": "#e04355",
        "opacity": "",
        "style": "solid",
        "width": 1,
        "showMarkers": False,
        "hideLine": False,
        "value": 1,
        "overrides": {"time_range": None},
        "show": True,
        "titleColumn": "",
        "descriptionColumns": [],
        "timeColumn": "",
        "intervalEndColumn": "",
    },
    AnnotationType.TIME_SERIES: {
        "annotationType": "TIME_SERIES",
        "color": None,
        "descriptionColumns": [],
        "hideLine": False,
        "intervalEndColumn": "",
        "name": "my line",
        "opacity": "",
        "overrides": {"time_range": None},
        "show": True,
        "showMarkers": False,
        "sourceType": "line",
        "style": "dashed",
        "timeColumn": "",
        "titleColumn": "",
        "value": 837,
        "width": 5,
    },
}
# Post-processing pipelines, keyed by *base* query name (no ``:suffix``).
POSTPROCESSING_OPERATIONS = {
    "birth_names": [
        {
            "operation": "aggregate",
            "options": {
                "groupby": ["gender"],
                "aggregates": {
                    "q1": {
                        "operator": "percentile",
                        "column": "sum__num",
                        "options": {"q": 25},
                    },
                    "median": {"operator": "median", "column": "sum__num",},
                },
            },
        },
        {"operation": "sort", "options": {"columns": {"q1": False, "gender": True},},},
    ]
}
def get_query_object(
    query_name: str, add_postprocessing_operations: bool, add_time_offsets: bool,
) -> Dict[str, Any]:
    """Return a deep copy of the query-object fixture named *query_name*.

    ``name:suffix`` entries hold only overrides; they are merged on top of
    their parent ``name`` fixture before copying.
    """
    if query_name not in QUERY_OBJECTS:
        raise Exception(f"QueryObject fixture not defined for datasource: {query_name}")
    obj = QUERY_OBJECTS[query_name]
    if ":" in query_name:
        # Merge the override entry on top of its parent fixture.
        parent_name = query_name.split(":", 1)[0]
        obj = {**QUERY_OBJECTS[parent_name], **obj}
    query_object = copy.deepcopy(obj)
    if add_postprocessing_operations:
        query_object["post_processing"] = _get_postprocessing_operation(query_name)
    if add_time_offsets:
        query_object["time_offsets"] = ["1 year ago"]
    return query_object
def _get_postprocessing_operation(query_name: str) -> List[Dict[str, Any]]:
    """Return a deep copy of the post-processing fixture for *query_name*.

    Raises a descriptive ``Exception`` when no fixture exists.
    """
    # Bug fix: the guard used to check QUERY_OBJECTS, so names present there
    # but absent here (e.g. ``birth_names:include_time``) escaped the guard
    # and raised an unhelpful KeyError on the lookup below.
    if query_name not in POSTPROCESSING_OPERATIONS:
        raise Exception(
            f"Post-processing fixture not defined for datasource: {query_name}"
        )
    return copy.deepcopy(POSTPROCESSING_OPERATIONS[query_name])
@dataclasses.dataclass
class Table:
    """Minimal stand-in for a datasource table reference."""

    id: int
    type: str
    name: str
class QueryContextGenerator:
    """Builds query-context payloads for chart-data tests."""

    def generate(
        self,
        query_name: str,
        add_postprocessing_operations: bool = False,
        add_time_offsets: bool = False,
        table_id=1,
        table_type="table",
        form_data: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        # The portion before ":" identifies the underlying table.
        table = self.get_table(query_name.split(":")[0], table_id, table_type)
        queries = [
            get_query_object(
                query_name, add_postprocessing_operations, add_time_offsets,
            )
        ]
        return {
            "datasource": {"id": table.id, "type": table.type},
            "queries": queries,
            "result_type": ChartDataResultType.FULL,
            "form_data": form_data or {},
        }

    def get_table(self, name, id_, type_):
        return Table(id_, type_, name)
| |
import sys, os
import scipy
from pylab import *
from matplotlib import *
from scipy.stats import *
from numpy import *
from scipy import *
import kepfit
import kepmsg
"""
This code is based on the PyKE routine kepsff
found at keplerscience.arc.nasa.gov
The kepsff code is based on Vanderberg and Johnson 2014.
If you use this you must cite V&J 2014.
"""
def martinsff(intime,indata,centr1,centr2,
    npoly_cxcy,sigma_cxcy,npoly_ardx,
    npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,verbose,logfile,
    status):
    """Self-flat-field (SFF) detrend a K2 light curve (kepsff-style).

    Fits the centroid track, rotates it onto its principal axes, maps the
    strongest eigenvector onto arclength, flags thruster firings via the
    arclength time derivative, and divides the flux by the fitted
    arclength-flux correlation.

    Parameters (assumed 1-D arrays of equal length — TODO confirm):
      intime, indata   -- time stamps and flux
      centr1, centr2   -- column/row centroid time series
      npoly_*/sigma_*  -- polynomial orders and clip thresholds for the
                          successive fits (centroid, arclength, ds/dt,
                          arclength-flux)
      verbose, logfile -- passed through to kepfit/kepmsg
      status           -- rebound locally; the caller's value is unused

    Returns (out_detsap, cfac, thr_cadence): corrected flux, the applied
    correction factors, and a boolean thruster-firing mask.

    NOTE(review): unqualified names (zeros, append, nanmean, math, numpy,
    ...) rely on the module's star imports from pylab/numpy/scipy — verify
    they all resolve at runtime.
    """
# startup parameters
    status = 0
    labelsize = 16
    ticksize = 14
    xsize = 20
    ysize = 8
    lcolor = '#0000ff'
    lwidth = 1.0
    fcolor = '#ffff00'
    falpha = 0.2
    seterr(all="ignore")
# fit centroid data with low-order polynomial
    cfit = zeros((len(centr2)))
    csig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_cxcy)
    pinit = array([nanmean(centr2)])
    if npoly_cxcy > 0:
        for j in range(npoly_cxcy):
            pinit = append(pinit,0.0)
    try:
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
        for j in range(len(coeffs)):
            cfit += coeffs[j] * numpy.power(centr1,j)
            csig[:] = sigma
    except:
        # NOTE(review): t1/t2 are not defined in this function, so formatting
        # this message would itself raise NameError; also the bare except
        # hides the original error before the hard exit below.
        message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows %d - %d. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.' % (t1,t2)
        status = kepmsg.err(logfile,message,verbose)
#        sys.exit('')
        os._exit(1)
# reject outliers
    # Keep only cadences whose row centroid lies within sigma_cxcy * csig of
    # the polynomial fit.
    time_good = array([],'float64')
    centr1_good = array([],'float32')
    centr2_good = array([],'float32')
    flux_good = array([],'float32')
    cad_good = array([],'int')  # NOTE(review): declared but never filled/used
    for i in range(len(cfit)):
        if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
            time_good = append(time_good,intime[i])
            centr1_good = append(centr1_good,centr1[i])
            centr2_good = append(centr2_good,centr2[i])
            flux_good = append(flux_good,indata[i])
# covariance matrix for centroid time series
    centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)])
    covar = cov(centr)
# eigenvector eigenvalues of covariance matrix
    # Principal axes of the centroid scatter; `eval` shadows the builtin.
    [eval, evec] = numpy.linalg.eigh(covar)
    ex = arange(-10.0,10.0,0.1)
    epar = evec[1,1] / evec[0,1] * ex
    enor = evec[1,0] / evec[0,0] * ex
    ex = ex + mean(centr1)
    epar = epar + mean(centr2_good)
    enor = enor + mean(centr2_good)
# rotate centroid data
    centr_rot = dot(evec.T,centr)
# fit polynomial to rotated centroids
    rfit = zeros((len(centr2)))
    rsig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_ardx)
    # NOTE(review): this pinit is immediately overwritten by the next line,
    # so the fit is seeded with [1.0] rather than the mean.
    pinit = array([nanmean(centr_rot[0,:])])
    pinit = array([1.0])
    if npoly_ardx > 0:
        for j in range(npoly_ardx):
            pinit = append(pinit,0.0)
    try:
        coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
                           logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
    rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
    ry = zeros((len(rx)))
    for i in range(len(coeffs)):
        ry = ry + coeffs[i] * numpy.power(rx,i)
# calculate arclength of centroids
    # Cumulative arclength s(rx) along the fitted centroid curve.
    s = zeros((len(rx)))
    for i in range(1,len(s)):
        work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2
        s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])
# fit arclength as a function of strongest eigenvector
    sfit = zeros((len(centr2)))
    ssig = zeros((len(centr2)))
    functype = 'poly' + str(npoly_ardx)
    pinit = array([nanmean(s)])
    if npoly_ardx > 0:
        for j in range(npoly_ardx):
            pinit = append(pinit,0.0)
    try:
        acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
            kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
# correlate arclength with detrended flux
    t = copy(time_good)
    y = copy(flux_good)
    z = centr_rot[1,:]
    # Map each rotated centroid position to arclength via the fit above.
    x = zeros((len(z)))
    for i in range(len(acoeffs)):
        x = x + acoeffs[i] * numpy.power(z,i)
# calculate time derivative of arclength s
    dx = zeros((len(x)))
    for i in range(1,len(x)):
        dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
    dx[0] = dx[1]
# fit polynomial to derivative and flag outliers (thruster firings)
    dfit = zeros((len(dx)))
    dsig = zeros((len(dx)))
    functype = 'poly' + str(npoly_dsdt)
    pinit = array([nanmean(dx)])
    if npoly_dsdt > 0:
        for j in range(npoly_dsdt):
            pinit = append(pinit,0.0)
    try:
        dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
            kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
    for i in range(len(dcoeffs)):
        dfit = dfit + dcoeffs[i] * numpy.power(t,i)
    # Partition cadences: within sigma_dsdt of the ds/dt fit -> "pnt"
    # (science), outside -> "thr" (suspected thruster firing).
    centr1_pnt = array([],'float32')
    centr2_pnt = array([],'float32')
    time_pnt = array([],'float64')
    flux_pnt = array([],'float32')
    dx_pnt = array([],'float32')
    s_pnt = array([],'float32')
    time_thr = array([],'float64')
    flux_thr = array([],'float32')
    dx_thr = array([],'float32')
    thr_cadence = zeros(len(t),dtype=bool)
    for i in range(len(t)):
        if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
            time_pnt = append(time_pnt,time_good[i])
            flux_pnt = append(flux_pnt,flux_good[i])
            dx_pnt = append(dx_pnt,dx[i])
            s_pnt = append(s_pnt,x[i])
            centr1_pnt = append(centr1_pnt,centr1_good[i])
            centr2_pnt = append(centr2_pnt,centr2_good[i])
        else:
            time_thr = append(time_thr,time_good[i])
            flux_thr = append(flux_thr,flux_good[i])
            dx_thr = append(dx_thr,dx[i])
            thr_cadence[i] = True
# fit arclength-flux correlation
    cfit = zeros((len(time_pnt)))
    csig = zeros((len(time_pnt)))
    functype = 'poly' + str(npoly_arfl)
    pinit = array([nanmean(flux_pnt)])
    if npoly_arfl > 0:
        for j in range(npoly_arfl):
            pinit = append(pinit,0.0)
    try:
        ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
            kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
    except:
        message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
        status = kepmsg.err(logfile,message,verbose)
# correction factors for unfiltered data
    # Re-apply the rotation/arclength mapping to the *full* (unfiltered)
    # centroid series, then evaluate the flux correlation to get cfac.
    centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
    centr_rot = dot(evec.T,centr)
    yy = copy(indata)
    zz = centr_rot[1,:]
    xx = zeros((len(zz)))
    cfac = zeros((len(zz)))
    for i in range(len(acoeffs)):
        xx = xx + acoeffs[i] * numpy.power(zz,i)
    for i in range(len(ccoeffs)):
        cfac = cfac + ccoeffs[i] * numpy.power(xx,i)
# apply correction to flux time-series
    out_detsap = indata / cfac
    return out_detsap, cfac, thr_cadence
| |
"""
Contexts are the "values" that Python would return. However Contexts are at the
same time also the "contexts" that a user is currently sitting in.
A ContextSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from parso.python.tree import ExprStmt, CompFor
from jedi import debug
from jedi._compatibility import Python3Method, zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring, get_doc_with_call_signature
from jedi.common import BaseContextSet, BaseContext
from jedi.evaluate.helpers import EvaluatorIndexError, EvaluatorTypeError, \
EvaluatorKeyError
class Context(BaseContext):
    """
    Should be defined, otherwise the API returns empty types.
    """
    # Both attributes are overridden by subclasses that carry a syntax-tree
    # node (see TreeContext below).
    predefined_names = {}
    tree_node = None

    @property
    def api_type(self):
        # By default just lower name of the class. Can and should be
        # overwritten.
        return self.__class__.__name__.lower()

    @debug.increase_indent
    def execute(self, arguments):
        """
        In contrast to py__call__ this function is always available.

        `hasattr(x, py__call__)` can also be checked to see if a context is
        executable.
        """
        if self.evaluator.is_analysis:
            arguments.eval_all()

        debug.dbg('execute: %s %s', self, arguments)
        from jedi.evaluate import stdlib
        try:
            # Some stdlib functions like super(), namedtuple(), etc. have been
            # hard-coded in Jedi to support them.
            return stdlib.execute(self.evaluator, self, arguments)
        except stdlib.NotInStdLib:
            pass

        try:
            func = self.py__call__
        except AttributeError:
            debug.warning("no execution possible %s", self)
            return NO_CONTEXTS
        else:
            context_set = func(arguments)
            debug.dbg('execute result: %s in %s', context_set, self)
            return context_set
        # Fix: an unreachable `return self.evaluator.execute(...)` used to
        # follow here; every path above already returns.

    def execute_evaluated(self, *value_list):
        """
        Execute a function with already executed arguments.
        """
        from jedi.evaluate.arguments import ValuesArguments
        arguments = ValuesArguments([ContextSet(value) for value in value_list])
        return self.execute(arguments)

    def iterate(self, contextualized_node=None, is_async=False):
        """Iterate this context; reports a type error (when a contextualized
        node is given) and yields nothing for non-iterables."""
        debug.dbg('iterate %s', self)
        try:
            if is_async:
                iter_method = self.py__aiter__
            else:
                iter_method = self.py__iter__
        except AttributeError:
            if contextualized_node is not None:
                from jedi.evaluate import analysis
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-iterable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not iterable" % self)
            return iter([])
        else:
            return iter_method()

    def get_item(self, index_contexts, contextualized_node):
        """Evaluate a subscript (``self[index]``) for every index context and
        union the results."""
        from jedi.evaluate.compiled import CompiledObject
        from jedi.evaluate.context.iterable import Slice, Sequence
        result = ContextSet()
        for index in index_contexts:
            if isinstance(index, Slice):
                index = index.obj
            if isinstance(index, CompiledObject):
                try:
                    index = index.get_safe_value()
                except ValueError:
                    pass

            if type(index) not in (float, int, str, unicode, slice, bytes):
                # If the index is not clearly defined, we have to get all the
                # possibilities.
                if isinstance(self, Sequence) and self.array_type == 'dict':
                    result |= self.dict_values()
                else:
                    result |= iterate_contexts(ContextSet(self))
                continue

            # The actual getitem call.
            try:
                getitem = self.py__getitem__
            except AttributeError:
                from jedi.evaluate import analysis
                # TODO this context is probably not right.
                analysis.add(
                    contextualized_node.context,
                    'type-error-not-subscriptable',
                    contextualized_node.node,
                    message="TypeError: '%s' object is not subscriptable" % self
                )
            else:
                try:
                    result |= getitem(index)
                except EvaluatorIndexError:
                    result |= iterate_contexts(ContextSet(self))
                except EvaluatorKeyError:
                    # Must be a dict. Lists don't raise KeyErrors.
                    result |= self.dict_values()
                except EvaluatorTypeError:
                    # The type is wrong and therefore it makes no sense to do
                    # anything anymore.
                    result = NO_CONTEXTS
        return result

    def eval_node(self, node):
        return self.evaluator.eval_element(self, node)

    @Python3Method
    def py__getattribute__(self, name_or_str, name_context=None, position=None,
                           search_global=False, is_goto=False,
                           analysis_errors=True):
        """
        :param position: Position of the last statement -> tuple of line, column
        """
        if name_context is None:
            name_context = self
        from jedi.evaluate import finder
        f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
                              position, analysis_errors=analysis_errors)
        filters = f.get_filters(search_global)
        if is_goto:
            return f.filter_name(filters)
        return f.find(filters, attribute_lookup=not search_global)

    def create_context(self, node, node_is_context=False, node_is_object=False):
        return self.evaluator.create_context(self, node, node_is_context, node_is_object)

    def is_class(self):
        return False

    def py__bool__(self):
        """
        Since Wrapper is a super class for classes, functions and modules,
        the return value will always be true.
        """
        return True

    def py__doc__(self, include_call_signature=False):
        """Return the docstring of the underlying tree node ('' if there is
        no tree node attached)."""
        try:
            self.tree_node.get_doc_node
        except AttributeError:
            return ''
        else:
            if include_call_signature:
                return get_doc_with_call_signature(self.tree_node)
            else:
                return clean_scope_docstring(self.tree_node)
        # Fix: a dead `return None` used to follow here; both branches
        # above already return.
def iterate_contexts(contexts, contextualized_node=None, is_async=False):
    """
    Calls `iterate`, on all contexts but ignores the ordering and just returns
    all contexts that the iterate functions yield.
    """
    lazy_contexts = contexts.iterate(contextualized_node, is_async=is_async)
    return ContextSet.from_sets(lazy.infer() for lazy in lazy_contexts)
class TreeContext(Context):
    """A context that is backed by a node of the parsed syntax tree."""

    def __init__(self, evaluator, parent_context, tree_node):
        super(TreeContext, self).__init__(evaluator, parent_context)
        # Give every instance its own mapping instead of the shared class
        # attribute.
        self.predefined_names = {}
        self.tree_node = tree_node

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode(object):
    """Pairs a tree node with the context it appears in."""

    def __init__(self, context, node):
        self.context = context
        self.node = node

    def get_root_context(self):
        # Delegate to the owning context.
        return self.context.get_root_context()

    def infer(self):
        # Evaluate the wrapped node within its context.
        return self.context.eval_node(self.node)
class ContextualizedName(ContextualizedNode):
    # TODO merge with TreeNameDefinition?!
    @property
    def name(self):
        # For names the wrapped node *is* the name leaf.
        return self.node
    def assignment_indexes(self):
        """
        Returns an array of tuple(int, node) of the indexes that are used in
        tuple assignments.
        For example if the name is ``y`` in the following code::
            x, (y, z) = 2, ''
        would result in ``[(1, xyz_node), (0, yz_node)]``.
        """
        indexes = []
        # Walk upwards from the name towards the owning statement, recording
        # at every tuple level which element position we came from.
        node = self.node.parent
        compare = self.node
        while node is not None:
            if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
                for i, child in enumerate(node.children):
                    if child == compare:
                        # Children alternate value/comma, hence the halving.
                        indexes.insert(0, (int(i / 2), node))
                        break
                else:
                    raise LookupError("Couldn't find the assignment.")
            elif isinstance(node, (ExprStmt, CompFor)):
                # Reached the statement/comprehension that owns the name.
                break
            compare = node
            node = node.parent
        return indexes
class ContextSet(BaseContextSet):
    """Set of contexts with jedi-specific bulk operations."""

    def py__class__(self):
        return ContextSet.from_iterable(c.py__class__() for c in self._set)

    def iterate(self, contextualized_node=None, is_async=False):
        from jedi.evaluate.lazy_context import get_merged_lazy_context
        iterators = [
            context.iterate(contextualized_node, is_async=is_async)
            for context in self._set
        ]
        # Merge the n-th element of every iterator into one lazy context.
        for lazy_contexts in zip_longest(*iterators):
            yield get_merged_lazy_context(
                [lazy for lazy in lazy_contexts if lazy is not None]
            )
# The empty context set: returned whenever inference produces nothing.
NO_CONTEXTS = ContextSet()
def iterator_to_context_set(func):
    """Decorator turning a context-yielding generator into a ``ContextSet``
    result."""
    import functools

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the plain wrapper previously discarded.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return ContextSet.from_iterable(func(*args, **kwargs))
    return wrapper
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
from absl.testing import parameterized
from tensorflow.python.distribute.parallel_device import parallel_device
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as tracking
from tensorflow.python.util import nest
# When running collectives asynchronously, we need to give each parallel device
# execution a unique ID so the collectives don't interfere. Since the op is
# replicated with group/instance key intact, the replicated nodes will
# communicate.
# TODO(allenl): Switch to using a collective manager.
_COUNTER_LOCK = threading.Lock()  # serializes access to _COUNTER
_COUNTER = 100  # next unique group/instance key handed to a collective
def _collective_reduce(inputs, operation, num_replicas):
  """All-reduces every tensor in `inputs` with `operation` across replicas."""

  def _reduce_tensor(tensor):
    # Hand each collective a fresh key so concurrent executions can't
    # interfere (see the module-level note).
    with _COUNTER_LOCK:
      global _COUNTER
      key = _COUNTER
      _COUNTER += 1
    return collective_ops.all_reduce(
        t=tensor,
        group_size=num_replicas,
        merge_op=operation,
        group_key=key,
        instance_key=key,
        final_op="Id")

  return nest.map_structure(_reduce_tensor, inputs)
def _collective_sum(inputs, num_replicas):
  """All-reduces `inputs` with addition across `num_replicas` devices."""
  return _collective_reduce(inputs=inputs, operation="Add",
                            num_replicas=num_replicas)
class _Dense(module.Module):
  """Minimal dense layer with lazily created all-ones parameters."""

  def __init__(self, output_size):
    self.output_size = output_size
    self.kernel = None
    self.bias = None

  def __call__(self, x):
    if self.kernel is None:
      # Parameters are created on first call, once the input width is known.
      kernel_shape = array_ops.stack(
          [self.output_size, array_ops.shape(x)[-1]])
      self.kernel = variables.Variable(array_ops.ones(kernel_shape))
      self.bias = variables.Variable(array_ops.ones([self.output_size]))
    return math_ops.matmul(x, self.kernel, transpose_b=True) + self.bias
class _VirtualDeviceTestCase(test.TestCase):
  # Base class that builds a two-component ParallelDevice on whatever
  # hardware is available, preferring TPU, then GPU, then CPU.

  def setUp(self):
    super(_VirtualDeviceTestCase, self).setUp()
    ctx = context.context()
    if ctx.list_physical_devices("TPU"):
      self.device_type = "TPU"
    elif ctx.list_physical_devices("GPU"):
      self.device_type = "GPU"
      # Split the first GPU into two small logical devices so the parallel
      # device has two components even on single-GPU machines.
      gpus = ctx.list_physical_devices(self.device_type)
      ctx.set_logical_device_configuration(gpus[0], [
          context.LogicalDeviceConfiguration(memory_limit=100),
          context.LogicalDeviceConfiguration(memory_limit=100),
      ])
    else:
      self.device_type = "CPU"
      # Likewise split the CPU into two logical devices.
      cpus = ctx.list_physical_devices("CPU")
      ctx.set_logical_device_configuration(cpus[0], [
          context.LogicalDeviceConfiguration(),
          context.LogicalDeviceConfiguration(),
      ])
    # One component uses a fully qualified device name and the other a
    # short name, exercising both spellings.
    self.device = parallel_device.ParallelDevice(components=[
        "/job:localhost/device:{}:0".format(self.device_type),
        self.device_type + ":1"
    ])
    self.assertIn(self.device_type + ":0", self.device.components[0])
    self.assertIn(self.device_type + ":1", self.device.components[1])
class ParallelDeviceTests(_VirtualDeviceTestCase, parameterized.TestCase):
def test_register_parallel_device(self):
with self.device:
c = constant_op.constant(1.)
d = constant_op.constant(2.)
e = c + d
outputs = self.device.unpack(e)
self.assertAllClose([3., 3.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_string_representation(self):
x = self.device.pack(
[constant_op.constant([5., 6.]),
constant_op.constant([6., 7.])])
parallel_str = str(x)
self.assertIn("5", parallel_str)
self.assertIn("7", parallel_str)
self.assertIn(self.device_type + ":0", parallel_str)
self.assertIn(self.device_type + ":1", parallel_str)
parallel_repr = repr(x)
self.assertIn("5", parallel_repr)
self.assertIn("7", parallel_repr)
self.assertIn(self.device_type + ":0", parallel_repr)
self.assertIn(self.device_type + ":1", parallel_repr)
def test_device_id(self):
device_ids = self.device.unpack(self.device.device_ids)
self.assertAllClose([0, 1], device_ids)
# TODO(allenl): Should device IDs be int64 so they can be placed on GPUs?
# Currently backing_device is CPU.
self.assertIn(self.device.components[0], device_ids[0].device)
self.assertIn(self.device.components[1], device_ids[1].device)
def test_collective_reduce(self):
if self.device_type == "TPU":
self.skipTest("ParallelDevice collectives on TPUs need work")
with self.device:
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
reduced = _collective_sum(x, num_replicas=2)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
def test_collective_reduce_async_scope(self):
if self.device_type == "TPU":
self.skipTest("ParallelDevice collectives on TPUs need work")
# Note that ops on the parallel device currently don't execute
# asynchronously. The test is just that we don't get deadlocks.
with context.async_scope(), self.device:
x = self.device.pack(
[constant_op.constant(-1.5),
constant_op.constant(3.5)])
reduced = _collective_sum(x, num_replicas=2)
outputs = self.device.unpack(reduced)
self.assertAllClose([2., 2.], outputs)
self.assertIn(self.device.components[0], outputs[0].backing_device)
self.assertIn(self.device.components[1], outputs[1].backing_device)
  def test_collective_reduce_async_context(self):
    if self.device_type == "TPU":
      self.skipTest("ParallelDevice collectives on TPUs need work")
    # Rebuild the global context with asynchronous execution enabled, then
    # restore the previous setting no matter what happens.
    previous = config.get_synchronous_execution()
    try:
      context._reset_context()
      config.set_synchronous_execution(False)
      # The devices were configured on the old context; configure them again.
      self.setUp()
      # Note that ops on the parallel device currently don't execute
      # asynchronously. The test is just that we don't get deadlocks.
      with self.device:
        x = self.device.pack(
            [constant_op.constant(-1.5),
             constant_op.constant(3.5)])
        reduced = _collective_sum(x, num_replicas=2)
        outputs = self.device.unpack(reduced)
        self.assertAllClose([2., 2.], outputs)
        self.assertIn(self.device.components[0], outputs[0].backing_device)
        self.assertIn(self.device.components[1], outputs[1].backing_device)
    finally:
      context._reset_context()
      config.set_synchronous_execution(previous)
@parameterized.named_parameters(
[("RunFunctionsEagerly", True),
("", False)])
def test_cond(self, run_functions_eagerly):
try:
def_function.run_functions_eagerly(run_functions_eagerly)
with self.device:
pred = self.device.pack([True, False])
capture = self.device.pack([[1.], [2.]])
result = control_flow_ops.cond(
pred,
def_function.function(lambda: capture * 2.),
def_function.function(lambda: capture * 4.))
self.assertAllClose(
[[2.], [8.]], self.device.unpack(result))
finally:
def_function.run_functions_eagerly(False)
  def test_cond_with_variable(self):
    # A variable created lazily inside a traced tf.cond branch becomes a
    # parallel variable with one component per device.
    with self.device:
      pred = self.device.pack([True, False])
      capture = self.device.pack([[1.], [2.]])
      v = None
      @def_function.function
      def true_branch():
        nonlocal v
        # Created on the first trace only; later calls reuse it.
        if v is None:
          v = variables.Variable(constant_op.constant(2.))
        return v * capture
      result = control_flow_ops.cond(
          pred, true_branch, def_function.function(lambda: capture * 4.))
    self.assertAllClose(
        [[2.], [8.]], self.device.unpack(result))
    self.assertAllClose(
        [2., 2.], self.device.unpack(v))
    # There are two unique variable handles with separate storage.
    h1, _ = self.device.unpack(v.handle)
    gen_resource_variable_ops.assign_variable_op(h1, constant_op.constant(3.))
    self.assertAllClose(
        [3., 2.], self.device.unpack(v))
  def test_collective_in_function(self):
    """Broadcast send/recv collectives work inside a traced function."""
    if self.device_type == "TPU":
      self.skipTest("ParallelDevice collectives on TPUs need work")
    c = constant_op.constant([2])
    @def_function.function
    def broadcast_send_recv(device_id):
      # Device 0 sends c * 3; device 1 receives it. Both branches share
      # group_size=2, group_key=1, instance_key=1 so they rendezvous.
      @def_function.function
      def send():
        s0 = collective_ops.broadcast_send(
            c * 3, c.shape, c.dtype, group_size=2, group_key=1, instance_key=1)
        # The sender returns the original `c`, ordered after the send.
        with ops.control_dependencies([s0.op]):
          return array_ops.identity(c)
      @def_function.function
      def recv():
        r0 = collective_ops.broadcast_recv(
            c.shape, c.dtype, group_size=2, group_key=1, instance_key=1)
        return r0
      return control_flow_ops.switch_case(
          device_id, branch_fns={0: send, 1: recv})
    with self.device:
      result = broadcast_send_recv(self.device.device_ids)
    # Sender kept [2]; receiver got the broadcast [6].
    self.assertAllClose([[2], [6]], self.device.unpack(result))
  def test_use_in_graph_error_is_informative(self):
    """Using the parallel device inside tf.function raises a clear error."""
    @def_function.function
    def uses_parallel():
      with self.device:
        return self.device.unpack(array_ops.ones([]))
    # unpack is not supported while tracing; the error message should say so.
    with self.assertRaisesRegex(NotImplementedError, "inside `tf.function`"):
      uses_parallel()
  def test_checkpointing(self):
    """Parallel variables save/restore per component, and restore into
    single-device copies with only the extra components unmatched."""
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    with self.device:
      different_values = self.device.pack(
          [constant_op.constant(-1.),
           constant_op.constant(3.)])
      v = variables.Variable(different_values)
      checkpoint = tracking.Checkpoint(v=v)
    save_path = checkpoint.save(prefix)
    with self.device:
      v.assign(constant_op.constant(0.))
    # Restore recovers the original per-component values (-1., 3.).
    checkpoint.restore(save_path).assert_consumed()
    with self.device:
      outputs = self.device.unpack(v)
    self.assertAllClose([-1., 3.], outputs)
    with self.device:
      # Restore-on-create: the restore is queued before the variable exists.
      restore_on_create = tracking.Checkpoint()
      restore_on_create.restore(save_path)
      restore_on_create.v = variables.Variable(0.)
      outputs = self.device.unpack(restore_on_create.v)
    self.assertAllClose([-1., 3.], outputs)
    # Changing the number of devices / restoring into a single-device copy is OK
    single_device = tracking.Checkpoint(v=variables.Variable(0.))
    status = single_device.restore(save_path)
    status.assert_existing_objects_matched()
    self.assertAllClose(-1., single_device.v)
    with self.assertRaisesRegex(AssertionError, "parallel_component_1"):
      # There are parts of the variable that aren't restored into a
      # single-device copy.
      status.assert_consumed()
  def test_saved_model(self):
    """A module saved from the parallel device loads both on a single
    device and back onto the parallel device."""
    with self.device:
      different_values = self.device.pack(
          [constant_op.constant(-1.),
           constant_op.constant(3.)])
      m = module.Module()
      m.v = variables.Variable(different_values)
      m.f = def_function.function(lambda: m.v * 2.)
      self.assertAllClose([-2., 6.], self.device.unpack(m.f()))
      saved_model_path = os.path.join(self.get_temp_dir(), "saved_model")
      save.save(m, saved_model_path)
    # Reset the runtime so loading happens in a fresh context.
    context._reset_context()
    self.setUp()
    # Single-device load sees only the first component's value (-1. * 2.).
    single_device_loaded = load.load(saved_model_path)
    self.assertAllClose(-2., single_device_loaded.f())
    with self.device:
      # Parallel load restores both components and remains assignable.
      parallel_loaded = load.load(saved_model_path)
      self.assertAllClose([-2., 6.], self.device.unpack(parallel_loaded.f()))
      self.assertAllClose([-1., 3.], self.device.unpack(parallel_loaded.v))
      parallel_loaded.v.assign(self.device.pack([.1, .2]))
      self.assertAllClose([.2, .4], self.device.unpack(parallel_loaded.f()))
  def _assert_close_to_non_parallel(self, computation):
    """Asserts that replication of `computation` works and is equivalent.

    Runs `computation` once on the parallel device and once normally, then
    checks structure, placement, and per-component numerical agreement.
    """
    with self.device:
      parallel_result = computation()
    non_parallel_result = computation()
    # The computations should have the same number and structure of Tensor
    # objects, even though the tensors themselves will be on different devices
    # and represent different numbers of values.
    nest.assert_same_structure(parallel_result, non_parallel_result)
    non_parallel_flat = nest.flatten(non_parallel_result)
    parallel_flat = nest.flatten(parallel_result)
    # Guard against vacuous success on an empty structure.
    self.assertGreater(len(parallel_flat), 0)
    for non_parallel, parallel in zip(non_parallel_flat, parallel_flat):
      # Parallel outputs live on the parallel device; the baseline does not.
      self.assertEqual(self.device._name, parallel.device)
      self.assertNotEqual(self.device._name, non_parallel.device)
      # Every replicated component must match the single-device result.
      for parallel_component in self.device.unpack(parallel):
        self.assertAllClose(non_parallel, parallel_component)
  def test_capturing(self):
    """Functions capture parallel tensors and broadcast them per component."""
    with self.device:
      x = constant_op.constant([1., 2.])
      x = array_ops.identity(x)
      @def_function.function
      def f(y):
        # `x` is captured from the enclosing (parallel-device) scope.
        return x + y
      y = array_ops.ones([2])
      parallel_result = f(y)
    # Both components compute [1., 2.] + [1., 1.].
    self.assertAllClose([[2., 3.]] * 2, self.device.unpack(parallel_result))
def test_euclidean_norm(self):
def _test_fn():
with backprop.GradientTape() as tape:
x = array_ops.ones([5, 5])
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x, axis=constant_op.constant(1))
return y, tape.gradient(y, x)
self._assert_close_to_non_parallel(_test_fn)
def test_reduce_sum(self):
def _test_fn():
with backprop.GradientTape() as tape:
x = array_ops.ones([5, 5])
tape.watch(x)
y = math_ops.reduce_sum(x, axis=constant_op.constant(1))
return y, tape.gradient(y, x)
self._assert_close_to_non_parallel(_test_fn)
  def test_variable_created_in_function(self):
    """Variables created lazily inside tf.function work on the device.

    Covers several creation styles: inside ops.init_scope, from a captured
    initial value, from a callable initializer, and from a fresh constant.
    """
    class M(module.Module):
      def __init__(self):
        self.v = None
        self.w = None
        self.x = None
        self.z = None
      @def_function.function(autograph=False)
      def __call__(self, x):
        if self.v is None:
          # init_scope lifts creation out of the function's graph.
          with ops.init_scope():
            initial_value = constant_op.constant(2.)
            self.z = variables.Variable(initial_value)
          self.x = variables.Variable(initial_value)
          self.w = variables.Variable(lambda: constant_op.constant(2.))
          self.v = variables.Variable(constant_op.constant(2.))
        return x * self.v * self.w * self.x * self.z
    with self.device:
      m = M()
      packed_outputs = m(array_ops.ones([]))
      outputs = self.device.unpack(packed_outputs)
    # 1 * 2 * 2 * 2 * 2 on each component.
    self.assertAllClose([16., 16.], outputs)
  def test_different_shapes(self):
    """Components may differ in shape; only `.shape` access then fails."""
    with self.device:
      x = self.device.pack(
          [constant_op.constant([1., 2.]),
           constant_op.constant([5.])])
      y = x * 2.
    # A single static shape can't represent mismatched components.
    with self.assertRaisesRegex(Exception,
                                "components do not all have the same shape"):
      y.shape  # pylint: disable=pointless-statement
    # Elementwise math still works component-wise.
    self.assertAllClose([[2., 4.], [10.]], self.device.unpack(y))
    with self.device:
      # Components can even differ in rank.
      different_axes = self.device.pack(
          [constant_op.constant([1., 2.]),
           constant_op.constant([[5.]])])
    with self.assertRaisesRegex(Exception,
                                "components do not all have the same shape"):
      different_axes.shape  # pylint: disable=pointless-statement
class LayerTests(_VirtualDeviceTestCase):
  """Tests for running layers (a `_Dense` defined elsewhere in this file)
  and training loops on the parallel device."""

  def test_layer_forward(self):
    """Forward passes replicate, and diverge with per-component inputs."""
    with self.device:
      layer = _Dense(5)
      x = constant_op.constant([[2.]])
      y = layer(x)
      outputs = self.device.unpack(y)
    # Identical inputs give identical per-component outputs.
    self.assertAllClose([[3.] * 5], outputs[0])
    self.assertAllClose([[3.] * 5], outputs[1])
    self.assertIn(self.device.components[0], outputs[0].backing_device)
    self.assertIn(self.device.components[1], outputs[1].backing_device)
    # With different Layer inputs we get different outputs
    with self.device:
      x = self.device.pack(
          [constant_op.constant([[-0.5]]),
           constant_op.constant([[0.5]])])
      y = layer(x)
      outputs = self.device.unpack(y)
    self.assertGreater(
        math_ops.reduce_max(math_ops.abs(outputs[0] - outputs[1])), 1e-5)
    self.assertIn(self.device.components[0], outputs[0].backing_device)
    self.assertIn(self.device.components[1], outputs[1].backing_device)

  def test_layer_sync_training(self):
    """Summing gradients across components keeps replicas in sync."""
    if self.device_type == "TPU":
      self.skipTest("ParallelDevice collectives on TPUs need work")
    with self.device:
      layer = _Dense(5)
      with backprop.GradientTape() as tape:
        x = self.device.pack(
            [constant_op.constant([[-0.5]]),
             constant_op.constant([[0.5]])])
        y = layer(x)
        loss = (y - math_ops.range(5.))**2.
      parameters = layer.trainable_variables
      unreduced_gradients = tape.gradient(loss, parameters)
      # All-reduce so every component applies the same update.
      reduced_gradients = _collective_sum(unreduced_gradients, num_replicas=2)
      for grad, param in zip(reduced_gradients, parameters):
        param.assign_sub(0.01 * grad)
    final_kernels = self.device.unpack(layer.kernel)
    self.assertAllClose(final_kernels[0], final_kernels[1])
    final_bias = self.device.unpack(layer.bias)
    # One SGD step with the summed d(loss)/d(bias) of both replicas.
    expected_bias = (1. - 0.01 * 2. * (1. + .5 - math_ops.range(5.)) -
                     0.01 * 2. * (1. - .5 - math_ops.range(5.)))
    self.assertAllClose(expected_bias, final_bias[0])
    self.assertAllClose(expected_bias, final_bias[1])
    self.assertIn(self.device.components[0], final_kernels[0].backing_device)
    self.assertIn(self.device.components[1], final_kernels[1].backing_device)

  def test_layer_divergent_buffer_training(self):
    """Without gradient reduction the replicas drift apart."""
    with self.device:
      layer = _Dense(5)
      with backprop.GradientTape() as tape:
        x = self.device.pack(
            [constant_op.constant([[-0.5]]),
             constant_op.constant([[0.5]])])
        y = layer(x)
        loss = (y - math_ops.range(5.))**2.
      parameters = layer.trainable_variables
      unreduced_gradients = tape.gradient(loss, parameters)
      # Each component applies only its own gradient.
      for grad, param in zip(unreduced_gradients, parameters):
        param.assign_sub(0.01 * grad)
    final_kernels = self.device.unpack(layer.kernel)
    self.assertNotAllClose(final_kernels[0], final_kernels[1])
    final_bias = self.device.unpack(layer.bias)
    self.assertAllClose(1. - 0.01 * 2. * (1. - .5 - math_ops.range(5.)),
                        final_bias[0])
    self.assertAllClose(1. - 0.01 * 2. * (1. + .5 - math_ops.range(5.)),
                        final_bias[1])
    self.assertIn(self.device.components[0], final_kernels[0].backing_device)
    self.assertIn(self.device.components[1], final_kernels[1].backing_device)

  def test_training_loop(self):
    """A checkpointed synchronous training loop survives repeated restarts."""
    if self.device_type == "TPU":
      self.skipTest("ParallelDevice collectives on TPUs need work")
    for _ in range(5):
      layer = _Dense(5)
      checkpoint = tracking.Checkpoint(layer=layer)
      manager = checkpoint_management.CheckpointManager(
          checkpoint, directory=self.get_temp_dir(), max_to_keep=5)
      # Picks up where the previous outer iteration left off.
      manager.restore_or_initialize()
      for _ in range(10):
        with self.device:
          with backprop.GradientTape() as tape:
            x = self.device.pack(
                [constant_op.constant([[-0.5]]),
                 constant_op.constant([[0.5]])])
            y = layer(x)
            loss = (y - math_ops.range(5.))**2.
          parameters = layer.trainable_variables
          unreduced_gradients = tape.gradient(loss, parameters)
          reduced_gradients = _collective_sum(
              unreduced_gradients, num_replicas=len(self.device.components))
          for grad, param in zip(reduced_gradients, parameters):
            param.assign_sub(0.01 * grad)
        manager.save()
if __name__ == "__main__":
  # The tests above assume eager mode; enable it before running.
  ops.enable_eager_execution()
  test.main()
| |
from __future__ import absolute_import, unicode_literals
import docker
import time
from tests import unittest, mock
from tests.factories.docker_client_factory import DockerClientFactory
from freight_forwarder.container.config import Config
from freight_forwarder.container.container import Container
from freight_forwarder.container.host_config import HostConfig
class ContainerTest(unittest.TestCase):
    """Unit tests for freight_forwarder's Container wrapper.

    Docker API calls are stubbed by patching methods on
    docker.api.ContainerApiMixin, and Container's own `_create_container` /
    `_find_by_id` are patched so no Docker daemon is required.
    """
    def setUp(self):
        """
        Setup Testing Environment for ContainerTest
        """
        self.docker_client = DockerClientFactory()
    def tearDown(self):
        # Drop the per-test client so each test starts fresh.
        del self.docker_client
    def test_config(self):
        # `config` should be lazily materialized as a Config instance.
        with mock.patch.object(Container, '_create_container'):
            self.assertIsInstance(Container(self.docker_client, name='foo', image='bar').config, Config)
    def test_config_failure(self):
        # Assigning a non-Config value must be rejected.
        with self.assertRaises(TypeError):
            with mock.patch.object(Container, '_create_container'):
                Container(DockerClientFactory(), name='foo', image='bar').config = 0
    def test_host_config(self):
        with mock.patch.object(Container, '_create_container'):
            test_container = Container(self.docker_client, name='foo', image='bar')
            test_container.host_config = HostConfig({'log_config': {"config": {}, "type": "syslog"}})
            self.assertIsInstance(test_container.host_config, HostConfig)
    def test_host_config_failure(self):
        with self.assertRaises(TypeError):
            with mock.patch.object(Container, '_create_container'):
                Container(self.docker_client, name='foo', image='bar').host_config = 0
    @mock.patch.object(docker.api.ContainerApiMixin, 'attach')
    def test_attach(self, mock_docker_container_mixin):
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            attach_data = container.attach()
            self.assertIsInstance(attach_data, list)
    @mock.patch.object(docker.api.ContainerApiMixin, 'commit')
    def test_commit(self, mock_docker_container_mixin):
        # commit() should return the new image id reported by the API.
        mock_docker_container_mixin.return_value = {'Id': '123'}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            attach_data = container.commit(config=dict(), image_name='', tag='')
            self.assertEqual(attach_data, container.id)
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    @mock.patch.object(docker.api.ContainerApiMixin, 'remove_container')
    def test_delete(self, mock_docker_container_remove, mock_docker_container_inspect):
        # A stopped container can be deleted; the API returns an empty dict.
        mock_docker_container_inspect.return_value = {'state': {'running': False}}
        mock_docker_container_remove.return_value = {}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            self.assertEqual(container.delete(), {})
    def test_delete_failure(self):
        pass
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    def test_inspect(self, mock_docker_container_inspect):
        mock_docker_container_inspect.return_value = {'state': {'running': True}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            self.assertEqual(container.inspect(), {'state': {'running': True}})
    def test_output(self):
        pass
    @mock.patch.object(Container, '_wait_for_exit_code')
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    @mock.patch.object(docker.api.ContainerApiMixin, 'start')
    def test_start(self, mock_docker_container_start, mock_docker_container_inspect, mock_container_wait):
        # Case 1: the container reports running after start.
        mock_docker_container_inspect.return_value = {'state': {'running': True}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            self.assertTrue(container.start())
        # Case 2: already exited with code 0 — still counts as success.
        mock_docker_container_inspect.return_value = {'state': {'running': False, 'exit_code': 0}}
        mock_docker_container_start.return_value = None
        mock_container_wait.return_value = 0
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            self.assertTrue(container.start())
    def test_start_failure(self):
        pass
    @mock.patch.object(Container, '_start_recording')
    def test_start_transcribing(self, mock_container_start_recording):
        # start_transcribing should flip the flag and spin up a daemon
        # process with a queue.
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.start_transcribing()
            self.assertTrue(container._transcribe)
            self.assertIsNotNone(container._transcribe_queue)
            self.assertIsNotNone(container._transcribe_proc)
            self.assertTrue(container._transcribe_proc.daemon)
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    def test_state(self, mock_docker_container_inspect):
        mock_docker_container_inspect.return_value = {'state': {'running': True}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            self.assertEqual(container.state(), {'running': True})
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    def test_running(self, mock_docker_container_inspect):
        mock_docker_container_inspect.return_value = {'state': {'running': True}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            self.assertTrue(container.running())
        mock_docker_container_inspect.return_value = {'state': {'running': False}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            self.assertFalse(container.running())
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    @mock.patch.object(docker.api.ContainerApiMixin, 'stop')
    def test_stop(self, mock_docker_container_stop, mock_docker_container_inspect):
        mock_docker_container_inspect.return_value = {'state': {'running': False}}
        mock_docker_container_stop.return_value = True
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            container.name = 'foo'
            self.assertTrue(container.stop())
    @mock.patch.object(docker.api.ContainerApiMixin, 'wait')
    def test_wait(self, mock_docker_container_wait):
        mock_docker_container_wait.return_value = 0
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(self.docker_client, name='foo', image='bar', id='123')
            container.id = '123'
            self.assertEqual(container.wait(), 0)
    def test_dump_logs(self):
        pass
    @mock.patch.object(docker.api.ContainerApiMixin, 'containers')
    def test_find_by_name(self, mock_docker_container_containers):
        # Docker prefixes names with '/'; find_by_name should strip it and
        # key the result dict by the bare name.
        mock_docker_container_containers.return_value = [{
            'Id': '123',
            'Names': ['/foobar']
        }]
        with mock.patch.object(Container, '_find_by_id'):
            containers = Container.find_by_name(DockerClientFactory(), 'foobar')
            self.assertIsInstance(containers['foobar'], Container)
    @mock.patch.object(docker.api.ContainerApiMixin, 'containers')
    def test_find_by_name_failure(self, mock_docker_container_containers):
        with self.assertRaises(TypeError):
            Container.find_by_name(False, 'foobar')
        # API errors must propagate rather than be swallowed.
        mock_docker_container_containers.side_effect = Exception
        with self.assertRaises(Exception):
            Container.find_by_name(DockerClientFactory(), 'foobar')
    @mock.patch.object(docker.api.ContainerApiMixin, 'create_container')
    @mock.patch.object(docker.api.ContainerApiMixin, 'create_host_config')
    def test_create_container(self, mock_docker_container_create_host_config, mock_docker_container_create_container):
        mock_docker_container_create_container.return_value = {'Id': '123', 'Warnings': ['foobar']}
        container = Container(DockerClientFactory(), name='foo', image='bar')
        self.assertEqual(container.id, '123')
    @mock.patch.object(docker.api.ContainerApiMixin, 'create_container')
    @mock.patch.object(docker.api.ContainerApiMixin, 'create_host_config')
    def test_create_container_failure(self, mock_docker_container_create_host_config, mock_docker_container_create_container):
        # Each invalid constructor argument combination has its own error.
        with self.assertRaises(TypeError):
            Container(DockerClientFactory(), name='foo', image='bar', container_config=[])
        with self.assertRaises(TypeError):
            Container(DockerClientFactory(), name='foo', image='bar', host_config=[])
        with self.assertRaises(TypeError):
            Container(None)
        with self.assertRaises(AttributeError):
            Container(DockerClientFactory(), name='foo')
        with self.assertRaises(TypeError):
            Container(DockerClientFactory(), name=1, image='bar')
        with self.assertRaises(TypeError):
            Container(DockerClientFactory(), name='foo', image=1)
        mock_docker_container_create_container.side_effect = Exception
        with self.assertRaises(Exception):
            Container(DockerClientFactory(), name='foo', image='bar')
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    def test_find_by_id(self, mock_docker_container_inspect):
        # Constructing with id= triggers a lookup via inspect_container.
        mock_docker_container_inspect.return_value = {
            'Id': '123',
            'Name': 'foo',
            'Image': 'bar',
            'Created': '2016-01-20T23:05:25.351058124Z',
            'Config': {},
            'HostConfig': {}
        }
        container = Container(DockerClientFactory(), id='123')
        self.assertEqual(container.id, '123')
    def test_find_by_id_failure(self):
        # Non-string ids are rejected.
        with self.assertRaises(TypeError):
            Container(DockerClientFactory(), id=7)
    def test_start_recording(self):
        pass
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(docker.api.ContainerApiMixin, 'inspect_container')
    def test_wait_for_exit_code(self, mock_docker_container_inspect, mock_time_sleep):
        # time.sleep is patched so the poll loop returns immediately.
        mock_docker_container_inspect.return_value = {'state': {'running': False, 'exit_code': 0}}
        with mock.patch.object(Container, '_find_by_id'):
            container = Container(DockerClientFactory(), id='123')
            container.id = '123'
            exit_code = container._wait_for_exit_code()
            self.assertEqual(exit_code, 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| |
from tkinter import *
import ConnectFour
from ConnectFour import C4Game
from random import randint
import games
g = C4Game
# class GameState:
# def __init__(self, to_move, board, label=None, depth=8):
# self.to_move= to_move
# self.board = board
# self.label = label
# self.maxDepth = depth
#
# def __str__(self):
# if self.label == None:
# return super(GameState, self).__str__()
# return self.label
class GUI:
    """Tkinter front-end for a Connect Four game.

    Wraps a ConnectFour game state, draws the board on a Canvas, and offers
    buttons to let either player be driven by the computer.
    """
    elementSize = 50      # pixel size of one board cell
    gridBorder = 3
    gridColor = "#000000"
    p1Color = "#FF0000"   # Player 1 discs (red)
    p2Color = "#FFFF00"   # Player 2 discs (yellow)
    backgroundColor = "#add8e6"
    gameOn = False        # set True once newGame() has run
    def __init__(self, master):
        """Build the window widgets and start the first game."""
        self.master = master
        master.title('Connect Four')
        label = Label(master, text="Connect Four", font=("Times New Roman", 50))
        label.grid(row=0,column=1)
        player1label = Label(master,text="If Player 1 is Computer")
        player2label = Label(master,text="If Player 2 is Computer")
        player1button1 = Button(master,text="Click Here!", command=self.cpuDrop1)
        player2button1 = Button(master,text="Click Here!",command=self.cpuDrop2)
        player1label.grid(row=2,column=0,)
        player2label.grid(row=2,column=2)
        player1button1.grid(row=3,column=0,)
        player2button1.grid(row=3,column=2)
        button = Button(master, text="New Game!", command=self._newGameButton)
        button.grid(row=3,column=1)
        # Canvas is resized to the board dimensions in newGame().
        self.canvas = Canvas(master, width=200, height=50, background=self.backgroundColor, highlightthickness=0)
        self.canvas.grid(row=5,column=1)
        self.currentPlayerVar = StringVar(self.master, value="")
        self.currentPlayerLabel = Label(self.master, textvariable=self.currentPlayerVar, anchor=W)
        self.currentPlayerLabel.grid(row=6,column=1)
        # Clicking a column drops a piece for the human player.
        self.canvas.bind('<Button-1>', self._canvasClick)
        self.newGame()
    def cpuDrop1(self):
        """Let the computer move for Player 1 (only on Player 1's turn).

        NOTE(review): near-duplicate of cpuDrop2; passes `self` (the GUI)
        as adrop's `column` argument — adrop ignores it on Player 1's turn
        and picks a random column instead.
        """
        if(self.gameState.first_player == True):
            if not self.gameOn: return
            if self.gameState.game_over: return
            self.adrop(self)
            self.master.update()
            self.drawGrid()
            self.draw()
            self._updateCurrentPlayer()
            if self.gameState.game_over:
                x = self.canvas.winfo_width() // 2
                y = self.canvas.winfo_height() // 2
                if self.gameState.game_over == 'draw':
                    t = 'DRAW!'
                else:
                    winner = self.p1 if self.gameState.first_player else self.p2
                    t = winner + ' won!'
                self.canvas.create_text(x, y, text=t, font=("Helvetica", 32), fill="#333")
    def cpuDrop2(self):
        """Let the computer move for Player 2 (only on Player 2's turn).

        NOTE(review): mirror of cpuDrop1 but delegates to bdrop, which uses
        the alpha-beta search on Player 2's turn.
        """
        if(self.gameState.first_player == False):
            if not self.gameOn: return
            if self.gameState.game_over: return
            self.bdrop(self)
            self.master.update()
            self.drawGrid()
            self.draw()
            self._updateCurrentPlayer()
            if self.gameState.game_over:
                x = self.canvas.winfo_width() // 2
                y = self.canvas.winfo_height() // 2
                if self.gameState.game_over == 'draw':
                    t = 'DRAW!'
                else:
                    winner = self.p1 if self.gameState.first_player else self.p2
                    t = winner + ' won!'
                self.canvas.create_text(x, y, text=t, font=("Helvetica", 32), fill="#333")
    def draw(self):
        """Paint every placed disc; row 0 is the bottom (y is flipped)."""
        for c in range(self.gameState.size['c']):
            for r in range(self.gameState.size['r']):
                # Skip cells above the current stack height of this column.
                if r >= len(self.gameState.grid[c]): continue
                x0 = c * self.elementSize
                y0 = r * self.elementSize
                x1 = (c + 1) * self.elementSize
                y1 = (r + 1) * self.elementSize
                fill = self.p1Color if self.gameState.grid[c][r] == self.gameState.players[True] else self.p2Color
                self.canvas.create_oval(x0 + 2,
                                        self.canvas.winfo_height() - (y0 + 2),
                                        x1 - 2,
                                        self.canvas.winfo_height() - (y1 - 2),
                                        fill=fill, outline=self.gridColor)
    def drawGrid(self):
        """Draw the horizontal and vertical board lines."""
        x0, x1 = 0, self.canvas.winfo_width()
        for r in range(1, self.gameState.size['r']):
            y = r * self.elementSize
            self.canvas.create_line(x0, y, x1, y, fill=self.gridColor)
        y0, y1 = 0, self.canvas.winfo_height()
        for c in range(1, self.gameState.size['c']):
            x = c * self.elementSize
            self.canvas.create_line(x, y0, x, y1, fill=self.gridColor)
    # def drop(self, column):
    #     return self.gameState.drop(column)
    def drop(self, column):
        """Drop the current player's piece into `column`."""
        return self.gameState.drop(column)
    def adrop(self,column):
        """Computer move helper for Player 1: random column on P1's turn,
        otherwise treat `column` as an actual column index.

        NOTE(review): callers pass the GUI object as `column` on P1's turn;
        it is only printed, never used as an index.
        """
        if(self.gameState.first_player):
            #print(test)
            print(column.gameState.grid)
            guess = randint(0,6)
            return self.gameState.drop(guess)
        else:
            return self.gameState.drop(column)
    def bdrop(self, column):
        """Computer move helper for Player 2: alpha-beta search on P2's turn,
        otherwise treat `column` as an actual column index.

        NOTE(review): `newguess` is assigned but never used.
        """
        if(self.gameState.first_player):
            # self.gameState.grid
            # print(column.gameState.grid)
            return self.gameState.drop(column)
        else:
            # Debug dump of the current board contents.
            for x in range(0,7):
                for y in range(0,len(column.gameState.grid[x])):
                    print(column.gameState.grid[x][y])
                    #d = {column.gameState.grid[x], x}
                #print(column.gameState.grid[x])
            #print(b)
            #guess = randint(0, 6)
            # guess = g.utility(self, self.gameState, self.currentPlayerLabel)
            guess = games.alphabeta_search(self.gameState, self.game, 1)
            newguess = guess
            # print(d)
            # print(column.gameState.grid)
            return self.gameState.drop(guess)
    def newGame(self):
        """Reset game state, resize the canvas to the board, and redraw."""
        self.p1 = 'Player 1'
        self.p2 = 'Player 2'
        columns = 7
        rows = 6
        self.gameState = ConnectFour.ConnectFour(columns=columns, rows=rows)
        self.game = ConnectFour.C4Game(self.gameState)
        self.canvas.delete(ALL)
        self.canvas.config(width=(self.elementSize) * self.gameState.size['c'],
                           height=(self.elementSize) * self.gameState.size['r'])
        self.master.update()
        self.drawGrid()
        self.draw()
        self._updateCurrentPlayer()
        self.gameOn = True
    def _updateCurrentPlayer(self):
        """Refresh the 'Current player' label from the game state."""
        p = self.p1 if self.gameState.first_player else self.p2
        self.currentPlayerVar.set('Current player: ' + p)
    def _canvasClick(self, event):
        """Handle a human click: map x-pixel to a column and drop there."""
        if not self.gameOn: return
        if self.gameState.game_over: return
        c = event.x // self.elementSize
        if (0 <= c < self.gameState.size['c']):
            self.drop(c)
            self.draw()
            self._updateCurrentPlayer()
        if self.gameState.game_over:
            x = self.canvas.winfo_width() // 2
            y = self.canvas.winfo_height() // 2
            if self.gameState.game_over == 'draw':
                t = 'DRAW!'
            else:
                winner = self.p1 if self.gameState.first_player else self.p2
                t = winner + ' won!'
            # self.canvas.create_text(x, y, text=t, font=("Times New Roman", 42), fill="#333")
            self.canvas.create_text(175, y-120, text=t, font=("Times New Roman", 42), fill="#333")
    def _newGameButton(self):
        # Button callback wrapper around newGame().
        self.newGame()
    def check_win(self, board):
        """NOTE(review): not called anywhere in this class and only checks
        the first three entries of `board` — looks vestigial; confirm."""
        if board[0] == 0 and board[1] == 0 and board[2] == 0:
            return 1
        return 0
# Script entry: build the window, attach the GUI, and enter the event loop.
root = Tk()
#root.configure(background="purple")
app = GUI(root)
# NOTE(review): requires a '4.ico' file beside the script; raises TclError
# if it is missing.
root.wm_iconbitmap('4.ico')
root.mainloop()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table import Table, QTable, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.io.misc.hdf5 import meta_path
from astropy.utils.compat.optional_deps import HAS_H5PY # noqa
if HAS_H5PY:
import h5py
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# HDF5 does not support object dtype (since it stores binary representations).
# Filter the shared mixin-column fixtures down to what HDF5 can store.
unsupported_cols = {name: col for name, col in mixin_cols.items()
                    if (isinstance(col, np.ndarray) and col.dtype.kind == 'O')}
mixin_cols = {name: col for name, col in mixin_cols.items()
              if name not in unsupported_cols}
# Every dtype exercised by the round-trip tests below.
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
              np.int16, np.int32, np.int64, np.float32, np.float64,
              np.bool_, '|S3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
else:
return [1, 2, 3]
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
    """Writing without path= warns and uses the default '__astropy_table__'."""
    filename = str(tmpdir.join('test.hdf5'))
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.warns(UserWarning, match="table path was not set via the path= argument"):
        tbl.write(filename)
    tbl = Table.read(filename, path='__astropy_table__')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath_nonempty(tmpdir):
    """Appending to a non-empty file without path= raises ValueError."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='bubu')
    with pytest.raises(ValueError) as exc:
        t1.write(test_file, append=True)
    assert 'table path should always be set via the path=' in exc.value.args[0]
@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
    """Reading an HDF5 group with no tables raises ValueError."""
    test_file = str(tmpdir.join('test.hdf5'))
    h5py.File(test_file, 'w').close()  # create empty file
    with pytest.raises(ValueError, match='no table found in HDF5 group /'):
        Table.read(test_file, path='/', format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
    """Reading without path= finds the single table in the file."""
    filename = str(tmpdir.join('test.hdf5'))
    written = Table()
    written.add_column(Column(name='a', data=[1, 2, 3]))
    written.write(filename, path="the_table")
    loaded = Table.read(filename)
    assert np.all(written['a'] == loaded['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath_multi_tables(tmpdir):
    """Reading without path= warns when the file holds several tables,
    then falls back to one of them."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path="the_table")
    t1.write(test_file, path="the_table_but_different", append=True,
             overwrite=True)
    with pytest.warns(AstropyUserWarning,
                      match=r"path= was not specified but multiple tables"):
        t2 = Table.read(test_file)
    assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
    """A path= ending in '/' (i.e. no table name) is rejected."""
    filename = str(tmpdir.join('test.hdf5'))
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(ValueError) as exc:
        tbl.write(filename, path='test/')
    assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
    """Reading a path that does not exist raises OSError."""
    filename = str(tmpdir.join('test.hdf5'))
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    tbl.write(filename, path='the_table')
    with pytest.raises(OSError) as exc:
        Table.read(filename, path='test/')
    assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
    """Reading a path whose groups were never created raises OSError."""
    filename = str(tmpdir.join('test.hdf5'))
    h5py.File(filename, 'w').close()  # create empty file
    with pytest.raises(OSError) as exc:
        Table.read(filename, path='test/path/table')
    assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
    """Groups exist but the table dataset itself is missing."""
    filename = str(tmpdir.join('test.hdf5'))
    with h5py.File(filename, 'w') as handle:
        handle.create_group('test').create_group('path')
    with pytest.raises(OSError) as exc:
        Table.read(filename, path='test/path/table')
    assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
    """Missing-path error also applies when passing an open h5py.File."""
    filename = str(tmpdir.join('test.hdf5'))
    with h5py.File(filename, 'w') as handle:
        with pytest.raises(OSError) as exc:
            Table.read(handle, path='test/path/table')
        assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
    """Round-trip a one-column table through an HDF5 file."""
    filename = str(tmpdir.join('test.hdf5'))
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    tbl.write(filename, path='the_table')
    loaded = Table.read(filename, path='the_table')
    assert np.all(loaded['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
    """Appending to an already-existing table path raises OSError."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    with pytest.raises(OSError) as exc:
        t1.write(test_file, path='the_table', append=True)
    assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
    """Round-trip through an in-memory (core driver, no backing store) file."""
    with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
        tbl = Table()
        tbl.add_column(Column(name='a', data=[1, 2, 3]))
        tbl.write(output_file, path='the_table')
        loaded = Table.read(output_file, path='the_table')
        assert np.all(loaded['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
    """Writing to an existing file without overwrite/append is refused."""
    filename = str(tmpdir.join('test.hdf5'))
    h5py.File(filename, 'w').close()  # create empty file
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
        tbl.write(filename, path='the_table')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
    """overwrite=True replaces an existing file entirely."""
    filename = str(tmpdir.join('test.hdf5'))
    h5py.File(filename, 'w').close()  # create empty file
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    tbl.write(filename, path='the_table', overwrite=True)
    loaded = Table.read(filename, path='the_table')
    assert np.all(loaded['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
    """append=True adds tables under new paths in an existing file."""
    filename = str(tmpdir.join('test.hdf5'))
    h5py.File(filename, 'w').close()  # create empty file
    tbl = Table()
    tbl.add_column(Column(name='a', data=[1, 2, 3]))
    tbl.write(filename, path='the_table_1', append=True)
    tbl.write(filename, path='the_table_2', append=True)
    first = Table.read(filename, path='the_table_1')
    assert np.all(first['a'] == [1, 2, 3])
    second = Table.read(filename, path='the_table_2')
    assert np.all(second['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
    """append=True works both into a pre-existing group and a new one."""
    test_file = str(tmpdir.join('test.hdf5'))
    with h5py.File(test_file, 'w') as f:
        f.create_group('test_1')
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='test_1/the_table_1', append=True)
    t1.write(test_file, path='test_2/the_table_2', append=True)
    t2 = Table.read(test_file, path='test_1/the_table_1')
    assert np.all(t2['a'] == [1, 2, 3])
    t3 = Table.read(test_file, path='test_2/the_table_2')
    assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
    """append+overwrite replaces one table while leaving siblings intact."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='table1')
    t1.write(test_file, path='table2', append=True)
    t1v2 = Table()
    t1v2.add_column(Column(name='a', data=[4, 5, 6]))
    # Plain append onto an occupied path must still fail ...
    with pytest.raises(OSError) as exc:
        t1v2.write(test_file, path='table1', append=True)
    assert exc.value.args[0] == 'Table table1 already exists'
    # ... but append+overwrite replaces just that table.
    t1v2.write(test_file, path='table1', append=True, overwrite=True)
    t2 = Table.read(test_file, path='table1')
    assert np.all(t2['a'] == [4, 5, 6])
    t3 = Table.read(test_file, path='table2')
    assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
    """Table.read accepts an already-open h5py.File object."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='the_table')
    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file, path='the_table')
        assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_path(tmpdir):
    """Table.read resolves a nested group path inside an open file object."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='path/to/data/the_table')
    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file, path='path/to/data/the_table')
        assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_group_path(tmpdir):
    """Table.read accepts an h5py.Group plus a path relative to that group."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.write(test_file, path='path/to/data/the_table')
    import h5py
    with h5py.File(test_file, 'r') as input_file:
        t2 = Table.read(input_file['path/to'], path='data/the_table')
        assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():
    """Passing a non-h5py file-like object raises TypeError."""
    class FakeFile:
        def read(self):
            pass
    f = FakeFile()
    with pytest.raises(TypeError, match='h5py can only open regular files'):
        Table.read(f, format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
    """Table.write accepts an already-open h5py.File object."""
    test_file = str(tmpdir.join('test.hdf5'))
    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')
    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_create_dataset_kwargs(tmpdir):
    """Extra kwargs (here ``maxshape``) are forwarded to h5py create_dataset."""
    test_file = str(tmpdir.join('test.hdf5'))
    the_path = 'the_table'
    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path=the_path,
                 maxshape=(None, ))
    # A roundabout way of checking this, but the table created above should be
    # resizable if the kwarg was passed through successfully
    t2 = Table()
    t2.add_column(Column(name='a', data=[4, 5]))
    with h5py.File(test_file, 'a') as output_file:
        output_file[the_path].resize((len(t1) + len(t2), ))
        output_file[the_path][len(t1):] = t2.as_array()
    t3 = Table.read(test_file, path='the_table')
    assert np.all(t3['a'] == [1, 2, 3, 4, 5])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_filobj_group(tmpdir):
    """Writing a nested group path through an open file object creates it."""
    test_file = str(tmpdir.join('test.hdf5'))
    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='path/to/data/the_table')
    t2 = Table.read(test_file, path='path/to/data/the_table')
    assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
    """A non-string, non-h5py output target raises TypeError."""
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    with pytest.raises(TypeError) as exc:
        t1.write(1212, path='path/to/data/the_table', format='hdf5')
    assert exc.value.args[0] == ('output should be a string '
                                 'or an h5py File or Group object')
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
    """Each supported dtype round-trips with values and dtype preserved."""
    test_file = str(tmpdir.join('test.hdf5'))
    values = _default_values(dtype)
    t1 = Table()
    t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file, path='the_table')
    assert np.all(t2['a'] == values)
    assert t2['a'].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
    """All supported dtypes round-trip together in one table."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    for dtype in ALL_DTYPES:
        values = _default_values(dtype)
        t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file, path='the_table')
    for dtype in ALL_DTYPES:
        values = _default_values(dtype)
        assert np.all(t2[str(dtype)] == values)
        assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
    """Scalar and ndarray table metadata survive a write/read round trip."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.meta['a'] = 1
    t1.meta['b'] = 'hello'
    t1.meta['c'] = 3.14159
    t1.meta['d'] = True
    t1.meta['e'] = np.array([1, 2, 3])
    t1.write(test_file, path='the_table')
    t2 = Table.read(test_file, path='the_table')
    for key in t1.meta:
        assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized(tmpdir):
    """Column attrs (unit/format/description/meta) and table meta round-trip
    through HDF5 with serialize_meta=True, and the meta dataset is stored as
    fixed-width bytes."""
    test_file = str(tmpdir.join('test.hdf5'))

    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)

    t2 = Table.read(test_file, path='the_table')
    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta

    # Check that the meta table is fixed-width bytes (see #11299).
    # Bug fix: use a context manager — the original left the h5py.File
    # handle open, leaking it (and tripping pytest's --open-files check).
    with h5py.File(test_file, 'r') as h5:
        meta_lines = h5[meta_path('the_table')]
        assert meta_lines.dtype.kind == 'S'
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_old_meta_format(tmpdir):
    """Test the old meta format

    Only for some files created prior to v4.0, in compatibility mode.
    """
    # Reads a checked-in fixture file rather than writing one; the in-memory
    # table below is the expected content of that file.
    test_file = get_pkg_data_filename('data/old_meta_example.hdf5')
    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t2 = Table.read(test_file, path='the_table')
    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_in_complicated_path(tmpdir):
    """Serialized meta also round-trips when the table sits in a nested group."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
             overwrite=True)
    t2 = Table.read(test_file, path='the_table/complicated/path')
    assert t1['a'].format == t2['a'].format
    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_metadata_very_large(tmpdir):
    """Test that very large datasets work, now!"""
    # Meta strings exceeding the 64 KiB attribute limit must still round-trip.
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1['a'] = Column(data=[1, 2, 3], unit="s")
    t1['a'].meta['a0'] = "A0"
    t1['a'].meta['a1'] = {"a1": [0, 1]}
    t1['a'].format = '7.3f'
    t1['a'].description = 'A column'
    t1.meta['b'] = 1
    t1.meta['c'] = {"c0": [0, 1]}
    t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
    t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
    t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
    t2 = Table.read(test_file, path='the_table')
    assert t1['a'].unit == t2['a'].unit
    assert t1['a'].format == t2['a'].format
    assert t1['a'].description == t2['a'].description
    assert t1['a'].meta == t2['a'].meta
    assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
    """Unserializable meta values are skipped with exactly one warning."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.meta['a'] = 1
    t1.meta['b'] = 'hello'
    t1.meta['c'] = 3.14159
    t1.meta['d'] = True
    t1.meta['e'] = np.array([1, 2, 3])
    t1.meta['f'] = str  # a type object cannot be written to HDF5
    wtext = f"Attribute `f` of type {type(t1.meta['f'])} cannot be written to HDF5 files - skipping"
    with pytest.warns(AstropyUserWarning, match=wtext) as w:
        t1.write(test_file, path='the_table')
    assert len(w) == 1
@pytest.mark.skipif('not HAS_H5PY')
def test_fail_meta_serialize(tmpdir):
    """With serialize_meta=True an unserializable meta value is an error,
    not a warning."""
    test_file = str(tmpdir.join('test.hdf5'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3]))
    t1.meta['f'] = str
    with pytest.raises(Exception) as err:
        t1.write(test_file, path='the_table', serialize_meta=True)
    assert "cannot represent an object" in str(err.value)
    assert "<class 'str'>" in str(err.value)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):
    # Regression test - ensure that Datasets are recognized automatically
    test_file = str(tmpdir.join('test.hdf5'))
    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path='the_table')
    f = h5py.File(test_file, mode='r')
    # File, root Group and Dataset objects must all be accepted by read().
    t2 = Table.read(f, path='the_table')
    assert np.all(t2['a'] == [1, 2, 3])
    t3 = Table.read(f['/'], path='the_table')
    assert np.all(t3['a'] == [1, 2, 3])
    t4 = Table.read(f['the_table'])
    assert np.all(t4['a'] == [1, 2, 3])
    f.close()  # don't raise an error in 'test --open-files'
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_unicode_to_hdf5(tmpdir):
    """String columns come back as unicode ('U') with character_as_bytes=False
    and as bytes ('S') by default."""
    test_file = str(tmpdir.join('test.hdf5'))
    t = Table()
    t['p'] = ['a', 'b', 'c']
    t['q'] = [1, 2, 3]
    t['r'] = [b'a', b'b', b'c']
    t['s'] = ["\u2119", "\u01b4", "\u2602"]
    t.write(test_file, path='the_table', overwrite=True)
    t1 = Table.read(test_file, path='the_table', character_as_bytes=False)
    for col, col1 in zip(t.itercols(), t1.itercols()):
        assert np.all(col == col1)
    assert np.all(t1['p'].info.dtype.kind == "U")
    assert np.all(t1['q'].info.dtype.kind == "i")
    assert np.all(t1['r'].info.dtype.kind == "U")
    assert np.all(t1['s'].info.dtype.kind == "U")
    # Test default (character_as_bytes=True)
    t2 = Table.read(test_file, path='the_table')
    for col, col1 in zip(t.itercols(), t2.itercols()):
        assert np.all(col == col1)
    assert np.all(t2['p'].info.dtype.kind == "S")
    assert np.all(t2['q'].info.dtype.kind == "i")
    assert np.all(t2['r'].info.dtype.kind == "S")
    assert np.all(t2['s'].info.dtype.kind == "S")
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
    """Assert that ``obj1`` and ``obj2`` agree on ``attrs`` plus the
    standard ``info.*`` attributes (name, format, unit, description, meta).

    Dotted attribute paths are followed with getattr, falling back to item
    access for dict-like steps.  Float ndarrays are compared with a tight
    relative tolerance; everything else with exact element-wise equality.
    """
    if compare_class:
        assert obj1.__class__ is obj2.__class__

    info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
    for attr in attrs + info_attrs:
        a1, a2 = obj1, obj2
        for subattr in attr.split('.'):
            try:
                a1 = getattr(a1, subattr)
                a2 = getattr(a2, subattr)
            except AttributeError:
                a1 = a1[subattr]
                a2 = a2[subattr]

        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.  Normalize both sides before comparing.
        if attr == 'info.meta':
            a1 = {} if a1 is None else a1
            a2 = {} if a2 is None else a2

        if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
            assert quantity_allclose(a1, a2, rtol=1e-15)
        else:
            assert np.all(a1 == a2)
@pytest.mark.skipif('not HAS_H5PY')
def test_hdf5_mixins_qtable_to_table(tmpdir):
    """Test writing as QTable and reading as Table. Ensure correct classes
    come out.
    """
    filename = str(tmpdir.join('test_simple.hdf5'))

    names = sorted(mixin_cols)

    t = QTable([mixin_cols[name] for name in names], names=names)
    t.write(filename, format='hdf5', path='root', serialize_meta=True)
    t2 = Table.read(filename, format='hdf5', path='root')

    assert t.colnames == t2.colnames

    for name, col in t.columns.items():
        col2 = t2[name]

        attrs = compare_attrs[name]
        compare_class = True

        if isinstance(col.info, QuantityInfo):
            # Downgrade Quantity to Column + unit
            assert type(col2) is Column
            # Class-specific attributes like `value` or `wrap_angle` are lost.
            attrs = ['unit']
            compare_class = False
            # Compare data values here (assert_objects_equal doesn't know how in this case)
            assert np.all(col.value == col2)

        assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
    """Test write/read all cols at once and validate intermediate column names"""
    filename = str(tmpdir.join('test_simple.hdf5'))
    names = sorted(mixin_cols)
    all_serialized_names = []
    for name in names:
        all_serialized_names.extend(serialized_names[name])
    t = table_cls([mixin_cols[name] for name in names], names=names)
    t.meta['C'] = 'spam'
    t.meta['comments'] = ['this', 'is', 'a', 'comment']
    t.meta['history'] = ['first', 'second', 'third']
    t.write(filename, format="hdf5", path='root', serialize_meta=True)
    t2 = table_cls.read(filename, format='hdf5', path='root')
    assert t2.meta['C'] == 'spam'
    assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
    assert t2.meta['history'] == ['first', 'second', 'third']
    assert t.colnames == t2.colnames
    # Read directly via hdf5 and confirm column names
    h5 = h5py.File(filename, 'r')
    h5_names = list(h5['root'].dtype.names)
    assert h5_names == all_serialized_names
    h5.close()  # close explicitly so --open-files stays clean
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
    """Test write/read one col at a time and do detailed validation"""
    filename = str(tmpdir.join('test_simple.hdf5'))
    name, col = name_col

    c = [1.0, 2.0]
    t = table_cls([c, col, c], names=['c1', name, 'c2'])
    t[name].info.description = 'my description'
    t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}

    if not t.has_mixin_columns:
        pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')

    t.write(filename, format="hdf5", path='root', serialize_meta=True)
    t2 = table_cls.read(filename, format='hdf5', path='root')

    assert t.colnames == t2.colnames

    for colname in t.colnames:
        compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
        assert_objects_equal(t[colname], t2[colname], compare)

    # Special case to make sure Column type doesn't leak into Time class data
    if name.startswith('tm'):
        assert t2[name]._time.jd1.__class__ is np.ndarray
        assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_fits_unsupported_mixin(name_col, tmpdir):
    """Writing a column type that HDF5 serialization does not support fails.

    Bug fix: this module-level test took a stray ``self`` parameter
    (apparently copied out of a class-based test), which pytest rejects
    with "fixture 'self' not found" instead of running the test.  Also
    adds the HAS_H5PY skip marker every other test in this module carries.
    """
    # NOTE(review): filename says .fits but the write uses format='hdf5' —
    # presumably a copy/paste leftover; the format kwarg is what matters.
    filename = str(tmpdir.join('test_simple.fits'))
    name, col = name_col
    Table([col], names=[name]).write(filename, format='hdf5', path='root',
                                     serialize_meta=True)
@pytest.mark.skipif('not HAS_H5PY')
def test_round_trip_masked_table_default(tmpdir):
    """Test round-trip of MaskedColumn through HDF5 using default serialization
    that writes a separate mask column. Note:

    >>> simple_table(masked=True)
    <Table masked=True length=3>
      a      b     c
    int64 float64 str1
    ----- ------- ----
       --     1.0    c
        2     2.0   --
        3      --    e
    """
    filename = str(tmpdir.join('test.h5'))

    t = simple_table(masked=True)  # int, float, and str cols with one masked element
    t['c'] = [b'c', b'd', b'e']
    t['c'].mask[1] = True
    t.write(filename, format='hdf5', path='root', serialize_meta=True)

    t2 = Table.read(filename)
    assert t2.masked is False
    assert t2.colnames == t.colnames
    for name in t2.colnames:
        assert np.all(t2[name].mask == t[name].mask)
        assert np.all(t2[name] == t[name])

        # Data under the mask round-trips also (unmask data to show this).
        t[name].mask = False
        t2[name].mask = False
        assert np.all(t2[name] == t[name])
@pytest.mark.skipif('not HAS_H5PY')
def test_overwrite_serialized_meta():
    """Overwriting a table with serialized meta replaces the old meta table."""
    # This used to cause an error because the meta data table
    # was not removed from the existing file.
    with h5py.File('test_data.h5', 'w', driver='core', backing_store=False) as out:
        t1 = Table()
        t1.add_column(Column(data=[4, 8, 15], unit='cm'))
        t1.write(out, path='data', serialize_meta=True)

        t2 = Table.read(out, path='data')
        assert all(t1 == t2)
        assert t1.info(out=None) == t2.info(out=None)

        t3 = Table()
        t3.add_column(Column(data=[16, 23, 42], unit='g'))
        t3.write(out, path='data', serialize_meta=True, append=True, overwrite=True)

        t2 = Table.read(out, path='data')
        assert all(t3 == t2)
        assert t3.info(out=None) == t2.info(out=None)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cql
from cql.marshal import (int32_pack, int32_unpack, uint16_pack, uint16_unpack,
int8_pack, int8_unpack)
from cql.cqltypes import lookup_cqltype
from cql.connection import Connection
from cql.cursor import Cursor, _VOID_DESCRIPTION, _COUNT_DESCRIPTION
from cql.apivalues import ProgrammingError, OperationalError
from cql.query import PreparedQuery, prepare_query, cql_quote_name
import socket
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from warnings import warn
# CQL binary protocol framing constants (protocol v1).
PROTOCOL_VERSION = 0x01
PROTOCOL_VERSION_MASK = 0x7f

# The high bit of the version byte encodes message direction.
HEADER_DIRECTION_FROM_CLIENT = 0x00
HEADER_DIRECTION_TO_CLIENT = 0x80
HEADER_DIRECTION_MASK = 0x80
class ConsistencyLevel(object):
    """Bidirectional mapping between CQL consistency-level names and their
    wire-protocol numeric values.

    Improvement: the original rebuilt a literal dict on every call; the
    tables are now built once as class attributes.  Both lookups still
    raise KeyError for unknown inputs, as before.
    """

    _name_by_value = {0: 'ANY',
                      1: 'ONE',
                      2: 'TWO',
                      3: 'THREE',
                      4: 'QUORUM',
                      5: 'ALL',
                      6: 'LOCAL_QUORUM',
                      7: 'EACH_QUORUM'}
    # Inverse table; the iterable is evaluated in class scope, so this is
    # valid on both Python 2.7 and 3.
    _value_by_name = {name: value for value, name in _name_by_value.items()}

    @classmethod
    def name_from_value(cls, value):
        """Return the level name for a numeric value (KeyError if unknown)."""
        return cls._name_by_value[value]

    @classmethod
    def value_from_name(cls, name):
        """Return the numeric value for a level name (KeyError if unknown)."""
        return cls._value_by_name[name]
class CqlResult:
    """Rows of a query result together with their column metadata.

    Iterating a CqlResult iterates its rows.
    """

    def __init__(self, column_metadata, rows):
        self.column_metadata = column_metadata
        self.rows = rows

    def __iter__(self):
        return iter(self.rows)

    def __str__(self):
        return '<CqlResult: column_metadata=%r, rows=%r>' % (
            self.column_metadata, self.rows)

    __repr__ = __str__
class PreparedResult:
    """Result of a PREPARE request: the server-assigned query id plus the
    metadata describing the statement's bind parameters.

    Bug fix: ``__str__`` referenced the nonexistent attribute
    ``self.column_metadata`` (the constructor stores ``param_metadata``),
    so str()/repr() raised AttributeError.  The format label is corrected
    to match the actual attribute as well.
    """

    def __init__(self, queryid, param_metadata):
        self.queryid = queryid
        self.param_metadata = param_metadata

    def __str__(self):
        return '<PreparedResult: queryid=%r, param_metadata=%r>' \
               % (self.queryid, self.param_metadata)

    __repr__ = __str__
# Registries of concrete message classes, filled in by the metaclass below.
_message_types_by_name = {}
_message_types_by_opcode = {}

class _register_msg_type(type):
    # Metaclass: every message class whose Python name does not start with
    # an underscore is registered under its `name` and `opcode` class attrs.
    def __init__(cls, name, bases, dct):
        if not name.startswith('_'):
            _message_types_by_name[cls.name] = cls
            _message_types_by_opcode[cls.opcode] = cls
class _MessageType(object):
    """Base class for CQL native-protocol messages.

    NOTE(review): registration relies on the Python 2 ``__metaclass__``
    hook; under Python 3 the metaclass would not be applied.
    """
    __metaclass__ = _register_msg_type

    # Names of keyword arguments each subclass requires.
    params = ()

    def __init__(self, **kwargs):
        # Every name in `params` must be supplied as a keyword argument.
        for pname in self.params:
            try:
                pval = kwargs[pname]
            except KeyError:
                raise ValueError("%s instances need the %s keyword parameter"
                                 % (self.__class__.__name__, pname))
            setattr(self, pname, pval)

    def send(self, f, streamid, compression=None):
        """Serialize this message and write it to ``f`` as a single frame."""
        body = StringIO()
        self.send_body(body)
        body = body.getvalue()
        version = PROTOCOL_VERSION | HEADER_DIRECTION_FROM_CLIENT
        flags = 0
        if compression is not None and len(body) > 0:
            body = compression(body)
            flags |= 0x1  # flag bit 0: body is compressed
        msglen = int32_pack(len(body))
        # Frame header: version, flags, stream id, opcode, then body length.
        header = ''.join(map(int8_pack, (version, flags, streamid, self.opcode))) \
                 + msglen
        f.write(header)
        if len(body) > 0:
            f.write(body)

    def __str__(self):
        paramstrs = ['%s=%r' % (pname, getattr(self, pname)) for pname in self.params]
        return '<%s(%s)>' % (self.__class__.__name__, ', '.join(paramstrs))
    __repr__ = __str__
def read_frame(f, decompressor=None):
    """Read one server frame from ``f`` and decode it into a message object.

    The decoded message gets a ``stream_id`` attribute so it can be matched
    to its request.  Raises ProtocolException if the frame is compressed
    and no ``decompressor`` callable was supplied.
    """
    # 8-byte header: version, flags, stream id, opcode (1 byte each) + length.
    header = f.read(8)
    version, flags, stream, opcode = map(int8_unpack, header[:4])
    body_len = int32_unpack(header[4:])
    assert version & PROTOCOL_VERSION_MASK == PROTOCOL_VERSION, \
            "Unsupported CQL protocol version %d" % version
    assert version & HEADER_DIRECTION_MASK == HEADER_DIRECTION_TO_CLIENT, \
            "Unexpected request from server with opcode %04x, stream id %r" % (opcode, stream)
    assert body_len >= 0, "Invalid CQL protocol body_len %r" % body_len
    body = f.read(body_len)
    if flags & 0x1:
        if decompressor is None:
            raise ProtocolException("No decompressor available for compressed frame!")
        body = decompressor(body)
        flags ^= 0x1  # clear the handled bit; anything left over is unknown
    if flags:
        warn("Unknown protocol flags set: %02x. May cause problems." % flags)
    msgclass = _message_types_by_opcode[opcode]
    msg = msgclass.recv_body(StringIO(body))
    msg.stream_id = stream
    return msg
# Maps server error codes to the ErrorMessage subclass that decodes them.
error_classes = {}

class ErrorMessage(_MessageType):
    """Server ERROR response; dispatches decoding to a code-specific subclass."""
    opcode = 0x00
    name = 'ERROR'
    params = ('code', 'message', 'info')
    summary = 'Unknown'

    @classmethod
    def recv_body(cls, f):
        code = read_int(f)
        msg = read_string(f)
        # Unknown codes fall back to this generic class (no extra info).
        subcls = error_classes.get(code, cls)
        extra_info = subcls.recv_error_info(f)
        return subcls(code=code, message=msg, info=extra_info)

    def summarymsg(self):
        """One-line human-readable summary of this error."""
        msg = 'code=%04x [%s] message="%s"' \
              % (self.code, self.summary, self.message)
        if self.info is not None:
            msg += (' info=' + str(self.info))
        return msg

    def __str__(self):
        return '<ErrorMessage %s>' % self.summarymsg()
    __repr__ = __str__

    @staticmethod
    def recv_error_info(f):
        # Default: this error type carries no extra payload.
        pass
class ErrorMessageSubclass(_register_msg_type):
    # Metaclass: registers each subclass with a non-None errorcode in
    # `error_classes` (in addition to the base message registration).
    def __init__(cls, name, bases, dct):
        if cls.errorcode is not None:
            error_classes[cls.errorcode] = cls

class ErrorMessageSub(ErrorMessage):
    """Root of the error-code hierarchy (py2 ``__metaclass__`` hook)."""
    __metaclass__ = ErrorMessageSubclass
    errorcode = None
# Concrete server error types, one class per protocol error code.  Classes
# with extra payload override recv_error_info to decode it.
class RequestExecutionException(ErrorMessageSub):
    pass

class RequestValidationException(ErrorMessageSub):
    pass

class ServerError(ErrorMessageSub):
    summary = 'Server error'
    errorcode = 0x0000

class ProtocolException(ErrorMessageSub):
    summary = 'Protocol error'
    errorcode = 0x000A

class UnavailableExceptionErrorMessage(RequestExecutionException):
    summary = 'Unavailable exception'
    errorcode = 0x1000

    @staticmethod
    def recv_error_info(f):
        return {
            'consistencylevel': read_consistencylevel(f),
            'required': read_int(f),
            'alive': read_int(f),
        }

class OverloadedErrorMessage(RequestExecutionException):
    summary = 'Coordinator node overloaded'
    errorcode = 0x1001

class IsBootstrappingErrorMessage(RequestExecutionException):
    summary = 'Coordinator node is bootstrapping'
    errorcode = 0x1002

class TruncateError(RequestExecutionException):
    summary = 'Error during truncate'
    errorcode = 0x1003

class RequestTimeoutException(RequestExecutionException):
    pass

class WriteTimeoutErrorMessage(RequestTimeoutException):
    summary = 'Timeout during write request'
    errorcode = 0x1100

    @staticmethod
    def recv_error_info(f):
        return {
            'consistencylevel': read_consistencylevel(f),
            'received': read_int(f),
            'blockfor': read_int(f),
            'writetype': read_string(f),
        }

class ReadTimeoutErrorMessage(RequestTimeoutException):
    summary = 'Timeout during read request'
    errorcode = 0x1200

    @staticmethod
    def recv_error_info(f):
        return {
            'consistencylevel': read_consistencylevel(f),
            'received': read_int(f),
            'blockfor': read_int(f),
            'data_present': bool(read_byte(f)),
        }

class SyntaxException(RequestValidationException):
    summary = 'Syntax error in CQL query'
    errorcode = 0x2000

class UnauthorizedErrorMessage(RequestValidationException):
    summary = 'Unauthorized'
    errorcode = 0x2100

class InvalidRequestException(RequestValidationException):
    summary = 'Invalid query'
    errorcode = 0x2200

class ConfigurationException(RequestValidationException):
    summary = 'Query invalid because of configuration issue'
    errorcode = 0x2300

class AlreadyExistsException(ConfigurationException):
    summary = 'Item already exists'
    errorcode = 0x2400

    @staticmethod
    def recv_error_info(f):
        return {
            'keyspace': read_string(f),
            'table': read_string(f),
        }
class StartupMessage(_MessageType):
    """Client STARTUP: announces CQL version and connection options."""
    opcode = 0x01
    name = 'STARTUP'
    params = ('cqlversion', 'options')

    KNOWN_OPTION_KEYS = set((
        'CQL_VERSION',
        'COMPRESSION',
    ))

    def send_body(self, f):
        # CQL_VERSION always overrides any value given in `options`.
        optmap = self.options.copy()
        optmap['CQL_VERSION'] = self.cqlversion
        write_stringmap(f, optmap)
class ReadyMessage(_MessageType):
    """Server READY: connection is usable; no body."""
    opcode = 0x02
    name = 'READY'
    params = ()

    @classmethod
    def recv_body(cls, f):
        return cls()

class AuthenticateMessage(_MessageType):
    """Server AUTHENTICATE: names the authenticator class credentials must satisfy."""
    opcode = 0x03
    name = 'AUTHENTICATE'
    params = ('authenticator',)

    @classmethod
    def recv_body(cls, f):
        authname = read_string(f)
        return cls(authenticator=authname)
class CredentialsMessage(_MessageType):
    """Client CREDENTIALS: key/value pairs answering an AUTHENTICATE."""
    opcode = 0x04
    name = 'CREDENTIALS'
    params = ('creds',)

    def send_body(self, f):
        write_short(f, len(self.creds))
        # NOTE(review): this unpacks pairs directly from `self.creds`, so it
        # only works if creds is a sequence of (key, value) tuples; a plain
        # dict would need `.items()` here (compare write_stringmap) — confirm
        # against callers.
        for credkey, credval in self.creds:
            write_string(f, credkey)
            write_string(f, credval)
class OptionsMessage(_MessageType):
    """Client OPTIONS: asks the server which startup options it supports; no body."""
    opcode = 0x05
    name = 'OPTIONS'
    params = ()

    def send_body(self, f):
        pass

class SupportedMessage(_MessageType):
    """Server SUPPORTED: multimap of supported options, CQL versions split out."""
    opcode = 0x06
    name = 'SUPPORTED'
    params = ('cqlversions', 'options',)

    @classmethod
    def recv_body(cls, f):
        options = read_stringmultimap(f)
        cqlversions = options.pop('CQL_VERSION')
        return cls(cqlversions=cqlversions, options=options)
class QueryMessage(_MessageType):
    """Client QUERY: a CQL query string plus its consistency level."""
    opcode = 0x07
    name = 'QUERY'
    params = ('query', 'consistencylevel',)

    def send_body(self, f):
        write_longstring(f, self.query)
        write_consistencylevel(f, self.consistencylevel)
class ResultMessage(_MessageType):
    """Server RESULT: payload shape depends on the `kind` discriminator."""
    opcode = 0x08
    name = 'RESULT'
    params = ('kind', 'results',)

    # Result kinds (wire values).
    KIND_VOID = 0x0001
    KIND_ROWS = 0x0002
    KIND_SET_KEYSPACE = 0x0003
    KIND_PREPARED = 0x0004
    KIND_SCHEMA_CHANGE = 0x0005

    # Wire option codes -> CQL type names (resolved via lookup_cqltype).
    type_codes = {
        0x0001: 'ascii',
        0x0002: 'bigint',
        0x0003: 'blob',
        0x0004: 'boolean',
        0x0005: 'counter',
        0x0006: 'decimal',
        0x0007: 'double',
        0x0008: 'float',
        0x0009: 'int',
        0x000A: 'text',
        0x000B: 'timestamp',
        0x000C: 'uuid',
        0x000D: 'varchar',
        0x000E: 'varint',
        0x000F: 'timeuuid',
        0x0010: 'inet',
        0x0020: 'list',
        0x0021: 'map',
        0x0022: 'set',
    }

    # Metadata flag: all columns share one keyspace/table spec.
    FLAGS_GLOBAL_TABLES_SPEC = 0x0001

    @classmethod
    def recv_body(cls, f):
        # NOTE(review): an unrecognized `kind` leaves `results` unbound and
        # raises NameError on the return below — confirm whether that can
        # happen with a well-behaved server.
        kind = read_int(f)
        if kind == cls.KIND_VOID:
            results = None
        elif kind == cls.KIND_ROWS:
            results = cls.recv_results_rows(f)
        elif kind == cls.KIND_SET_KEYSPACE:
            ksname = read_string(f)
            results = ksname
        elif kind == cls.KIND_PREPARED:
            results = cls.recv_results_prepared(f)
        elif kind == cls.KIND_SCHEMA_CHANGE:
            results = cls.recv_results_schema_change(f)
        return cls(kind=kind, results=results)

    @classmethod
    def recv_results_rows(cls, f):
        """Decode a ROWS result: column metadata followed by the row data."""
        colspecs = cls.recv_results_metadata(f)
        rowcount = read_int(f)
        rows = [cls.recv_row(f, len(colspecs)) for x in xrange(rowcount)]
        return CqlResult(column_metadata=colspecs, rows=rows)

    @classmethod
    def recv_results_prepared(cls, f):
        """Decode a PREPARED result: (queryid, parameter column specs)."""
        queryid = read_shortbytes(f)
        colspecs = cls.recv_results_metadata(f)
        return (queryid, colspecs)

    @classmethod
    def recv_results_metadata(cls, f):
        """Decode column metadata into (keyspace, table, name, type) tuples."""
        flags = read_int(f)
        glob_tblspec = bool(flags & cls.FLAGS_GLOBAL_TABLES_SPEC)
        colcount = read_int(f)
        if glob_tblspec:
            # One keyspace/table pair applies to every column.
            ksname = read_string(f)
            cfname = read_string(f)
        colspecs = []
        for x in xrange(colcount):
            if glob_tblspec:
                colksname = ksname
                colcfname = cfname
            else:
                colksname = read_string(f)
                colcfname = read_string(f)
            colname = read_string(f)
            coltype = cls.read_type(f)
            colspecs.append((colksname, colcfname, colname, coltype))
        return colspecs

    @classmethod
    def recv_results_schema_change(cls, f):
        """Decode a SCHEMA_CHANGE result: (change, keyspace, table)."""
        change = read_string(f)
        ks = read_string(f)
        cf = read_string(f)
        return (change, ks, cf)

    @classmethod
    def read_type(cls, f):
        """Decode one type option, recursing into collection subtypes."""
        optid = read_short(f)
        try:
            cqltype = lookup_cqltype(cls.type_codes[optid])
        except KeyError:
            raise cql.NotSupportedError("Unknown data type code 0x%x. Have to skip"
                                        " entire result set." % optid)
        if cqltype.typename in ('list', 'set'):
            subtype = cls.read_type(f)
            cqltype = cqltype.apply_parameters(subtype)
        elif cqltype.typename == 'map':
            keysubtype = cls.read_type(f)
            valsubtype = cls.read_type(f)
            cqltype = cqltype.apply_parameters(keysubtype, valsubtype)
        return cqltype

    @staticmethod
    def recv_row(f, colcount):
        # One [bytes] value per column; None for nulls.
        return [read_value(f) for x in xrange(colcount)]
class PrepareMessage(_MessageType):
    """Client PREPARE: ask the server to prepare a CQL query string."""
    opcode = 0x09
    name = 'PREPARE'
    params = ('query',)

    def send_body(self, f):
        write_longstring(f, self.query)

class ExecuteMessage(_MessageType):
    """Client EXECUTE: run a prepared query with bound parameter values."""
    opcode = 0x0A
    name = 'EXECUTE'
    params = ('queryid', 'queryparams', 'consistencylevel',)

    def send_body(self, f):
        write_shortbytes(f, self.queryid)
        write_short(f, len(self.queryparams))
        for param in self.queryparams:
            write_value(f, param)
        write_consistencylevel(f, self.consistencylevel)
# Event categories this client knows how to decode.
known_event_types = frozenset((
    'TOPOLOGY_CHANGE',
    'STATUS_CHANGE',
))

class RegisterMessage(_MessageType):
    """Client REGISTER: subscribe this connection to server push events."""
    opcode = 0x0B
    name = 'REGISTER'
    params = ('eventlist',)

    def send_body(self, f):
        write_stringlist(f, self.eventlist)
class EventMessage(_MessageType):
    """Server EVENT push; args decoded by a per-type recv_* method."""
    opcode = 0x0C
    name = 'EVENT'
    params = ('eventtype', 'eventargs')

    @classmethod
    def recv_body(cls, f):
        eventtype = read_string(f).upper()
        if eventtype in known_event_types:
            # Dispatch to recv_topology_change / recv_status_change by name.
            readmethod = getattr(cls, 'recv_' + eventtype.lower())
            return cls(eventtype=eventtype, eventargs=readmethod(f))
        raise cql.NotSupportedError('Unknown event type %r' % eventtype)

    @classmethod
    def recv_topology_change(cls, f):
        # "NEW_NODE" or "REMOVED_NODE"
        changetype = read_string(f)
        address = read_inet(f)
        return dict(changetype=changetype, address=address)

    @classmethod
    def recv_status_change(cls, f):
        # "UP" or "DOWN"
        changetype = read_string(f)
        address = read_inet(f)
        return dict(changetype=changetype, address=address)
# Primitive wire-format codecs ([byte], [int], [short], [consistency]).
def read_byte(f):
    """Read one signed byte."""
    return int8_unpack(f.read(1))

def write_byte(f, b):
    """Write one signed byte."""
    f.write(int8_pack(b))

def read_int(f):
    """Read a signed 32-bit big-endian integer."""
    return int32_unpack(f.read(4))

def write_int(f, i):
    """Write a signed 32-bit big-endian integer."""
    f.write(int32_pack(i))

def read_short(f):
    """Read an unsigned 16-bit big-endian integer."""
    return uint16_unpack(f.read(2))

def write_short(f, s):
    """Write an unsigned 16-bit big-endian integer."""
    f.write(uint16_pack(s))

def read_consistencylevel(f):
    """Read a [short] consistency level and return its name."""
    return ConsistencyLevel.name_from_value(read_short(f))

def write_consistencylevel(f, cl):
    """Write a consistency-level name as its [short] wire value."""
    write_short(f, ConsistencyLevel.value_from_name(cl))
def read_string(f):
    """Read a [string]: short length prefix + UTF-8 bytes."""
    size = read_short(f)
    contents = f.read(size)
    return contents.decode('utf8')

def write_string(f, s):
    """Write a [string]: short length prefix + UTF-8 bytes (py2 `unicode`)."""
    if isinstance(s, unicode):
        s = s.encode('utf8')
    write_short(f, len(s))
    f.write(s)

def read_longstring(f):
    """Read a [long string]: int length prefix + UTF-8 bytes."""
    size = read_int(f)
    contents = f.read(size)
    return contents.decode('utf8')

def write_longstring(f, s):
    """Write a [long string]: int length prefix + UTF-8 bytes."""
    if isinstance(s, unicode):
        s = s.encode('utf8')
    write_int(f, len(s))
    f.write(s)
def read_shortbytes(f):
    """Read a [short bytes] value, returning it hex-encoded (py2 str.encode('hex'))."""
    size = read_short(f)
    contents = f.read(size)
    return contents.encode('hex')

def write_shortbytes(f, sb):
    """Write a hex-encoded string as a [short bytes] value."""
    decoded = sb.decode('hex')
    write_short(f, len(decoded))
    f.write(decoded)
def read_stringlist(f):
numstrs = read_short(f)
return [read_string(f) for x in xrange(numstrs)]
def write_stringlist(f, stringlist):
write_short(f, len(stringlist))
for s in stringlist:
write_string(f, s)
def read_stringmap(f):
numpairs = read_short(f)
strmap = {}
for x in xrange(numpairs):
k = read_string(f)
strmap[k] = read_string(f)
return strmap
def write_stringmap(f, strmap):
    """Write a [string map]: a [short] pair count, then each key and
    value as [string]s."""
    write_short(f, len(strmap))
    for key, value in strmap.items():
        write_string(f, key)
        write_string(f, value)
def read_stringmultimap(f):
    """Read a [string multimap]: a [short] key count, then for each key a
    [string] followed by a [string list]; returns a dict of lists."""
    result = {}
    for _ in xrange(read_short(f)):
        key = read_string(f)
        result[key] = read_stringlist(f)
    return result
def write_stringmultimap(f, strmmap):
    """Write a [string multimap]: a [short] key count, then for each key
    a [string] followed by a [string list] of its values."""
    write_short(f, len(strmmap))
    for key, values in strmmap.items():
        write_string(f, key)
        write_stringlist(f, values)
def read_value(f):
    """Read a [bytes] value: an [int] length prefix followed by the raw
    bytes. A negative length denotes null, returned as None."""
    size = read_int(f)
    return None if size < 0 else f.read(size)
def write_value(f, v):
    """Write a [bytes] value; None is encoded as length -1 with no
    payload."""
    if v is None:
        write_int(f, -1)
        return
    write_int(f, len(v))
    f.write(v)
def read_inet(f):
    """Read an [inet]: a one-byte address length, the raw address bytes,
    then an [int] port. Returns (ip_string, port).

    Raises cql.InternalError for address lengths other than 4 (IPv4)
    or 16 (IPv6).
    """
    size = read_byte(f)
    addrbytes = f.read(size)
    port = read_int(f)
    try:
        addrfam = {4: socket.AF_INET, 16: socket.AF_INET6}[size]
    except KeyError:
        raise cql.InternalError("bad inet address: %r" % (addrbytes,))
    return (socket.inet_ntop(addrfam, addrbytes), port)
def write_inet(f, addrtuple):
    """Write an [inet] from an (ip_string, port) tuple; IPv6 is detected
    by the presence of ':' in the address text."""
    addr, port = addrtuple
    family = socket.AF_INET6 if ':' in addr else socket.AF_INET
    packed = socket.inet_pton(family, addr)
    write_byte(f, len(packed))
    f.write(packed)
    write_int(f, port)
def ctzb(n):
if n == 0: return 127
if n & 0x1:
return 0
else:
c = 1
if n & 0xFFFFFFFFFFFFFFFF == 0:
n >>= 64; c += 64
if n & 0xFFFFFFFF == 0:
n >>= 32; c += 32
if n & 0xFFFF == 0:
n >>= 16; c += 16
if n & 0xFF == 0:
n >>= 8; c += 8
if n & 0xF == 0:
n >>= 4; c += 4
if n & 0x3 == 0:
n >>= 2; c += 2
c -= n & 0x1
return c
class NativeCursor(Cursor):
    # Cursor implementation for the native (binary) CQL protocol; all
    # requests are routed through the owning NativeConnection.

    def prepare_query(self, query):
        """Prepare `query` server-side; return a PreparedQuery handle."""
        pquery, paramnames = prepare_query(query)
        prepared = self._connection.wait_for_request(PrepareMessage(query=pquery))
        if isinstance(prepared, ErrorMessage):
            raise cql.Error('Query preparation failed: %s' % prepared.summarymsg())
        if prepared.kind != ResultMessage.KIND_PREPARED:
            raise cql.InternalError('Query preparation did not result in prepared query')
        queryid, colspecs = prepared.results
        # Unpack per-column specs; only the column types are needed to
        # serialize parameters later.
        kss, cfs, names, ctypes = zip(*colspecs)
        return PreparedQuery(query, queryid, ctypes, paramnames)

    def get_response(self, query, consistency_level):
        """Execute a plain QUERY message and return the raw response."""
        qm = QueryMessage(query=query, consistencylevel=consistency_level)
        return self._connection.wait_for_request(qm)

    def get_response_prepared(self, prepared_query, params, consistency_level):
        """Execute a previously prepared query with the given parameters."""
        # Order the parameter values to match the prepared statement's
        # declared parameter names.
        qparams = [params[pname] for pname in prepared_query.paramnames]
        em = ExecuteMessage(queryid=prepared_query.itemid, queryparams=qparams,
                            consistencylevel=consistency_level)
        return self._connection.wait_for_request(em)

    def get_column_metadata(self, column_id):
        """Delegate metadata/type lookup to the native result decoder."""
        return self.decoder.decode_metadata_and_type_native(column_id)

    def columninfo(self, row):
        # Native results identify columns positionally, so column ids are
        # just the row indexes.
        return xrange(len(row))

    def columnvalues(self, row):
        # Rows arrive as plain value sequences already.
        return row

    def handle_cql_execution_errors(self, response):
        """Raise the matching DB-API exception if response is an error."""
        if not isinstance(response, ErrorMessage):
            return
        if isinstance(response, UnauthorizedErrorMessage):
            eclass = cql.NotAuthenticated
        elif isinstance(response, RequestExecutionException):
            eclass = cql.OperationalError
        elif isinstance(response, RequestValidationException):
            eclass = cql.ProgrammingError
        else:
            eclass = cql.InternalError
        raise eclass(response.summarymsg())

    def process_execution_results(self, response, decoder=None):
        """Populate cursor state (description, result rows, rowcount) from
        a ResultMessage; raises for error or unexpected responses."""
        self.handle_cql_execution_errors(response)
        if not isinstance(response, ResultMessage):
            raise cql.InternalError('Query execution resulted in %s!?' % (response,))
        if response.kind == ResultMessage.KIND_PREPARED:
            raise cql.InternalError('Query execution resulted in prepared query!?')
        self.rs_idx = 0
        self.description = None
        self.result = []
        self.name_info = ()
        if response.kind == ResultMessage.KIND_VOID:
            self.description = _VOID_DESCRIPTION
        elif response.kind == ResultMessage.KIND_SET_KEYSPACE:
            # Keep the connection's notion of the current keyspace in sync.
            self._connection.keyspace_changed(response.results)
            self.description = _VOID_DESCRIPTION
        elif response.kind == ResultMessage.KIND_ROWS:
            schema = response.results.column_metadata
            self.decoder = (decoder or self.default_decoder)(schema)
            self.result = response.results.rows
            if self.result:
                self.get_metadata_info(self.result[0])
        elif response.kind == ResultMessage.KIND_SCHEMA_CHANGE:
            self.description = _VOID_DESCRIPTION
        else:
            raise Exception('unknown response kind %s: %s' % (response.kind, response))
        self.rowcount = len(self.result)

    def get_compression(self):
        # Compression is a connection-level setting for the native protocol.
        return self._connection.compression

    def set_compression(self, val):
        # Only accept no-op assignments; per-cursor compression cannot
        # differ from the connection's.
        if val != self.get_compression():
            raise NotImplementedError("Setting per-cursor compression is not "
                                      "supported in NativeCursor.")
    compression = property(get_compression, set_compression)
class debugsock:
    """Wrap a socket in a file-like read/write interface that prints every
    chunk sent and received (debugging aid; Python 2 print syntax)."""
    def __init__(self, sock):
        self.sock = sock
    def write(self, data):
        print '[sending %r]' % (data,)
        self.sock.send(data)
    def read(self, readlen):
        # Loop until exactly readlen bytes have arrived; an empty string
        # from recv() means the peer closed early.
        data = ''
        while readlen > 0:
            add = self.sock.recv(readlen)
            print '[received %r]' % (add,)
            if add == '':
                raise cql.InternalError("short read of %s bytes (%s expected)"
                                        % (len(data), len(data) + readlen))
            data += add
            readlen -= len(add)
        return data
    def close(self):
        # Intentionally a no-op: the connection object owns the underlying
        # socket and closes it itself.
        pass
# Compression codecs available in this process, keyed by the name used in
# the protocol's STARTUP negotiation: name -> (compress, decompress).
locally_supported_compressions = {}

try:
    import snappy
except ImportError:
    # snappy support is optional; leave the codec table empty when the
    # module is unavailable.
    pass
else:
    # work around apparently buggy snappy decompress
    def decompress(byts):
        if byts == '\x00':
            return ''
        return snappy.decompress(byts)
    locally_supported_compressions['snappy'] = (snappy.compress, decompress)
class NativeConnection(Connection):
    """Connection speaking the native binary CQL protocol.

    Stream ids (0-127) are tracked in a 128-bit free-slot mask so that
    multiple requests can be outstanding on one socket at a time.
    """
    cursorclass = NativeCursor

    def __init__(self, *args, **kwargs):
        # One bit per available stream id; all 128 start out free.
        self.reqid_slots = (1 << 128) - 1
        # Responses received but not yet claimed, keyed by stream id.
        self.responses = {}
        # Callbacks waiting on a stream id (see callback_when()).
        self.waiting = {}
        self.conn_ready = False
        self.compressor = self.decompressor = None
        # eventtype -> list of callbacks (see register_watcher()).
        self.event_watchers = {}
        Connection.__init__(self, *args, **kwargs)

    def make_reqid(self):
        """Allocate a free stream id; raises when all 128 are in use."""
        if self.reqid_slots == 0:
            raise cql.ProgrammingError("Unable to acquire a stream id")
        slot = ctzb(self.reqid_slots)
        self.reqid_slots &= ~(1 << slot)
        return slot

    def release_reqid(self, reqid):
        """Return a stream id to the free pool."""
        self.reqid_slots |= (1 << reqid)

    def establish_connection(self):
        """Connect the socket and run the OPTIONS/STARTUP handshake,
        negotiating CQL version, compression, and credentials."""
        self.conn_ready = False
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        # Unbuffered file wrapper (Python 2 makefile signature).
        self.socketf = s.makefile(bufsize=0)
        self.sockfd = s
        self.open_socket = True
        supported = self.wait_for_request(OptionsMessage())
        self.supported_cql_versions = supported.cqlversions
        self.remote_supported_compressions = supported.options['COMPRESSION']
        if self.cql_version:
            if self.cql_version not in self.supported_cql_versions:
                raise ProgrammingError("cql_version %r is not supported by"
                                       " remote (w/ native protocol). Supported"
                                       " versions: %r"
                                       % (self.cql_version, self.supported_cql_versions))
        else:
            # No version requested; take the server's first offering.
            self.cql_version = self.supported_cql_versions[0]
        opts = {}
        compresstype = None
        if self.compression:
            overlap = set(locally_supported_compressions) \
                      & set(self.remote_supported_compressions)
            if len(overlap) == 0:
                warn("No available compression types supported on both ends."
                     " locally supported: %r. remotely supported: %r"
                     % (locally_supported_compressions,
                        self.remote_supported_compressions))
            else:
                compresstype = iter(overlap).next()  # choose any
                opts['COMPRESSION'] = compresstype
                compr, decompr = locally_supported_compressions[compresstype]
                # set the decompressor here, but set the compressor only after
                # a successful Ready message
                self.decompressor = decompr
        sm = StartupMessage(cqlversion=self.cql_version, options=opts)
        startup_response = self.wait_for_request(sm)
        # Loop until the server says Ready; it may first demand credentials.
        while True:
            if isinstance(startup_response, ReadyMessage):
                self.conn_ready = True
                if compresstype:
                    self.compressor = compr
                break
            if isinstance(startup_response, AuthenticateMessage):
                self.authenticator = startup_response.authenticator
                if self.credentials is None:
                    raise ProgrammingError('Remote end requires authentication.')
                cm = CredentialsMessage(creds=self.credentials)
                startup_response = self.wait_for_request(cm)
            elif isinstance(startup_response, ErrorMessage):
                raise ProgrammingError("Server did not accept credentials. %s"
                                       % startup_response.summarymsg())
            else:
                raise cql.InternalError("Unexpected response %r during connection setup"
                                        % startup_response)

    def set_initial_keyspace(self, keyspace):
        """Issue a USE statement for the connection's keyspace.

        NOTE(review): the `keyspace` argument is ignored in favor of
        self.keyspace -- confirm this is intended by the callers.
        """
        c = self.cursor()
        c.execute('USE %s' % cql_quote_name(self.keyspace))
        c.close()

    def terminate_connection(self):
        """Close the buffered file object and the underlying socket."""
        self.socketf.close()
        self.sockfd.close()

    def wait_for_request(self, msg):
        """
        Given a message, send it to the server, wait for a response, and
        return the response.
        """
        return self.wait_for_requests(msg)[0]

    def send_msg(self, msg):
        """Allocate a stream id, send msg on it, and return the id."""
        reqid = self.make_reqid()
        msg.send(self.socketf, reqid, compression=self.compressor)
        return reqid

    def wait_for_requests(self, *msgs):
        """
        Given any number of message objects, send them all to the server
        and wait for responses to each one. Once they arrive, return all
        of the responses in the same order as the messages to which they
        respond.
        """
        reqids = []
        for msg in msgs:
            reqid = self.send_msg(msg)
            reqids.append(reqid)
        resultdict = self.wait_for_results(*reqids)
        return [resultdict[reqid] for reqid in reqids]

    def wait_for_results(self, *reqids):
        """
        Given any number of stream-ids, wait until responses have arrived for
        each one, and return a dictionary mapping the stream-ids to the
        appropriate results.

        For internal use, None may be passed in place of a reqid, which will
        be considered satisfied when a message of any kind is received (and, if
        appropriate, handled).
        """
        waiting_for = set(reqids)
        results = {}
        # First drain any responses that were already received and queued.
        for r in reqids:
            try:
                result = self.responses.pop(r)
            except KeyError:
                pass
            else:
                results[r] = result
                waiting_for.remove(r)
        while waiting_for:
            newmsg = read_frame(self.socketf, decompressor=self.decompressor)
            if newmsg.stream_id in waiting_for:
                results[newmsg.stream_id] = newmsg
                waiting_for.remove(newmsg.stream_id)
            else:
                # Not one of ours: dispatch to a registered waiter/watcher
                # or queue the response for a later waiter.
                self.handle_incoming(newmsg)
                if None in waiting_for:
                    # A None reqid is satisfied by any incoming message.
                    results[None] = newmsg
                    waiting_for.remove(None)
            if newmsg.stream_id >= 0:
                self.release_reqid(newmsg.stream_id)
        return results

    def wait_for_result(self, reqid):
        """
        Given a stream-id, wait until a response arrives with that stream-id,
        and return the msg.
        """
        return self.wait_for_results(reqid)[reqid]

    def handle_incoming(self, msg):
        """Route a message we weren't explicitly waiting on.

        Negative stream ids are server-pushed events; otherwise fire any
        registered callback or queue the response for a later waiter.
        """
        if msg.stream_id < 0:
            self.handle_pushed(msg)
            return
        try:
            cb = self.waiting.pop(msg.stream_id)
        except KeyError:
            self.responses[msg.stream_id] = msg
        else:
            cb(msg)

    def callback_when(self, reqid, cb):
        """
        Callback cb with a message object once a message with a stream-id
        of reqid is received. The callback may be immediate, if a response
        is already in the received queue.

        Otherwise, note also that the callback may not be called immediately
        upon the arrival of the response packet; it may have to wait until
        something else waits on a result.
        """
        try:
            msg = self.responses.pop(reqid)
        except KeyError:
            pass
        else:
            return cb(msg)
        self.waiting[reqid] = cb

    def request_and_callback(self, msg, cb):
        """
        Given a message msg and a callable cb, send the message to the server
        and call cb with the result once it arrives. Note that the callback
        may not be called immediately upon the arrival of the response packet;
        it may have to wait until something else waits on a result.
        """
        reqid = self.send_msg(msg)
        self.callback_when(reqid, cb)

    def handle_pushed(self, msg):
        """
        Process an incoming message originated by the server.
        """
        watchers = self.event_watchers.get(msg.eventtype, ())
        for cb in watchers:
            cb(msg.eventargs)

    def register_watcher(self, eventtype, cb):
        """
        Request that any events of the given type be passed to the given
        callback when they arrive. Note that the callback may not be called
        immediately upon the arrival of the event packet; it may have to wait
        until something else waits on a result, or until wait_for_event() is
        called.

        If the event type has not been registered for already, this may
        block while a new REGISTER message is sent to the server.

        The available event types are in the cql.native.known_event_types
        list.

        When an event arrives, a dictionary will be passed to the callback
        with the info about the event. Some example result dictionaries:

        (For STATUS_CHANGE events:)

          {'changetype': u'DOWN', 'address': ('12.114.19.76', 8000)}

        (For TOPOLOGY_CHANGE events:)

          {'changetype': u'NEW_NODE', 'address': ('19.10.122.13', 8000)}
        """
        # Normalize to unicode (Python 2 str -> unicode).
        if isinstance(eventtype, str):
            eventtype = eventtype.decode('utf8')
        try:
            watchers = self.event_watchers[eventtype]
        except KeyError:
            # First watcher for this type: register with the server.
            ans = self.wait_for_request(RegisterMessage(eventlist=(eventtype,)))
            if isinstance(ans, ErrorMessage):
                raise cql.ProgrammingError("Server did not accept registration"
                                           " for %s events: %s"
                                           % (eventtype, ans.summarymsg()))
            watchers = self.event_watchers.setdefault(eventtype, [])
        watchers.append(cb)

    def unregister_watcher(self, eventtype, cb):
        """
        Given an eventtype and a callback previously registered with
        register_watcher(), remove that callback from the list of watchers for
        the given event type.
        """
        if isinstance(eventtype, str):
            eventtype = eventtype.decode('utf8')
        self.event_watchers[eventtype].remove(cb)

    def wait_for_event(self):
        """
        Wait for any sort of event to arrive, and handle it via the
        registered callbacks. It is recommended that some event watchers
        be registered before calling this; otherwise, no events will be
        sent by the server.
        """
        eventsseen = []
        def i_saw_an_event(ev):
            eventsseen.append(ev)
        # Temporarily append a sentinel watcher to every watcher list so we
        # can tell when at least one event has been dispatched.
        wlists = self.event_watchers.values()
        for wlist in wlists:
            wlist.append(i_saw_an_event)
        while not eventsseen:
            self.wait_for_result(None)
        for wlist in wlists:
            wlist.remove(i_saw_an_event)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.migration_service import (
MigrationServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.services.migration_service import transports
from google.cloud.aiplatform_v1.services.migration_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from google.longrunning import operations_pb2
from google.oauth2 import service_account
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
# Skip markers gating tests on the installed google-auth version (see the
# TODO above about removing them once google-auth >= 1.25.0 is required).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
    reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
    """Dummy mTLS client-cert callback returning static (cert, key) bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when the client's DEFAULT_ENDPOINT is a
    localhost address; otherwise return the default endpoint unchanged.

    Used to patch DEFAULT_ENDPOINT so mTLS endpoint derivation is
    observable in the tests below.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    mtls variants and leaves everything else (including None) unchanged."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    cases = [
        (None, None),
        (api_endpoint, api_mtls_endpoint),
        (api_mtls_endpoint, api_mtls_endpoint),
        (sandbox_endpoint, sandbox_mtls_endpoint),
        (sandbox_mtls_endpoint, sandbox_mtls_endpoint),
        (non_googleapi, non_googleapi),
    ]
    for given, expected in cases:
        assert MigrationServiceClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize(
    "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,]
)
def test_migration_service_client_from_service_account_info(client_class):
    """from_service_account_info() builds a client wired to the mocked
    credentials and the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.MigrationServiceGrpcTransport, "grpc"),
        (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_migration_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """always_use_jwt_access=True triggers with_always_use_jwt_access on
    the credentials; False leaves them untouched."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [MigrationServiceClient, MigrationServiceAsyncClient,]
)
def test_migration_service_client_from_service_account_file(client_class):
    """Both from_service_account_file() and its from_service_account_json()
    alias build clients wired to the mocked credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "aiplatform.googleapis.com:443"
def test_migration_service_client_get_transport_class():
    """get_transport_class() defaults to a known transport and resolves
    the "grpc" label to the gRPC transport class."""
    default_transport = MigrationServiceClient.get_transport_class()
    assert default_transport in [transports.MigrationServiceGrpcTransport]

    named_transport = MigrationServiceClient.get_transport_class("grpc")
    assert named_transport == transports.MigrationServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    MigrationServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceClient),
)
@mock.patch.object(
    MigrationServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceAsyncClient),
)
def test_migration_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors transport instances/names, an explicit
    api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT values, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(MigrationServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(MigrationServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            MigrationServiceClient,
            transports.MigrationServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    MigrationServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceClient),
)
@mock.patch.object(
    MigrationServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(MigrationServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_migration_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the
    mtls endpoint only when a client cert is available AND
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance left
                    # over from the previous block -- it is read before the
                    # reassignment below. DEFAULT_ENDPOINT is a class
                    # attribute, so the expected values are unaffected.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_migration_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes from client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
        (
            MigrationServiceAsyncClient,
            transports.MigrationServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_migration_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file in client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_migration_service_client_client_options_from_dict():
    """client_options may be passed as a plain dict; api_endpoint is still
    forwarded to the transport."""
    with mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = MigrationServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_search_migratable_resources(
    transport: str = "grpc",
    request_type=migration_service.SearchMigratableResourcesRequest,
):
    """search_migratable_resources() issues one RPC with a default request
    and wraps the response in a pager."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = migration_service.SearchMigratableResourcesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.search_migratable_resources(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == migration_service.SearchMigratableResourcesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchMigratableResourcesPager)
    assert response.next_page_token == "next_page_token_value"
def test_search_migratable_resources_from_dict():
    # Exercise the same path with a plain dict in place of the proto request.
    test_search_migratable_resources(request_type=dict)
def test_search_migratable_resources_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        client.search_migratable_resources()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call still sends a default request message.
        assert args[0] == migration_service.SearchMigratableResourcesRequest()
@pytest.mark.asyncio
async def test_search_migratable_resources_async(
    transport: str = "grpc_asyncio",
    request_type=migration_service.SearchMigratableResourcesRequest,
):
    """Async variant: one RPC issued, response wrapped in an async pager."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.search_migratable_resources(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == migration_service.SearchMigratableResourcesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_migratable_resources_async_from_dict():
    """Re-run the async search test with a dict-typed request body."""
    await test_search_migratable_resources_async(request_type=dict)
def test_search_migratable_resources_field_headers():
    """Routing fields in the request are mirrored into x-goog-request-params."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = migration_service.SearchMigratableResourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        call.return_value = migration_service.SearchMigratableResourcesResponse()
        client.search_migratable_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_migratable_resources_field_headers_async():
    """Async variant: routing fields are mirrored into x-goog-request-params."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = migration_service.SearchMigratableResourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse()
        )
        await client.search_migratable_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_search_migratable_resources_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = migration_service.SearchMigratableResourcesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.search_migratable_resources(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_search_migratable_resources_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.search_migratable_resources(
            migration_service.SearchMigratableResourcesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_search_migratable_resources_flattened_async():
    """Async variant: flattened keyword arguments are folded into the request."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        # NOTE: a redundant plain-message assignment that was immediately
        # overwritten by this fake has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            migration_service.SearchMigratableResourcesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_migratable_resources(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_search_migratable_resources_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.search_migratable_resources(
            migration_service.SearchMigratableResourcesRequest(), parent="parent_value",
        )
def test_search_migratable_resources_pager():
    """The sync pager flattens all mocked pages and carries routing metadata."""
    # FIX: instantiate AnonymousCredentials() — the class object (no parens)
    # was being passed, inconsistent with every other test in this file.
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 resources).
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.search_migratable_resources(request={})
        # The pager records the routing-header metadata it will send.
        assert pager._metadata == metadata
        # Iterating the pager flattens all pages into individual resources.
        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, migratable_resource.MigratableResource) for i in results
        )
def test_search_migratable_resources_pages():
    """Each page surfaced by the pager exposes its raw next_page_token."""
    # FIX: instantiate AnonymousCredentials() — the class object (no parens)
    # was being passed, inconsistent with every other test in this file.
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.search_migratable_resources(request={}).pages)
        # The final page has no next_page_token (empty string).
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_migratable_resources_async_pager():
    """The async pager flattens all mocked pages into individual resources."""
    # FIX: instantiate AnonymousCredentials() — the class object (no parens)
    # was being passed, inconsistent with every other test in this file.
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 resources).
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.search_migratable_resources(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(
            isinstance(i, migratable_resource.MigratableResource) for i in responses
        )
@pytest.mark.asyncio
async def test_search_migratable_resources_async_pages():
    """Each async page exposes its raw next_page_token."""
    # FIX: instantiate AnonymousCredentials() — the class object (no parens)
    # was being passed, inconsistent with every other test in this file.
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_migratable_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
                next_page_token="abc",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[], next_page_token="def",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[migratable_resource.MigratableResource(),],
                next_page_token="ghi",
            ),
            migration_service.SearchMigratableResourcesResponse(
                migratable_resources=[
                    migratable_resource.MigratableResource(),
                    migratable_resource.MigratableResource(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_migratable_resources(request={})).pages:
            pages.append(page_)
        # The final page has no next_page_token (empty string).
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_batch_migrate_resources(
    transport: str = "grpc", request_type=migration_service.BatchMigrateResourcesRequest
):
    """Batch migrate forwards the request unchanged and returns an LRO future."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.batch_migrate_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == migration_service.BatchMigrateResourcesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, future.Future)
def test_batch_migrate_resources_from_dict():
    """Re-run the base batch-migrate test with a dict-typed request body."""
    test_batch_migrate_resources(request_type=dict)
def test_batch_migrate_resources_empty_call():
    """Calling the method with no arguments must still send a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        client.batch_migrate_resources()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call is sent as a default-constructed request message.
        assert args[0] == migration_service.BatchMigrateResourcesRequest()
@pytest.mark.asyncio
async def test_batch_migrate_resources_async(
    transport: str = "grpc_asyncio",
    request_type=migration_service.BatchMigrateResourcesRequest,
):
    """Async batch migrate forwards the request and returns an LRO future."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.batch_migrate_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == migration_service.BatchMigrateResourcesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_migrate_resources_async_from_dict():
    """Re-run the async batch-migrate test with a dict-typed request body."""
    await test_batch_migrate_resources_async(request_type=dict)
def test_batch_migrate_resources_field_headers():
    """Routing fields in the request are mirrored into x-goog-request-params."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = migration_service.BatchMigrateResourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_migrate_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_migrate_resources_field_headers_async():
    """Async variant: routing fields are mirrored into x-goog-request-params."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = migration_service.BatchMigrateResourcesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_migrate_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_migrate_resources_flattened():
    """Flattened keyword arguments are folded into the request message."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.batch_migrate_resources(
            parent="parent_value",
            migrate_resource_requests=[
                migration_service.MigrateResourceRequest(
                    migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                        endpoint="endpoint_value"
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].migrate_resource_requests == [
            migration_service.MigrateResourceRequest(
                migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                    endpoint="endpoint_value"
                )
            )
        ]
def test_batch_migrate_resources_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = MigrationServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.batch_migrate_resources(
            migration_service.BatchMigrateResourcesRequest(),
            parent="parent_value",
            migrate_resource_requests=[
                migration_service.MigrateResourceRequest(
                    migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                        endpoint="endpoint_value"
                    )
                )
            ],
        )
@pytest.mark.asyncio
async def test_batch_migrate_resources_flattened_async():
    """Async variant: flattened keyword arguments are folded into the request."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_migrate_resources), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        # NOTE: a redundant plain-Operation assignment that was immediately
        # overwritten by this fake has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_migrate_resources(
            parent="parent_value",
            migrate_resource_requests=[
                migration_service.MigrateResourceRequest(
                    migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                        endpoint="endpoint_value"
                    )
                )
            ],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].migrate_resource_requests == [
            migration_service.MigrateResourceRequest(
                migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                    endpoint="endpoint_value"
                )
            )
        ]
@pytest.mark.asyncio
async def test_batch_migrate_resources_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.batch_migrate_resources(
            migration_service.BatchMigrateResourcesRequest(),
            parent="parent_value",
            migrate_resource_requests=[
                migration_service.MigrateResourceRequest(
                    migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                        endpoint="endpoint_value"
                    )
                )
            ],
        )
def test_credentials_transport_error():
    """Supplying a transport instance alongside credentials, a credentials
    file, or scopes must raise ValueError in the client constructor."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MigrationServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = MigrationServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client constructed around a pre-built transport uses it verbatim."""
    custom_transport = transports.MigrationServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = MigrationServiceClient(transport=custom_transport)
    # The exact object handed in is the one the client exposes.
    assert client.transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a live channel."""
    creds = ga_credentials.AnonymousCredentials()
    for transport_cls in (
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(credentials=creds)
        # A freshly constructed transport should have a truthy channel.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Constructing a transport without credentials falls back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the sync gRPC transport is selected."""
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.MigrationServiceGrpcTransport)
def test_migration_service_base_transport_error():
    """Credentials object and credentials_file together must be rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.MigrationServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_migration_service_base_transport():
    """Every abstract transport method and the LRO client raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.MigrationServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "search_migratable_resources",
        "batch_migrate_resources",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
@requires_google_auth_gte_1_25_0
def test_migration_service_base_transport_with_credentials_file():
    """With google-auth >= 1.25, a credentials file is loaded via default_scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MigrationServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_migration_service_base_transport_with_credentials_file_old_google_auth():
    """With google-auth < 1.25 (no default_scopes), scopes are passed directly."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MigrationServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_migration_service_base_transport_with_adc():
    """The base transport falls back to ADC when given no credentials."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.MigrationServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_migration_service_auth_adc():
    """Client construction without credentials calls ADC with default_scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MigrationServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_migration_service_auth_adc_old_google_auth():
    """With google-auth < 1.25, ADC is called with scopes instead of default_scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        MigrationServiceClient()
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_migration_service_transport_auth_adc(transport_class):
    """Transport construction passes scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_migration_service_transport_auth_adc_old_google_auth(transport_class):
    """With google-auth < 1.25, transport construction passes scopes to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.MigrationServiceGrpcTransport, grpc_helpers),
        (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_migration_service_transport_create_channel(transport_class, grpc_helpers):
    """The transport creates its channel with host, scopes, and message-size options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """Explicit ssl_channel_credentials wins; otherwise client_cert_source_for_mtls
    is used to build channel credentials."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_migration_service_host_no_port():
    """Default port 443 is appended when the endpoint omits a port."""
    options = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com")
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "aiplatform.googleapis.com:443"
def test_migration_service_host_with_port():
    """An explicit port in the endpoint is preserved verbatim."""
    options = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com:8000")
    client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_migration_service_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MigrationServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # FIX: compare with None by identity (`is`), per PEP 8, not `==`.
    assert transport._ssl_channel_credentials is None
def test_migration_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is used verbatim by the async transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.MigrationServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # FIX: compare with None by identity (`is`), per PEP 8, not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an mTLS
    channel (and emit a DeprecationWarning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key pair from the callback is fed to ssl_channel_credentials.
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.MigrationServiceGrpcTransport,
        transports.MigrationServiceGrpcAsyncIOTransport,
    ],
)
def test_migration_service_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no cert source, ADC-derived SSL credentials
    are used for the mTLS channel (deprecated path)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_migration_service_grpc_lro_client():
    """The sync gRPC transport exposes a cached api-core operations client."""
    lro_client = MigrationServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    grpc_transport = lro_client.transport
    ops_client = grpc_transport.operations_client
    # The transport must hand back an api-core operations client...
    assert isinstance(ops_client, operations_v1.OperationsClient)
    # ...and the property must return the exact same object on every access.
    assert grpc_transport.operations_client is ops_client
def test_migration_service_grpc_lro_async_client():
    """The async gRPC transport exposes a cached async operations client."""
    lro_client = MigrationServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    grpc_transport = lro_client.transport
    ops_client = grpc_transport.operations_client
    # The transport must hand back an api-core async operations client...
    assert isinstance(ops_client, operations_v1.OperationsAsyncClient)
    # ...and the property must return the exact same object on every access.
    assert grpc_transport.operations_client is ops_client
def test_annotated_dataset_path():
    """annotated_dataset_path() renders the expected resource template."""
    want = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
        project="squid", dataset="clam", annotated_dataset="whelk",
    )
    got = MigrationServiceClient.annotated_dataset_path("squid", "clam", "whelk")
    assert got == want


def test_parse_annotated_dataset_path():
    """parse_annotated_dataset_path() inverts annotated_dataset_path()."""
    parts = {
        "project": "octopus",
        "dataset": "oyster",
        "annotated_dataset": "nudibranch",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.annotated_dataset_path(**parts)
    assert MigrationServiceClient.parse_annotated_dataset_path(path) == parts
# NOTE(review): `test_dataset_path` / `test_parse_dataset_path` are redefined
# later in this module, so this pair is shadowed and never collected by
# pytest (flake8 F811) -- confirm intent and rename or delete the duplicates.
def test_dataset_path():
    """dataset_path() renders the location-qualified dataset template."""
    want = "projects/{project}/locations/{location}/datasets/{dataset}".format(
        project="cuttlefish", location="mussel", dataset="winkle",
    )
    got = MigrationServiceClient.dataset_path("cuttlefish", "mussel", "winkle")
    assert got == want


def test_parse_dataset_path():
    """parse_dataset_path() inverts dataset_path()."""
    parts = {
        "project": "nautilus",
        "location": "scallop",
        "dataset": "abalone",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.dataset_path(**parts)
    assert MigrationServiceClient.parse_dataset_path(path) == parts
# NOTE(review): duplicate definition -- this pair shadows the one above and
# is itself shadowed by a later redefinition, so it never runs under pytest
# (flake8 F811).  Confirm intent and rename or delete the duplicates.
def test_dataset_path():
    """dataset_path() renders the location-qualified dataset template."""
    want = "projects/{project}/locations/{location}/datasets/{dataset}".format(
        project="squid", location="clam", dataset="whelk",
    )
    got = MigrationServiceClient.dataset_path("squid", "clam", "whelk")
    assert got == want


def test_parse_dataset_path():
    """parse_dataset_path() inverts dataset_path()."""
    parts = {
        "project": "octopus",
        "location": "oyster",
        "dataset": "nudibranch",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.dataset_path(**parts)
    assert MigrationServiceClient.parse_dataset_path(path) == parts
def test_dataset_path():
    """dataset_path() renders the location-free dataset template.

    This is the last definition of this test name in the module, so it is
    the one pytest actually collects.
    """
    want = "projects/{project}/datasets/{dataset}".format(
        project="cuttlefish", dataset="mussel",
    )
    got = MigrationServiceClient.dataset_path("cuttlefish", "mussel")
    assert got == want


def test_parse_dataset_path():
    """parse_dataset_path() inverts dataset_path()."""
    parts = {
        "project": "winkle",
        "dataset": "nautilus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.dataset_path(**parts)
    assert MigrationServiceClient.parse_dataset_path(path) == parts
# NOTE(review): `test_model_path` / `test_parse_model_path` are redefined
# below with identical logic, so this pair is shadowed and never collected
# by pytest (flake8 F811) -- confirm intent and delete one copy.
def test_model_path():
    """model_path() renders the expected model resource template."""
    want = "projects/{project}/locations/{location}/models/{model}".format(
        project="scallop", location="abalone", model="squid",
    )
    got = MigrationServiceClient.model_path("scallop", "abalone", "squid")
    assert got == want


def test_parse_model_path():
    """parse_model_path() inverts model_path()."""
    parts = {
        "project": "clam",
        "location": "whelk",
        "model": "octopus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.model_path(**parts)
    assert MigrationServiceClient.parse_model_path(path) == parts
def test_model_path():
    """model_path() renders the expected model resource template."""
    want = "projects/{project}/locations/{location}/models/{model}".format(
        project="oyster", location="nudibranch", model="cuttlefish",
    )
    got = MigrationServiceClient.model_path("oyster", "nudibranch", "cuttlefish")
    assert got == want


def test_parse_model_path():
    """parse_model_path() inverts model_path()."""
    parts = {
        "project": "mussel",
        "location": "winkle",
        "model": "nautilus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.model_path(**parts)
    assert MigrationServiceClient.parse_model_path(path) == parts
def test_version_path():
    """version_path() renders the model-version resource template."""
    want = "projects/{project}/models/{model}/versions/{version}".format(
        project="scallop", model="abalone", version="squid",
    )
    got = MigrationServiceClient.version_path("scallop", "abalone", "squid")
    assert got == want


def test_parse_version_path():
    """parse_version_path() inverts version_path()."""
    parts = {
        "project": "clam",
        "model": "whelk",
        "version": "octopus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.version_path(**parts)
    assert MigrationServiceClient.parse_version_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path() renders the expected template."""
    want = "billingAccounts/{billing_account}".format(
        billing_account="oyster",
    )
    got = MigrationServiceClient.common_billing_account_path("oyster")
    assert got == want


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts the builder."""
    parts = {
        "billing_account": "nudibranch",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.common_billing_account_path(**parts)
    assert MigrationServiceClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path() renders the expected template."""
    want = "folders/{folder}".format(folder="cuttlefish",)
    got = MigrationServiceClient.common_folder_path("cuttlefish")
    assert got == want


def test_parse_common_folder_path():
    """parse_common_folder_path() inverts the builder."""
    parts = {
        "folder": "mussel",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.common_folder_path(**parts)
    assert MigrationServiceClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path() renders the expected template."""
    want = "organizations/{organization}".format(organization="winkle",)
    got = MigrationServiceClient.common_organization_path("winkle")
    assert got == want


def test_parse_common_organization_path():
    """parse_common_organization_path() inverts the builder."""
    parts = {
        "organization": "nautilus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.common_organization_path(**parts)
    assert MigrationServiceClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path() renders the expected template."""
    want = "projects/{project}".format(project="scallop",)
    got = MigrationServiceClient.common_project_path("scallop")
    assert got == want


def test_parse_common_project_path():
    """parse_common_project_path() inverts the builder."""
    parts = {
        "project": "abalone",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.common_project_path(**parts)
    assert MigrationServiceClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path() renders the expected template."""
    want = "projects/{project}/locations/{location}".format(
        project="squid", location="clam",
    )
    got = MigrationServiceClient.common_location_path("squid", "clam")
    assert got == want


def test_parse_common_location_path():
    """parse_common_location_path() inverts the builder."""
    parts = {
        "project": "whelk",
        "location": "octopus",
    }
    # Round-trip: build a path, then parse it back into its components.
    path = MigrationServiceClient.common_location_path(**parts)
    assert MigrationServiceClient.parse_common_location_path(path) == parts
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info is forwarded to _prep_wrapped_messages exactly once."""
    client_info = gapic_v1.client_info.ClientInfo()

    # Constructing a client should forward client_info to the transport's
    # _prep_wrapped_messages hook.
    with mock.patch.object(
        transports.MigrationServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        MigrationServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    # Constructing the transport directly should behave the same way.
    with mock.patch.object(
        transports.MigrationServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = MigrationServiceClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| |
import numpy as np
import pytest
import dask.array as da
from dask.array.numpy_compat import _numpy_120
from dask.array.utils import assert_eq
from .test_dispatch import EncapsulateNDArray, WrappedArray
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.append(x, x),
        lambda x: np.concatenate([x, x, x]),
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.dstack((x, x)),
        lambda x: np.flip(x, axis=0),
        lambda x: np.hstack((x, x)),
        lambda x: np.matmul(x, x),
        lambda x: np.mean(x),
        lambda x: np.stack([x, x]),
        lambda x: np.block([x, x]),
        lambda x: np.sum(x),
        lambda x: np.var(x),
        lambda x: np.vstack((x, x)),
        lambda x: np.linalg.norm(x),
        lambda x: np.min(x),
        lambda x: np.amin(x),
        lambda x: np.round(x),
        lambda x: np.insert(x, 0, 3, axis=0),
        lambda x: np.delete(x, 0, axis=0),
        lambda x: np.select(
            [x < 0.3, x < 0.6, x > 0.7], [x * 2, x, x / 2], default=0.65
        ),
    ],
)
def test_array_function_dask(func):
    """NumPy functions dispatch to dask via the __array_function__ protocol.

    Each `func` is a NumPy call implemented by dask; applying it to a dask
    array must yield a dask array equal to the plain-NumPy result.
    """
    x = np.random.random((100, 100))
    y = da.from_array(x, chunks=(50, 50))
    res_x = func(x)
    res_y = func(y)
    # Dispatch must produce a dask result, not a materialized ndarray.
    assert isinstance(res_y, da.Array)
    assert_eq(res_y, res_x)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.dstack(x),
        lambda x: np.hstack(x),
        lambda x: np.vstack(x),
    ],
)
def test_stack_functions_require_sequence_of_arrays(func):
    """Passing a bare dask array (not a sequence) to *stack must raise."""
    arr = da.from_array(np.random.random((100, 100)), chunks=(50, 50))
    with pytest.raises(
        NotImplementedError, match="expects a sequence of arrays as the first argument"
    ):
        func(arr)
@pytest.mark.parametrize("func", [np.fft.fft, np.fft.fft2])
def test_array_function_fft(func):
    """np.fft functions dispatch to dask and match the NumPy result."""
    data = np.random.random((100, 100))
    darr = da.from_array(data, chunks=(100, 100))
    expected = func(data)
    result = func(darr)
    # mkl_fft monkey-patches numpy's FFT entry points; in that case the
    # result is not routed through dask, so skip the type check.
    if func.__module__ != "mkl_fft._numpy_fft":
        assert isinstance(result, da.Array)
    assert_eq(result, expected)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.min_scalar_type(x),
        lambda x: np.linalg.det(x),
        lambda x: np.linalg.eigvals(x),
    ],
)
def test_array_notimpl_function_dask(func):
    """Functions dask doesn't implement fall back with a FutureWarning."""
    data = np.random.random((100, 100))
    darr = da.from_array(data, chunks=(50, 50))
    with pytest.warns(
        FutureWarning, match="The `.*` function is not implemented by Dask"
    ):
        func(darr)
@pytest.mark.parametrize(
    "func", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)]
)
def test_array_function_sparse(func):
    """NumPy dispatch works on dask arrays whose blocks are sparse.COO."""
    sparse = pytest.importorskip("sparse")
    dense = da.random.random((500, 500), chunks=(100, 100))
    dense[dense < 0.9] = 0
    as_sparse = dense.map_blocks(sparse.COO)
    assert_eq(func(dense), func(as_sparse))
def test_array_function_sparse_tensordot():
    """np.tensordot over sparse.COO inputs matches the dense computation."""
    sparse = pytest.importorskip("sparse")
    left = np.random.random((2, 3, 4))
    left[left < 0.9] = 0
    right = np.random.random((4, 3, 2))
    right[right < 0.9] = 0
    dense_result = np.tensordot(left, right, axes=(2, 0))
    sparse_result = np.tensordot(
        sparse.COO(left), sparse.COO(right), axes=(2, 0)
    )
    assert_eq(dense_result, sparse_result.todense())
@pytest.mark.parametrize("chunks", [(100, 100), (500, 100)])
def test_array_function_cupy_svd(chunks):
    """np.linalg.svd dispatches to da.linalg.svd for dask-wrapped cupy."""
    cupy = pytest.importorskip("cupy")
    matrix = cupy.random.random((500, 100))
    darr = da.from_array(matrix, chunks=chunks, asarray=False)
    u_expected, s_expected, v_expected = da.linalg.svd(darr)
    u, s, v = np.linalg.svd(darr)
    assert_eq(u, u_expected)
    assert_eq(s, s_expected)
    assert_eq(v, v_expected)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.concatenate([x, x, x]),
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.dstack((x, x)),
        lambda x: np.flip(x, axis=0),
        lambda x: np.hstack((x, x)),
        lambda x: np.matmul(x, x),
        lambda x: np.mean(x),
        lambda x: np.stack([x, x]),
        lambda x: np.sum(x),
        lambda x: np.var(x),
        lambda x: np.vstack((x, x)),
        lambda x: np.linalg.norm(x),
    ],
)
def test_unregistered_func(func):
    """NumPy dispatch works when dask blocks are arbitrary duck arrays."""
    # Wrap a protocol-based encapsulated ndarray
    x = EncapsulateNDArray(np.random.random((100, 100)))
    # See if Dask holds the array fine
    y = da.from_array(x, chunks=(50, 50))
    # Check if it's an equivalent array
    assert_eq(x, y, check_meta=False, check_type=False)
    # Perform two NumPy functions, one on the
    # Encapsulated array
    xx = func(x)
    # And one on the Dask array holding these
    # encapsulated arrays
    yy = func(y)
    # Check that they are equivalent arrays.
    assert_eq(xx, yy, check_meta=False, check_type=False)
def test_non_existent_func():
    """np.sort (unimplemented by dask) warns but still returns a result.

    Regression test for __array_function__ becoming the default in
    numpy 1.17: the call must fall back to np.sort on the materialized data.
    """
    darr = da.from_array(np.array([1, 2, 4, 3]), chunks=(2,))
    with pytest.warns(
        FutureWarning, match="The `numpy.sort` function is not implemented by Dask"
    ):
        assert list(np.sort(darr)) == [1, 2, 3, 4]
@pytest.mark.parametrize(
    "func",
    [
        np.equal,
        np.matmul,
        np.dot,
        lambda x, y: np.stack([x, y]),
    ],
)
@pytest.mark.parametrize(
    "arr_upcast, arr_downcast",
    [
        (
            WrappedArray(np.random.random((10, 10))),
            da.random.random((10, 10), chunks=(5, 5)),
        ),
        (
            da.random.random((10, 10), chunks=(5, 5)),
            EncapsulateNDArray(np.random.random((10, 10))),
        ),
        (
            WrappedArray(np.random.random((10, 10))),
            EncapsulateNDArray(np.random.random((10, 10))),
        ),
    ],
)
def test_binary_function_type_precedence(func, arr_upcast, arr_downcast):
    """Test proper dispatch on binary NumPy functions.

    The higher-precedence ("upcast") argument must determine the result
    type regardless of argument order.
    """
    assert (
        type(func(arr_upcast, arr_downcast))
        == type(func(arr_downcast, arr_upcast))
        == type(arr_upcast)
    )
@pytest.mark.parametrize("func", [da.array, da.asarray, da.asanyarray, da.tri])
def test_like_raises(func):
    """`like=` works on NumPy >= 1.20 and raises RuntimeError otherwise."""
    if _numpy_120:
        assert_eq(func(1, like=func(1)), func(1))
        return
    with pytest.raises(
        RuntimeError, match="The use of ``like`` required NumPy >= 1.20"
    ):
        func(1, like=func(1))
@pytest.mark.skipif(not _numpy_120, reason="NEP-35 is not available")
@pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray])
def test_like_with_numpy_func(func):
    """NEP-35: a dask `like=` reference yields a dask-equivalent result."""
    result = func(1, like=da.array(1))
    assert_eq(result, func(1))
@pytest.mark.skipif(not _numpy_120, reason="NEP-35 is not available")
@pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray])
def test_like_with_numpy_func_and_dtype(func):
    """NEP-35: `like=` combined with an explicit dtype is honored."""
    result = func(1, dtype=float, like=da.array(1))
    assert_eq(result, func(1, dtype=float))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Collection of useful coders.
Only those coders listed in __all__ are part of the public API of this module.
"""
from __future__ import absolute_import
import base64
from builtins import object
import google.protobuf
from google.protobuf import wrappers_pb2
from apache_beam.coders import coder_impl
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.utils import proto_utils
# This is for py2/3 compatibility. cPickle was renamed pickle in python 3.
try:
import cPickle as pickle # Python 2
except ImportError:
import pickle # Python 3
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from .stream import get_varint_size
except ImportError:
from .slow_stream import get_varint_size
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
# pylint: disable=wrong-import-order, wrong-import-position
# Avoid dependencies on the full SDK.
try:
# Import dill from the pickler module to make sure our monkey-patching of dill
# occurs.
from apache_beam.internal.pickler import dill
except ImportError:
# We fall back to using the stock dill library in tests that don't use the
# full Python SDK.
import dill
# Public API of this module: only the coders listed here are part of the
# supported surface (see the module docstring above).
__all__ = ['Coder',
           'BytesCoder', 'DillCoder', 'FastPrimitivesCoder', 'FloatCoder',
           'IterableCoder', 'PickleCoder', 'ProtoCoder', 'SingletonCoder',
           'StrUtf8Coder', 'TimestampCoder', 'TupleCoder',
           'TupleSequenceCoder', 'VarIntCoder', 'WindowedValueCoder']
def serialize_coder(coder):
  """Serializes a coder to '<class_name>$<pickled_bytes>' form."""
  # Imported lazily to avoid a module-level dependency on the full SDK.
  from apache_beam.internal import pickler
  class_name = coder.__class__.__name__
  payload = pickler.dumps(coder)
  return '%s$%s' % (class_name, payload)
def deserialize_coder(serialized):
  """Inverse of serialize_coder: drops the name prefix and unpickles."""
  # Imported lazily to avoid a module-level dependency on the full SDK.
  from apache_beam.internal import pickler
  _, payload = serialized.split('$', 1)
  return pickler.loads(payload)
# pylint: enable=wrong-import-order, wrong-import-position
class Coder(object):
  """Base class for coders."""

  def encode(self, value):
    """Encodes the given object into a byte string."""
    raise NotImplementedError('Encode not implemented: %s.' % self)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    raise NotImplementedError('Decode not implemented: %s.' % self)

  def is_deterministic(self):
    """Whether this coder is guaranteed to encode values deterministically.

    A deterministic coder is required for key coders in GroupByKey operations
    to produce consistent results.

    For example, note that the default coder, the PickleCoder, is not
    deterministic: the ordering of pickled entries in maps may vary across
    executions since there is no defined order, and such a coder is not in
    general suitable for usage as a key coder in GroupByKey operations, since
    each instance of the same key may be encoded differently.

    Returns:
      Whether coder is deterministic.
    """
    return False

  def as_deterministic_coder(self, step_label, error_message=None):
    """Returns a deterministic version of self, if possible.

    Otherwise raises a value error.
    """
    if self.is_deterministic():
      return self
    else:
      # NOTE(review): the '%s' placeholder in the default message is never
      # substituted, so the raised message contains a literal '%s' -- confirm
      # whether it was meant to be filled in with `self`.
      raise ValueError(error_message or "'%s' cannot be made deterministic.")

  def estimate_size(self, value):
    """Estimates the encoded size of the given value, in bytes.

    Dataflow estimates the encoded size of a PCollection processed in a
    pipeline step by using the estimated size of a random sample of elements
    in that PCollection.

    The default implementation encodes the given value and returns its byte
    size. If a coder can provide a fast estimate of the encoded size of a
    value (e.g., if the encoding has a fixed size), it can provide its
    estimate here to improve performance.

    Arguments:
      value: the value whose encoded size is to be estimated.

    Returns:
      The estimated encoded size of the given value.
    """
    return len(self.encode(value))

  # ===========================================================================
  # Methods below are internal SDK details that don't need to be modified for
  # user-defined coders.
  # ===========================================================================

  def _create_impl(self):
    """Creates a CoderImpl to do the actual encoding and decoding.
    """
    # Default: wrap this coder's own encode/decode/estimate_size callbacks.
    return coder_impl.CallbackCoderImpl(self.encode, self.decode,
                                        self.estimate_size)

  def get_impl(self):
    """For internal use only; no backwards-compatibility guarantees.

    Returns the CoderImpl backing this Coder.
    """
    # The impl is created lazily and cached on the instance.
    if not hasattr(self, '_impl'):
      self._impl = self._create_impl()
      assert isinstance(self._impl, coder_impl.CoderImpl)
    return self._impl

  def __getstate__(self):
    # Pickle the coder without its cached CoderImpl.
    return self._dict_without_impl()

  def _dict_without_impl(self):
    # Returns self.__dict__ with the cached '_impl' entry removed (a copy is
    # made only when removal is necessary).
    if hasattr(self, '_impl'):
      d = dict(self.__dict__)
      del d['_impl']
      return d
    return self.__dict__

  @classmethod
  def from_type_hint(cls, unused_typehint, unused_registry):
    # If not overridden, just construct the coder without arguments.
    return cls()

  def is_kv_coder(self):
    # Whether this coder encodes key-value pairs; overridden by KV coders.
    return False

  def key_coder(self):
    if self.is_kv_coder():
      raise NotImplementedError('key_coder: %s' % self)
    else:
      raise ValueError('Not a KV coder: %s.' % self)

  def value_coder(self):
    if self.is_kv_coder():
      raise NotImplementedError('value_coder: %s' % self)
    else:
      raise ValueError('Not a KV coder: %s.' % self)

  def _get_component_coders(self):
    """For internal use only; no backwards-compatibility guarantees.

    Returns the internal component coders of this coder."""
    # This is an internal detail of the Coder API and does not need to be
    # refined in user-defined Coders.
    return []

  def as_cloud_object(self):
    """For internal use only; no backwards-compatibility guarantees.

    Returns Google Cloud Dataflow API description of this coder."""
    # This is an internal detail of the Coder API and does not need to be
    # refined in user-defined Coders.
    value = {
        # We pass coders in the form "<coder_name>$<pickled_data>" to make the
        # job description JSON more readable. Data before the $ is ignored by
        # the worker.
        '@type': serialize_coder(self),
        'component_encodings': list(
            component.as_cloud_object()
            for component in self._get_component_coders()
        ),
    }
    return value

  def __repr__(self):
    return self.__class__.__name__

  # pylint: disable=protected-access
  def __eq__(self, other):
    # Coders are equal when they are the same class with the same state
    # (ignoring the cached _impl).
    return (self.__class__ == other.__class__
            and self._dict_without_impl() == other._dict_without_impl())

  def __hash__(self):
    return hash((self.__class__,) +
                tuple(sorted(self._dict_without_impl().items())))
  # pylint: enable=protected-access

  # Maps urn -> (parameter_type, constructor) for from_runner_api().
  _known_urns = {}

  @classmethod
  def register_urn(cls, urn, parameter_type, fn=None):
    """Registers a urn with a constructor.

    For example, if 'beam:fn:foo' had parameter type FooPayload, one could
    write `RunnerApiFn.register_urn('bean:fn:foo', FooPayload, foo_from_proto)`
    where foo_from_proto took as arguments a FooPayload and a PipelineContext.
    This function can also be used as a decorator rather than passing the
    callable in as the final parameter.

    A corresponding to_runner_api_parameter method would be expected that
    returns the tuple ('beam:fn:foo', FooPayload)
    """
    def register(fn):
      cls._known_urns[urn] = parameter_type, fn
      return staticmethod(fn)
    if fn:
      # Used as a statement.
      register(fn)
    else:
      # Used as a decorator.
      return register

  def to_runner_api(self, context):
    # Serializes this coder and its component coders into a Runner API proto.
    urn, typed_param, components = self.to_runner_api_parameter(context)
    return beam_runner_api_pb2.Coder(
        spec=beam_runner_api_pb2.SdkFunctionSpec(
            environment_id=(
                context.default_environment_id() if context else None),
            spec=beam_runner_api_pb2.FunctionSpec(
                urn=urn,
                payload=typed_param.SerializeToString()
                if typed_param is not None else None)),
        component_coder_ids=[context.coders.get_id(c) for c in components])

  @classmethod
  def from_runner_api(cls, coder_proto, context):
    """Converts from an SdkFunctionSpec to a Fn object.

    Prefer registering a urn with its parameter type and constructor.
    """
    parameter_type, constructor = cls._known_urns[coder_proto.spec.spec.urn]
    return constructor(
        proto_utils.parse_Bytes(coder_proto.spec.spec.payload, parameter_type),
        [context.coders.get_by_id(c) for c in coder_proto.component_coder_ids],
        context)

  def to_runner_api_parameter(self, context):
    # Default: serialize the whole coder as an opaque pickled payload.
    return (
        python_urns.PICKLED_CODER,
        wrappers_pb2.BytesValue(value=serialize_coder(self)),
        ())

  @staticmethod
  def register_structured_urn(urn, cls):
    """Register a coder that's completely defined by its urn and its
    component(s), if any, which are passed to construct the instance.
    """
    # Replace the class's to_runner_api_parameter with one that emits just
    # the urn and component coders (no pickled payload).
    cls.to_runner_api_parameter = (
        lambda self, unused_context: (urn, None, self._get_component_coders()))

    # pylint: disable=unused-variable
    @Coder.register_urn(urn, None)
    def from_runner_api_parameter(unused_payload, components, unused_context):
      # Reconstruct the coder from its component coders alone.
      if components:
        return cls(*components)
      else:
        return cls()
# Module-level registration: PICKLED_CODER payloads are reconstructed by
# unpickling the BytesValue contents (components and context are unused).
@Coder.register_urn(
    python_urns.PICKLED_CODER, google.protobuf.wrappers_pb2.BytesValue)
def _pickle_from_runner_api_parameter(payload, components, context):
  return deserialize_coder(payload.value)
class StrUtf8Coder(Coder):
  """A coder used for reading and writing strings as UTF-8."""

  def encode(self, value):
    # Text string -> UTF-8 bytes.
    return value.encode('utf-8')

  def decode(self, value):
    # UTF-8 bytes -> text string.
    return value.decode('utf-8')

  def is_deterministic(self):
    # A given string always encodes to the same UTF-8 byte sequence.
    return True
class ToStringCoder(Coder):
  """A default string coder used if no sink coder is specified."""

  def encode(self, value):
    try:
      # Python 2: unicode values must be UTF-8 encoded explicitly.
      if isinstance(value, unicode):  # pylint: disable=unicode-builtin
        return value.encode('utf-8')
    except NameError:
      # Python 3: `unicode` does not exist; fall through to str().
      pass
    return str(value)

  def decode(self, _):
    # One-way coder: values are stringified for output only.
    raise NotImplementedError('ToStringCoder cannot be used for decoding.')

  def is_deterministic(self):
    return True
class FastCoder(Coder):
  """Coder subclass used when a (faster) CoderImpl is supplied directly.

  The Coder class defines _create_impl in terms of encode() and decode();
  this class inverts that by defining encode() and decode() in terms of
  _create_impl().
  """

  def _create_impl(self):
    # Subclasses must supply the CoderImpl; there is no callback fallback.
    raise NotImplementedError

  def encode(self, value):
    """Encodes the given object into a byte string."""
    return self.get_impl().encode(value)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    return self.get_impl().decode(encoded)

  def estimate_size(self, value):
    """Delegates size estimation to the underlying CoderImpl."""
    return self.get_impl().estimate_size(value)
class BytesCoder(FastCoder):
  """Byte string coder."""

  def _create_impl(self):
    return coder_impl.BytesCoderImpl()

  def is_deterministic(self):
    # Byte strings encode as themselves.
    return True

  def as_cloud_object(self):
    return {
        '@type': 'kind:bytes',
    }

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.BYTES.urn, BytesCoder)
class VarIntCoder(FastCoder):
  """Variable-length integer coder."""

  def _create_impl(self):
    return coder_impl.VarIntCoderImpl()

  def is_deterministic(self):
    # A given integer always produces the same encoding.
    return True

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))


Coder.register_structured_urn(common_urns.coders.VARINT.urn, VarIntCoder)
class FloatCoder(FastCoder):
  """A coder used for floating-point values."""

  def _create_impl(self):
    return coder_impl.FloatCoderImpl()

  def is_deterministic(self):
    # This impl is declared deterministic: same float, same bytes.
    return True

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class TimestampCoder(FastCoder):
  """A coder used for timeutil.Timestamp values."""

  def _create_impl(self):
    return coder_impl.TimestampCoderImpl()

  def is_deterministic(self):
    # This impl is declared deterministic: same timestamp, same bytes.
    return True

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class SingletonCoder(FastCoder):
  """A coder that always encodes exactly one value."""

  def __init__(self, value):
    # The single value this coder produces on decode.
    self._value = value

  def _create_impl(self):
    return coder_impl.SingletonCoderImpl(self._value)

  def is_deterministic(self):
    # Only one possible value, hence only one possible encoding.
    return True

  def __eq__(self, other):
    return type(self) == type(other) and self._value == other._value

  def __hash__(self):
    return hash(self._value)
def maybe_dill_dumps(o):
  """Pickle using cPickle or the Dill pickler as a fallback."""
  # The dill pickler is needed for objects of certain custom classes,
  # including, for example, ones that contain lambdas.
  try:
    result = pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
  except Exception:  # pylint: disable=broad-except
    result = dill.dumps(o)
  return result
def maybe_dill_loads(o):
  """Unpickle using cPickle or the Dill pickler as a fallback."""
  try:
    result = pickle.loads(o)
  except Exception:  # pylint: disable=broad-except
    result = dill.loads(o)
  return result
class _PickleCoderBase(FastCoder):
  """Base class for pickling coders."""

  def is_deterministic(self):
    # Pickling is not deterministic in general: for example, the ordering of
    # pickled entries in maps may vary across executions, so pickle-based
    # coders are unsuitable as key coders in GroupByKey operations.
    return False

  def as_cloud_object(self, is_pair_like=True):
    value = super(_PickleCoderBase, self).as_cloud_object()
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(is_pair_like=False),
          self.as_cloud_object(is_pair_like=False)
      ]
    return value

  # .key_coder() and .value_coder() are allowed on pickle coders since the
  # return values of lambdas in ParDo operations can't always be inferred,
  # and the result may be used in a GroupByKey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    # Stateless coder: equality is purely by type.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class PickleCoder(_PickleCoderBase):
  """Coder using Python's pickle functionality."""

  def _create_impl(self):
    # Bind to locals so the encode lambda avoids module-attribute lookups.
    dumps = pickle.dumps
    protocol = pickle.HIGHEST_PROTOCOL
    return coder_impl.CallbackCoderImpl(
        lambda x: dumps(x, protocol), pickle.loads)

  def as_deterministic_coder(self, step_label, error_message=None):
    # Wrap in a coder that raises at runtime on non-deterministic values.
    return DeterministicFastPrimitivesCoder(self, step_label)
class DillCoder(_PickleCoderBase):
  """Coder using dill's pickle functionality."""

  def _create_impl(self):
    # maybe_dill_* try cPickle/pickle first and fall back to dill.
    return coder_impl.CallbackCoderImpl(maybe_dill_dumps, maybe_dill_loads)
class DeterministicFastPrimitivesCoder(FastCoder):
  """Throws runtime errors when encoding non-deterministic values."""

  def __init__(self, coder, step_label):
    # The wrapped coder and the pipeline-step label, both forwarded to the
    # underlying CoderImpl.
    self._underlying_coder = coder
    self._step_label = step_label

  def _create_impl(self):
    return coder_impl.DeterministicFastPrimitivesCoderImpl(
        self._underlying_coder.get_impl(), self._step_label)

  def is_deterministic(self):
    # Determinism is enforced at encode time by the impl.
    return True

  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class FastPrimitivesCoder(FastCoder):
  """Encodes simple primitives (e.g. str, int) efficiently.

  For unknown types, falls back to another coder (e.g. PickleCoder).
  """

  def __init__(self, fallback_coder=PickleCoder()):
    # The default argument is evaluated once, so instances share a single
    # PickleCoder fallback by default; PickleCoder holds no per-value state,
    # so this sharing is safe.
    self._fallback_coder = fallback_coder

  def _create_impl(self):
    return coder_impl.FastPrimitivesCoderImpl(
        self._fallback_coder.get_impl())

  def is_deterministic(self):
    # Deterministic iff the fallback coder is.
    return self._fallback_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return DeterministicFastPrimitivesCoder(self, step_label)

  def as_cloud_object(self, is_pair_like=True):
    # BUGFIX: this previously called super(FastCoder, self), which starts the
    # MRO search *after* FastCoder and only worked because FastCoder does not
    # override as_cloud_object.  Name our own class, consistent with
    # _PickleCoderBase; the resolved method (Coder.as_cloud_object) is the
    # same, so behavior is unchanged.
    value = super(FastPrimitivesCoder, self).as_cloud_object()
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(is_pair_like=False),
          self.as_cloud_object(is_pair_like=False)
      ]
    return value

  # We allow .key_coder() and .value_coder() to be called on FastPrimitivesCoder
  # since we can't always infer the return values of lambdas in ParDo
  # operations, the result of which may be used in a GroupByKey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self

  def __eq__(self, other):
    # Note: the fallback coder is deliberately excluded from equality,
    # matching the original behavior.
    return type(self) == type(other)

  def __hash__(self):
    return hash(type(self))
class Base64PickleCoder(Coder):
  """Pickles a value, then base64-encodes the pickled bytes."""

  # TODO(robertwb): Do base64 encoding where it's needed (e.g. in json)
  # rather than via a special Coder.

  def encode(self, value):
    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    return base64.b64encode(pickled)

  def decode(self, encoded):
    raw = base64.b64decode(encoded)
    return pickle.loads(raw)

  def is_deterministic(self):
    # Pickling is not deterministic (see the comments for PickleCoder
    # above), and base64 encoding cannot make it so.
    return False

  # .key_coder()/.value_coder() are permitted here because lambda return
  # types in ParDo cannot always be inferred, yet the result may feed a
  # GroupByKey.
  #
  # TODO(ccy): this is currently only used for KV values from Create
  # transforms.  Investigate a way to unify this with PickleCoder.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class ProtoCoder(FastCoder):
  """A Coder for Google Protocol Buffers.

  Handles both syntax versions 2 and 3, provided the runtime protobuf
  library exactly matches the protoc compiler version that generated the
  messages.  Registered in the global CoderRegistry as the default coder
  for any protobuf Message object.
  """

  def __init__(self, proto_message_type):
    self.proto_message_type = proto_message_type

  def _create_impl(self):
    return coder_impl.ProtoCoderImpl(self.proto_message_type)

  def is_deterministic(self):
    # TODO(vikasrk): A proto message can be deterministic if it does not
    # contain a Map.
    return False

  def __eq__(self, other):
    if type(self) != type(other):
      return False
    return self.proto_message_type == other.proto_message_type

  def __hash__(self):
    return hash(self.proto_message_type)

  @staticmethod
  def from_type_hint(typehint, unused_registry):
    if not issubclass(typehint, google.protobuf.message.Message):
      raise ValueError(('Expected a subclass of google.protobuf.message.Message'
                        ', but got a %s' % typehint))
    return ProtoCoder(typehint)
class TupleCoder(FastCoder):
  """Coder for fixed-length tuples: one component coder per position."""

  def __init__(self, components):
    # Freeze the component coders so the coder itself stays hashable.
    self._coders = tuple(components)

  def _create_impl(self):
    return coder_impl.TupleCoderImpl([c.get_impl() for c in self._coders])

  def is_deterministic(self):
    # Deterministic only if every component coder is.
    return all(c.is_deterministic() for c in self._coders)

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return TupleCoder([c.as_deterministic_coder(step_label, error_message)
                         for c in self._coders])

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleCoder([registry.get_coder(t) for t in typehint.tuple_types])

  def as_cloud_object(self):
    if self.is_kv_coder():
      # Two-component tuples map to the service's key/value "pair" kind.
      return {
          '@type': 'kind:pair',
          'is_pair_like': True,
          'component_encodings': list(
              component.as_cloud_object()
              for component in self._get_component_coders()
          ),
      }

    return super(TupleCoder, self).as_cloud_object()

  def _get_component_coders(self):
    return self.coders()

  def coders(self):
    return self._coders

  def is_kv_coder(self):
    return len(self._coders) == 2

  def key_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[0]

  def value_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[1]

  def __repr__(self):
    return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._coders)

  def __eq__(self, other):
    # BUG FIX: previously compared self._coders to itself, so any two
    # TupleCoder instances compared equal regardless of their components.
    return (type(self) == type(other)
            and self._coders == other._coders)

  def __hash__(self):
    return hash(self._coders)

  def to_runner_api_parameter(self, context):
    if self.is_kv_coder():
      return common_urns.coders.KV.urn, None, self.coders()
    else:
      return super(TupleCoder, self).to_runner_api_parameter(context)

  @Coder.register_urn(common_urns.coders.KV.urn, None)
  def from_runner_api_parameter(unused_payload, components, unused_context):
    return TupleCoder(components)
class TupleSequenceCoder(FastCoder):
  """Coder of homogeneous tuple objects (all elements share one coder)."""

  def __init__(self, elem_coder):
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.TupleSequenceCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return TupleSequenceCoder(
          self._elem_coder.as_deterministic_coder(step_label, error_message))

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleSequenceCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'TupleSequenceCoder[%r]' % self._elem_coder

  def __eq__(self, other):
    # BUG FIX: previously compared self._elem_coder to itself, so any two
    # TupleSequenceCoder instances compared equal regardless of elements.
    return (type(self) == type(other)
            and self._elem_coder == other._elem_coder)

  def __hash__(self):
    return hash((type(self), self._elem_coder))
class IterableCoder(FastCoder):
  """Coder of iterables of homogeneous objects."""

  def __init__(self, elem_coder):
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.IterableCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  def as_deterministic_coder(self, step_label, error_message=None):
    if self.is_deterministic():
      return self
    else:
      return IterableCoder(
          self._elem_coder.as_deterministic_coder(step_label, error_message))

  def as_cloud_object(self):
    return {
        '@type': 'kind:stream',
        'is_stream_like': True,
        'component_encodings': [self._elem_coder.as_cloud_object()],
    }

  def value_coder(self):
    return self._elem_coder

  @staticmethod
  def from_type_hint(typehint, registry):
    return IterableCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'IterableCoder[%r]' % self._elem_coder

  def __eq__(self, other):
    # BUG FIX: previously compared self._elem_coder to itself, so any two
    # IterableCoder instances compared equal regardless of element coders.
    return (type(self) == type(other)
            and self._elem_coder == other._elem_coder)

  def __hash__(self):
    return hash((type(self), self._elem_coder))
# Make IterableCoder the coder constructed for the portable ITERABLE urn.
Coder.register_structured_urn(common_urns.coders.ITERABLE.urn, IterableCoder)
class GlobalWindowCoder(SingletonCoder):
  """Coder that always yields the single GlobalWindow value."""

  def __init__(self):
    # Imported here to avoid a circular import at module load time.
    from apache_beam.transforms import window
    super(GlobalWindowCoder, self).__init__(window.GlobalWindow())

  def as_cloud_object(self):
    return {'@type': 'kind:global_window'}
# Make GlobalWindowCoder the coder constructed for the portable
# GLOBAL_WINDOW urn.
Coder.register_structured_urn(
    common_urns.coders.GLOBAL_WINDOW.urn, GlobalWindowCoder)
class IntervalWindowCoder(FastCoder):
  """Coder for a window defined by a start timestamp and a duration."""

  def _create_impl(self):
    return coder_impl.IntervalWindowCoderImpl()

  def is_deterministic(self):
    return True

  def as_cloud_object(self):
    return {'@type': 'kind:interval_window'}

  def __eq__(self, other):
    # Stateless coder: any two instances are interchangeable.
    return type(self) is type(other)

  def __hash__(self):
    return hash(type(self))
# Make IntervalWindowCoder the coder constructed for the portable
# INTERVAL_WINDOW urn.
Coder.register_structured_urn(
    common_urns.coders.INTERVAL_WINDOW.urn, IntervalWindowCoder)
class WindowedValueCoder(FastCoder):
  """Coder for windowed values: (value, timestamp, windows) triples."""

  def __init__(self, wrapped_value_coder, window_coder=None):
    # Fall back to pickling windows when no window coder is supplied.
    if not window_coder:
      window_coder = PickleCoder()
    self.wrapped_value_coder = wrapped_value_coder
    self.timestamp_coder = TimestampCoder()
    self.window_coder = window_coder

  def _create_impl(self):
    return coder_impl.WindowedValueCoderImpl(
        self.wrapped_value_coder.get_impl(),
        self.timestamp_coder.get_impl(),
        self.window_coder.get_impl())

  def is_deterministic(self):
    # Deterministic only if all three component coders are.
    return (self.wrapped_value_coder.is_deterministic()
            and self.timestamp_coder.is_deterministic()
            and self.window_coder.is_deterministic())

  def as_cloud_object(self):
    component_encodings = [component.as_cloud_object()
                           for component in self._get_component_coders()]
    return {
        '@type': 'kind:windowed_value',
        'is_wrapper': True,
        'component_encodings': component_encodings,
    }

  def _get_component_coders(self):
    return [self.wrapped_value_coder, self.window_coder]

  def is_kv_coder(self):
    # KV-ness is delegated to the wrapped value coder.
    return self.wrapped_value_coder.is_kv_coder()

  def key_coder(self):
    return self.wrapped_value_coder.key_coder()

  def value_coder(self):
    return self.wrapped_value_coder.value_coder()

  def __repr__(self):
    return 'WindowedValueCoder[%s]' % self.wrapped_value_coder

  def __eq__(self, other):
    if type(self) != type(other):
      return False
    return (self.wrapped_value_coder == other.wrapped_value_coder
            and self.timestamp_coder == other.timestamp_coder
            and self.window_coder == other.window_coder)

  def __hash__(self):
    return hash(
        (self.wrapped_value_coder, self.timestamp_coder, self.window_coder))
# Make WindowedValueCoder the coder constructed for the portable
# WINDOWED_VALUE urn.
Coder.register_structured_urn(
    common_urns.coders.WINDOWED_VALUE.urn, WindowedValueCoder)
class LengthPrefixCoder(FastCoder):
  """For internal use only; no backwards-compatibility guarantees.

  Coder which prefixes the length of the encoded object in the stream."""

  def __init__(self, value_coder):
    self._value_coder = value_coder

  def _create_impl(self):
    # Fix: previously passed the Coder object itself; the impl constructor
    # expects a CoderImpl, as every other _create_impl in this module does.
    return coder_impl.LengthPrefixCoderImpl(self._value_coder.get_impl())

  def is_deterministic(self):
    return self._value_coder.is_deterministic()

  def estimate_size(self, value):
    # Account for the varint length header in addition to the payload.
    value_size = self._value_coder.estimate_size(value)
    return get_varint_size(value_size) + value_size

  def value_coder(self):
    return self._value_coder

  def as_cloud_object(self):
    return {
        '@type': 'kind:length_prefix',
        'component_encodings': [self._value_coder.as_cloud_object()],
    }

  def _get_component_coders(self):
    return (self._value_coder,)

  def __repr__(self):
    return 'LengthPrefixCoder[%r]' % self._value_coder

  def __eq__(self, other):
    return (type(self) == type(other)
            and self._value_coder == other._value_coder)

  def __hash__(self):
    return hash((type(self), self._value_coder))
# Make LengthPrefixCoder the coder constructed for the portable
# LENGTH_PREFIX urn.
Coder.register_structured_urn(
    common_urns.coders.LENGTH_PREFIX.urn, LengthPrefixCoder)
| |
# Copyright 2017 BlyNotes. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###############################################
# Author: Original code provided by Professor for CS 6375
#
# Modified by: Stephen Blystone
#
# Purpose: Modify hyperparameters using Gradient Descent
# Optimizer and modified MNIST data to achieve
# highest test accuracy that you can.
###############################################
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from our_mnist import *
import tensorflow as tf
import sys
import csv
import numpy as np
import timeit
# Load the (modified) MNIST dataset; labels come back one-hot encoded.
mnist = read_data_sets('MNIST_data', one_hot=True)
# Variables initialized below for automation
# X = tf.placeholder(tf.float32, shape=[None, 784])
# Y = tf.placeholder(tf.float32, shape=[None, 10])
# keep_prob = tf.placeholder(tf.float32)
# *******************************************************************
# DO NOT MODIFY THE CODE ABOVE THIS LINE
# MAKE CHANGES TO THE CODE BELOW:
# Flag to disable printing train/test accuracy every 100 batches
DISABLEBATCHPRINT = False
def weight_variable(shape):
    """Create a weight Variable initialized to small truncated-normal noise."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable with every element initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def buildSingleLayer(input, numInputs, layer_dict, layerNum, keep_prob_label):
    """Build one fully connected layer (hidden or output).

    Input:
        -input: output of the previous layer (or X for the first layer)
        -numInputs: number of nodes feeding this layer (size of prev layer)
        -layer_dict: dict of layer-specific parameters
        -layerNum: zero-based index of the layer being built
        -keep_prob_label: list of tf.placeholder(tf.float32) keep_probs,
         one per layer
    Output:
        -(v_fc, W_fc): layer output values and the layer's weight variable
         (weights are needed later for the regularization terms)
    """
    print("Building Layer {}. numInputs = {} and layerSize = {}".format(
        layerNum + 1, numInputs, layer_dict['layerSize']))
    layer_size = layer_dict['layerSize']
    node_type = layer_dict['nodeType']
    # Layer-specific weight and bias variables.
    W_fc = weight_variable([numInputs, layer_size])
    b_fc = bias_variable([layer_size])
    # All node types share the affine transform; the activation differs.
    pre_activation = tf.matmul(input, W_fc) + b_fc
    if node_type == "ReLU":
        v_fc = tf.nn.relu(pre_activation)
    elif node_type == "Sigmoid":
        v_fc = tf.nn.sigmoid(pre_activation)
    elif node_type == "Linear":
        # Linear layer: the affine output is used as-is.
        v_fc = pre_activation
    else:
        print("ERROR: Unknown nodeType in layer_dict: {}".format(node_type))
        sys.exit(1)
    # Optional per-layer dropout.
    if layer_dict['dropoutFlag']:
        print("Dropout Layer")
        v_fc = tf.nn.dropout(v_fc, keep_prob_label[layerNum])
    return (v_fc, W_fc)
def buildLayers(layers, keep_prob_label, X):
    """Build every layer described by ``layers`` and return the network output.

    Inputs:
        -layers: list of per-layer parameter dicts
        -keep_prob_label: list of tf.placeholder(tf.float32) keep_probs,
         one per layer
        -X: initial input values
    Outputs:
        -(predicted_Y, weights): final layer output tensor and the list of
         per-layer weight variables (used later for regularization)
    """
    print("Building Layers")
    # Each layer consumes the previous layer's output; start from X.
    layer_input = X
    layer_input_size = X.get_shape().dims[1].value
    weights = []
    last_idx = len(layers) - 1
    for idx, layer_dict in enumerate(layers):
        # All but the final dict describe hidden layers.
        if idx < last_idx:
            print("Building Hidden Layer {}".format(idx + 1))
        else:
            print("Building Output Layer")
        layer_input, layer_weights = buildSingleLayer(
            layer_input, layer_input_size, layer_dict, idx, keep_prob_label)
        # This layer's size feeds the next layer's input width.
        layer_input_size = layer_dict['layerSize']
        weights.append(layer_weights)
    return (layer_input, weights)
def runAlgorithm(param, outputText=""):
    """Build, train, and evaluate one network configuration.

    Inputs:
        -param: dict with 'learning_rate', 'batch_size', and a 'layers' list
         of per-layer parameter dicts (see main() for the schema)
        -outputText: free-form label appended to the results row written to
         OutputScript1.xls
    """
    # Reset the default graph to free memory.
    # If you were to run a for loop over runAlgorithm trying different
    # configurations, Tensorflow places each node into a graph (the default
    # graph if no other is specified).  Eventually this will cause an Out Of
    # Memory error, because the graph size continues to grow.  Resetting the
    # default graph each call makes Tensorflow discard the old graph.
    tf.reset_default_graph()
    # Because the graph was reset, the X and Y placeholders must be recreated
    # here so they land in the new graph.
    X = tf.placeholder(tf.float32, shape=[None, 784])
    Y = tf.placeholder(tf.float32, shape=[None, 10])
    # Start interactive session
    sess = tf.InteractiveSession()
    # Start timer.  Used to time how long runAlgorithm takes to complete.
    start_time = timeit.default_timer()
    # keep_prob holds the actual probability values from the layers list;
    # keep_prob_label holds the matching tf.placeholder for each layer.
    keep_prob = []
    keep_prob_label = []
    for idx, layer in enumerate(param['layers']):
        # print("{}: {}".format(idx, layer))
        # Create a placeholder for this layer's keep_prob.
        tmpProbHolder = tf.placeholder(tf.float32)
        keep_prob_label.append(tmpProbHolder)
        keep_prob.append(layer['keep_prob'])
    # Feed dict for evaluating training accuracy (dropout disabled: prob 1).
    feed_dict_Print_Train = {keep_prob_label[i]: 1 for i in range(0, len(param['layers']))}
    feed_dict_Print_Train[X] = mnist.train.images
    feed_dict_Print_Train[Y] = mnist.train.labels
    # Feed dict for evaluating test accuracy (dropout disabled: prob 1).
    feed_dict_Test = {keep_prob_label[i]: 1 for i in range(0, len(param['layers']))}
    feed_dict_Test[X] = mnist.test.images
    feed_dict_Test[Y] = mnist.test.labels
    # Build layers and get predicted_Y and per-layer weights back.
    predicted_Y, layerWeights = buildLayers(param['layers'], keep_prob_label, X)
    # Softmax cross-entropy loss between labels and logits.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=predicted_Y))
    # zip() below pairs each layer dict with its weight variable, so the
    # lengths must match.
    assert len(param['layers']) == len(layerWeights)
    # L2 regularization: sum each layer's lambda times the L2 norm of its
    # weights.  Equivalent to the Professor's original fixed-layer code, but
    # generalized to a variable number of layers and lambda values.
    regularizer = sum([a['regLambda'] * tf.nn.l2_loss(b)
                       for a, b in zip(param['layers'], layerWeights)])
    # Total loss = data loss + regularization penalty.
    loss = tf.reduce_mean(cross_entropy + regularizer)
    # Perform Gradient Descent minimizing the loss.
    train_step = tf.train.GradientDescentOptimizer(param['learning_rate']).minimize(loss)
    # Run the session and initialize the variables.
    sess.run(tf.global_variables_initializer())
    print("Starting Training...")
    # Only print if DISABLEBATCHPRINT is not set.
    if (not DISABLEBATCHPRINT):
        print("epoch\ttrain_accuracy\ttest_accuracy")
    for i in range(3000):
        batch = mnist.train.next_batch(param['batch_size'])
        # Every 100 batches, report train/test accuracy (dropout disabled).
        if (not DISABLEBATCHPRINT):
            if i % 100 == 0:
                correct_prediction = tf.equal(tf.argmax(predicted_Y, 1), tf.argmax(Y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                test_accuracy = accuracy.eval(feed_dict=feed_dict_Test)
                train_accuracy = accuracy.eval(feed_dict=feed_dict_Print_Train)
                print("%d \t %.3f \t\t %.3f" % (i, train_accuracy, test_accuracy))
        # Feed dict for training: per-layer keep_probs plus this batch.
        feed_dict_Train = {keep_prob_label[i]: keep_prob[i] for i in range(0, len(param['layers']))}
        feed_dict_Train[X] = batch[0]
        feed_dict_Train[Y] = batch[1]
        # TRAIN STEP
        train_step.run(feed_dict=feed_dict_Train)
    # end for loop
    # Final test-set accuracy after training completes.
    correct_prediction = tf.equal(tf.argmax(predicted_Y, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    test_accuracy = accuracy.eval(feed_dict=feed_dict_Test)
    # End timer
    time_duration = timeit.default_timer() - start_time
    print("test accuracy: ", test_accuracy)
    print("test duration: ", time_duration)
    # Append a tab-separated results row; "with open" guarantees the file is
    # closed even if an exception is raised.
    with open('OutputScript1.xls', 'a', newline='') as outfile:
        writer = csv.writer(outfile, delimiter='\t')
        # csv.writer.writerow takes a single list; each item becomes a column.
        writer.writerow([test_accuracy, param['layers'], param['learning_rate'],
                         param['batch_size'], time_duration, outputText])
    # Free up memory by closing the session.
    sess.close()
def main():
    """Configure the hyperparameters and execute the training run(s)."""
    # Only use 1 iteration for the file to submit
    num_iterations = 1
    # Final hyperparameter values
    layer1 = 980  # or 1000
    layer2 = 100
    dropout1 = True
    dropout2 = True
    keep_prob1 = .35
    keep_prob2 = 0.75  # or 8
    lambdaVal1 = 0
    lambdaVal2 = 0.0001
    lambdaValOut = 0.0005  # or 0.0006
    learnRate = 0.75
    batchSize = 100
    # runParameters is a list of run-configuration dicts; add more dicts to
    # perform multiple runs.
    #
    # 'learning_rate' and 'batch_size' are global to a run.  'layers' holds
    # one dict per layer; each layer dict must specify:
    #   layerSize: number of nodes in that layer
    #   dropoutFlag: True, False
    #   nodeType: "ReLU", "Sigmoid", "Linear"
    #   regLambda: L2 regularization lambda for that layer
    #   keep_prob: dropout keep probability for that layer
    runParameters = [{'learning_rate': learnRate, 'batch_size': batchSize,
                      'layers': [
                          {'layerSize': layer1, 'dropoutFlag': dropout1,
                           'nodeType': "ReLU", 'regLambda': lambdaVal1, 'keep_prob': keep_prob1},
                          {'layerSize': layer2, 'dropoutFlag': dropout2,
                           'nodeType': "ReLU", 'regLambda': lambdaVal2, 'keep_prob': keep_prob2},
                          {'layerSize': 10, 'dropoutFlag': False,
                           'nodeType': "Linear", 'regLambda': lambdaValOut, 'keep_prob': 1}
                      ]}]
    # Cleanup: the original enumerate() index was never used, and
    # range(0, n) is just range(n); iterate the configurations directly.
    for params in runParameters:
        # Run num_iterations times to average out run-to-run variance.
        for i in range(num_iterations):
            runAlgorithm(params, "iteration {} of {}".format(i + 1, num_iterations))
# Call the main function
# Standard entry guard: run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import
from __future__ import with_statement
from mock import Mock
from celery.worker import bootsteps
from celery.tests.utils import AppCase, Case
class test_Component(Case):
    """Unit tests for the bootsteps.Component base class."""

    class Def(bootsteps.Component):
        # Concrete component used as a fixture throughout; the test case
        # instance itself is passed in as the (duck-typed) parent object.
        name = 'test_Component.Def'

    def test_components_must_be_named(self):
        # A concrete component without a name must fail at class-creation
        # time; abstract components are exempt from the requirement.
        with self.assertRaises(NotImplementedError):
            class X(bootsteps.Component):
                pass

        class Y(bootsteps.Component):
            abstract = True

    def test_namespace_name(self, ns='test_namespace_name'):
        # The namespace may be set explicitly, or derived from the part of
        # a dotted name before the final dot.
        class X(bootsteps.Component):
            namespace = ns
            name = 'X'
        self.assertEqual(X.namespace, ns)
        self.assertEqual(X.name, 'X')

        class Y(bootsteps.Component):
            name = '%s.Y' % (ns, )
        self.assertEqual(Y.namespace, ns)
        self.assertEqual(Y.name, 'Y')

    def test_init(self):
        self.assertTrue(self.Def(self))

    def test_create(self):
        self.Def(self).create(self)

    def test_include_if(self):
        # include_if mirrors the component's enabled flag.
        x = self.Def(self)
        x.enabled = True
        self.assertTrue(x.include_if(self))
        x.enabled = False
        self.assertFalse(x.include_if(self))

    def test_instantiate(self):
        self.assertIsInstance(self.Def(self).instantiate(self.Def, self),
                              self.Def)

    def test_include_when_enabled(self):
        # include() must call create() and store its return value on .obj.
        x = self.Def(self)
        x.create = Mock()
        x.create.return_value = 'George'
        self.assertTrue(x.include(self))
        self.assertEqual(x.obj, 'George')
        x.create.assert_called_with(self)

    def test_include_when_disabled(self):
        # A disabled component is skipped entirely: no create() call.
        x = self.Def(self)
        x.enabled = False
        x.create = Mock()
        self.assertFalse(x.include(self))
        self.assertFalse(x.create.call_count)
class test_StartStopComponent(Case):
    """Unit tests for bootsteps.StartStopComponent lifecycle handling."""

    class Def(bootsteps.StartStopComponent):
        name = 'test_StartStopComponent.Def'

    def setUp(self):
        # The test case doubles as the parent object; StartStopComponent
        # appends the created object to parent.components.
        self.components = []

    def test_start__stop(self):
        x = self.Def(self)
        x.create = Mock()
        # include creates the underlying object and sets
        # its x.obj attribute to it, as well as appending
        # it to the parent.components list.
        x.include(self)
        self.assertTrue(self.components)
        self.assertIs(self.components[0], x.obj)
        # start/stop must be forwarded to the created object.
        x.start()
        x.obj.start.assert_called_with()
        x.stop()
        x.obj.stop.assert_called_with()

    def test_include_when_disabled(self):
        # Disabled components must not be registered with the parent.
        x = self.Def(self)
        x.enabled = False
        x.include(self)
        self.assertFalse(self.components)

    def test_terminate_when_terminable(self):
        # A terminable component terminates instead of stopping.
        x = self.Def(self)
        x.terminable = True
        x.create = Mock()
        x.include(self)
        x.terminate()
        x.obj.terminate.assert_called_with()
        self.assertFalse(x.obj.stop.call_count)

    def test_terminate_calls_stop_when_not_terminable(self):
        # Non-terminable components fall back to a regular stop.
        x = self.Def(self)
        x.terminable = False
        x.create = Mock()
        x.include(self)
        x.terminate()
        x.obj.stop.assert_called_with()
        self.assertFalse(x.obj.terminate.call_count)
class test_Namespace(AppCase):
    """Unit tests for bootsteps.Namespace component collection/ordering."""

    class NS(bootsteps.Namespace):
        name = 'test_Namespace'

    class ImportingNS(bootsteps.Namespace):
        # Namespace stub that records module names instead of importing.

        def __init__(self, *args, **kwargs):
            bootsteps.Namespace.__init__(self, *args, **kwargs)
            self.imported = []

        def modules(self):
            return ['A', 'B', 'C']

        def import_module(self, module):
            self.imported.append(module)

    def test_components_added_to_unclaimed(self):
        # Defining a Component subclass registers it (by short name) in the
        # _unclaimed mapping of its declared namespace as a side effect.
        class tnA(bootsteps.Component):
            name = 'test_Namespace.A'

        class tnB(bootsteps.Component):
            name = 'test_Namespace.B'

        class xxA(bootsteps.Component):
            name = 'xx.A'

        self.assertIn('A', self.NS._unclaimed['test_Namespace'])
        self.assertIn('B', self.NS._unclaimed['test_Namespace'])
        self.assertIn('A', self.NS._unclaimed['xx'])
        self.assertNotIn('B', self.NS._unclaimed['xx'])

    def test_init(self):
        ns = self.NS(app=self.app)
        self.assertIs(ns.app, self.app)
        self.assertEqual(ns.name, 'test_Namespace')
        self.assertFalse(ns.services)

    def test_interface_modules(self):
        # The base modules() hook must at least be callable.
        self.NS(app=self.app).modules()

    def test_load_modules(self):
        # load_modules imports every module reported by modules().
        x = self.ImportingNS(app=self.app)
        x.load_modules()
        self.assertListEqual(x.imported, ['A', 'B', 'C'])

    def test_apply(self):
        # apply() must topologically sort components by their requires
        # edges, with last=True components ordered at the end.
        class MyNS(bootsteps.Namespace):
            name = 'test_apply'

            def modules(self):
                return ['A', 'B']

        class A(bootsteps.Component):
            name = 'test_apply.A'
            requires = ['C']

        class B(bootsteps.Component):
            name = 'test_apply.B'

        class C(bootsteps.Component):
            name = 'test_apply.C'
            requires = ['B']

        class D(bootsteps.Component):
            name = 'test_apply.D'
            last = True

        x = MyNS(app=self.app)
        x.import_module = Mock()
        x.apply(self)
        # Expected order: B (no deps), C (requires B), A (requires C),
        # then D (last=True).
        self.assertItemsEqual(x.components.values(), [A, B, C, D])
        self.assertTrue(x.import_module.call_count)
        for boot_step in x.boot_steps:
            self.assertEqual(boot_step.namespace, x)
        self.assertIsInstance(x.boot_steps[0], B)
        self.assertIsInstance(x.boot_steps[1], C)
        self.assertIsInstance(x.boot_steps[2], A)
        self.assertIsInstance(x.boot_steps[3], D)
        self.assertIs(x['A'], A)

    def test_import_module(self):
        # Default import_module is a plain __import__-style lookup.
        x = self.NS(app=self.app)
        import os
        self.assertIs(x.import_module('os'), os)

    def test_find_last_but_no_components(self):
        # _find_last returns None when no component is marked last=True.
        class MyNS(bootsteps.Namespace):
            name = 'qwejwioqjewoqiej'

        x = MyNS(app=self.app)
        x.apply(self)
        self.assertIsNone(x._find_last())
| |
#!/usr/bin/python
from collections import OrderedDict
import time
import gevent
import random
from dotstar import Adafruit_DotStar
import sys
import alsaaudio
import wave
import struct
import math
# Fire2012-style tuning knobs: chance (out of 255) that a pixel cools /
# that a new spark is seeded each cycle.
COOLING = 60
SPARKING = 60
numpixels = 120  # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.begin()  # Initialize pins for output
# NOTE(review): 255 is actually full brightness for DotStar (the original
# "~1/4 duty cycle" comment matched a lower value such as 64).
strip.setBrightness(255)
# RMS loudness threshold above which the audio loop seeds extra sparks.
hi_thres = 50
mydelay = 0.04
# Candidate sleep intervals; loops pick one at random per pass.
mydelays = [0.01, 0.02, 0.03, 0.1]
# Per-pixel heat values (0-255 scale), randomly seeded low.
heat = []
for x in range(numpixels):
    heat.append(random.randint(0, 50))
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
#fire_colors = [ 0x000000, 0xFF0000, 0xFFFF00, 0xFFFFFF ]
#fire_colors = [ "#000500", "#00FF00", "#48FF00" ] # Red Yellow White Fire
# Palette anchor colors; gradients are built between each adjacent pair.
fire_colors = [ "#000000", "#FF0000", "#CC00CC", "#66CC00", "#33FFFF", "#00FF00", "#330099", "#FFFF00", "#0000FF", "#FF0000" ] # Chromomancer!
# Number of interpolated steps per gradient segment.
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
# When True, FirePlace() also seeds random sparks on its own.
gsparkitup = False
def main():
    """Precompute the color gradient table, then run the LED renderer and
    the audio player concurrently as gevent greenlets."""
    global strip
    global fire_colors
    global my_colors
    global colors_dict
    global allcolors
    # Build a gradient between each adjacent pair of palette colors.
    for idx in range(len(fire_colors) - 1):
        start_color = fire_colors[idx]
        end_color = fire_colors[idx + 1]
        print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (start_color, hex_to_RGB(start_color), end_color, hex_to_RGB(end_color), num_colors))
        gradient = linear_gradient(start_color, end_color, num_colors)
        my_colors.append(gradient['hex'])
        colors_dict[start_color + "_2_" + end_color] = gradient['hex']
    # Flatten every gradient (in insertion order) into one lookup table.
    for key in colors_dict:
        for hex_color in colors_dict[key]:
            # print("Color: %s" % hex_to_RGB(hex_color))
            allcolors.append(hex_color)
    gevent.joinall([
        gevent.spawn(FirePlace),
        gevent.spawn(PlayFire),
    ])
def PlayFire():
    """Loop the crackling-fire WAV out through ALSA, sparking extra LED
    heat whenever a chunk's RMS loudness exceeds hi_thres.

    Runs forever (restarting the file at EOF) until KeyboardInterrupt,
    which blanks the strip and exits.
    """
    global heat
    global numpixels
    channels = 2
    rate = 44100
    size = 1024  # bytes per read/playback chunk
    out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
    out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    out_stream.setchannels(channels)
    out_stream.setrate(rate)
    out_stream.setperiodsize(size)
    try:
        while True:
            sndstream = open("crackfire.wav", "rb")
            data = sndstream.read(size)
            while data:
                out_stream.write(data)
                gevent.sleep(0.001)
                data = sndstream.read(size)
                # BUG FIX: guard against the empty final read -- rms()
                # divides by the sample count, so rms(b"") raised
                # ZeroDivisionError every time the file reached EOF.
                if data and rms(data) > hi_thres:
                    # Loud chunk: add heat to one random pixel.
                    rval = random.randint(0, numpixels - 1)
                    heat[rval] = heat[rval] + random.randint(160, 255)
                gevent.sleep(0.001)
            sndstream.close()
            gevent.sleep(0.001)
    except KeyboardInterrupt:
        print("")
        print("exiting and shutting down strip")
        setAllLEDS(strip, [0x000000])
        sys.exit(0)
def FirePlace():
    """Continuously render a fire effect on the LED strip.

    Each pixel carries a "heat" value (0-255).  Every cycle applies random
    cooling, a downward diffusion pass, optional random sparks, and then
    maps each heat value through the precomputed gradient onto the strip.
    Runs until KeyboardInterrupt, which blanks the strip and exits.
    """
    global gsparkitup
    global numpixels
    global SPARKING
    global COOLING
    global strip
    global allcolors
    global heat
    #for i in range(numpixels):
    #    heat[i] = 0
    # Heat is a value for each pixel that is 0 to 255
    # Every cycle there will be some random cooling
    # Consider adding a degree of random whether a pixel cools
    try:
        while True:
            # Cooling pass: each pixel has a COOLING/255 chance of losing
            # a random amount of heat.
            for i in range(numpixels):
                if random.randint(0, 255) < COOLING:
                    tval = heat[i] - random.randint(0, ((COOLING * 10) / numpixels) + 2)
                    heat[i] = tval
            gevent.sleep(random.choice(mydelays))
            # This is supposed to be a diffusing effect I think
            # (heat[k-2] appears twice, matching the classic Fire2012
            # blend -- presumably intentional; confirm against original).
            k = numpixels - 3
            while k > 2:
                if random.randint(0, 255) * 2 < COOLING:
                    tval = (heat[k - 1] + heat[k - 2] + heat[k - 2]) / 3
                    heat[k] = tval
                k = k - 1
            gevent.sleep(random.choice(mydelays))
            # Now let's see if we set any sparks!
            if gsparkitup == True:
                if random.randint(0, 255) < SPARKING:
                    rval = random.randint(0, numpixels - 1)
                    sparkval = random.randint(160, 255)
                    print("Sparking LED %s to %s" % (rval, sparkval))
                    heat[rval] = heat[rval] + random.randint(160,255)
            # Now, actually set the pixels based on a scaled representation
            # of all pixels: clamp heat to 0..255, then scale it into an
            # index of the flattened gradient table.
            for j in range(numpixels):
                if heat[j] > 255:
                    heat[j] = 255
                if heat[j] < 0:
                    heat[j] = 0
                newcolor = int((heat[j] * len(allcolors)) / 256)
                # print("Pixel: %s has a heat value of %s and a newcolor idx of %s" % (j, heat[j], newcolor))
                # print("Setting Color: %s" % hex_to_RGB(allcolors[newcolor]))
                #
                # print("Setting color to: 0x%0.2X" % int(allcolors[newcolor].replace("#", ''), 16))
                strip.setPixelColor(j, int(allcolors[newcolor].replace("#", ''), 16))
            gevent.sleep(random.choice(mydelays))
            strip.show()
            gevent.sleep(random.choice(mydelays))
    except KeyboardInterrupt:
        print("")
        print("exiting and shutting down strip")
        setAllLEDS(strip, [0x000000])
        sys.exit(0)
def color_dict(gradient):
    """Convert a list of [R, G, B] triples into a dictionary holding the
    hex strings plus separate channel lists, for use by the graphing
    helpers defined later in this file."""
    reds = [rgb[0] for rgb in gradient]
    greens = [rgb[1] for rgb in gradient]
    blues = [rgb[2] for rgb in gradient]
    return {
        "hex": [RGB_to_hex(rgb) for rgb in gradient],
        "r": reds,
        "g": greens,
        "b": blues,
    }
def rms(frame):
    """Return a scaled RMS amplitude for a frame of 16-bit PCM audio.

    Args:
        frame: raw bytes containing native-endian signed 16-bit samples.

    Returns:
        float: root-mean-square of the normalized samples, scaled by
        10000 so typical audio lands in a convenient integer-ish range.
    """
    SHORT_NORMALIZE = 1.0 / 32768.0  # map int16 onto [-1.0, 1.0)
    swidth = 2  # bytes per 16-bit sample
    # Floor division: struct repeat counts must be integers (true
    # division yields a float that only worked by accident via "%d").
    count = len(frame) // swidth
    shorts = struct.unpack("%dh" % count, frame)
    sum_squares = 0.0
    for sample in shorts:
        n = sample * SHORT_NORMALIZE
        sum_squares += n * n
    return math.pow(sum_squares / count, 0.5) * 10000
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
    """Return a color_dict of ``n`` colors linearly interpolated between
    two hex colors.  ``start_hex`` and ``finish_hex`` must be full
    six-digit color strings including the leading "#"."""
    start_rgb = hex_to_RGB(start_hex)
    finish_rgb = hex_to_RGB(finish_hex)
    # The gradient begins with the starting color itself.
    gradient = [start_rgb]
    # Interpolate each channel at evenly spaced fractions of the range.
    for step in range(1, n):
        fraction = float(step) / (n - 1)
        interpolated = [
            int(start_rgb[ch] + fraction * (finish_rgb[ch] - start_rgb[ch]))
            for ch in range(3)
        ]
        gradient.append(interpolated)
    return color_dict(gradient)
def hex_to_RGB(hex):
    """ "#FFFFFF" -> [255,255,255] """
    # Each channel is a two-digit pair after the leading "#", parsed
    # base-16.
    return [int(hex[pos:pos + 2], 16) for pos in (1, 3, 5)]
def RGB_to_hex(RGB):
    """ [255,255,255] -> "#ffffff" (lowercase hex digits) """
    # Components must be integers for hex formatting to make sense.
    # "%02x" zero-pads single digits, replacing the original manual
    # "0{0:x}" special case for v < 16.
    return "#" + "".join("%02x" % int(v) for v in RGB)
def setAllLEDS(strip, colorlist):
    """Paint the whole strip, cycling through ``colorlist``, and push
    the result to the hardware.  Uses the module-global ``numpixels``."""
    numcolors = len(colorlist)
    for pixel in range(numpixels):
        strip.setPixelColor(pixel, colorlist[pixel % numcolors])
    strip.show()
# Script entry point: run the animation driver when executed directly.
if __name__ == "__main__":
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Hyper-V classes to be used in testing.
"""
import sys
import time
from nova import exception
from nova.virt.hyperv import constants
from nova.virt.hyperv import volumeutilsV2
from xml.etree import ElementTree
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
class HyperVUtils(object):
    """Helper wrapping the Hyper-V WMI namespaces for use in tests.

    WMI connections are created lazily through the ``_conn*`` properties
    so no connection is opened until first use.
    """

    def __init__(self):
        # Backing fields for the lazily-created WMI connections below.
        self.__conn = None
        self.__conn_v2 = None
        self.__conn_cimv2 = None
        self.__conn_wmi = None
        self.__conn_storage = None
        # NOTE(review): reading these properties here eagerly opens the
        # storage and wmi connections at construction time.
        self._volumeutils = volumeutilsV2.VolumeUtilsV2(
            self._conn_storage, self._conn_wmi)

    @property
    def _conn(self):
        # Legacy (v1) virtualization namespace.
        if self.__conn is None:
            self.__conn = wmi.WMI(moniker='//./root/virtualization')
        return self.__conn

    @property
    def _conn_v2(self):
        # v2 virtualization namespace.
        if self.__conn_v2 is None:
            self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
        return self.__conn_v2

    @property
    def _conn_cimv2(self):
        if self.__conn_cimv2 is None:
            self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
        return self.__conn_cimv2

    @property
    def _conn_wmi(self):
        if self.__conn_wmi is None:
            self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
        return self.__conn_wmi

    @property
    def _conn_storage(self):
        if self.__conn_storage is None:
            storage_namespace = '//./Root/Microsoft/Windows/Storage'
            self.__conn_storage = wmi.WMI(moniker=storage_namespace)
        return self.__conn_storage

    def create_vhd(self, path):
        """Create a small (3 MB max) dynamic VHD at ``path``.

        Raises:
            Exception: if the WMI job fails or returns a nonzero code.
        """
        image_service = self._conn.query(
            "Select * from Msvm_ImageManagementService")[0]
        (job, ret_val) = image_service.CreateDynamicVirtualHardDisk(
            Path=path, MaxInternalSize=3 * 1024 * 1024)
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            # Asynchronous job: poll until it finishes.
            success = self._check_job_status(job)
        else:
            success = (ret_val == 0)
        if not success:
            raise Exception('Failed to create Dynamic disk %s with error %d'
                            % (path, ret_val))

    def _check_job_status(self, jobpath):
        """Poll WMI job state for completion"""
        job_wmi_path = jobpath.replace('\\', '/')
        job = wmi.WMI(moniker=job_wmi_path)
        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            # Re-fetch the object: WMI objects are snapshots, not live.
            job = wmi.WMI(moniker=job_wmi_path)
        return job.JobState == constants.WMI_JOB_STATE_COMPLETED

    def _get_vm(self, vm_name, conn=None):
        """Return the Msvm_ComputerSystem instance for ``vm_name``.

        Raises:
            exception.InstanceNotFound: if no VM with that name exists.
        """
        if conn is None:
            conn = self._conn
        vml = conn.Msvm_ComputerSystem(ElementName=vm_name)
        if not len(vml):
            raise exception.InstanceNotFound(instance=vm_name)
        return vml[0]

    def remote_vm_exists(self, server, vm_name):
        # Connect to the v1 virtualization namespace on a remote host.
        conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
        return self._vm_exists(conn, vm_name)

    def vm_exists(self, vm_name):
        return self._vm_exists(self._conn, vm_name)

    def _vm_exists(self, conn, vm_name):
        return len(conn.Msvm_ComputerSystem(ElementName=vm_name)) > 0

    def _get_vm_summary(self, vm_name):
        """Return the VM summary object consumed by get_vm_uptime() and
        get_vm_state() below (fields 100 and 105 are requested)."""
        vm = self._get_vm(vm_name)
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        return vs_man_svc.GetSummaryInformation([100, 105],
                                                settings_paths)[1][0]

    def get_vm_uptime(self, vm_name):
        return self._get_vm_summary(vm_name).UpTime

    def get_vm_state(self, vm_name):
        return self._get_vm_summary(vm_name).EnabledState

    def set_vm_state(self, vm_name, req_state):
        self._set_vm_state(self._conn, vm_name, req_state)

    def _set_vm_state(self, conn, vm_name, req_state):
        """Request a VM state change and wait for the job to complete."""
        vm = self._get_vm(vm_name, conn)
        (job, ret_val) = vm.RequestStateChange(req_state)
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            #Invalid state for current operation. Typically means it is
            #already in the state requested
            success = True
        if not success:
            # NOTE(review): ``_`` is not imported in this chunk; this
            # raise path may itself fail with a NameError — confirm.
            raise Exception(_("Failed to change vm state of %(vm_name)s"
                              " to %(req_state)s") % locals())

    def get_vm_disks(self, vm_name):
        return self._get_vm_disks(self._conn, vm_name)

    def _get_vm_disks(self, conn, vm_name):
        """Return (vhd_paths, physical_drive_paths, dvd_paths) for a VM."""
        vm = self._get_vm(vm_name, conn)
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        # Virtual hard disks: the Connection field holds the file paths.
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        for disk in disks:
            disk_files.extend([c for c in disk.Connection])
        # Passthrough physical drives: path comes from HostResource.
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volume_drives = []
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            volume_drives.append(drive_path)
        # Attached DVD/ISO images.
        dvds = [r for r in rasds
                if r.ResourceSubType == 'Microsoft Virtual CD/DVD Disk']
        dvd_files = []
        for dvd in dvds:
            dvd_files.extend([c for c in dvd.Connection])
        return (disk_files, volume_drives, dvd_files)

    def remove_remote_vm(self, server, vm_name):
        conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
        conn_cimv2 = wmi.WMI(moniker='//' + server + '/root/cimv2')
        self._remove_vm(vm_name, conn, conn_cimv2)

    def remove_vm(self, vm_name):
        self._remove_vm(vm_name, self._conn, self._conn_cimv2)

    def _remove_vm(self, vm_name, conn, conn_cimv2):
        """Stop and destroy a VM, then delete its VHD/DVD backing files."""
        vm = self._get_vm(vm_name, conn)
        vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
        #Stop the VM first.
        self._set_vm_state(conn, vm_name, 3)
        (disk_files, volume_drives, dvd_files) = self._get_vm_disks(conn,
                                                                    vm_name)
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job)
        elif ret_val == 0:
            success = True
        # NOTE(review): ``success`` is never assigned when ret_val is any
        # other error code, so the check below would raise an
        # UnboundLocalError instead of the intended Exception — confirm.
        if not success:
            raise Exception(_('Failed to destroy vm %s') % vm_name)
        #Delete associated vhd disk files.
        for disk in disk_files + dvd_files:
            # Single quotes are doubled to escape them in the WQL query.
            vhd_file = conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            vhd_file.Delete()

    def _get_target_iqn(self, volume_id):
        # iSCSI qualified name convention used by OpenStack targets.
        return 'iqn.2010-10.org.openstack:volume-' + volume_id

    def logout_iscsi_volume_sessions(self, volume_id):
        """Log out of any iSCSI sessions attached to the given volume."""
        target_iqn = self._get_target_iqn(volume_id)
        if (self.iscsi_volume_sessions_exist(volume_id)):
            self._volumeutils.logout_storage_target(target_iqn)

    def iscsi_volume_sessions_exist(self, volume_id):
        target_iqn = self._get_target_iqn(volume_id)
        return len(self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass \
            WHERE TargetName='" + target_iqn + "'")) > 0

    def get_vm_count(self):
        # Exclude the host's own "Microsoft Hosting Computer System" row.
        return len(self._conn.query(
            "Select * from Msvm_ComputerSystem where Description "
            "<> 'Microsoft Hosting Computer System'"))

    def get_vm_snapshots_count(self, vm_name):
        # SettingType = 5 filters for snapshot setting data (per this
        # method's intent — confirm against the Msvm_* documentation).
        return len(self._conn.query(
            "Select * from Msvm_VirtualSystemSettingData where \
            SettingType = 5 and SystemName = '" + vm_name + "'"))

    def get_vhd_parent_path(self, vhd_path):
        """Return the parent path of a differencing VHD, or None."""
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        (vhd_info, job_path, ret_val) = \
            image_man_svc.GetVirtualHardDiskInfo(vhd_path)
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job_path)
        else:
            success = (ret_val == 0)
        if not success:
            raise Exception(_("Failed to get info for disk %s") %
                            (vhd_path))
        base_disk_path = None
        # The disk info comes back as an XML document of PROPERTY nodes.
        et = ElementTree.fromstring(vhd_info)
        for item in et.findall("PROPERTY"):
            if item.attrib["NAME"] == "ParentPath":
                base_disk_path = item.find("VALUE").text
                break
        return base_disk_path
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from functools import cmp_to_key
import pydoc
from docutils import core
from docutils import nodes
import six
from sphinx.util import compat
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import support
# Map release version numbers to their OpenStack code names, used when
# rendering "available since" strings in the generated documentation.
_CODE_NAMES = {'2013.1': 'Grizzly',
               '2013.2': 'Havana',
               '2014.1': 'Icehouse',
               '2014.2': 'Juno',
               '2015.1': 'Kilo',
               '5.0.0': 'Liberty',
               '6.0.0': 'Mitaka',
               '7.0.0': 'Newton'}

# Registry of resource-type name -> list of plugin classes, populated by
# _load_all_resources() during Sphinx setup().
all_resources = {}
class integratedrespages(nodes.General, nodes.Element):
    """Placeholder doctree node for the 'integratedrespages' directive."""
    pass
class unsupportedrespages(nodes.General, nodes.Element):
    """Placeholder doctree node for the 'unsupportedrespages' directive."""
    pass
class contribresourcepages(nodes.General, nodes.Element):
    """Placeholder doctree node for the 'contribrespages' directive."""
    pass
class ResourcePages(compat.Directive):
    """Base Sphinx directive rendering documentation pages for every
    registered Heat resource type matching an optional name prefix.

    Subclasses narrow the selection by overriding ``path()`` (module
    prefix) and ``statuses()`` (support statuses to include).
    """
    has_content = False
    required_arguments = 0
    # A single optional argument: the resource-type name prefix.
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {}

    def path(self):
        """Module-path prefix to filter on; None means no filtering."""
        return None

    def statuses(self):
        """Support statuses to document; defaults to all of them."""
        return support.SUPPORT_STATUSES

    def run(self):
        """Build and return the doctree nodes for all selected resources."""
        prefix = self.arguments and self.arguments.pop() or None
        content = []
        for resource_type, resource_classes in _filter_resources(
                prefix, self.path(), self.statuses()):
            for resource_class in resource_classes:
                self.resource_type = resource_type
                self.resource_class = resource_class
                section = self._section(content, resource_type, '%s')
                self.props_schemata = properties.schemata(
                    self.resource_class.properties_schema)
                self.attrs_schemata = attributes.schemata(
                    self.resource_class.attributes_schema)
                # NOTE(prazumovsky): Adding base_attributes_schema dict to
                # Resource class should means adding new attributes from this
                # dict to documentation of each resource, else there is no
                # chance to learn about base attributes.
                self.attrs_schemata.update(
                    self.resource_class.base_attributes_schema)
                self.update_policy_schemata = properties.schemata(
                    self.resource_class.update_policy_schema)
                self._status_str(resource_class.support_status, section)
                cls_doc = pydoc.getdoc(resource_class)
                if cls_doc:
                    # allow for rst in the class comments
                    cls_nodes = core.publish_doctree(cls_doc).children
                    section.extend(cls_nodes)
                self.contribute_properties(section)
                self.contribute_attributes(section)
                self.contribute_update_policy(section)
                self.contribute_hot_syntax(section)
        return content

    def _version_str(self, version):
        """Return 'version (CodeName)' when the code name is known."""
        if version in _CODE_NAMES:
            return _("%(version)s (%(code)s)") % {'version': version,
                                                  'code': _CODE_NAMES[version]}
        else:
            return version

    def _status_str(self, support_status, section):
        """Append notes describing the (possibly chained) support status."""
        while support_status is not None:
            sstatus = support_status.to_dict()
            if sstatus['status'] is support.SUPPORTED:
                msg = _('Available')
            else:
                msg = sstatus['status']
            if sstatus['version'] is not None:
                msg = _('%(msg)s since %(version)s') % {
                    'msg': msg,
                    'version': self._version_str(sstatus['version'])}
            if sstatus['message'] is not None:
                msg = _('%(msg)s - %(status_msg)s') % {
                    'msg': msg,
                    'status_msg': sstatus['message']}
            # Plain "supported, no version" is the default state; no note.
            if not (sstatus['status'] == support.SUPPORTED and
                    sstatus['version'] is None):
                para = nodes.paragraph('', msg)
                note = nodes.note('', para)
                section.append(note)
            support_status = support_status.previous_status

    def _section(self, parent, title, id_pattern):
        """Append and return a new titled section under ``parent``.

        ``id_pattern`` must contain '%s', filled with the resource type.
        """
        id = id_pattern % self.resource_type
        section = nodes.section(ids=[id])
        parent.append(section)
        title = nodes.title('', title)
        section.append(title)
        return section

    def _prop_syntax_example(self, prop):
        """Return an example value string for a property schema."""
        if not prop:
            return 'Value'
        if prop.type == properties.Schema.LIST:
            def schema(i):
                return prop.schema[i] if prop.schema else None
            # Show two example elements followed by an ellipsis.
            sub_type = [self._prop_syntax_example(schema(i))
                        for i in range(2)]
            return '[%s, %s, ...]' % tuple(sub_type)
        elif prop.type == properties.Schema.MAP:
            def sub_props():
                for sub_key, sub_value in prop.schema.items():
                    if sub_value.implemented:
                        yield '"%s": %s' % (
                            sub_key, self._prop_syntax_example(sub_value))
            return '{%s}' % (', '.join(sub_props()) if prop.schema else '...')
        else:
            return prop.type

    def contribute_hot_syntax(self, parent):
        """Append a literal HOT template example for this resource."""
        section = self._section(parent, _('HOT Syntax'), '%s-hot')
        props = []
        for prop_key in sorted(self.props_schemata.keys()):
            prop = self.props_schemata[prop_key]
            # Only implemented, currently-supported properties are shown.
            if (prop.implemented
                    and prop.support_status.status == support.SUPPORTED):
                props.append('%s: %s' % (prop_key,
                                         self._prop_syntax_example(prop)))
        props_str = ''
        if props:
            props_str = '''\n    properties:
      %s''' % ('\n      '.join(props))
        template = '''heat_template_version: 2015-04-30
...
resources:
  ...
  the_resource:
    type: %s%s''' % (self.resource_type, props_str)
        block = nodes.literal_block(template, template, language="yaml")
        section.append(block)

    @staticmethod
    def cmp_prop(x, y):
        """Comparator ordering (key, prop) pairs: supported first,
        deprecated last, otherwise by status then by key."""
        x_key, x_prop = x
        y_key, y_prop = y
        if x_prop.support_status.status == y_prop.support_status.status:
            return (x_key > y_key) - (x_key < y_key)
        x_status = x_prop.support_status.status
        y_status = y_prop.support_status.status
        if x_status == support.SUPPORTED:
            return -1
        if x_status == support.DEPRECATED:
            return 1
        return (x_status > y_status) - (x_status < y_status)

    def contribute_property(self, parent, prop_key, prop, upd_para=None,
                            id_pattern_prefix=None):
        """Append documentation for one property, recursing into nested
        map/list sub-schemas."""
        if not id_pattern_prefix:
            id_pattern_prefix = '%s-prop'
        id_pattern = id_pattern_prefix + '-' + prop_key
        definition = self._section(parent, prop_key, id_pattern)
        self._status_str(prop.support_status, definition)
        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return
        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)
        type = nodes.paragraph('', _('%s value expected.') % prop.type)
        definition.append(type)
        # Nested properties inherit the parent's update-behaviour note.
        if upd_para is not None:
            definition.append(upd_para)
        else:
            if prop.update_allowed:
                upd_para = nodes.paragraph(
                    '', _('Can be updated without replacement.'))
                definition.append(upd_para)
            elif prop.immutable:
                upd_para = nodes.paragraph('', _('Updates are not supported. '
                                                 'Resource update will fail on'
                                                 ' any attempt to update this '
                                                 'property.'))
                definition.append(upd_para)
            else:
                upd_para = nodes.paragraph('', _('Updates cause replacement.'))
                definition.append(upd_para)
        if prop.default is not None:
            para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
            definition.append(para)
        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)
        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema
        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema
        if sub_schema:
            # Hidden sub-properties are skipped entirely.
            for _key, _prop in sorted(sub_schema.items(),
                                      key=cmp_to_key(self.cmp_prop)):
                if _prop.support_status.status != support.HIDDEN:
                    indent = nodes.block_quote()
                    definition.append(indent)
                    self.contribute_property(
                        indent, _key, _prop, upd_para, id_pattern)

    def contribute_properties(self, parent):
        """Append 'Required Properties' and 'Optional Properties' sections."""
        if not self.props_schemata:
            return
        props = dict((k, v) for k, v in self.props_schemata.items()
                     if v.support_status.status != support.HIDDEN)
        required_props = dict((k, v) for k, v in props.items()
                              if v.required)
        if required_props:
            section = self._section(
                parent, _('Required Properties'), '%s-props-req')
            for prop_key, prop in sorted(required_props.items(),
                                         key=cmp_to_key(self.cmp_prop)):
                self.contribute_property(section, prop_key, prop)
        optional_props = dict((k, v) for k, v in props.items()
                              if not v.required)
        if optional_props:
            section = self._section(
                parent, _('Optional Properties'), '%s-props-opt')
            for prop_key, prop in sorted(optional_props.items(),
                                         key=cmp_to_key(self.cmp_prop)):
                self.contribute_property(section, prop_key, prop)

    def contribute_attributes(self, parent):
        """Append an 'Attributes' section for non-hidden attributes."""
        if not self.attrs_schemata:
            return
        section = self._section(parent, _('Attributes'), '%s-attrs')
        for prop_key, prop in sorted(self.attrs_schemata.items()):
            if prop.support_status.status != support.HIDDEN:
                description = prop.description
                attr_section = self._section(
                    section, prop_key, '%s-attr-' + prop_key)
                self._status_str(prop.support_status, attr_section)
                if description:
                    def_para = nodes.paragraph('', description)
                    attr_section.append(def_para)

    def contribute_update_policy(self, parent):
        """Append an 'update_policy' section when the resource has one."""
        if not self.update_policy_schemata:
            return
        section = self._section(parent, _('update_policy'), '%s-updpolicy')
        for _key, _prop in sorted(self.update_policy_schemata.items(),
                                  key=cmp_to_key(self.cmp_prop)):
            self.contribute_property(section, _key, _prop)
class IntegrateResourcePages(ResourcePages):
    """Render pages for in-tree resources with SUPPORTED status only."""

    def path(self):
        return 'heat.engine.resources'

    def statuses(self):
        return [support.SUPPORTED]
class UnsupportedResourcePages(ResourcePages):
    """Render pages for in-tree resources with any non-SUPPORTED status."""

    def path(self):
        return 'heat.engine.resources'

    def statuses(self):
        return [s for s in support.SUPPORT_STATUSES if s != support.SUPPORTED]
class ContribResourcePages(ResourcePages):
    """Render pages for contributed (out-of-tree plugin) resources."""

    def path(self):
        return 'heat.engine.plugins'
def _filter_resources(prefix=None, path=None, statuses=None):
    """Select registered resource classes by name prefix, module path and
    support status.

    Returns a sorted list of (resource_name, [resource_classes]) pairs
    drawn from the module-level ``all_resources`` registry.
    """
    statuses = statuses or []

    def _selected(cls):
        # Hidden resources are never documented.
        if cls.support_status.status == support.HIDDEN:
            return False
        if path is not None and not cls.__module__.startswith(path):
            return False
        return cls.support_status.status in statuses

    filtered_resources = {}
    for name in sorted(all_resources.keys()):
        if prefix is not None and not name.startswith(prefix):
            continue
        for cls in all_resources.get(name):
            if _selected(cls):
                filtered_resources.setdefault(name, []).append(cls)
    return sorted(six.iteritems(filtered_resources))
def _load_all_resources():
    """Populate the module-level ``all_resources`` registry from the
    heat resource plugin manager."""
    manager = plugin_manager.PluginManager('heat.engine.resources')
    resource_mapping = plugin_manager.PluginMapping('resource')
    for name, cls in resource_mapping.load_all(manager):
        all_resources.setdefault(name, []).append(cls)
def link_resource(app, env, node, contnode):
    """Sphinx 'missing-reference' handler: resolve a resource-type name
    (case-insensitively) to a link into the template-guide pages.

    Returns a reference node for the first matching resource, or None
    (implicitly) when no registered resource matches.
    """
    target = node.attributes['reftarget'].lower()
    for resource_name in all_resources:
        if resource_name.lower() != target:
            continue
        refnode = nodes.reference('', '', internal=True)
        refnode['reftitle'] = resource_name
        # AWS-compatible resources live on the CFN page; the rest are on
        # the OpenStack page.
        if resource_name.startswith('AWS'):
            source = 'template_guide/cfn'
        else:
            source = 'template_guide/openstack'
        uri = app.builder.get_relative_uri(
            node.attributes['refdoc'], source)
        refnode['refuri'] = '%s#%s' % (uri, resource_name)
        refnode.append(contnode)
        return refnode
def setup(app):
    """Sphinx extension entry point: load the resource plugins and
    register the custom nodes, directives and reference resolver."""
    _load_all_resources()
    app.add_node(integratedrespages)
    app.add_directive('integratedrespages', IntegrateResourcePages)
    app.add_node(unsupportedrespages)
    app.add_directive('unsupportedrespages', UnsupportedResourcePages)
    app.add_node(contribresourcepages)
    app.add_directive('contribrespages', ContribResourcePages)
    # Resolve references to resource names that Sphinx could not find.
    app.connect('missing-reference', link_resource)
| |
# coding=utf-8
# Copyright 2022 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation lib for the Liver Tumor Segmentation (LiTS) dataset.
A set of data augmentation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.contrib import image as contrib_image
def _truncated_normal(mean, stddev):
    """Sample a scalar from N(mean, stddev), clipped to mean +/- 2*stddev."""
    v = tf.random.normal(shape=[], mean=mean, stddev=stddev)
    v = tf.clip_by_value(v, -2 * stddev + mean, 2 * stddev + mean)
    return v
def _rand_noise(noise_mean, noise_dev, scale, shape):
    """Generate random noise given a particular scale and shape."""
    # Sample at a coarser resolution (shape // scale), then upsample.
    noise_shape = [x // scale for x in shape]
    noise_shape = [1 if x == 0 else x for x in noise_shape]
    noise = tf.random.normal(
        shape=noise_shape, mean=noise_mean, stddev=noise_dev)
    noise = tf.clip_by_value(
        noise, noise_mean - 2.0 * noise_dev, noise_mean + 2.0 * noise_dev)
    if scale != 1:
        # resize_images works on the trailing two dimensions only, so
        # resize, transpose, resize again to cover all three axes.
        noise = tf.image.resize_images(
            noise, [shape[0], shape[1]])
        noise = tf.transpose(noise, [0, 2, 1])
        noise = tf.image.resize_images(
            noise, [shape[0], shape[2]])
        noise = tf.transpose(noise, [0, 2, 1])
    return noise
def projective_transform(
        image, label, reso, image_translate_ratio, image_transform_ratio,
        sampled_2d_slices=False):
    """Apply projective transformation on image and label.

    Args:
        image: image tensor, transformed with bilinear interpolation.
        label: label tensor, transformed with the same matrices using
            nearest-neighbour interpolation so class ids stay discrete.
        reso: resolution value used to scale the random translations.
        image_translate_ratio: stddev of random translation (fraction of
            ``reso``).
        image_transform_ratio: stddev of the random projective
            coefficients.
        sampled_2d_slices: if True, only transform in the plane with
            static_axis == 2.

    Returns:
        (image, label) tuple after transformation.
    """
    # Skip entirely when both perturbation magnitudes are ~zero.
    if image_translate_ratio < 0.000001 and (
            image_transform_ratio < 0.000001):
        return image, label

    def _projective_transform(data, proj_matrix, static_axis, interpolation):
        """Apply projective transformation."""
        if static_axis == 2:
            data = contrib_image.transform(data, proj_matrix, interpolation)
        elif static_axis == 1:
            # Transpose so the static axis is last, transform, undo.
            data = tf.transpose(data, [0, 2, 1])
            data = contrib_image.transform(data, proj_matrix, interpolation)
            data = tf.transpose(data, [0, 2, 1])
        else:
            data = tf.transpose(data, [2, 1, 0])
            data = contrib_image.transform(data, proj_matrix, interpolation)
            data = tf.transpose(data, [2, 1, 0])
        return data

    for static_axis in [0, 1, 2]:
        if sampled_2d_slices and static_axis != 2:
            continue
        # 8-parameter projective matrix [a0 a1 a2 b0 b1 b2 c0 c1],
        # sampled around the identity transform (a0 = b1 = 1, rest 0).
        a0 = _truncated_normal(1.0, image_transform_ratio)
        a1 = _truncated_normal(0.0, image_transform_ratio)
        a2 = _truncated_normal(
            0.0, image_translate_ratio * reso)
        b0 = _truncated_normal(0.0, image_transform_ratio)
        b1 = _truncated_normal(1.0, image_transform_ratio)
        b2 = _truncated_normal(
            0.0, image_translate_ratio * reso)
        c0 = _truncated_normal(0.0, image_transform_ratio)
        c1 = _truncated_normal(0.0, image_transform_ratio)
        proj_matrix = [a0, a1, a2, b0, b1, b2, c0, c1]
        image = _projective_transform(image, proj_matrix, static_axis,
                                      'BILINEAR')
        label = _projective_transform(label, proj_matrix, static_axis,
                                      'NEAREST')
    return image, label
def maybe_add_noise(image, noise_shape, scale0, scale1,
                    image_noise_probability, image_noise_ratio):
    """Add noise at two scales."""
    # Skip when noise is effectively disabled (probability or magnitude
    # ~zero).
    if image_noise_probability < 0.000001 or (
            image_noise_ratio < 0.000001):
        return image
    noise_list = []
    for scale in [scale0, scale1]:
        # Randomize the noise magnitude per scale, up to the ratio cap.
        rand_image_noise_ratio = tf.random.uniform(
            shape=[], minval=0.0, maxval=image_noise_ratio)
        noise_list.append(
            _rand_noise(0.0, rand_image_noise_ratio, scale, noise_shape))
    # One coin flip gates both noise scales together.
    skip_noise = tf.greater(tf.random.uniform([]), image_noise_probability)
    image = tf.cond(skip_noise,
                    lambda: image, lambda: image + noise_list[0])
    image = tf.cond(skip_noise,
                    lambda: image, lambda: image + noise_list[1])
    return image
def _gen_rand_mask(ratio_mean, ratio_stddev, scale, shape, smoothness=0):
    """Generate a binary mask."""
    scale = max(scale, 1)
    # Randomly choose the fraction of voxels the mask selects, clipped
    # to ratio_mean +/- 2*stddev and to [0, 1].
    ratio = tf.random.normal(
        shape=[], mean=ratio_mean, stddev=ratio_stddev)
    low_bound = tf.maximum(0.0, ratio_mean - 2 * ratio_stddev)
    up_bound = tf.minimum(1.0, ratio_mean + 2 * ratio_stddev)
    percentil_q = tf.cast(
        100.0 * tf.clip_by_value(ratio, low_bound, up_bound),
        tf.int32)
    pattern = _rand_noise(0.0, 1.0, scale, shape)
    if smoothness > 0:
        # Box-blur the pattern with an all-ones 3D kernel; the kernel
        # size is forced to the nearest odd number.
        smoothness = int(smoothness) // 2 * 2 + 1
        pattern = tf.expand_dims(tf.expand_dims(pattern, 0), -1)
        pattern = tf.nn.conv3d(
            pattern, filter=tf.ones([smoothness, smoothness, smoothness, 1, 1]),
            strides=[1, 1, 1, 1, 1], padding='SAME', dilations=[1, 1, 1, 1, 1])
        pattern = tf.reduce_sum(pattern, 0)
        pattern = tf.reduce_sum(pattern, -1)
    # Threshold at the sampled percentile so roughly `ratio` of voxels
    # end up True.
    thres = tfp.stats.percentile(pattern, q=percentil_q)
    rand_mask = tf.less(pattern, thres)
    return rand_mask
def maybe_gen_fake_data_based_on_real_data(
        image, label, reso, min_fake_lesion_ratio, gen_fake_probability):
    """Remove real lesion and synthesize lesion."""
    # TODO(lehou): Replace magic numbers with flag variables.
    gen_prob_indicator = tf.random_uniform(
        shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
    # Label semantics: < 0.5 background, > 1.5 lesion, otherwise liver.
    background_mask = tf.less(label, 0.5)
    lesion_mask = tf.greater(label, 1.5)
    liver_mask = tf.logical_not(tf.logical_or(background_mask, lesion_mask))
    liver_intensity = tf.boolean_mask(image, liver_mask)
    lesion_intensity = tf.boolean_mask(image, lesion_mask)
    # Average intensity gap between liver and lesion, slightly amplified.
    intensity_diff = tf.reduce_mean(liver_intensity) - (
        tf.reduce_mean(lesion_intensity))
    intensity_diff *= 1.15
    # NaN means one of the regions is empty; zero disables augmentation.
    intensity_diff = tf.cond(tf.is_nan(intensity_diff),
                             lambda: 0.0, lambda: intensity_diff)
    # Randomly pick the fake lesion size as a fraction of the liver.
    lesion_liver_ratio = 0.0
    lesion_liver_ratio += tf.random.normal(shape=[], mean=0.01, stddev=0.01)
    lesion_liver_ratio += tf.random.normal(shape=[], mean=0.0, stddev=0.05)
    lesion_liver_ratio = tf.clip_by_value(
        lesion_liver_ratio, min_fake_lesion_ratio, min_fake_lesion_ratio + 0.20)
    # Fake lesion: a random smoothed blob restricted to non-background.
    fake_lesion_mask = tf.logical_and(
        _gen_rand_mask(ratio_mean=lesion_liver_ratio, ratio_stddev=0.0,
                       scale=reso // 32, shape=label.shape,
                       smoothness=reso // 32),
        tf.logical_not(background_mask))
    liver_mask = tf.logical_not(tf.logical_or(background_mask, fake_lesion_mask))
    # Blur the masks
    lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
        tf.expand_dims(tf.expand_dims(tf.cast(lesion_mask, tf.float32), -1), 0),
        filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
        strides=[1, 1, 1, 1, 1],
        padding='SAME'))
    fake_lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
        tf.expand_dims(tf.expand_dims(
            tf.cast(fake_lesion_mask, tf.float32), -1), 0),
        filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
        strides=[1, 1, 1, 1, 1],
        padding='SAME'))
    # Remove real lesion and add fake lesion.
    # If the intensitify is too small (maybe no liver or lesion region labeled),
    # do not generate fake data.
    gen_prob_indicator = tf.cond(
        tf.greater(intensity_diff, 0.0001),
        lambda: gen_prob_indicator, lambda: 0.0)
    # pylint: disable=g-long-lambda
    image = tf.cond(
        tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
        lambda: image + intensity_diff * lesion_mask_blur \
        - intensity_diff * fake_lesion_mask_blur,
        lambda: image)
    label = tf.cond(
        tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
        lambda: tf.cast(background_mask, tf.float32) * 0 + \
        tf.cast(liver_mask, tf.float32) * 1 + \
        tf.cast(fake_lesion_mask, tf.float32) * 2,
        lambda: label)
    # pylint: enable=g-long-lambda
    return image, label
def maybe_flip(image, label, flip_axis, flip_indicator=None):
    """Randomly flip the image."""
    if flip_indicator is None:
        flip_indicator = tf.random_uniform(shape=[])
    flip_or_not = tf.greater(flip_indicator, 0.5)

    def _maybe_flip(data):
        """Flip or not according to flip_or_not."""
        # Transpose so ``flip_axis`` becomes axis 0, flip up/down, then
        # transpose back (flip_axis == 0 needs no transpose at all).
        data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 1)),
                       lambda: tf.transpose(data, [1, 0, 2]),
                       lambda: data)
        data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 2)),
                       lambda: tf.transpose(data, [2, 1, 0]),
                       lambda: data)
        data = tf.cond(flip_or_not,
                       lambda: tf.image.flip_up_down(data),
                       lambda: data)
        data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 1)),
                       lambda: tf.transpose(data, [1, 0, 2]),
                       lambda: data)
        data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 2)),
                       lambda: tf.transpose(data, [2, 1, 0]),
                       lambda: data)
        return data

    # Image and label are flipped together with the same random decision.
    return _maybe_flip(image), _maybe_flip(label)
def maybe_rot180(image, label, static_axis, rot180_k=None):
    """Randomly rotate the image 180 degrees."""
    if rot180_k is None:
        # k is 0 (no-op) or 2 (180 degrees), each with probability 1/2.
        rot180_k = 2 * tf.random_uniform(
            shape=[], minval=0, maxval=2, dtype=tf.int32)
    rot_or_not = tf.not_equal(rot180_k, 0)

    def _maybe_rot180(data):
        """Rotate or not according to rot_or_not."""
        # Transpose so ``static_axis`` becomes the last axis, rotate in
        # the leading plane, then transpose back.
        data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
                       lambda: tf.transpose(data, [2, 1, 0]),
                       lambda: data)
        data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
                       lambda: tf.transpose(data, [0, 2, 1]),
                       lambda: data)
        data = tf.cond(rot_or_not,
                       lambda: tf.image.rot90(data, k=rot180_k),
                       lambda: data)
        data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
                       lambda: tf.transpose(data, [2, 1, 0]),
                       lambda: data)
        data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
                       lambda: tf.transpose(data, [0, 2, 1]),
                       lambda: data)
        return data

    # Image and label rotate together with the same random decision.
    return _maybe_rot180(image), _maybe_rot180(label)
def intensity_shift(
        image, label, per_class_intensity_scale, per_class_intensity_shift):
    """Perturb intensity in lesion and non-lesion regions.

    Returns only the (possibly perturbed) image; ``label`` is used as a
    region mask and is not modified.
    """
    if per_class_intensity_scale < 0.000001 and (
            per_class_intensity_shift < 0.000001):
        return image
    # Randomly change (mostly increase) intensity of non-lesion region.
    # NOTE(review): label > 1.5 marks the *lesion* region elsewhere in
    # this file, so this comment and the next appear swapped relative to
    # the masks below — confirm the intended regions.
    per_class_noise = _truncated_normal(
        per_class_intensity_shift, per_class_intensity_scale)
    image = image + per_class_noise * (
        image * tf.cast(tf.greater(label, 1.5), tf.float32))
    # Randomly change (mostly decrease) intensity of lesion region.
    per_class_noise = _truncated_normal(
        -per_class_intensity_shift, per_class_intensity_scale)
    image = image + per_class_noise * (
        image * tf.cast(tf.less(label, 1.5), tf.float32))
    return image
def image_corruption(
        image, label, reso, image_corrupt_ratio_mean,
        image_corrupt_ratio_stddev):
    """Randomly drop non-lesion pixels."""
    # Skip when corruption is effectively disabled.
    if image_corrupt_ratio_mean < 0.000001 and (
            image_corrupt_ratio_stddev < 0.000001):
        return image
    # Corrupt non-lesion region according to keep_mask.
    keep_mask = _gen_rand_mask(
        1 - image_corrupt_ratio_mean,
        image_corrupt_ratio_stddev,
        reso // 3, image.shape)
    # Voxels with label > 1.5 are always kept.
    keep_mask = tf.logical_or(tf.greater(label, 1.5), keep_mask)
    image *= tf.cast(keep_mask, tf.float32)
    return image
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import utils
from heatclient.v1 import resources
from six.moves.urllib import parse
from mox3 import mox
import testtools
class ResourceManagerTest(testtools.TestCase):
    """Tests for ResourceManager URL construction and response handling."""

    def setUp(self):
        super(ResourceManagerTest, self).setUp()
        # mox mock controller; UnsetStubs restores every patched attribute
        # after each test, even on failure.
        self.m = mox.Mox()
        self.addCleanup(self.m.UnsetStubs)

    def _base_test(self, expect, key):
        """Build a ResourceManager whose HTTP layer asserts its requests.

        ``expect`` is the (method, url) tuple every request must equal;
        ``key`` is the top-level key the fake response body is wrapped in
        (or None for endpoints that return an empty body).
        """

        class FakeAPI(object):
            """Fake API and ensure request url is correct."""

            def get(self, *args, **kwargs):
                assert ('GET', args[0]) == expect

            def json_request(self, *args, **kwargs):
                assert args == expect
                # ``key and {key: []} or {}`` evaluates to {} when key is None.
                ret = key and {key: []} or {}
                return {}, {key: ret}

            def raw_request(self, *args, **kwargs):
                assert args == expect
                return {}

            def head(self, url, **kwargs):
                return self.json_request("HEAD", url, **kwargs)

            def post(self, url, **kwargs):
                return self.json_request("POST", url, **kwargs)

            def put(self, url, **kwargs):
                return self.json_request("PUT", url, **kwargs)

            def delete(self, url, **kwargs):
                return self.raw_request("DELETE", url, **kwargs)

            def patch(self, url, **kwargs):
                return self.json_request("PATCH", url, **kwargs)

        manager = resources.ResourceManager(FakeAPI())
        # Stub out stack-id resolution and response-body parsing so that only
        # URL building is exercised; ReplayAll arms the recorded expectations.
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        self.m.StubOutWithMock(utils, 'get_response_body')
        utils.get_response_body(mox.IgnoreArg()).AndReturn(
            {key: key and {key: []} or {}})
        manager._resolve_stack_id('teststack').AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        return manager

    def test_get(self):
        """GET of one resource uses the resolved, URL-quoted stack id."""
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource'}
        expect = ('GET',
                  '/stacks/teststack%2Fabcd1234/resources'
                  '/testresource')
        key = 'resource'

        manager = self._base_test(expect, key)
        manager.get(**fields)
        self.m.VerifyAll()

    def test_get_with_attr(self):
        """with_attr entries become repeated query parameters."""
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource',
                  'with_attr': ['attr_a', 'attr_b']}
        expect = ('GET',
                  '/stacks/teststack%2Fabcd1234/resources'
                  '/testresource?with_attr=attr_a&with_attr=attr_b')
        key = 'resource'

        manager = self._base_test(expect, key)
        manager.get(**fields)
        self.m.VerifyAll()

    def test_get_with_unicode_resource_name(self):
        """Non-ASCII resource names are percent-encoded in the URL."""
        fields = {'stack_id': 'teststack',
                  'resource_name': u'\u5de5\u4f5c'}
        expect = ('GET',
                  '/stacks/teststack%2Fabcd1234/resources'
                  '/%E5%B7%A5%E4%BD%9C')
        key = 'resource'

        manager = self._base_test(expect, key)
        manager.get(**fields)
        self.m.VerifyAll()

    def test_list(self):
        self._test_list(
            fields={'stack_id': 'teststack'},
            expect='/stacks/teststack/resources')

    def test_list_nested(self):
        self._test_list(
            fields={'stack_id': 'teststack', 'nested_depth': '99'},
            expect='/stacks/teststack/resources?%s' % parse.urlencode({
                'nested_depth': 99,
            }, True)
        )

    def test_list_detail(self):
        self._test_list(
            fields={'stack_id': 'teststack', 'with_detail': 'True'},
            expect='/stacks/teststack/resources?%s' % parse.urlencode({
                'with_detail': True,
            }, True)
        )

    def _test_list(self, fields, expect):
        """Call ``manager.list(**fields)`` and assert the GET url is *expect*."""
        key = 'resources'

        class FakeResponse(object):
            def json(self):
                return {key: {}}

        class FakeClient(object):
            def get(self, *args, **kwargs):
                assert args[0] == expect
                return FakeResponse()

        manager = resources.ResourceManager(FakeClient())
        manager.list(**fields)

    def test_metadata(self):
        """Metadata endpoint appends '/metadata' to the resource URL."""
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource'}
        expect = ('GET',
                  '/stacks/teststack%2Fabcd1234/resources'
                  '/testresource/metadata')
        key = 'metadata'

        manager = self._base_test(expect, key)
        manager.metadata(**fields)
        self.m.VerifyAll()

    def test_generate_template(self):
        """Resource-type template endpoint has no stack id and no body key."""
        fields = {'resource_name': 'testresource'}
        expect = ('GET', '/resource_types/testresource/template')
        key = None

        class FakeAPI(object):
            """Fake API and ensure request url is correct."""

            def get(self, *args, **kwargs):
                assert ('GET', args[0]) == expect

            def json_request(self, *args, **kwargs):
                assert args == expect
                ret = key and {key: []} or {}
                return {}, {key: ret}

        manager = resources.ResourceManager(FakeAPI())
        self.m.StubOutWithMock(utils, 'get_response_body')
        utils.get_response_body(mox.IgnoreArg()).AndReturn(
            {key: key and {key: []} or {}})
        self.m.ReplayAll()

        manager.generate_template(**fields)
        self.m.VerifyAll()

    def test_signal(self):
        """Signal POSTs to the resource's '/signal' sub-URL."""
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource',
                  'data': 'Some content'}
        expect = ('POST',
                  '/stacks/teststack%2Fabcd1234/resources'
                  '/testresource/signal')
        key = 'signal'

        manager = self._base_test(expect, key)
        manager.signal(**fields)
        self.m.VerifyAll()
class ResourceStackNameTest(testtools.TestCase):
    """Verify Resource.stack_name is derived from the 'stack' rel link."""

    def test_stack_name(self):
        links = [
            {"href": "http://heat.example.com:8004/foo/12/resources/foobar",
             "rel": "self"},
            {"href": "http://heat.example.com:8004/foo/12",
             "rel": "stack"},
        ]
        resource = resources.Resource(None, {"links": links})
        self.assertEqual('foo', resource.stack_name)

    def test_stack_name_no_links(self):
        # Without any links there is no stack to name.
        self.assertIsNone(resources.Resource(None, {}).stack_name)
| |
"""Support for Adafruit DHT temperature and humidity sensor."""
from __future__ import annotations
from datetime import timedelta
import logging
import adafruit_dht
import board
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PIN,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this platform.
CONF_SENSOR = "sensor"
CONF_HUMIDITY_OFFSET = "humidity_offset"
CONF_TEMPERATURE_OFFSET = "temperature_offset"

DEFAULT_NAME = "DHT Sensor"

# DHT11 is able to deliver data once per second, DHT22 once every two
# NOTE(review): the throttle below is 30 s, far above either hardware
# limit — presumably to reduce read errors; confirm intent.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)

SENSOR_TEMPERATURE = "temperature"
SENSOR_HUMIDITY = "humidity"

# Descriptions for the two measurements a DHT sensor can report.
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
    SensorEntityDescription(
        key=SENSOR_TEMPERATURE,
        name="Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key=SENSOR_HUMIDITY,
        name="Humidity",
        native_unit_of_measurement=PERCENTAGE,
        device_class=SensorDeviceClass.HUMIDITY,
        state_class=SensorStateClass.MEASUREMENT,
    ),
)

# Valid values for CONF_MONITORED_CONDITIONS.
SENSOR_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES]
def validate_pin_input(value):
    """Normalize a GPIO pin to the board module's 'D<number>' spelling."""
    try:
        int(value)
    except ValueError:
        # Not a bare number — treat it as a symbolic name and upper-case it.
        return value.upper()
    return f"D{value}"
# Extend the base sensor-platform schema with this platform's options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # Sensor model name; mapped to an adafruit_dht class in setup_platform.
        vol.Required(CONF_SENSOR): cv.string,
        # Pin is normalized to the board module's 'D<number>' form.
        vol.Required(CONF_PIN): vol.All(cv.string, validate_pin_input),
        vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
            cv.ensure_list, [vol.In(SENSOR_KEYS)]
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        # Calibration offsets added to the raw readings, clamped to +/-100.
        vol.Optional(CONF_TEMPERATURE_OFFSET, default=0): vol.All(
            vol.Coerce(float), vol.Range(min=-100, max=100)
        ),
        vol.Optional(CONF_HUMIDITY_OFFSET, default=0): vol.All(
            vol.Coerce(float), vol.Range(min=-100, max=100)
        ),
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the DHT sensor."""
    _LOGGER.warning(
        "The DHT Sensor integration is deprecated and will be removed "
        "in Home Assistant Core 2022.4; this integration is removed under "
        "Architectural Decision Record 0019, more information can be found here: "
        "https://github.com/home-assistant/architecture/blob/master/adr/0019-GPIO.md"
    )

    # Map the configured model name to its driver class; AM2302 shares the
    # DHT22 protocol.
    sensor_class = {
        "AM2302": adafruit_dht.DHT22,
        "DHT11": adafruit_dht.DHT11,
        "DHT22": adafruit_dht.DHT22,
    }.get(config[CONF_SENSOR])
    if not sensor_class:
        _LOGGER.error("DHT sensor type is not supported")
        return

    name = config[CONF_NAME]
    client = DHTClient(sensor_class, config[CONF_PIN], name)
    monitored = config[CONF_MONITORED_CONDITIONS]
    add_entities(
        [
            DHTSensor(
                client,
                name,
                config[CONF_TEMPERATURE_OFFSET],
                config[CONF_HUMIDITY_OFFSET],
                description,
            )
            for description in SENSOR_TYPES
            if description.key in monitored
        ],
        True,
    )
class DHTSensor(SensorEntity):
    """Implementation of the DHT sensor."""

    def __init__(
        self,
        dht_client,
        name,
        temperature_offset,
        humidity_offset,
        description: SensorEntityDescription,
    ):
        """Initialize the sensor."""
        self.entity_description = description
        self.dht_client = dht_client
        self.temperature_offset = temperature_offset
        self.humidity_offset = humidity_offset
        self._attr_name = f"{name} {description.name}"

    def update(self):
        """Get the latest data from the DHT and updates the states."""
        self.dht_client.update()
        readings = self.dht_client.data
        kind = self.entity_description.key
        if kind not in readings:
            return
        reading = readings[kind]
        if kind == SENSOR_TEMPERATURE:
            _LOGGER.debug(
                "Temperature %.1f \u00b0C + offset %.1f",
                reading,
                self.temperature_offset,
            )
            # Discard out-of-range readings instead of publishing them.
            if -20 <= reading < 80:
                self._attr_native_value = round(
                    reading + self.temperature_offset, 1
                )
        elif kind == SENSOR_HUMIDITY:
            _LOGGER.debug(
                "Humidity %.1f%% + offset %.1f", reading, self.humidity_offset
            )
            if 0 <= reading <= 100:
                self._attr_native_value = round(reading + self.humidity_offset, 1)
class DHTClient:
    """Get the latest data from the DHT sensor."""

    def __init__(self, sensor, pin, name):
        """Initialize the sensor.

        sensor: adafruit_dht driver class (DHT11/DHT22) instantiated per read.
        pin: normalized pin name (e.g. "D4"), resolved via the board module.
        name: friendly name, used only in log messages.
        """
        self.sensor = sensor
        self.pin = getattr(board, pin)
        # Last successful readings keyed by SENSOR_TEMPERATURE/SENSOR_HUMIDITY.
        self.data = {}
        self.name = name

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data the DHT sensor."""
        dht = self.sensor(self.pin)
        try:
            temperature = dht.temperature
            humidity = dht.humidity
        except RuntimeError:
            # DHTs produce transient read errors routinely; keep last data.
            _LOGGER.debug("Unexpected value from DHT sensor: %s", self.name)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error updating DHT sensor: %s", self.name)
        else:
            # Bug fix: compare against None explicitly — 0.0 °C and 0 % are
            # valid readings, but the previous truthiness test dropped them.
            if temperature is not None:
                self.data[SENSOR_TEMPERATURE] = temperature
            if humidity is not None:
                self.data[SENSOR_HUMIDITY] = humidity
        finally:
            # Always release the GPIO resources held by the driver instance.
            dht.exit()
| |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
def _swagger_property(attr):
    """Build a read/write property proxying the '_<attr>' instance slot."""
    private_name = '_' + attr

    def _getter(self):
        return getattr(self, private_name)

    def _setter(self, value):
        setattr(self, private_name, value)

    _getter.__doc__ = "Gets the %s of this UpdateExtractRequest." % attr
    _setter.__doc__ = "Sets the %s of this UpdateExtractRequest." % attr
    return property(_getter, _setter)


class UpdateExtractRequest(object):
    """
    Request body for updating an extract (Server API).

    NOTE: This class was originally produced by the swagger code generator.
    The 15 identical getter/setter pairs have been collapsed into the
    ``_swagger_property`` factory below; the public interface (constructor
    keywords, properties, ``swagger_types``, ``attribute_map``,
    ``to_dict``/``to_str``/``__repr__``/``__eq__``/``__ne__``) is unchanged.
    """

    # (attribute name, swagger type) in declaration order; the JSON key for
    # every attribute of this model is identical to the attribute name.
    _FIELDS = (
        ('id_product', 'int'),
        ('id_product_image', 'int'),
        ('id_media_player', 'int'),
        ('id_media_source', 'int'),
        ('filename', 'str'),
        ('position', 'int'),
        ('active', 'bool'),
        ('date_add', 'str'),
        ('date_upd', 'str'),
        ('can_watch', 'bool'),
        ('name', 'list[I18nField]'),
        ('description', 'list[I18nField]'),
        ('cover', 'str'),
        ('thumbnail', 'str'),
        ('advertising_url', 'str'),
    )

    def __init__(self, id_product=None, id_product_image=None, id_media_player=None, id_media_source=None, filename=None, position=None, active=None, date_add=None, date_upd=None, can_watch=None, name=None, description=None, cover=None, thumbnail=None, advertising_url=None):
        """
        UpdateExtractRequest - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = dict(self._FIELDS)
        self.attribute_map = {attr: attr for attr, _type in self._FIELDS}
        values = (id_product, id_product_image, id_media_player,
                  id_media_source, filename, position, active, date_add,
                  date_upd, can_watch, name, description, cover, thumbnail,
                  advertising_url)
        # _FIELDS and the constructor signature are kept in the same order.
        for (attr, _type), value in zip(self._FIELDS, values):
            setattr(self, '_' + attr, value)

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: the generated code accessed other.__dict__ directly, which
        # raised AttributeError when comparing against None or any unrelated
        # type.  Returning NotImplemented lets Python fall back to identity.
        if not isinstance(other, UpdateExtractRequest):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other


# Attach the simple pass-through properties (id_product, filename, ...).
for _field_name, _field_type in UpdateExtractRequest._FIELDS:
    setattr(UpdateExtractRequest, _field_name, _swagger_property(_field_name))
| |
"""
Contains the POS tags, dependency relations, and morphological features defined
by the Universal Dependencies project [0], for both of its major versions.
[0] http://universaldependencies.org/
"""
from collections import OrderedDict
"""
Tuples (one for each UD version) listing all possible universal POS tags with
the addition of a non-standard POS tag marking a sentence's root.
[1] http://universaldependencies.org/docsv1/u/pos/index.html
[2] http://universaldependencies.org/u/pos/index.html
"""
# UD v1 tag inventory [1]; differs from v2 only in 'CONJ' vs 'CCONJ'.
POS_TAGS_V1 = (
    'ROOT',   # non-standard! the root of a sentence
    'ADJ',    # adjective
    'ADP',    # adposition
    'ADV',    # adverb
    'AUX',    # auxiliary verb
    'CONJ',   # coordinating conjunction
    'DET',    # determiner
    'INTJ',   # interjection
    'NOUN',   # noun
    'NUM',    # numeral
    'PART',   # particle
    'PRON',   # pronoun
    'PROPN',  # proper noun
    'PUNCT',  # punctuation
    'SCONJ',  # subordinating conjunction
    'SYM',    # symbol
    'VERB',   # verb
    'X',      # other
)

# UD v2 tag inventory [2]; renames v1's 'CONJ' to 'CCONJ'.
POS_TAGS_V2 = (
    'ROOT',   # non-standard! the root of a sentence
    'ADJ',    # adjective
    'ADP',    # adposition
    'ADV',    # adverb
    'AUX',    # auxiliary
    'CCONJ',  # coordinating conjunction
    'DET',    # determiner
    'INTJ',   # interjection
    'NOUN',   # noun
    'NUM',    # numeral
    'PART',   # particle
    'PRON',   # pronoun
    'PROPN',  # proper noun
    'PUNCT',  # punctuation
    'SCONJ',  # subordinating conjunction
    'SYM',    # symbol
    'VERB',   # verb
    'X',      # other
)
"""
Tuples (one for each UD version) listing all possible universal dependency
relations.
[3] http://universaldependencies.org/docsv1/u/dep/index.html
[4] http://universaldependencies.org/u/dep/index.html
"""
# UD v1 universal relations [3].
DEP_RELS_V1 = (
    'acl',         # clausal modifier of noun (adjectival clause)
    'advcl',       # adverbial clause modifier
    'advmod',      # adverbial modifier
    'amod',        # adjectival modifier
    'appos',       # appositional modifier
    'aux',         # auxiliary
    'auxpass',     # passive auxiliary
    'case',        # case marking
    'cc',          # coordinating conjunction
    'ccomp',       # clausal complement
    'compound',    # compound
    'conj',        # conjunct
    'cop',         # copula
    'csubj',       # clausal subject
    'csubjpass',   # clausal passive subject
    'dep',         # unspecified dependency
    'det',         # determiner
    'discourse',   # discourse element
    'dislocated',  # dislocated elements
    'dobj',        # direct object
    'expl',        # expletive
    'foreign',     # foreign words
    'goeswith',    # goes with
    'iobj',        # indirect object
    'list',        # list
    'mark',        # marker
    'mwe',         # multi-word expression
    'name',        # name
    'neg',         # negation modifier
    'nmod',        # nominal modifier
    'nsubj',       # nominal subject
    'nsubjpass',   # passive nominal subject
    'nummod',      # numeric modifier
    'parataxis',   # parataxis
    'punct',       # punctuation
    'remnant',     # remnant in ellipsis
    'reparandum',  # overridden disfluency
    'root',        # root
    'vocative',    # vocative
    'xcomp',       # open clausal complement
)

# UD v2 universal relations [4]; v2 dropped e.g. 'dobj'/'neg'/'mwe' and
# added e.g. 'obj'/'obl'/'clf'/'fixed'/'flat'/'orphan'.
DEP_RELS_V2 = (
    'acl',         # clausal modifier of noun (adjectival clause)
    'advcl',       # adverbial clause modifier
    'advmod',      # adverbial modifier
    'amod',        # adjectival modifier
    'appos',       # appositional modifier
    'aux',         # auxiliary
    'case',        # case marking
    'cc',          # coordinating conjunction
    'ccomp',       # clausal complement
    'clf',         # classifier
    'compound',    # compound
    'conj',        # conjunct
    'cop',         # copula
    'csubj',       # clausal subject
    'dep',         # unspecified dependency
    'det',         # determiner
    'discourse',   # discourse element
    'dislocated',  # dislocated elements
    'expl',        # expletive
    'fixed',       # fixed multiword expression
    'flat',        # flat multiword expression
    'goeswith',    # goes with
    'iobj',        # indirect object
    'list',        # list
    'mark',        # marker
    'nmod',        # nominal modifier
    'nsubj',       # nominal subject
    'nummod',      # numeric modifier
    'obj',         # object
    'obl',         # oblique nominal
    'orphan',      # orphan
    'parataxis',   # parataxis
    'punct',       # punctuation
    'reparandum',  # overridden disfluency
    'root',        # root
    'vocative',    # vocative
    'xcomp',       # open clausal complement
)
"""
Ordered dicts (one for each UD version) where the keys are the universal
features and the values are the respective features' possible values.
[5] http://universaldependencies.org/docsv1/u/feat/index.html
[6] http://universaldependencies.org/u/feat/index.html
"""
# UD v1 features [5].  Bug fix: the original table contained 'Imp ' (with a
# trailing space) under 'Aspect'; feature values are matched as exact
# strings, so a padded value could never match real UD data.
MORPH_FEATURES_V1 = {
    'Animacy': ('Anim', 'Inan', 'Nhum'),
    'Aspect': ('Imp', 'Perf', 'Pro', 'Prog'),
    'Case': ('Abs', 'Acc', 'Erg', 'Nom',
             'Abe', 'Ben', 'Cau', 'Com', 'Dat', 'Dis',
             'Gen', 'Ins', 'Par', 'Tem', 'Tra', 'Voc',
             'Abl', 'Add', 'Ade', 'All', 'Del', 'Ela', 'Ess',
             'Ill', 'Ine', 'Lat', 'Loc', 'Sub', 'Sup', 'Ter'),
    'Definite': ('Com', 'Def', 'Ind', 'Red'),
    'Degree': ('Abs', 'Cmp', 'Pos', 'Sup'),
    'Gender': ('Com', 'Fem', 'Masc', 'Neut'),
    'Mood': ('Cnd', 'Des', 'Imp', 'Ind', 'Jus', 'Nec', 'Opt', 'Pot', 'Qot', 'Sub'),
    'Negative': ('Neg', 'Pos'),
    'NumType': ('Card', 'Dist', 'Frac', 'Gen', 'Mult', 'Ord', 'Range', 'Sets'),
    'Number': ('Coll', 'Dual', 'Plur', 'Ptan', 'Sing'),
    'Person': ('1', '2', '3'),
    'Poss': ('Yes',),
    'PronType': ('Art', 'Dem', 'Ind', 'Int', 'Neg', 'Prs', 'Rcp', 'Rel', 'Tot'),
    'Reflex': ('Yes',),
    'Tense': ('Fut', 'Imp', 'Nar', 'Past', 'Pqp', 'Pres'),
    'VerbForm': ('Fin', 'Ger', 'Inf', 'Part', 'Sup', 'Trans'),
    'Voice': ('Act', 'Cau', 'Pass', 'Rcp')
}
# UD v2 features [6].  Bug fix: the original table contained 'Hum ', 'Inan '
# and 'Imp ' with trailing spaces; feature values are matched as exact
# strings, so padded values could never match real UD data.
MORPH_FEATURES_V2 = {
    'Abbr': ('Yes',),
    'Animacy': ('Anim', 'Hum', 'Inan', 'Nhum'),
    'Aspect': ('Hab', 'Imp', 'Iter', 'Perf', 'Prog', 'Prosp'),
    'Case': ('Abs', 'Acc', 'Erg', 'Nom',
             'Abe', 'Ben', 'Cau', 'Cmp', 'Com', 'Dat', 'Dis',
             'Equ', 'Gen', 'Ins', 'Par', 'Tem', 'Tra', 'Voc',
             'Abl', 'Add', 'Ade', 'All', 'Del', 'Ela', 'Ess',
             'Ill', 'Ine', 'Lat', 'Loc', 'Sub', 'Sup', 'Ter'),
    'Definite': ('Com', 'Cons', 'Def', 'Ind', 'Spec'),
    'Degree': ('Abs', 'Cmp', 'Equ', 'Pos', 'Sup'),
    'Evident': ('Fh', 'Nfh'),
    'Foreign': ('Yes',),
    'Gender': ('Com', 'Fem', 'Masc', 'Neut'),
    'Mood': ('Adm', 'Cnd', 'Des', 'Imp', 'Ind', 'Jus',
             'Nec', 'Opt', 'Pot', 'Prp', 'Qot', 'Sub'),
    'NumType': ('Card', 'Dist', 'Frac', 'Mult', 'Ord', 'Range', 'Sets'),
    'Number': ('Coll', 'Count', 'Dual', 'Grpa', 'Grpl',
               'Inv', 'Pauc', 'Plur', 'Ptan', 'Sing', 'Tri'),
    'Person': ('0', '1', '2', '3', '4'),
    'Polarity': ('Neg', 'Pos'),
    'Polite': ('Elev', 'Form', 'Humb', 'Infm'),
    'Poss': ('Yes',),
    'PronType': ('Art', 'Dem', 'Emp', 'Exc', 'Ind', 'Int',
                 'Neg', 'Prs', 'Rcp', 'Rel', 'Tot'),
    'Reflex': ('Yes',),
    'Tense': ('Fut', 'Imp', 'Past', 'Pqp', 'Pres'),
    'VerbForm': ('Conv', 'Fin', 'Gdv', 'Ger', 'Inf', 'Part', 'Sup', 'Vnoun'),
    'Voice': ('Act', 'Antip', 'Cau', 'Dir', 'Inv', 'Mid', 'Pass', 'Rcp')
}
# Normalize both tables to OrderedDicts sorted by feature name so iteration
# order is deterministic regardless of the literal declaration order above.
MORPH_FEATURES_V1 = OrderedDict(sorted(MORPH_FEATURES_V1.items(), key=lambda x: x[0]))
MORPH_FEATURES_V2 = OrderedDict(sorted(MORPH_FEATURES_V2.items(), key=lambda x: x[0]))
| |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
determine_ext,
ExtractorError,
fix_xml_ampersands,
int_or_none,
orderedSet,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
)
class NPOBaseIE(InfoExtractor):
    def _get_token(self, video_id):
        """Fetch the auth token required by the ida stream API."""
        auth = self._download_json(
            'http://ida.omroep.nl/app.php/auth', video_id,
            note='Downloading token')
        return auth['token']
class NPOIE(NPOBaseIE):
IE_NAME = 'npo'
IE_DESC = 'npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl'
_VALID_URL = r'''(?x)
(?:
npo:|
https?://
(?:www\.)?
(?:
npo\.nl/(?:[^/]+/)*|
(?:ntr|npostart)\.nl/(?:[^/]+/){2,}|
omroepwnl\.nl/video/fragment/[^/]+__|
(?:zapp|npo3)\.nl/(?:[^/]+/){2,}
)
)
(?P<id>[^/?#]+)
'''
_TESTS = [{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
}, {
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show: The best of.',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
}, {
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht: Zwart geld. De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
'duration': 3000,
},
}, {
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'm4v',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
'skip_download': True,
}
}, {
# non asf in streams
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
'info_dict': {
'id': 'VPWON_1233944',
'ext': 'm4v',
'title': 'Aap, poot, pies',
'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
'upload_date': '20150508',
'duration': 599,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
'info_dict': {
'id': 'POW_00996502',
'ext': 'm4v',
'title': '''"Dit is wel een 'landslide'..."''',
'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
'upload_date': '20150508',
'duration': 462,
},
'params': {
'skip_download': True,
}
}, {
# audio
'url': 'http://www.npo.nl/jouw-stad-rotterdam/29-01-2017/RBX_FUNX_6683215/RBX_FUNX_7601437',
'info_dict': {
'id': 'RBX_FUNX_6683215',
'ext': 'mp3',
'title': 'Jouw Stad Rotterdam',
'description': 'md5:db251505244f097717ec59fabc372d9f',
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118',
'only_matching': True,
}, {
'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990',
'only_matching': True,
}, {
'url': 'https://www.npo3.nl/3onderzoekt/16-09-2015/VPWON_1239870',
'only_matching': True,
}, {
# live stream
'url': 'npo:LI_NL1_4188102',
'only_matching': True,
}, {
'url': 'http://www.npo.nl/radio-gaga/13-06-2017/BNN_101383373',
'only_matching': True,
}, {
'url': 'https://www.zapp.nl/1803-skelterlab/instructie-video-s/740-instructievideo-s/POMS_AT_11736927',
'only_matching': True,
}, {
'url': 'https://www.npostart.nl/broodje-gezond-ei/28-05-2018/KN_1698996',
'only_matching': True,
}, {
'url': 'https://npo.nl/KN_1698996',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False if any(ie.suitable(url)
for ie in (NPOLiveIE, NPORadioIE, NPORadioFragmentIE))
else super(NPOIE, cls).suitable(url))
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
    """Fetch metadata and stream formats for *video_id* from the NPO APIs.

    Returns an info dict (id, title, formats, subtitles, ...).
    Raises ExtractorError when the metadata endpoint reports an error.
    """
    metadata = self._download_json(
        'http://e.omroep.nl/metadata/%s' % video_id,
        video_id,
        # We have to remove the javascript callback
        transform_source=strip_jsonp,
    )

    error = metadata.get('error')
    if error:
        raise ExtractorError(error, expected=True)

    # For some videos actual video id (prid) is different (e.g. for
    # http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
    # video id is POMS_WNL_853698 but prid is POW_00996502)
    video_id = metadata.get('prid') or video_id

    # titel is too generic in some cases so utilize aflevering_titel as well
    # when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
    title = metadata['titel']
    sub_title = metadata.get('aflevering_titel')
    if sub_title and sub_title != title:
        title += ': %s' % sub_title

    token = self._get_token(video_id)

    formats = []
    urls = set()

    def is_legal_url(format_url):
        # Accept only new, absolute (or protocol-relative) URLs.
        return format_url and format_url not in urls and re.match(
            r'^(?:https?:)?//', format_url)

    QUALITY_LABELS = ('Laag', 'Normaal', 'Hoog')
    QUALITY_FORMATS = ('adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std')

    quality_from_label = qualities(QUALITY_LABELS)
    quality_from_format_id = qualities(QUALITY_FORMATS)
    items = self._download_json(
        'http://ida.omroep.nl/app.php/%s' % video_id, video_id,
        'Downloading formats JSON', query={
            'adaptive': 'yes',
            'token': token,
        })['items'][0]
    for num, item in enumerate(items):
        item_url = item.get('url')
        if not is_legal_url(item_url):
            continue
        urls.add(item_url)
        format_id = self._search_regex(
            r'video/ida/([^/]+)', item_url, 'format id',
            default=None)

        item_label = item.get('label')

        def add_format_url(format_url):
            # Derive resolution from a WIDTHxHEIGHT token in the URL, if any.
            width = int_or_none(self._search_regex(
                r'(\d+)[xX]\d+', format_url, 'width', default=None))
            height = int_or_none(self._search_regex(
                r'\d+[xX](\d+)', format_url, 'height', default=None))
            if item_label in QUALITY_LABELS:
                quality = quality_from_label(item_label)
                f_id = item_label
            elif item_label in QUALITY_FORMATS:
                quality = quality_from_format_id(format_id)
                f_id = format_id
            else:
                quality, f_id = [None] * 2
            formats.append({
                'url': format_url,
                'format_id': f_id,
                'width': width,
                'height': height,
                'quality': quality,
            })

        # Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706
        if item.get('contentType') in ('url', 'audio'):
            add_format_url(item_url)
            continue

        try:
            stream_info = self._download_json(
                item_url + '&type=json', video_id,
                # BUGFIX: parenthesize the fallback chain. `%` binds tighter
                # than `or`, so the original always formatted item_label
                # (even when None) and the alternatives were dead code.
                'Downloading %s stream JSON'
                % (item_label or item.get('format') or format_id or num))
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
                error = (self._parse_json(
                    ee.cause.read().decode(), video_id,
                    fatal=False) or {}).get('errorstring')
                if error:
                    raise ExtractorError(error, expected=True)
            raise
        # Stream URL instead of JSON, example: npo:LI_NL1_4188102
        if isinstance(stream_info, compat_str):
            if not stream_info.startswith('http'):
                continue
            video_url = stream_info
        # JSON
        else:
            video_url = stream_info.get('url')
        if not video_url or video_url in urls:
            continue
        urls.add(video_url)
        if determine_ext(video_url) == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                video_url, video_id, ext='mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
        else:
            add_format_url(video_url)

    is_live = metadata.get('medium') == 'live'

    if not is_live:
        for num, stream in enumerate(metadata.get('streams', [])):
            stream_url = stream.get('url')
            if not is_legal_url(stream_url):
                continue
            urls.add(stream_url)
            # smooth streaming is not supported
            stream_type = stream.get('type', '').lower()
            if stream_type in ['ss', 'ms']:
                continue
            if stream_type == 'hds':
                f4m_formats = self._extract_f4m_formats(
                    stream_url, video_id, fatal=False)
                # f4m downloader downloads only piece of live stream
                for f4m_format in f4m_formats:
                    f4m_format['preference'] = -1
                formats.extend(f4m_formats)
            elif stream_type == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_id, ext='mp4', fatal=False))
            # Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706
            elif '.asf' in stream_url:
                asx = self._download_xml(
                    stream_url, video_id,
                    'Downloading stream %d ASX playlist' % num,
                    transform_source=fix_xml_ampersands, fatal=False)
                if not asx:
                    continue
                ref = asx.find('./ENTRY/Ref')
                if ref is None:
                    continue
                video_url = ref.get('href')
                if not video_url or video_url in urls:
                    continue
                urls.add(video_url)
                formats.append({
                    'url': video_url,
                    'ext': stream.get('formaat', 'asf'),
                    'quality': stream.get('kwaliteit'),
                    'preference': -10,
                })
            else:
                formats.append({
                    'url': stream_url,
                    'quality': stream.get('kwaliteit'),
                })

    self._sort_formats(formats)

    subtitles = {}
    # 'tt888' is the teletext-888 subtitle flag in the NPO metadata.
    if metadata.get('tt888') == 'ja':
        subtitles['nl'] = [{
            'ext': 'vtt',
            'url': 'http://tt888.omroep.nl/tt888/%s' % video_id,
        }]

    return {
        'id': video_id,
        'title': self._live_title(title) if is_live else title,
        'description': metadata.get('info'),
        'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
        'upload_date': unified_strdate(metadata.get('gidsdatum')),
        'duration': parse_duration(metadata.get('tijdsduur')),
        'formats': formats,
        'subtitles': subtitles,
        'is_live': is_live,
    }
class NPOLiveIE(NPOBaseIE):
    """Extractor for NPO live channel pages (npo.nl/live/...)."""

    IE_NAME = 'npo.nl:live'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/live(?:/(?P<id>[^/?#&]+))?'

    _TESTS = [{
        'url': 'http://www.npo.nl/live/npo-1',
        'info_dict': {
            'id': 'LI_NL1_4188102',
            'display_id': 'npo-1',
            'ext': 'mp4',
            'title': 're:^NPO 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'http://www.npo.nl/live',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # The bare /live URL carries no channel id; default to NPO 1.
        channel_id = self._match_id(url) or 'npo-1'
        page = self._download_webpage(url, channel_id)

        media_id = self._search_regex(
            [r'media-id="([^"]+)"', r'data-prid="([^"]+)"'], page, 'live id')

        result = {
            '_type': 'url_transparent',
            'url': 'npo:%s' % media_id,
            'ie_key': NPOIE.ie_key(),
            'id': media_id,
            'display_id': channel_id,
        }
        return result
class NPORadioIE(InfoExtractor):
    """Extractor for the NPO live radio channel pages."""

    IE_NAME = 'npo.nl:radio'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-1',
        'info_dict': {
            'id': 'radio-1',
            'ext': 'mp3',
            'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    @classmethod
    def suitable(cls, url):
        # Fragment URLs are handled by the dedicated fragment extractor.
        if NPORadioFragmentIE.suitable(url):
            return False
        return super(NPORadioIE, cls).suitable(url)

    @staticmethod
    def _html_get_attribute_regex(attribute):
        # Matches attribute='value' and captures the single-quoted value.
        pattern = r'{0}\s*=\s*\'([^\']+)\''
        return pattern.format(attribute)

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        page = self._download_webpage(url, channel_id)

        channel_name = self._html_search_regex(
            self._html_get_attribute_regex('data-channel'), page, 'title')

        stream_info = self._parse_json(
            self._html_search_regex(
                self._html_get_attribute_regex('data-streams'), page,
                'data-streams'),
            channel_id)

        audio_codec = stream_info.get('codec')

        return {
            'id': channel_id,
            'url': stream_info['url'],
            'title': self._live_title(channel_name),
            'acodec': audio_codec,
            'ext': audio_codec,
            'is_live': True,
        }
class NPORadioFragmentIE(InfoExtractor):
    """Extractor for individual NPO radio fragments (title + audio URL)."""

    IE_NAME = 'npo.nl:radio:fragment'
    _VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
        'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
        'info_dict': {
            'id': '174356',
            'ext': 'mp3',
            'title': 'Jubileumconcert Willeke Alberti',
        },
    }

    def _real_extract(self, url):
        fragment_id = self._match_id(url)

        page = self._download_webpage(url, fragment_id)

        fragment_title = self._html_search_regex(
            r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % fragment_id,
            page, 'title')

        stream_url = self._search_regex(
            r"data-streams='([^']+)'", page, 'audio url')

        return {
            'id': fragment_id,
            'url': stream_url,
            'title': fragment_title,
        }
class NPODataMidEmbedIE(InfoExtractor):
    """Base class for sites embedding NPO players via a data-mid attribute."""

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)
        media_id = self._search_regex(
            r'data-mid=(["\'])(?P<id>(?:(?!\1).)+)\1', page, 'video_id',
            group='id')
        return {
            '_type': 'url_transparent',
            'ie_key': 'NPO',
            'url': 'npo:%s' % media_id,
            'display_id': display_id
        }
class SchoolTVIE(NPODataMidEmbedIE):
    """Extractor for schooltv.nl; extraction itself is inherited from
    NPODataMidEmbedIE (data-mid embed lookup)."""
    IE_NAME = 'schooltv'
    _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://www.schooltv.nl/video/ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam/',
        'info_dict': {
            'id': 'WO_NTR_429477',
            'display_id': 'ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam',
            'title': 'Ademhaling: De hele dag haal je adem. Maar wat gebeurt er dan eigenlijk in je lichaam?',
            'ext': 'mp4',
            'description': 'md5:abfa0ff690adb73fd0297fd033aaa631'
        },
        'params': {
            # Skip because of m3u8 download
            'skip_download': True
        }
    }
class HetKlokhuisIE(NPODataMidEmbedIE):
    """Extractor for hetklokhuis.nl; extraction itself is inherited from
    NPODataMidEmbedIE (data-mid embed lookup)."""
    IE_NAME = 'hetklokhuis'
    _VALID_URL = r'https?://(?:www\.)?hetklokhuis\.nl/[^/]+/\d+/(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'http://hetklokhuis.nl/tv-uitzending/3471/Zwaartekrachtsgolven',
        'info_dict': {
            'id': 'VPWON_1260528',
            'display_id': 'Zwaartekrachtsgolven',
            'ext': 'm4v',
            'title': 'Het Klokhuis: Zwaartekrachtsgolven',
            'description': 'md5:c94f31fb930d76c2efa4a4a71651dd48',
            'upload_date': '20170223',
        },
        'params': {
            'skip_download': True
        }
    }
class NPOPlaylistBaseIE(NPOIE):
    """Shared base for pages that list several NPO media ids.

    Subclasses provide _PLAYLIST_ENTRY_RE and _PLAYLIST_TITLE_RE.
    """

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)

        entries = []
        for media_id in orderedSet(re.findall(self._PLAYLIST_ENTRY_RE, page)):
            if media_id.startswith('http'):
                entries.append(self.url_result(media_id))
            else:
                entries.append(self.url_result('npo:%s' % media_id))

        playlist_title = self._html_search_regex(
            self._PLAYLIST_TITLE_RE, page, 'playlist title',
            default=None) or self._og_search_title(page)

        return self.playlist_result(entries, playlist_id, playlist_title)
class VPROIE(NPOPlaylistBaseIE):
    """Playlist extractor for vpro.nl / tegenlicht.vpro.nl / 2doc.nl pages;
    entry/title regexes below drive NPOPlaylistBaseIE._real_extract."""
    IE_NAME = 'vpro'
    _VALID_URL = r'https?://(?:www\.)?(?:(?:tegenlicht\.)?vpro|2doc)\.nl/(?:[^/]+/)*(?P<id>[^/]+)\.html'
    # Title: prefer the platform title, fall back to the subtitle element.
    _PLAYLIST_TITLE_RE = (r'<h1[^>]+class=["\'].*?\bmedia-platform-title\b.*?["\'][^>]*>([^<]+)',
                          r'<h5[^>]+class=["\'].*?\bmedia-platform-subtitle\b.*?["\'][^>]*>([^<]+)')
    _PLAYLIST_ENTRY_RE = r'data-media-id="([^"]+)"'
    _TESTS = [
        {
            'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
            'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
            'info_dict': {
                'id': 'VPWON_1169289',
                'ext': 'm4v',
                'title': 'De toekomst komt uit Afrika',
                'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
                'upload_date': '20130225',
            },
            'skip': 'Video gone',
        },
        {
            'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
            'info_dict': {
                'id': 'sergio-herman',
                'title': 'sergio herman: fucking perfect',
            },
            'playlist_count': 2,
        },
        {
            # playlist with youtube embed
            'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
            'info_dict': {
                'id': 'education-education',
                'title': 'education education',
            },
            'playlist_count': 2,
        },
        {
            'url': 'http://www.2doc.nl/documentaires/series/2doc/2015/oktober/de-tegenprestatie.html',
            'info_dict': {
                'id': 'de-tegenprestatie',
                'title': 'De Tegenprestatie',
            },
            'playlist_count': 2,
        }, {
            'url': 'http://www.2doc.nl/speel~VARA_101375237~mh17-het-verdriet-van-nederland~.html',
            'info_dict': {
                'id': 'VARA_101375237',
                'ext': 'm4v',
                'title': 'MH17: Het verdriet van Nederland',
                'description': 'md5:09e1a37c1fdb144621e22479691a9f18',
                'upload_date': '20150716',
            },
            'params': {
                # Skip because of m3u8 download
                'skip_download': True
            },
        }
    ]
class WNLIE(NPOPlaylistBaseIE):
    """Playlist extractor for omroepwnl.nl video detail pages; the regexes
    below drive NPOPlaylistBaseIE._real_extract."""
    IE_NAME = 'wnl'
    _VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'
    _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>'
    # Entries are the "Deel N" (part N) links on the page.
    _PLAYLIST_ENTRY_RE = r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>Deel \d+'
    _TESTS = [{
        'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
        'info_dict': {
            'id': 'vandaag-de-dag-6-mei',
            'title': 'Vandaag de Dag 6 mei',
        },
        'playlist_count': 4,
    }]
class AndereTijdenIE(NPOPlaylistBaseIE):
    """Playlist extractor for anderetijden.nl episode pages; the regexes
    below drive NPOPlaylistBaseIE._real_extract."""
    IE_NAME = 'anderetijden'
    _VALID_URL = r'https?://(?:www\.)?anderetijden\.nl/programma/(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _PLAYLIST_TITLE_RE = r'(?s)<h1[^>]+class=["\'].*?\bpage-title\b.*?["\'][^>]*>(.+?)</h1>'
    _PLAYLIST_ENTRY_RE = r'<figure[^>]+class=["\']episode-container episode-page["\'][^>]+data-prid=["\'](.+?)["\']'
    _TESTS = [{
        'url': 'http://anderetijden.nl/programma/1/Andere-Tijden/aflevering/676/Duitse-soldaten-over-de-Slag-bij-Arnhem',
        'info_dict': {
            'id': 'Duitse-soldaten-over-de-Slag-bij-Arnhem',
            'title': 'Duitse soldaten over de Slag bij Arnhem',
        },
        'playlist_count': 3,
    }]
| |
"""
Utility function to facilitate testing.
"""
import os
import sys
import re
import operator
from nosetester import import_nose
# Names exported by `from ... import *`; the module's public API.
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
           'assert_array_equal', 'assert_array_less', 'assert_string_equal',
           'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
           'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
           'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure']

# Module-level verbosity flag used by the testing helpers.
verbose = 0
def rand(*args):
    """Returns an array of random numbers with the given shape.

    This only uses the standard library, so it is useful for testing purposes.
    """
    import random
    from numpy.core import zeros, float64
    out = zeros(args, float64)
    flat = out.flat
    for idx in range(len(flat)):
        flat[idx] = random.random()
    return out
if sys.platform[:5]=='linux':
    # On Linux, scheduling and memory figures are read from /proc.
    def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
                _load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc. """
        import time
        if not _load_time:
            # Remember the first call time so the fallback is relative to it.
            _load_time.append(time.time())
        try:
            f = open(_proc_pid_stat, 'r')
            l = f.readline().split(' ')
            f.close()
            return int(l[13])
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit. On any real failure (unreadable /proc, parse
            # error) fall back to a wall-clock estimate.
            return int(100*(time.time()-_load_time[0]))

    def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
        """ Return virtual memory size in bytes of the running python.
        """
        try:
            f = open(_proc_pid_stat, 'r')
            l = f.readline().split(' ')
            f.close()
            # Field 23 of /proc/<pid>/stat is vsize (virtual memory size).
            return int(l[22])
        except Exception:
            # Narrowed from a bare `except:`; report "unknown" as None.
            return
else:
    # os.getpid is not in all platforms available.
    # Using time is safe but inaccurate, especially when process
    # was suspended or sleeping.
    def jiffies(_load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. [Emulation with time.time]. """
        import time
        if not _load_time:
            _load_time.append(time.time())
        return int(100*(time.time()-_load_time[0]))

    def memusage():
        """ Return memory usage of running python. [Not implemented]"""
        raise NotImplementedError
# On Windows (Python > 2.3, string comparison of the version prefix) override
# memusage with an implementation backed by the win32pdh performance counters.
if os.name=='nt' and sys.version[:3] > '2.3':
    # Code "stolen" from enthought/debug/memusage.py
    def GetPerformanceAttributes(object, counter, instance = None,
                                 inum=-1, format = None, machine=None):
        """Read one formatted value of a Windows performance counter.

        Builds the counter path from the arguments, opens a query, samples
        it once and returns the formatted value.
        """
        # NOTE: Many counters require 2 samples to give accurate results,
        # including "% Processor Time" (as by definition, at any instant, a
        # thread's CPU usage is either 0 or 100). To read counters like this,
        # you should copy this function, but keep the counter open, and call
        # CollectQueryData() each time you need to know.
        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
        # My older explanation for this was that the "AddCounter" process forced
        # the CPU to 100%, but the above makes more sense :)
        import win32pdh
        if format is None: format = win32pdh.PDH_FMT_LONG
        path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) )
        hq = win32pdh.OpenQuery()
        try:
            hc = win32pdh.AddCounter(hq, path)
            try:
                win32pdh.CollectQueryData(hq)
                type, val = win32pdh.GetFormattedCounterValue(hc, format)
                return val
            finally:
                # Release counter and query handles even if sampling fails.
                win32pdh.RemoveCounter(hc)
        finally:
            win32pdh.CloseQuery(hq)

    def memusage(processName="python", instance=0):
        """Return the "Virtual Bytes" counter for the given process."""
        # from win32pdhutil, part of the win32all package
        import win32pdh
        return GetPerformanceAttributes("Process", "Virtual Bytes",
                                        processName, instance,
                                        win32pdh.PDH_FMT_LONG, None)
def build_err_msg(arrays, err_msg, header='Items are not equal:',
                  verbose=True,
                  names=('ACTUAL', 'DESIRED')):
    """Build a multi-line error message for a failed comparison.

    Parameters
    ----------
    arrays : sequence
        The objects that were compared; each is repr()'d into the message.
    err_msg : str
        Custom message; appended to the header when short, otherwise
        placed on its own line.
    header : str
        First line of the message.
    verbose : bool
        If True, include the repr of each object (truncated to 3 lines).
    names : tuple of str
        Labels matched positionally with `arrays`.
    """
    msg = ['\n' + header]
    if err_msg:
        if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
            msg = [msg[0] + ' ' + err_msg]
        else:
            msg.append(err_msg)
    if verbose:
        for i, a in enumerate(arrays):
            try:
                r = repr(a)
            except Exception:
                # Narrowed from a bare `except:`; a failing repr() must not
                # prevent the error report from being assembled.
                r = '[repr failed]'
            if r.count('\n') > 3:
                r = '\n'.join(r.splitlines()[:3])
                r += '...'
            msg.append(' %s: %s' % (names[i], r))
    return '\n'.join(msg)
def assert_equal(actual, desired, err_msg='', verbose=True):
    """
    Raise an assertion if two objects are not equal.

    Dicts are compared key by key, lists/tuples item by item (recursively),
    ndarrays via assert_array_equal, and anything else with ``!=``. The
    first conflicting value raises AssertionError.

    Parameters
    ----------
    actual : list, tuple, dict or ndarray
        The object to check.
    desired : list, tuple, dict or ndarray
        The expected object.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.
    """
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for key in desired:
            if key not in actual:
                raise AssertionError(repr(key))
            assert_equal(actual[key], desired[key],
                         'key=%r\n%s' % (key, err_msg), verbose)
        return
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for index in range(len(desired)):
            assert_equal(actual[index], desired[index],
                         'item=%r\n%s' % (index, err_msg), verbose)
        return
    from numpy.core import ndarray
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_equal(actual, desired, err_msg, verbose)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
    if desired != actual:
        raise AssertionError(msg)
def print_assert_equal(test_string, actual, desired):
    """Assert ``actual == desired``; on failure raise an AssertionError
    whose message names the test and pretty-prints both values."""
    import pprint
    try:
        assert(actual == desired)
    except AssertionError:
        import cStringIO
        out = cStringIO.StringIO()
        out.write(test_string)
        out.write(' failed\nACTUAL: \n')
        pprint.pprint(actual, out)
        out.write('DESIRED: \n')
        pprint.pprint(desired, out)
        raise AssertionError(out.getvalue())
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
    """Raise an assertion if two items are not equal to *decimal* places.

    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal);
    ndarray arguments are delegated to assert_array_almost_equal.
    """
    from numpy.core import ndarray
    if any(isinstance(obj, ndarray) for obj in (actual, desired)):
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
    if round(abs(desired - actual), decimal) != 0:
        raise AssertionError(msg)
def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True):
    """Raise an assertion unless two items agree to *significant*
    significant digits."""
    import math
    actual, desired = map(float, (actual, desired))
    if desired == actual:
        return
    # Normalize both numbers into the range (-10.0, 10.0) by their common
    # order of magnitude, then compare the scaled values.
    scale = float(pow(10, math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))

    def _scaled(value):
        try:
            return value / scale
        except ZeroDivisionError:
            return 0.0

    scaled_desired = _scaled(desired)
    scaled_actual = _scaled(actual)
    msg = build_err_msg([actual, desired], err_msg,
                        header='Items are not equal to %d significant digits:' %
                        significant,
                        verbose=verbose)
    if math.fabs(scaled_desired - scaled_actual) >= pow(10., -(significant-1)):
        raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                         header=''):
    """Core array comparison: apply *comparison* element-wise to x and y
    and raise AssertionError (with a mismatch report) unless it holds
    everywhere. Shapes must match unless one operand is a scalar; NaNs
    must appear at the same locations in both arrays."""
    from numpy.core import asarray, isnan, any
    from numpy import isreal, iscomplex
    x = asarray(x)
    y = asarray(y)

    def isnumber(x):
        # True for bool/int/float/complex dtype kinds (by dtype char).
        return x.dtype.char in '?bhilqpBHILQPfdgFDG'

    try:
        # Scalars broadcast against anything; otherwise shapes must agree.
        cond = (x.shape==() or y.shape==()) or x.shape == y.shape
        if not cond:
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(shapes %s, %s mismatch)' % (x.shape,
                                                                  y.shape),
                                verbose=verbose, header=header,
                                names=('x', 'y'))
            if not cond :
                raise AssertionError(msg)

        if (isnumber(x) and isnumber(y)) and (any(isnan(x)) or any(isnan(y))):
            # Handling nan: we first check that x and y have the nan at the
            # same locations, and then we mask the nan and do the comparison as
            # usual.
            xnanid = isnan(x)
            ynanid = isnan(y)
            try:
                assert_array_equal(xnanid, ynanid)
            except AssertionError:
                msg = build_err_msg([x, y],
                                    err_msg
                                    + '\n(x and y nan location mismatch %s, ' \
                                    '%s mismatch)' % (xnanid, ynanid),
                                    verbose=verbose, header=header,
                                    names=('x', 'y'))
            # Compare only the non-NaN elements.
            val = comparison(x[~xnanid], y[~ynanid])
        else:
            val = comparison(x,y)
        if isinstance(val, bool):
            cond = val
            # Placeholder so the mismatch percentage below reports 100%.
            reduced = [0]
        else:
            reduced = val.ravel()
            cond = reduced.all()
            reduced = reduced.tolist()
        if not cond:
            # Percentage of elements for which the comparison failed.
            match = 100-100.0*reduced.count(1)/len(reduced)
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(mismatch %s%%)' % (match,),
                                verbose=verbose, header=header,
                                names=('x', 'y'))
            if not cond :
                raise AssertionError(msg)
    except ValueError:
        # e.g. operands could not be broadcast together; report both arrays.
        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
                            names=('x', 'y'))
        raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
    """Raise an AssertionError unless x and y are element-wise equal."""
    assert_array_compare(
        operator.eq, x, y, err_msg=err_msg, verbose=verbose,
        header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """Raise an AssertionError unless x and y agree element-wise to
    *decimal* decimal places."""
    from numpy.core import around

    def _close_enough(a, b):
        return around(abs(a - b), decimal) <= 10.0**(-decimal)

    assert_array_compare(_close_enough, x, y, err_msg=err_msg,
                         verbose=verbose,
                         header='Arrays are not almost equal')
def assert_array_less(x, y, err_msg='', verbose=True):
    """Raise an AssertionError unless x < y holds element-wise."""
    assert_array_compare(
        operator.lt, x, y, err_msg=err_msg, verbose=verbose,
        header='Arrays are not less-ordered')
def runstring(astr, dict):
    """Execute the code string *astr* in the namespace *dict*.

    The parameter keeps its historical name ``dict`` (shadowing the
    builtin) for backward compatibility with positional/keyword callers.
    """
    # exec() call form works on Python 2 AND 3; the statement form
    # ``exec astr in dict`` is a SyntaxError on Python 3.
    exec(astr, dict)
def assert_string_equal(actual, desired):
    """Assert that two strings are equal; on mismatch raise an
    AssertionError carrying a line diff of the two strings.

    Note: *desired* is used as a regular expression in the fast-path
    match, so regex metacharacters in it are interpreted.
    """
    # delay import of difflib to reduce startup time
    import difflib

    # Backtick repr (`x`) replaced by repr(x): same behavior, and valid
    # on Python 3 where backticks are a SyntaxError.
    if not isinstance(actual, str):
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    if re.match(r'\A'+desired+r'\Z', actual, re.M):
        return
    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        if d1.startswith('  '):
            continue
        if d1.startswith('- '):
            l = [d1]
            d2 = diff.pop(0)
            if d2.startswith('? '):
                l.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith('+ '):
                raise AssertionError(repr(d2))
            l.append(d2)
            # A '? ' hint line may follow the '+ ' line. The original
            # popped unconditionally and raised IndexError when the diff
            # ended right here; only look ahead if anything is left.
            if diff:
                d3 = diff.pop(0)
                if d3.startswith('? '):
                    l.append(d3)
                else:
                    diff.insert(0, d3)
            if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
                continue
            diff_list.extend(l)
            continue
        raise AssertionError(repr(d1))
    if not diff_list:
        return
    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
    if actual != desired :
        raise AssertionError(msg)
def rundocs(filename=None):
    """ Run doc string tests found in filename.

    When *filename* is None, the caller's __file__ is used, so a test
    module can simply call rundocs() to doctest itself.
    """
    # NOTE(review): `imp` was removed in Python 3.12 and the version check
    # below is a py2-era string comparison -- this helper only works on the
    # legacy interpreters this file targets.
    import doctest, imp
    if filename is None:
        # Resolve the file of the calling module from its frame globals.
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    path = [os.path.dirname(filename)]
    file, pathname, description = imp.find_module(name, path)
    try:
        m = imp.load_module(name, file, pathname, description)
    finally:
        # Always close the module file handle opened by find_module.
        file.close()
    if sys.version[:3]<'2.4':
        doctest.testmod(m, verbose=False)
    else:
        tests = doctest.DocTestFinder().find(m)
        runner = doctest.DocTestRunner(verbose=False)
        for test in tests:
            runner.run(test)
    return
def raises(*args, **kwargs):
    """Forward to nose.tools.raises, importing nose lazily."""
    return import_nose().tools.raises(*args, **kwargs)
def assert_raises(*args, **kwargs):
    """Forward to nose.tools.assert_raises, importing nose lazily."""
    return import_nose().tools.assert_raises(*args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
''' Apply decorator to all methods in class matching testmatch
Parameters
----------
cls : class
Class to decorate methods for
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or string to compile to regexp
Decorators are applied if testmatch.search(methodname)
is not None. Default value is
re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
(the default for nose)
'''
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = filter(isfunction, cls_attr.values())
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str, times=1, label=None):
    """ Return elapsed time for executing code_str in the
    namespace of the caller for given times.

    *label* only names the compiled code in tracebacks. The result is in
    seconds (jiffies are 1/100 s).
    """
    frame = sys._getframe(1)
    locs, globs = frame.f_locals, frame.f_globals

    code = compile(code_str,
                   'Test name: %s ' % label,
                   'exec')
    i = 0
    elapsed = jiffies()
    while i < times:
        i += 1
        # exec() call form is valid on Python 2 AND 3; the statement form
        # ``exec code in globs, locs`` is a SyntaxError on Python 3.
        exec(code, globs, locs)
    elapsed = jiffies() - elapsed
    return 0.01*elapsed
| |
from importlib import import_module
import os
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
# Name of the submodule within an app package that holds its models.
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
    """
    Class representing a Django application and its configuration.
    """

    def __init__(self, app_name, app_module):
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name

        # Root module for the application eg. <module 'django.contrib.admin'
        # from 'django/contrib/admin/__init__.pyc'>.
        self.module = app_module

        # The following attributes could be defined at the class level in a
        # subclass, hence the test-and-set pattern.

        # Last component of the Python path to the application eg. 'admin'.
        # This value must be unique across a Django project.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]

        # Human-readable name for the application eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()

        # Filesystem path to the application directory eg.
        # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
        # Python 2 and a str on Python 3.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)

        # Module containing models eg. <module 'django.contrib.admin.models'
        # from 'django/contrib/admin/models.pyc'>. Set by import_models().
        # None if the application doesn't have a models module.
        self.models_module = None

        # Mapping of lower case model names to model classes. Initially set to
        # None to prevent accidental access before import_models() runs.
        self.models = None

    def __repr__(self):
        """Readable representation, e.g. '<AppConfig: admin>'."""
        return '<%s: %s>' % (self.__class__.__name__, self.label)

    def _path_from_module(self, module):
        """Attempt to determine app's filesystem path from its module."""
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert paths to list because Python 3.3 _NamespacePath does not
        # support indexing.
        paths = list(getattr(module, '__path__', []))
        if len(paths) != 1:
            # Ambiguous or missing __path__: fall back to the directory that
            # contains the module file, if there is one.
            filename = getattr(module, '__file__', None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths))
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module,))
        return upath(paths[0])

    @classmethod
    def create(cls, entry):
        """
        Factory that creates an app config from an entry in INSTALLED_APPS.

        *entry* is either the dotted path of an app module (which may name
        an app config class via its ``default_app_config`` attribute) or
        the dotted path of an AppConfig subclass.
        """
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)

        except ImportError:
            # Track that importing as an app module failed. If importing as an
            # app config class fails too, we'll trigger the ImportError again.
            module = None

            mod_path, _, cls_name = entry.rpartition('.')

            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise

        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')

        # If we're reaching this point, we must attempt to load the app config
        # class located at <mod_path>.<cls_name>
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            if module is None:
                # If importing as an app module failed, that error probably
                # contains the most informative traceback. Trigger it again.
                import_module(entry)
            else:
                raise

        # Check for obvious errors. (This check prevents duck typing, but
        # it could be removed if it became a problem in practice.)
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)

        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)

        # Ensure app_name points to a valid module.
        app_module = import_module(app_name)

        # Entry is a path to an app config class.
        return cls(app_name, app_module)

    def check_models_ready(self):
        """
        Raises an exception if models haven't been imported yet.
        """
        if self.models is None:
            raise AppRegistryNotReady(
                "Models for app '%s' haven't been imported yet." % self.label)

    def get_model(self, model_name):
        """
        Returns the model with the given case-insensitive model_name.

        Raises LookupError if no model exists with this name.
        """
        self.check_models_ready()
        try:
            return self.models[model_name.lower()]
        except KeyError:
            raise LookupError(
                "App '%s' doesn't have a '%s' model." % (self.label, model_name))

    def get_models(self, include_auto_created=False,
                   include_deferred=False, include_swapped=False):
        """
        Returns an iterable of models.

        By default, the following models aren't included:

        - auto-created models for many-to-many relations without
          an explicit intermediate table,
        - models created to satisfy deferred attribute queries,
        - models that have been swapped out.

        Set the corresponding keyword argument to True to include such models.

        Keyword arguments aren't documented; they're a private API.
        """
        self.check_models_ready()
        for model in self.models.values():
            if model._deferred and not include_deferred:
                continue
            if model._meta.auto_created and not include_auto_created:
                continue
            if model._meta.swapped and not include_swapped:
                continue
            yield model

    def import_models(self, all_models):
        """Import the app's models module (if any) and bind the shared
        model registry dict for this app."""
        # Dictionary of models for this app, primarily maintained in the
        # 'all_models' attribute of the Apps this AppConfig is attached to.
        # Injected as a parameter because it gets populated when models are
        # imported, which might happen before populate() imports models.
        self.models = all_models

        if module_has_submodule(self.module, MODELS_MODULE_NAME):
            models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
            self.models_module = import_module(models_module_name)

    def ready(self):
        """
        Override this method in subclasses to run code when Django starts.
        """
| |
"""
For the ePuck robot, a small simulator is provided. It allows to place
ePuck in an arena, with unpassable walls and obstacles at (almost)
arbitrary locations. Some environment objects are predefined in
:py:mod:`HDPy.epuck.env`. The class :py:class:`Robot` provides the
implementation of the simulated ePuck. Obstacles are directly inserted
into this instance, hence it combines the robot with the environment.
As for other problems, a :py:class:`ADHDP` instance can be used on top
of this to control the robot motion. A plant and a policy have to be
provided (see :ref:`plants-and-policies`). Due to historical reasons,
the interpretation of the policy (i.e. action) is done in the robot.
In :py:class:`Robot`, the action is a relative heading,
:py:class:`AbsoluteRobot` implements an absolute one.
The robot and adhdp instances are combined in the
:py:func:`simulation_loop` function to run the simulation for a
fixed amount of time.
"""
import numpy as np
import pylab
import warnings
def _intersect((o1x, o1y), (d1x, d1y), (o2x, o2y), (d2x, d2y)):
"""Intersection of two bounded lines. The lines are given
with the origin and direction. Returned is the step length for
both lines, in the same order as the input.
o1x + t1 * d1x = o2x + t2 * d2x
o1y + t1 * d1y = o2y + t2 * d2y
=> t1 = (o2x + t2 * d2x - o1x)/d1x
=> o1y + ((o2x + t2 * d2x - o1x)/d1x) * d1y = o2y + t2 * d2y
=> o1y + (o2x + t2 * d2x - o1x) * d1y/d1x = o2y + t2 * d2y
=> o1y + (t2 * d2x + o2x - o1x) * d1y/d1x = o2y + t2 * d2y
=> o1y + t2*d2x*d1y/d1x + (o2x - o1x) * d1y/d1x = o2y + t2 * d2y
=> o1y - o2y + (o2x - o1x) * d1y/d1x = t2 * d2y - t2*d2x*d1y/d1x
=> o1y - o2y + (o2x - o1x) * d1y/d1x = t2 * (d2y - d2x*d1y/d1x)
=> t2 = (o1y - o2y + (o2x - o1x) * d1y/d1x) / (d2y - d2x*d1y/d1x)
"""
tol = 1e-14
if abs(d1y - 0.0) < tol :
# o_dir = (!0.0, 0.0)
if abs(d2y - d2x*d1y/d1x) < tol: # parallel
t0, t1 = float('inf'), float('inf')
else:
nom = o2y - o1y - d1y * (o2x - o1x)/d1x
denom = (d1y*d2x)/d1x - d2y
t0 = nom/denom
t1 = (o2x - o1x + t0 * d2x)/d1x
else:
# o_dir = (0.0, !0.0)
if abs(d2x - d2y*d1x/d1y) < tol: # parallel
t0, t1 = float('inf'), float('inf')
else:
nom = o2x - o1x - d1x * (o2y - o1y)/d1y
denom = (d1x*d2y)/d1y - d2x
t0 = nom/denom
t1 = (o2y - o1y + t0 * d2y) / d1y
return t1, t0
def _in_obstacle(loc, obstacle):
    """Test whether ``loc`` lies inside the polygon ``obstacle``.

    The polygon is given as an ordered list of corner points; edges run
    between successive points and the last point connects back to the
    first. A segment from ``loc`` to the origin is cast and the number
    of edge crossings counted: an odd count means the point is inside.
    The origin serves as the outside reference, so the obstacle must
    not contain (0, 0). Edges and corners count as inside.
    """
    # A location coinciding with a corner counts as inside.
    for corner in obstacle:
        if loc == corner:
            return True
    # Build the closed edge list: each corner paired with its successor,
    # plus the closing edge from the last corner back to the first.
    edges = list(zip(obstacle[:-1], obstacle[1:]))
    edges.append((obstacle[-1], obstacle[0]))
    # Parity of crossings of the loc->origin segment decides membership.
    crossings = sum(_obs_intersect((loc, (0.0, 0.0)), edge) for edge in edges)
    return crossings % 2 == 1
def _obs_intersect(line_a, line_b):
    """Check if two line segments intersect.

    Each segment is given as a pair of endpoints
    ``((x0, y0), (x1, y1))``. The intersection is accepted when both
    step parameters fall within [0, 1], extended by a small epsilon —
    so endpoint touches within that tolerance do count.
    """
    # Nested tuple parameters (PEP 3113) are a syntax error on
    # Python 3; unpack explicitly instead. Callers are unaffected.
    (x0, y0), (x1, y1) = line_a
    (x2, y2), (x3, y3) = line_b
    base1 = (x0, y0)
    base2 = (x2, y2)
    dir1 = (x1-x0, y1-y0)
    dir2 = (x3-x2, y3-y2)
    t1, t2 = _intersect(base1, dir1, base2, dir2)
    # Both parameters must lie on their respective segments.
    eps = 0.00001
    return -eps < t1 < 1.0 + eps and -eps < t2 < 1.0 + eps
class Robot(object):
    """Simulated ePuck robot.

    The robot may be steered by means of change in its orientation (i.e.
    the heading relative to the robot). Every time an action is
    executed, the robot turns to the target orientation, then moves
    forward. How much it moves is proportional to the ``speed`` and
    ``step_time``. In between, infrared sensor readouts can be taken.
    The robot is placed in an arena, with some obstacles and walls it
    can collide with but not pass. Upon collision, the robot stops
    moving.

    ``walls``
        List of wall lines which cannot be passed. The lines are to be
        given by their endpoints.

    ``obstacles``
        List of obstacles which cannot be passed. In contrast to walls,
        the obstacles are closed polygons. They have to be given
        as list of corner points. Obstacles may not include the origin
        (0, 0).

    ``speed``
        Speed of the robot.

    ``step_time``
        Time quantum for movement, i.e. for how long the robot drives
        forward.

    ``tol``
        Minimal distance from any obstacle or wall which counts as
        collision.

    .. note::
        Obstacles may not include the origin (0, 0).

    .. todo::
        wall tolerance does not operate correctly.
    """
    def __init__(self, walls=None, obstacles=None, speed=0.5, step_time=1.0, tol=0.0):
        # Avoid shared mutable defaults; fall back to fresh empty lists.
        if obstacles is None:
            obstacles = []
        if walls is None:
            walls = []
        # Copy so the caller's wall list is not mutated below.
        walls = walls[:]
        # Convert each obstacle polygon into wall segments, closing the
        # polygon with an edge from the last corner back to the first.
        for obs in obstacles:
            walls.extend([(x0, y0, x1, y1) for (x0, y0), (x1, y1) in zip(obs[:-1], obs[1:])])
            walls.append((obs[-1][0], obs[-1][1], obs[0][0], obs[0][1]))
        if tol > 0.0:
            warnings.warn("tolerance > 0 doesn't work properly; It only works if the robot faces the wall (not when parallel or away from the wall).")
        # Eight infrared sensors, evenly spaced around the robot body
        # (angles relative to the robot's pose).
        self.sensors = [2*np.pi*i/8.0 for i in range(8)]
        #self.obstacles = [ (x0,y0,x1,y1) ]
        self.obstacle_line = walls
        # _ir_max: sensor range; also the readout when nothing is hit.
        self._ir_max, self.tol = 15.0, tol
        # Walls in (direction, base, limit) vector form for intersection tests.
        self.obstacles = self._cmp_obstacles(self.obstacle_line)
        # Original polygons, kept for the point-in-obstacle test.
        self.polygons = obstacles[:]
        self.speed, self.step_time = speed, step_time
        self.loc = (0.0, 0.0)
        self.pose = 0.0
        self.trajectory = []
        self.reset()
    def _cmp_obstacles(self, lines):
        """Convert lines given by their endpoints ``(x0, y0, x1, y1)``
        to their corresponding vector representation
        ``(direction, base, limit)``; ``limit`` = 1.0 bounds the
        segment to its endpoints."""
        obstacles = []
        for x0, y0, x1, y1 in lines:
            o_vec = (x1-x0, y1-y0)
            # A degenerate (zero-length) wall cannot be intersected.
            if o_vec[0] == 0.0 and o_vec[1] == 0.0:
                raise Exception('Obstacle line must have a direction')
            o_base = (x0, y0)
            o_limit = 1.0
            obstacles.append((o_vec, o_base, o_limit))
        return obstacles
    def _cmp_obstacle_lines(self, obstacles):
        """Convert lines given by as vector ``(direction, base, limit)``
        to their corresponding endpoint representation
        ``(x0, y0, x1, y1)``."""
        lines = []
        for o_vec, o_base, o_limit in obstacles:
            x0, y0 = o_base
            if o_limit == float('inf'):
                raise Exception('Infinite lines not supported')
            x1 = o_base[0] + o_limit * o_vec[0]
            y1 = o_base[1] + o_limit * o_vec[1]
            lines.append((x0, y0, x1, y1))
        return lines
    def reset(self):
        """Reset the robot to the origin (location (0, 0), pose 0) and
        clear the trajectory."""
        self.loc = (0.0, 0.0)
        self.pose = 0.0
        self.trajectory = [self.loc]
    def reset_random(self, loc_lo=-10.0, loc_hi=10.0):
        """Reset the robot to a random location, outside the obstacles.

        Candidate locations/poses are sampled uniformly until one is
        found that is not inside a polygon and does not immediately
        collide (checked via a zero-turn ``take_action``).
        """
        # NOTE(review): ``xrange`` is Python-2-only.
        for i in xrange(1000):
            loc = self.loc = (np.random.uniform(loc_lo, loc_hi), np.random.uniform(loc_lo, loc_hi))
            pose = self.pose = np.random.uniform(0, 2*np.pi)
            if not any([_in_obstacle(self.loc, obs) for obs in self.polygons]) and not self.take_action(0.0):
                break
        # NOTE(review): ``i`` only ever reaches 999 in this loop, so
        # this warning can never fire; it should test ``i == 999`` or
        # use a for/else clause.
        if i == 1000:
            warnings.warn('Random reset iterations maximum exceeded')
        # Undo the movement made by the take_action probe above.
        self.loc = loc
        self.pose = pose
        self.trajectory = [self.loc]
    def read_ir(self):
        """Compute the proximities to obstacles in all infrared sensor
        directions. Returns one distance per sensor, capped at the
        sensor range ``self._ir_max``."""
        # view-direction
        readout = []
        for sensor in self.sensors:
            s_dist = self._ir_max
            # Sensor ray: origin at the robot, direction given by the
            # robot pose plus the sensor's mounting angle.
            s_ori = self.pose + sensor
            s_dir = (np.cos(s_ori), np.sin(s_ori))
            s_base = self.loc
            for o_dir, o_base, o_limit in self.obstacles:
                # obstacles intersection
                t0, t1 = _intersect(o_base, o_dir, s_base, s_dir)
                eps = 0.00001
                # Hit only counts if it is in front of the sensor
                # (t1 >= 0) and within the wall segment's extent.
                if t1 >= 0 and (o_limit == float('inf') or (-eps <= t0 and t0 <= o_limit + eps)):
                    #if t0 >= 0 and t1 >= 0 and t1 <= 1.0:
                    # intersection at distance (t0 * s_dir)
                    dist = np.linalg.norm((t1 * s_dir[0], t1 * s_dir[1]))
                else:
                    # no intersection
                    dist = self._ir_max
                # Keep the closest hit over all walls.
                if dist < s_dist:
                    s_dist = dist
            readout.append(s_dist)
        return readout
    def read_sensors(self):
        """Read all sensors. A :py:keyword:`dict` is returned with the
        keys 'loc', 'pose' and 'ir', each as a 2d numpy array."""
        ir = self.read_ir()
        #noise = np.random.normal(scale=0.01, size=(len(ir)))
        #ir = map(operator.add, ir, noise)
        return {'loc': np.atleast_2d(self.loc), 'pose': np.atleast_2d(self.pose), 'ir': np.atleast_2d(ir)}
    def take_action(self, action):
        """Execute an ``action`` and move forward
        (speed * step_time units or until collision). Return
        :py:const:`True` if the robot collided.

        The action is interpreted as a heading change relative to the
        current pose.
        """
        # turn
        if isinstance(action, np.ndarray):
            action = action.flatten()[0]
        self.pose = (self.pose + action) % (2*np.pi)
        #self.pose = action % (2*np.pi)
        # move forward
        t = self.speed * self.step_time # distance per step
        # Collision detection
        eps = 0.00001
        # Ray along the (new) driving direction.
        r_vec = (np.cos(self.pose), np.sin(self.pose))
        # Intersect the driving ray with every wall; keep only walls
        # ahead of the robot and hit within their segment extent.
        wall_dists = [(idx, _intersect(self.loc, r_vec, o_base, o_vec), o_limit) for idx, (o_vec, o_base, o_limit) in enumerate(self.obstacles)]
        wall_dists = [(idx, r_dist) for idx, (r_dist, o_dist), o_limit in wall_dists if r_dist >= 0.0 and r_dist < float('inf') and -eps <= o_dist and o_dist <= o_limit + eps]
        if len(wall_dists) > 0:
            # Distance to the wall
            # NOTE(review): tuple-unpacking lambda is Python-2-only.
            wall_idx, min_wall_dist = min(wall_dists, key=lambda (idx, dist): dist)
            dist = np.linalg.norm((min_wall_dist * r_vec[0], min_wall_dist * r_vec[1]))
            # angle between wall and robot trajectory
            o_vec = self.obstacles[wall_idx][0]
            a = np.arccos( (o_vec[0] * r_vec[0] + o_vec[1] * r_vec[1]) / (np.linalg.norm(o_vec) * np.linalg.norm(r_vec)) )
            if a > np.pi/2.0:
                a = np.pi - a
            # maximum driving distance
            # k keeps the robot ``tol`` away from the wall along the
            # driving direction (see the class todo: not always correct).
            k = self.tol / np.sin(a)
            t_max = dist - k
        else:
            # no wall ahead
            t_max = float('inf')
        # Collide when the requested step would reach or pass the wall.
        collide = t >= t_max
        t = min(t, t_max)
        # next location
        self.loc = (self.loc[0] + np.cos(self.pose) * t, self.loc[1] + np.sin(self.pose) * t) # t doesn't denote the distance in moving direction!
        self.trajectory.append(self.loc)
        return collide
    def plot_trajectory(self, wait=False, with_tol=True, tol=None, full_view=True, axis=None):
        """Plot the robot trajectory in a :py:mod:`pylab` figure.

        ``wait``
            True for blocking until the figure is closed.

        ``with_tol``
            Plot obstacle tolerance lines.

        ``tol``
            Overwrite the obstacle tolerance.

        ``full_view``
            Keep the original clipping of the window. If false, the
            clipping will be adjusted to the data.

        ``axis``
            A :py:mod:`pylab` axis, which should be used for plotting.
            If not provided, the first axis of the first figure is used.
        """
        if axis is None:
            axis = pylab.figure(1).axes[0]
        axis.clear()
        self._plot_obstacles(axis, with_tol, tol)
        # Trajectory as a line plus a marker per visited location.
        x, y = zip(*self.trajectory)
        axis.plot(x, y, 'b-')
        axis.plot(x, y, 'b*')
        if full_view:
            x0, x1, y0, y1 = axis.axis()
        else:
            x0, x1, y0, y1 = min(x), max(x), min(y), max(y)
        # Enlarge the clipping region by 10% in every direction.
        axis.axis((
            x0 + x0*0.1,
            x1 + x1*0.1,
            y0 + y0*0.1,
            y1 + y1*0.1
        ))
        pylab.show(block=wait)
    def _plot_obstacles(self, axis, with_tol=True, tol=None):
        """Plot all obstacles and walls into a :py:mod:`pylab` figure.

        ``axis``
            The axis where stuff is plotted into.

        ``with_tol``
            Plot obstacle tolerance lines.

        ``tol``
            Overwrite the obstacle tolerance.
        """
        if tol is None:
            tol = self.tol
        for vec, base, limit in self.obstacles:
            # obstacle line
            axis.plot((base[0], base[0]+limit*vec[0]), (base[1], base[1]+limit*vec[1]), 'k')
            if with_tol and tol > 0:
                # y = normal to the wall direction, scaled to length tol.
                if vec[1] == 0.0:
                    y = (-vec[1]/vec[0], 1.0)
                else:
                    y = (1.0, -vec[0]/vec[1])
                y = (y[0] * tol / np.linalg.norm(y), y[1] * tol / np.linalg.norm(y))
                # Tolerance lines on either side of the wall.
                base_tn = (base[0] - y[0], base[1] - y[1])
                base_tp = (base[0] + y[0], base[1] + y[1])
                # obstacle tolerance
                axis.plot((base_tn[0], base_tn[0]+limit*vec[0]), (base_tn[1], base_tn[1]+limit*vec[1]), 'k:')
                axis.plot((base_tp[0], base_tp[0]+limit*vec[0]), (base_tp[1], base_tp[1]+limit*vec[1]), 'k:')
class AbsoluteRobot(Robot):
    """Simulated ePuck robot with absolute heading control.

    In contrast to :py:class:`Robot`, the heading is with respect to
    the arena instead of the robot - i.e. it is absolute, not relative
    to the robot.
    """
    def take_action(self, action):
        """Execute an ``action`` and move forward
        (speed * step_time units or until collision). Return
        :py:const:`True` if the robot collided.
        """
        heading = action
        if isinstance(heading, np.ndarray):
            heading = heading.flatten()[0]
        # Set the pose directly from the (absolute) action, then let the
        # base class drive forward with a zero relative turn.
        self.pose = heading % (2*np.pi)
        return super(AbsoluteRobot, self).take_action(0.0)
def simulation_loop(acd, robot, max_step=-1, max_episodes=-1, max_total_iter=-1):
    """Simulate some episodes of the ePuck robot.

    This method handles data passing between the ``acd`` and ``robot``
    instances in two loops, one for the episode and one for the whole
    experiment. The ``acd`` instance (its copy, actually) is returned.

    ``acd``
        Actor-Critic instance (:py:class:`ADHDP`).

    ``robot``
        Robot instance (:py:class:`Robot`).

    ``max_step``
        Maximum number of steps in an episode. Negative means no limit.

    ``max_episodes``
        Maximum number of episodes. Negative means no limit.

    ``max_total_iter``
        Maximum number of steps in total. Negative means no limit.

    Raises an :py:exc:`Exception` if all three limits are negative,
    since the simulation would never terminate.
    """
    if max_step < 0 and max_episodes < 0 and max_total_iter < 0:
        raise Exception('The simulation cannot run forever.')
    num_episode = 0
    num_total_iter = 0
    while True:
        # init episode
        acd.new_episode()
        acd.signal('new_episode') # collectors will create new group
        robot.reset()
        acd.child.reset()
        a_curr = np.atleast_2d([acd.child.action])
        num_step = 0 # k
        while True:
            # Apply current action
            collided = robot.take_action(a_curr)
            # Observe sensors
            s_next = robot.read_sensors()
            # Execute ACD
            a_next = acd(s_next, num_step, num_step+1, 1)
            # Iterate
            num_step += 1
            num_total_iter += 1
            # An episode ends on collision or when the step limit is hit.
            if collided:
                break
            if max_step > 0 and num_step >= max_step:
                break
            acd.a_curr = a_curr = a_next
        if num_step <= 3:
            # Call form of print so the module also runs on Python 3
            # (the original used the Python-2-only print statement).
            print("Warning: episode ended prematurely")
        num_episode += 1
        if max_episodes > 0 and num_episode >= max_episodes:
            break
        if max_total_iter > 0 and num_total_iter >= max_total_iter:
            break
    return acd
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import logging
import operator
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.options import pipeline_options
from apache_beam.options import value_provider
from apache_beam.transforms import util
import common as c
class RuntimeOptions(pipeline_options.PipelineOptions):
    """Specifies runtime options for the pipeline.

    Class defining the arguments that can be passed to the pipeline to
    customize the execution.
    """

    @classmethod
    def _add_argparse_args(cls, parser):
        # (option name, extra keyword arguments), in registration order.
        arg_specs = [
            (c._OPTION_INPUT_CSV, {}),
            (c._OPTION_OUTPUT_FOLDER, {}),
            (c._OPTION_CUSTOMER_ID_COLUMN_POSITION, {'type': int}),
            (c._OPTION_TRANSACTION_DATE_COLUMN_POSITION, {'type': int}),
            (c._OPTION_SALES_COLUMN_POSITION, {'type': int}),
            (c._OPTION_EXTRA_DIMENSION_COLUMN_POSITION, {'type': int}),
            (c._OPTION_DATE_PARSING_PATTERN, {'default': 'YYYY-MM-DD'}),
            (c._OPTION_MODEL_TIME_GRANULARITY,
             {'default': c.TimeGranularityParams.GRANULARITY_WEEKLY}),
            (c._OPTION_FREQUENCY_MODEL_TYPE,
             {'default': c._MODEL_TYPE_MBGNBD}),
            (c._OPTION_CALIBRATION_START_DATE, {}),
            (c._OPTION_CALIBRATION_END_DATE, {}),
            (c._OPTION_COHORT_START_DATE, {}),
            (c._OPTION_COHORT_END_DATE, {}),
            (c._OPTION_HOLDOUT_END_DATE, {}),
            (c._OPTION_PREDICTION_PERIOD, {'default': 52, 'type': int}),
            (c._OPTION_OUTPUT_SEGMENTS, {'default': 5, 'type': int}),
            (c._OPTION_TRANSACTION_FREQUENCY_THRESHOLD,
             {'default': 15, 'type': int}),
            (c._OPTION_PENALIZER_COEF, {'default': 0.0, 'type': float}),
            (c._OPTION_ROUND_NUMBERS, {'default': "True"}),
        ]
        # Every option is registered as a ValueProvider argument so it
        # can be supplied at template execution time.
        for option_name, extra_kwargs in arg_specs:
            parser.add_value_provider_argument(f'--{option_name}',
                                               **extra_kwargs)
def run(argv=None):
  """Main function.

  Main function containing the Apache Beam pipeline describing how to process
  the input CSV file to generate the LTV predictions.

  The pipeline reads the transaction CSV, aggregates it into an event
  log, fits/validates the frequency model on a calibration/holdout
  split, predicts per-customer lifetime value, groups customers into
  segments, and writes three CSV reports (per-customer predictions, a
  per-segment summary, and an optional per-extra-dimension summary) to
  the configured output folder.

  Args:
    argv: command-line arguments forwarded to the argument parser;
      None uses sys.argv.
  """
  parser = argparse.ArgumentParser()
  _, pipeline_args = parser.parse_known_args(argv)

  options = pipeline_options.PipelineOptions(pipeline_args)
  runtime_options = options.view_as(RuntimeOptions)

  with beam.Pipeline(options=options) as pipeline:
    # Materialize all pipeline options (resolving ValueProviders) into a
    # single-element PCollection used as a side input everywhere below.
    options = (
        pipeline
        | 'Create single element Stream containing options dict' >>
          beam.Create([options.get_all_options()])
        | beam.Map(lambda x: {
            k: v.get() if isinstance(v, value_provider.ValueProvider)
            else v
            for (k, v) in x.items()
        })
        | beam.Map(c.set_extra_options)
    )

    # Parse the input CSV into the full event log.
    full_elog = (
        pipeline
        | beam.io.ReadFromText(
            getattr(runtime_options, c._OPTION_INPUT_CSV),
            skip_header_lines=1)
        | beam.Map(lambda x: list(csv.reader([x]))[0])
        | beam.FlatMap(
            c.csv_line_to_list,
            pvalue.AsSingleton(options))  # (customer_id, date_str, date,
                                          #  sales, extra_dimension?)
    )

    # One record per customer and day, positive sales only.
    full_elog_merged = (
        full_elog
        | beam.Filter(lambda x: x[3] > 0)  # sales > 0
        | beam.Map(lambda x: ((x[0], x[1]), x))  # key: (customer_id, date)
        | 'Group full elog by customer and date' >> beam.GroupByKey()
        | beam.Map(c.merge_full_elog_by_customer_and_date)  # (customer_id,
                                                            #  date_str, date,
                                                            #  sales)
    )

    # Date boundaries of the data and of the model periods.
    min_max_dates = (
        full_elog_merged
        | beam.Map(lambda x: x[2])  # date
        | beam.CombineGlobally(c.MinMaxDatesFn())
        | beam.Map(c.min_max_dates_dict)
    )

    limits_dates = (
        min_max_dates
        | beam.FlatMap(c.limit_dates_boundaries, pvalue.AsSingleton(options))
    )

    # Customers whose first activity falls inside the cohort window.
    cohort = (
        full_elog_merged
        | beam.FlatMap(c.filter_customers_in_cohort,
                       pvalue.AsSingleton(limits_dates))
        | 'Distinct Customer IDs in Cohort' >> util.Distinct()
    )

    cohort_count = (
        cohort
        | 'Count cohort entries' >> beam.combiners.Count.Globally()
    )

    cohort_set = (
        cohort
        | beam.Map(lambda x: (x, 1))
    )

    all_customer_ids = (
        full_elog_merged
        | beam.Map(lambda x: x[0])  # key: customer_id
        | 'Distinct all Customer IDs' >> util.Distinct()
    )

    all_customer_ids_count = (
        all_customer_ids
        | 'Count all customers' >> beam.combiners.Count.Globally()
    )

    num_customers = (
        pipeline
        | 'Create single elem Stream I' >> beam.Create([1])
        | beam.FlatMap(c.count_customers,
                       pvalue.AsSingleton(cohort_count),
                       pvalue.AsSingleton(all_customer_ids_count),
                       pvalue.AsSingleton(options))
    )

    # Cohort records restricted to the calibration + holdout period.
    cal_hol_elog = (
        full_elog_merged
        | beam.FlatMap(c.filter_cohort_records_in_cal_hol,
                       pvalue.AsDict(cohort_set),
                       pvalue.AsSingleton(limits_dates))
    )

    cal_hol_elog_count = (
        cal_hol_elog
        | 'Count cal hol elog entries' >> beam.combiners.Count.Globally()
    )

    calibration = (
        cal_hol_elog
        | beam.FlatMap(c.filter_records_in_calibration,
                       pvalue.AsSingleton(limits_dates))
    )

    num_txns_total = (
        full_elog_merged
        | beam.FlatMap(c.filter_records_in_cal_hol,
                       pvalue.AsSingleton(limits_dates))
        | 'Count num txns total' >> beam.combiners.Count.Globally()
    )

    num_txns = (
        pipeline
        | 'Create single elem Stream II' >> beam.Create([1])
        | beam.FlatMap(c.count_txns,
                       pvalue.AsSingleton(cal_hol_elog_count),
                       pvalue.AsSingleton(num_txns_total),
                       pvalue.AsSingleton(options))
    )

    # Customer-by-sufficient-statistic matrix for the calibration period.
    calcbs = (
        calibration
        | beam.Map(lambda x: (x[0], x))
        | 'Group calibration elog by customer id' >> beam.GroupByKey()
        | beam.FlatMap(
            c.create_cal_cbs,
            pvalue.AsSingleton(options),
            pvalue.AsSingleton(limits_dates)
        )  # (customer_id, number_of_transactions, average_order_value,
           #  frequency, recency, total_time_observed)
    )

    first_transaction_dates_by_customer = (
        cal_hol_elog
        | beam.Map(lambda x: (x[0], x))  # customer_id
        | 'Group cal hol elog by customer id' >> beam.GroupByKey()
        | beam.Map(lambda x: (x[0], min(map(operator.itemgetter(2), x[1])))
                  )  # item 2 -> date
    )

    # Repeat (non-first) transactions per time unit.
    cal_hol_elog_repeat = (
        cal_hol_elog
        | beam.FlatMap(c.filter_first_transaction_date_records,
                       pvalue.AsDict(first_transaction_dates_by_customer))
        | beam.FlatMap(
            c.calculate_time_unit_numbers,  # (customer_id, date,
                                            #  time_unit_number)
            pvalue.AsSingleton(options),
            pvalue.AsSingleton(limits_dates))
        | beam.Map(lambda x: (x[2], 1))  # key: time_unit_number
        | 'Group cal hol elog repeat by time unit number' >>
          beam.GroupByKey()
        | beam.Map(lambda x: (x[0], sum(x[1]))
                  )  # (time_unit_number, occurrences)
    )

    repeat_tx = (
        pipeline
        | 'Create single elem Stream III' >> beam.Create([1])
        | beam.FlatMap(c.calculate_cumulative_repeat_transactions,
                       pvalue.AsIter(cal_hol_elog_repeat)
                      )  # (time_unit_number, repeat_transactions,
                         #  repeat_transactions_cumulative)
    )

    # Fit the model on calibration data and validate against holdout.
    model_validation = (
        pipeline
        | 'Create single elem Stream IV' >> beam.Create([1])
        | beam.FlatMap(c.calculate_model_fit_validation,
                       pvalue.AsSingleton(options),
                       pvalue.AsSingleton(limits_dates),
                       pvalue.AsIter(calcbs),
                       pvalue.AsIter(repeat_tx),
                       pvalue.AsSingleton(num_customers),
                       pvalue.AsSingleton(num_txns))
    )

    _ = (
        model_validation
        | beam.Map(c.raise_error_if_invalid_mape)
    )

    _ = (
        model_validation
        | beam.Map(lambda x: x[0])
        | beam.FlatMap(c.calculate_model_fit_validation_to_text,
                       pvalue.AsSingleton(options))
    )

    # Sufficient statistics over the whole observation period.
    fullcbs_without_extra_dimension = (
        full_elog_merged
        | beam.Map(lambda x: (x[0], x))  # key: customer_id
        | 'Group full merged elog by customer id' >> beam.GroupByKey()
        | beam.FlatMap(
            c.create_fullcbs,
            pvalue.AsSingleton(options),
            pvalue.AsSingleton(min_max_dates)
        )  # (customer_id, number_of_transactions, historical_aov,
           #  frequency, recency, total_time_observed)
    )

    # Extra-dimension handling (only when the input provides one).
    full_elog_if_extra_dimension = (
        full_elog
        | 'Discard records if no extra dimension' >> beam.FlatMap(
            c.discard_if_no_extra_dimension, pvalue.AsSingleton(options))
    )

    extra_dimensions_stats = (
        full_elog_if_extra_dimension
        | beam.Map(lambda x: ((x[0], x[4]), x)
                  )  # key: (customer_id, extra_dimension)
        | 'Group full elog by customer id and extra dimension' >>
          beam.GroupByKey()
        | beam.Map(
            c.create_extra_dimensions_stats
        )  # (customer_id, extra_dimension, dimension_count, tot_sales,
           #  max_dimension_date)
    )

    top_dimension_per_customer = (
        extra_dimensions_stats
        | beam.Map(lambda x: (x[0], x))  # customer_id
        | 'Group extra dimension stats by customer id' >> beam.GroupByKey()
        | beam.Map(
            c.extract_top_extra_dimension
        )  # (customer_id, extra_dimension, dimension_count, tot_sales,
           #  max_dimension_date)
    )

    customer_dimension_map = (
        top_dimension_per_customer
        | beam.Map(
            lambda x: (x[0], x[1]))  # (customer_id, extra_dimension)
    )

    # Per-customer LTV prediction.
    prediction = (
        pipeline
        | 'Create single elem Stream V' >> beam.Create([1])
        | beam.FlatMap(
            c.calculate_prediction,
            pvalue.AsSingleton(options),
            pvalue.AsIter(fullcbs_without_extra_dimension),
            pvalue.AsSingleton(num_customers),
            pvalue.AsSingleton(num_txns)
        )  # [customer_id, p_alive, predicted_purchases, future_aov,
           #  historical_aov, expected_value, frequency, recency,
           #  total_time_observed], prediction_params
    )

    prediction_by_customer_no_segments_no_extra_dimension = (
        prediction
        | beam.FlatMap(lambda x: x[0])  # Extract predictions by customer
    )

    prediction_by_customer_no_segments = (
        prediction_by_customer_no_segments_no_extra_dimension
        | beam.FlatMap(
            c.add_top_extra_dimension_to_fullcbs,
            pvalue.AsSingleton(options),
            pvalue.AsDict(customer_dimension_map)
        )  # [customer_id, p_alive, predicted_purchases, future_aov
           #  historical_aov, expected_value, frequency, recency,
           #  total_time_observed, extra_dimension?]
    )

    _ = (
        prediction
        | beam.Map(lambda x: x[1])  # Extract predictions params
        | beam.FlatMap(c.calculate_prediction_to_text,
                       pvalue.AsSingleton(options))
    )

    # Segmentation: exact ntiles for small datasets, hash-based
    # approximation above _SEGMENT_PREDICTION_THRESHOLD rows.
    num_rows = (
        full_elog_merged
        | 'Count num rows in full elog merged' >>
          beam.combiners.Count.Globally()
    )

    segment_predictions_exact = (
        pipeline
        | 'Create single elem Stream VII' >> beam.Create([1])
        | beam.FlatMap(lambda _, rows_count: [
            rows_count <= c._SEGMENT_PREDICTION_THRESHOLD],
            pvalue.AsSingleton(num_rows))
    )

    sharded_cust_predictions_no_segments_exact, \
        sharded_cust_predictions_no_segments_hash = (
            prediction_by_customer_no_segments
            | beam.FlatMap(
                c.prediction_sharded,
                pvalue.AsSingleton(options),
                pvalue.AsSingleton(segment_predictions_exact)
            )  # [customer_id, p_alive, predicted_purchases, future_aov,
               #  historical_aov, expected_value, frequency, recency,
               #  total_time_observed, extra_dimension?]
            | beam.Partition(lambda x, _: 0 if x[1] else 1, 2)
        )

    # BEGIN of "exact" branch
    prediction_by_customer_exact = (
        pipeline
        | 'Create single elem Stream VIII' >> beam.Create([1])
        | beam.FlatMap(c.split_in_ntiles_exact,
                       pvalue.AsSingleton(options),
                       pvalue.AsIter(
                           sharded_cust_predictions_no_segments_exact)
                      )  # [customer_id, p_alive, predicted_purchases,
                         #  future_aov, historical_aov, expected_value,
                         #  frequency, recency, total_time_observed,
                         #  segment, extra_dimension?]
    )
    # END of "exact" branch

    # BEGIN of "hash" branch
    customer_count_by_expected_value = (
        sharded_cust_predictions_no_segments_hash
        | beam.Map(lambda x: (x[0][5], 1))  # (expected_value, 1)
        | 'Group customer predictions by expected value' >>
          beam.GroupByKey()
        | beam.Map(
            lambda x: (x[0], sum(x[1])))  # expected_value, customers_count
    )

    hash_segment_limits = (
        pipeline
        | 'Create single elem Stream IX' >> beam.Create([1])
        | beam.FlatMap(c.expected_values_segment_limits,
                       pvalue.AsSingleton(options),
                       pvalue.AsIter(customer_count_by_expected_value),
                       pvalue.AsSingleton(all_customer_ids_count))
    )

    prediction_by_customer_hash = (
        sharded_cust_predictions_no_segments_hash
        | beam.Map(lambda x: x[0])
        | beam.FlatMap(c.split_in_ntiles_hash,
                       pvalue.AsSingleton(hash_segment_limits)
                      )  # [customer_id, p_alive, predicted_purchases,
                         #  future_aov, historical_aov, expected_value,
                         #  frequency, recency, total_time_observed,
                         #  segment, extra_dimension?]
    )
    # END of "hash" branch

    prediction_by_customer = (
        # only one of these two streams will contain values
        (prediction_by_customer_exact, prediction_by_customer_hash)
        | beam.Flatten()
    )

    # Report 1: per-customer predictions.
    _ = (
        prediction_by_customer
        | beam.FlatMap(lambda x, opts: [x + ['']]
                       if not opts[c._OPTION_EXTRA_DIMENSION_EXISTS] else [x],
                       pvalue.AsSingleton(options))
        | 'prediction_by_customer to CSV line' >> beam.Map(c.list_to_csv_line)
        | 'Write prediction_by_customer' >>
          beam.io.WriteToText(getattr(runtime_options, c._OPTION_OUTPUT_FOLDER),
                              header='customer_id,p_alive'
                                     ',predicted_purchases'
                                     ',future_aov,historical_aov'
                                     ',expected_value,frequency,recency'
                                     ',total_time_observed,segment'
                                     ',extra_dimension',
                              shard_name_template='',
                              num_shards=1,
                              file_name_suffix=
                              'prediction_by_customer.csv')
    )

    # Report 2: per-segment summary.
    prediction_summary_temp = (
        prediction_by_customer
        | beam.Map(lambda x: (x[9], x))  # key: segment
        | 'Group customer predictions by segment' >> beam.GroupByKey()
        | beam.FlatMap(c.generate_prediction_summary,
                       pvalue.AsSingleton(options)
                      )  # (segment, average_retention_probability,
                         #  average_predicted_customer_value,
                         #  average_predicted_order_value,
                         #  average_predicted_purchases, total_customer_value,
                         #  number_of_customers)
    )

    tot_equity = (
        prediction_summary_temp
        | beam.Map(lambda x: x[5])  # total_customer_value
        | beam.CombineGlobally(sum)
    )

    prediction_summary = (
        prediction_summary_temp
        | beam.FlatMap(
            c.calculate_perc_of_total_customer_value,
            pvalue.AsSingleton(tot_equity),
            pvalue.AsSingleton(options)
        )  # (segment, average_retention_probability,
           #  average_predicted_customer_value,
           #  average_predicted_order_value,
           #  average_predicted_purchases,
           #  total_customer_value, number_of_customers,
           #  perc_of_total_customer_value)
    )

    _ = (
        prediction_summary
        | 'prediction_summary to CSV line' >> beam.Map(c.list_to_csv_line)
        | 'Write prediction_summary' >> beam.io.WriteToText(
            getattr(runtime_options, c._OPTION_OUTPUT_FOLDER),
            header='segment,average_retention_probability'
                   ',average_predicted_customer_value'
                   ',average_predicted_order_value,average_predicted_purchases'
                   ',total_customer_value,number_of_customers'
                   ',perc_of_total_customer_value',
            shard_name_template='',
            num_shards=1,
            file_name_suffix='prediction_summary.csv')
    )

    # Report 3: per-extra-dimension summary (only when present).
    prediction_summary_extra_dimension = (
        prediction_by_customer
        | 'Discard prediction if there is not extra dimension' >>
          beam.FlatMap(c.discard_if_no_extra_dimension,
                       pvalue.AsSingleton(options))
        | beam.Map(lambda x: (x[10], x))  # extra dimension
        | 'Group customer predictions by extra dimension' >>
          beam.GroupByKey()
        | beam.FlatMap(c.generate_prediction_summary_extra_dimension,
                       pvalue.AsSingleton(tot_equity),
                       pvalue.AsSingleton(options))
    )

    _ = (
        prediction_summary_extra_dimension
        | 'prediction_summary_extra_dimension to CSV line' >>
          beam.Map(c.list_to_csv_line)
        |
        'Write prediction_summary_extra_dimension' >> beam.io.WriteToText(
            getattr(runtime_options, c._OPTION_OUTPUT_FOLDER),
            header='extra_dimension,average_retention_probability'
                   ',average_predicted_customer_value'
                   ',average_predicted_order_value'
                   ',average_predicted_purchases,total_customer_value'
                   ',number_of_customers,perc_of_total_customer_value',
            shard_name_template='',
            num_shards=1,
            file_name_suffix='prediction_summary_extra_dimension.csv')
    )
# Script entry point: enable INFO logging and launch the pipeline.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
| |
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time
import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
    """Console reporter for test progress and per-test results."""

    def __init__(self, opts, numTests, progressBar=None):
        # Display options, total test count and (optional) progress bar;
        # ``completed`` counts tests reported so far.
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.progressBar = progressBar
        self.completed = 0

    def finish(self):
        """Tear down the display after the last test was reported."""
        if self.progressBar:
            self.progressBar.clear()
            return
        if self.opts.quiet:
            return
        if self.opts.succinct:
            sys.stdout.write('\n')

    def update(self, test):
        """Record one finished ``test`` and print its result if needed."""
        self.completed += 1

        if self.opts.incremental:
            update_incremental_cache(test)

        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        shouldShow = test.result.code.isFailure or \
            (not self.opts.quiet and not self.opts.succinct)
        if not shouldShow:
            return

        if self.progressBar:
            self.progressBar.clear()

        # The test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # The test failure output, when requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Test metrics, when present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            for metric_name, value in sorted(test.result.metrics.items()):
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
    """Write a JSON summary of ``run``'s test results to ``output_path``.

    ``run``
        The executed test run; its ``tests`` are serialized.
    ``lit_config``
        Used only to report a fatal error when ``json`` is unavailable.
    ``testing_time``
        Total elapsed time of the run, recorded under 'elapsed'.
    ``output_path``
        Path of the JSON file to (over)write.
    """
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}

    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output; the context manager guarantees the file is
    # closed even if serialization fails (replaces manual try/finally).
    with open(output_path, 'w') as f:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
def update_incremental_cache(test):
    """Bump the mtime of a failing test's source file.

    Used by incremental mode: sort_by_incremental_cache() schedules
    newer files first, so recently failed tests re-run before the rest.
    Tests that did not fail are left untouched.
    """
    if test.result.code.isFailure:
        os.utime(test.getFilePath(), None)
def sort_by_incremental_cache(run):
    """Order ``run.tests`` so most recently modified test files run first.

    Together with update_incremental_cache(), which touches the files of
    failing tests, this schedules recently failed tests before the rest.
    Files whose modification time cannot be read sort after all readable
    (positive-mtime) files.
    """
    def sortIndex(test):
        # Negate the mtime so newer files get smaller (earlier) keys.
        try:
            return -os.path.getmtime(test.getFilePath())
        except OSError:
            # Missing/unreadable file: neutral key. (The original bare
            # ``except`` also swallowed KeyboardInterrupt and friends;
            # only filesystem errors are expected here.)
            return 0
    run.tests.sort(key=sortIndex)
def main(builtinParameters = {}):
    """Command-line entry point for lit: parse options, discover tests,
    execute them, and print/serialize the results.

    NOTE(review): the mutable default for ``builtinParameters`` is only
    safe because it is copied (``dict(builtinParameters)``) before use —
    confirm no caller relies on mutation.
    """
    # Use processes by default on Unix platforms.
    isWindows = platform.system() == 'Windows'
    useProcessesIsDefault = not isWindows

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("", "--version", dest="show_version",
                      help="Show version and exit",
                      action="store_true", default=False)
    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("-D", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    group.add_option("", "--show-unsupported", dest="show_unsupported",
                     help="Show unsupported tests",
                     action="store_true", default=False)
    group.add_option("", "--show-xfail", dest="show_xfail",
                     help="Show tests that were expected to fail",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
                     help=("Write XUnit-compatible XML test reports to the"
                           " specified file"), default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("-i", "--incremental", dest="incremental",
                     help="Run modified and failing tests first (updates "
                          "mtimes)",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    # These two options share a dest: whichever appears later on the
    # command line wins.
    group.add_option("", "--use-processes", dest="useProcesses",
                     help="Run tests in parallel with processes (not threads)",
                     action="store_true", default=useProcessesIsDefault)
    group.add_option("", "--use-threads", dest="useProcesses",
                     help="Run tests in parallel with threads (not processes)",
                     action="store_false", default=useProcessesIsDefault)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if opts.show_version:
        print("lit %s" % (lit.__version__,))
        return

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with numThreads>1
        # http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = lit.util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        # "-D NAME" with no '=' means NAME gets the empty string.
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = lit.LitConfig.LitConfig(
        progname = os.path.basename(sys.argv[0]),
        path = opts.path,
        quiet = opts.quiet,
        useValgrind = opts.useValgrind,
        valgrindLeakCheck = opts.valgrindLeakCheck,
        valgrindArgs = opts.valgrindArgs,
        noExecute = opts.noExecute,
        debug = opts.debug,
        isWindows = isWindows,
        params = userParams,
        config_prefix = opts.configPrefix)

    # Perform test discovery.
    run = lit.run.Run(litConfig,
                      lit.discovery.find_tests_for_inputs(litConfig, inputs))

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print(' %s - %d tests' %(ts.name, len(ts_tests)))
                print(' Source Root: %s' % ts.source_root)
                print(' Exec Root : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print(' %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        sort_by_incremental_cache(run)
    else:
        run.tests.sort(key = lambda result_test: result_test.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        run.tests = run.tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(run.tests), opts.numThreads)

    extra = ''
    if len(run.tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
                                                      opts.numThreads)
    # A progress bar is only used in succinct mode on a capable terminal.
    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = lit.ProgressBar.TerminalController()
                progressBar = lit.ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print(header)
                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
    try:
        run.execute_tests(display, opts.numThreads, opts.maxTime,
                          opts.useProcesses)
    except KeyboardInterrupt:
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for test in run.tests:
        if test.result.code not in byCode:
            byCode[test.result.code] = []
        byCode[test.result.code].append(test)
        if test.result.code.isFailure:
            hasFailures = True

    # Print each test in any of the failing groups.
    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
                       ('Failing Tests', lit.Test.FAIL),
                       ('Unresolved Tests', lit.Test.UNRESOLVED),
                       ('Unsupported Tests', lit.Test.UNSUPPORTED),
                       ('Expected Failing Tests', lit.Test.XFAIL)):
        # XFAIL/UNSUPPORTED listings are opt-in via --show-xfail /
        # --show-unsupported.
        if (lit.Test.XFAIL == code and not opts.show_xfail) or \
           (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
            continue
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for test in elts:
            print(' %s' % test.getFullName())
        sys.stdout.write('\n')

    if opts.timeTests and run.tests:
        # Order by time.
        test_times = [(test.getFullName(), test.result.elapsed)
                      for test in run.tests]
        lit.util.printHistogram(test_times, title='Tests')

    # Summary counts per result kind.
    for name,code in (('Expected Passes ', lit.Test.PASS),
                      ('Expected Failures ', lit.Test.XFAIL),
                      ('Unsupported Tests ', lit.Test.UNSUPPORTED),
                      ('Unresolved Tests ', lit.Test.UNRESOLVED),
                      ('Unexpected Passes ', lit.Test.XPASS),
                      ('Unexpected Failures', lit.Test.FAIL)):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print(' %s: %d' % (name,N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes' : 0,
                    'failures' : 0,
                    'tests' : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            else:
                by_suite[suite]['passes'] += 1
        # NOTE(review): suite/test names are interpolated into the XML
        # without escaping — confirm upstream guarantees they are safe.
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = suite_name.replace(".", "-")
            xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
            xunit_output_file.write(" tests='" + str(suite['passes'] +
                suite['failures']) + "'")
            xunit_output_file.write(" failures='" + str(suite['failures']) +
                "'>\n")
            for result_test in suite['tests']:
                xunit_output_file.write(result_test.getJUnitXML() + "\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
# Allow invoking this module directly as a script.
if __name__=='__main__':
    main()
| |
import wx
import webbrowser
from settings import settings
# Pseudo-URLs used as commands inside the popup's HTML: link activations
# matching these are intercepted (see PopupManager.on_link) instead of
# being opened in a browser.
BLANK = 'about:blank'
COMMAND_CLOSE = 'http://close/'
COMMAND_NEXT = 'http://next/'
COMMAND_PREVIOUS = 'http://previous/'
COMMAND_FIRST = 'http://first/'
COMMAND_LAST = 'http://last/'
COMMAND_PLAY = 'http://play/'
COMMAND_PAUSE = 'http://pause/'
def position_window(window):
    """Place *window* on the configured display, in the configured spot.

    ``settings.POPUP_DISPLAY`` selects the monitor (falling back to the
    primary one when out of range).  ``settings.POPUP_POSITION`` is an
    (h, v) pair of -1/0/1 keys mapped below to a corner or the center.
    """
    index = settings.POPUP_DISPLAY
    if index >= wx.Display_GetCount():
        index = 0
    display = wx.Display(index)
    x, y, w, h = display.GetClientArea()
    cw, ch = window.GetSize()
    pad = 10
    x1 = x + pad
    y1 = y + pad
    x2 = x + w - cw - pad
    y2 = y + h - ch - pad
    # Floor division keeps the centered coordinates integral: the original
    # 'w / 2 - cw / 2' produces floats under Python 3, and wx positions
    # are integer pixel coordinates.
    x3 = x + (w - cw) // 2
    y3 = y + (h - ch) // 2
    lookup = {
        (-1, -1): (x1, y1),
        (1, -1): (x2, y1),
        (-1, 1): (x1, y2),
        (1, 1): (x2, y2),
        (0, 0): (x3, y3),
    }
    window.SetPosition(lookup[settings.POPUP_POSITION])
class Event(wx.PyEvent):
    """A wx event with its type and originating object preset.

    Used with the EVT_LINK / EVT_POPUP_CLOSE binders defined in this
    module.
    """
    def __init__(self, event_object, type):
        # ``type`` is a wx.PyEventBinder; its typeId identifies the event.
        super(Event, self).__init__()
        self.SetEventType(type.typeId)
        self.SetEventObject(event_object)
# Custom event binders: EVT_LINK is bound by PopupManager for link
# activations; EVT_POPup_CLOSE is posted when the popup sequence closes.
EVT_LINK = wx.PyEventBinder(wx.NewEventType())
EVT_POPUP_CLOSE = wx.PyEventBinder(wx.NewEventType())
class PopupManager(wx.EvtHandler):
    """Manages the popup windows that display a sequence of feed items.

    A small cache of pre-built frames (the current item plus its immediate
    neighbours) keeps next/previous navigation snappy; an optional timer
    auto-advances through the items.
    """

    def __init__(self):
        super(PopupManager, self).__init__()
        self.timer = None
        # True while auto-advance ("play") is active.
        self.auto = settings.POPUP_AUTO_PLAY
        # Maps item -> its pre-built frame; maintained by update_cache().
        self.cache = {}

    def set_items(self, items, index=0, focus=False):
        """Replace the item list and display the item at ``index``."""
        self.items = list(items)
        self.index = index
        self.count = len(self.items)
        self.clear_cache(keep_current_item=True)
        self.update(focus)
        self.set_timer()

    def update(self, focus=False):
        """Show the current item, creating its frame first if needed."""
        item = self.items[self.index]
        if item in self.cache:
            self.show_frame(focus)
            self.update_cache()
        else:
            # Build only the current frame first so the popup appears
            # immediately, then fill in the neighbours.
            self.update_cache(True)
            self.show_frame(focus)
            self.update_cache()

    def update_cache(self, current_only=False):
        """Ensure frames exist for the current item (and, unless
        ``current_only``, its neighbours); close frames no longer nearby."""
        indexes = set()
        indexes.add(self.index)
        if not current_only:
            indexes.add(self.index - 1)
            indexes.add(self.index + 1)
            #indexes.add(0)
            #indexes.add(self.count - 1)
        items = set(self.items[index] for index in indexes
                    if index >= 0 and index < self.count)
        for item in items:
            if item in self.cache:
                continue
            frame = self.create_frame(item)
            self.cache[item] = frame
        # Iterate over a snapshot: deleting from the dict while iterating
        # its live items() view raises RuntimeError on Python 3.
        for item, frame in list(self.cache.items()):
            if item not in items:
                frame.Close()
                del self.cache[item]

    def clear_cache(self, keep_current_item=False):
        """Close and forget all cached frames (optionally keeping the
        current item's frame alive)."""
        current_item = self.items[self.index]
        # Snapshot the items since entries are deleted during the loop.
        for item, frame in list(self.cache.items()):
            if keep_current_item and item == current_item:
                continue
            frame.Close()
            del self.cache[item]

    def show_frame(self, focus=False):
        """Show the current item's frame and hide all other cached frames."""
        current_item = self.items[self.index]
        current_item.read = True
        for item, frame in self.cache.items():
            if item == current_item:
                if focus:
                    frame.Show()
                else:
                    # Show without letting the frame take input focus.
                    frame.Disable()
                    frame.Show()
                    frame.Enable()
                frame.Update()
                if settings.POPUP_TRANSPARENCY < 255:
                    frame.SetTransparent(settings.POPUP_TRANSPARENCY)
        for item, frame in self.cache.items():
            if item != current_item:
                frame.Hide()

    def create_frame(self, item):
        """Build (but do not show) a popup frame for ``item``."""
        if True:#settings.POPUP_THEME == 'default':
            import theme_default
            context = self.create_context(item)
            frame = theme_default.Frame(item, context)
        frame.Bind(EVT_LINK, self.on_link)
        position_window(frame)
        if settings.POPUP_TRANSPARENCY < 255:
            # Start fully transparent; show_frame() applies the real value.
            frame.SetTransparent(0)
        return frame

    def create_context(self, item):
        """Build the template context dict used to render ``item``."""
        context = {}
        count = str(self.count)
        index = str(self.items.index(item) + 1)
        # Zero-pad the index so it lines up with the total count.
        index = '%s%s' % ('0' * (len(count) - len(index)), index)
        context['item_index'] = index
        context['item_count'] = count
        context['is_playing'] = self.auto
        context['is_paused'] = not self.auto
        context['POPUP_WIDTH'] = settings.POPUP_WIDTH
        context['COMMAND_CLOSE'] = COMMAND_CLOSE
        context['COMMAND_NEXT'] = COMMAND_NEXT
        context['COMMAND_PREVIOUS'] = COMMAND_PREVIOUS
        context['COMMAND_FIRST'] = COMMAND_FIRST
        context['COMMAND_LAST'] = COMMAND_LAST
        context['COMMAND_PLAY'] = COMMAND_PLAY
        context['COMMAND_PAUSE'] = COMMAND_PAUSE
        return context

    def set_timer(self):
        """Start the auto-advance timer unless one is already running."""
        if self.timer and self.timer.IsRunning():
            return
        duration = settings.POPUP_DURATION * 1000
        self.timer = wx.CallLater(duration, self.on_timer)

    def stop_timer(self):
        """Cancel any pending auto-advance."""
        if self.timer and self.timer.IsRunning():
            self.timer.Stop()
        self.timer = None

    def on_link(self, event):
        """Dispatch a link activation coming from the popup HTML."""
        link = event.link
        # track the click
        item = self.items[self.index]
        feed = item.feed
        if link == item.link or link == feed.link:
            feed.clicks += 1
        # handle the click
        if link == BLANK:
            event.Skip()
        elif link == COMMAND_CLOSE:
            self.on_close()
        elif link == COMMAND_FIRST:
            self.auto = False
            self.on_first()
        elif link == COMMAND_LAST:
            self.auto = False
            self.on_last()
        elif link == COMMAND_NEXT:
            self.auto = False
            self.on_next()
        elif link == COMMAND_PREVIOUS:
            self.auto = False
            self.on_previous()
        elif link == COMMAND_PLAY:
            if not self.auto:
                self.auto = True
                # Restart via on_timer so the first advance is immediate.
                self.stop_timer()
                self.on_timer()
        elif link == COMMAND_PAUSE:
            self.auto = False
        else:
            # Any other URL is a real link: open it externally.
            webbrowser.open(link)

    def on_first(self):
        self.index = 0
        self.update(True)

    def on_last(self):
        self.index = self.count - 1
        self.update(True)

    def on_next(self, focus=True):
        if self.index < self.count - 1:
            self.index += 1
            self.update(focus)
        else:
            # Walked past the last item: dismiss the popup sequence.
            self.on_close()

    def on_previous(self):
        if self.index > 0:
            self.index -= 1
            self.update(True)

    def on_close(self):
        """Tear down all frames and notify listeners that we closed."""
        self.stop_timer()
        self.clear_cache()
        event = Event(self, EVT_POPUP_CLOSE)
        wx.PostEvent(self, event)

    def on_timer(self):
        """Advance to the next item while auto-play is active."""
        self.timer = None
        if not self.auto:
            return
        if self.index == self.count - 1:
            self.on_close()
        else:
            self.on_next(False)
            self.set_timer()
| |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        # Deliberate no-op — presumably attached so that importing the
        # library produces no logging output unless the application
        # configures handlers itself.
        pass
# Attach a do-nothing handler to the driver's root logger so the library
# emits nothing unless the application configures logging.
logging.getLogger('cassandra').addHandler(NullHandler())

# Driver version, as a tuple and the derived dotted string.
__version_info__ = (3, 13, 0, 'post0')
__version__ = '.'.join(map(str, __version_info__))
class ConsistencyLevel(object):
    """
    Specifies how many replicas must respond for an operation to be considered
    a success. By default, ``ONE`` is used for all operations.
    """

    ANY = 0
    """
    Only requires that one replica receives the write *or* the coordinator
    stores a hint to replay later. Valid only for writes.
    """

    ONE = 1
    """
    Only one replica needs to respond to consider the operation a success
    """

    TWO = 2
    """
    Two replicas must respond to consider the operation a success
    """

    THREE = 3
    """
    Three replicas must respond to consider the operation a success
    """

    QUORUM = 4
    """
    ``ceil(RF/2)`` replicas must respond to consider the operation a success
    """

    ALL = 5
    """
    All replicas must respond to consider the operation a success
    """

    LOCAL_QUORUM = 6
    """
    Requires a quorum of replicas in the local datacenter
    """

    EACH_QUORUM = 7
    """
    Requires a quorum of replicas in each datacenter
    """

    SERIAL = 8
    """
    For conditional inserts/updates that utilize Cassandra's lightweight
    transactions, this requires consensus among all replicas for the
    modified data.
    """

    LOCAL_SERIAL = 9
    """
    Like :attr:`~ConsistencyLevel.SERIAL`, but only requires consensus
    among replicas in the local datacenter.
    """

    LOCAL_ONE = 10
    """
    Sends a request only to replicas in the local datacenter and waits for
    one response.
    """
# Forward map: numeric consistency level -> canonical name.
ConsistencyLevel.value_to_name = {
    ConsistencyLevel.ANY: 'ANY',
    ConsistencyLevel.ONE: 'ONE',
    ConsistencyLevel.TWO: 'TWO',
    ConsistencyLevel.THREE: 'THREE',
    ConsistencyLevel.QUORUM: 'QUORUM',
    ConsistencyLevel.ALL: 'ALL',
    ConsistencyLevel.LOCAL_QUORUM: 'LOCAL_QUORUM',
    ConsistencyLevel.EACH_QUORUM: 'EACH_QUORUM',
    ConsistencyLevel.SERIAL: 'SERIAL',
    ConsistencyLevel.LOCAL_SERIAL: 'LOCAL_SERIAL',
    ConsistencyLevel.LOCAL_ONE: 'LOCAL_ONE'
}

# Reverse map, derived from value_to_name so the two tables can never
# disagree (previously both were maintained by hand).
ConsistencyLevel.name_to_value = dict(
    (name, value) for value, name in ConsistencyLevel.value_to_name.items())
def consistency_value_to_name(value):
    """Return the canonical name for a consistency value, or "Not Set"."""
    if value is None:
        return "Not Set"
    return ConsistencyLevel.value_to_name[value]
class ProtocolVersion(object):
    """
    Defines native protocol versions supported by this driver.
    """

    V1 = 1
    """
    v1, supported in Cassandra 1.2-->2.2
    """

    V2 = 2
    """
    v2, supported in Cassandra 2.0-->2.2;
    added support for lightweight transactions, batch operations, and automatic query paging.
    """

    V3 = 3
    """
    v3, supported in Cassandra 2.1-->3.x+;
    added support for protocol-level client-side timestamps (see :attr:`.Session.use_client_timestamp`),
    serial consistency levels for :class:`~.BatchStatement`, and an improved connection pool.
    """

    V4 = 4
    """
    v4, supported in Cassandra 2.2-->3.x+;
    added a number of new types, server warnings, new failure messages, and custom payloads. Details in the
    `project docs <https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec>`_
    """

    V5 = 5
    """
    v5, in beta from 3.x+
    """

    SUPPORTED_VERSIONS = (V5, V4, V3, V2, V1)
    """
    A tuple of all supported protocol versions
    """

    BETA_VERSIONS = (V5,)
    """
    A tuple of all beta protocol versions
    """

    MIN_SUPPORTED = min(SUPPORTED_VERSIONS)
    """
    Minimum protocol version supported by this driver.
    """

    MAX_SUPPORTED = max(SUPPORTED_VERSIONS)
    """
    Maximum protocol version supported by this driver.
    """

    @classmethod
    def get_lower_supported(cls, previous_version):
        """
        Return the highest supported non-beta protocol version that is
        strictly lower than ``previous_version``, or 0 if none exists.
        """
        try:
            # Walk supported versions from highest to lowest, skipping betas.
            version = next(v for v in sorted(cls.SUPPORTED_VERSIONS, reverse=True)
                           if v not in cls.BETA_VERSIONS and v < previous_version)
        except StopIteration:
            version = 0
        return version

    @classmethod
    def uses_int_query_flags(cls, version):
        """True if ``version`` uses int query flags (v5+)."""
        return version >= cls.V5

    @classmethod
    def uses_prepare_flags(cls, version):
        """True if ``version`` uses prepare flags (v5+)."""
        return version >= cls.V5

    @classmethod
    def uses_prepared_metadata(cls, version):
        """True if ``version`` uses prepared metadata (v5+)."""
        return version >= cls.V5

    @classmethod
    def uses_error_code_map(cls, version):
        """True if ``version`` provides per-replica error code maps (v5+)."""
        return version >= cls.V5

    @classmethod
    def uses_keyspace_flag(cls, version):
        """True if ``version`` supports the keyspace flag (v5+)."""
        return version >= cls.V5
class SchemaChangeType(object):
    """String constants describing the kind of a schema change event."""
    DROPPED = 'DROPPED'
    CREATED = 'CREATED'
    UPDATED = 'UPDATED'
class SchemaTargetType(object):
    """String constants describing what kind of object a schema change targets."""
    KEYSPACE = 'KEYSPACE'
    TABLE = 'TABLE'
    TYPE = 'TYPE'
    FUNCTION = 'FUNCTION'
    AGGREGATE = 'AGGREGATE'
class SignatureDescriptor(object):
    """Identifies a function or aggregate by name plus argument types."""

    def __init__(self, name, argument_types):
        self.name = name
        self.argument_types = argument_types

    @property
    def signature(self):
        """
        function signature string in the form 'name([type0[,type1[...]]])'

        can be used to uniquely identify overloaded function names within a keyspace
        """
        return self.format_signature(self.name, self.argument_types)

    @staticmethod
    def format_signature(name, argument_types):
        """Render 'name(type0,type1,...)' for the given parts."""
        # join() accepts the sequence directly; the old
        # "','.join(t for t in argument_types)" generator was redundant.
        return "%s(%s)" % (name, ','.join(argument_types))

    def __repr__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, self.name, self.argument_types)
class UserFunctionDescriptor(SignatureDescriptor):
    """
    Describes a user-defined function by name and argument signature.
    """

    name = None
    """
    name of the function
    """

    argument_types = None
    """
    Ordered list of CQL argument type names comprising the type signature
    """
class UserAggregateDescriptor(SignatureDescriptor):
    """
    Describes a user-defined aggregate function by name and argument signature.
    """

    name = None
    """
    name of the aggregate
    """

    argument_types = None
    """
    Ordered list of CQL argument type names comprising the type signature
    """
class DriverException(Exception):
    """
    Base for all exceptions explicitly raised by the driver.

    Catch this to handle any driver-originated error generically.
    """
    pass
class RequestExecutionException(DriverException):
    """
    Base for request execution exceptions returned from the server
    (as opposed to request validation errors).
    """
    pass
class Unavailable(RequestExecutionException):
    """
    There were not enough live replicas to satisfy the requested consistency
    level, so the coordinator node immediately failed the request without
    forwarding it to any replicas.
    """

    consistency = None
    """ The requested :class:`ConsistencyLevel` """

    required_replicas = None
    """ The number of replicas that needed to be live to complete the operation """

    alive_replicas = None
    """ The number of replicas that were actually alive """

    def __init__(self, summary_message, consistency=None, required_replicas=None, alive_replicas=None):
        self.consistency = consistency
        self.required_replicas = required_replicas
        self.alive_replicas = alive_replicas
        # Fold the details into the exception message for easy logging.
        details = {'consistency': consistency_value_to_name(consistency),
                   'required_replicas': required_replicas,
                   'alive_replicas': alive_replicas}
        Exception.__init__(self, '%s info=%r' % (summary_message, details))
class Timeout(RequestExecutionException):
    """
    Replicas failed to respond to the coordinator node before timing out.
    """

    consistency = None
    """ The requested :class:`ConsistencyLevel` """

    required_responses = None
    """ The number of required replica responses """

    received_responses = None
    """
    The number of replicas that responded before the coordinator timed out
    the operation
    """

    def __init__(self, summary_message, consistency=None, required_responses=None, received_responses=None):
        self.consistency = consistency
        self.required_responses = required_responses
        self.received_responses = received_responses
        # Fold the details into the exception message for easy logging.
        details = {'consistency': consistency_value_to_name(consistency),
                   'required_responses': required_responses,
                   'received_responses': received_responses}
        Exception.__init__(self, '%s info=%r' % (summary_message, details))
class ReadTimeout(Timeout):
    """
    A subclass of :exc:`Timeout` for read operations.

    This indicates that the replicas failed to respond to the coordinator
    node before the configured timeout. This timeout is configured in
    ``cassandra.yaml`` with the ``read_request_timeout_in_ms``
    and ``range_request_timeout_in_ms`` options.
    """

    data_retrieved = None
    """
    A boolean indicating whether the requested data was retrieved
    by the coordinator from any replicas before it timed out the
    operation
    """

    def __init__(self, message, data_retrieved=None, **kwargs):
        self.data_retrieved = data_retrieved
        Timeout.__init__(self, message, **kwargs)
class WriteTimeout(Timeout):
    """
    A subclass of :exc:`Timeout` for write operations.

    This indicates that the replicas failed to respond to the coordinator
    node before the configured timeout. This timeout is configured in
    ``cassandra.yaml`` with the ``write_request_timeout_in_ms``
    option.
    """

    write_type = None
    """
    The type of write operation, enum on :class:`~cassandra.policies.WriteType`
    """

    def __init__(self, message, write_type=None, **kwargs):
        self.write_type = write_type
        Timeout.__init__(self, message, **kwargs)
class CDCWriteFailure(RequestExecutionException):
    """
    Hit limit on data in CDC folder, writes are rejected.
    """
    def __init__(self, message):
        Exception.__init__(self, message)
class CoordinationFailure(RequestExecutionException):
    """
    Replicas sent a failure to the coordinator.
    """

    consistency = None
    """ The requested :class:`ConsistencyLevel` """

    required_responses = None
    """ The number of required replica responses """

    received_responses = None
    """
    The number of replicas that responded before the coordinator timed out
    the operation
    """

    failures = None
    """
    The number of replicas that sent a failure message
    """

    error_code_map = None
    """
    A map of inet addresses to error codes representing replicas that sent
    a failure message. Only set when `protocol_version` is 5 or higher.
    """

    def __init__(self, summary_message, consistency=None, required_responses=None,
                 received_responses=None, failures=None, error_code_map=None):
        self.consistency = consistency
        self.required_responses = required_responses
        self.received_responses = received_responses
        self.failures = failures
        self.error_code_map = error_code_map

        details = {
            'consistency': consistency_value_to_name(consistency),
            'required_responses': required_responses,
            'received_responses': received_responses,
            'failures': failures
        }
        if error_code_map is not None:
            # make error codes look like "0x002a"
            details['error_code_map'] = {
                addr: '0x%04x' % err_code
                for addr, err_code in error_code_map.items()
            }
        Exception.__init__(self, '%s info=%r' % (summary_message, details))
class ReadFailure(CoordinationFailure):
    """
    A subclass of :exc:`CoordinationFailure` for read operations.

    This indicates that the replicas sent a failure message to the coordinator.
    """

    data_retrieved = None
    """
    A boolean indicating whether the requested data was retrieved
    by the coordinator from any replicas before it timed out the
    operation
    """

    def __init__(self, message, data_retrieved=None, **kwargs):
        self.data_retrieved = data_retrieved
        CoordinationFailure.__init__(self, message, **kwargs)
class WriteFailure(CoordinationFailure):
    """
    A subclass of :exc:`CoordinationFailure` for write operations.

    This indicates that the replicas sent a failure message to the coordinator.
    """

    write_type = None
    """
    The type of write operation, enum on :class:`~cassandra.policies.WriteType`
    """

    def __init__(self, message, write_type=None, **kwargs):
        self.write_type = write_type
        CoordinationFailure.__init__(self, message, **kwargs)
class FunctionFailure(RequestExecutionException):
    """
    User Defined Function failed during execution.
    """

    keyspace = None
    """
    Keyspace of the function
    """

    function = None
    """
    Name of the function
    """

    arg_types = None
    """
    List of argument type names of the function
    """

    def __init__(self, summary_message, keyspace, function, arg_types):
        Exception.__init__(self, summary_message)
        self.keyspace = keyspace
        self.function = function
        self.arg_types = arg_types
class RequestValidationException(DriverException):
    """
    Server request validation failed: the request itself was rejected
    before execution.
    """
    pass
class ConfigurationException(RequestValidationException):
    """
    Server indicated request error due to current configuration.
    """
    pass
class AlreadyExists(ConfigurationException):
    """
    An attempt was made to create a keyspace or table that already exists.
    """

    keyspace = None
    """
    The name of the keyspace that already exists, or, if an attempt was
    made to create a new table, the keyspace that the table is in.
    """

    table = None
    """
    The name of the table that already exists, or, if an attempt was
    make to create a keyspace, :const:`None`.
    """

    def __init__(self, keyspace=None, table=None):
        self.keyspace = keyspace
        self.table = table
        # A set table name means the conflict was a table, not a keyspace.
        msg = ("Table '%s.%s' already exists" % (keyspace, table)
               if table
               else "Keyspace '%s' already exists" % (keyspace,))
        Exception.__init__(self, msg)
class InvalidRequest(RequestValidationException):
    """
    A query was made that was invalid for some reason, such as trying to set
    the keyspace for a connection to a nonexistent keyspace.
    """
    pass
class Unauthorized(RequestValidationException):
    """
    The current user is not authorized to perform the requested operation.
    """
    pass
class AuthenticationFailed(DriverException):
    """
    Failed to authenticate against the server.
    """
    pass
class OperationTimedOut(DriverException):
    """
    The operation took longer than the specified (client-side) timeout
    to complete. This is not an error generated by Cassandra, only
    the driver.
    """

    errors = None
    """
    A dict of errors keyed by the :class:`~.Host` against which they occurred.
    """

    last_host = None
    """
    The last :class:`~.Host` this operation was attempted against.
    """

    def __init__(self, errors=None, last_host=None):
        self.errors = errors
        self.last_host = last_host
        Exception.__init__(
            self, "errors=%s, last_host=%s" % (errors, last_host))
class UnsupportedOperation(DriverException):
    """
    An attempt was made to use a feature that is not supported by the
    selected protocol version. See :attr:`Cluster.protocol_version`
    for more details.
    """
    pass
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import json
from lxml import etree
import re
import time
from tempest.common import http
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import xml_to_json
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2

# Characters permitted in an auth token (base64-style alphabet plus '-').
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')

# All the successful HTTP status codes from RFC 2616
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
class RestClient(object):
    """Base REST client for Tempest service clients.

    Handles Keystone (v2/v3) authentication, request/response logging,
    automatic re-issue of rate-limited (413) calls, and translation of
    HTTP error statuses into tempest.exceptions types.  Subclasses
    override TYPE and _parse_resp to speak a serialization other than
    JSON (see RestClientXML).
    """
    TYPE = "json"
    LOG = logging.getLogger(__name__)
    def __init__(self, config, user, password, auth_url, tenant_name=None,
                 auth_version='v2'):
        """Store credentials and config; authentication happens lazily on
        the first request (see request() / _set_auth())."""
        self.config = config
        self.user = user
        self.password = password
        self.auth_url = auth_url
        self.tenant_name = tenant_name
        self.auth_version = auth_version
        self.service = None
        self.token = None
        self.base_url = None
        self.region = {}
        for cfgname in dir(self.config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(self.config, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if not catalog_type:
                continue
            service_region = getattr(cfg, 'region', None)
            if not service_region:
                # No per-service region configured: use the identity one.
                service_region = self.config.identity.region
            self.region[catalog_type] = service_region
        self.endpoint_url = 'publicURL'
        self.headers = {'Content-Type': 'application/%s' % self.TYPE,
                        'Accept': 'application/%s' % self.TYPE}
        self.build_interval = config.compute.build_interval
        self.build_timeout = config.compute.build_timeout
        # Header names RFC 2616 classes as general/response headers; used by
        # response_checker() to detect forbidden entity headers on 205.
        self.general_header_lc = set(('cache-control', 'connection',
                                      'date', 'pragma', 'trailer',
                                      'transfer-encoding', 'via',
                                      'warning'))
        self.response_header_lc = set(('accept-ranges', 'age', 'etag',
                                       'location', 'proxy-authenticate',
                                       'retry-after', 'server',
                                       'vary', 'www-authenticate'))
        dscv = self.config.identity.disable_ssl_certificate_validation
        self.http_obj = http.ClosingHttp(
            disable_ssl_certificate_validation=dscv)
    def __str__(self):
        """Return a debug string of the client state with the token and
        headers truncated to STRING_LIMIT characters."""
        STRING_LIMIT = 80
        str_format = ("config:%s, user:%s, password:%s, "
                      "auth_url:%s, tenant_name:%s, auth_version:%s, "
                      "service:%s, base_url:%s, region:%s, "
                      "endpoint_url:%s, build_interval:%s, build_timeout:%s"
                      "\ntoken:%s..., \nheaders:%s...")
        return str_format % (self.config, self.user, self.password,
                             self.auth_url, self.tenant_name,
                             self.auth_version, self.service,
                             self.base_url, self.region, self.endpoint_url,
                             self.build_interval, self.build_timeout,
                             str(self.token)[0:STRING_LIMIT],
                             str(self.headers)[0:STRING_LIMIT])
    def _set_auth(self):
        """
        Sets the token and base_url used in requests based on the strategy type
        """
        if self.auth_version == 'v3':
            auth_func = self.identity_auth_v3
        else:
            auth_func = self.keystone_auth
        self.token, self.base_url = (
            auth_func(self.user, self.password, self.auth_url,
                      self.service, self.tenant_name))
    def clear_auth(self):
        """
        Can be called to clear the token and base_url so that the next request
        will fetch a new token and base_url.
        """
        self.token = None
        self.base_url = None
    def get_auth(self):
        """Returns the token of the current request or sets the token if
        none.
        """
        if not self.token:
            self._set_auth()
        return self.token
    def basic_auth(self, user, password, auth_url):
        """
        Provides authentication for the target API.
        """
        params = {}
        params['headers'] = {'User-Agent': 'Test-Client', 'X-Auth-User': user,
                             'X-Auth-Key': password}
        resp, body = self.http_obj.request(auth_url, 'GET', **params)
        # NOTE(review): the try/except re-raise adds nothing -- a missing
        # header simply propagates as KeyError either way.
        try:
            return resp['x-auth-token'], resp['x-server-management-url']
        except Exception:
            raise
    def keystone_auth(self, user, password, auth_url, service, tenant_name):
        """
        Provides authentication via Keystone using v2 identity API.
        """
        # Normalize URI to ensure /tokens is in it.
        if 'tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/tokens'
        creds = {
            'auth': {
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
                'tenantName': tenant_name,
            }
        }
        headers = {'Content-Type': 'application/json'}
        body = json.dumps(creds)
        self._log_request('POST', auth_url, headers, body)
        resp, resp_body = self.http_obj.request(auth_url, 'POST',
                                                headers=headers, body=body)
        self._log_response(resp, resp_body)
        if resp.status == 200:
            try:
                auth_data = json.loads(resp_body)['access']
                token = auth_data['token']['id']
            except Exception as e:
                print("Failed to obtain token for user: %s" % e)
                raise
            mgmt_url = None
            for ep in auth_data['serviceCatalog']:
                if ep["type"] == service:
                    for _ep in ep['endpoints']:
                        # Prefer the endpoint matching the configured region.
                        if service in self.region and \
                            _ep['region'] == self.region[service]:
                            mgmt_url = _ep[self.endpoint_url]
                    if not mgmt_url:
                        # No region match: fall back to the first endpoint.
                        mgmt_url = ep['endpoints'][0][self.endpoint_url]
                    break
            if mgmt_url is None:
                raise exceptions.EndpointNotFound(service)
            return token, mgmt_url
        elif resp.status == 401:
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password,
                                                   tenant=tenant_name)
        raise exceptions.IdentityError('Unexpected status code {0}'.format(
            resp.status))
    def identity_auth_v3(self, user, password, auth_url, service,
                         project_name, domain_id='default'):
        """Provides authentication using Identity API v3."""
        req_url = auth_url.rstrip('/') + '/auth/tokens'
        creds = {
            "auth": {
                "identity": {
                    "methods": ["password"],
                    "password": {
                        "user": {
                            "name": user, "password": password,
                            "domain": {"id": domain_id}
                        }
                    }
                },
                "scope": {
                    "project": {
                        "domain": {"id": domain_id},
                        "name": project_name
                    }
                }
            }
        }
        headers = {'Content-Type': 'application/json'}
        body = json.dumps(creds)
        resp, body = self.http_obj.request(req_url, 'POST',
                                           headers=headers, body=body)
        if resp.status == 201:
            # v3 returns the token in the X-Subject-Token response header.
            try:
                token = resp['x-subject-token']
            except Exception:
                self.LOG.exception("Failed to obtain token using V3"
                                   " authentication (auth URL is '%s')" %
                                   req_url)
                raise
            catalog = json.loads(body)['token']['catalog']
            mgmt_url = None
            for service_info in catalog:
                if service_info['type'] != service:
                    continue # this isn't the entry for us.
                endpoints = service_info['endpoints']
                # Look for an endpoint in the region if configured.
                if service in self.region:
                    region = self.region[service]
                    for ep in endpoints:
                        if ep['region'] != region:
                            continue
                        mgmt_url = ep['url']
                        # FIXME(blk-u): this isn't handling endpoint type
                        # (public, internal, admin).
                        break
                if not mgmt_url:
                    # Didn't find endpoint for region, use the first.
                    ep = endpoints[0]
                    mgmt_url = ep['url']
                    # FIXME(blk-u): this isn't handling endpoint type
                    # (public, internal, admin).
                break
            return token, mgmt_url
        elif resp.status == 401:
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password)
        else:
            self.LOG.error("Failed to obtain token using V3 authentication"
                           " (auth URL is '%s'), the response status is %s" %
                           (req_url, resp.status))
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password)
    def expected_success(self, expected_code, read_code):
        """Assert that read_code is the single expected RFC 2616 success
        code; codes >= 400 are left to _error_checker instead."""
        # NOTE(review): the concatenated message below is missing a space
        # between "status" and "codes" -- cosmetic only.
        assert_msg = ("This function only allowed to use for HTTP status"
                      "codes which explicitly defined in the RFC 2616. {0}"
                      " is not a defined Success Code!").format(expected_code)
        assert expected_code in HTTP_SUCCESS, assert_msg
        # NOTE(afazekas): the http status code above 400 is processed by
        # the _error_checker method
        if read_code < 400 and read_code != expected_code:
            pattern = """Unexpected http success status code {0},
                The expected status code is {1}"""
            details = pattern.format(read_code, expected_code)
            raise exceptions.InvalidHttpSuccessCode(details)
    def post(self, url, body, headers):
        """Issue an authenticated HTTP POST."""
        return self.request('POST', url, headers, body)
    def get(self, url, headers=None):
        """Issue an authenticated HTTP GET."""
        return self.request('GET', url, headers)
    def delete(self, url, headers=None):
        """Issue an authenticated HTTP DELETE."""
        return self.request('DELETE', url, headers)
    def patch(self, url, body, headers):
        """Issue an authenticated HTTP PATCH."""
        return self.request('PATCH', url, headers, body)
    def put(self, url, body, headers):
        """Issue an authenticated HTTP PUT."""
        return self.request('PUT', url, headers, body)
    def head(self, url, headers=None):
        """Issue an authenticated HTTP HEAD."""
        return self.request('HEAD', url, headers)
    def copy(self, url, headers=None):
        """Issue an authenticated HTTP COPY (used by Swift)."""
        return self.request('COPY', url, headers)
    def get_versions(self):
        """Return (resp, iterable of API version id strings) from the
        service root."""
        resp, body = self.get('')
        body = self._parse_resp(body)
        body = body['versions']
        # NOTE(review): map() returns a list on Python 2 but a lazy iterator
        # on Python 3 -- confirm the intended runtime before porting.
        versions = map(lambda x: x['id'], body)
        return resp, versions
    def _log_request(self, method, req_url, headers, body):
        """Log the outgoing request, redacting token-looking auth headers
        and truncating large bodies."""
        self.LOG.info('Request: ' + method + ' ' + req_url)
        if headers:
            print_headers = headers
            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
                token = headers['X-Auth-Token']
                # Only redact values that actually look like opaque tokens.
                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
                    print_headers = headers.copy()
                    print_headers['X-Auth-Token'] = "<Token omitted>"
            self.LOG.debug('Request Headers: ' + str(print_headers))
        if body:
            str_body = str(body)
            length = len(str_body)
            self.LOG.debug('Request Body: ' + str_body[:2048])
            if length >= 2048:
                # md5 is a debugging fingerprint here, not a security use.
                # NOTE(review): hashlib.md5 requires bytes on Python 3.
                self.LOG.debug("Large body (%d) md5 summary: %s", length,
                               hashlib.md5(str_body).hexdigest())
    def _log_response(self, resp, resp_body):
        """Log the response status/headers/body, surfacing the Nova or
        Glance request id when present."""
        status = resp['status']
        self.LOG.info("Response Status: " + status)
        headers = resp.copy()
        del headers['status']
        if headers.get('x-compute-request-id'):
            self.LOG.info("Nova request id: %s" %
                          headers.pop('x-compute-request-id'))
        elif headers.get('x-openstack-request-id'):
            self.LOG.info("Glance request id %s" %
                          headers.pop('x-openstack-request-id'))
        if len(headers):
            self.LOG.debug('Response Headers: ' + str(headers))
        if resp_body:
            str_body = str(resp_body)
            length = len(str_body)
            self.LOG.debug('Response Body: ' + str_body[:2048])
            if length >= 2048:
                # NOTE(review): hashlib.md5 requires bytes on Python 3.
                self.LOG.debug("Large body (%d) md5 summary: %s", length,
                               hashlib.md5(str_body).hexdigest())
    def _parse_resp(self, body):
        """Deserialize a response body (JSON here; overridden for XML)."""
        return json.loads(body)
    def response_checker(self, method, url, headers, body, resp, resp_body):
        """Sanity-check a response against RFC 2616: no body on
        204/205/304/HEAD, no entity headers on 205, warn on empty error
        bodies."""
        if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                method.upper() == 'HEAD') and resp_body:
            raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
        # NOTE(afazekas):
        # If the HTTP Status Code is 205
        # 'The response MUST NOT include an entity.'
        # A HTTP entity has an entity-body and an 'entity-header'.
        # In the HTTP response specification (Section 6) the 'entity-header'
        # 'generic-header' and 'response-header' are in OR relation.
        # All headers not in the above two group are considered as entity
        # header in every interpretation.
        if (resp.status == 205 and
            0 != len(set(resp.keys()) - set(('status',)) -
                     self.response_header_lc - self.general_header_lc)):
            raise exceptions.ResponseWithEntity()
        # NOTE(afazekas)
        # Now the swift sometimes (delete not empty container)
        # returns with non json error response, we can create new rest class
        # for swift.
        # Usually RFC2616 says error responses SHOULD contain an explanation.
        # The warning is normal for SHOULD/SHOULD NOT case
        # Likely it will cause an error
        if not resp_body and resp.status >= 400:
            self.LOG.warning("status >= 400 response with empty body")
    def _request(self, method, url,
                 headers=None, body=None):
        """A simple HTTP request interface."""
        req_url = "%s/%s" % (self.base_url, url)
        self._log_request(method, req_url, headers, body)
        resp, resp_body = self.http_obj.request(req_url, method,
                                                headers=headers, body=body)
        self._log_response(resp, resp_body)
        self.response_checker(method, url, headers, body, resp, resp_body)
        return resp, resp_body
    def request(self, method, url,
                headers=None, body=None):
        """Issue an authenticated request.

        Authenticates lazily, injects the X-Auth-Token header, honors the
        Retry-After delay on retryable 413 responses (at most
        MAX_RECURSION_DEPTH retries), then runs _error_checker on the
        final response.
        """
        retry = 0
        if (self.token is None) or (self.base_url is None):
            self._set_auth()
        if headers is None:
            headers = {}
        headers['X-Auth-Token'] = self.token
        resp, resp_body = self._request(method, url,
                                        headers=headers, body=body)
        while (resp.status == 413 and
               'retry-after' in resp and
                not self.is_absolute_limit(
                    resp, self._parse_resp(resp_body)) and
                retry < MAX_RECURSION_DEPTH):
            retry += 1
            delay = int(resp['retry-after'])
            time.sleep(delay)
            resp, resp_body = self._request(method, url,
                                            headers=headers, body=body)
        self._error_checker(method, url, headers, body,
                            resp, resp_body)
        return resp, resp_body
    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        """Translate 4xx/5xx responses into tempest.exceptions types."""
        # NOTE(mtreinish): Check for httplib response from glance_http. The
        # object can't be used here because importing httplib breaks httplib2.
        # If another object from a class not imported were passed here as
        # resp this could possibly fail
        if str(type(resp)) == "<type 'instance'>":
            ctype = resp.getheader('content-type')
        else:
            try:
                ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses doesn't have a
            # content-type header. (They don't have a body) So just pretend it
            # is set.
            except KeyError:
                ctype = 'application/json'
        # It is not an error response
        if resp.status < 400:
            return
        JSON_ENC = ['application/json; charset=UTF-8', 'application/json',
                    'application/json; charset=utf-8']
        # NOTE(mtreinish): This is for compatibility with Glance and swift
        # APIs. These are the return content types that Glance api v1
        # (and occasionally swift) are using.
        TXT_ENC = ['text/plain', 'text/plain; charset=UTF-8',
                   'text/html; charset=UTF-8', 'text/plain; charset=utf-8']
        XML_ENC = ['application/xml', 'application/xml; charset=UTF-8']
        if ctype in JSON_ENC or ctype in XML_ENC:
            parse_resp = True
        elif ctype in TXT_ENC:
            parse_resp = False
        else:
            raise exceptions.RestClientException(str(resp.status))
        if resp.status == 401 or resp.status == 403:
            raise exceptions.Unauthorized()
        if resp.status == 404:
            raise exceptions.NotFound(resp_body)
        if resp.status == 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.BadRequest(resp_body)
        if resp.status == 409:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.Conflict(resp_body)
        if resp.status == 413:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            # Distinguish an absolute quota (OverLimit) from rate limiting.
            if self.is_absolute_limit(resp, resp_body):
                raise exceptions.OverLimit(resp_body)
            else:
                raise exceptions.RateLimitExceeded(resp_body)
        if resp.status == 422:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.UnprocessableEntity(resp_body)
        if resp.status in (500, 501):
            message = resp_body
            if parse_resp:
                try:
                    resp_body = self._parse_resp(resp_body)
                except ValueError:
                    # If response body is a non-json string message.
                    # Use resp_body as is and raise InvalidResponseBody
                    # exception.
                    raise exceptions.InvalidHTTPResponseBody(message)
                else:
                    # I'm seeing both computeFault
                    # and cloudServersFault come back.
                    # Will file a bug to fix, but leave as is for now.
                    if 'cloudServersFault' in resp_body:
                        message = resp_body['cloudServersFault']['message']
                    elif 'computeFault' in resp_body:
                        message = resp_body['computeFault']['message']
                    elif 'error' in resp_body: # Keystone errors
                        message = resp_body['error']['message']
                        raise exceptions.IdentityError(message)
                    elif 'message' in resp_body:
                        message = resp_body['message']
            raise exceptions.ServerFault(message)
        if resp.status >= 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.RestClientException(str(resp.status))
    def is_absolute_limit(self, resp, resp_body):
        """Return True when a 413 denotes an absolute quota rather than a
        retryable rate limit (overridden for XML bodies)."""
        if (not isinstance(resp_body, collections.Mapping) or
                'retry-after' not in resp):
            return True
        over_limit = resp_body.get('overLimit', None)
        if not over_limit:
            return True
        return 'exceed' in over_limit.get('message', 'blabla')
    def wait_for_resource_deletion(self, id):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)
    def is_resource_deleted(self, id):
        """
        Subclasses override with specific deletion detection.
        """
        message = ('"%s" does not implement is_resource_deleted'
                   % self.__class__.__name__)
        raise NotImplementedError(message)
class RestClientXML(RestClient):
    """RestClient variant that speaks XML instead of JSON."""
    TYPE = "xml"
    def _parse_resp(self, body):
        """Deserialize an XML response body into a dict."""
        tree = etree.fromstring(body)
        return xml_to_json(tree)
    def is_absolute_limit(self, resp, resp_body):
        """Return True when a 413 denotes an absolute (non-retryable) limit."""
        if not isinstance(resp_body, collections.Mapping):
            return True
        if 'retry-after' not in resp:
            return True
        return 'exceed' in resp_body.get('message', 'blabla')
| |
#!/usr/bin/env python
#
#-----------------------------------------------------------------------
# A test suite for the table interface built on bsddb.db
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# March 20, 2000
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <greg@electricrain.com>
#
# $Id: test_dbtables.py,v 1.1.1.1 2006/05/30 06:04:24 hhzhou Exp $
import sys, os, re
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
import unittest
from test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbtables
except ImportError:
# For Python 2.3
from bsddb import db, dbtables
#----------------------------------------------------------------------
class TableDBTestCase(unittest.TestCase):
    """Exercises the bsddb dbtables table interface end to end."""
    # Relative directory that holds the BerkeleyDB environment.
    db_home = 'db_home'
    # Default database filename (setUp actually opens 'tabletest.db').
    db_name = 'test-table.db'
    def setUp(self):
        # Create the DB home directory next to the test script and open a
        # fresh table database inside it.
        homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
        self.homeDir = homeDir
        try: os.mkdir(homeDir)
        except os.error: pass
        self.tdb = dbtables.bsdTableDB(
            filename='tabletest.db', dbhome=homeDir, create=1)
    def tearDown(self):
        # Close the table DB and wipe every file the environment created.
        self.tdb.close()
        import glob
        files = glob.glob(os.path.join(self.homeDir, '*'))
        for file in files:
            os.remove(file)
    def test01(self):
        # Round-trip a single pickled float through a one-column table.
        tabname = "test01"
        colname = 'cool numbers'
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [colname])
        self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
        if verbose:
            self.tdb._db_print()
        values = self.tdb.Select(
            tabname, [colname], conditions={colname: None})
        colval = pickle.loads(values[0][colname])
        assert(colval > 3.141 and colval < 3.142)
    def test02(self):
        # Select with a callable condition over pickled ints; two of the
        # three rows satisfy "coolness >= 8".
        tabname = "test02"
        col0 = 'coolness factor'
        col1 = 'but can it fly?'
        col2 = 'Species'
        testinfo = [
            {col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
            {col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
            {col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
        ]
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [col0, col1, col2])
        for row in testinfo :
            self.tdb.Insert(tabname, row)
        values = self.tdb.Select(tabname, [col2],
                                 conditions={col0: lambda x: pickle.loads(x) >= 8})
        assert len(values) == 2
        # Row order from the DB is unspecified; accept either ordering.
        if values[0]['Species'] == 'Penguin' :
            assert values[1]['Species'] == 'SR-71A Blackbird'
        elif values[0]['Species'] == 'SR-71A Blackbird' :
            assert values[1]['Species'] == 'Penguin'
        else :
            if verbose:
                print "values= %r" % (values,)
            raise "Wrong values returned!"
    def test03(self):
        # Exercises CreateTable/Drop, unknown-column errors, regex and
        # Cond-object conditions, Delete, and NULL column values.
        tabname = "test03"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        if verbose:
            print '...before CreateTable...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
        if verbose:
            print '...after CreateTable...'
            self.tdb._db_print()
        self.tdb.Drop(tabname)
        if verbose:
            print '...after Drop...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
        try:
            # Insert with an unknown column 'f' must fail.
            self.tdb.Insert(tabname,
                            {'a': "",
                             'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                             'f': "Zero"})
            assert 0
        except dbtables.TableDBError:
            pass
        try:
            # Select on an unknown column must fail too.
            self.tdb.Select(tabname, [], conditions={'foo': '123'})
            assert 0
        except dbtables.TableDBError:
            pass
        self.tdb.Insert(tabname,
                        {'a': '42',
                         'b': "bad",
                         'c': "meep",
                         'e': 'Fuzzy wuzzy was a bear'})
        self.tdb.Insert(tabname,
                        {'a': '581750',
                         'b': "good",
                         'd': "bla",
                         'c': "black",
                         'e': 'fuzzy was here'})
        self.tdb.Insert(tabname,
                        {'a': '800000',
                         'b': "good",
                         'd': "bla",
                         'c': "black",
                         'e': 'Fuzzy wuzzy is a bear'})
        if verbose:
            self.tdb._db_print()
        # this should return two rows
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
                                 conditions={'e': re.compile('wuzzy').search,
                                             'a': re.compile('^[0-9]+$').match})
        assert len(values) == 2
        # now lets delete one of them and try again
        self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
        values = self.tdb.Select(
            tabname, ['a', 'd', 'b'],
            conditions={'e': dbtables.PrefixCond('Fuzzy')})
        assert len(values) == 1
        # Column 'd' was never set for this row, so it comes back as None.
        assert values[0]['d'] == None
        values = self.tdb.Select(tabname, ['b'],
                                 conditions={'c': lambda c: c == 'meep'})
        assert len(values) == 1
        assert values[0]['b'] == "bad"
    def test04_MultiCondSelect(self):
        # Multiple conditions must be ANDed: mixed matches yield no rows.
        tabname = "test04_MultiCondSelect"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
        try:
            self.tdb.Insert(tabname,
                            {'a': "",
                             'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                             'f': "Zero"})
            assert 0
        except dbtables.TableDBError:
            pass
        self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
                                  'e': "E"})
        self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
                                  'e': "-E"})
        self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
                                  'e': "E-"})
        if verbose:
            self.tdb._db_print()
        # This select should return 0 rows. it is designed to test
        # the bug identified and fixed in sourceforge bug # 590449
        # (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down
        # and supplying a fix!! This one caused many headaches to say
        # the least...)
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
                                 conditions={'e': dbtables.ExactCond('E'),
                                             'a': dbtables.ExactCond('A'),
                                             'd': dbtables.PrefixCond('-')
                                             } )
        assert len(values) == 0, values
    def test_CreateOrExtend(self):
        # Extending an existing table adds new columns without losing old ones.
        tabname = "test_CreateOrExtend"
        self.tdb.CreateOrExtendTable(
            tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
        try:
            self.tdb.Insert(tabname,
                            {'taste': 'crap',
                             'filling': 'no',
                             'is it Guinness?': 'no'})
            assert 0, "Insert should've failed due to bad column name"
        except:
            pass
        self.tdb.CreateOrExtendTable(tabname,
                                     ['name', 'taste', 'is it Guinness?'])
        # these should both succeed as the table should contain the union of both sets of columns.
        self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
                                  'is it Guinness?': 'no'})
        self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
                                  'is it Guinness?': 'yes',
                                  'name': 'Guinness'})
    def test_CondObjs(self):
        # Exercises the Cond/ExactCond/PrefixCond/LikeCond condition objects,
        # alone and combined with a plain callable.
        tabname = "test_CondObjs"
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
        self.tdb.Insert(tabname, {'a': "the letter A",
                                  'b': "the letter B",
                                  'c': "is for cookie"})
        self.tdb.Insert(tabname, {'a': "is for aardvark",
                                  'e': "the letter E",
                                  'c': "is for cookie",
                                  'd': "is for dog"})
        self.tdb.Insert(tabname, {'a': "the letter A",
                                  'e': "the letter E",
                                  'c': "is for cookie",
                                  'p': "is for Python"})
        values = self.tdb.Select(
            tabname, ['p', 'e'],
            conditions={'e': dbtables.PrefixCond('the l')})
        assert len(values) == 2, values
        assert values[0]['e'] == values[1]['e'], values
        assert values[0]['p'] != values[1]['p'], values
        values = self.tdb.Select(
            tabname, ['d', 'a'],
            conditions={'a': dbtables.LikeCond('%aardvark%')})
        assert len(values) == 1, values
        assert values[0]['d'] == "is for dog", values
        assert values[0]['a'] == "is for aardvark", values
        values = self.tdb.Select(tabname, None,
                                 {'b': dbtables.Cond(),
                                  'e':dbtables.LikeCond('%letter%'),
                                  'a':dbtables.PrefixCond('is'),
                                  'd':dbtables.ExactCond('is for dog'),
                                  'c':dbtables.PrefixCond('is for'),
                                  'p':lambda s: not s})
        assert len(values) == 1, values
        assert values[0]['d'] == "is for dog", values
        assert values[0]['a'] == "is for aardvark", values
    def test_Delete(self):
        tabname = "test_Delete"
        self.tdb.CreateTable(tabname, ['x', 'y', 'z'])
        # prior to 2001-05-09 there was a bug where Delete() would
        # fail if it encountered any rows that did not have values in
        # every column.
        # Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff@nic.fi)
        self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
        self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
        self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
        values = self.tdb.Select(tabname, ['y'],
                                 conditions={'x': dbtables.PrefixCond('X')})
        assert len(values) == 0
    def test_Modify(self):
        # Exercises Modify() with mapping callables that set, increment and
        # remove column values.
        tabname = "test_Modify"
        self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
        self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
                                  'Type': 'Word', 'Access': '8'})
        self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
        self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
        def set_type(type):
            # Default missing types to 'MP3'.
            if type == None:
                return 'MP3'
            return type
        def increment_access(count):
            return str(int(count)+1)
        def remove_value(value):
            return None
        self.tdb.Modify(tabname,
                        conditions={'Access': dbtables.ExactCond('0')},
                        mappings={'Access': remove_value})
        self.tdb.Modify(tabname,
                        conditions={'Name': dbtables.LikeCond('%MP3%')},
                        mappings={'Type': set_type})
        self.tdb.Modify(tabname,
                        conditions={'Name': dbtables.LikeCond('%')},
                        mappings={'Access': increment_access})
        # Delete key in select conditions
        values = self.tdb.Select(
            tabname, None,
            conditions={'Type': dbtables.ExactCond('Unknown')})
        assert len(values) == 1, values
        assert values[0]['Name'] == None, values
        assert values[0]['Access'] == None, values
        # Modify value by select conditions
        values = self.tdb.Select(
            tabname, None,
            conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
        assert len(values) == 1, values
        assert values[0]['Type'] == "MP3", values
        assert values[0]['Access'] == "2", values
        # Make sure change applied only to select conditions
        values = self.tdb.Select(
            tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
        assert len(values) == 1, values
        assert values[0]['Type'] == "Word", values
        assert values[0]['Access'] == "9", values
def test_suite():
    """Build the suite containing every TableDBTestCase test."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(TableDBTestCase))
    return all_tests
# Run the whole table-interface suite when invoked as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""ResNet models for Keras.
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
# Base URL that pretrained ImageNet weight files are downloaded from.
BASE_WEIGHTS_PATH = (
    'https://storage.googleapis.com/tensorflow/keras-applications/resnet/')
# (top-weights md5, no-top-weights md5) pairs keyed by model name; used by
# ResNet() to verify downloads when weights='imagenet'.
WEIGHTS_HASHES = {
    'resnet50': ('2cb95161c43110f7111970584f804107',
                 '4d473c1dd8becc155b73f8504c6f6626'),
    'resnet101': ('f1aeb4b969a6efcfb50fad2f0c20cfc5',
                  '88cf7a10940856eca736dc7b7e228a21'),
    'resnet152': ('100835be76be38e30d865e96f2aaae62',
                  'ee4c566cf9a93f14d82f913c2dc6dd0c'),
    'resnet50v2': ('3ef43a0b657b3be2300d5770ece849e0',
                   'fac2f116257151a9d068a22e544a4917'),
    'resnet101v2': ('6343647c601c52e1368623803854d971',
                    'c0ed64b8031c3730f411d2eb4eea35b5'),
    'resnet152v2': ('a49b44d1979771252814e80f8ec446f9',
                    'ed17cf2e0169df9d443503ef94b23b33'),
    'resnext50': ('67a5b30d522ed92f75a1f16eef299d1a',
                  '62527c363bdd9ec598bed41947b379fc'),
    'resnext101':
        ('34fb605428fcc7aa4d62f44404c11509', '0f678c91647380debd923963594981b3')
}
# Populated by ResNet() with a VersionAwareLayers instance (or a caller
# supplied `layers` module for backwards compatibility).
layers = None
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           weights='imagenet',
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           classifier_activation='softmax',
           **kwargs):
  """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
  Reference:
  - [Deep Residual Learning for Image Recognition](
      https://arxiv.org/abs/1512.03385) (CVPR 2015)
  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.
  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.resnet.preprocess_input` for an example.
  Arguments:
    stack_fn: a function that returns output tensor for the
      stacked residual blocks.
    preact: whether to use pre-activation or not
      (True for ResNetV2, False for ResNet and ResNeXt).
    use_bias: whether to use biases for convolutional layers or not
      (True for ResNet and ResNetV2, False for ResNeXt).
    model_name: string, model name.
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format).
      It should have exactly 3 inputs channels.
    pooling: optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional layer.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional layer, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    **kwargs: For backwards compatibility only.
  Returns:
    A `keras.Model` instance.
  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
  # Backwards-compat hook: a caller-supplied `layers` module overrides the
  # default VersionAwareLayers instance for the whole module.
  global layers
  if 'layers' in kwargs:
    layers = kwargs.pop('layers')
  else:
    layers = VersionAwareLayers()
  if kwargs:
    raise ValueError('Unknown argument(s): %s' % (kwargs,))
  if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')
  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                     ' as true, `classes` should be 1000')
  # Determine proper input shape
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)
  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor
  # Channel axis for BatchNormalization depends on the data format.
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  # Stem: 7x7/2 conv (plus BN/ReLU unless pre-activation) and 3x3/2 max pool.
  x = layers.ZeroPadding2D(
      padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
  x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
  if not preact:
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
    x = layers.Activation('relu', name='conv1_relu')(x)
  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
  x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
  x = stack_fn(x)
  if preact:
    # ResNetV2 applies BN/ReLU once after all residual stacks instead.
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='post_bn')(x)
    x = layers.Activation('relu', name='post_relu')(x)
  if include_top:
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D(name='max_pool')(x)
  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input
  # Create model.
  model = training.Model(inputs, x, name=model_name)
  # Load weights.
  if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
    if include_top:
      file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
      file_hash = WEIGHTS_HASHES[model_name][0]
    else:
      file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
      file_hash = WEIGHTS_HASHES[model_name][1]
    weights_path = data_utils.get_file(
        file_name,
        BASE_WEIGHTS_PATH + file_name,
        cache_subdir='models',
        file_hash=file_hash)
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)
  return model
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
  """A residual block.

  Classic (v1) bottleneck: 1x1 reduce -> kxk conv -> 1x1 expand, each
  followed by batch norm, added to a (projection or identity) shortcut.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    conv_shortcut: default True, use convolution shortcut if True,
      otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
  channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1

  # Projection shortcut matches the block's output channel count; the
  # identity shortcut is used when shapes already agree.
  if conv_shortcut:
    residual = layers.Conv2D(
        4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
    residual = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + '_0_bn')(residual)
  else:
    residual = x

  y = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
  y = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name=name + '_1_bn')(y)
  y = layers.Activation('relu', name=name + '_1_relu')(y)

  y = layers.Conv2D(
      filters, kernel_size, padding='SAME', name=name + '_2_conv')(y)
  y = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name=name + '_2_bn')(y)
  y = layers.Activation('relu', name=name + '_2_relu')(y)

  y = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(y)
  y = layers.BatchNormalization(
      axis=channel_axis, epsilon=1.001e-5, name=name + '_3_bn')(y)

  y = layers.Add(name=name + '_add')([residual, y])
  return layers.Activation('relu', name=name + '_out')(y)
def stack1(x, filters, blocks, stride1=2, name=None):
  """A set of stacked residual blocks.

  The first block may downsample (and always uses a projection shortcut);
  the remaining blocks keep the resolution and use identity shortcuts.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer in a block.
    blocks: integer, blocks in the stacked blocks.
    stride1: default 2, stride of the first layer in the first block.
    name: string, stack label.

  Returns:
    Output tensor for the stacked blocks.
  """
  x = block1(x, filters, stride=stride1, name=name + '_block1')
  for block_idx in range(2, blocks + 1):
    x = block1(x, filters, conv_shortcut=False,
               name=name + '_block' + str(block_idx))
  return x
def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
  """A residual block.

  Pre-activation (v2-style) bottleneck: batch norm + ReLU are applied
  *before* the convolutions, and the final 1x1 expansion conv is left
  un-normalized (the next block's pre-activation normalizes it).

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    conv_shortcut: default False, use convolution shortcut if True,
      otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  # Shared pre-activation feeding both the projection shortcut and the
  # first bottleneck conv.
  preact = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_preact_bn')(x)
  preact = layers.Activation('relu', name=name + '_preact_relu')(preact)
  if conv_shortcut:
    shortcut = layers.Conv2D(
        4 * filters, 1, strides=stride, name=name + '_0_conv')(preact)
  else:
    # Identity shortcut; a strided 1x1 max-pool subsamples the raw input
    # when the block downsamples.
    shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
  x = layers.Conv2D(
      filters, 1, strides=1, use_bias=False, name=name + '_1_conv')(preact)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
  x = layers.Activation('relu', name=name + '_1_relu')(x)
  # Explicit padding so the strided conv below can use default valid padding.
  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
  x = layers.Conv2D(
      filters,
      kernel_size,
      strides=stride,
      use_bias=False,
      name=name + '_2_conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
  x = layers.Activation('relu', name=name + '_2_relu')(x)
  x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
  x = layers.Add(name=name + '_out')([shortcut, x])
  return x
def stack2(x, filters, blocks, stride1=2, name=None):
  """A set of stacked residual blocks.

  Note the v2 convention: the *last* block of the stack carries the
  stride, not the first.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer in a block.
    blocks: integer, blocks in the stacked blocks.
    stride1: default 2, stride of the first layer in the first block.
    name: string, stack label.

  Returns:
    Output tensor for the stacked blocks.
  """
  x = block2(x, filters, conv_shortcut=True, name=name + '_block1')
  for block_idx in range(2, blocks):
    x = block2(x, filters, name=name + '_block' + str(block_idx))
  return block2(x, filters, stride=stride1, name=name + '_block' + str(blocks))
def block3(x,
           filters,
           kernel_size=3,
           stride=1,
           groups=32,
           conv_shortcut=True,
           name=None):
  """A residual block.

  ResNeXt-style bottleneck with a grouped 3x3 convolution. The grouped
  convolution is emulated below with a DepthwiseConv2D (depth_multiplier=c)
  followed by a reshape and a channel-sum, since there is no native grouped
  conv layer used here.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    groups: default 32, group size for grouped convolution.
    conv_shortcut: default True, use convolution shortcut if True,
      otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  if conv_shortcut:
    # Projection shortcut to the block's output width: (64 // groups) * filters.
    shortcut = layers.Conv2D(
        (64 // groups) * filters,
        1,
        strides=stride,
        use_bias=False,
        name=name + '_0_conv')(x)
    shortcut = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
  else:
    shortcut = x
  x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
  x = layers.Activation('relu', name=name + '_1_relu')(x)
  # c = channels per group for the grouped convolution below.
  c = filters // groups
  x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
  # Depthwise conv with depth_multiplier=c computes, per input channel, the
  # c outputs that a grouped conv would produce from that channel alone.
  x = layers.DepthwiseConv2D(
      kernel_size,
      strides=stride,
      depth_multiplier=c,
      use_bias=False,
      name=name + '_2_conv')(x)
  # Spatial dims only (requires static shapes here — batch/channel dropped).
  x_shape = backend.int_shape(x)[1:-1]
  # Regroup to (..., groups, c, c) and sum over input channels within each
  # group, completing the grouped-convolution accumulation.
  x = layers.Reshape(x_shape + (groups, c, c))(x)
  x = layers.Lambda(
      lambda x: sum(x[:, :, :, :, i] for i in range(c)),
      name=name + '_2_reduce')(x)
  x = layers.Reshape(x_shape + (filters,))(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
  x = layers.Activation('relu', name=name + '_2_relu')(x)
  x = layers.Conv2D(
      (64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
  x = layers.Add(name=name + '_add')([shortcut, x])
  x = layers.Activation('relu', name=name + '_out')(x)
  return x
def stack3(x, filters, blocks, stride1=2, groups=32, name=None):
  """A set of stacked residual blocks.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer in a block.
    blocks: integer, blocks in the stacked blocks.
    stride1: default 2, stride of the first layer in the first block.
    groups: default 32, group size for grouped convolution.
    name: string, stack label.

  Returns:
    Output tensor for the stacked blocks.
  """
  # First block downsamples and projects; the rest use identity shortcuts.
  x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1')
  for block_idx in range(2, blocks + 1):
    x = block3(x, filters, groups=groups, conv_shortcut=False,
               name=name + '_block' + str(block_idx))
  return x
@keras_export('keras.applications.resnet50.ResNet50',
              'keras.applications.resnet.ResNet50',
              'keras.applications.ResNet50')
def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             **kwargs):
  """Instantiates the ResNet50 architecture."""

  def _stack(x):
    # conv2_x .. conv5_x with (3, 4, 6, 3) v1 bottleneck blocks.
    x = stack1(x, 64, 3, stride1=1, name='conv2')
    x = stack1(x, 128, 4, name='conv3')
    x = stack1(x, 256, 6, name='conv4')
    return stack1(x, 512, 3, name='conv5')

  # preact=False, use_bias=True: plain (v1) ResNet.
  return ResNet(_stack, False, True, 'resnet50', include_top, weights,
                input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet.ResNet101',
              'keras.applications.ResNet101')
def ResNet101(include_top=True,
              weights='imagenet',
              input_tensor=None,
              input_shape=None,
              pooling=None,
              classes=1000,
              **kwargs):
  """Instantiates the ResNet101 architecture."""

  def _stack(x):
    # conv2_x .. conv5_x with (3, 4, 23, 3) v1 bottleneck blocks.
    x = stack1(x, 64, 3, stride1=1, name='conv2')
    x = stack1(x, 128, 4, name='conv3')
    x = stack1(x, 256, 23, name='conv4')
    return stack1(x, 512, 3, name='conv5')

  # preact=False, use_bias=True: plain (v1) ResNet.
  return ResNet(_stack, False, True, 'resnet101', include_top, weights,
                input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet.ResNet152',
              'keras.applications.ResNet152')
def ResNet152(include_top=True,
              weights='imagenet',
              input_tensor=None,
              input_shape=None,
              pooling=None,
              classes=1000,
              **kwargs):
  """Instantiates the ResNet152 architecture."""

  def _stack(x):
    # conv2_x .. conv5_x with (3, 8, 36, 3) v1 bottleneck blocks.
    x = stack1(x, 64, 3, stride1=1, name='conv2')
    x = stack1(x, 128, 8, name='conv3')
    x = stack1(x, 256, 36, name='conv4')
    return stack1(x, 512, 3, name='conv5')

  # preact=False, use_bias=True: plain (v1) ResNet.
  return ResNet(_stack, False, True, 'resnet152', include_top, weights,
                input_tensor, input_shape, pooling, classes, **kwargs)
@keras_export('keras.applications.resnet50.preprocess_input',
              'keras.applications.resnet.preprocess_input')
def preprocess_input(x, data_format=None):
  # Caffe-style preprocessing (mode='caffe'). __doc__ is assigned below
  # from imagenet_utils.PREPROCESS_INPUT_DOC, so no docstring here.
  return imagenet_utils.preprocess_input(
      x, data_format=data_format, mode='caffe')
@keras_export('keras.applications.resnet50.decode_predictions',
              'keras.applications.resnet.decode_predictions')
def decode_predictions(preds, top=5):
  # Thin wrapper over the shared imagenet_utils implementation; __doc__ is
  # copied from it below.
  return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
DOC = """
Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385) (CVPR 2015)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.resnet.preprocess_input` for an example.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `'channels_last'` data format)
or `(3, 224, 224)` (with `'channels_first'` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
"""
setattr(ResNet50, '__doc__', ResNet50.__doc__ + DOC)
setattr(ResNet101, '__doc__', ResNet101.__doc__ + DOC)
setattr(ResNet152, '__doc__', ResNet152.__doc__ + DOC)
| |
"""This script contains relevant functions to read the configuration for
MSAF.
A bunch of it is basically shamefully copy pasted from the almighty theano."""
from __future__ import absolute_import, print_function, division
try:
from configparser import (ConfigParser, NoOptionError, NoSectionError,
InterpolationError)
except ImportError:
from six.moves.configparser import (ConfigParser, NoOptionError, NoSectionError,
InterpolationError)
import os
import shlex
from six import StringIO
from six import string_types
import sys
import warnings
import msaf
# Raw value of the MSAF_FLAGS environment variable ("" when unset).
MSAF_FLAGS = os.getenv(msaf.MSAF_FLAGS_VAR, "")
# The MSAF_FLAGS environment variable should be a list of comma-separated
# [section.]option=value entries, e.g. "a.b=1,c=2". If the section part is
# omitted, there should be only one section that contains the given option.
class MsafConfigWarning(Warning):
    """Warning category for problems found while parsing MSAF config."""

    @classmethod
    def warn(cls, message, stacklevel=0):
        # +3 so the reported source location points at the caller of the
        # config machinery rather than at this helper.
        warnings.warn(message, cls, stacklevel=stacklevel + 3)
def parse_config_string(config_string, issue_warnings=True):
    """
    Parses a config string (comma-separated key=value components) into a dict.
    """
    parsed = {}
    # shlex in POSIX mode with ',' as the only separator handles quoting
    # inside individual entries.
    splitter = shlex.shlex(config_string, posix=True)
    splitter.whitespace = ','
    splitter.whitespace_split = True
    for token in splitter:
        token = token.strip()
        if not token:
            continue
        key, sep, value = token.partition('=')
        if not sep:
            # Entry has no '=' at all: nothing to assign.
            if issue_warnings:
                MsafConfigWarning.warn(
                    ("Config key '%s' has no value, ignoring it" % key),
                    stacklevel=1)
        else:
            # Later occurrences of the same key override earlier ones.
            parsed[key] = value
    return parsed
MSAF_FLAGS_DICT = parse_config_string(MSAF_FLAGS, issue_warnings=True)
# MSAFRC can contain a colon-delimited list of config files, like
# MSAFRC=~rincewind/.msafrc:~/.msafrc
# In that case, definitions in files on the right (here, ~/.msafrc) have
# precedence over those in files on the left.
def config_files_from_msafrc():
    """Return the candidate msafrc paths, lowest precedence first."""
    raw = os.getenv(msaf.MSAFRC_VAR, msaf.MSAFRC_FILE).split(os.pathsep)
    rval = [os.path.expanduser(entry) for entry in raw]
    # On Windows, when MSAFRC is not set explicitly, also try the
    # Windows-specific default file name (easier to open/edit there).
    if os.getenv(msaf.MSAFRC_VAR) is None and sys.platform == "win32":
        rval.append(os.path.expanduser(msaf.MSAFRC_WIN_FILE))
    return rval
config_files = config_files_from_msafrc()
# Defaults below are made available for %(...)s interpolation inside the
# config files.
msaf_cfg = ConfigParser(
    {'USER': os.getenv("USER", os.path.split(os.path.expanduser('~'))[-1]),
     'LSCRATCH': os.getenv("LSCRATCH", ""),
     'TMPDIR': os.getenv("TMPDIR", ""),
     'TEMP': os.getenv("TEMP", ""),
     'TMP': os.getenv("TMP", ""),
     'PID': str(os.getpid()),
     }
)
msaf_cfg.read(config_files)
# Having a raw version of the config around as well enables us to pass
# through config values that contain format strings.
# The time required to parse the config twice is negligible.
msaf_raw_cfg = ConfigParser()
msaf_raw_cfg.read(config_files)
def fetch_val_for_key(key, delete_key=False):
    """Return the overriding config value for a key.

    A successful search returns a string value.
    An unsuccessful search raises a KeyError

    The (decreasing) priority order is:
    - MSAF_FLAGS
    - ~./msafrc
    """
    # MSAF_FLAGS entries take precedence over anything in config files.
    if key in MSAF_FLAGS_DICT:
        if delete_key:
            return MSAF_FLAGS_DICT.pop(key)
        return MSAF_FLAGS_DICT[key]
    # Config-file keys can be of the form "option" or "section.option";
    # bare options live in the 'global' section.
    if '.' in key:
        section, option = key.rsplit('.', 1)
    else:
        section, option = 'global', key
    try:
        try:
            return msaf_cfg.get(section, option)
        except InterpolationError:
            # Fall back to the raw parser so values containing format
            # strings pass through untouched.
            return msaf_raw_cfg.get(section, option)
    except (NoOptionError, NoSectionError):
        raise KeyError(key)
_config_var_list = []
def _config_print(thing, buf, print_doc=True):
for cv in _config_var_list:
print(cv, file=buf)
if print_doc:
print(" Doc: ", cv.doc, file=buf)
print(" Value: ", cv.__get__(True, None), file=buf)
print("", file=buf)
class MsafConfigParser(object):
    """Access point for msaf configuration values.

    Properties are installed on this class by AddConfigVar, so every
    instance exposes the same configuration attributes.
    """
    _i_am_a_config_class = True

    def __str__(self, print_doc=True):
        out = StringIO()
        _config_print(self.__class__, out, print_doc=print_doc)
        return out.getvalue()
# N.B. all instances of MsafConfigParser give access to the same properties.
config = MsafConfigParser()
# The data structure at work here is a tree of CLASSES with
# CLASS ATTRIBUTES/PROPERTIES that are either a) INSTANTIATED
# dynamically-generated CLASSES, or b) ConfigParam instances.  The root
# of this tree is the MsafConfigParser CLASS, and the internal nodes
# are the SubObj classes created inside of AddConfigVar().
# Why this design ?
# - The config object is a true singleton.  Every instance of
#   MsafConfigParser is an empty instance that looks up attributes/properties
#   in the [single] MsafConfigParser.__dict__
# - The subtrees provide the same interface as the root
# - ConfigParser subclasses control get/set of config properties to guard
#   against craziness.
def AddConfigVar(name, doc, configparam, root=config):
    """Add a new variable to msaf.config

    Parameters
    ----------
    name: str
        String of the form "[section0.[section1.[etc]]]option", containing the
        full name for this configuration variable.
    doc: str
        What does this variable specify?
    configparam: `ConfigParam`
        An object for getting and setting this configuration parameter.
    root: object
        Used for recursive calls -- do not provide an argument for this
        parameter.
    """
    # This method also performs some of the work of initializing ConfigParam
    # instances
    if root is config:
        # only set the name in the first call, not the recursive ones
        configparam.fullname = name
    sections = name.split('.')
    if len(sections) > 1:
        # set up a subobject
        if not hasattr(root, sections[0]):
            # every internal node in the config tree is an instance of its own
            # unique class
            class SubObj(object):
                _i_am_a_config_class = True
            setattr(root.__class__, sections[0], SubObj())
        newroot = getattr(root, sections[0])
        if (not getattr(newroot, '_i_am_a_config_class', False) or
            isinstance(newroot, type)):
            raise TypeError(
                'Internal config nodes must be config class instances',
                newroot)
        # Recurse with the remaining "section1.[etc.]option" suffix.
        return AddConfigVar('.'.join(sections[1:]), doc, configparam,
                            root=newroot)
    else:
        if hasattr(root, name):
            raise AttributeError('This name is already taken',
                                 configparam.fullname)
        configparam.doc = doc
        # Trigger a read of the value from config files and env vars
        # This allow to filter wrong value from the user.
        if not callable(configparam.default):
            configparam.__get__(root, type(root), delete_key=True)
        else:
            # We do not want to evaluate now the default value
            # when it is a callable.
            try:
                fetch_val_for_key(configparam.fullname)
                # The user provided a value, filter it now.
                configparam.__get__(root, type(root), delete_key=True)
            except KeyError:
                pass
        # Install the descriptor on the CLASS so all instances share it.
        setattr(root.__class__, sections[0], configparam)
        _config_var_list.append(configparam)
class ConfigParam(object):
    """Descriptor for a single configuration parameter.

    Instances are installed on MsafConfigParser (or a SubObj node) by
    AddConfigVar; attribute access then goes through __get__/__set__.
    """

    def __init__(self, default, filter=None, allow_override=True):
        """
        If allow_override is False, we can't change the value after it is
        first set. So the value should be the same during all the execution.
        """
        self.default = default
        self.filter = filter
        self.allow_override = allow_override
        self.is_default = True
        # N.B. --
        # self.fullname  # set by AddConfigVar
        # self.doc       # set by AddConfigVar
        # Note that we do not call `self.filter` on the default value: this
        # will be done automatically in AddConfigVar, potentially with a
        # more appropriate user-provided default value.
        # Calling `filter` here may actually be harmful if the default value is
        # invalid and causes a crash or has unwanted side effects.

    def __get__(self, cls, type_, delete_key=False):
        # Class-level access returns the descriptor itself.
        if cls is None:
            return self
        if not hasattr(self, 'val'):
            # First access: resolve from MSAF_FLAGS / config files, falling
            # back to the (possibly callable) default; this lazily caches
            # the filtered value in self.val via __set__.
            try:
                val_str = fetch_val_for_key(self.fullname,
                                            delete_key=delete_key)
                self.is_default = False
            except KeyError:
                if callable(self.default):
                    val_str = self.default()
                else:
                    val_str = self.default
            self.__set__(cls, val_str)
        # print "RVAL", self.val
        return self.val

    def __set__(self, cls, val):
        if not self.allow_override and hasattr(self, 'val'):
            raise Exception(
                "Can't change the value of this config parameter "
                "after initialization!")
        # print "SETTING PARAM", self.fullname,(cls), val
        if self.filter:
            self.val = self.filter(val)
        else:
            self.val = val
class EnumStr(ConfigParam):
    """Config parameter restricted to a fixed set of string options."""

    def __init__(self, default, *options, **kwargs):
        self.default = default
        self.all = (default,) + options
        # All options should be strings (None passes through untouched).
        for option in self.all:
            if option is not None and not isinstance(option, string_types):
                raise ValueError('Valid values for an EnumStr parameter '
                                 'should be strings or `None`',
                                 option, type(option))
        convert = kwargs.get("convert", None)

        def filter(val):
            # uri: We want to keep None values
            if val is None:
                return val
            if convert:
                val = convert(val)
            if val not in self.all:
                raise ValueError((
                    'Invalid value ("%s") for configuration variable "%s". '
                    'Valid options are %s'
                    % (val, self.fullname, self.all)))
            return val

        over = kwargs.get("allow_override", True)
        super(EnumStr, self).__init__(default, filter, over)

    def __str__(self):
        return '%s (%s) ' % (self.fullname, self.all)
class ListParam(ConfigParam):
    """Config parameter whose value is a non-empty list.

    The value is stored unfiltered; only the default is validated at
    construction time.
    """

    def __init__(self, default, *options, **kwargs):
        """
        Parameters
        ----------
        default : list
            Non-empty default list for this parameter.
        options : tuple
            Unused; accepted for signature compatibility with EnumStr.
        kwargs : dict
            Only ``allow_override`` (default True) is honoured.

        Raises
        ------
        ValueError
            If ``default`` is empty, or has no length at all.
        """
        self.default = default
        # NOTE: previously validated with `assert len(default) > 0`, which is
        # silently stripped when Python runs with -O; use explicit checks so
        # validation always happens. Error messages are unchanged.
        try:
            length = len(default)
        except TypeError:
            raise ValueError("The parameter is not a list.")
        if length == 0:
            raise ValueError("List is empty")
        over = kwargs.get("allow_override", True)
        super(ListParam, self).__init__(default, None, over)

    def __str__(self):
        return '%s (%s) ' % (self.fullname, self.default)
class TypedParam(ConfigParam):
    """Config parameter cast with `mytype` and optionally validated."""

    def __init__(self, default, mytype, is_valid=None, allow_override=True):
        self.mytype = mytype

        def filter(val):
            # uri: We want to keep None values
            if val is None:
                return val
            cast_val = mytype(val)
            if not callable(is_valid):
                return cast_val
            if is_valid(cast_val):
                return cast_val
            raise ValueError(
                'Invalid value (%s) for configuration variable '
                '"%s".'
                % (val, self.fullname), val)

        super(TypedParam, self).__init__(default, filter,
                                         allow_override=allow_override)

    def __str__(self):
        return '%s (%s) ' % (self.fullname, self.mytype)
def StrParam(default, is_valid=None, allow_override=True):
    # Shorthand for a TypedParam that casts with str.
    return TypedParam(default, str, is_valid, allow_override=allow_override)


def IntParam(default, is_valid=None, allow_override=True):
    # Shorthand for a TypedParam that casts with int.
    return TypedParam(default, int, is_valid, allow_override=allow_override)


def FloatParam(default, is_valid=None, allow_override=True):
    # Shorthand for a TypedParam that casts with float.
    return TypedParam(default, float, is_valid, allow_override=allow_override)
def BoolParam(default, is_valid=None, allow_override=True):
    """Boolean config parameter accepting common true/false spellings."""
    # see comment at the beginning of this file.

    def booltype(value):
        if value in ['True', 'true', '1', True]:
            return True
        if value in ['False', 'false', '0', False]:
            return False
        # Anything else falls through to None; is_valid_bool rejects it.

    def is_valid_bool(value):
        return value in ['False', 'false', '0', 'True', 'true', '1',
                         False, True]

    if is_valid is None:
        is_valid = is_valid_bool
    return TypedParam(default, booltype, is_valid,
                      allow_override=allow_override)
| |
# -*- coding: utf-8 -*-
#
# climlab-0.2.13 Documentation documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 25 10:23:51 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import climlab
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
print("python exec:", sys.executable)
print("sys.path:", sys.path)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'matplotlib.sphinxext.mathmpl',
    #'matplotlib.sphinxext.only_directives',
    'matplotlib.sphinxext.plot_directive',
    #'matplotlib.sphinxext.ipython_directive',
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    #'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    #'sphinx.ext.graphviz',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    #'sphinx.ext.automodsumm',
    #'nbsphinx',
    'IPython.sphinxext.ipython_console_highlighting',
    'sphinxcontrib.bibtex',
    'numpydoc',
    'nbsphinx'
]
# Never execute notebooks at build time (they are committed pre-executed).
nbsphinx_execute = 'never'
#inheritance_graph_attrs = dict(rankdir="TB", size='"6.0, 8.0"', fontsize=14, ratio='compress')
inheritance_graph_attrs = dict(rankdir="LR", size='""', ratio='expand')
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75, color='dodgerblue1', style='filled')
inheritance_node_attrs = dict(fontsize=14, dirType="back")
inheritance_edge_attrs = dict(dirType="back")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'climlab'
copyright = u'2017 Brian E. J. Rose, University at Albany (MIT License)'
author = u'Brian E. J. Rose'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.3'
# The full version, including alpha/beta/rc tags.
#release = '0.3.2'
# This is now more dynamic:
release = climlab.__version__
# Derive the short version by dropping the last '.'-separated component
# (e.g. '0.3.2' -> '0.3'); releases with fewer components are used as-is.
if release.count('.') >= 2:
    version, ignored, subversion = release.rpartition('.')
else:
    version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['climlab']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- manually added --
# This value selects what content will be inserted into the main body of an
# autoclass directive. The possible values are: 'class', 'both', 'init'.
autoclass_content = 'both'
# Intersphinx references
intersphinx_mapping = { 'python':('http://docs.python.org/', None),
  'numpy': ('http://docs.scipy.org/doc/numpy/', None),
  'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None)
  }
bibtex_bibfiles = ['bibliography.bib']
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
    '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
    'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
#htmlhelp_basename = 'climlab-032Documentationdoc'
# Output file base name for the HTML help builder, derived from the release.
htmlhelp_basename = 'climlab-' + release + 'Documentationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'a4paper',
# Let's go with the US standard here
'papersize': 'letterpaper',
# overwrite printindex -> no index
'printindex': '',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc,
     'climlab-' + release + 'Documentation.tex',
     u'climlab-' + version + ' Documentation',
     u'Moritz Kreuzer and Brian E. J. Rose',
     'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = os.path.abspath('/home/moritz/PIK/subversion/github/climlab-documentation/source/_static/logo.png')
# relative path within source directory
latex_logo = '_static/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# Manually added:
#latex_use_modindex = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc,
     'climlab-' + release + 'documentation',
     u'climlab-' + version + ' Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc,
     'climlab-' + release +'Documentation',
     u'climlab-' + release + ' Documentation',
     author,
     'climlab-' + release +'Documentation',
     'Python package for process-oriented climate modeling',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os.path
import tokenize
from collections import defaultdict
from dataclasses import dataclass
from typing import DefaultDict
from pants.backend.python.lint.flake8.subsystem import Flake8
from pants.backend.python.lint.pylint.subsystem import Pylint
from pants.backend.python.target_types import PythonRequirementTarget
from pants.backend.python.typecheck.mypy.subsystem import MyPy
from pants.build_graph.address import AddressParseException, InvalidAddress
from pants.core.goals.update_build_files import (
DeprecationFixerRequest,
RewrittenBuildFile,
RewrittenBuildFileRequest,
UpdateBuildFilesSubsystem,
)
from pants.core.target_types import (
TargetGeneratorSourcesHelperSourcesField,
TargetGeneratorSourcesHelperTarget,
)
from pants.engine.addresses import (
Address,
Addresses,
AddressInput,
BuildFileAddress,
UnparsedAddressInputs,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
AllTargets,
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
UnexpandedTargets,
)
from pants.engine.unions import UnionRule
from pants.option.global_options import GlobalOptions
from pants.util.docutil import doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list
logger = logging.getLogger(__name__)
@dataclass(frozen=True, order=True)
class GeneratorRename:
    """A macro invocation to convert into a target generator.

    `order=True` so instances sort deterministically for stable log output.
    """

    build_path: str  # Relative path of the BUILD file containing the macro.
    alias: str  # Target generator alias, e.g. `python_requirements`.
    new_name: str | None  # Explicit `name=` the user must add, or None if the default works.
@dataclass(frozen=True)
class MacroRenames:
    """The computed plan for converting Python macros into target generators."""

    generators: tuple[GeneratorRename, ...]
    # Maps each macro-generated address to (new generated-target address, alias).
    # Includes the alias for the macro.
    generated: FrozenDict[Address, tuple[Address, str]]
class MacroRenamesRequest:
    """Rule-engine request marker used to resolve `MacroRenames`."""

    pass
@rule(desc="Determine how to rename Python macros to target generators", level=LogLevel.DEBUG)
async def determine_macro_changes(all_targets: AllTargets, _: MacroRenamesRequest) -> MacroRenames:
    """Compute which macro invocations become target generators and how.

    Returns a `MacroRenames` mapping every macro-generated `python_requirement`
    address to its replacement generated-target address. Side effect: logs an
    error listing any `name=` fields the user must add manually.
    """
    # Strategy: Find `python_requirement` targets who depend on a `_python_requirements_file`
    # target to figure out which macros we have. Note that context-aware object factories (CAOFs)
    # are not actual targets and are "erased", so this is the way to find the macros.
    #
    # We also need to figure out if the new target generator can use the default `name=None` or
    # if it needs to set an explicit name, based on whether it's the build root and whether the
    # default is already taken.
    dirs_with_default_name = set()
    python_requirement_dependencies_fields = set()
    for tgt in all_targets:
        if tgt.address.is_default_target:
            dirs_with_default_name.add(tgt.address.spec_path)
        if isinstance(tgt, PythonRequirementTarget) and tgt[Dependencies].value is not None:
            python_requirement_dependencies_fields.add(tgt[Dependencies])

    # The three MultiGets below are index-aligned with
    # `python_requirement_dependencies_fields` and zipped back together later.
    build_file_addresses_per_tgt = await MultiGet(
        Get(BuildFileAddress, Address, deps_field.address)
        for deps_field in python_requirement_dependencies_fields
    )
    explicit_deps_per_tgt = await MultiGet(
        Get(ExplicitlyProvidedDependencies, DependenciesRequest(deps_field))
        for deps_field in python_requirement_dependencies_fields
    )
    deps_per_tgt = await MultiGet(
        Get(UnexpandedTargets, Addresses(explicit_deps.includes))
        for explicit_deps in explicit_deps_per_tgt
    )

    generators = set()
    generated = {}
    for python_req_deps_field, build_file_addr, deps in zip(
        python_requirement_dependencies_fields, build_file_addresses_per_tgt, deps_per_tgt
    ):
        generator_tgt = next(
            (tgt for tgt in deps if isinstance(tgt, TargetGeneratorSourcesHelperTarget)), None
        )
        if generator_tgt is None:
            continue
        generator_sources = generator_tgt[TargetGeneratorSourcesHelperSourcesField].value
        if not generator_sources or len(generator_sources) != 1:
            continue
        generator_source = generator_sources[0]
        # `go.mod`-style sources belong to the Go backend, not a Python macro.
        if "go." in generator_source:
            continue
        # Infer which macro produced this helper target from its source file.
        if "Pipfile" in generator_source:
            generator_alias = "pipenv_requirements"
        elif "pyproject.toml" in generator_source:
            generator_alias = "poetry_requirements"
        # It's common to override `source=` for `python_requirements` to something other than
        # `requirements.txt`. Hence why we don't use `elif` to check for a certain file name.
        else:
            generator_alias = "python_requirements"

        # TODO: Robustly handle if the `name` is already claimed? This can happen, for example,
        #  if you have two `python_requirements` in the same BUILD file. Perhaps error?
        generator_name: str | None
        if (
            generator_tgt.address.spec_path
            and generator_tgt.address.spec_path not in dirs_with_default_name
        ):
            generator_name = None
        elif generator_alias == "pipenv_requirements":
            generator_name = "pipenv"
        elif generator_alias == "poetry_requirements":
            generator_name = "poetry"
        else:
            generator_name = "reqs"

        generators.add(GeneratorRename(build_file_addr.rel_path, generator_alias, generator_name))
        new_addr = Address(
            generator_tgt.address.spec_path,
            target_name=generator_name,
            generated_name=python_req_deps_field.address.target_name,
        )
        generated[python_req_deps_field.address] = (new_addr, generator_alias)

    generators_that_need_renames = sorted(
        generator for generator in generators if generator.new_name is not None
    )
    if generators_that_need_renames:
        changes = bullet_list(
            f'`{generator.alias}` in {generator.build_path}: add `name="{generator.new_name}"'
            for generator in generators_that_need_renames
        )
        logger.error(
            "You must manually add the `name=` field to the following targets. This is not done "
            f"automatically by the `update-build-files` goal.\n\n{changes}"
        )

    return MacroRenames(tuple(sorted(generators)), FrozenDict(sorted(generated.items())))
def maybe_address(val: str, renames: MacroRenames, *, relative_to: str | None) -> Address | None:
    """Parse `val` and return its address iff it refers to a macro-generated target.

    All macros generate targets with an explicit `name`, so a candidate spec
    must contain `:`; macros never use generated-target syntax, so it must not
    contain `#`. Anything else — including unparseable strings — yields None.
    """
    looks_like_macro_address = ":" in val and "#" not in val
    if not looks_like_macro_address:
        return None
    try:
        # We assume that all addresses are normal addresses, rather than file
        # addresses, as none of the generated targets are file addresses.
        candidate = AddressInput.parse(val, relative_to=relative_to).dir_to_address()
    except (AddressParseException, InvalidAddress):
        return None
    if candidate in renames.generated:
        return candidate
    return None
def new_addr_spec(original_val: str, new_addr: Address) -> str:
    """Render `new_addr` as a spec string, keeping relative syntax relative.

    If the original spec was relative (`:tgt`), the replacement stays relative;
    a default-named generator collapses to bare `#generated_name`. Otherwise
    the normalized absolute spec is used.
    """
    if not original_val.startswith(":"):
        return new_addr.spec
    if new_addr.is_default_target:
        return f"#{new_addr.generated_name}"
    return f":{new_addr.target_name}#{new_addr.generated_name}"
class OptionsChecker:
    """Checks a hardcoded list of options for using deprecated addresses.

    This is returned as a rule so that it acts as a singleton.
    """
class OptionsCheckerRequest:
    """Rule-engine request marker used to resolve `OptionsChecker`."""

    pass
@rule(desc="Check option values for Python macro syntax vs. target generator", level=LogLevel.DEBUG)
async def maybe_warn_options_macro_references(
    _: OptionsCheckerRequest,
    flake8: Flake8,
    pylint: Pylint,
    mypy: MyPy,
) -> OptionsChecker:
    """Log an error for option values that still use macro-era addresses.

    Options cannot be rewritten automatically, so the user is told exactly
    which `old -> new` replacements to make by hand.
    """
    renames = await Get(MacroRenames, MacroRenamesRequest())
    renames_by_option: DefaultDict[str, set[tuple[str, str]]] = defaultdict(set)

    def record_renames(deps: UnparsedAddressInputs, option: str) -> None:
        # Collect an (old, new) spec pair for every value that parses to a
        # macro-generated address.
        for value in deps.values:
            old_addr = maybe_address(value, renames, relative_to=None)
            if old_addr:
                replacement = new_addr_spec(value, renames.generated[old_addr][0])
                renames_by_option[option].add((value, replacement))

    record_renames(flake8.source_plugins, "[flake8].source_plugins")
    record_renames(pylint.source_plugins, "[pylint].source_plugins")
    record_renames(mypy.source_plugins, "[mypy].source_plugins")

    if renames_by_option:
        formatted_renames = []
        for option, values in renames_by_option.items():
            formatted_values = sorted(f"{old} -> {new}" for old, new in values)
            formatted_renames.append(f"{option}: {formatted_values}")
        logger.error(
            "These options contain references to generated targets using the old macro syntax, "
            "and you need to manually update them to use the new target generator "
            "syntax. (Typically, these are set in `pants.toml`, but you may need to check CLI "
            f"args or env vars {doc_url('options')})\n\n{bullet_list(formatted_renames)}"
        )
    return OptionsChecker()
class UpdatePythonMacrosRequest(DeprecationFixerRequest):
    """Request to rewrite macro-generated addresses in one BUILD file."""

    pass
@rule(desc="Change Python macros to target generators", level=LogLevel.DEBUG)
async def maybe_update_macros_references(
    request: UpdatePythonMacrosRequest,
    global_options: GlobalOptions,
    update_build_files_subsystem: UpdateBuildFilesSubsystem,
) -> RewrittenBuildFile:
    """Rewrite string literals in a BUILD file that point at macro-generated targets.

    Only runs when `--update-build-files-fix-python-macros` is enabled; raises
    ValueError if the deprecated macros are already disabled globally (nothing
    left to fix). Returns the (possibly unchanged) file plus human-readable
    change descriptions.
    """
    if not update_build_files_subsystem.fix_python_macros:
        return RewrittenBuildFile(request.path, request.lines, ())
    if not global_options.options.use_deprecated_python_macros:
        raise ValueError(
            "`--update-build-files-fix-python-macros` specified when "
            "`[GLOBAL].use_deprecated_python_macros` is already set to false, which means that "
            "there is nothing left to fix."
        )
    # Kick off the options check concurrently; only the renames are consumed here.
    renames, _ = await MultiGet(
        Get(MacroRenames, MacroRenamesRequest()), Get(OptionsChecker, OptionsCheckerRequest())
    )
    changed_generator_aliases = set()

    def maybe_update(input_lines: tuple[str, ...]) -> list[str]:
        # Tokenize the BUILD file and rewrite each STRING token that parses to
        # a renamed address, splicing the new spec between the original quotes.
        tokens = UpdatePythonMacrosRequest("", input_lines, colors_enabled=False).tokenize()
        updated_text_lines = list(input_lines)
        changed_line_indexes = set()
        for token in tokens:
            if token.type is not tokenize.STRING:
                continue
            line_index = token.start[0] - 1
            line = input_lines[line_index]
            # The `prefix` and `suffix` include the quotes for the string.
            prefix = line[: token.start[1] + 1]
            val = line[token.start[1] + 1 : token.end[1] - 1]
            suffix = line[token.end[1] - 1 :]
            addr = maybe_address(val, renames, relative_to=os.path.dirname(request.path))
            if addr is None:
                continue
            # If this line has already been changed, we need to re-tokenize it before we can
            # apply the change. Otherwise, we'll overwrite the prior change.
            if line_index in changed_line_indexes:
                return maybe_update(tuple(updated_text_lines))
            new_addr, generator_alias = renames.generated[addr]
            new_val = new_addr_spec(val, new_addr)
            updated_text_lines[line_index] = f"{prefix}{new_val}{suffix}"
            changed_line_indexes.add(line_index)
            changed_generator_aliases.add(generator_alias)
        return updated_text_lines

    return RewrittenBuildFile(
        request.path,
        tuple(maybe_update(request.lines)),
        change_descriptions=tuple(
            f"Update references to targets generated by `{request.red(alias)}`"
            for alias in changed_generator_aliases
        ),
    )
def rules():
    """Return this module's rules plus the union registration for BUILD rewrites."""
    registered = list(collect_rules())
    registered.append(UnionRule(RewrittenBuildFileRequest, UpdatePythonMacrosRequest))
    return tuple(registered)
| |
""" This script generates a pandadoc.hpp file representing the Python
wrappers that can be parsed by doxygen to generate the Python documentation.
You need to run this before invoking Doxyfile.python.
It requires a valid makepanda installation with interrogatedb .in
files in the lib/pandac/input directory. """
__all__ = []
import os, re
import panda3d, pandac
from panda3d.dtoolconfig import *
LICENSE = """PANDA 3D SOFTWARE
Copyright (c) Carnegie Mellon University. All rights reserved.
All use of this software is subject to the terms of the revised BSD
license. You should have received a copy of this license along
with this source code in a file named \"LICENSE.\"""".split("\n")
def comment(code):
    """Convert a raw C++ comment block into `///` doc lines.

    Strips comment punctuation and surrounding whitespace from each line and
    separates paragraphs (blank lines in the input) with an empty line.
    Returns "" for empty/None input.
    """
    if not code:
        return ""

    result = ''
    pending_break = False
    for line in code.splitlines(False):
        line = line.strip('\t\n /')
        if line:
            # Bug fix: only emit a paragraph break once there is prior content;
            # previously a leading blank line produced a spurious "\n\n" prefix.
            if pending_break and result:
                result += '\n\n'
            elif result:
                result += '\n'
            pending_break = False
            result += '/// ' + line
        else:
            pending_break = True
    return result
def block_comment(code):
    """Convert an interrogate comment block into `///` doc lines.

    Drops `////` separator lines and boilerplate headers (Class, Access,
    Function, Created by, Enum). A leading "Description:" label is stripped
    from its line. Empty lines before the first content line are suppressed.
    Returns "" when nothing remains.
    """
    if not code:
        return ""

    # Cleanup: removed the unused `indent`/`reading_desc` locals and the large
    # commented-out alternative implementation that previously lived here.
    out = []
    for line in code.split("\n"):
        # "////..." separator lines carry no content.
        if line.startswith("////"):
            continue
        line = line.rstrip()
        strline = line.lstrip('/ \t')

        if ':' in strline:
            pre, post = strline.split(':', 1)
            pre = pre.rstrip()
            if pre == "Description":
                strline = post.lstrip()
            elif pre in ("Class", "Access", "Function", "Created by", "Enum"):
                # Metadata headers are not part of the documentation text.
                continue

        # Skip empty lines until the first real content line.
        if strline or out:
            out.append('/// ' + strline)

    return '\n'.join(out)
def translateFunctionName(name):
    """Translate a snake_case C++ function name into camelCase.

    Dunder names (e.g. __init__) are passed through untouched.
    """
    if name.startswith("__"):
        return name

    result = ""
    for part in name.split("_"):
        if not result:
            # Everything up to (and including) the first non-empty part stays
            # lowercase.
            result += part
        elif part:
            result += part[0].upper() + part[1:]
    return result
def translateTypeName(name, mangle=True):
    # Equivalent to C++ classNameFromCppName: when mangling, underscores and
    # spaces become CamelCase boundaries and punctuation is dropped; when not
    # mangling, punctuation is replaced with underscores and the rest is kept.
    punctuation = "!@#$%^&*()<>,.-=+~{}? "
    result = ""
    upper_next = mangle  # capitalize the very first character when mangling
    for ch in name:
        if mangle and ch in "_ ":
            upper_next = True
        elif ch in punctuation:
            if not mangle:
                result += '_'
        elif upper_next:
            result += ch.upper()
            upper_next = False
        else:
            result += ch
    return result
def translated_type_name(type, scoped=True):
    # Return the Python-facing name for an interrogate type handle,
    # unwrapping const/wrapped types and mapping a few C++ names to their
    # Python equivalents. With scoped=True, nested types are qualified with
    # their outer class (C++ `Outer::Inner` syntax for the doxygen output).
    while interrogate_type_is_wrapped(type):
        if interrogate_type_is_const(type):
            return 'const ' + translated_type_name(interrogate_type_wrapped_type(type))
        else:
            type = interrogate_type_wrapped_type(type)
    typename = interrogate_type_name(type)
    if typename in ("PyObject", "_object"):
        return "object"
    elif typename == "PN_stdfloat":
        return "float"
    if interrogate_type_is_atomic(type):
        # NOTE(review): atomic token 7 is presumably the string type in the
        # interrogatedb encoding — verify against interrogate's headers.
        token = interrogate_type_atomic_token(type)
        if token == 7:
            return 'str'
        else:
            return typename
    if not typename.endswith('_t'):
        # Hack: don't mangle size_t etc.
        typename = translateTypeName(typename)
    if scoped and interrogate_type_is_nested(type):
        return translated_type_name(interrogate_type_outer_class(type)) + '::' + typename
    else:
        return typename
def processElement(handle, element):
    # Emit one data member declaration (with its doc comment, if any) for an
    # interrogate element. Python 2 `print >>` syntax writes to `handle`.
    if interrogate_element_has_comment(element):
        print >>handle, comment(interrogate_element_comment(element))

    print >>handle, translated_type_name(interrogate_element_type(element)),
    print >>handle, interrogate_element_name(element) + ';'
def processFunction(handle, function, isConstructor = False):
    # Emit one C++-style declaration per Python wrapper of `function`.
    # Constructors are always rendered as `__init__(...)`; methods without a
    # `this` parameter are rendered as `static`.
    for i_wrapper in xrange(interrogate_function_number_of_python_wrappers(function)):
        wrapper = interrogate_function_python_wrapper(function, i_wrapper)
        if interrogate_wrapper_has_comment(wrapper):
            print >>handle, block_comment(interrogate_wrapper_comment(wrapper))

        if not isConstructor:
            if interrogate_function_is_method(function):
                if not interrogate_wrapper_number_of_parameters(wrapper) > 0 or not interrogate_wrapper_parameter_is_this(wrapper, 0):
                    print >>handle, "static",

            if interrogate_wrapper_has_return_value(wrapper):
                print >>handle, translated_type_name(interrogate_wrapper_return_type(wrapper)),
            else:
                pass#print >>handle, "void",

            print >>handle, translateFunctionName(interrogate_function_name(function)) + "(",
        else:
            print >>handle, "__init__(",

        # Write the parameter list, skipping the implicit `this` parameter.
        first = True
        for i_param in range(interrogate_wrapper_number_of_parameters(wrapper)):
            if not interrogate_wrapper_parameter_is_this(wrapper, i_param):
                if not first:
                    print >>handle, ",",
                print >>handle, translated_type_name(interrogate_wrapper_parameter_type(wrapper, i_param)),
                if interrogate_wrapper_parameter_has_name(wrapper, i_param):
                    print >>handle, interrogate_wrapper_parameter_name(wrapper, i_param),
                first = False

        print >>handle, ");"
def processType(handle, type):
    # Emit a full C++-style declaration for an interrogate type: an enum body,
    # a typedef, or a class/struct/union with its nested types, constructors,
    # methods, make-seqs and elements. Recurses into nested types.
    typename = translated_type_name(type, scoped=False)
    derivations = [ translated_type_name(interrogate_type_get_derivation(type, n)) for n in range(interrogate_type_number_of_derivations(type)) ]

    if interrogate_type_has_comment(type):
        print >>handle, block_comment(interrogate_type_comment(type))

    if interrogate_type_is_enum(type):
        print >>handle, "enum %s {" % typename
        for i_value in range(interrogate_type_number_of_enum_values(type)):
            docstring = comment(interrogate_type_enum_value_comment(type, i_value))
            if docstring:
                print >>handle, docstring
            print >>handle, interrogate_type_enum_value_name(type, i_value), "=", interrogate_type_enum_value(type, i_value), ","

    elif interrogate_type_is_typedef(type):
        wrapped_type = translated_type_name(interrogate_type_wrapped_type(type))
        print >>handle, "typedef %s %s;" % (wrapped_type, typename)
        return
    else:
        if interrogate_type_is_struct(type):
            classtype = "struct"
        elif interrogate_type_is_class(type):
            classtype = "class"
        elif interrogate_type_is_union(type):
            classtype = "union"
        else:
            print "I don't know what type %s is" % interrogate_type_true_name(type)
            return

        if len(derivations) > 0:
            print >>handle, "%s %s : public %s {" % (classtype, typename, ", public ".join(derivations))
        else:
            print >>handle, "%s %s {" % (classtype, typename)
        print >>handle, "public:"

    # Body: nested types first, then constructors, methods, make-seqs (exposed
    # as list-returning accessors), and finally data members.
    for i_ntype in xrange(interrogate_type_number_of_nested_types(type)):
        processType(handle, interrogate_type_get_nested_type(type, i_ntype))

    for i_method in xrange(interrogate_type_number_of_constructors(type)):
        processFunction(handle, interrogate_type_get_constructor(type, i_method), True)

    for i_method in xrange(interrogate_type_number_of_methods(type)):
        processFunction(handle, interrogate_type_get_method(type, i_method))

    for i_method in xrange(interrogate_type_number_of_make_seqs(type)):
        print >>handle, "list", translateFunctionName(interrogate_make_seq_seq_name(interrogate_type_get_make_seq(type, i_method))), "();"

    for i_element in xrange(interrogate_type_number_of_elements(type)):
        processElement(handle, interrogate_type_get_element(type, i_element))

    print >>handle, "};"
def processModule(handle, package):
print >>handle, "namespace %s {" % package
if package != "core":
print >>handle, "using namespace core;"
for i_type in xrange(interrogate_number_of_global_types()):
type = interrogate_get_global_type(i_type)
if interrogate_type_has_module_name(type):
module_name = interrogate_type_module_name(type)
if "panda3d." + package == module_name:
processType(handle, type)
else:
print "Type %s has no module name" % typename
for i_func in xrange(interrogate_number_of_global_functions()):
func = interrogate_get_global_function(i_func)
if interrogate_function_has_module_name(func):
module_name = interrogate_function_module_name(func)
if "panda3d." + package == module_name:
processFunction(handle, func)
else:
print "Type %s has no module name" % typename
print >>handle, "}"
if __name__ == "__main__":
    # Generate pandadoc.hpp: a fake C++ header describing the Python wrappers,
    # which doxygen then parses to build the Python API documentation.
    handle = open("pandadoc.hpp", "w")
    print >>handle, comment("Panda3D modules that are implemented in C++.")
    print >>handle, "namespace panda3d {"

    # Determine the path to the interrogatedb files
    interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "..", "..", "etc"))
    interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "input"))

    # `core` must be processed first: the other modules emit
    # `using namespace core;` and rely on its types.
    import panda3d.core
    processModule(handle, "core")

    # Import and process every other compiled panda3d module in the package dir.
    for lib in os.listdir(os.path.dirname(panda3d.__file__)):
        if lib.endswith(('.pyd', '.so')) and not lib.startswith('core.'):
            module_name = os.path.splitext(lib)[0]
            __import__("panda3d." + module_name)
            processModule(handle, module_name)

    print >>handle, "}"
    handle.close()
| |
import copy
import os
import time
import traceback
from discord import Channel, Member, Server, User
import asyncio
from musicbot.exceptions import (BrokenEntryError, ExtractionError,
OutdatedEntryError)
from musicbot.lib.serialisable import Serialisable, WebSerialisable
from musicbot.lyrics import search_for_lyrics
from musicbot.radio import RadioSongExtractor, StationInfo
from musicbot.spotify import SpotifyTrack
from musicbot.utils import (clean_songname, get_header, get_image_brightness,
md5sum, slugify)
from musicbot.web_author import WebAuthor
class Entry:
    """Entry (de)serialisation helpers plus the format-version gate."""

    version_code = "1.0.3"
    # Numeric form of the version ("1.0.3" -> 103), used for comparisons.
    version = int(version_code.replace(".", ""))

    # Types whose values can be stored in a serialised meta dict as-is.
    can_encode = (int, dict, list, str, int, float, bool)
    # Discord models that are serialised by id/name and re-resolved on load.
    default_encode = (Channel, Member, Server, User)
    # Only these meta keys are persisted; everything else is dropped.
    meta_dict_keys = ("author", "playlist")

    @classmethod
    def from_dict(cls, queue, data):
        """Re-create a concrete entry from its serialised form.

        Dispatches on the "type" key to the entry class of the same name.
        Raises OutdatedEntryError, BrokenEntryError, KeyError, or TypeError
        when the data cannot be deserialised.
        """
        entry_version = data.get("version", 0)
        if entry_version < Entry.version:
            raise OutdatedEntryError("Version parameter signifies an outdated entry")

        if data.get("broken", False):
            raise BrokenEntryError("This entry has been marked as broken")

        entry_type = data.get("type", None)
        if not entry_type:
            raise KeyError("Data does not include a type parameter")

        target = globals().get(entry_type, None)
        if not target:
            raise TypeError("Cannot create an entry with this type")

        return target.from_dict(queue, data)

    @staticmethod
    def create_meta_dict(meta):
        """Serialise the whitelisted meta entries into plain dicts."""
        meta_dict = {}
        for key, value in meta.items():
            if key is None or value is None:
                continue
            # remove unwanted meta stuff
            if str(key).lower() not in Entry.meta_dict_keys:
                continue

            ser_value = {"type": value.__class__.__name__}
            if isinstance(value, Entry.can_encode) or value is None:
                ser_value.update({
                    "type": "built-in/" + ser_value["type"],
                    "value": value
                })
            else:
                if isinstance(value, Entry.default_encode):
                    # Discord objects are stored by id/name and resolved again
                    # in meta_from_dict.
                    ser_value.update({
                        "id": value.id,
                        "name": value.name,
                    })

            meta_dict[key] = ser_value
        return meta_dict

    @staticmethod
    def meta_from_dict(data, bot):
        """Inverse of create_meta_dict: resolve serialised meta via the bot."""
        meta = {}
        for key, ser_value in data.items():
            value = None
            value_type = ser_value["type"]
            if value_type.startswith("built-in"):
                value = ser_value["value"]
            elif value_type in ("Member", "User"):
                value = bot.get_global_user(ser_value["id"])
            elif value_type == "Server":
                value = bot.get_server(ser_value["id"])
            elif value_type == "Channel":
                # Bug fix: this previously compared `value` (always None at
                # this point) instead of `value_type`, so serialised channels
                # were silently never restored.
                value = bot.get_channel(ser_value["id"])

            if value:
                meta[key] = value
        return meta
class BaseEntry(Serialisable, WebSerialisable):
    """Abstract queue entry: holds the url/meta and the download lifecycle.

    Subclasses must implement `title`, `_download`, `from_dict` and `to_dict`.
    """

    def __init__(self, queue, url, **meta):
        self.queue = queue
        self.url = url
        self.meta = meta
        # Local media path/URL once downloaded; None while not ready.
        self.filename = None
        self.duration = 0
        self._is_downloading = False
        # Futures handed out by get_ready_future, resolved when download ends.
        self._waiting_futures = []
        # Cached lyrics; `_lyrics_dirty` forces a re-fetch on next access.
        self._lyrics = None
        self._lyrics_dirty = False

    @property
    def title(self):
        raise NotImplementedError

    @property
    def lyrics_title(self):
        # Search string used for lyrics lookup; subclasses may refine it.
        return self.title

    @property
    def lyrics(self):
        if self._lyrics_dirty or not self._lyrics:
            self._lyrics = search_for_lyrics(self.lyrics_title)
            self._lyrics_dirty = False

        return self._lyrics

    @property
    def _is_current_entry(self):
        # True when this entry is what the player is currently playing.
        current_entry = self.queue.player.current_entry
        return self == current_entry

    @property
    def is_downloaded(self):
        if self._is_downloading:
            return False

        return bool(self.filename)

    @property
    def sortby(self):
        # Sort key for queue display; subclasses usually return the title.
        return self.url

    @property
    def start_seconds(self):
        # Playback start offset; None means "from the beginning".
        return None

    async def _download(self):
        raise NotImplementedError

    @classmethod
    def from_dict(cls, data, queue):
        raise NotImplementedError

    def copy(self):
        return copy.copy(self)

    def to_dict(self):
        raise NotImplementedError

    def to_web_dict(self, skip_calc=False):
        return self.to_dict()

    def get_ready_future(self):
        """
        Returns a future that will fire when the song is ready to be played. The future will either fire with the result (being the entry) or an exception
        as to why the song download failed.
        """
        future = asyncio.Future()
        if self.is_downloaded:
            # In the event that we're downloaded, we're already ready for
            # playback.
            future.set_result(self)
        else:
            # If we request a ready future, let's ensure that it'll actually
            # resolve at one point.
            asyncio.ensure_future(self._download())
            self._waiting_futures.append(future)

        return future

    def _for_each_future(self, cb):
        """
        Calls `cb` for each future that is not cancelled. Absorbs and logs any errors that may have occurred.
        """
        # Take ownership of the list first so callbacks can safely request
        # new futures while we iterate.
        futures = self._waiting_futures
        self._waiting_futures = []

        for future in futures:
            if future.cancelled():
                continue

            try:
                cb(future)
            except:
                traceback.print_exc()

    def __eq__(self, other):
        # Identity semantics: two entries are equal only if they are the same
        # object (copies are deliberately distinct).
        return self is other

    def __hash__(self):
        return id(self)
class StreamEntry(BaseEntry):
    """An endless stream (no fixed duration), e.g. a live broadcast URL."""

    def __init__(self, queue, url, title, destination=None, **meta):
        super().__init__(queue, url, **meta)
        self._title = title
        # Optional direct media URL; used as a fallback extraction source.
        self.destination = destination
        if self.destination:
            self.filename = self.destination

    @property
    def title(self):
        return self._title

    @property
    def sortby(self):
        return self._title

    def set_start(self, sec):
        # Streams cannot be repositioned.
        raise NotImplementedError

    def set_end(self, sec):
        # Streams have no defined end.
        raise NotImplementedError

    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild the entry from `to_dict()` output; raises AttributeError on a type mismatch."""
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")

        meta_dict = data.get("meta", None)
        if meta_dict:
            meta = Entry.meta_from_dict(meta_dict, queue.bot)
        else:
            meta = {}

        url = data["url"]
        title = data["title"]

        return cls(queue, url, title, **meta)

    def to_dict(self):
        meta_dict = Entry.create_meta_dict(self.meta)

        data = {
            "version": Entry.version,
            "type": self.__class__.__name__,
            "url": self.url,
            "title": self._title,
            "meta": meta_dict
        }
        return data

    def to_web_dict(self, skip_calc=False):
        # Describe where this entry came from (a playlist or a user), if known.
        origin = None
        if self.meta:
            if "playlist" in self.meta:
                origin = {"type": "playlist"}
                origin.update(self.meta["playlist"])
            elif "author" in self.meta:
                origin = {"type": "user"}
                web_author = WebAuthor.from_id(self.meta["author"].id)
                origin.update(web_author.to_dict())

        data = {
            "type": self.__class__.__name__,
            "url": self.url,
            "origin": origin,
            "title": self.title
        }
        return data

    async def _download(self, *, fallback=False):
        """Resolve the playable media URL, retrying once with `destination` on failure."""
        self._is_downloading = True
        url = self.destination if fallback else self.url

        try:
            result = await self.queue.downloader.extract_info(
                self.queue.loop, url, download=False)
        except Exception as e:
            if not fallback and self.destination:
                return await self._download(fallback=True)

            raise ExtractionError(e)
        else:
            self.filename = result["url"]
        finally:
            self._is_downloading = False
class RadioStationEntry(StreamEntry):
    """A streamed radio station, described by a `StationInfo` object."""

    def __init__(self, queue, station_data, destination=None, **meta):
        # url and title are derived from the station description.
        super().__init__(queue, station_data.url, station_data.name, destination, **meta)
        self.station_data = station_data
        self.station_name = station_data.name
        self._cover = self.station_data.cover

    @property
    def title(self):
        return self._title

    @property
    def cover(self):
        return self._cover

    @property
    def thumbnail(self):
        return self.station_data.thumbnail

    @property
    def link(self):
        return self.station_data.website

    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild the entry from `to_dict()` output.

        Bug fix: this previously called `cls(queue, url, title, station_data,
        **meta)`, which does not match `__init__(queue, station_data,
        destination=None, **meta)` and raised a TypeError on every load. The
        url/title are derived from `station_data`, so only the station needs
        to be passed.
        """
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")

        meta_dict = data.get("meta", None)
        if meta_dict:
            meta = Entry.meta_from_dict(meta_dict, queue.bot)
        else:
            meta = {}

        station_data = StationInfo.from_dict(data["station_data"])
        return cls(queue, station_data, **meta)

    def to_dict(self):
        d = super().to_dict()
        d.update({
            "station_data": self.station_data.to_dict()
        })
        return d

    def to_web_dict(self, skip_calc=False):
        # Describe where this entry came from (a playlist or a user), if known.
        origin = None
        if self.meta:
            if "playlist" in self.meta:
                origin = {"type": "playlist"}
                origin.update(self.meta["playlist"])
            elif "author" in self.meta:
                origin = {"type": "user"}
                web_author = WebAuthor.from_id(self.meta["author"].id)
                origin.update(web_author.to_dict())

        data = {
            "type": self.__class__.__name__,
            "url": self.url,
            "thumbnail": self.thumbnail,
            "thumbnail_brightness": get_image_brightness(url=self.thumbnail),
            "origin": origin,
            "title": self.title,
            "cover": self.cover,
            "link": self.link
        }
        return data
class RadioSongEntry(RadioStationEntry):
    """A radio station whose currently playing song can be polled.

    While this entry is the player's current entry, title/artist/cover/link
    reflect the live song info instead of the static station data.
    """

    def __init__(self, queue, station_data, destination=None, **meta):
        super().__init__(queue, station_data, destination, **meta)
        # Cached current-song info plus the timestamp of the last poll.
        self._current_song_info = None
        self._csi_poll_time = 0
        self.poll_time = station_data.poll_time
        self.uncertainty = station_data.uncertainty

    @property
    def sortby(self):
        return self.title

    @property
    def lyrics_title(self):
        return "{} - {}".format(self.title, self.artist)

    def _get_new_song_info(self):
        self._current_song_info = RadioSongExtractor.get_current_song(
            self.station_data)
        self._csi_poll_time = time.time()

    @property
    def current_song_info(self):
        # Re-poll when never fetched or the cache is older than 5 seconds;
        # lyrics are invalidated because the song may have changed.
        if self._current_song_info is None or (time.time() - self._csi_poll_time) > 5:
            print("[RadioEntry] getting new current_song_info")
            self._lyrics_dirty = True
            self._get_new_song_info()

        return self._current_song_info

    @property
    def song_progress(self):
        if not self._is_current_entry:
            return None

        return self.current_song_info["progress"]

    @property
    def song_duration(self):
        if not self._is_current_entry:
            return None

        return self.current_song_info["duration"]

    @property
    def link(self):
        if not self._is_current_entry:
            return super().link

        return self.current_song_info["youtube"] or super().link

    @property
    def title(self):
        if not self._is_current_entry:
            return super().title

        return self.current_song_info["title"]

    @property
    def artist(self):
        if not self._is_current_entry:
            return None

        return self.current_song_info["artist"]

    @property
    def cover(self):
        if not self._is_current_entry:
            return super().cover

        return self.current_song_info["cover"]

    def to_web_dict(self, skip_calc=False):
        data = super().to_web_dict(skip_calc=skip_calc)

        data.update({
            "station": self.station_data.to_dict(),
            "title": self.title,
            "artist": self.artist,
            "cover": self.cover,
            "song_progress": self.song_progress,
            "song_duration": self.song_duration
        })
        return data
class YoutubeEntry(BaseEntry):
    def __init__(self, queue, video_id, url, title, duration, thumbnail, description, expected_filename=None, thumbnail_brightness=None, **meta):
        """A downloadable YouTube video entry.

        Optional meta keys "end_seconds", "start_seconds" and "seek_seconds"
        configure the playback window; brightness is computed lazily when not
        supplied.
        """
        super().__init__(queue, url, **meta)
        self.video_id = video_id
        self._title = title
        self.thumbnail = thumbnail
        self._thumbnail_brightness = thumbnail_brightness
        self.description = description
        self.duration = duration
        # Playback window: defaults to the full video.
        self.end_seconds = meta.get("end_seconds", duration)
        self._start_seconds = meta.get("start_seconds", 0)
        self._seek_seconds = meta.get("seek_seconds", None)
        self.expected_filename = expected_filename
        self.download_folder = self.queue.downloader.download_folder
    @property
    def title(self):
        # Display title with noise (e.g. "official video" suffixes) cleaned off.
        return clean_songname(self._title)
    @property
    def thumbnail_brightness(self):
        # Lazily computed and cached; fetching the image is comparatively slow.
        if not self._thumbnail_brightness:
            self._thumbnail_brightness = get_image_brightness(url=self.thumbnail)

        return self._thumbnail_brightness
    @property
    def sortby(self):
        # Sort by the cleaned title rather than the url (the base class default).
        return clean_songname(self._title)
@property
def start_seconds(self):
secs = 0
if self._seek_seconds is not None:
secs = self._seek_seconds
else:
secs = self._start_seconds
return secs
    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild the entry from `to_dict()` output; raises AttributeError on a type mismatch."""
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")

        meta_dict = data.get("meta", None)
        if meta_dict:
            meta = Entry.meta_from_dict(meta_dict, queue.bot)
        else:
            meta = {}

        filename = data["expected_filename"]
        video_id = data["video_id"]
        url = data["url"]
        title = data["title"]
        duration = data["duration"]
        thumbnail = data["thumbnail"]
        # Older serialisations may lack the cached brightness; recomputed lazily.
        thumbnail_brightness = data.get("thumbnail_brightness")
        description = data["description"]

        return cls(queue, video_id, url, title, duration, thumbnail, description, expected_filename=filename, thumbnail_brightness=thumbnail_brightness, **meta)
def seek(self, secs):
if not 0 <= secs < self.end_seconds:
return False
self._seek_seconds = secs
return True
def set_start(self, secs):
if not 0 <= secs < self.end_seconds:
return False
self._start_seconds = secs
return True
def set_end(self, secs):
if not 0 < secs <= self.duration:
return False
self.end_seconds = sec
return True
def copy(self):
new = copy.copy(self)
new._seek_seconds = None
return new
def to_dict(self):
meta_dict = Entry.create_meta_dict(self.meta)
data = {
"version": Entry.version,
"type": self.__class__.__name__,
"expected_filename": self.expected_filename,
"video_id": self.video_id,
"url": self.url,
"title": self._title,
"duration": self.duration,
"thumbnail": self.thumbnail,
"thumbnail_brightness": self._thumbnail_brightness,
"description": self.description,
"meta": meta_dict
}
return data
def to_web_dict(self, skip_calc=False):
origin = None
if self.meta:
if "playlist" in self.meta:
origin = {"type": "playlist"}
origin.update(self.meta["playlist"])
elif "author" in self.meta:
origin = {"type": "user"}
web_author = WebAuthor.from_id(self.meta["author"].id)
origin.update(web_author.to_dict())
data = {
"type": self.__class__.__name__,
"url": self.url,
"thumbnail": self.thumbnail,
"origin": origin,
"title": self.title,
"duration": self.duration,
}
if not skip_calc:
data["thumbnail_brightness"] = self.thumbnail_brightness
return data
async def _download(self):
if self._is_downloading:
return
self._is_downloading = True
try:
# Ensure the folder that we're going to move into exists.
if not os.path.exists(self.download_folder):
os.makedirs(self.download_folder)
# self.expected_filename:
# audio_cache\youtube-9R8aSKwTEMg-NOMA_-_Brain_Power.m4a
if self.expected_filename is None:
self.expected_filename = slugify("unknown" + self.title)
extractor = os.path.basename(self.expected_filename).split("-")[0]
# the generic extractor requires special handling
if extractor == "generic":
# print("Handling generic")
flistdir = [
f.rsplit("-", 1)[0]
for f in os.listdir(self.download_folder)
]
expected_fname_noex, fname_ex = os.path.basename(
self.expected_filename).rsplit(".", 1)
if expected_fname_noex in flistdir:
try:
rsize = int(
await get_header(self.queue.bot.aiosession,
self.url, "CONTENT-LENGTH"))
except:
rsize = 0
lfile = os.path.join(self.download_folder,
os.listdir(self.download_folder)
[flistdir.index(expected_fname_noex)])
# print("Resolved %s to %s" % (self.expected_filename, lfile))
lsize = os.path.getsize(lfile)
# print("Remote size: %s Local size: %s" % (rsize, lsize))
if lsize != rsize:
await self._really_download(hash=True)
else:
# print("[Download] Cached:", self.url)
self.filename = lfile
else:
# print("File not found in cache (%s)" % expected_fname_noex)
await self._really_download(hash=True)
else:
ldir = os.listdir(self.download_folder)
flistdir = [f.rsplit(".", 1)[0] for f in ldir]
expected_fname_base = os.path.basename(self.expected_filename)
expected_fname_noex = expected_fname_base.rsplit(".", 1)[0]
# idk wtf this is but its probably legacy code
# or i have youtube to blame for changing shit again
if expected_fname_base in ldir:
self.filename = os.path.join(self.download_folder,
expected_fname_base)
print("[Download] Cached:", self.url)
elif expected_fname_noex in flistdir:
print("[Download] Cached (different extension):", self.url)
self.filename = os.path.join(
self.download_folder,
ldir[flistdir.index(expected_fname_noex)])
print("Expected %s, got %s" %
(self.expected_filename.rsplit(".", 1)[-1],
self.filename.rsplit(".", 1)[-1]))
else:
await self._really_download()
# Trigger ready callbacks.
self._for_each_future(lambda future: future.set_result(self))
except Exception as e:
traceback.print_exc()
self._for_each_future(lambda future: future.set_exception(e))
finally:
self._is_downloading = False
async def _really_download(self, *, hash=False):
print("[Download] Started:", self.url)
try:
result = await self.queue.downloader.extract_info(
self.queue.loop, self.url, download=True)
except Exception as e:
raise ExtractionError(e)
print("[Download] Complete:", self.url)
if result is None:
raise ExtractionError("ytdl broke and hell if I know why")
# What the duck do I do now?
self.filename = unhashed_fname = self.queue.downloader.ytdl.prepare_filename(
result)
if hash:
# insert the 8 last characters of the file hash to the file name to
# ensure uniqueness
self.filename = md5sum(
unhashed_fname,
8).join("-.").join(unhashed_fname.rsplit(".", 1))
if os.path.isfile(self.filename):
# Oh bother it was actually there.
os.unlink(unhashed_fname)
else:
# Move the temporary file to it's final location.
os.rename(unhashed_fname, self.filename)
class TimestampEntry(YoutubeEntry):
    """A video split into named sections ("sub-entries") via timestamps.

    Each element of ``sub_queue`` is a dict with at least ``name`` and
    ``start`` (seconds from the beginning of the video).
    """

    def __init__(self, queue, video_id, url, title, duration, thumbnail, description, sub_queue, expected_filename=None, thumbnail_brightness=None, **meta):
        super().__init__(queue, video_id, url, title, duration, thumbnail, description, expected_filename=expected_filename, thumbnail_brightness=thumbnail_brightness, **meta)
        self.sub_queue = sub_queue

    @property
    def current_sub_entry(self):
        """The sub-entry playback is currently in (first one when inactive).

        Side effects: stamps the chosen sub-entry with its ``progress`` and
        marks the lyrics cache dirty.
        """
        if not self._is_current_entry:
            return self.sub_queue[0]
        progress = self.queue.player.progress
        current = None
        for candidate in self.sub_queue:
            # Keep advancing to the last section that has already started.
            if current is None or progress >= candidate["start"]:
                current = candidate
        current["progress"] = max(progress - current["start"], 0)
        self._lyrics_dirty = True
        return current

    @property
    def title(self):
        """Title of the active section while playing, whole title otherwise."""
        raw = self.current_sub_entry["name"] if self._is_current_entry else self._title
        return clean_songname(raw)

    @property
    def whole_title(self):
        """Cleaned title of the complete video."""
        return clean_songname(self._title)

    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild a TimestampEntry from its serialized form."""
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")
        meta_dict = data.get("meta", None)
        meta = Entry.meta_from_dict(meta_dict, queue.bot) if meta_dict else {}
        return cls(
            queue,
            data["video_id"],
            data["url"],
            data["title"],
            data["duration"],
            data["thumbnail"],
            data["description"],
            data["sub_queue"],
            expected_filename=data["expected_filename"],
            thumbnail_brightness=data.get("thumbnail_brightness"),
            **meta
        )

    def to_dict(self):
        """Serialize; adds the sub-queue on top of the base serialization."""
        serialized = super().to_dict()
        serialized["sub_queue"] = self.sub_queue
        return serialized

    def to_web_dict(self, skip_calc=False):
        """Web serialization including the currently active sub-entry."""
        web = super().to_web_dict(skip_calc=skip_calc)
        web["whole_title"] = self.whole_title
        web["title"] = self.title
        web["sub_entry"] = self.current_sub_entry
        return web
class GieselaEntry(YoutubeEntry):
    """An entry enriched with curated song metadata (artist, album, cover)."""

    def __init__(self, queue, video_id, url, title, duration, thumbnail, description, song_title, artist, artist_image, album, cover, expected_filename=None, thumbnail_brightness=None, **meta):
        super().__init__(queue, video_id, url, title, duration, thumbnail, description, expected_filename=expected_filename, thumbnail_brightness=thumbnail_brightness, **meta)
        self.song_title = song_title
        self.artist = artist
        self.artist_image = artist_image
        self.cover = cover
        self.album = album

    @property
    def title(self):
        """Display title in "artist - song" form."""
        return "%s - %s" % (self.artist, self.song_title)

    @property
    def lyrics_title(self):
        """Lookup title for lyrics search in "song - artist" form."""
        return "%s - %s" % (self.song_title, self.artist)

    @property
    def sortby(self):
        """Sort key: the curated song title."""
        return self.song_title

    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild a GieselaEntry from its serialized form."""
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")
        meta_dict = data.get("meta", None)
        meta = Entry.meta_from_dict(meta_dict, queue.bot) if meta_dict else {}
        return cls(
            queue,
            data["video_id"],
            data["url"],
            data["title"],
            data["duration"],
            data["thumbnail"],
            data["description"],
            data["song_title"],
            data["artist"],
            data["artist_image"],
            data["album"],
            data["cover"],
            expected_filename=data["expected_filename"],
            thumbnail_brightness=data.get("thumbnail_brightness"),
            **meta
        )

    @classmethod
    def upgrade(cls, previous_entry, song_title, artist, artist_image, album, cover):
        """Promote an existing entry to a GieselaEntry with curated metadata."""
        kwargs = dict(
            queue=previous_entry.queue,
            video_id=previous_entry.video_id,
            url=previous_entry.url,
            title=previous_entry._title,
            duration=previous_entry.duration,
            thumbnail=previous_entry.thumbnail,
            description=previous_entry.description,
            expected_filename=previous_entry.expected_filename,
            song_title=song_title,
            artist=artist,
            artist_image=artist_image,
            album=album,
            cover=cover,
        )
        kwargs.update(previous_entry.meta)
        return cls(**kwargs)

    def to_dict(self):
        """Serialize; adds the curated fields on top of the base serialization."""
        serialized = super().to_dict()
        serialized["song_title"] = self.song_title
        serialized["artist"] = self.artist
        serialized["artist_image"] = self.artist_image
        serialized["cover"] = self.cover
        serialized["album"] = self.album
        return serialized

    def to_web_dict(self, skip_calc=False):
        """Web serialization using the curated song title and artwork."""
        web = super().to_web_dict(skip_calc=skip_calc)
        web["title"] = self.song_title
        web["artist"] = self.artist
        web["album"] = self.album
        web["cover"] = self.cover
        return web
# Marker subclass: entries whose metadata was sourced from VGM.
# Behavior is identical to GieselaEntry; the distinct type name is what
# gets serialized (see from_dict's type check).
class VGMEntry(GieselaEntry):
    pass
# Marker subclass: entries whose metadata was sourced from Discogs.
# Behavior is identical to GieselaEntry; only the serialized type name differs.
class DiscogsEntry(GieselaEntry):
    pass
class SpotifyEntry(GieselaEntry):
    """A GieselaEntry whose curated metadata comes from a Spotify track."""

    def __init__(self, queue, video_id, url, title, duration, thumbnail, description, spotify_track, expected_filename=None, thumbnail_brightness=None, **meta):
        primary_artist = spotify_track.artists[0]
        super().__init__(
            queue, video_id, url, title, duration, thumbnail, description,
            spotify_track.name,
            spotify_track.artist_string,
            primary_artist.image,
            spotify_track.album.name,
            spotify_track.cover_url,
            expected_filename=expected_filename,
            thumbnail_brightness=thumbnail_brightness,
            **meta
        )
        self.spotify_data = spotify_track
        # Spotify reports popularity as 0-100; store it normalized to 0-1.
        self.popularity = spotify_track.popularity / 100

    @classmethod
    def from_dict(cls, queue, data):
        """Rebuild a SpotifyEntry (including its track object) from a dict."""
        if data["type"] != cls.__name__:
            raise AttributeError("This data isn't of this entry type")
        meta_dict = data.get("meta", None)
        meta = Entry.meta_from_dict(meta_dict, queue.bot) if meta_dict else {}
        track = SpotifyTrack.from_dict(data["spotify_data"])
        return cls(
            queue,
            data["video_id"],
            data["url"],
            data["title"],
            data["duration"],
            data["thumbnail"],
            data["description"],
            track,
            expected_filename=data["expected_filename"],
            thumbnail_brightness=data.get("thumbnail_brightness"),
            **meta
        )

    def to_dict(self):
        """Serialize; embeds the full Spotify track data."""
        serialized = super().to_dict()
        serialized["spotify_data"] = self.spotify_data.get_dict()
        return serialized
| |
#
# AstroImage.py -- Abstraction of an astronomical data image.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys, os
import re
import math
import logging
import time
import traceback
import numpy, numpy.ma
from ginga.util import wcsmod, io_fits, iohelper
from ginga.util import wcs, iqcalc
from ginga.BaseImage import BaseImage, ImageError, Header
from ginga.misc import Bunch
from ginga import trcalc
import ginga.util.six as six
from ginga.util.six.moves import map, zip
class AstroHeader(Header):
    # Distinct header type for astronomical images; no behavior is added
    # beyond the base Header class.
    pass
class AstroImage(BaseImage):
    """
    Abstraction of an astronomical data (image).

    Wraps a 2D slice of (possibly multidimensional) data together with its
    header, a pluggable WCS object and a pluggable I/O (loader) object.

    NOTE: this module is NOT thread-safe!
    """
    # class variables for WCS and IO can be set
    wcsClass = None
    ioClass = None

    @classmethod
    def set_wcsClass(cls, klass):
        # Install a pluggable WCS class for images created afterwards.
        cls.wcsClass = klass

    @classmethod
    def set_ioClass(cls, klass):
        # Install a pluggable I/O (loader) class for images created afterwards.
        cls.ioClass = klass

    def __init__(self, data_np=None, metadata=None, logger=None,
                 name=None, wcsclass=wcsClass, ioclass=ioClass,
                 inherit_primary_header=False):
        # NOTE(review): the defaults `wcsclass=wcsClass, ioclass=ioClass` were
        # evaluated once when the class body was created (both None at that
        # point), so later calls to set_wcsClass()/set_ioClass() do NOT change
        # these defaults -- confirm whether that is intended.
        BaseImage.__init__(self, data_np=data_np, metadata=metadata,
                           logger=logger, name=name)
        # wcsclass specifies a pluggable WCS module
        if wcsclass is None:
            wcsclass = wcsmod.WCS
        self.wcs = wcsclass(self.logger)
        # ioclass specifies a pluggable IO module
        if ioclass is None:
            ioclass = io_fits.fitsLoaderClass
        self.io = ioclass(self.logger)
        self.inherit_primary_header = inherit_primary_header
        if self.inherit_primary_header:
            # User wants to inherit from primary header--this will hold it
            self._primary_hdr = AstroHeader()
        else:
            self._primary_hdr = None
        if metadata is not None:
            header = self.get_header()
            self.wcs.load_header(header)
        # For navigating multidimensional data
        self.naxispath = []
        self.revnaxis = []
        # Handle to the full (possibly >2D) data array; set by the loaders.
        self._md_data = None

    def load_hdu(self, hdu, fobj=None, naxispath=None):
        """Load image data, header and WCS from an already-opened HDU.

        fobj, if given, is the enclosing FITS file object (used to inherit
        the PRIMARY header when so configured).
        """
        self.clear_metadata()
        ahdr = self.get_header()
        loader = io_fits.PyFitsFileHandler(self.logger)
        _data, naxispath = loader.load_hdu(hdu, ahdr, naxispath=naxispath)
        # this is a handle to the full data array
        self._md_data = _data
        if naxispath is None:
            naxispath = []
        # Drill down to 2D data slice
        if len(naxispath) == 0:
            naxispath = ([0] * (len(_data.shape)-2))
        self.set_naxispath(naxispath)
        # Set PRIMARY header
        if self.inherit_primary_header and fobj is not None:
            self.io.fromHDU(fobj[0], self._primary_hdr)
        # Try to make a wcs object on the header
        self.wcs.load_header(hdu.header, fobj=fobj)

    def load_file(self, filepath, numhdu=None, naxispath=None,
                  allow_numhdu_override=True):
        """Load image data, header and WCS from a file path.

        numhdu selects the HDU; when None the loader's choice may be
        recorded into the image name (subject to allow_numhdu_override).
        """
        self.logger.debug("Loading file '%s' ..." % (filepath))
        self.clear_metadata()
        ahdr = self.get_header()
        info = iohelper.get_fileinfo(filepath)
        if numhdu is None:
            numhdu = info.numhdu
        _data, numhdu_, naxispath = self.io.load_file(info.filepath, ahdr,
                                                      numhdu=numhdu,
                                                      naxispath=naxispath,
                                                      phdr=self._primary_hdr)
        # this is a handle to the full data array
        self._md_data = _data
        if naxispath is None:
            naxispath = []
        # Drill down to 2D data slice
        if len(naxispath) == 0:
            naxispath = ([0] * (len(_data.shape)-2))
        # Set the image name if no name currently exists for this image
        # TODO: should this *change* the existing name, if any?
        # NOTE(review): the condition below re-sets the name when one already
        # exists and derives a new one only otherwise -- verify this matches
        # the comment's intent.
        if not (self.name is None):
            self.set(name=self.name)
        else:
            name = self.get('name', None)
            if name is None:
                name = info.name
                if ('[' not in name):
                    if (numhdu is None) or allow_numhdu_override:
                        numhdu = numhdu_
                    name += iohelper.get_hdu_suffix(numhdu)
            self.set(name=name)
        self.set(path=filepath, idx=numhdu)
        self.set_naxispath(naxispath)
        # Try to make a wcs object on the header
        # TODO: in order to do more sophisticated WCS (e.g. distortion
        # correction) that requires info in additional headers we need
        # to pass additional information to the wcs class
        #self.wcs.load_header(hdu.header, fobj=fobj)
        self.wcs.load_header(ahdr)

    def load_buffer(self, data, dims, dtype, byteswap=False,
                    metadata=None):
        """Load image data from a raw byte buffer with the given dims/dtype."""
        # NOTE(review): numpy.fromstring is deprecated in favor of
        # numpy.frombuffer (which would need an explicit copy here because
        # byteswap(True) mutates in place).
        data = numpy.fromstring(data, dtype=dtype)
        if byteswap:
            data.byteswap(True)
        data = data.reshape(dims)
        self.set_data(data, metadata=metadata)

    def get_mddata(self):
        """Return the full (possibly multidimensional) data array."""
        return self._md_data

    def set_naxispath(self, naxispath):
        """Choose a slice out of multidimensional data.
        """
        revnaxis = list(naxispath)
        revnaxis.reverse()
        # construct slice view and extract it
        # NOTE(review): indexing with a plain list triggers fancy-indexing
        # deprecation on newer numpy; a tuple(view) would be the modern form.
        view = revnaxis + [slice(None), slice(None)]
        data = self.get_mddata()[view]
        assert len(data.shape) == 2, \
            ImageError("naxispath does not lead to a 2D slice: %s" % (
                str(naxispath)))
        self.naxispath = naxispath
        self.revnaxis = revnaxis
        self.set_data(data)

    def set_wcs(self, wcs):
        # Replace the WCS object wholesale.
        self.wcs = wcs

    def set_io(self, io):
        # Replace the I/O (loader) object wholesale.
        self.io = io

    def get_data_size(self):
        return self.get_size()

    def get_header(self, create=True):
        """Return the image header (an AstroHeader).

        When inherit_primary_header is set, returns a merged copy that also
        carries PRIMARY-header cards not present in this HDU's header.
        Raises KeyError if no header exists and create is False.
        """
        try:
            # By convention, the fits header is stored in a dictionary
            # under the metadata keyword 'header'
            hdr = self.metadata['header']
            if self.inherit_primary_header and self._primary_hdr is not None:
                # Inherit PRIMARY header for display but keep metadata intact
                displayhdr = AstroHeader()
                for key in hdr.keyorder:
                    card = hdr.get_card(key)
                    bnch = displayhdr.__setitem__(card.key, card.value)
                    bnch.comment = card.comment
                for key in self._primary_hdr.keyorder:
                    if key not in hdr:
                        card = self._primary_hdr.get_card(key)
                        bnch = displayhdr.__setitem__(card.key, card.value)
                        bnch.comment = card.comment
            else:
                # Normal, separate header
                displayhdr = hdr
        except KeyError as e:
            if not create:
                raise e
            #hdr = {}
            hdr = AstroHeader()
            self.metadata['header'] = hdr
            displayhdr = hdr
        return displayhdr

    def get_keyword(self, kwd, *args):
        """Get an item from the fits header, if any."""
        try:
            kwds = self.get_header()
            return kwds[kwd]
        except KeyError:
            # return a default if there is one
            if len(args) > 0:
                return args[0]
            raise KeyError(kwd)

    def get_keywords_list(self, *args):
        # Fetch several header keywords at once; raises on any missing one.
        return list(map(self.get_keyword, args))

    def set_keyword(self, kwd, value, create=True):
        """Set a single (upper-cased) header keyword.

        With create=False, raises KeyError if the keyword does not already
        exist (the lookup below is the existence check).
        """
        kwds = self.get_header(create=create)
        kwd = kwd.upper()
        if not create:
            prev = kwds[kwd]
        kwds[kwd] = value

    def update_keywords(self, keyDict):
        """Bulk-set header keywords (upper-cased) and refresh the WCS."""
        hdr = self.get_header()
        # Upcase all keywords
        for kwd, val in keyDict.items():
            hdr[kwd.upper()] = val
        # Try to make a wcs object on the header
        if hasattr(self, 'wcs'):
            self.wcs.load_header(hdr)

    def set_keywords(self, **kwds):
        """Set an item in the fits header, if any."""
        return self.update_keywords(kwds)

    def update_data(self, data_np, metadata=None, astype=None):
        """DO NOT USE: this method will be deprecated!
        """
        self.set_data(data_np.copy(), metadata=metadata,
                      astype=astype)

    def update_metadata(self, keyDict):
        """Merge keyDict into the image metadata and refresh the WCS."""
        for key, val in keyDict.items():
            self.metadata[key] = val
        # refresh the WCS
        if hasattr(self, 'wcs'):
            header = self.get_header()
            self.wcs.load_header(header)

    def clear_metadata(self):
        # Drop all metadata, including the stored header.
        self.metadata = {}

    def transfer(self, other, astype=None):
        """Copy this image's data and metadata into another image object."""
        data = self._get_data()
        other.update_data(data, astype=astype)
        other.update_metadata(self.metadata)

    def copy(self, astype=None):
        """Return a new AstroImage with a copy of this image's data/metadata."""
        data = self._get_data()
        other = AstroImage(data, logger=self.logger)
        self.transfer(other, astype=astype)
        return other

    def save_as_file(self, filepath, **kwdargs):
        """Write data and header to filepath via the I/O object."""
        data = self._get_data()
        header = self.get_header()
        self.io.save_as_file(filepath, data, header, **kwdargs)

    def pixtocoords(self, x, y, system=None, coords='data'):
        # Delegate to the WCS; revnaxis supplies the non-spatial axis indexes.
        args = [x, y] + self.revnaxis
        return self.wcs.pixtocoords(args, system=system, coords=coords)

    def spectral_coord(self, coords='data'):
        args = [0, 0] + self.revnaxis
        return self.wcs.spectral_coord(args, coords=coords)

    def pixtoradec(self, x, y, format='deg', coords='data'):
        """Convert pixel coordinates to RA/Dec (degrees, or formatted)."""
        args = [x, y] + self.revnaxis
        ra_deg, dec_deg = self.wcs.pixtoradec(args, coords=coords)
        if format == 'deg':
            return ra_deg, dec_deg
        return wcs.deg2fmt(ra_deg, dec_deg, format)

    def radectopix(self, ra_deg, dec_deg, format='deg', coords='data'):
        """Convert RA/Dec to pixel coordinates."""
        if format != 'deg':
            # convert coordinates to degrees
            ra_deg = wcs.lon_to_deg(ra_deg)
            dec_deg = wcs.lat_to_deg(dec_deg)
        return self.wcs.radectopix(ra_deg, dec_deg, coords=coords,
                                   naxispath=self.revnaxis)

    #-----> TODO: merge into wcs.py ?
    #
    def get_starsep_XY(self, x1, y1, x2, y2):
        """Angular separation between two pixel positions."""
        # source point
        ra_org, dec_org = self.pixtoradec(x1, y1)
        # destination point
        ra_dst, dec_dst = self.pixtoradec(x2, y2)
        return wcs.get_starsep_RaDecDeg(ra_org, dec_org, ra_dst, dec_dst)

    def calc_radius_xy(self, x, y, radius_deg):
        """Calculate a radius (in pixels) from the point (x, y) to a circle
        defined by radius in degrees.
        """
        # calculate ra/dec of x,y pixel
        ra_deg, dec_deg = self.pixtoradec(x, y)
        # Calculate position 1 degree from the given one
        # NOTE: this needs to add in DEC, not RA
        ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
                                                 0.0, 1.0)
        # Calculate the length of this segment--it is pixels/deg
        x2, y2 = self.radectopix(ra2_deg, dec2_deg)
        px_per_deg_e = math.sqrt(math.fabs(x2-x)**2 + math.fabs(y2-y)**2)
        # calculate radius based on desired radius_deg
        radius_px = px_per_deg_e * radius_deg
        return radius_px

    def calc_radius_deg2pix(self, ra_deg, dec_deg, delta_deg,
                            equinox=None):
        # NOTE(review): radectopix() defined above takes no `equinox`
        # parameter, so passing it here would raise TypeError -- verify
        # against the intended radectopix signature.
        x, y = self.radectopix(ra_deg, dec_deg, equinox=equinox)
        return self.calc_radius_xy(x, y, delta_deg)

    def add_offset_xy(self, x, y, delta_deg_x, delta_deg_y):
        """Offset a pixel position by (delta_deg_x, delta_deg_y) degrees."""
        # calculate ra/dec of x,y pixel
        ra_deg, dec_deg = self.pixtoradec(x, y)
        # add offsets
        ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
                                                 delta_deg_x, delta_deg_y)
        # then back to new pixel coords
        x2, y2 = self.radectopix(ra2_deg, dec2_deg)
        return (x2, y2)

    def calc_radius_center(self, delta_deg):
        """Radius in pixels of delta_deg measured from the image center."""
        return self.calc_radius_xy(float(self.width / 2.0),
                                   float(self.height / 2.0),
                                   delta_deg)

    def calc_compass(self, x, y, len_deg_e, len_deg_n):
        """Return endpoints of N/E compass arms anchored at (x, y)."""
        # Get east and north coordinates
        xe, ye = self.add_offset_xy(x, y, len_deg_e, 0.0)
        xe = int(round(xe))
        ye = int(round(ye))
        xn, yn = self.add_offset_xy(x, y, 0.0, len_deg_n)
        xn = int(round(xn))
        yn = int(round(yn))
        return (x, y, xn, yn, xe, ye)

    def calc_compass_radius(self, x, y, radius_px):
        """Compass at (x, y) whose arms are radius_px pixels long."""
        xe, ye = self.add_offset_xy(x, y, 1.0, 0.0)
        xn, yn = self.add_offset_xy(x, y, 0.0, 1.0)
        # now calculate the length in pixels of those arcs
        # (planar geometry is good enough here)
        px_per_deg_e = math.sqrt(math.fabs(ye - y)**2 + math.fabs(xe - x)**2)
        px_per_deg_n = math.sqrt(math.fabs(yn - y)**2 + math.fabs(xn - x)**2)
        # now calculate the arm length in degrees for each arm
        # (this produces same-length arms)
        len_deg_e = radius_px / px_per_deg_e
        len_deg_n = radius_px / px_per_deg_n
        return self.calc_compass(x, y, len_deg_e, len_deg_n)

    def calc_compass_center(self):
        """Compass centered on the image with arms ~1/4 the smaller dimension."""
        # calculate center of data
        x = float(self.width) / 2.0
        y = float(self.height) / 2.0
        # radius we want the arms to be (approx 1/4 the smallest dimension)
        radius_px = float(min(self.width, self.height)) / 4.0
        return self.calc_compass_radius(x, y, radius_px)
    #
    #<----- TODO: merge this into wcs.py ?

    def get_wcs_rotation_deg(self):
        """Rotation angle (degrees) derived from the header's WCS keywords."""
        header = self.get_header()
        (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
        return rot

    def rotate(self, deg, update_wcs=False):
        # Rotates the pixel data only; the WCS is NOT updated (see below).
        #old_deg = self.get_wcs_rotation_deg()
        super(AstroImage, self).rotate(deg)
        # TODO: currently this is not working!
        ## if update_wcs:
        ##     self.wcs.rotate(deg)

    def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None,
                      merge=False, allow_expand=True, expand_pad_deg=0.01,
                      max_expand_pct=None,
                      update_minmax=True, suppress_callback=False):
        """Drops new images into the current image (if there is room),
        relocating them according the WCS between the two images.

        Each piece is optionally trimmed, background-matched, scaled and
        rotated/flipped into this mosaic's orientation, then pasted at the
        pixel position corresponding to its sky position.  The mosaic array
        is expanded (and CRPIX adjusted) when a piece falls outside it and
        allow_expand is True.  Returns the (xlo, ylo, xhi, yhi) bounds of
        the last piece placed.
        """
        # Get our own (mosaic) rotation and scale
        header = self.get_header()
        ((xrot_ref, yrot_ref),
         (cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)
        ref_rot = yrot_ref
        scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)
        # drop each image in the right place in the new data array
        mydata = self._get_data()
        count = 1
        for image in imagelist:
            name = image.get('name', 'image%d' % (count))
            count += 1
            data_np = image._get_data()
            # Calculate sky position at the center of the piece
            ctr_x, ctr_y = trcalc.get_center(data_np)
            ra, dec = image.pixtoradec(ctr_x, ctr_y)
            # User specified a trim? If so, trim edge pixels from each
            # side of the array
            ht, wd = data_np.shape[:2]
            if trim_px:
                xlo, xhi = trim_px, wd - trim_px
                ylo, yhi = trim_px, ht - trim_px
                data_np = data_np[ylo:yhi, xlo:xhi, ...]
                ht, wd = data_np.shape[:2]
            # If caller asked us to match background of pieces then
            # get the median of this piece
            if bg_ref is not None:
                bg = iqcalc.get_median(data_np)
                bg_inc = bg_ref - bg
                #print "bg=%f inc=%f" % (bg, bg_inc)
                data_np = data_np + bg_inc
            # Determine max/min to update our values
            if update_minmax:
                maxval = numpy.nanmax(data_np)
                minval = numpy.nanmin(data_np)
                self.maxval = max(self.maxval, maxval)
                self.minval = min(self.minval, minval)
            # Get rotation and scale of piece
            header = image.get_header()
            ((xrot, yrot),
             (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
            self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f cdelt2=%f" % (
                name, xrot, yrot, cdelt1, cdelt2))
            # scale if necessary
            # TODO: combine with rotation?
            if (not numpy.isclose(math.fabs(cdelt1), scale_x) or
                not numpy.isclose(math.fabs(cdelt2), scale_y)):
                nscale_x = math.fabs(cdelt1) / scale_x
                nscale_y = math.fabs(cdelt2) / scale_y
                self.logger.debug("scaling piece by x(%f), y(%f)" % (
                    nscale_x, nscale_y))
                data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
                    data_np, 0, 0, wd-1, ht-1, nscale_x, nscale_y)
            # Rotate piece into our orientation, according to wcs
            rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref
            flip_x = False
            flip_y = False
            ## # Flip X due to negative CDELT1
            ## if numpy.sign(cdelt1) < 0:
            ##     flip_x = True
            ## # Flip Y due to negative CDELT2
            ## if numpy.sign(cdelt2) < 0:
            ##     flip_y = True
            # Optomization for 180 rotations
            if numpy.isclose(math.fabs(rot_dx), 180.0):
                flip_x = not flip_x
                rot_dx = 0.0
            if numpy.isclose(math.fabs(rot_dy), 180.0):
                flip_y = not flip_y
                rot_dy = 0.0
            self.logger.debug("flip_x=%s flip_y=%s" % (flip_x, flip_y))
            if flip_x or flip_y:
                rotdata = trcalc.transform(data_np,
                                           flip_x=flip_x, flip_y=flip_y)
            else:
                rotdata = data_np
            # Finish with any necessary rotation of piece
            if not numpy.isclose(rot_dy, 0.0):
                rot_deg = rot_dy
                self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
                rotdata = trcalc.rotate(rotdata, rot_deg,
                                        #rotctr_x=ctr_x, rotctr_y=ctr_y
                                        )
            # Get size and data of new image
            ht, wd = rotdata.shape[:2]
            ctr_x, ctr_y = trcalc.get_center(rotdata)
            # Find location of image piece (center) in our array
            x0, y0 = self.radectopix(ra, dec)
            # Merge piece as closely as possible into our array
            # Unfortunately we lose a little precision rounding to the
            # nearest pixel--can't be helped with this approach
            x0, y0 = int(round(x0)), int(round(y0))
            self.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
                name, x0, y0))
            # This is for useful debugging info only
            my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
            off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
            self.logger.debug("centering offsets: %d,%d" % (off_x, off_y))
            # Sanity check piece placement
            xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
            ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
            assert (xhi - xlo == wd), \
                Exception("Width differential %d != %d" % (xhi - xlo, wd))
            assert (yhi - ylo == ht), \
                Exception("Height differential %d != %d" % (yhi - ylo, ht))
            mywd, myht = self.get_size()
            if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
                if not allow_expand:
                    raise Exception("New piece doesn't fit on image and allow_expand=False")
                #<-- Resize our data array to allow the new image
                # determine amount to pad expansion by
                expand_x = max(int(expand_pad_deg / scale_x), 0)
                expand_y = max(int(expand_pad_deg / scale_y), 0)
                nx1_off, nx2_off = 0, 0
                if xlo < 0:
                    nx1_off = abs(xlo) + expand_x
                if xhi > mywd:
                    nx2_off = (xhi - mywd) + expand_x
                xlo, xhi = xlo + nx1_off, xhi + nx1_off
                ny1_off, ny2_off = 0, 0
                if ylo < 0:
                    ny1_off = abs(ylo) + expand_y
                if yhi > myht:
                    ny2_off = (yhi - myht) + expand_y
                ylo, yhi = ylo + ny1_off, yhi + ny1_off
                new_wd = mywd + nx1_off + nx2_off
                new_ht = myht + ny1_off + ny2_off
                # sanity check on new mosaic size
                old_area = mywd * myht
                new_area = new_wd * new_ht
                expand_pct = new_area / old_area
                if ((max_expand_pct is not None) and
                    (expand_pct > max_expand_pct)):
                    raise Exception("New area exceeds current one by %.2f %%;"
                                    "increase max_expand_pct (%.2f) to allow" %
                                    (expand_pct*100, max_expand_pct))
                # go for it!
                new_data = numpy.zeros((new_ht, new_wd))
                # place current data into new data
                new_data[ny1_off:ny1_off+myht, nx1_off:nx1_off+mywd] = \
                    mydata
                self._data = new_data
                mydata = new_data
                if (nx1_off > 0) or (ny1_off > 0):
                    # Adjust our WCS for relocation of the reference pixel
                    crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')
                    kwds = dict(CRPIX1=crpix1 + nx1_off,
                                CRPIX2=crpix2 + ny1_off)
                    self.update_keywords(kwds)
            # fit image piece into our array
            try:
                if merge:
                    mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
                else:
                    # only fill in pixels that are still zero (first write wins)
                    idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
                    mydata[ylo:yhi, xlo:xhi, ...][idx] = \
                        rotdata[0:ht, 0:wd, ...][idx]
            except Exception as e:
                self.logger.error("Error fitting tile: %s" % (str(e)))
                raise
        # TODO: recalculate min and max values
        # Can't use usual techniques because it adds too much time to the
        # mosacing
        #self._set_minmax()
        # Notify watchers that our data has changed
        if not suppress_callback:
            self.make_callback('modified')
        return (xlo, ylo, xhi, yhi)

    def info_xy(self, data_x, data_y, settings):
        """Return a Bunch describing the pixel at (data_x, data_y): its value
        plus WCS coordinate text/labels formatted per the given settings.
        """
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = self.get_data_xy(int(data_x+0.5), int(data_y+0.5))
        except Exception as e:
            value = None
        system = settings.get('wcs_coords', None)
        format = settings.get('wcs_display', 'sexagesimal')
        # default labels: Greek alpha / delta
        ra_lbl, dec_lbl = six.unichr(945), six.unichr(948)
        # Calculate WCS coords, if available
        ts = time.time()
        try:
            if self.wcs is None:
                self.logger.debug("No WCS for this image")
                ra_txt = dec_txt = 'NO WCS'
            elif self.wcs.coordsys == 'raw':
                self.logger.debug("No coordinate system determined")
                ra_txt = dec_txt = 'NO WCS'
            elif self.wcs.coordsys == 'pixel':
                args = [data_x, data_y] + self.revnaxis
                x, y = self.wcs.pixtosystem(#(data_x, data_y),
                    args, system=system, coords='data')
                ra_txt = "%+.3f" % (x)
                dec_txt = "%+.3f" % (y)
                ra_lbl, dec_lbl = "X", "Y"
            else:
                args = [data_x, data_y] + self.revnaxis
                lon_deg, lat_deg = self.wcs.pixtosystem(#(data_x, data_y),
                    args, system=system, coords='data')
                if format == 'sexagesimal':
                    if system in ('galactic', 'ecliptic'):
                        sign, deg, min, sec = wcs.degToDms(lon_deg,
                                                           isLatitude=False)
                        ra_txt = '+%03d:%02d:%06.3f' % (deg, min, sec)
                    else:
                        deg, min, sec = wcs.degToHms(lon_deg)
                        ra_txt = '%02d:%02d:%06.3f' % (deg, min, sec)
                    sign, deg, min, sec = wcs.degToDms(lat_deg)
                    if sign < 0:
                        sign = '-'
                    else:
                        sign = '+'
                    dec_txt = '%s%02d:%02d:%06.3f' % (sign, deg, min, sec)
                else:
                    ra_txt = '%+10.7f' % (lon_deg)
                    dec_txt = '%+10.7f' % (lat_deg)
                if system == 'galactic':
                    ra_lbl, dec_lbl = "l", "b"
                elif system == 'ecliptic':
                    # Greek lambda / beta
                    ra_lbl, dec_lbl = six.unichr(0x03BB), six.unichr(0x03B2)
                elif system == 'helioprojective':
                    ra_txt = "%+5.3f" % (lon_deg*3600)
                    dec_txt = "%+5.3f" % (lat_deg*3600)
                    ra_lbl, dec_lbl = "x-Solar", "y-Solar"
        except Exception as e:
            self.logger.warn("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'
            try:
                # log traceback, if possible
                (type_, value_, tb) = sys.exc_info()
                tb_str = "".join(traceback.format_tb(tb))
                self.logger.error("Traceback:\n%s" % (tb_str))
            except Exception:
                tb_str = "Traceback information unavailable."
                self.logger.error(tb_str)
        te = time.time() - ts
        #print "time elapsed: %.4f" % te
        info = Bunch.Bunch(itype='astro', data_x=data_x, data_y=data_y,
                           x=data_x, y=data_y,
                           ra_txt=ra_txt, dec_txt=dec_txt,
                           ra_lbl=ra_lbl, dec_lbl=dec_lbl,
                           value=value)
        return info
#END
| |
# -*- coding: utf-8; -*-
import re
import yaml
from os.path import basename, abspath
from sys import exit, argv
from copy import deepcopy
from getopt import getopt
from getpass import getpass
from functools import partial
from subprocess import CalledProcessError
from lxml import etree
from jenkins import Jenkins, JenkinsError
from requests.exceptions import RequestException
from jenkins_autojobs import __version__
from jenkins_autojobs.util import *
#-----------------------------------------------------------------------------
# Compatibility imports.
try:
from itertools import ifilterfalse as filterfalse
except ImportError:
from itertools import filterfalse
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
input = raw_input
except NameError:
pass
#-----------------------------------------------------------------------------
# Command-line help text printed for -h/--help; %s is the invoked script name.
usage = '''\
Usage: %s [-rvdtjnyoupUYOP] <config.yaml>
General Options:
-n dry run
-v show version and exit
-d debug config inheritance
-t debug http requests
Repository Options:
-r <arg> repository url
-y <arg> scm username
-o <arg> scm password
-Y scm username (read from stdin)
-O scm password (read from stdin)
Jenkins Options:
-j <arg> jenkins url
-u <arg> jenkins username
-p <arg> jenkins password
-U jenkins username (read from stdin)
-P jenkins password (read from stdin)\
''' % basename(argv[0])
#-----------------------------------------------------------------------------
# The *global* connection to jenkins - assigned in main().
jenkins = None
def main(argv, create_job, list_branches, getoptfmt='vdtnr:j:u:p:y:o:UPYO', config=None):
    '''Parse options, connect to jenkins and create/update one job per branch.

    :param argv: command-line arguments to parse (defaults to sys.argv[1:])
    :param create_job: scm specific function that configures and creates jobs
    :param list_branches: scm specific function that lists all branches/refs
    :param getoptfmt: getopt short and long options
    :param config: a config dictionary to use instead of parsing the
                   configuration from yaml (useful for testing)
    '''
    if '-h' in argv or '--help' in argv:
        print(usage)
        exit(1)

    opts, args = parse_args(argv, getoptfmt)
    if not args and not config:
        print(usage)
        exit(1)

    # Load config, set default values and compile regexes.
    if not config:
        yamlfn = args[-1]
        print('loading config from "%s"' % abspath(yamlfn))
        # safe_load instead of load: a config file should never be able to
        # construct arbitrary Python objects, and the 'with' block closes
        # the file handle deterministically instead of leaking it.
        with open(yamlfn) as fh:
            config = yaml.safe_load(fh)

    config = c = get_default_config(config, opts)

    if config['debughttp']:
        enable_http_logging()

    # Connect to jenkins. The connection is also exposed as the module-level
    # 'jenkins' global and as 'main.jenkins' for the helper functions below.
    try:
        global jenkins
        jenkins = main.jenkins = Jenkins(c['jenkins'], c['username'], c['password'])
    except (RequestException, JenkinsError) as e:
        print(e)
        exit(1)

    #-------------------------------------------------------------------------
    # Get all the template names that the config references.
    templates = set(i['template'] for i in c['refs'].values())

    # Check if all referenced template jobs exist on the server.
    missing = list(filterfalse(jenkins.job_exists, templates))
    if missing:
        missing.insert(0, '\nconfig references non-existent template jobs:')
        print('\n - '.join(missing))
        exit(1)

    # Convert them to etree objects of the templates' config xmls.
    templates = dict((i, get_job_etree(i)) for i in templates)

    #-------------------------------------------------------------------------
    # Check if all referenced views exist.
    view_names = set(view for i in c['refs'].values() for view in i['view'])
    missing = list(filterfalse(jenkins.view_exists, view_names))
    if missing:
        missing.insert(0, '\nconfig references non-existent views:')
        print('\n - '.join(missing))
        exit(1)

    #-------------------------------------------------------------------------
    # List all git refs, svn branches etc (implemented by child classes).
    try:
        branches = list(list_branches(config))
    except CalledProcessError as e:
        print('! cannot list branches')
        print('! command %s failed' % ' '.join(e.cmd))
        exit(1)

    # See if any of the branches are ignored.
    ignored, branches = get_ignored(branches, c['ignore'])
    if ignored:
        msg = ['\nexplicitly ignored:'] + ignored
        print('\n - '.join(msg))

    # Get branch config for each branch; branches that match no ref regex
    # resolve to None and are dropped by the filter below.
    configs = map(partial(resolveconfig, config), branches)
    configs = zip(branches, configs)
    configs = filter(lambda x: bool(x[1]), configs)

    # The names of all successfully created or updated jobs.
    job_names = {}
    for branch, branch_config in configs:
        tmpl = templates[branch_config['template']]
        job_name = create_job(branch, tmpl, config, branch_config)
        job_names[job_name] = branch_config

        # Add newly created jobs to views, if any.
        views = branch_config['view']
        for view_name in views:
            view = jenkins.view(view_name)
            if job_name in view:
                print('. job already in view: %s' % view_name)
            else:
                if not config['dryrun']:
                    jenkins.view_add_job(view_name, job_name)
                print('. job added to view: %s' % view_name)

    if config['cleanup']:
        # Never let cleanup() delete the template job itself.
        job_names[config['template']] = {}
        cleanup(config, job_names, jenkins)
#-----------------------------------------------------------------------------
def cleanup(config, job_names, jenkins, verbose=True):
    '''Delete managed jobs that no longer correspond to an existing branch.

    A job is considered managed when its config.xml contains the
    createdByJenkinsAutojobs marker element. If ``config['cleanup']`` is a
    string, it is treated as a tag name and only jobs carrying that tag are
    removed.

    :param config: effective configuration (uses 'cleanup' and 'dryrun')
    :param job_names: mapping of job names that must be preserved
    :param jenkins: connection through which jobs are listed and deleted
    :param verbose: kept for backward compatibility (currently unused)
    '''
    print('\ncleaning up old jobs:')

    tag = '</createdByJenkinsAutojobs>'
    tagxpath = 'createdByJenkinsAutojobs/tag/text()'

    managed_jobs = (job for job in jenkins.jobs if tag in job.config)
    removed_jobs = []

    for job in managed_jobs:
        if job.name not in job_names and job.exists:
            # If cleanup is a tag name, only cleanup builds with that tag.
            if isinstance(config['cleanup'], str):
                xml = etree.fromstring(job.config.encode('utf8'))
                clean_tag = xml.xpath(tagxpath)
                if config['cleanup'] not in clean_tag:
                    continue

            removed_jobs.append(job)
            if not config['dryrun']:
                job.delete()
            print(' - %s' % job.name)

    if not removed_jobs:
        print('. nothing to do')
#-----------------------------------------------------------------------------
def parse_args(argv, fmt):
    '''Parse getopt arguments as a dictionary.'''
    parsed, positional = getopt(argv, fmt)
    options = dict(parsed)

    # '-v' short-circuits everything else: print the version and bail out.
    if '-v' in options:
        print('jenkins-autojobs version %s' % __version__)
        exit(0)

    return options, positional
#-----------------------------------------------------------------------------
def get_default_config(config, opts):
    '''Set default config values and compile regexes.

    :param config: raw configuration dictionary as parsed from yaml
    :param opts: getopt option dictionary from the command line
    :returns: a deep copy of `config` with global defaults filled in,
              command-line overrides applied, ignore regexes compiled and
              'refs' replaced with the effective per-ref configuration.
    '''
    c, o = deepcopy(config), opts

    # Default global settings (not inheritable).
    c['dryrun'] = False
    c['debug'] = config.get('debug', False)
    c['debughttp'] = config.get('debughttp', False)
    c['cleanup'] = config.get('cleanup', False)
    c['username'] = config.get('username', None)
    c['password'] = config.get('password', None)
    c['scm-username'] = config.get('scm-username', None)
    c['scm-password'] = config.get('scm-password', None)

    # Default settings for each git ref/branch config.
    c['defaults'] = {
        'namesep': c.get('namesep', '-'),
        'namefmt': c.get('namefmt', '{shortref}'),
        'overwrite': c.get('overwrite', True),
        'enable': c.get('enable', 'sticky'),
        'substitute': c.get('substitute', {}),
        'template': c.get('template'),
        'sanitize': c.get('sanitize', {'@!?#&|\^_$%*': '_'}),
        'tag': c.get('tag', []),
        'view': c.get('view', [])
    }

    # Make sure some options are always lists.
    c['defaults']['view'] = pluralize(c['defaults']['view'])

    # Some options can be overwritten on the command line.
    if '-r' in o: c['repo'] = o['-r']
    if '-j' in o: c['jenkins'] = o['-j']
    if '-n' in o: c['dryrun'] = True
    if '-d' in o: c['debug'] = True
    if '-t' in o: c['debughttp'] = True

    # Jenkins authentication options.
    if '-u' in o: c['username'] = o['-u']
    if '-p' in o: c['password'] = o['-p']
    if '-U' in o: c['username'] = input('Jenkins User: ')
    if '-P' in o: c['password'] = getpass('Jenkins Password: ')

    # SCM authentication options.
    if '-y' in o: c['scm-username'] = o['-y']
    if '-o' in o: c['scm-password'] = o['-o']
    if '-Y' in o: c['scm-username'] = input('SCM User: ')
    if '-O' in o: c['scm-password'] = getpass('SCM Password: ')

    # Compile ignore regexes.
    c.setdefault('ignore', {})
    c['ignore'] = [re.compile(i) for i in c['ignore']]

    # With no explicit refs, every branch gets the default config.
    if 'refs' not in c:
        c['refs'] = ['.*']

    # Get the effective (accounting for inheritance) config for all refs.
    cfg = get_effective_branch_config(c['refs'], c['defaults'])
    c['refs'] = cfg

    return c
#-----------------------------------------------------------------------------
def get_effective_branch_config(branches, defaults):
    '''Compile ref/branch regexes and map to their configuration with
    inheritance factored in (think maven help:effective-pom).'''
    assert isinstance(branches, (list, tuple))

    effective = OrderedDict()
    for entry in branches:
        if not isinstance(entry, dict):
            # A bare regex string inherits the defaults unchanged.
            effective[re.compile(entry)] = defaults
            continue
        # A one-item mapping of {regex: overrides} - merge over defaults.
        pattern, overrides = list(entry.items())[0]
        merged = defaults.copy()
        merged.update(overrides)
        effective[re.compile(pattern)] = merged

    # The 'All' view doesn't have an API endpoint (i.e. no /view/All/api).
    # Since all jobs are added to it by default, it is safe to ignore it.
    for cfg in effective.values():
        if 'All' in cfg['view']:
            cfg['view'].remove('All')

    return effective
def get_ignored(branches, regexes):
    '''Get refs, excluding ignored.'''
    matches_any_regex = partial(anymatch, regexes)
    ignored, kept = filtersplit(matches_any_regex, branches)
    return ignored, kept
def resolveconfig(effective_config, branch):
    '''Resolve a ref to its effective config.

    Returns a copy of the first matching ref config (with the matching
    compiled regex stored under 're'), or None if no regex matches.'''
    for pattern, cfg in effective_config['refs'].items():
        if not pattern.match(branch):
            continue
        cfg['re'] = pattern
        return cfg.copy()
def get_job_etree(job):
    '''Fetch a job's config.xml from jenkins and parse it into an etree.'''
    xml = jenkins.job(job).config
    return etree.fromstring(xml.encode('utf8'))
def debug_refconfig(ref_config):
    '''Pretty-print the effective config of a single ref (debugging aid).'''
    print('. config:')
    for key, val in ref_config.items():
        if key == 're':
            # 're' holds a compiled regex - show its pattern instead.
            print(' . %s: %s' % (key, val.pattern))
        elif val:
            print(' . %s: %s' % (key, val))
def enable_http_logging():
    '''Turn on verbose logging of all HTTP traffic (debugging aid).'''
    import logging

    # http.client on py3, httplib on py2.
    try:
        from http.client import HTTPConnection
    except ImportError:
        from httplib import HTTPConnection

    HTTPConnection.debuglevel = 1

    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)

    urllib3_log = logging.getLogger('requests.packages.urllib3')
    urllib3_log.setLevel(logging.DEBUG)
    urllib3_log.propagate = True
| |
from __future__ import unicode_literals
import re
from wtforms.compat import string_types, text_type
__all__ = (
'DataRequired', 'data_required', 'Email', 'email', 'EqualTo', 'equal_to',
'IPAddress', 'ip_address', 'InputRequired', 'input_required', 'Length',
'length', 'NumberRange', 'number_range', 'Optional', 'optional',
'Required', 'required', 'Regexp', 'regexp', 'URL', 'url', 'AnyOf',
'any_of', 'NoneOf', 'none_of', 'MacAddress', 'mac_address', 'UUID'
)
class ValidationError(ValueError):
    """
    Raised when a validator fails to validate its input.
    The optional message is stored as the exception's first argument.
    """
    def __init__(self, message='', *args, **kwargs):
        super(ValidationError, self).__init__(message, *args, **kwargs)
class StopValidation(Exception):
    """
    Causes the validation chain to stop.
    When raised, no further validators in the chain are called. If raised
    with a message, that message is added to the field's errors list.
    """
    def __init__(self, message='', *args, **kwargs):
        super(StopValidation, self).__init__(message, *args, **kwargs)
class EqualTo(object):
    """
    Checks that this field's value equals the value of another field.

    :param fieldname:
        The name of the other field to compare to.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated with `%(other_label)s` and `%(other_name)s` to provide a
        more helpful error.
    """
    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        self.message = message

    def __call__(self, form, field):
        try:
            other = form[self.fieldname]
        except KeyError:
            raise ValidationError(field.gettext("Invalid field name '%s'.") % self.fieldname)

        if field.data == other.data:
            return

        substitutions = {
            'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
            'other_name': self.fieldname
        }
        error = self.message
        if error is None:
            error = field.gettext('Field must be equal to %(other_name)s.')
        raise ValidationError(error % substitutions)
class Length(object):
    """
    Validates the length of a string.

    :param min:
        The minimum required length of the string. If not provided, minimum
        length will not be checked.
    :param max:
        The maximum length of the string. If not provided, maximum length
        will not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)d` and `%(max)d` if desired. Useful defaults
        are provided depending on the existence of min and max.
    """
    def __init__(self, min=-1, max=-1, message=None):
        assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.'
        assert max == -1 or min <= max, '`min` cannot be more than `max`.'
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        length = len(field.data) if field.data else 0
        too_short = length < self.min
        too_long = self.max != -1 and length > self.max
        if not (too_short or too_long):
            return

        error = self.message
        if error is None:
            if self.max == -1:
                error = field.ngettext('Field must be at least %(min)d character long.',
                                       'Field must be at least %(min)d characters long.', self.min)
            elif self.min == -1:
                error = field.ngettext('Field cannot be longer than %(max)d character.',
                                       'Field cannot be longer than %(max)d characters.', self.max)
            else:
                error = field.gettext('Field must be between %(min)d and %(max)d characters long.')
        raise ValidationError(error % dict(min=self.min, max=self.max))
class NumberRange(object):
    """
    Validates that a number is of a minimum and/or maximum value, inclusive.
    This will work with any comparable number type, such as floats and
    decimals, not just integers.

    :param min:
        The minimum required value of the number. If not provided, minimum
        value will not be checked.
    :param max:
        The maximum value of the number. If not provided, maximum value
        will not be checked.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated using `%(min)s` and `%(max)s` if desired. Useful defaults
        are provided depending on the existence of min and max.
    """
    def __init__(self, min=None, max=None, message=None):
        self.min = min
        self.max = max
        self.message = message

    def __call__(self, form, field):
        value = field.data
        out_of_range = (
            value is None
            or (self.min is not None and value < self.min)
            or (self.max is not None and value > self.max)
        )
        if not out_of_range:
            return

        error = self.message
        if error is None:
            # we use %(min)s interpolation to support floats, None, and
            # Decimals without throwing a formatting exception.
            if self.max is None:
                error = field.gettext('Number must be at least %(min)s.')
            elif self.min is None:
                error = field.gettext('Number must be at most %(max)s.')
            else:
                error = field.gettext('Number must be between %(min)s and %(max)s.')
        raise ValidationError(error % dict(min=self.min, max=self.max))
class Optional(object):
    """
    Allows empty input and stops the validation chain from continuing.
    If input is empty, also removes prior errors (such as processing errors)
    from the field.

    :param strip_whitespace:
        If True (the default) also stop the validation chain on input which
        consists of only whitespace.
    """
    field_flags = ('optional', )

    def __init__(self, strip_whitespace=True):
        if strip_whitespace:
            self.string_check = lambda s: s.strip()
        else:
            self.string_check = lambda s: s

    def __call__(self, form, field):
        raw = field.raw_data
        is_empty = not raw or (isinstance(raw[0], string_types) and not self.string_check(raw[0]))
        if is_empty:
            field.errors[:] = []
            raise StopValidation()
class DataRequired(object):
    """
    Validates that the field contains coerced data. This validator will stop
    the validation chain on error.
    If the data is empty, also removes prior errors (such as processing errors)
    from the field.

    **NOTE** this validator used to be called `Required` but the way it behaved
    (requiring coerced data, not input data) meant it functioned in a way
    which was not symmetric to the `Optional` validator and furthermore caused
    confusion with certain fields which coerced data to 'falsey' values like
    ``0``, ``Decimal(0)``, etc. Unless a very specific reason exists, we
    recommend using the :class:`InputRequired` instead.

    :param message:
        Error message to raise in case of a validation error.
    """
    field_flags = ('required', )

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        data = field.data
        is_empty = not data or (isinstance(data, string_types) and not data.strip())
        if not is_empty:
            return

        if self.message is None:
            error = field.gettext('This field is required.')
        else:
            error = self.message
        field.errors[:] = []
        raise StopValidation(error)
class Required(DataRequired):
    """
    Legacy alias for DataRequired.

    This is kept as a distinct subclass (rather than a simple
    ``Required = DataRequired`` alias) for code that inspects the
    validator's class name and expects it to be 'Required.'

    This class will start throwing deprecation warnings in WTForms 1.1 and be removed by 1.2.
    """
class InputRequired(object):
    """
    Validates that input was provided for this field.

    Note there is a distinction between this and DataRequired in that
    InputRequired looks that form-input data was provided, and DataRequired
    looks at the post-coercion data.
    """
    field_flags = ('required', )

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        # Any non-empty first raw value counts as provided input.
        if field.raw_data and field.raw_data[0]:
            return

        if self.message is None:
            error = field.gettext('This field is required.')
        else:
            error = self.message
        field.errors[:] = []
        raise StopValidation(error)
class Regexp(object):
    """
    Validates the field against a user provided regexp.

    :param regex:
        The regular expression string to use. Can also be a compiled regular
        expression pattern.
    :param flags:
        The regexp flags to use, for example re.IGNORECASE. Ignored if
        `regex` is not a string.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, regex, flags=0, message=None):
        if isinstance(regex, string_types):
            regex = re.compile(regex, flags)
        self.regex = regex
        self.message = message

    def __call__(self, form, field, message=None):
        if self.regex.match(field.data or ''):
            return

        if message is None:
            if self.message is None:
                message = field.gettext('Invalid input.')
            else:
                message = self.message
        raise ValidationError(message)
class Email(Regexp):
    """
    Validates an email address. Note that this uses a very primitive regular
    expression and should only be used in instances where you later verify by
    other means, such as email activation or lookups.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE, message)

    def __call__(self, form, field):
        error = self.message
        if error is None:
            error = field.gettext('Invalid email address.')
        super(Email, self).__call__(form, field, error)
class IPAddress(object):
    """
    Validates an IP address.

    :param ipv4:
        If True, accept IPv4 addresses as valid (default True)
    :param ipv6:
        If True, accept IPv6 addresses as valid (default False)
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, ipv4=True, ipv6=False, message=None):
        if not ipv4 and not ipv6:
            raise ValueError('IP Address Validator must have at least one of ipv4 or ipv6 enabled.')
        self.ipv4 = ipv4
        self.ipv6 = ipv6
        self.message = message

    def __call__(self, form, field):
        value = field.data
        valid = False
        if value:
            valid = (self.ipv4 and self.check_ipv4(value)) or (self.ipv6 and self.check_ipv6(value))

        if valid:
            return
        error = self.message
        if error is None:
            error = field.gettext('Invalid IP address.')
        raise ValidationError(error)

    def check_ipv4(self, value):
        # Exactly four dot-separated decimal octets, each in 0..255.
        parts = value.split('.')
        if len(parts) != 4 or not all(part.isdigit() for part in parts):
            return False
        return all(0 <= int(part) < 256 for part in parts)

    def check_ipv6(self, value):
        parts = value.split(':')
        if len(parts) > 8:
            return False

        blanks = 0
        for part in parts:
            if not part:
                blanks += 1
                continue
            try:
                number = int(part, 16)
            except ValueError:
                return False
            if not (0 <= number < 65536):
                return False

        # At most one '::' abbreviation is allowed; a leading '::' produces
        # two consecutive empty parts, which is also acceptable.
        if blanks < 2:
            return True
        if blanks == 2 and not parts[0] and not parts[1]:
            return True
        return False
class MacAddress(Regexp):
    """
    Validates a MAC address (six colon-separated hex octets).

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(MacAddress, self).__init__(
            r'^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$', message=message)

    def __call__(self, form, field):
        error = self.message
        if error is None:
            error = field.gettext('Invalid Mac address.')
        super(MacAddress, self).__call__(form, field, error)
class URL(Regexp):
    """
    Simple regexp based url validation. Much like the email validator, you
    probably want to validate the url later by other means if the url must
    resolve.

    :param require_tld:
        If true, then the domain-name portion of the URL must contain a .tld
        suffix. Set this to false if you want to allow domains like
        `localhost`.
    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, require_tld=True, message=None):
        tld_part = r'\.[a-z]{2,10}' if require_tld else ''
        regex = r'^[a-z]+://([^/:]+%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$' % tld_part
        super(URL, self).__init__(regex, re.IGNORECASE, message)

    def __call__(self, form, field):
        error = self.message
        if error is None:
            error = field.gettext('Invalid URL.')
        super(URL, self).__call__(form, field, error)
class UUID(Regexp):
    """
    Validates a UUID in its canonical 8-4-4-4-12 hex form.

    :param message:
        Error message to raise in case of a validation error.
    """
    def __init__(self, message=None):
        super(UUID, self).__init__(
            r'^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$', message=message)

    def __call__(self, form, field):
        error = self.message
        if error is None:
            error = field.gettext('Invalid UUID.')
        super(UUID, self).__call__(form, field, error)
class AnyOf(object):
    """
    Compares the incoming data to a sequence of valid inputs.

    :param values:
        A sequence of valid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        self.values_formatter = (values_formatter if values_formatter is not None
                                 else self.default_values_formatter)

    def __call__(self, form, field):
        if field.data in self.values:
            return
        error = self.message
        if error is None:
            error = field.gettext('Invalid value, must be one of: %(values)s.')
        raise ValidationError(error % dict(values=self.values_formatter(self.values)))

    @staticmethod
    def default_values_formatter(values):
        return ', '.join(text_type(x) for x in values)
class NoneOf(object):
    """
    Compares the incoming data to a sequence of invalid inputs.

    :param values:
        A sequence of invalid inputs.
    :param message:
        Error message to raise in case of a validation error. `%(values)s`
        contains the list of values.
    :param values_formatter:
        Function used to format the list of values in the error message.
    """
    def __init__(self, values, message=None, values_formatter=None):
        self.values = values
        self.message = message
        if values_formatter is None:
            values_formatter = lambda vals: ', '.join(text_type(x) for x in vals)
        self.values_formatter = values_formatter

    def __call__(self, form, field):
        if field.data not in self.values:
            return
        error = self.message
        if error is None:
            error = field.gettext('Invalid value, can\'t be any of: %(values)s.')
        raise ValidationError(error % dict(values=self.values_formatter(self.values)))
# Lowercase aliases so each validator can also be referenced in the
# conventional snake_case factory style (e.g. ``validators.email()``).
email = Email
equal_to = EqualTo
ip_address = IPAddress
mac_address = MacAddress
length = Length
number_range = NumberRange
optional = Optional
required = Required
input_required = InputRequired
data_required = DataRequired
regexp = Regexp
url = URL
any_of = AnyOf
none_of = NoneOf
| |
import copy
import gridfs
import hashlib
import datetime
import numpy as np
import pymongo as pm
import cPickle as pickle
from bson.binary import Binary
from bson.objectid import ObjectId
import torch
# import base
class DBInterface(object):
"""Interface for all DBInterface subclasses.
Your database class should subclass this interface by maintaining the
regular attribute
`db_name`
and implementing the following methods:
`save(obj)`
Save the python object `obj` to the database `db` and return an identifier
`object_id`.
`load(obj)`
`delete(obj)`
Remove `obj` from the database `self.db_name`.
"""
__name__ = 'dbinterface'
def __init__(self, *args, **kwargs):
super(DBInterface, self).__init__(*args, **kwargs)
def save(self):
raise NotImplementedError()
def load(self):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
class MongoInterface(DBInterface):
    """Simple and lightweight mongodb interface for saving experimental data files.

    Tensors (numpy arrays, torch tensors and autograd Variables) embedded in a
    document are transparently stored in GridFS and replaced with ObjectId
    references; loading reverses the substitution.
    """

    __name__ = 'mongointerface'

    # Fallback connection settings (kept for reference; __init__ takes
    # explicit arguments).
    _DEFAULTS = {
        'port': 27017,
        'hostname': 'localhost',
        'db_name': 'DEFAULT_DATABASE',
        'collection_name': 'DEFAULT_COLLECTION',
    }

    def __init__(self, db_name, collection_name, hostname='localhost', port=27017):
        super(MongoInterface, self).__init__()
        self.db_name = db_name
        # BUG FIX: this previously read the undefined name `database_name`,
        # raising NameError on every construction. The attribute is kept for
        # backward compatibility with any code that reads it.
        self.database_name = db_name
        self.collection_name = collection_name
        self.hostname = hostname
        self.port = port
        self.client = pm.MongoClient(self.hostname, self.port)
        self.db = self.client[self.db_name]
        self.collection = self.db[self.collection_name]
        self.fs = gridfs.GridFS(self.db)

    def _close(self):
        # Close the underlying MongoClient connection pool.
        self.client.close()

    def __del__(self):
        self._close()

    # Public methods: ---------------------------------------------------------

    def save(self, document):
        """Store a dictionary or list of dictionaries as a document in collection.

        The collection is specified in the initialization of the object.
        Note that if the dictionary has an '_id' field, and a document in the
        collection has the same '_id' key-value pair, that object will be
        overwritten. Any tensors will be stored in the gridFS,
        replaced with ObjectId pointers, and a list of their ObjectIds will
        also be stored in the '_tensor_ids' key-value pair. If re-saving an
        object, the method will check for old gridfs objects and delete them.

        Args:
            document: dictionary of arbitrary size and structure,
            can contain tensors. Can also be a list of such objects.
        Returns:
            id_values: list of ObjectIds of the inserted object(s).
        """
        # Simplify things below by making even a single document a list.
        if not isinstance(document, list):
            document = [document]

        object_ids = []
        for doc in document:
            # TODO: Only Variables created explicitly by the user (graph leaves)
            # support the deepcopy protocol at the moment... Thus, a RuntimeError
            # is raised when Variables not created by the users are saved.
            doc_copy = copy.deepcopy(doc)

            # Make a list of any existing referenced gridfs files.
            try:
                self._old_tensor_ids = doc_copy['_tensor_ids']
            except KeyError:
                self._old_tensor_ids = []

            self._new_tensor_ids = []

            # Replace tensors with either a new gridfs file or a reference to
            # the old gridfs file.
            doc_copy = self._save_tensors(doc_copy)

            doc['_tensor_ids'] = self._new_tensor_ids
            doc_copy['_tensor_ids'] = self._new_tensor_ids

            # Cleanup any remaining gridfs files (these used to be pointed to
            # by the document, but no longer match any tensor that is in the db).
            for old_id in self._old_tensor_ids:
                self.fs.delete(old_id)
            self._old_tensor_ids = []

            # Add insertion date field to every document.
            doc['insertion_date'] = datetime.datetime.now()
            doc_copy['insertion_date'] = datetime.datetime.now()

            # Insert into the collection and restore full data into the
            # original document object.
            doc_copy = self._mongoify(doc_copy)
            new_id = self.collection.save(doc_copy)
            doc['_id'] = new_id
            object_ids.append(new_id)

        return object_ids

    def load_from_ids(self, ids):
        """Convenience function to load from a list of ObjectIds or from their
        string representations. Takes a singleton or a list of either type.

        Args:
            ids: can be an ObjectId, string representation of an ObjectId,
            or a list containing items of either type.
        Returns:
            out: list of documents from the DB. If a document w/the object
            did not exist, a None object is returned instead.
        """
        if not isinstance(ids, list):
            ids = [ids]

        out = []
        for raw_id in ids:
            if isinstance(raw_id, ObjectId):
                obj_id = raw_id
            else:
                # BUG FIX: previously only str/unicode were converted and any
                # other type silently reused the obj_id from the previous loop
                # iteration (or raised NameError on the first one).
                # NOTE(review): ObjectId() raises bson InvalidId (a ValueError)
                # for malformed strings, which is deliberately not caught here,
                # matching the original behavior.
                try:
                    obj_id = ObjectId(raw_id)
                except TypeError:
                    obj_id = raw_id
            out.append(self.load({'_id': obj_id}))

        return out

    def load(self, query, get_tensors=True):
        """Perform a search using the presented query.

        Args:
            query: dictionary of key-value pairs to use for querying the mongodb
            get_tensors: when True, gridfs-backed tensors referenced by the
            results are fetched and restored in place.
        Returns:
            a single document for exactly one match, a list of documents for
            multiple matches, or None when nothing matched.
        """
        query = self._mongoify(query)
        results = self.collection.find(query)

        if get_tensors:
            all_results = [self._de_mongoify(
                self._load_tensor(doc)) for doc in results]
        else:
            all_results = [self._de_mongoify(doc) for doc in results]

        if not all_results:
            return None
        if len(all_results) == 1:
            return all_results[0]
        return all_results

    def delete(self, object_id):
        """Delete a specific document from the collection based on the objectId.

        Note that it first deletes all the gridFS files pointed to by ObjectIds
        within the document. Use with caution, clearly.

        Args:
            object_id: an id of an object in the database.
        """
        document_to_delete = self.collection.find_one({"_id": object_id})
        tensors_to_delete = document_to_delete['_tensor_ids']
        for tensor_id in tensors_to_delete:
            self.fs.delete(tensor_id)
        self.collection.remove(object_id)

    # Private methods ---------------------------------------------------------

    def _tensor_to_binary(self, tensor):
        """Utility method to turn a tensor/array into a BSON Binary string.

        Called by _save_tensors.

        Args:
            tensor: tensor of arbitrary dimension.
        Returns:
            BSON Binary object, a pickled tensor.
        """
        return Binary(pickle.dumps(tensor, protocol=2), subtype=128)

    def _binary_to_tensor(self, binary):
        """Convert a pickled tensor string back into a tensor.

        Called by _load_tensor.

        Args:
            binary: BSON Binary object, a pickled tensor.
        Returns:
            Tensor of arbitrary dimension.
        """
        return pickle.loads(binary)

    def _replace(self, document, replace='.', replacement='__'):
        """Replace `replace` in dictionary keys with `replacement`, recursively.

        Mongo forbids '.' in field names, hence the round-trip through '__'.
        """
        # BUG FIX: iterate over a snapshot of the items; popping/re-inserting
        # keys while iterating the live items() view raises RuntimeError on
        # Python 3 (dict changed during iteration).
        for key, value in list(document.items()):
            new_key = key.replace(replace, replacement)
            if isinstance(value, dict):
                document[new_key] = self._replace(document.pop(key),
                                                  replace=replace,
                                                  replacement=replacement)
            else:
                document[new_key] = document.pop(key)
        return document

    def _mongoify(self, document):
        # Make keys mongo-safe ('.' -> '__').
        return self._replace(document)

    def _de_mongoify(self, document):
        # Reverse of _mongoify ('__' -> '.').
        return self._replace(document, replace='__', replacement='.')

    def _load_tensor(self, document):
        """Replace ObjectIds with their corresponding gridFS data.

        Utility method to recurse through a document and gather all ObjectIds
        and replace them one by one with their corresponding data from the
        gridFS collection. Skips any entries with a key of '_id'.

        Note that it modifies the document in place.

        Args:
            document: dictionary-like document, storable in mongodb.
        Returns:
            document: dictionary-like document, storable in mongodb
            (or a torch Variable when the dict held '_Variable_data').
        """
        for key, value in document.items():
            if isinstance(value, ObjectId) and key != '_id':
                if key == '_Variable_data':
                    # A dict of the form {'_Variable_data': <id>} stands for a
                    # whole autograd Variable - the entire document is
                    # replaced, not just the key.
                    document = torch.autograd.Variable(
                        self._binary_to_tensor(self.fs.get(value).read()))
                else:
                    document[key] = self._binary_to_tensor(
                        self.fs.get(value).read())
            elif isinstance(value, dict):
                document[key] = self._load_tensor(value)
        return document

    def _save_tensors(self, document):
        """Replace tensors with a reference to their location in gridFS.

        Utility method to recurse through a document and replace all tensors
        and store them in the gridfs, replacing the actual tensors with
        references to the gridfs path.

        Called by save().

        Note that it modifies the document in place, although we return it, too.

        Args:
            document: dictionary like-document, storable in mongodb.
        Returns:
            document: dictionary like-document, storable in mongodb.
        """
        for key, value in document.items():
            if isinstance(value, torch.autograd.Variable):
                # Wrap Variables so loading can reconstruct them (see
                # _load_tensor's '_Variable_data' special case).
                value = {'_Variable_data': value.data}
            if isinstance(value, np.ndarray) or torch.is_tensor(value):
                data_BSON = self._tensor_to_binary(value)
                data_MD5 = hashlib.md5(data_BSON).hexdigest()

                # Does this tensor match the hash of anything already stored
                # for this object? If so, reuse the existing gridfs file.
                match = False
                # BUG FIX: iterate over a copy - the loop removes items from
                # self._old_tensor_ids, and mutating a list while iterating
                # it skips elements. 'break' stops after the first match.
                for tensor_id in list(self._old_tensor_ids):
                    print('Checking if {} is already in the db... '.format(tensor_id))
                    if data_MD5 == self.fs.get(tensor_id).md5:
                        match = True
                        document[key] = tensor_id
                        self._old_tensor_ids.remove(tensor_id)
                        self._new_tensor_ids.append(tensor_id)
                        break
                if not match:
                    tensor_id = self.fs.put(self._tensor_to_binary(value))
                    document[key] = tensor_id
                    self._new_tensor_ids.append(tensor_id)
            elif isinstance(value, dict):
                document[key] = self._save_tensors(value)
            elif isinstance(value, np.number):
                # BSON cannot encode numpy scalars - downcast to builtins.
                if isinstance(value, np.integer):
                    document[key] = int(value)
                elif isinstance(value, np.inexact):
                    document[key] = float(value)
        return document
| |
from os import path
from django.template import Template, Context, TemplateSyntaxError
try:
from PIL import Image
except ImportError:
import Image
from django.core.files import storage as django_storage
from easy_thumbnails import alias, storage
from easy_thumbnails.conf import settings
from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.tests import utils as test
class Base(test.BaseTest):
    """Shared fixture for thumbnail template-tag tests: a temporary storage
    holding one saved test image, plus helpers to render tag templates and to
    verify generated thumbnail files."""

    def setUp(self):
        super(Base, self).setUp()
        self.storage = test.TemporaryStorage()

        # Save a test image.
        self.filename = self.create_image(self.storage, 'test.jpg')

        # Required so that IOError's get wrapped as TemplateSyntaxError
        settings.TEMPLATE_DEBUG = True

    def tearDown(self):
        self.storage.delete_temporary_storage()
        super(Base, self).tearDown()

    def render_template(self, source):
        thumbnailer = get_thumbnailer(self.storage, self.filename)
        thumbnailer.thumbnail_storage = self.storage
        context = Context({
            'source': thumbnailer,
            'storage': self.storage,
            'filename': self.filename,
            'invalid_filename': 'not%s' % self.filename,
            'size': (90, 100),
            'invalid_size': (90, 'fish'),
            'strsize': '80x90',
            'invalid_strsize': ('1notasize2'),
            'invalid_q': 'notanumber'})
        return Template('{% load thumbnail %}' + source).render(context)

    def verify_thumbnail(self, expected_size, options, source_filename=None,
                         transparent=False):
        if source_filename is None:
            source_filename = self.filename
        self.assertTrue(isinstance(options, dict))

        # The thumbnail file must exist in storage...
        thumbnailer = get_thumbnailer(self.storage, source_filename)
        expected_filename = thumbnailer.get_thumbnail_name(
            options, transparent=transparent)
        self.assertTrue(
            self.storage.exists(expected_filename),
            'Thumbnail file %r not found' % expected_filename)

        # ...and have the expected dimensions.
        image = Image.open(self.storage.open(expected_filename))
        self.assertEqual(image.size, expected_size)
        return expected_filename
class ThumbnailTagTest(Base):
    """Tests for the {% thumbnail %} template tag itself."""
    restore_settings = ['THUMBNAIL_DEBUG', 'TEMPLATE_DEBUG']
    def testTagInvalid(self):
        """Invalid tag usage: silent '' with THUMBNAIL_DEBUG=False,
        raised errors with THUMBNAIL_DEBUG=True."""
        # No args, or wrong number of args
        src = '{% thumbnail %}'
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        src = '{% thumbnail source %}'
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        src = '{% thumbnail source 80x80 as variable crop %}'
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        # Invalid option
        src = '{% thumbnail source 240x200 invalid %}'
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        # Old comma separated options format can only have an = for quality
        src = '{% thumbnail source 80x80 crop=1,quality=1 %}'
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        # Invalid quality
        src_invalid = '{% thumbnail source 240x200 quality=invalid_q %}'
        src_missing = '{% thumbnail source 240x200 quality=missing_q %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src_invalid), '')
        self.assertEqual(self.render_template(src_missing), '')
        # ...and with THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(TemplateSyntaxError, self.render_template,
                          src_invalid)
        self.assertRaises(TemplateSyntaxError, self.render_template,
                          src_missing)
        # Invalid source
        src = '{% thumbnail invalid_source 80x80 %}'
        src_on_context = '{% thumbnail invalid_source 80x80 as thumb %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src), '')
        # ...and with THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        self.assertRaises(TemplateSyntaxError, self.render_template,
                          src_on_context)
        # Non-existant source
        src = '{% thumbnail non_existant_source 80x80 %}'
        src_on_context = '{% thumbnail non_existant_source 80x80 as thumb %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src), '')
        # ...and with THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        # Invalid size as a tuple:
        src = '{% thumbnail source invalid_size %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src), '')
        # ...and THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(ValueError, self.render_template, src)
        # Invalid size as a string:
        src = '{% thumbnail source invalid_strsize %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src), '')
        # ...and THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
        # Non-existant size
        src = '{% thumbnail source non_existant_size %}'
        # ...with THUMBNAIL_DEBUG = False
        settings.THUMBNAIL_DEBUG = False
        self.assertEqual(self.render_template(src), '')
        # ...and THUMBNAIL_DEBUG = True
        settings.THUMBNAIL_DEBUG = True
        self.assertRaises(TemplateSyntaxError, self.render_template, src)
    def testTag(self):
        """Valid tag usage: literal sizes, context-variable sizes,
        'as var' assignment, options and quality."""
        # Set THUMBNAIL_DEBUG = True to make it easier to trace any failures
        settings.THUMBNAIL_DEBUG = True
        # Basic
        output = self.render_template(
            'src="{% thumbnail source 240x240 %}"')
        expected = self.verify_thumbnail((240, 180), {'size': (240, 240)})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
        # Size from context variable
        # as a tuple:
        output = self.render_template(
            'src="{% thumbnail source size %}"')
        expected = self.verify_thumbnail((90, 68), {'size': (90, 100)})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
        # as a string:
        output = self.render_template(
            'src="{% thumbnail source strsize %}"')
        expected = self.verify_thumbnail((80, 60), {'size': (80, 90)})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
        # On context
        output = self.render_template(
            'height:{% thumbnail source 240x240 as thumb %}{{ thumb.height }}')
        self.assertEqual(output, 'height:180')
        # With options and quality
        output = self.render_template(
            'src="{% thumbnail source 240x240 sharpen crop quality=95 %}"')
        # Note that the opts are sorted to ensure a consistent filename.
        expected = self.verify_thumbnail(
            (240, 240),
            {'size': (240, 240), 'crop': True, 'sharpen': True, 'quality': 95})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
        # With option and quality on context (also using its unicode method to
        # display the url)
        output = self.render_template(
            '{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}'
            'width:{{ thumb.width }}, url:{{ thumb.url }}')
        self.assertEqual(output, 'width:240, url:%s' % expected_url)
        # One dimensional resize
        output = self.render_template('src="{% thumbnail source 100x0 %}"')
        expected = self.verify_thumbnail((100, 75), {'size': (100, 0)})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
    def test_high_resolution(self):
        """The HIGH_RESOLUTION option also writes an '@2x' variant."""
        output = self.render_template(
            'src="{% thumbnail source 80x80 HIGH_RESOLUTION %}"')
        expected = self.verify_thumbnail((80, 60), {'size': (80, 80)})
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, 'src="%s"' % expected_url)
        base, ext = path.splitext(expected)
        hires_thumb_file = ''.join([base + '@2x', ext])
        self.assertTrue(
            self.storage.exists(hires_thumb_file), hires_thumb_file)
class ThumbnailerBase(Base):
    """Fixture that installs a 'small' thumbnail alias and swaps both the
    Django default storage and the easy_thumbnails default storage for the
    temporary test storage, restoring them afterwards."""
    restore_settings = ['THUMBNAIL_ALIASES', 'THUMBNAIL_MEDIA_ROOT']
    def setUp(self):
        super(ThumbnailerBase, self).setUp()
        settings.THUMBNAIL_MEDIA_ROOT = self.storage.path('')
        settings.THUMBNAIL_ALIASES = {
            '': {
                'small': {'size': (20, 20), 'crop': True},
            },
        }
        # Aliases are cached; reload them from the settings just set.
        alias.aliases.populate_from_settings()
        # Make the temporary storage location the default storage for now.
        self._old_default_storage = django_storage.default_storage._wrapped
        django_storage.default_storage._wrapped = self.storage
        self._old_thumbnail_default_storage = storage.thumbnail_default_storage
        storage.thumbnail_default_storage = self.storage
    def tearDown(self):
        # Put the default storage back how we found it.
        storage.thumbnail_default_storage = self._old_thumbnail_default_storage
        django_storage.default_storage._wrapped = self._old_default_storage
        super(ThumbnailerBase, self).tearDown()
        # Repopulate the aliases (setting reverted by super)
        alias.aliases.populate_from_settings()
class ThumbnailerFilterTest(ThumbnailerBase):
    """Tests for the |thumbnailer template filter."""
    def test_get(self):
        """Filtering a filename yields a thumbnailer whose alias
        attribute generates the aliased thumbnail."""
        src = (
            '{% with t=filename|thumbnailer %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        expected = self.verify_thumbnail(
            (20, 20), settings.THUMBNAIL_ALIASES['']['small'])
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, expected_url)
    def test_relative_name(self):
        """The filter also accepts a storage with the filename as arg."""
        src = (
            '{% with t=storage|thumbnailer:filename %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        expected = self.verify_thumbnail(
            (20, 20), settings.THUMBNAIL_ALIASES['']['small'])
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, expected_url)
    def test_invalid(self):
        """A missing source file renders as the empty string."""
        src = (
            '{% with t=invalid_filename|thumbnailer %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        self.assertEqual(output, '')
class ThumbnailerPassiveFilterTest(ThumbnailerBase):
    """Tests for |thumbnailer_passive: a thumbnailer that only serves
    pre-generated thumbnails and never creates new ones."""
    def test_check_generate(self):
        """The passive thumbnailer reports generate=False."""
        src = (
            '{% with t=filename|thumbnailer_passive %}'
            '{{ t.generate }}{% endwith %}'
        )
        output = self.render_template(src)
        self.assertEqual(output, 'False')
    def test_get_existing(self):
        """A thumbnail generated beforehand is served normally."""
        options = settings.THUMBNAIL_ALIASES['']['small']
        # Pregenerate the thumbnail.
        get_thumbnailer(self.storage, self.filename).get_thumbnail(options)
        src = (
            '{% with t=filename|thumbnailer_passive %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        expected = self.verify_thumbnail((20, 20), options)
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, expected_url)
    def test_get_missing(self):
        """An un-generated thumbnail renders as '' (never generated)."""
        src = (
            '{% with t=filename|thumbnailer_passive %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        self.assertEqual(output, '')
    def test_invalid(self):
        """A missing source file renders as the empty string."""
        src = (
            '{% with t=invalid_filename|thumbnailer_passive %}'
            '{{ t.small.url }}{% endwith %}'
        )
        output = self.render_template(src)
        self.assertEqual(output, '')
class ThumbnailTagAliasTest(ThumbnailerBase):
    """Tests for {% thumbnail %} when the size argument is a quoted
    alias name defined in THUMBNAIL_ALIASES."""
    def assertCorrectOutput(self, src, alias_name, **overrides):
        """Render *src* and assert it produced the aliased thumbnail,
        with *overrides* applied on top of the alias options."""
        # Copy the alias options before updating: mutating the dict held
        # in settings.THUMBNAIL_ALIASES in place would leak the overrides
        # into every later use of the same alias within a test.
        options = dict(settings.THUMBNAIL_ALIASES[''][alias_name])
        options.update(overrides)
        output = self.render_template(src)
        expected = self.verify_thumbnail(options['size'], options)
        expected_url = ''.join((settings.MEDIA_URL, expected))
        self.assertEqual(output, expected_url)
    def test_invalid_alias_name(self):
        """An unknown alias renders as the empty string."""
        self.assertEqual(
            self.render_template('{% thumbnail filename "notanalias" %}'),
            ''
        )
    def test_correct_alias(self):
        self.assertCorrectOutput('{% thumbnail filename "small" %}', 'small')
    def test_alias_overrides(self):
        """Extra tag options are merged on top of the alias options."""
        self.assertCorrectOutput(
            '{% thumbnail filename "small" upscale %}',
            'small',
            upscale=True,
        )
        self.assertCorrectOutput(
            '{% thumbnail filename "small" upscale bw %}',
            'small',
            bw=True,
            upscale=True,
        )
class ThumbnailerDataUriTest(ThumbnailerBase):
    """Tests for the |data_uri filter."""
    def test_data_uri(self):
        """A thumbnail piped through |data_uri renders as a base64 data:
        URI; only the deterministic JPEG-header prefix is compared."""
        src = (
            '{% thumbnail source 25x25 as thumb %}'
            '{{ thumb|data_uri }}'
        )
        output = self.render_template(src)[:64]
        startswith = 'data:application/octet-stream;base64,/9j/4AAQSkZJRgABAQAAAQABAAD'
        self.assertEqual(output, startswith)
| |
#!/usr/bin/python
# David Wu
# todo: add filters,
# change for different kernel versions (i.e. 3.18 BR_TRANSACTION logging)
#
# line numbers from http://androidxref.com/kernel_3.4/xref/drivers/staging/android/binder.c
'''
-d see kernel debug messages (i.e. binder printks)
followed by the number for types of debug messages to see
i.e. -d1 -d9 would show BINDER_DEBUG_USER_ERROR and BINDER_DEBUG_TRANSACTION
0 see BINDER_DEBUG_USER_ERROR
1 see BINDER_DEBUG_FAILED_TRANSACTION
2 see BINDER_DEBUG_DEAD_TRANSACTION
3 see BINDER_DEBUG_OPEN_CLOSE
4 see BINDER_DEBUG_DEAD_BINDER
5 see BINDER_DEBUG_DEATH_NOTIFICATION
6 see BINDER_DEBUG_READ_WRITE
7 see BINDER_DEBUG_USER_REFS
8 see BINDER_DEBUG_THREADS
9 see BINDER_DEBUG_TRANSACTION
10 see BINDER_DEBUG_TRANSACTION_COMPLETE
11 see BINDER_DEBUG_FREE_BUFFER
12 see BINDER_DEBUG_INTERNAL_REFS
13 see BINDER_DEBUG_BUFFER_ALLOC
14 see BINDER_DEBUG_PRIORITY_CAP
15 see BINDER_DEBUG_BUFFER_ALLOC_ASYNC
'''
import os, sys, getopt, time
import subprocess
from subprocess import Popen, PIPE
import datetime
from datetime import timedelta
# set at the beginning of running, used to print human readable timestamps
# Both are assigned in PrettyPrint(): the wall-clock time when tracing
# started, and the dmesg timestamp (string) of the first binder line seen.
# translateTimestamp() uses them to map kernel timestamps to wall-clock.
startingSystemTime = ""
startingTimestamp = ""
#[ 2159.006957] binder: 188:276 BR_TRANSACTION 325830 14054:14054, cmd -2144833022size 100-0 ptr b6982028-b698208c
def translateLog(line):
    """Dispatch one raw dmesg binder line to the matching translator.

    The leading "[ seconds.micros]" timestamp is stripped, converted to
    local wall-clock time via translateTimestamp(), and passed along with
    the remaining message text to the translator chosen by substring match.
    Empty lines are ignored; unrecognized lines print "not found".
    """
    if line == "":
        return
    timestamp = line[1:line.find(']')]
    timestamp = timestamp.strip() # handle 1-4 digit timestamps
    timestamp = translateTimestamp(timestamp)
    line = line[line.find(']')+2:] # strip the timestamp
    # BINDER_DEBUG_OPEN_CLOSE
    if "binder_open" in line:
        translateBinderOpen(line, timestamp)
    elif "binder_mmap" in line:
        translateBinderMmap(line, timestamp)
    elif "binder_flush" in line:
        translateBinderFlush(line, timestamp)
    elif "binder_release" in line and "active" in line:
        translateBinderRelease(line, timestamp)
    elif "open vm area" in line:
        translateBinderOpenVma(line, timestamp)
    elif "close vm area" in line:
        translateBinderCloseVma(line, timestamp)
    # BINDER_DEBUG_TRANSACTION
    elif "BR_TRANSACTION" in line and "cmd" in line:
        translateBinderReturn(line, timestamp)
    elif "BR_REPLY" in line:
        translateBinderReturn(line, timestamp)
    elif "BC_TRANSACTION" in line:
        translateBinderCommandTransaction(line, timestamp)
    elif "BC_REPLY" in line:
        translateBinderCommandReply(line, timestamp)
    elif "buffer release" in line:
        translateBinderBufferRelease(line, timestamp)
    # BINDER_DEBUG_READ_WRITE
    elif "write" in line:
        traslateBinderWrite(line, timestamp)
    elif "wrote" in line:
        translateBinderWrote(line, timestamp)
    else:
        # print() call instead of the Python-2-only print statement the
        # original used here; the rest of the file already uses print().
        print("not found")
# binder.c#2937
# binder_open: 18298:18298
# binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
# current->group_leader->pid, current->pid);
def translateBinderOpen(line, timestamp):
    """Pretty-print a BINDER_DEBUG_OPEN_CLOSE "binder_open" line."""
    # Everything after "binder_open: " is "<group-leader-pid>:<pid>".
    payload = line[line.find(':') + 2:]
    leaderPid, _, currentPid = payload.partition(':')
    print("[%s] binder_open: group leader pid %s, current pid %s (%s)" %
          (timestamp, leaderPid, currentPid, getProcessNameFor(currentPid)))
# binder.c#2848
# binder_mmap: 18298 ae942000-aea40000 (1016 K) vma 200071 pagep 79f
# binder_debug(BINDER_DEBUG_OPEN_CLOSE,
# "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
# proc->pid, vma->vm_start, vma->vm_end,
# (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
# (unsigned long)pgprot_val(vma->vm_page_prot));
def translateBinderMmap(line, timestamp):
    """Pretty-print a BINDER_DEBUG_OPEN_CLOSE "binder_mmap" line."""
    tokens = line.split(' ')
    procPid = tokens[1]
    # tokens[2] is "<start>-<end>"; tokens[3] is "(<size>" (unit and the
    # closing paren sit in the next token, so only the '(' is stripped).
    areaStart, _, areaEnd = tokens[2].partition('-')
    sizeKb = tokens[3][1:]
    vmFlags = tokens[6]
    pageProt = tokens[8]
    print ("[%s] binder_mmap: pid %s (%s) mapped addr %s-%s, size %s, flags %s, prot %s" %
        (timestamp, procPid, getProcessNameFor(procPid), areaStart, areaEnd,
         sizeKb, vmFlags, pageProt))
# binder.c#2992
# binder_flush: 18298 woke 2 threads
# binder_debug(BINDER_DEBUG_OPEN_CLOSE,
# "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
def translateBinderFlush(line, timestamp):
    """Pretty-print a BINDER_DEBUG_OPEN_CLOSE "binder_flush" line."""
    tokens = line.split(' ')
    procPid, wokeCount = tokens[1], tokens[3]
    print("[%s] binder_flush: pid %s (%s) woke %s threads" %
          (timestamp, procPid, getProcessNameFor(procPid), wokeCount))
# binder.c#3120
# binder_release: 18298 threads 3, nodes 1 (ref 0), refs 2, active transactions 0, buffers 0, pages 1
# binder_debug(BINDER_DEBUG_OPEN_CLOSE,
# "binder_release: %d threads %d, nodes %d (ref %d), "
# "refs %d, active transactions %d, buffers %d, pages %d\n",
# proc->pid, threads, nodes, incoming_refs, outgoing_refs,
# active_transactions, buffers, page_count);
def translateBinderRelease(line, timestamp):
    """Pretty-print a BINDER_DEBUG_OPEN_CLOSE "binder_release" summary."""
    t = line.split(' ')
    # Field positions follow the fixed printk format; the trailing slices
    # drop the punctuation printk glues onto each count.
    procPid = t[1]
    threads = t[3][:-1]     # "3,"  -> "3"
    nodes = t[5]
    incomingRefs = t[7][:-2]  # "0)," -> "0"
    outgoingRefs = t[9][:-1]
    activeTxns = t[12][:-1]
    buffers = t[14][:-1]
    pages = t[16]
    print("[%s] binder_release: pid %s (%s) released %s threads, %s nodes, %s incoming refs, %s outgoing refs, %s active transactions, %s buffers, %s pages" %
        (timestamp, procPid, getProcessNameFor(procPid), threads, nodes,
         incomingRefs, outgoingRefs, activeTxns, buffers, pages))
# binder.c#2812
# binder_debug(BINDER_DEBUG_OPEN_CLOSE,
# "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
# proc->pid, vma->vm_start, vma->vm_end,
# (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
# (unsigned long)pgprot_val(vma->vm_page_prot));
def translateBinderOpenVma(line, timestamp):
    """Pretty-print the "binder: <pid> open vm area ..." debug line."""
    tokens = line.split(' ')
    procPid = tokens[1]
    areaStart, _, areaEnd = tokens[5].partition('-')
    sizeKb = tokens[6][1:]  # strip the leading '('
    print("[%s] binder: pid %s (%s) opened vm area addr %s-%s, size %s, flags %s, prot %s" %
        (timestamp, procPid, getProcessNameFor(procPid), areaStart, areaEnd,
         sizeKb, tokens[9], tokens[11]))
# binder.c#2822
# binder: 12098 close vm area ae942000-aea40000 (1016 K) vma 2220051 pagep 79f
# binder_debug(BINDER_DEBUG_OPEN_CLOSE,
# "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
# proc->pid, vma->vm_start, vma->vm_end,
# (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
# (unsigned long)pgprot_val(vma->vm_page_prot));
def translateBinderCloseVma(line, timestamp):
    """Pretty-print the "binder: <pid> close vm area ..." debug line."""
    tokens = line.split(' ')
    procPid = tokens[1]
    areaStart, _, areaEnd = tokens[5].partition('-')
    sizeKb = tokens[6][1:]  # strip the leading '('
    print("[%s] binder: pid %s (%s) closed vm area addr %s-%s, size %s, flags %s, prot %s" %
        (timestamp, procPid, getProcessNameFor(procPid), areaStart, areaEnd,
         sizeKb, tokens[9], tokens[11]))
# binder.c#2496
# binder: 188:276 BR_TRANSACTION 325830 14054:14054, cmd -2144833022size 100-0 ptr b6982028-b698208c
# binder_debug(BINDER_DEBUG_TRANSACTION,
# "binder: %d:%d %s %d %d:%d, cmd %d"
# "size %zd-%zd ptr %p-%p\n",
# proc->pid, thread->pid,
# (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
# t->debug_id, t->from ? t->from->proc->pid : 0,
# t->from ? t->from->pid : 0, cmd,
# t->buffer->data_size, t->buffer->offsets_size,
# tr.data.ptr.buffer, tr.data.ptr.offsets);
def translateBinderReturn(line, timestamp):
    """Pretty-print a BR_TRANSACTION / BR_REPLY line (binder.c#2496).

    Fields are extracted positionally from the fixed printk format:
    "binder: P:T <cmd> <debug_id> FP:FT, cmd <int>size D-O ptr B-F".
    """
    splitLine = line.split(' ')
    pid = splitLine[1]                    # "proc:thread"
    c = pid.find(':')
    procPid = pid[:c]
    threadPid = pid[c+1:]
    cmd = splitLine[2]                    # "BR_TRANSACTION" or "BR_REPLY"
    transactionDebugId = splitLine[3]
    fromPid = splitLine[4]                # "fromProc:fromThread," (0:0 for replies)
    c2 = fromPid.find(':')
    fromProcPid = fromPid[:c2]
    fromThreadPid = fromPid[c2+1:-1]      # drop trailing ','
    # The printk format has no space between the cmd int and "size", so
    # the token is "<int>size"; [:-4] removes the fused "size" suffix.
    cmdUInt = splitLine[6][:-4] # kernel 3.4 specific
    bufferSize = splitLine[7]             # "<data>-<offsets>"
    bufferDataSize = bufferSize[:bufferSize.find('-')]
    bufferOffsetsSize = bufferSize[bufferSize.find('-')+1:]
    bufferAddresses = splitLine[9]        # "<data ptr>-<offsets ptr>"
    bufferDataAddress = bufferAddresses[:bufferAddresses.find('-')]
    bufferOffsetsAddress = bufferAddresses[bufferAddresses.find('-')+1:]
    fromString = "process pid " + fromProcPid + " (" + getProcessNameFor(fromProcPid) + "), thread pid " + fromThreadPid
    # The kernel logs from-pid 0 when there is no originating thread.
    if fromProcPid == "0":
        fromString = "n/a"
    print("[%s] binder_return %s: process pid %s (%s), thread pid %s, from %s, \
transaction id %s, command value %s, data address %s, data size %s, offsets address %s, offsets size %s" %
    (timestamp, cmd, procPid, getProcessNameFor(procPid), threadPid, fromString, transactionDebugId,
    cmdUInt, bufferDataAddress, bufferDataSize, bufferOffsetsAddress, bufferOffsetsSize))
# binder.c#1542
# binder: 188:8898 BC_REPLY 1449663 -> 635:1046, data (null)- (null) size 0-0
# binder: 635:25898 BC_REPLY 8134681 -> 25641:25641, data 94364740- (null) size 8-0
# binder_debug(BINDER_DEBUG_TRANSACTION,
# "binder: %d:%d BC_REPLY %d -> %d:%d, "
# "data %p-%p size %zd-%zd\n",
# proc->pid, thread->pid, t->debug_id,
# target_proc->pid, target_thread->pid,
# tr->data.ptr.buffer, tr->data.ptr.offsets,
# tr->data_size, tr->offsets_size);
def translateBinderCommandReply(line, timestamp):
    """Pretty-print a BC_REPLY line (binder.c#1542).

    Format: "binder: P:T BC_REPLY <id> -> TP:TT, data B-O size D-O".
    "(null)" pointers are normalized to the string "null".
    """
    splitLine = line.split(' ')
    sender = splitLine[1]                 # "proc:thread"
    senderPid = sender[:sender.find(':')]
    senderThread = sender[sender.find(':')+1:]
    debugId = splitLine[3]
    target = splitLine[5]                 # "targetProc:targetThread,"
    targetPid = target[:target.find(':')]
    targetThread = target[target.find(':')+1:]
    # Slice between "data" and "size" rather than splitting on spaces:
    # "(null)" pointers contain a space ("- (null)") that would shift tokens.
    addrs = line[line.find('data') : line.find('size')]
    bufferAddr = addrs[addrs.find(' ') : addrs.find('-')].strip()
    offsetsAddr = addrs[addrs.find('-')+1:].strip()
    if "null" in bufferAddr:
        bufferAddr = "null"
    if "null" in offsetsAddr:
        offsetsAddr = "null"
    sizes = line[line.find('size'):]
    bufferSize = sizes[sizes.find(' ') : sizes.find('-')].strip()
    offsetsSize = sizes[sizes.find('-')+1:].strip()
    # NOTE(review): find('size') + 1 + len(sizes) equals len(line) + 1, so
    # the extras search always starts past the end of the line and extra
    # is always '' (the node/ref annotations are separate printk lines).
    extra = translateBinderCommandExtras(line, line.find('size')+1+len(sizes))
    print("[%s] binder_command BC_REPLY: process pid %s (%s), thread pid %s -> process pid %s (%s), thread pid %s \
transaction id %s, data address %s, data size %s, offsets address %s, offsets size %s %s" %
    (timestamp, senderPid, getProcessNameFor(senderPid), senderThread, targetPid, getProcessNameFor(targetPid),
    targetThread, debugId, bufferAddr, bufferSize, offsetsAddr, offsetsSize, extra))
# binder.c#1550
# binder: 635:653 BC_TRANSACTION 1449664 -> 188 - node 6351, data 9cb20400- (null) size 80-0
# binder_debug(BINDER_DEBUG_TRANSACTION,
# "binder: %d:%d BC_TRANSACTION %d -> "
# "%d - node %d, data %p-%p size %zd-%zd\n",
# proc->pid, thread->pid, t->debug_id,
# target_proc->pid, target_node->debug_id,
# tr->data.ptr.buffer, tr->data.ptr.offsets,
# tr->data_size, tr->offsets_size);
def translateBinderCommandTransaction(line, timestamp):
    """Pretty-print a BC_TRANSACTION line (binder.c#1550).

    Format: "binder: P:T BC_TRANSACTION <id> -> TP - node N, data B-O size D-O".
    "(null)" pointers are normalized to the string "null".
    """
    splitLine = line.split(' ')
    sender = splitLine[1]                 # "proc:thread"
    senderPid = sender[:sender.find(':')]
    senderThread = sender[sender.find(':')+1:]
    debugId = splitLine[3]
    targetPid = splitLine[5]
    targetNodeDebugId = splitLine[8]
    # Slice between "data" and "size" rather than splitting on spaces:
    # "(null)" pointers contain a space ("- (null)") that would shift tokens.
    addrs = line[line.find('data') : line.find('size')]
    bufferAddr = addrs[addrs.find(' ') : addrs.find('-')].strip()
    offsetsAddr = addrs[addrs.find('-')+1:].strip()
    if "null" in bufferAddr:
        bufferAddr = "null"
    if "null" in offsetsAddr:
        offsetsAddr = "null"
    sizes = line[line.find('size'):]
    bufferSize = sizes[sizes.find(' ') : sizes.find('-')].strip()
    offsetsSize = sizes[sizes.find('-')+1:].strip()
    # NOTE(review): find('size') + 1 + len(sizes) equals len(line) + 1, so
    # the extras search always starts past the end of the line and extra
    # is always '' (the node/ref annotations are separate printk lines).
    extra = translateBinderCommandExtras(line, line.find('size')+1+len(sizes))
    print("[%s] binder_command BC_TRANSACTION: process pid %s (%s), thread pid %s -> process pid %s (%s), node id %s \
transaction id %s, data address %s, data size %s, offsets address %s, offsets size %s %s" %
    (timestamp, senderPid, getProcessNameFor(senderPid), senderThread, targetPid, getProcessNameFor(targetPid),
    targetNodeDebugId, debugId, bufferAddr, bufferSize, offsetsAddr, offsetsSize, extra))
# binder_debug(BINDER_DEBUG_TRANSACTION,
# " node %d u%p -> ref %d desc %d\n",
# node->debug_id, node->ptr, ref->debug_id,
# ref->desc);
# binder_debug(BINDER_DEBUG_TRANSACTION,
# " ref %d desc %d -> node %d u%p\n",
# ref->debug_id, ref->desc, ref->node->debug_id,
# ref->node->ptr);
# binder_debug(BINDER_DEBUG_TRANSACTION,
# " ref %d desc %d -> ref %d desc %d (node %d)\n",
# ref->debug_id, ref->desc, new_ref->debug_id,
# new_ref->desc, ref->node->debug_id);
def translateBinderCommandExtras(line, end):
    """Return the trailing "node ..." / "ref ..." annotation of a BC_* line.

    Searches *line* from offset *end* for the first occurrence of either
    "node" or "ref" and returns the rest of the line from that point, or
    "" when neither marker is present.

    Fixes two defects in the original implementation:
    * when "node" was absent but "ref" present, the comparison
      `pos2 < pos` with pos == -1 discarded the match and returned "";
    * an unreachable debug tail (print / time.sleep(5)) sat after the
      return statement.
    """
    markers = [p for p in (line.find('node', end), line.find('ref', end))
               if p != -1]
    if markers:
        return line[min(markers):]
    return ""
# binder.c:1332
# binder: 14054 buffer release 325831, size 0-0, failed at (null)
# binder_debug(BINDER_DEBUG_TRANSACTION,
# "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
# proc->pid, buffer->debug_id,
# buffer->data_size, buffer->offsets_size, failed_at);
# binder_debug(BINDER_DEBUG_TRANSACTION, " fd %ld\n", fp->handle);
# binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d (node %d)\n",
# ref->debug_id, ref->desc, ref->node->debug_id);
# binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p\n",
# node->debug_id, node->ptr);
def translateBinderBufferRelease(line, timestamp):
    """Pretty-print a BINDER_DEBUG_TRANSACTION "buffer release" line
    (binder.c:1332).

    Format: "binder: P buffer release <id>, size D-O, failed at <ptr>".
    A "(null)" failure pointer is reported as no failure; any trailing
    "fd ..." / "ref ..." / "node ..." annotation is appended verbatim.
    """
    splitLine = line.split(' ')
    pid = splitLine[1]
    debugId = splitLine[4][:-1]           # drop trailing ','
    size = splitLine[6]                   # "<data>-<offsets>,"
    sizeData = size[:size.find('-')]
    sizeOffsets = size[size.find('-')+1:-1]
    failedAt = line[line.find('at')+2:]
    if "null" in failedAt:
        failedAt = ""
    else:
        failedAt = ", " + failedAt
    # Optional trailing annotation: take the earliest marker that actually
    # occurs.  The original compared possibly-negative find() results
    # directly, which could either clobber a real match with -1 or drop
    # one entirely; it also left an unreachable print/time.sleep(5) debug
    # tail behind.  Both defects are removed here.
    end = line.find('at') + 2
    extra = ""
    markers = [p for p in (line.find('fd', end), line.find('ref', end),
                           line.find('node', end)) if p != -1]
    if markers:
        extra = line[min(markers):]
    print("[%s] binder: process pid %s (%s) buffer release id %s, data size %s, offsets size %s %s %s" %
        (timestamp, pid, getProcessNameFor(pid), debugId, sizeData, sizeOffsets, failedAt, extra))
# binder.c#2707
# binder: 9489:9489 write 44 at acb0aa00, read 256 at acb0a500
# binder_debug(BINDER_DEBUG_READ_WRITE,
# "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
# proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
# bwr.read_size, bwr.read_buffer);
def traslateBinderWrite(line, timestamp):
    """Pretty-print a BINDER_DEBUG_READ_WRITE "write ... read ..." line.

    (The misspelled name is kept as-is: translateLog dispatches to it
    by this exact name.)
    """
    tokens = line.split(' ')
    procPid, _, threadPid = tokens[1].partition(':')
    writeBytes, writeAddr = tokens[3], tokens[5]
    readBytes, readAddr = tokens[7], tokens[9]
    print("[%s] binder: process pid %s (%s), thread pid %s, writing %s bytes at addr %s reading %s bytes at addr %s" %
        (timestamp, procPid, getProcessNameFor(procPid), threadPid,
         writeBytes, writeAddr, readBytes, readAddr))
# binder.c#2733
# binder: 635:646 wrote 8 of 8, read return 48 of 256
# binder_debug(BINDER_DEBUG_READ_WRITE,
# "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
# proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
# bwr.read_consumed, bwr.read_size);
def translateBinderWrote(line, timestamp):
    """Pretty-print a BINDER_DEBUG_READ_WRITE "wrote X of Y, read return" line."""
    tokens = line.split(' ')
    procPid, _, threadPid = tokens[1].partition(':')
    wroteBytes = tokens[3]
    writeTotal = tokens[5][:-1]  # drop trailing ','
    readBytes = tokens[8]
    readTotal = tokens[10]
    print("[%s] binder: process pid %s (%s), thread pid %s, wrote %s of %s bytes, read %s of %s bytes" %
        (timestamp, procPid, getProcessNameFor(procPid), threadPid,
         wroteBytes, writeTotal, readBytes, readTotal))
# [122214.186086] [seconds.milliseconds]
# time printed will be based on local system time (i.e. computer time, not android time)
def translateTimestamp(ts):
    """Map a kernel dmesg timestamp string to a wall-clock time object.

    Uses the module globals startingTimestamp / startingSystemTime
    (set in PrettyPrint) as the reference point; the result is based on
    the local computer's clock, not the device's.
    """
    elapsed = timedelta(seconds=float(ts) - float(startingTimestamp))
    return (startingSystemTime + elapsed).time()
def getProcessNameFor(pid):
    """Look up the process name for *pid* via `adb shell ps -p <pid>`.

    Returns the last whitespace-separated field of the line after the ps
    header (the NAME column), or "process exited" when ps prints nothing
    for the pid.  NOTE(review): assumes Python 2 str output from
    check_output; under Python 3 this would return bytes.
    """
    val = subprocess.check_output(["adb", "shell", "ps", "-p", str(pid)])
    # Drop the header line, then keep the last column of the data line.
    val = val[val.find('\n')+1:]
    val = val[val.rfind(' ')+1:]
    val = val.rstrip()
    if val == "":
        return "process exited"
    return val
# might be able to do some of the shell commands in python equivalents to speed it up
# might be able to do some of the shell commands in python equivalents to speed it up
def getDmesg():
    """Return the current `adb shell dmesg` output filtered to binder lines.

    Pipes `adb shell dmesg` through a local `grep binder` and returns the
    combined stdout as one string.
    """
    p1 = Popen(["adb", "shell", "dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "binder"], stdin=p1.stdout, stdout=PIPE)
    #p3 = Popen(["tail", "-r"], stdin=p2.stdout, stdout=PIPE)
    return p2.communicate()[0]
def getTimeStampFromLine(l):
    """Return the raw text between the first '[' and the next ']' in *l*
    (the dmesg timestamp, possibly space-padded)."""
    opening = l.find('[')
    return l[opening + 1:l.find(']', opening)]
def systemChecks():
    """Sanity-check the attached device before tracing.

    Verifies via adb that the kernel reports at least version 3.4 (the
    binder debug format this script parses) and that `su` is available;
    exits the program when adb fails or there is no root access.
    """
    # check for kernel version (and also adb shell access)
    val=""
    try:
        val = subprocess.check_output(["adb", "shell", "cat", "/proc/version"])
    except subprocess.CalledProcessError:
        sys.exit()
    # "/proc/version" starts with "Linux version <maj>.<minor>.<patch>-..."
    version = val[len("Linux version "):val.find('-')]
    if int(version[0]) < 3 or (int(version[2:][0:version[2:].find('.')]) < 4):
        # print() call replaces the Python-2-only print statement; the
        # %-format reproduces the old comma-separated output exactly.
        print("Linux kernel version %s is older than 3.4.0, logging may not be accurate!!" % version)
    try:
        val = subprocess.check_output(["adb", "shell", "su", "-c", "ls", "/data"])
    except subprocess.CalledProcessError:
        sys.exit()
    if "su: not found" in val:
        print("No root access!")
        sys.exit()
def generateDebugMask(l):
    """OR together the bit positions in *l* into a binder debug_mask int."""
    return sum(1 << bit for bit in l)
def PrettyPrint(debugMask, debugArray, printForever):
    """Enable binder kernel debug logging and pretty-print the output.

    debugMask -- raw debug_mask value, or 0 to build one from debugArray.
    debugArray -- list of bit positions (see module docstring) used when
        debugMask is 0.
    printForever -- when False, translate the current dmesg once and exit;
        when True, poll dmesg forever, printing only lines newer than the
        last batch.

    Fixes over the original: the poll loop fetched dmesg twice per
    iteration (once for `lines`, again for the for-loop) which wasted an
    adb round-trip and could race; timestamps were compared as strings
    against an int sentinel; and an empty dmesg batch crashed on
    `lines[-1]`.
    """
    if debugMask == 0:
        debugMask = generateDebugMask(debugArray)
    systemChecks()
    # set the kernel module parameter for binder_debug() statements
    cmd = 'adb shell \"su -c echo ' + str(debugMask) + ' \'> /sys/module/binder/parameters/debug_mask\'\"'
    subprocess.call(cmd, shell=True)
    # set the kernel log level
    cmd = 'adb shell \"su -c echo 7 \'> /proc/sys/kernel/printk\'\"'
    subprocess.call(cmd, shell=True)
    output = getDmesg()
    # Record the reference point used by translateTimestamp(): wall-clock
    # now, paired with the kernel timestamp of the first binder line.
    firstTime = getTimeStampFromLine(output.splitlines()[0])
    global startingSystemTime, startingTimestamp
    startingSystemTime = datetime.datetime.now()
    startingTimestamp = firstTime
    if printForever == False:
        for line in output.splitlines():
            translateLog(line)
        sys.exit()
    # Poll mode: only translate lines strictly newer than the previous
    # batch; timestamps are compared numerically, not lexically.
    mostRecentTime = 0.0
    while True:
        lines = getDmesg().splitlines()
        for line in lines:
            if float(getTimeStampFromLine(line)) > mostRecentTime:
                translateLog(line)
        if lines:
            mostRecentTime = float(getTimeStampFromLine(lines[-1]))
| |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wsme
from wsme import types as wtypes
from mistral.api.controllers import resource
from mistral.api.controllers.v2 import types
from mistral.workflow import states
SCOPE_TYPES = wtypes.Enum(str, 'private', 'public')
class Workbook(resource.Resource):
    """Workbook resource."""
    # Fields are wsme text attributes; *_at values are ISO-8601 strings
    # (see sample()).  The bare strings after some fields are wsme-style
    # attribute documentation and are kept as-is.
    id = wtypes.text
    name = wtypes.text
    definition = wtypes.text
    "workbook definition in Mistral v2 DSL"
    tags = [wtypes.text]
    scope = SCOPE_TYPES
    "'private' or 'public'"
    # Read-only: set by the service, not by API clients.
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Workbook resource."""
        return cls(id='123e4567-e89b-12d3-a456-426655440000',
                   name='book',
                   definition='HERE GOES'
                              'WORKBOOK DEFINITION IN MISTRAL DSL v2',
                   tags=['large', 'expensive'],
                   scope='private',
                   project_id='a7eb669e9819420ea4bd1453e672c0a7',
                   created_at='1970-01-01T00:00:00.000000',
                   updated_at='1970-01-01T00:00:00.000000')
class Workbooks(resource.ResourceList):
    """A collection of Workbooks."""
    workbooks = [Workbook]
    def __init__(self, **kwargs):
        # NOTE(review): _type is presumably consumed by ResourceList
        # (e.g. for link generation) — set before delegating to the base
        # constructor; confirm against resource.ResourceList.
        self._type = 'workbooks'
        super(Workbooks, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example Workbooks collection."""
        return cls(workbooks=[Workbook.sample()])
class Workflow(resource.Resource):
    """Workflow resource."""
    id = wtypes.text
    name = wtypes.text
    namespace = wtypes.text
    # NOTE(review): 'input' shadows the builtin; kept because it is part
    # of the public API surface of this resource.
    input = wtypes.text
    definition = wtypes.text
    "Workflow definition in Mistral v2 DSL"
    tags = [wtypes.text]
    scope = SCOPE_TYPES
    "'private' or 'public'"
    project_id = wtypes.text
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Workflow resource."""
        return cls(id='123e4567-e89b-12d3-a456-426655440000',
                   name='flow',
                   input='param1, param2',
                   definition='HERE GOES'
                              'WORKFLOW DEFINITION IN MISTRAL DSL v2',
                   tags=['large', 'expensive'],
                   scope='private',
                   project_id='a7eb669e9819420ea4bd1453e672c0a7',
                   created_at='1970-01-01T00:00:00.000000',
                   updated_at='1970-01-01T00:00:00.000000',
                   namespace='')
    @classmethod
    def _set_input(cls, obj, wf_spec):
        """Fill obj.input from the workflow spec's 'input' declaration.

        List items may be plain names or single-pair dicts (name with a
        default value); they are rendered as a comma-separated string,
        dict entries as "name=default".
        """
        input_list = []
        if wf_spec:
            input = wf_spec.get('input', [])
            for param in input:
                if isinstance(param, dict):
                    for k, v in param.items():
                        input_list.append("%s=%s" % (k, v))
                else:
                    input_list.append(param)
        setattr(obj, 'input', ", ".join(input_list) if input_list else '')
        return obj
    @classmethod
    def from_dict(cls, d):
        """Build a Workflow from a dict, deriving 'input' from d['spec']."""
        obj = super(Workflow, cls).from_dict(d)
        return cls._set_input(obj, d.get('spec'))
    @classmethod
    def from_db_model(cls, db_model):
        """Build a Workflow from a DB model, deriving 'input' from its spec."""
        obj = super(Workflow, cls).from_db_model(db_model)
        return cls._set_input(obj, db_model.spec)
class Workflows(resource.ResourceList):
    """A collection of workflows."""
    workflows = [Workflow]
    def __init__(self, **kwargs):
        # NOTE(review): _type is presumably consumed by ResourceList
        # (e.g. for link generation) — set before delegating to the base
        # constructor; confirm against resource.ResourceList.
        self._type = 'workflows'
        super(Workflows, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example paginated Workflows collection."""
        workflows_sample = cls()
        workflows_sample.workflows = [Workflow.sample()]
        # 'next' demonstrates the pagination link format.
        workflows_sample.next = ("http://localhost:8989/v2/workflows?"
                                 "sort_keys=id,name&"
                                 "sort_dirs=asc,desc&limit=10&"
                                 "marker=123e4567-e89b-12d3-a456-426655440000")
        return workflows_sample
class Action(resource.Resource):
    """Action resource.
    NOTE: *name* is immutable. Note that name and description get inferred
    from action definition when Mistral service receives a POST request.
    So they can't be changed in another way.
    """
    id = wtypes.text
    name = wtypes.text
    is_system = bool
    input = wtypes.text
    description = wtypes.text
    tags = [wtypes.text]
    definition = wtypes.text
    scope = SCOPE_TYPES
    # Read-only: set by the service, not by API clients.
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Action resource."""
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            name='flow',
            definition='HERE GOES ACTION DEFINITION IN MISTRAL DSL v2',
            tags=['large', 'expensive'],
            scope='private',
            project_id='a7eb669e9819420ea4bd1453e672c0a7',
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000'
        )
class Actions(resource.ResourceList):
    """A collection of Actions."""
    actions = [Action]
    def __init__(self, **kwargs):
        self._type = 'actions'
        super(Actions, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example paginated Actions collection."""
        demo = cls()
        demo.actions = [Action.sample()]
        demo.next = (
            "http://localhost:8989/v2/actions?sort_keys=id,name&"
            "sort_dirs=asc,desc&limit=10&"
            "marker=123e4567-e89b-12d3-a456-426655440000"
        )
        return demo
class Execution(resource.Resource):
    """Execution resource."""
    id = wtypes.text
    "execution ID. It is immutable and auto assigned or determined by the API "
    "client on execution creation. "
    "If it's passed to POST method from a client it'll be assigned to the "
    "newly created execution object, but only if an execution with such ID "
    "doesn't exist. If it exists, then the endpoint will just return "
    "execution properties in JSON."
    workflow_id = wtypes.text
    "workflow ID"
    workflow_name = wtypes.text
    "workflow name"
    workflow_namespace = wtypes.text
    """Workflow namespace. The workflow namespace is also saved
    under params and passed to all sub-workflow executions. When looking for
    the next sub-workflow to run, The correct workflow will be found by
    name and namespace, where the namespace can be the workflow namespace or
    the default namespace. Workflows in the same namespace as the top workflow
    will be given a higher priority."""
    description = wtypes.text
    "description of workflow execution"
    params = types.jsontype
    """'params' define workflow type specific parameters. Specific parameters
    are:
    'task_name' - the name of the target task. Only for reverse workflows.
    'env' - A string value containing the name of the stored environment
    object or a dictionary with the environment variables used during
    workflow execution and accessible as 'env()' from within expressions
    (YAQL or Jinja) defined in the workflow text.
    'evaluate_env' - If present, controls whether or not Mistral should
    recursively find and evaluate all expressions (YAQL or Jinja) within
    the specified environment (via 'env' parameter). 'True' - evaluate
    all expressions recursively in the environment structure. 'False' -
    don't evaluate expressions. 'True' by default.
    """
    task_execution_id = wtypes.text
    "reference to the parent task execution"
    root_execution_id = wtypes.text
    "reference to the root execution"
    source_execution_id = wtypes.text
    """reference to a workflow execution id which will signal the api to
    perform a lookup of a current workflow_execution and create a replica
    based on that workflow inputs and parameters"""
    state = wtypes.text
    "state can be one of: IDLE, RUNNING, SUCCESS, ERROR, PAUSED"
    state_info = wtypes.text
    "an optional state information string"
    input = types.jsontype
    "input is a JSON structure containing workflow input values"
    output = types.jsontype
    "output is a workflow output"
    created_at = wtypes.text
    updated_at = wtypes.text
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    @classmethod
    def sample(cls):
        """Return an example Execution used when generating API docs.

        The 'params' value illustrates both an inline 'env' dictionary and
        the 'notify' list with webhook and queue targets.
        """
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            workflow_name='flow',
            workflow_namespace='some_namespace',
            workflow_id='123e4567-e89b-12d3-a456-426655441111',
            description='this is the first execution.',
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            state='SUCCESS',
            input={},
            output={},
            params={
                'env': {'k1': 'abc', 'k2': 123},
                'notify': [
                    {
                        'type': 'webhook',
                        'url': 'http://endpoint/of/webhook',
                        'headers': {
                            'Content-Type': 'application/json',
                            'X-Auth-Token': '123456789'
                        }
                    },
                    {
                        'type': 'queue',
                        'topic': 'failover_queue',
                        'backend': 'rabbitmq',
                        'host': '127.0.0.1',
                        'port': 5432
                    }
                ]
            },
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000'
        )
class Executions(resource.ResourceList):
    """A collection of Execution resources."""
    executions = [Execution]
    def __init__(self, **kwargs):
        self._type = 'executions'
        super(Executions, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example paginated executions collection."""
        collection = cls()
        collection.executions = [Execution.sample()]
        collection.next = (
            "http://localhost:8989/v2/executions?"
            "sort_keys=id,workflow_name&sort_dirs=asc,desc&limit=10&"
            "marker=123e4567-e89b-12d3-a456-426655440000"
        )
        return collection
class Task(resource.Resource):
    """Task resource."""
    id = wtypes.text
    name = wtypes.text
    type = wtypes.text
    workflow_name = wtypes.text
    workflow_namespace = wtypes.text
    workflow_id = wtypes.text
    workflow_execution_id = wtypes.text
    state = wtypes.text
    """state can take one of the following values:
    IDLE, RUNNING, SUCCESS, ERROR, DELAYED"""
    state_info = wtypes.text
    "an optional state information string"
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    runtime_context = types.jsontype
    result = wtypes.text
    published = types.jsontype
    processed = bool
    created_at = wtypes.text
    updated_at = wtypes.text
    # Add this param to make Mistral API work with WSME 0.8.0 or higher version
    reset = wsme.wsattr(bool, mandatory=True)
    env = types.jsontype
    @classmethod
    def sample(cls):
        """Return an example Task used when generating API documentation.

        'runtime_context' illustrates the 'triggered_by' structure recording
        which task/event caused this task to run.
        """
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            workflow_name='flow',
            workflow_id='123e4567-e89b-12d3-a456-426655441111',
            workflow_execution_id='123e4567-e89b-12d3-a456-426655440000',
            name='task',
            state=states.SUCCESS,
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            runtime_context={
                'triggered_by': [
                    {
                        'task_id': '123-123-123',
                        'event': 'on-success'
                    }
                ]
            },
            result='task result',
            published={'key': 'value'},
            processed=True,
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000',
            reset=True
        )
class Tasks(resource.ResourceList):
    """A collection of tasks."""
    tasks = [Task]
    def __init__(self, **kwargs):
        self._type = 'tasks'
        super(Tasks, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example Tasks collection for API documentation."""
        example_tasks = [Task.sample()]
        return cls(tasks=example_tasks)
class ActionExecution(resource.Resource):
    """ActionExecution resource."""
    id = wtypes.text
    workflow_name = wtypes.text
    workflow_namespace = wtypes.text
    task_name = wtypes.text
    task_execution_id = wtypes.text
    state = wtypes.text
    state_info = wtypes.text
    tags = [wtypes.text]
    name = wtypes.text
    description = wtypes.text
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    accepted = bool
    input = types.jsontype
    output = types.jsontype
    created_at = wtypes.text
    updated_at = wtypes.text
    params = types.jsontype  # TODO(rakhmerov): What is this??
    @classmethod
    def sample(cls):
        """Return an example ActionExecution for API documentation.

        NOTE: the sample previously also passed a 'workflow_execution_id'
        keyword, but no such attribute is declared on this resource, so it
        never appeared in the rendered sample; the dead keyword has been
        removed.
        """
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            workflow_name='flow',
            task_name='task1',
            task_execution_id='343e45623-e89b-12d3-a456-426655440090',
            state=states.SUCCESS,
            state_info=states.SUCCESS,
            tags=['foo', 'fee'],
            name='std.echo',
            description='My running action',
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            accepted=True,
            input={'first_name': 'John', 'last_name': 'Doe'},
            output={'some_output': 'Hello, John Doe!'},
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000',
            params={'save_result': True, "run_sync": False}
        )
class ActionExecutions(resource.ResourceList):
    """A collection of action_executions."""
    action_executions = [ActionExecution]
    def __init__(self, **kwargs):
        self._type = 'action_executions'
        super(ActionExecutions, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example ActionExecutions collection."""
        examples = [ActionExecution.sample()]
        return cls(action_executions=examples)
class CronTrigger(resource.Resource):
    """CronTrigger resource."""
    id = wtypes.text
    name = wtypes.text
    workflow_name = wtypes.text
    workflow_id = wtypes.text
    workflow_input = types.jsontype
    workflow_params = types.jsontype
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    scope = SCOPE_TYPES
    # Cron pattern, e.g. '* * * * *'.
    pattern = wtypes.text
    # Number of runs left; must be at least 1 when set.
    remaining_executions = wtypes.IntegerType(minimum=1)
    first_execution_time = wtypes.text
    next_execution_time = wtypes.text
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example CronTrigger for API documentation.

        first_execution_time/next_execution_time are deliberately left unset.
        """
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            name='my_trigger',
            workflow_name='my_wf',
            workflow_id='123e4567-e89b-12d3-a456-426655441111',
            workflow_input={},
            workflow_params={},
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            scope='private',
            pattern='* * * * *',
            remaining_executions=42,
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000'
        )
class CronTriggers(resource.ResourceList):
    """A collection of cron triggers."""
    cron_triggers = [CronTrigger]
    def __init__(self, **kwargs):
        self._type = 'cron_triggers'
        super(CronTriggers, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example CronTriggers collection."""
        example_triggers = [CronTrigger.sample()]
        return cls(cron_triggers=example_triggers)
class Environment(resource.Resource):
    """Environment resource."""
    id = wtypes.text
    name = wtypes.text
    description = wtypes.text
    # Arbitrary JSON mapping of environment variable names to values.
    variables = types.jsontype
    scope = SCOPE_TYPES
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Environment for API documentation."""
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            name='sample',
            description='example environment entry',
            variables={
                'server': 'localhost',
                'database': 'temp',
                'timeout': 600,
                'verbose': True
            },
            scope='private',
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000'
        )
class Environments(resource.ResourceList):
    """A collection of Environment resources."""
    environments = [Environment]
    def __init__(self, **kwargs):
        self._type = 'environments'
        super(Environments, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example Environments collection."""
        example_envs = [Environment.sample()]
        return cls(environments=example_envs)
class Member(resource.Resource):
    """Member resource."""
    id = types.uuid
    # Identifier and type of the shared resource (e.g. a workflow).
    resource_id = wtypes.text
    resource_type = wtypes.text
    project_id = wtypes.text
    # Project the resource is shared with.
    member_id = wtypes.text
    # Sharing status: one of 'pending', 'accepted' or 'rejected'.
    status = wtypes.Enum(str, 'pending', 'accepted', 'rejected')
    created_at = wtypes.text
    updated_at = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Member for API documentation."""
        return cls(
            id='123e4567-e89b-12d3-a456-426655440000',
            resource_id='123e4567-e89b-12d3-a456-426655440011',
            resource_type='workflow',
            project_id='40a908dbddfe48ad80a87fb30fa70a03',
            member_id='a7eb669e9819420ea4bd1453e672c0a7',
            status='accepted',
            created_at='1970-01-01T00:00:00.000000',
            updated_at='1970-01-01T00:00:00.000000'
        )
class Members(resource.ResourceList):
    """A collection of Members."""
    members = [Member]
    def __init__(self, **kwargs):
        # Every other collection in this module sets its collection type
        # before delegating to ResourceList; Members previously did not.
        self._type = 'members'
        super(Members, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example Members collection for API documentation."""
        return cls(members=[Member.sample()])
class Service(resource.Resource):
    """Service resource."""
    name = wtypes.text
    type = wtypes.text
    @classmethod
    def sample(cls):
        """Return an example Service for API documentation."""
        example = cls(name='host1_1234', type='executor_group')
        return example
class Services(resource.Resource):
    """A collection of Services."""
    # NOTE(review): unlike the other collections in this module, Services
    # derives from resource.Resource (not resource.ResourceList) and sets
    # no self._type -- confirm whether this is intentional (e.g. because
    # the services endpoint is not paginated).
    services = [Service]
    @classmethod
    def sample(cls):
        # Example payload used when generating API documentation.
        return cls(services=[Service.sample()])
class EventTrigger(resource.Resource):
    """EventTrigger resource."""
    id = wsme.wsattr(wtypes.text, readonly=True)
    created_at = wsme.wsattr(wtypes.text, readonly=True)
    updated_at = wsme.wsattr(wtypes.text, readonly=True)
    project_id = wsme.wsattr(wtypes.text, readonly=True)
    name = wtypes.text
    workflow_id = types.uuid
    workflow_input = types.jsontype
    workflow_params = types.jsontype
    # Messaging source of the event: exchange/topic pair plus event name
    # (e.g. 'compute.instance.create.end').
    exchange = wtypes.text
    topic = wtypes.text
    event = wtypes.text
    scope = SCOPE_TYPES
    @classmethod
    def sample(cls):
        """Return an example EventTrigger for API documentation."""
        return cls(id='123e4567-e89b-12d3-a456-426655441414',
                   created_at='1970-01-01T00:00:00.000000',
                   updated_at='1970-01-01T00:00:00.000000',
                   project_id='project',
                   name='expiration_event_trigger',
                   workflow_id='123e4567-e89b-12d3-a456-426655441414',
                   workflow_input={},
                   workflow_params={},
                   exchange='nova',
                   topic='notifications',
                   event='compute.instance.create.end')
class EventTriggers(resource.ResourceList):
    """A collection of event triggers."""
    event_triggers = [EventTrigger]
    def __init__(self, **kwargs):
        self._type = 'event_triggers'
        super(EventTriggers, self).__init__(**kwargs)
    @classmethod
    def sample(cls):
        """Return an example paginated event-trigger collection."""
        sample = cls()
        sample.event_triggers = [EventTrigger.sample()]
        sample.next = ("http://localhost:8989/v2/event_triggers?"
                       "sort_keys=id,name&"
                       "sort_dirs=asc,desc&limit=10&"
                       "marker=123e4567-e89b-12d3-a456-426655440000")
        return sample
| |
# -*- coding: utf-8 -*-
"""Tests for go.vumitools.bulk_send_application"""
import json
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.message import TransportUserMessage
from vumi.tests.helpers import VumiTestCase
from go.apps.surveys.vumi_app import SurveyApplication
from go.apps.tests.helpers import AppWorkerHelper
from go.vumitools.api import VumiApiCommand
class TestSurveyApplication(VumiTestCase):
    """End-to-end tests for SurveyApplication using the vxpolls poll manager."""
    # A four-question poll. The second question is conditional: its 'checks'
    # entry means it is only asked when 'favorite color' was answered '1'.
    default_questions = [{
        'copy': 'What is your favorite color? 1. Red 2. Yellow '
                '3. Blue',
        'label': 'favorite color',
        'valid_responses': [u'1', u'2', u'3'],
    }, {
        'checks': [
            ['equal', 'favorite color', u'1'],
        ],
        'copy': 'What shade of red? 1. Dark or 2. Light',
        'label': 'what shade',
        'valid_responses': [u'1', u'2'],
    }, {
        'copy': 'What is your favorite fruit? 1. Apples 2. Oranges '
                '3. Bananas',
        'label': 'favorite fruit',
        'valid_responses': [u'1', u'2', u'3'],
    }, {
        'copy': 'What is your favorite editor? 1. Vim 2. Emacs '
                '3. Other',
        'label': 'editor',
        'valid_responses': [u'1', u'2', u'3']
    }]
    @inlineCallbacks
    def setUp(self):
        # Build a SurveyApplication worker, one contact group and one
        # conversation over that group; self.pm is the worker's poll manager.
        self.app_helper = self.add_helper(AppWorkerHelper(SurveyApplication))
        self.app = yield self.app_helper.get_app_worker({
            'vxpolls': {'prefix': 'test.'},
        })
        self.pm = self.app.pm
        self.group = yield self.app_helper.create_group(u'test group')
        self.conversation = yield self.app_helper.create_conversation(
            groups=[self.group])
    def reply_to(self, msg, content, **kw):
        # Dispatch an inbound user reply to *msg* (from/to addresses swapped).
        return self.app_helper.make_dispatch_inbound(
            content, to_addr=msg['from_addr'], from_addr=msg['to_addr'],
            conv=self.conversation, **kw)
    @inlineCallbacks
    def create_survey(self, conversation, questions=None, end_response=None):
        """Store a poll config for *conversation* and return the poll."""
        # Create a sample survey
        questions = questions or self.default_questions
        poll_id = 'poll-%s' % (conversation.key,)
        config = yield self.pm.get_config(poll_id)
        config.update({
            'poll_id': poll_id,
            'questions': questions
        })
        config.setdefault('survey_completed_response',
                          (end_response or 'Thanks for completing the survey'))
        yield self.pm.set(poll_id, config)
        poll = yield self.pm.get(poll_id)
        returnValue(poll)
    @inlineCallbacks
    def wait_for_messages(self, nr_of_messages, total_length):
        """Wait until *total_length* outbound messages have been dispatched
        and return the last *nr_of_messages* of them."""
        msgs = yield self.app_helper.wait_for_dispatched_outbound(total_length)
        returnValue(msgs[-1 * nr_of_messages:])
    @inlineCallbacks
    def send_send_survey_command(self, conversation):
        """Dispatch the 'send_survey' API command for *conversation*."""
        batch_id = self.conversation.batch.key
        yield self.app_helper.dispatch_command(
            "send_survey",
            user_account_key=self.conversation.user_account.key,
            conversation_key=conversation.key,
            batch_id=batch_id,
            msg_options={},
            delivery_class=conversation.delivery_class,
        )
    @inlineCallbacks
    def test_clearing_old_survey_data(self):
        # Pre-existing answers for known labels must be replaced when the
        # survey runs again; unrelated contact extras must be left intact.
        contact = yield self.app_helper.create_contact(
            u'+27831234567', name=u'First', surname=u'Contact',
            groups=[self.group])
        # Populate all the known labels with 'to-be-cleared', these should
        # be overwritten with new values later
        for question in self.default_questions:
            contact.extra[question['label']] = u'to-be-cleared'
        # Also fill in junk data for an unknown field which should be left
        # alone.
        contact.extra['litmus'] = u'test'
        yield contact.save()
        yield self.create_survey(self.conversation)
        yield self.app_helper.start_conversation(self.conversation)
        yield self.send_send_survey_command(self.conversation)
        yield self.submit_answers(self.default_questions,
                                  answers=[
            '2',  # Yellow, skips the second question because of the check
            '2',  # Oranges
            '1',  # Vim
        ])
        # The 4th message should be the closing one
        [closing_message] = yield self.wait_for_messages(1, 4)
        self.assertEqual(closing_message['content'],
                         'Thanks for completing the survey')
        user_helper = yield self.app_helper.vumi_helper.get_or_create_user()
        contact_store = user_helper.user_api.contact_store
        contact = yield contact_store.get_contact_by_key(contact.key)
        self.assertEqual(contact.extra['litmus'], u'test')
        self.assertTrue('to-be-cleared' not in contact.extra.values())
    def _reformat_participant_for_comparison(self, participant):
        # Decode the JSON-encoded fields and drop the timestamp so two
        # participant dicts can be compared structurally.
        clone = participant.copy()
        clone['labels'] = json.loads(participant['labels'])
        clone['polls'] = json.loads(participant['polls'])
        clone.pop('updated_at')
        return clone
    def assert_participants_equalish(self, participant1, participant2):
        # Structural equality, with 'updated_at' compared only approximately.
        self.assertEqual(
            self._reformat_participant_for_comparison(participant1),
            self._reformat_participant_for_comparison(participant2))
        self.assertAlmostEqual(
            participant1['updated_at'], participant2['updated_at'], 2)
    @inlineCallbacks
    def complete_survey(self, questions, start_at=0):
        """Answer every question with its first valid response, then check
        the closing message and the survey-completed application event.

        NOTE(review): assumes *questions* is non-empty -- 'last_sent_msg' is
        only bound inside the loop.
        """
        for i in range(len(questions)):
            [msg] = yield self.wait_for_messages(1, i + start_at + 1)
            self.assertEqual(msg['content'], questions[i]['copy'])
            response = str(questions[i]['valid_responses'][0])
            last_sent_msg = yield self.reply_to(msg, response)
        nr_of_messages = 1 + len(questions) + start_at
        all_messages = yield self.app_helper.wait_for_dispatched_outbound(
            nr_of_messages)
        last_msg = all_messages[-1]
        self.assertEqual(last_msg['content'],
                         'Thanks for completing the survey')
        self.assertEqual(last_msg['session_event'],
                         TransportUserMessage.SESSION_CLOSE)
        poll_id = 'poll-%s' % (self.conversation.key,)
        [app_event] = self.app_helper.get_dispatched_app_events()
        # The poll has been completed and so the results have been
        # archived, get the participant from the archive
        [participant] = (yield self.pm.get_archive(poll_id,
                                                   last_sent_msg['from_addr']))
        self.assertEqual(
            app_event['account_key'], self.conversation.user_account.key)
        self.assertEqual(app_event['conversation_key'], self.conversation.key)
        # make sure we have a participant, pop it out and
        # compare with expected result further down.
        event_participant = app_event['content'].pop('participant')
        self.assertTrue(event_participant)
        self.assertEqual(app_event['content'], {
            'from_addr': last_sent_msg['from_addr'],
            'transport_type': last_sent_msg['transport_type'],
            'message_id': last_sent_msg['message_id'],
        })
        self.assert_participants_equalish(
            event_participant, participant.dump())
        returnValue(last_msg)
    @inlineCallbacks
    def submit_answers(self, questions, answers, start_at=0):
        # Reply to each prompt with the next answer. NOTE: mutates *answers*
        # in place via pop(0).
        for i in range(len(answers)):
            [msg] = yield self.wait_for_messages(1, i + start_at + 1)
            yield self.reply_to(msg, answers.pop(0))
    @inlineCallbacks
    def test_survey_completion(self):
        yield self.app_helper.create_contact(
            u'+27831234567', name=u'First', surname=u'Contact',
            groups=[self.group])
        yield self.create_survey(self.conversation)
        yield self.app_helper.start_conversation(self.conversation)
        yield self.send_send_survey_command(self.conversation)
        yield self.complete_survey(self.default_questions)
    @inlineCallbacks
    def test_ensure_participant_cleared_after_archiving(self):
        contact = yield self.app_helper.create_contact(
            u'+27831234567', name=u'First', surname=u'Contact',
            groups=[self.group])
        yield self.create_survey(self.conversation)
        yield self.app_helper.start_conversation(self.conversation)
        yield self.send_send_survey_command(self.conversation)
        yield self.complete_survey(self.default_questions)
        # This participant should be empty
        poll_id = 'poll-%s' % (self.conversation.key,)
        participant = yield self.pm.get_participant(poll_id, contact.msisdn)
        self.assertEqual(participant.labels, {})
    @inlineCallbacks
    def test_send_message_command(self):
        # 'send_message' must pass msg_options through and stamp the Go
        # helper_metadata with account/conversation details.
        msg_options = {
            'transport_name': 'sphex_transport',
            'from_addr': '666666',
            'transport_type': 'sphex',
            'helper_metadata': {'foo': {'bar': 'baz'}},
        }
        yield self.app_helper.start_conversation(self.conversation)
        batch_id = self.conversation.batch.key
        yield self.app_helper.dispatch_command(
            "send_message",
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                "batch_id": batch_id,
                "to_addr": "123456",
                "content": "hello world",
                "msg_options": msg_options,
            })
        [msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(msg.payload['to_addr'], "123456")
        self.assertEqual(msg.payload['from_addr'], "666666")
        self.assertEqual(msg.payload['content'], "hello world")
        self.assertEqual(msg.payload['transport_name'], "sphex_transport")
        self.assertEqual(msg.payload['transport_type'], "sphex")
        self.assertEqual(msg.payload['message_type'], "user_message")
        self.assertEqual(msg.payload['helper_metadata']['go'], {
            'user_account': self.conversation.user_account.key,
            'conversation_type': 'survey',
            'conversation_key': self.conversation.key,
        })
        self.assertEqual(msg.payload['helper_metadata']['foo'],
                         {'bar': 'baz'})
    @inlineCallbacks
    def test_process_command_send_message_in_reply_to(self):
        # When 'in_reply_to' is set, the outbound message must be addressed
        # back to the original sender and linked to the inbound message id.
        yield self.app_helper.start_conversation(self.conversation)
        batch_id = self.conversation.batch.key
        msg = yield self.app_helper.make_stored_inbound(
            self.conversation, "foo")
        command = VumiApiCommand.command(
            'worker', 'send_message',
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                u'batch_id': batch_id,
                u'content': u'foo',
                u'to_addr': u'to_addr',
                u'msg_options': {
                    u'transport_name': u'smpp_transport',
                    u'in_reply_to': msg['message_id'],
                    u'transport_type': u'sms',
                    u'from_addr': u'default10080',
                },
            })
        yield self.app.consume_control_command(command)
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(sent_msg['to_addr'], msg['from_addr'])
        self.assertEqual(sent_msg['content'], 'foo')
        self.assertEqual(sent_msg['in_reply_to'], msg['message_id'])
    @inlineCallbacks
    def test_closing_menu_if_unavailable(self):
        # With no poll config stored, an inbound message must get a
        # 'Service Unavailable' reply that closes the session.
        poll_id = 'poll-%s' % (self.conversation.key,)
        config = yield self.pm.get_config(poll_id)
        self.assertEqual(config, {})  # incomplete or empty
        yield self.app_helper.make_dispatch_inbound(
            "foo", helper_metadata={"poll_id": poll_id},
            conv=self.conversation)
        [reply] = yield self.app_helper.wait_for_dispatched_outbound(1)
        self.assertTrue('Service Unavailable' in reply['content'])
        self.assertEqual(reply['session_event'],
                         TransportUserMessage.SESSION_CLOSE)
| |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
import time
from decimal import Decimal
from netforce import config
from netforce import database
from netforce.access import get_active_company, set_active_user, set_active_company
from netforce.utils import get_file_path
class Invoice(Model):
    # Accounting invoice model covering receivable ("out") and payable ("in")
    # documents: invoices, credit/debit notes, prepayments and overpayments.
    _name = "account.invoice"
    _string = "Invoice"
    _audit_log = True
    # An invoice number must be unique per company.
    _key = ["company_id", "number"]
    _name_field = "number"
    _multi_company = True
    # Field definitions. Fields with function="..." are computed; those with
    # store=True are recomputed via function_store() (see create/write below).
    _fields = {
        "type": fields.Selection([["out", "Receivable"], ["in", "Payable"]], "Type", required=True),
        "inv_type": fields.Selection([["invoice", "Invoice"], ["credit", "Credit Note"], ["debit", "Debit Note"], ["prepay", "Prepayment"], ["overpay", "Overpayment"]], "Subtype", required=True, search=True),
        "number": fields.Char("Number", search=True),
        "ref": fields.Char("Ref", size=256, search=True),
        "memo": fields.Char("Memo", size=1024, search=True),
        "contact_id": fields.Many2One("contact", "Contact", required=True, search=True),
        "contact_credit": fields.Decimal("Outstanding Credit", function="get_contact_credit"),
        "account_id": fields.Many2One("account.account", "Account"),
        "date": fields.Date("Date", required=True, search=True),
        "due_date": fields.Date("Due Date", search=True),
        "currency_id": fields.Many2One("currency", "Currency", required=True, search=True),
        "tax_type": fields.Selection([["tax_ex", "Tax Exclusive"], ["tax_in", "Tax Inclusive"], ["no_tax", "No Tax"]], "Tax Type", required=True),
        "state": fields.Selection([("draft", "Draft"), ("waiting_approval", "Waiting Approval"), ("waiting_payment", "Waiting Payment"), ("paid", "Paid"), ("voided", "Voided")], "Status", function="get_state", store=True, function_order=20, search=True),
        "lines": fields.One2Many("account.invoice.line", "invoice_id", "Lines"),
        "amount_subtotal": fields.Decimal("Subtotal", function="get_amount", function_multi=True, store=True),
        "amount_tax": fields.Decimal("Tax Amount", function="get_amount", function_multi=True, store=True),
        "amount_total": fields.Decimal("Total", function="get_amount", function_multi=True, store=True),
        "amount_paid": fields.Decimal("Paid Amount", function="get_amount", function_multi=True, store=True),
        "amount_due": fields.Decimal("Due Amount", function="get_amount", function_multi=True, store=True),
        "amount_credit_total": fields.Decimal("Total Credit", function="get_amount", function_multi=True, store=True),
        "amount_credit_remain": fields.Decimal("Remaining Credit", function="get_amount", function_multi=True, store=True),
        "amount_total_cur": fields.Decimal("Total Amount", function="get_amount", function_multi=True, store=True),
        "amount_due_cur": fields.Decimal("Due Amount", function="get_amount", function_multi=True, store=True),
        "amount_paid_cur": fields.Decimal("Paid Amount", function="get_amount", function_multi=True, store=True),
        "amount_credit_remain_cur": fields.Decimal("Remaining Credit", function="get_amount", function_multi=True, store=True),
        "amount_rounding": fields.Decimal("Rounding", function="get_amount", function_multi=True, store=True),
        "qty_total": fields.Decimal("Total Quantity", function="get_qty_total"),
        "attachment": fields.File("Attachment"),
        "payments": fields.One2Many("account.payment.line", "invoice_id", "Payments", condition=[["payment_id.state", "=", "posted"]]),
        "move_id": fields.Many2One("account.move", "Journal Entry"),
        "reconcile_move_line_id": fields.Many2One("account.move.line", "Reconcile Item"),
        "credit_alloc": fields.One2Many("account.credit.alloc", "credit_id", "Credit Allocation"),
        "credit_notes": fields.One2Many("account.credit.alloc", "invoice_id", "Credit Notes"),
        "currency_rate": fields.Decimal("Currency Rate", scale=6),
        "payment_id": fields.Many2One("account.payment", "Payment"),
        "related_id": fields.Reference([["sale.order", "Sales Order"], ["purchase.order", "Purchase Order"], ["production.order","Production Order"], ["project", "Project"], ["job", "Service Order"], ["service.contract", "Service Contract"]], "Related To"),
        "company_id": fields.Many2One("company", "Company"),
        "amount_discount": fields.Decimal("Discount", function="get_discount"),
        "bill_address_id": fields.Many2One("address", "Billing Address"),
        "comments": fields.One2Many("message", "related_id", "Comments"),
        "documents": fields.One2Many("document", "related_id", "Documents"),
        "fixed_assets": fields.One2Many("account.fixed.asset", "invoice_id", "Fixed Assets"),
        "tax_no": fields.Char("Tax No."),
        "tax_branch_no": fields.Char("Tax Branch No."),
        "pay_method_id": fields.Many2One("payment.method", "Payment Method"),
        "journal_id": fields.Many2One("account.journal", "Journal"),
        "sequence_id": fields.Many2One("sequence", "Sequence"),
        "original_invoice_id": fields.Many2One("account.invoice", "Original Invoice"),
        "product_id": fields.Many2One("product","Product",store=False,function_search="search_product",search=True),
        "taxes": fields.One2Many("account.invoice.tax","invoice_id","Taxes"),
        "agg_amount_total": fields.Decimal("Total Amount", agg_function=["sum", "amount_total"]),
        "agg_amount_subtotal": fields.Decimal("Total Amount w/o Tax", agg_function=["sum", "amount_subtotal"]),
        "year": fields.Char("Year", sql_function=["year", "date"]),
        "quarter": fields.Char("Quarter", sql_function=["quarter", "date"]),
        "month": fields.Char("Month", sql_function=["month", "date"]),
        "week": fields.Char("Week", sql_function=["week", "date"]),
        "note" : fields.Text("Note"),
    }
    # Newest invoices first.
    _order = "date desc,number desc"
def _get_currency(self, context={}):
settings = get_model("settings").browse(1)
return settings.currency_id.id
def _get_number(self, context={}):
defaults = context.get("defaults")
if defaults: # XXX
type = defaults.get("type")
inv_type = defaults.get("inv_type")
else:
type = context.get("type")
inv_type = context.get("inv_type")
seq_id = context.get("sequence_id")
if not seq_id:
seq_type = None
if type == "out":
if inv_type in ("invoice", "prepay"):
seq_type = "cust_invoice"
elif inv_type == "credit":
seq_type = "cust_credit"
elif inv_type == "debit":
seq_type = "cust_debit"
elif type == "in":
if inv_type in ("invoice", "prepay"):
seq_type = "supp_invoice"
elif inv_type == "credit":
seq_type = "supp_credit"
elif inv_type == "debit":
seq_type = "supp_debit"
if not seq_type:
return
seq_id = get_model("sequence").find_sequence(type=seq_type)
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id, context=context)
res = self.search([["number", "=", num]])
if not res:
return num
get_model("sequence").increment_number(seq_id, context=context)
_defaults = {
"state": "draft",
"currency_id": _get_currency,
"tax_type": "tax_ex",
"number": _get_number,
"date": lambda *a: time.strftime("%Y-%m-%d"),
"company_id": lambda *a: get_active_company(),
}
_constraints = ["check_fields"]
def search_product(self, clause, context={}):
product_id = clause[2]
product = get_model("product").browse(product_id)
product_ids = [product_id]
for var in product.variants:
product_ids.append(var.id)
for comp in product.components:
product_ids.append(comp.component_id.id)
invoice_ids = []
for line in get_model("account.invoice.line").search_browse([["product_id","in",product_ids]]):
invoice_ids.append(line.invoice_id.id)
cond = [["id","in",invoice_ids]]
return cond
def check_fields(self, ids, context={}):
for obj in self.browse(ids):
if obj.state in ("waiting_approval", "waiting_payment"):
if obj.inv_type == "invoice":
if not obj.due_date:
raise Exception("Missing due date")
# if not obj.lines: # XXX: in myob, lines can be empty...
# raise Exception("Lines are empty")
def name_get(self, ids, context={}):
vals = []
for obj in self.browse(ids):
name = obj.number
if not name:
if obj.inv_type == "invoice":
name = "Invoice"
elif obj.inv_type == "credit":
name = "Credit Note"
elif obj.inv_type == "prepay":
name = "Prepayment"
elif obj.inv_type == "overpay":
name = "Overpayment"
if obj.ref:
name += " [%s]" % obj.ref
if obj.tax_no:
name+=", "+obj.tax_no
vals.append((obj.id, name))
return vals
def create(self, vals, context={}):
id = super(Invoice, self).create(vals, context=context)
self.function_store([id])
return id
    def write(self, ids, vals, **kw):
        # Persist the changes, recompute stored function fields on the
        # invoices, then cascade the recompute to any sale/purchase orders
        # referenced by the invoice lines.
        super(Invoice, self).write(ids, vals, **kw)
        self.function_store(ids)
        sale_ids = []
        purch_ids = []
        for inv in self.browse(ids):
            for line in inv.lines:
                if line.sale_id:
                    sale_ids.append(line.sale_id.id)
                if line.purch_id:
                    purch_ids.append(line.purch_id.id)
        if sale_ids:
            get_model("sale.order").function_store(sale_ids)
        if purch_ids:
            get_model("purchase.order").function_store(purch_ids)
def delete(self, ids, context={}):
sale_ids = []
purch_ids = []
for inv in self.browse(ids):
if inv.inv_type == "prepay" and inv.type == "out" and "can_delete" not in context:
raise Exception("Can't delete invoice with Prepayment. Please delete by using To Draft option in payment.")
if inv.inv_type not in ("prepay", "overpay"):
if inv.state not in ("draft", "waiting_approval", "voided"):
raise Exception("Can't delete invoice with this status")
for line in inv.lines:
if line.sale_id:
sale_ids.append(line.sale_id.id)
if line.purch_id:
purch_ids.append(line.purch_id.id)
super(Invoice, self).delete(ids, context=context)
if sale_ids:
get_model("sale.order").function_store(sale_ids)
if purch_ids:
get_model("purchase.order").function_store(purch_ids)
def function_store(self, ids, field_names=None, context={}):
super().function_store(ids, field_names, context)
sale_ids = []
purch_ids = []
for obj in self.browse(ids):
for line in obj.lines:
if line.sale_id:
sale_ids.append(line.sale_id.id)
if line.purch_id:
purch_ids.append(line.purch_id.id)
if sale_ids:
get_model("sale.order").function_store(sale_ids)
if purch_ids:
get_model("purchase.order").function_store(purch_ids)
def submit_for_approval(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "draft":
raise Exception("Invalid state")
obj.write({"state": "waiting_approval"})
self.trigger(ids, "submit_for_approval")
return {
"flash": "Invoice submitted for approval."
}
def approve(self, ids, context={}):
obj = self.browse(ids)[0]
if obj.state not in ("draft", "waiting_approval"):
raise Exception("Invalid state")
obj.post()
if obj.inv_type == "invoice":
msg = "Invoice approved."
if obj.type == "in":
obj.create_fixed_assets()
elif obj.inv_type == "credit":
msg = "Credit note approved."
elif obj.inv_type == "debit":
msg = "Debit note approved."
return {
"flash": msg,
}
    def calc_taxes(self,ids,context={}):
        """Recompute the invoice's tax lines (account.invoice.tax).

        Deletes any existing tax lines, stores the currency rate if not yet
        set, aggregates base/tax amounts per tax component over the invoice
        lines, then recreates one tax line per component.  For VAT components
        on customer invoices a tax number is generated and written back.
        """
        obj=self.browse(ids[0])
        obj.taxes.delete()
        settings = get_model("settings").browse(1)
        # Determine the exchange rate: keep an explicit rate when present,
        # otherwise derive it from the rate tables and store it.
        if obj.currency_rate:
            currency_rate = obj.currency_rate
        else:
            if obj.currency_id.id == settings.currency_id.id:
                currency_rate = 1
            else:
                # Sales use the "sell" rate, purchases the "buy" rate.
                rate_type=obj.type=="out" and "sell" or "buy"
                rate_from = obj.currency_id.get_rate(date=obj.date,rate_type=rate_type)
                if not rate_from:
                    raise Exception("Missing currency rate for %s" % obj.currency_id.code)
                if not settings.currency_id:
                    raise Exception("Missing default currency in Financial Settings")
                rate_to = settings.currency_id.get_rate(date=obj.date)
                if not rate_to:
                    raise Exception("Missing currency rate for %s" % settings.currency_id.code)
                currency_rate = rate_from / rate_to
            obj.write({"currency_rate": currency_rate})
        # Aggregate tax/base amounts per tax component over all lines.
        taxes = {}
        tax_nos = []
        total_amt = 0
        total_base = 0
        total_tax = 0
        for line in obj.lines:
            tax_id = line.tax_id
            if tax_id and obj.tax_type != "no_tax":
                base_amt = get_model("account.tax.rate").compute_base(tax_id, line.amount, tax_type=obj.tax_type)
                if settings.rounding_account_id:
                    base_amt=get_model("currency").round(obj.currency_id.id,base_amt)
                tax_comps = get_model("account.tax.rate").compute_taxes(tax_id, base_amt, when="invoice")
                for comp_id, tax_amt in tax_comps.items():
                    tax_vals = taxes.setdefault(comp_id, {"tax_amt": 0, "base_amt": 0})
                    tax_vals["tax_amt"] += tax_amt
                    tax_vals["base_amt"] += base_amt
            else:
                base_amt = line.amount
        # Create one account.invoice.tax record per component.
        for comp_id, tax_vals in taxes.items():
            comp = get_model("account.tax.component").browse(comp_id)
            acc_id = comp.account_id.id
            if not acc_id:
                raise Exception("Missing account for tax component %s" % comp.name)
            vals = {
                "invoice_id": obj.id,
                "tax_comp_id": comp_id,
                "base_amount": get_model("currency").round(obj.currency_id.id,tax_vals["base_amt"]),
                "tax_amount": get_model("currency").round(obj.currency_id.id,tax_vals["tax_amt"]),
            }
            # VAT components carry a tax number: generated for customer
            # invoices, copied from the supplier document otherwise.
            if comp.type in ("vat", "vat_exempt"):
                if obj.type == "out":
                    if obj.tax_no:
                        tax_no = obj.tax_no
                    else:
                        tax_no = self.gen_tax_no(exclude=tax_nos, context={"date": obj.date})
                        tax_nos.append(tax_no)
                        obj.write({"tax_no": tax_no})
                    vals["tax_no"] = tax_no
                elif obj.type == "in":
                    vals["tax_no"] = obj.tax_no
            get_model("account.invoice.tax").create(vals)
    def post(self, ids, context={}):
        """Create and post the journal entry for each invoice, then move it
        to 'waiting_payment'.

        One move line is built per invoice line (tax-excluded, converted to
        company currency) and per tax component; equal lines are grouped and
        rounded, and a balancing line is booked on the receivable/payable
        control account.
        """
        t0 = time.time()
        settings = get_model("settings").browse(1)
        for obj in self.browse(ids):
            obj.check_related()
            if obj.amount_total == 0:
                raise Exception("Invoice total is zero")
            if obj.amount_total < 0:
                raise Exception("Invoice total is negative")
            if not obj.taxes:
                obj.calc_taxes()
                # Re-read to pick up the freshly created tax lines.
                obj=obj.browse()[0]
            contact = obj.contact_id
            # Control account: per-contact override first, company default after.
            if obj.type == "out":
                account_id = contact.account_receivable_id.id or settings.account_receivable_id.id
                if not account_id:
                    raise Exception("Account receivable not found")
            elif obj.type == "in":
                account_id = contact.account_payable_id.id or settings.account_payable_id.id
                if not account_id:
                    raise Exception("Account payable not found")
            # Credit sales / debit purchases; credit notes flip the sign.
            sign = obj.type == "out" and 1 or -1
            if obj.inv_type == "credit":
                sign *= -1
            obj.write({"account_id": account_id})
            if obj.type == "out":
                desc = "Sale; " + contact.name
            elif obj.type == "in":
                desc = "Purchase; " + contact.name
            if obj.type == "out":
                journal_id = obj.journal_id.id or settings.sale_journal_id.id
                if not journal_id:
                    raise Exception("Sales journal not found")
            elif obj.type == "in":
                journal_id = obj.journal_id.id or settings.purchase_journal_id.id
                if not journal_id:
                    raise Exception("Purchases journal not found")
            # Resolve the exchange rate (same rules as calc_taxes).
            if obj.currency_rate:
                currency_rate = obj.currency_rate
            else:
                if obj.currency_id.id == settings.currency_id.id:
                    currency_rate = 1
                else:
                    rate_type=obj.type=="out" and "sell" or "buy"
                    rate_from = obj.currency_id.get_rate(date=obj.date,rate_type=rate_type)
                    if not rate_from:
                        raise Exception("Missing currency rate for %s" % obj.currency_id.code)
                    rate_to = settings.currency_id.get_rate(date=obj.date)
                    if not rate_to:
                        raise Exception("Missing currency rate for %s" % settings.currency_id.code)
                    currency_rate = rate_from / rate_to
                obj.write({"currency_rate": currency_rate})
            move_vals = {
                "journal_id": journal_id,
                "number": obj.number,
                "date": obj.date,
                "ref": obj.ref,
                "narration": desc,
                "related_id": "account.invoice,%s" % obj.id,
                "company_id": obj.company_id.id,
            }
            lines = []
            t01 = time.time()
            # One move line per invoice line, tax-excluded, in company currency.
            for line in obj.lines:
                cur_amt = get_model("currency").convert(
                    line.amount, obj.currency_id.id, settings.currency_id.id, rate=currency_rate)
                tax_id = line.tax_id
                if tax_id and obj.tax_type != "no_tax":
                    base_amt = get_model("account.tax.rate").compute_base(tax_id, cur_amt, tax_type=obj.tax_type)
                else:
                    base_amt = cur_amt
                acc_id = line.account_id.id
                if not acc_id:
                    raise Exception("Missing line account for invoice line '%s'" % line.description)
                amt = base_amt * sign
                line_vals = {
                    "description": line.description,
                    "account_id": acc_id,
                    "credit": amt > 0 and amt or 0,
                    "debit": amt < 0 and -amt or 0,
                    "track_id": line.track_id.id,
                    "track2_id": line.track2_id.id,
                    "contact_id": contact.id,
                }
                lines.append(line_vals)
            # One move line per tax component.
            for tax in obj.taxes:
                comp = tax.tax_comp_id
                acc_id = comp.account_id.id
                if not acc_id:
                    raise Exception("Missing account for tax component %s" % comp.name)
                tax_amt = get_model("currency").convert(
                    tax.tax_amount, obj.currency_id.id, settings.currency_id.id, rate=currency_rate)
                base_amt = get_model("currency").convert(
                    tax.base_amount, obj.currency_id.id, settings.currency_id.id, rate=currency_rate)
                amt = sign * tax_amt
                line_vals = {
                    "description": desc,
                    "account_id": acc_id,
                    "credit": amt > 0 and amt or 0,
                    "debit": amt < 0 and -amt or 0,
                    "tax_comp_id": comp.id,
                    "tax_base": base_amt,
                    "contact_id": contact.id,
                    "invoice_id": obj.id,
                    "tax_no": tax.tax_no,
                }
                lines.append(line_vals)
            t02 = time.time()
            dt01 = (t02 - t01) * 1000
            print("post dt01", dt01)
            # Merge move lines that share the same key fields.
            groups = {}
            keys = ["description", "account_id", "track_id", "tax_comp_id", "contact_id", "invoice_id", "reconcile_id"]
            for line in lines:
                key_val = tuple(line.get(k) for k in keys)
                if key_val in groups:
                    group = groups[key_val]
                    group["debit"] += line["debit"]
                    group["credit"] += line["credit"]
                    if line.get("tax_base"):
                        if "tax_base" not in group:
                            group["tax_base"] = 0
                        group["tax_base"] += line["tax_base"]
                else:
                    groups[key_val] = line.copy()
            group_lines = sorted(groups.values(), key=lambda l: (l["debit"], l["credit"]))
            # Net each group into a single debit or credit and round it.
            for line in group_lines:
                amt = line["debit"] - line["credit"]
                amt = get_model("currency").round(obj.currency_id.id,amt)
                if amt >= 0:
                    line["debit"] = amt
                    line["credit"] = 0
                else:
                    line["debit"] = 0
                    line["credit"] = -amt
            # Balancing entry on the receivable/payable control account.
            amt = 0
            for line in group_lines:
                amt -= line["debit"] - line["credit"]
            line_vals = {
                "description": desc,
                "account_id": account_id,
                "debit": amt > 0 and amt or 0,
                "credit": amt < 0 and -amt or 0,
                "due_date": obj.due_date,
                "contact_id": contact.id,
            }
            acc = get_model("account.account").browse(account_id)
            # Foreign-currency control accounts must match the invoice currency.
            if acc.currency_id.id != settings.currency_id.id:
                if acc.currency_id.id != obj.currency_id.id:
                    raise Exception("Invalid account currency for this invoice: %s" % acc.code)
                line_vals["amount_cur"] = obj.amount_total * sign
            move_vals["lines"] = [("create", line_vals)]
            move_vals["lines"] += [("create", vals) for vals in group_lines]
            t03 = time.time()
            dt02 = (t03 - t02) * 1000
            print("post dt02", dt02)
            move_id = get_model("account.move").create(move_vals)
            t04 = time.time()
            dt03 = (t04 - t03) * 1000
            print("post dt03", dt03)
            get_model("account.move").post([move_id])
            t05 = time.time()
            dt04 = (t05 - t04) * 1000
            print("post dt04", dt04)
            obj.write({"move_id": move_id, "state": "waiting_payment"})
            t06 = time.time()
            dt05 = (t06 - t05) * 1000
            print("post dt05", dt05)
        t1 = time.time()
        dt = (t1 - t0) * 1000
        print("invoice.post <<< %d ms" % dt)
def repost_invoices(self, context={}): # XXX
ids = self.search([["state", "in", ("waiting_payment", "paid")]], order="date")
for obj in self.browse(ids):
print("invoice %d..." % obj.id)
if not obj.move_id:
raise Exception("No journal entry for invoice #%d" % obj.id)
obj.move_id.delete()
obj.post()
def void(self, ids, context={}):
print("invoice.void", ids)
obj = self.browse(ids)[0]
if obj.state not in ("draft", "waiting_payment"):
raise Exception("Invalid invoice state")
if obj.payments:
raise Exception("Can't void invoice because there are related payments")
if obj.credit_alloc:
raise Exception("Can't void invoice because there are credit allocations")
if obj.credit_notes:
raise Exception("Can't void invoice because there are linked credit notes")
if obj.move_id:
obj.move_id.void()
obj.move_id.delete()
obj.write({"state": "voided"})
def to_draft(self, ids, context={}):
obj = self.browse(ids)[0]
if obj.state != "waiting_payment":
raise Exception("Invalid status")
if obj.credit_notes:
raise Exception("There are still payment entries for this invoice")
if obj.move_id:
obj.move_id.void()
obj.move_id.delete()
obj.taxes.delete()
obj.write({"state": "draft"})
    def get_amount(self, ids, context={}):
        """Function-field computation of all invoice amount fields.

        Returns {invoice_id: {field_name: value}} for subtotal, tax,
        rounding, total, paid/due and credit amounts, plus company-currency
        conversions of the latter.
        """
        t0 = time.time()
        settings = get_model("settings").browse(1)
        res = {}
        for inv in self.browse(ids):
            vals = {}
            subtotal = 0
            tax = 0
            for line in inv.lines:
                tax_id = line.tax_id
                if tax_id and inv.tax_type != "no_tax":
                    base_amt = get_model("account.tax.rate").compute_base(tax_id, line.amount, tax_type=inv.tax_type)
                    tax_comps = get_model("account.tax.rate").compute_taxes(tax_id, base_amt, when="invoice")
                    for comp_id, tax_amt in tax_comps.items():
                        tax += tax_amt
                else:
                    base_amt = line.amount
                subtotal += base_amt
            subtotal=get_model("currency").round(inv.currency_id.id,subtotal)
            tax=get_model("currency").round(inv.currency_id.id,tax)
            vals["amount_subtotal"] = subtotal
            # Prefer the stored tax lines when present (they may have been
            # adjusted manually via calc_taxes).
            if inv.taxes:
                tax=sum(t.tax_amount for t in inv.taxes)
            vals["amount_tax"] = tax
            # Tax-inclusive pricing can differ from line sums by rounding.
            if inv.tax_type == "tax_in":
                vals["amount_rounding"] = sum(l.amount for l in inv.lines) - (subtotal + tax)
            else:
                vals["amount_rounding"] = 0
            vals["amount_total"] = subtotal + tax + vals["amount_rounding"]
            vals["amount_total_cur"] = get_model("currency").convert(
                vals["amount_total"], inv.currency_id.id, settings.currency_id.id, round=True, rate=inv.currency_rate)
            vals["amount_credit_total"] = vals["amount_total"]
            # Net payments: same-direction payments reduce, opposite increase.
            paid = 0
            for pmt in inv.payments:
                if pmt.payment_id.id == inv.payment_id.id:
                    continue
                if inv.type == pmt.type:
                    paid -= pmt.amount_currency
                else:
                    paid += pmt.amount_currency
            vals["amount_paid"] = paid
            if inv.inv_type in ("invoice", "debit"):
                cred_amt = 0
                for alloc in inv.credit_notes:
                    cred_amt += alloc.amount
                vals["amount_due"] = vals["amount_total"] - paid - cred_amt
                vals["amount_paid"] = paid + cred_amt  # TODO: check this doesn't break anything...
            elif inv.inv_type in ("credit", "prepay", "overpay"):
                cred_amt = 0
                for alloc in inv.credit_alloc:
                    cred_amt += alloc.amount
                for pmt in inv.payments:
                    if pmt.payment_id.type == inv.type:
                        cred_amt += pmt.amount
                    else:
                        cred_amt -= pmt.amount  # XXX: check this
                vals["amount_credit_remain"] = vals["amount_total"] - cred_amt
                vals["amount_due"] = -vals["amount_credit_remain"]
            vals["amount_due_cur"] = get_model("currency").convert(
                vals["amount_due"], inv.currency_id.id, settings.currency_id.id, round=True, rate=inv.currency_rate)
            vals["amount_paid_cur"] = get_model("currency").convert(
                vals["amount_paid"], inv.currency_id.id, settings.currency_id.id, round=True, rate=inv.currency_rate)
            vals["amount_credit_remain_cur"] = get_model("currency").convert(
                vals.get("amount_credit_remain", 0), inv.currency_id.id, settings.currency_id.id, round=True, rate=inv.currency_rate)
            res[inv.id] = vals
        t1 = time.time()
        dt = (t1 - t0) * 1000
        print("invoice.get_amount <<< %d ms" % dt)
        return res
def get_qty_total(self, ids, context={}):
res = {}
for obj in self.browse(ids):
qty = sum([line.qty or 0 for line in obj.lines])
res[obj.id] = qty
return res
    def update_amounts(self, context):
        """Onchange helper: recompute all amount fields on the raw form data.

        Operates on context["data"] (form values; "lines" may contain None
        placeholders) and returns the mutated data dict.
        """
        data = context["data"]
        settings=get_model("settings").browse(1)
        currency_id = data["currency_id"]
        data["amount_subtotal"] = 0
        data["amount_tax"] = 0
        tax_type = data["tax_type"]
        tax_in_total = 0
        for line in data["lines"]:
            if not line:
                continue
            # Derive the line amount from qty/price/discounts when a unit
            # price is present; otherwise trust the stored amount.
            if line.get("unit_price") is not None:
                amt = (line.get("qty") or 0) * (line.get("unit_price") or 0)
                if line.get("discount"):
                    disc = amt * line["discount"] / 100
                    amt -= disc
                if line.get("discount_amount"):
                    amt -= line["discount_amount"]
                line["amount"] = amt
            else:
                amt = line.get("amount") or 0
            tax_id = line.get("tax_id")
            if tax_id and tax_type != "no_tax":
                base_amt = get_model("account.tax.rate").compute_base(tax_id, amt, tax_type=tax_type)
                tax_comps = get_model("account.tax.rate").compute_taxes(tax_id, base_amt, when="invoice")
                for comp_id, tax_amt in tax_comps.items():
                    data["amount_tax"] += tax_amt
            else:
                base_amt = amt
            data["amount_subtotal"] += Decimal(base_amt)
        # Tax-inclusive pricing can differ from line sums by rounding.
        if tax_type == "tax_in":
            data["amount_rounding"] = sum(
                l.get("amount") or 0 for l in data["lines"] if l) - (data["amount_subtotal"] + data["amount_tax"])
        else:
            data["amount_rounding"] = 0
        data["amount_total"] = data["amount_subtotal"] + data["amount_tax"] + data["amount_rounding"]
        # Net payments: same-direction payments reduce, opposite increase.
        paid = 0
        for pmt in data['payments']:
            if pmt['payment_id'] == data['payment_id']:
                continue
            if data['type'] == pmt['type']:
                paid -= pmt['amount_currency']
            else:
                paid += pmt['amount_currency']
        if data['inv_type'] in ("invoice", "debit"):
            cred_amt = 0
            for alloc in data['credit_notes']:
                cred_amt += alloc['amount']
            data["amount_due"] = data["amount_total"] - paid - cred_amt
            data["amount_paid"] = paid + cred_amt
        elif data['inv_type'] in ("credit", "prepay", "overpay"):
            cred_amt = 0
            for alloc in data['credit_alloc']:
                cred_amt += alloc['amount']
            for pmt in data['payments']:
                payment=get_model("account.payment").browse(pmt['payment_id'])
                if payment.type == data['type']:
                    cred_amt += pmt['amount']
                else:
                    cred_amt -= pmt['amount']
            data["amount_credit_remain"] = data["amount_total"] - cred_amt
            data["amount_due"] = -data["amount_credit_remain"]
        return data
def onchange_product(self, context):
data = context["data"]
type = data["type"]
path = context["path"]
contact_id = data["contact_id"]
contact = get_model("contact").browse(contact_id)
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
line["description"] = prod.description
line["qty"] = 1
if prod.uom_id is not None:
line["uom_id"] = prod.uom_id.id
if type == "out":
if prod.sale_price:
line["unit_price"] = prod.sale_price
if prod.sale_account_id:
line["account_id"] = prod.sale_account_id.id
elif prod.categ_id and prod.categ_id.sale_account_id:
line["account_id"] = prod.categ_id.sale_account_id.id
if contact.tax_receivable_id:
line["tax_id"] = contact.tax_receivable_id.id
elif prod.sale_tax_id:
line["tax_id"] = prod.sale_tax_id.id
elif prod.categ_id and prod.categ_id.sale_tax_id:
line["tax_id"] = prod.categ_id.sale_tax_id.id
elif type == "in":
if prod.purchase_price:
line["unit_price"] = prod.purchase_price
if prod.purchase_account_id:
line["account_id"] = prod.purchase_account_id.id
elif prod.categ_id and prod.categ_id.purchase_account_id:
line["account_id"] = prod.categ_id.purchase_account_id.id
if contact.tax_payable_id:
line["tax_id"] = contact.tax_payable_id.id
elif prod.purchase_tax_id:
line["tax_id"] = prod.purchase_tax_id.id
elif prod.categ_id and prod.categ_id.purchase_tax_id:
line["tax_id"] = prod.categ_id.purchase_tax_id.id
data = self.update_amounts(context)
return data
def onchange_account(self, context):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
acc_id = line.get("account_id")
if not acc_id:
return {}
acc = get_model("account.account").browse(acc_id)
line["tax_id"] = acc.tax_id.id
data = self.update_amounts(context)
return data
def onchange_contact(self, context):
data = context["data"]
contact_id = data.get("contact_id")
if not contact_id:
return {}
contact = get_model("contact").browse(contact_id)
data["bill_address_id"] = contact.get_address(pref_type="billing")
if data["type"] == "out":
data["journal_id"] = contact.sale_journal_id.id
elif data["type"] == "in":
data["journal_id"] = contact.purchase_journal_id.id
self.onchange_journal(context=context)
if contact.currency_id:
data["currency_id"] = contact.currency_id.id
else:
settings = get_model("settings").browse(1)
data["currency_id"] = settings.currency_id.id
return data
def view_invoice(self, ids, context={}):
obj = self.browse(ids[0])
if obj.type == "out":
action = "cust_invoice"
if obj.inv_type == "invoice":
layout = "cust_invoice_form"
elif obj.inv_type == "credit":
layout = "cust_credit_form"
elif obj.inv_type == "debit":
layout = "cust_debit_form"
elif obj.inv_type == "prepay":
layout = "cust_prepay_form"
elif obj.inv_type == "overpay":
layout = "cust_overpay_form"
elif obj.type == "in":
action = "supp_invoice"
if obj.inv_type == "invoice":
layout = "supp_invoice_form"
elif obj.inv_type == "credit":
layout = "supp_credit_form"
elif obj.inv_type == "debit":
layout = "supp_debit_form"
elif obj.inv_type == "prepay":
layout = "supp_prepay_form"
elif obj.inv_type == "overpay":
layout = "supp_overpay_form"
return {
"next": {
"name": action,
"mode": "form",
"form_view_xml": layout,
"active_id": obj.id,
}
}
def get_contact_credit(self, ids, context={}):
obj = self.browse(ids[0])
amt=0
vals = {}
if obj.contact_id:
contact = get_model("contact").browse(obj.contact_id.id, context={"currency_id": obj.currency_id.id})
if obj.type == "out":
amt = contact.receivable_credit
elif obj.type == "in":
amt = contact.payable_credit
vals[obj.id] = amt
return vals
def get_state(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
state = obj.state
if state == "waiting_payment":
if obj.inv_type in ("invoice", "debit"):
if obj.amount_due == 0:
state = "paid"
elif obj.inv_type in ("credit", "prepay", "overpay"):
if obj.amount_credit_remain == 0:
state = "paid"
elif state == "paid":
if obj.inv_type in ("invoice", "debit"):
if obj.amount_due > 0:
state = "waiting_payment"
elif obj.inv_type in ("credit", "prepay", "overpay"):
if obj.amount_credit_remain > 0:
state = "waiting_payment"
vals[obj.id] = state
return vals
    def copy(self, ids, context):
        """Duplicate the first selected invoice (header + lines) as a new
        draft and open the copy."""
        obj = self.browse(ids)[0]
        vals = {
            "type": obj.type,
            "inv_type": obj.inv_type,
            "ref": obj.ref,
            "contact_id": obj.contact_id.id,
            "currency_id": obj.currency_id.id,
            "tax_type": obj.tax_type,
            "memo": obj.memo,
            "lines": [],
        }
        if obj.related_id:
            vals["related_id"] = "%s,%s" % (obj.related_id._model, obj.related_id.id)
        for line in obj.lines:
            line_vals = {
                "product_id": line.product_id.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "tax_id": line.tax_id.id,
                "account_id": line.account_id.id,
                "sale_id": line.sale_id.id,
                "purch_id": line.purch_id.id,
                "amount": line.amount,
            }
            vals["lines"].append(("create", line_vals))
        # Context drives numbering: type/inv_type select the sequence and
        # date the numbering period.
        ctx={"type": obj.type, "inv_type": obj.inv_type, "date": context.get("date")}
        new_id = self.create(vals, context=ctx)
        new_obj = self.browse(new_id)
        if obj.type == "out":
            msg = "Invoice %s copied to %s" % (obj.number, new_obj.number)
        else:
            msg = "Invoice copied"
        return {
            "next": {
                "name": "view_invoice",
                "active_id": new_id,
            },
            "flash": msg,
        }
    def copy_to_debit_note(self, ids, context):
        """Copy the first selected invoice into a new draft debit note
        referencing the original, and open it."""
        obj = self.browse(ids)[0]
        vals = {
            "type": obj.type,
            "inv_type": "debit",
            # The new document references the source invoice's number.
            "ref": obj.number,
            "contact_id": obj.contact_id.id,
            "bill_address_id": obj.bill_address_id.id,
            "currency_id": obj.currency_id.id,
            "currency_rate": obj.currency_rate,
            "tax_type": obj.tax_type,
            "memo": obj.memo,
            "tax_no": obj.tax_no,
            "pay_method_id": obj.pay_method_id.id,
            "original_invoice_id": obj.id,
            "lines": [],
        }
        if obj.related_id:
            vals["related_id"] = "%s,%s" % (obj.related_id._model, obj.related_id.id)
        for line in obj.lines:
            line_vals = {
                "product_id": line.product_id.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "tax_id": line.tax_id.id,
                "account_id": line.account_id.id,
                "sale_id": line.sale_id.id,
                "purch_id": line.purch_id.id,
                "amount": line.amount,
            }
            vals["lines"].append(("create", line_vals))
        new_id = self.create(vals, context={"type": vals["type"], "inv_type": vals["inv_type"]})
        new_obj = self.browse(new_id)
        msg = "Debit note %s created from invoice %s" % (new_obj.number, obj.number)
        return {
            "next": {
                "name": "view_invoice",
                "active_id": new_id,
            },
            "flash": msg,
        }
    def copy_to_credit_note(self, ids, context):
        """Copy the first selected invoice into a new draft credit note
        referencing the original, and open it."""
        obj = self.browse(ids)[0]
        vals = {
            "type": obj.type,
            "inv_type": "credit",
            # The new document references the source invoice's number.
            "ref": obj.number,
            "contact_id": obj.contact_id.id,
            "bill_address_id": obj.bill_address_id.id,
            "currency_id": obj.currency_id.id,
            "currency_rate": obj.currency_rate,
            "tax_type": obj.tax_type,
            "memo": obj.memo,
            "tax_no": obj.tax_no,
            "pay_method_id": obj.pay_method_id.id,
            "original_invoice_id": obj.id,
            "lines": [],
        }
        if obj.related_id:
            vals["related_id"] = "%s,%s" % (obj.related_id._model, obj.related_id.id)
        for line in obj.lines:
            line_vals = {
                "product_id": line.product_id.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "tax_id": line.tax_id.id,
                "account_id": line.account_id.id,
                "sale_id": line.sale_id.id,
                "purch_id": line.purch_id.id,
                "amount": line.amount,
            }
            vals["lines"].append(("create", line_vals))
        new_id = self.create(vals, context={"type": vals["type"], "inv_type": vals["inv_type"]})
        new_obj = self.browse(new_id)
        msg = "Credit note %s created from invoice %s" % (new_obj.number, obj.number)
        return {
            "next": {
                "name": "view_invoice",
                "active_id": new_id,
            },
            "flash": msg,
        }
def view_journal_entry(self, ids, context={}):
obj = self.browse(ids)[0]
return {
"next": {
"name": "journal_entry",
"mode": "form",
"active_id": obj.move_id.id,
}
}
def gen_tax_no(self, exclude=None, context={}):
company_id = get_active_company() # XXX: improve this?
seq_id = get_model("sequence").find_sequence(type="tax_no")
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id, context=context)
if exclude and num in exclude:
get_model("sequence").increment_number(seq_id, context=context)
continue
res = get_model("account.move.line").search([["tax_no", "=", num], ["move_id.company_id", "=", company_id]])
if not res:
return num
get_model("sequence").increment_number(seq_id, context=context)
def get_discount(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amt = 0
for line in obj.lines:
amt += line.amount_discount
vals[obj.id] = amt
return vals
def create_fixed_assets(self, ids, context={}):
for obj in self.browse(ids):
if obj.fixed_assets:
raise Exception("Fixed assets already created for invoice %s" % obj.number)
for line in obj.lines:
acc = line.account_id
if acc.type != "fixed_asset":
continue
ass_type = acc.fixed_asset_type_id
if not ass_type:
continue
vals = {
"name": line.description,
"type_id": ass_type.id,
"date_purchase": obj.date,
"price_purchase": line.amount, # XXX: should be tax-ex
"fixed_asset_account_id": acc.id,
"dep_rate": ass_type.dep_rate,
"dep_method": ass_type.dep_method,
"accum_dep_account_id": ass_type.accum_dep_account_id.id,
"dep_exp_account_id": ass_type.dep_exp_account_id.id,
"invoice_id": obj.id,
}
context['date']=obj.date
get_model("account.fixed.asset").create(vals,context)
def delete_alloc(self, context={}):
alloc_id = context["alloc_id"]
get_model("account.credit.alloc").delete([alloc_id])
def onchange_date(self, context={}):
data = context["data"]
ctx = {
"type": data["type"],
"inv_type": data["inv_type"],
"date": data["date"],
}
number = self._get_number(context=ctx)
data["number"] = number
return data
    def check_related(self, ids, context={}):
        """Validate the document the first invoice is linked to.

        Currently a no-op when a related record exists; the job-approval
        check below is disabled (see XXX).
        """
        obj = self.browse(ids)[0]
        rel = obj.related_id
        if not rel:
            return
        # if rel._model=="job": # XXX: doesn't work for bkkbase modules
        # if not rel.done_approved_by_id:
        # raise Exception("Service order has to be approved before it is invoiced")
def get_template_invoice_form(self, ids=None, context={}):
if ids is None: # XXX: for backward compat with old templates
ids = context["ids"]
obj = get_model("account.invoice").browse(ids)[0]
if obj.type == "out":
if obj.amount_discount:
return "cust_invoice_form_disc"
else:
return "cust_invoice_form"
elif obj.type == "in":
return "supp_invoice_form"
def get_report_data(self, ids=None, context={}): # XXX: deprecated
print("invoice.get_report_data")
if ids is not None: # for new templates
return super().get_report_data(ids, context=context)
ids = context["ids"]
print("ids", ids, type(ids))
inv_id = ids[0]
inv = get_model("account.invoice").browse(inv_id)
dbname = database.get_active_db()
company = inv.company_id
settings = get_model("settings").browse(1)
comp_addr = settings.get_address_str()
comp_name = company.name
comp_phone = settings.phone
comp_fax = settings.fax
comp_tax_no = settings.tax_no
contact = inv.contact_id
cust_addr = contact.get_address_str()
cust_name = contact.name
cust_fax = contact.fax
cust_phone = contact.phone
cust_tax_no = contact.tax_no
data = {
"comp_name": comp_name,
"comp_addr": comp_addr,
"comp_phone": comp_phone or "-",
"comp_fax": comp_fax or "-",
"comp_tax_no": comp_tax_no or "-",
"cust_name": cust_name,
"cust_addr": cust_addr,
"cust_phone": cust_phone or "-",
"cust_fax": cust_fax or "-",
"cust_tax_no": cust_tax_no or "-",
"date": inv.date or "-",
"due_date": inv.due_date or "-",
"number": inv.number or "-",
"ref": inv.ref or "-",
"memo": inv.memo or "",
"lines": [],
}
if settings.logo:
data["logo"] = get_file_path(settings.logo)
#support settings.logo
data['settings']={
"logo": get_file_path(settings.logo)
}
for line in inv.lines:
data["lines"].append({
"description": line.description,
"code": line.product_id.code,
"qty": line.qty,
"uom": line.uom_id.name,
"unit_price": line.unit_price,
"discount": line.discount,
"tax_rate": line.tax_id.rate,
"amount": line.amount,
})
is_cash = 'No'
is_cheque = 'No'
for obj in inv.payments:
account_type = obj.payment_id.account_id.type
if account_type in ("bank", "cash"):
is_cash = 'Yes'
if account_type in ("cheque"):
is_cheque = 'Yes'
data.update({
"amount_subtotal": inv.amount_subtotal,
"amount_discount": inv.amount_discount,
"amount_tax": inv.amount_tax,
"amount_total": inv.amount_total,
"amount_paid": inv.amount_paid,
"payment_terms": inv.related_id.payment_terms or "-",
"is_cash": is_cash,
"is_cheque": is_cheque,
"currency_code": inv.currency_id.code,
"tax_rate": get_model("currency").round(inv.currency_id.id,inv.amount_tax * 100 / inv.amount_subtotal) if inv.amount_subtotal else 0,
"qty_total": inv.qty_total,
"memo": inv.memo,
})
if inv.credit_alloc:
data.update({
"original_inv_subtotal": inv.credit_alloc[0].invoice_id.amount_subtotal,
})
return data
def onchange_journal(self, context={}):
data = context["data"]
journal_id = data["journal_id"]
if journal_id:
journal = get_model("account.journal").browse(journal_id)
data["sequence_id"] = journal.sequence_id.id
else:
data["sequence_id"] = None
self.onchange_sequence(context=context)
return data
def onchange_sequence(self, context={}):
data = context["data"]
seq_id = data["sequence_id"]
num = self._get_number(context={"type": data["type"], "inv_type": data["inv_type"], "date": data["date"], "sequence_id": seq_id})
data["number"] = num
return data
Invoice.register()
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cache for nodes currently under introspection."""
import contextlib
import json
import logging
import os
import sqlite3
import sys
import time
from oslo_config import cfg
from ironic_discoverd.common.i18n import _, _LC, _LE
from ironic_discoverd import utils
CONF = cfg.CONF
LOG = logging.getLogger("ironic_discoverd.node_cache")
# Path of the sqlite database file; resolved lazily by init().
_DB_NAME = None
# Schema: one row per node under introspection, plus its look-up attributes
# (e.g. MAC addresses) and per-node introspection options.
_SCHEMA = """
create table if not exists nodes
(uuid text primary key, started_at real, finished_at real, error text);
create table if not exists attributes
(name text, value text, uuid text,
primary key (name, value),
foreign key (uuid) references nodes);
create table if not exists options
(uuid text, name text, value text,
primary key (uuid, name),
foreign key (uuid) references nodes);
"""
# Attribute name under which node MAC addresses are stored.
MACS_ATTRIBUTE = 'mac'
class NodeInfo(object):
"""Record about a node in the cache."""
def __init__(self, uuid, started_at, finished_at=None, error=None):
self.uuid = uuid
self.started_at = started_at
self.finished_at = finished_at
self.error = error
self.invalidate_cache()
@property
def options(self):
"""Node introspection options as a dict."""
if self._options is None:
rows = _db().execute('select name, value from options '
'where uuid=?', (self.uuid,))
self._options = {row['name']: json.loads(row['value'])
for row in rows}
return self._options
def set_option(self, name, value):
"""Set an option for a node."""
encoded = json.dumps(value)
self.options[name] = value
with _db() as db:
db.execute('delete from options where uuid=? and name=?',
(self.uuid, name))
db.execute('insert into options(uuid, name, value) values(?,?,?)',
(self.uuid, name, encoded))
def finished(self, error=None):
"""Record status for this node.
Also deletes look up attributes from the cache.
:param error: error message
"""
self.finished_at = time.time()
self.error = error
with _db() as db:
db.execute('update nodes set finished_at=?, error=? where uuid=?',
(self.finished_at, error, self.uuid))
db.execute("delete from attributes where uuid=?", (self.uuid,))
db.execute("delete from options where uuid=?", (self.uuid,))
def add_attribute(self, name, value, database=None):
"""Store look up attribute for a node in the database.
:param name: attribute name
:param value: attribute value or list of possible values
:param database: optional existing database connection
:raises: Error if attributes values are already in database
"""
if not isinstance(value, list):
value = [value]
with _maybe_db(database) as db:
try:
db.executemany("insert into attributes(name, value, uuid) "
"values(?, ?, ?)",
[(name, v, self.uuid) for v in value])
except sqlite3.IntegrityError as exc:
LOG.error(_LE('Database integrity error %s during '
'adding attributes'), exc)
raise utils.Error(_(
'Some or all of %(name)s\'s %(value)s are already '
'on introspection') % {'name': name, 'value': value})
@classmethod
def from_row(cls, row):
"""Construct NodeInfo from a database row."""
fields = {key: row[key]
for key in ('uuid', 'started_at', 'finished_at', 'error')}
return cls(**fields)
    def invalidate_cache(self):
        """Clear all cached info, so that it's reloaded next time."""
        # Dropping the dict forces the ``options`` property to re-query
        # the database on next access.
        self._options = None
def init():
    """Initialize the database.

    Reads the database file name from configuration, creates its parent
    directory if needed and applies the schema.  Exits the process when
    the configuration option is not set.
    """
    global _DB_NAME
    _DB_NAME = CONF.discoverd.database.strip()
    if not _DB_NAME:
        LOG.critical(_LC('Configuration option discoverd.database'
                         ' should be set'))
        sys.exit(1)
    db_dir = os.path.dirname(_DB_NAME)
    if db_dir and not os.path.exists(db_dir):
        os.makedirs(db_dir)
    # NOTE(review): assumes _SCHEMA is idempotent (create ... if not
    # exists) since init() may run against an existing database — confirm.
    sqlite3.connect(_DB_NAME).executescript(_SCHEMA)
def _db():
    """Return a new connection to the cache database.

    Initializes the database lazily on first use.  A fresh connection is
    created on every call; callers use it as a context manager to get
    transactional behaviour.
    """
    if _DB_NAME is None:
        init()
    conn = sqlite3.connect(_DB_NAME)
    # Allow accessing columns by name, e.g. row['uuid'].
    conn.row_factory = sqlite3.Row
    return conn
@contextlib.contextmanager
def _maybe_db(db=None):
    """Yield *db* unchanged when provided, otherwise a fresh connection.

    A fresh connection is entered as a context manager so the statements
    executed by the caller run inside a transaction.
    """
    if db is not None:
        yield db
    else:
        with _db() as connection:
            yield connection
def add_node(uuid, **attributes):
    """Store information about a node under introspection.

    All existing information about this node is dropped.
    Empty values are skipped.

    :param uuid: Ironic node UUID
    :param attributes: attributes known about this node (like macs, BMC etc)
    :returns: NodeInfo
    """
    started_at = time.time()
    with _db() as db:
        # Wipe any stale state for this UUID before inserting fresh rows.
        db.execute("delete from nodes where uuid=?", (uuid,))
        db.execute("delete from attributes where uuid=?", (uuid,))
        db.execute("delete from options where uuid=?", (uuid,))
        db.execute("insert into nodes(uuid, started_at) "
                   "values(?, ?)", (uuid, started_at))
        node_info = NodeInfo(uuid=uuid, started_at=started_at)
        for name, value in attributes.items():
            if value:
                node_info.add_attribute(name, value, database=db)
    return node_info
def active_macs():
    """List all MAC's that are on introspection right now."""
    rows = _db().execute("select value from attributes "
                         "where name=?", (MACS_ATTRIBUTE,))
    return {row[0] for row in rows}
def get_node(uuid):
    """Get node from cache by its UUID.

    :param uuid: node UUID.
    :returns: structure NodeInfo.
    :raises: Error with code 404 when the node is not in the cache.
    """
    record = _db().execute('select * from nodes where uuid=?',
                           (uuid,)).fetchone()
    if record is None:
        raise utils.Error(_('Could not find node %s in cache') % uuid,
                          code=404)
    return NodeInfo.from_row(record)
def find_node(**attributes):
    """Find node in cache.

    :param attributes: attributes known about this node (like macs, BMC etc)
    :returns: structure NodeInfo with attributes ``uuid`` and ``created_at``
    :raises: Error if node is not found, matched more than once, or has
        already finished introspection
    """
    # NOTE(dtantsur): sorting is not required, but gives us predictability
    found = set()
    db = _db()
    for (name, value) in sorted(attributes.items()):
        if not value:
            LOG.debug('Empty value for attribute %s', name)
            continue
        if not isinstance(value, list):
            value = [value]
        LOG.debug('Trying to use %s of value %s for node look up'
                  % (name, value))
        # Build one OR'ed query per attribute: any candidate value may
        # match.  Parameters are flattened to [name, v1, name, v2, ...]
        # to line up with the repeated "name=? AND value=?" placeholders.
        rows = db.execute('select distinct uuid from attributes where ' +
                          ' OR '.join('name=? AND value=?' for _ in value),
                          sum(([name, v] for v in value), [])).fetchall()
        if rows:
            found.update(item[0] for item in rows)
    if not found:
        raise utils.Error(_(
            'Could not find a node for attributes %s') % attributes, code=404)
    elif len(found) > 1:
        raise utils.Error(_(
            'Multiple matching nodes found for attributes %(attr)s: %(found)s')
            % {'attr': attributes, 'found': list(found)}, code=404)
    uuid = found.pop()
    row = db.execute('select started_at, finished_at from nodes where uuid=?',
                     (uuid,)).fetchone()
    if not row:
        # Attributes matched but the node row is gone — likely cleaned up.
        raise utils.Error(_(
            'Could not find node %s in introspection cache, '
            'probably it\'s not on introspection now') % uuid, code=404)
    if row['finished_at']:
        raise utils.Error(_(
            'Introspection for node %(node)s already finished on %(finish)s') %
            {'node': uuid, 'finish': row['finished_at']})
    return NodeInfo(uuid=uuid, started_at=row['started_at'])
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUID's
    """
    status_keep_threshold = (time.time() -
                             CONF.discoverd.node_status_keep_time)
    with _db() as db:
        # Forget finished nodes whose status is older than the keep time.
        db.execute('delete from nodes where finished_at < ?',
                   (status_keep_threshold,))
    timeout = CONF.discoverd.timeout
    if timeout <= 0:
        # Time-out handling is disabled by configuration.
        return []
    threshold = time.time() - timeout
    with _db() as db:
        uuids = [row[0] for row in
                 db.execute('select uuid from nodes where '
                            'started_at < ? and finished_at is null',
                            (threshold,))]
        if not uuids:
            return []
        LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
        # Mark the nodes as failed and drop their transient data,
        # mirroring what NodeInfo.finished() does on error.
        db.execute('update nodes set finished_at=?, error=? '
                   'where started_at < ? and finished_at is null',
                   (time.time(), 'Introspection timeout', threshold))
        db.executemany('delete from attributes where uuid=?',
                       [(u,) for u in uuids])
        db.executemany('delete from options where uuid=?',
                       [(u,) for u in uuids])
    return uuids
| |
"""Support for Rheem EcoNet water heaters."""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.water_heater import (
DOMAIN, PLATFORM_SCHEMA, STATE_ECO, STATE_ELECTRIC, STATE_GAS,
STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_OFF, STATE_PERFORMANCE,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, WaterHeaterDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_PASSWORD, CONF_USERNAME,
TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyeconet==0.0.10']

_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed on the water heater entity.
ATTR_VACATION_START = 'next_vacation_start_date'
ATTR_VACATION_END = 'next_vacation_end_date'
ATTR_ON_VACATION = 'on_vacation'
ATTR_TODAYS_ENERGY_USAGE = 'todays_energy_usage'
ATTR_IN_USE = 'in_use'

# Service call data keys; values are epoch timestamps (see add_vacation).
ATTR_START_DATE = 'start_date'
ATTR_END_DATE = 'end_date'

SUPPORT_FLAGS_HEATER = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE)

SERVICE_ADD_VACATION = 'econet_add_vacation'
SERVICE_DELETE_VACATION = 'econet_delete_vacation'

ADD_VACATION_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Optional(ATTR_START_DATE): cv.positive_int,
    vol.Required(ATTR_END_DATE): cv.positive_int,
})

DELETE_VACATION_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})

# Key under hass.data where this platform stores its entity list.
ECONET_DATA = 'econet'

# Mapping from EcoNet API mode names to Home Assistant water heater states.
ECONET_STATE_TO_HA = {
    'Energy Saver': STATE_ECO,
    'gas': STATE_GAS,
    'High Demand': STATE_HIGH_DEMAND,
    'Off': STATE_OFF,
    'Performance': STATE_PERFORMANCE,
    'Heat Pump Only': STATE_HEAT_PUMP,
    'Electric-Only': STATE_ELECTRIC,
    'Electric': STATE_ELECTRIC,
    'Heat Pump': STATE_HEAT_PUMP
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the EcoNet water heaters and register vacation services."""
    from pyeconet.api import PyEcoNet

    hass.data[ECONET_DATA] = {}
    hass.data[ECONET_DATA]['water_heaters'] = []
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    econet = PyEcoNet(username, password)
    water_heaters = econet.get_water_heaters()
    hass_water_heaters = [
        EcoNetWaterHeater(water_heater) for water_heater in water_heaters]
    add_entities(hass_water_heaters)
    hass.data[ECONET_DATA]['water_heaters'].extend(hass_water_heaters)

    def service_handle(service):
        """Handle the vacation service calls."""
        entity_ids = service.data.get('entity_id')
        all_heaters = hass.data[ECONET_DATA]['water_heaters']
        # Limit to the requested entities; no entity_id means all heaters.
        _heaters = [
            x for x in all_heaters
            if not entity_ids or x.entity_id in entity_ids]
        for _water_heater in _heaters:
            if service.service == SERVICE_ADD_VACATION:
                start = service.data.get(ATTR_START_DATE)
                end = service.data.get(ATTR_END_DATE)
                _water_heater.add_vacation(start, end)
            if service.service == SERVICE_DELETE_VACATION:
                for vacation in _water_heater.water_heater.vacations:
                    vacation.delete()
            # Refresh entity state after any vacation change.
            _water_heater.schedule_update_ha_state(True)

    hass.services.register(DOMAIN, SERVICE_ADD_VACATION, service_handle,
                           schema=ADD_VACATION_SCHEMA)
    hass.services.register(DOMAIN, SERVICE_DELETE_VACATION, service_handle,
                           schema=DELETE_VACATION_SCHEMA)
class EcoNetWaterHeater(WaterHeaterDevice):
    """Representation of an EcoNet water heater."""

    def __init__(self, water_heater):
        """Initialize the water heater.

        :param water_heater: pyeconet water heater device object
        """
        self.water_heater = water_heater
        self.supported_modes = self.water_heater.supported_modes
        # Bidirectional mappings between EcoNet mode names and HA states,
        # restricted to the modes this particular device supports.
        self.econet_state_to_ha = {}
        self.ha_state_to_econet = {}
        for mode in ECONET_STATE_TO_HA:
            if mode in self.supported_modes:
                self.econet_state_to_ha[mode] = ECONET_STATE_TO_HA.get(mode)
        for key, value in self.econet_state_to_ha.items():
            self.ha_state_to_econet[value] = key
        # Surface any device mode we do not know how to map.
        for mode in self.supported_modes:
            if mode not in ECONET_STATE_TO_HA:
                error = "Invalid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)

    @property
    def name(self):
        """Return the device name."""
        return self.water_heater.name

    @property
    def available(self):
        """Return if the device is online or not."""
        return self.water_heater.is_connected

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT

    @property
    def device_state_attributes(self):
        """Return the optional device state attributes."""
        data = {}
        vacations = self.water_heater.get_vacations()
        if vacations:
            # Only the next scheduled vacation is exposed.
            data[ATTR_VACATION_START] = vacations[0].start_date
            data[ATTR_VACATION_END] = vacations[0].end_date
        data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation
        todays_usage = self.water_heater.total_usage_for_today
        if todays_usage:
            data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage
        data[ATTR_IN_USE] = self.water_heater.in_use
        return data

    @property
    def current_operation(self):
        """
        Return current operation as one of the following.

        ["eco", "heat_pump", "high_demand", "electric_only"]
        """
        current_op = self.econet_state_to_ha.get(self.water_heater.mode)
        return current_op

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = []
        for mode in self.supported_modes:
            ha_mode = self.econet_state_to_ha.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
        return op_list

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS_HEATER

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        if target_temp is not None:
            self.water_heater.set_target_set_point(target_temp)
        else:
            _LOGGER.error("A target temperature must be provided")

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = self.ha_state_to_econet.get(operation_mode)
        if op_mode_to_set is not None:
            self.water_heater.set_mode(op_mode_to_set)
        else:
            _LOGGER.error("An operation mode must be provided")

    def add_vacation(self, start, end):
        """Add a vacation to this water heater.

        :param start: epoch timestamp, or falsy for "now"
        :param end: epoch timestamp
        """
        if not start:
            start = datetime.datetime.now()
        else:
            start = datetime.datetime.fromtimestamp(start)
        end = datetime.datetime.fromtimestamp(end)
        self.water_heater.set_vacation_mode(start, end)

    def update(self):
        """Get the latest date."""
        self.water_heater.update_state()

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.water_heater.set_point

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self.water_heater.min_set_point

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self.water_heater.max_set_point
| |
from __future__ import annotations
import re
from typing import Generator
from ..utils.fs import Path
from .plugin.interface import PublisherInterface
# Metadata fields that may legitimately appear multiple times in a
# METADATA/PKG-INFO file; parse_headers collects their values into lists.
MULTIPLE_USE_METADATA_FIELDS = {
    'classifier',
    'dynamic',
    'license_file',
    'obsoletes_dist',
    'platform',
    'project_url',
    'provides_dist',
    'provides_extra',
    'requires_dist',
    'requires_external',
    'supported_platform',
}

# Metadata header names the upload form expects under a different key.
RENAMED_METADATA_FIELDS = {'classifier': 'classifiers', 'project_url': 'project_urls'}
class PyPIPublisher(PublisherInterface):
    """Publish built artifacts to a PyPI-style (warehouse) index."""

    PLUGIN_NAME = 'pypi'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # User-configured repositories, with the well-known names always
        # available (and not overridable).
        self.repos = self.plugin_config.get('repos', {}).copy()
        self.repos['main'] = 'https://upload.pypi.org/legacy/'
        self.repos['test'] = 'https://test.pypi.org/legacy/'

    def publish(self, artifacts: list, options: dict):
        """Upload wheels and sdists to the configured repository.

        https://warehouse.readthedocs.io/api-reference/legacy.html#upload-api
        """
        import hashlib
        import io
        from collections import defaultdict
        from urllib.parse import urlparse

        import httpx

        if not artifacts:
            from hatchling.builders.constants import DEFAULT_BUILD_DIRECTORY

            artifacts = [DEFAULT_BUILD_DIRECTORY]

        # Resolve the target repository: CLI option wins over config;
        # known names ('main', 'test', user-defined) map to URLs.
        if 'repo' in options:
            repo = options['repo']
        else:
            repo = self.plugin_config.get('repo', 'main')
        if repo in self.repos:
            repo = self.repos[repo]

        cached_user_file = CachedUserFile(self.cache_dir)
        updated_user = None
        # Resolve the username: option > config > per-repo cache > prompt.
        if 'user' in options:
            user = options['user']
        else:
            user = self.plugin_config.get('user', '')
            if not user:
                user = cached_user_file.get_user(repo)
                if user is None:
                    if options['no_prompt']:
                        self.app.abort('Missing required option: user')
                    else:
                        user = updated_user = self.app.prompt('Enter your username')

        updated_auth = None
        # Resolve credentials: option > config > keyring > prompt.
        if 'auth' in options:
            auth = options['auth']
        else:
            auth = self.plugin_config.get('auth', '')
            if not auth:
                import keyring

                auth = keyring.get_password(repo, user)
                if auth is None:
                    if options['no_prompt']:
                        self.app.abort('Missing required option: auth')
                    else:
                        auth = updated_auth = self.app.prompt('Enter your credentials', hide_input=True)

        # Derive the simple-index URL used to check for existing artifacts.
        repo_components = urlparse(repo)
        domain = repo_components.netloc
        if domain == 'upload.pypi.org':  # no cov
            domain = 'pypi.org'
        index_url = f'{repo_components.scheme}://{domain}/simple/'

        # Artifact file names already published, per project, so that
        # re-uploads can be skipped rather than rejected.
        existing_artifacts: dict[str, set[str]] = {}

        # Use as an ordered set
        project_versions: dict[str, dict[str, None]] = defaultdict(dict)

        artifacts_found = False
        for artifact in recurse_artifacts(artifacts, self.root):
            if artifact.name.endswith('.whl'):
                data = get_wheel_form_data(self.app, artifact)
            elif artifact.name.endswith('.tar.gz'):
                data = get_sdist_form_data(self.app, artifact)
            else:
                continue

            artifacts_found = True

            for field in ('name', 'version'):
                if field not in data:
                    self.app.abort(f'Missing required field `{field}` in artifact: {artifact}')

            try:
                displayed_path = str(artifact.relative_to(self.root))
            except ValueError:
                displayed_path = str(artifact)
            self.app.display_info(f'{displayed_path} ...', end=' ')

            project_name = normalize_project_name(data['name'])
            if project_name not in existing_artifacts:
                try:
                    response = httpx.get(f'{index_url}{project_name}/')
                    response.raise_for_status()
                except Exception:  # no cov
                    # Project may not exist yet or the index may be
                    # unreachable; assume nothing is published.
                    existing_artifacts[project_name] = set()
                else:
                    existing_artifacts[project_name] = set(parse_artifacts(response.text))

            if artifact.name in existing_artifacts[project_name]:
                self.app.display_warning('already exists')
                continue

            data[':action'] = 'file_upload'
            data['protocol_version'] = '1'

            with artifact.open('rb') as f:
                # https://github.com/pypa/warehouse/blob/7fc3ce5bd7ecc93ef54c1652787fb5e7757fe6f2/tests/unit/packaging/test_tasks.py#L189-L191
                md5_hash = hashlib.md5()
                sha256_hash = hashlib.sha256()
                blake2_256_hash = hashlib.blake2b(digest_size=32)
                while True:
                    chunk = f.read(io.DEFAULT_BUFFER_SIZE)
                    if not chunk:
                        break
                    md5_hash.update(chunk)
                    sha256_hash.update(chunk)
                    blake2_256_hash.update(chunk)

                data['md5_digest'] = md5_hash.hexdigest()
                data['sha256_digest'] = sha256_hash.hexdigest()
                data['blake2_256_digest'] = blake2_256_hash.hexdigest()

                # Rewind so the same handle can be streamed as the body.
                f.seek(0)
                try:
                    response = httpx.post(
                        repo,
                        data=data,
                        files={'content': (artifact.name, f, 'application/octet-stream')},
                        auth=(user, auth),
                    )
                    response.raise_for_status()
                except Exception as e:
                    self.app.display_error('failed')
                    # Never echo the credential in error output.
                    self.app.abort(str(e).replace(auth, '*****'))
                else:
                    self.app.display_success('success')
                    existing_artifacts[project_name].add(artifact.name)
                    project_versions[project_name][data['version']] = None

        if not artifacts_found:
            self.app.abort('No artifacts found')
        elif not project_versions:
            # Everything already existed; nothing was uploaded.
            self.app.abort(code=0)

        # Show links to what was just published.
        if domain.endswith('pypi.org'):
            for project_name, versions in project_versions.items():
                self.app.display_info()
                self.app.display_mini_header(project_name)
                for version in versions:
                    self.app.display_info(f'https://{domain}/project/{project_name}/{version}/')
        else:  # no cov
            for project_name in project_versions:
                self.app.display_info()
                self.app.display_mini_header(project_name)
                self.app.display_info(f'{index_url}{project_name}/')

        # Persist credentials that were newly prompted and worked.
        if updated_user is not None:
            cached_user_file.set_user(repo, user)
        if updated_auth is not None:
            import keyring

            keyring.set_password(repo, user, auth)
def get_wheel_form_data(app, artifact):
    """Build upload form fields for a wheel.

    Reads ``METADATA`` from the wheel's ``.dist-info`` directory and adds
    the wheel-specific ``filetype``/``pyversion`` fields.

    :param app: application object used for output and aborting
    :param artifact: path to the ``.whl`` file
    """
    import zipfile

    from packaging.tags import parse_tag

    with zipfile.ZipFile(str(artifact), 'r') as zip_archive:
        dist_info_dir = ''
        for path in zip_archive.namelist():
            root = path.split('/', 1)[0]
            if root.endswith('.dist-info'):
                dist_info_dir = root
                break
        else:  # no cov
            # Loop exhausted without a break: no .dist-info directory.
            app.abort(f'Could not find the `.dist-info` directory in wheel: {artifact}')

        try:
            with zip_archive.open(f'{dist_info_dir}/METADATA') as zip_file:
                metadata_file_contents = zip_file.read().decode('utf-8')
        except KeyError:  # no cov
            app.abort(f'Could not find a `METADATA` file in the `{dist_info_dir}` directory')
        else:
            data = parse_headers(metadata_file_contents)
            data['filetype'] = 'bdist_wheel'

            # Examples:
            # cryptography-3.4.7-pp37-pypy37_pp73-manylinux2014_x86_64.whl -> pp37
            # hatchling-1rc1-py2.py3-none-any.whl -> py2.py3
            # The last three dash-separated stem parts form the compressed
            # tag set; collect the distinct interpreter tags from it.
            tag_component = '-'.join(artifact.stem.split('-')[-3:])
            data['pyversion'] = '.'.join(sorted(set(tag.interpreter for tag in parse_tag(tag_component))))

            return data
def get_sdist_form_data(app, artifact):
    """Build upload form fields for an sdist.

    Reads ``PKG-INFO`` from the archive's top-level directory and adds
    the sdist-specific ``filetype``/``pyversion`` fields.

    :param app: application object used for output and aborting
    :param artifact: path to the ``.tar.gz`` file
    """
    import tarfile

    with tarfile.open(str(artifact), 'r:gz') as tar_archive:
        pkg_info_dir_parts = []
        # Locate the directory of the first file member; PKG-INFO is
        # assumed to live in that same top-level directory.
        for tar_info in tar_archive:
            if tar_info.isfile():
                pkg_info_dir_parts.extend(tar_info.name.split('/')[:-1])
                break
            else:  # no cov
                pass
        else:  # no cov
            # Loop exhausted without a break: archive has no file members.
            app.abort(f'Could not find any files in sdist: {artifact}')

        pkg_info_dir_parts.append('PKG-INFO')
        pkg_info_path = '/'.join(pkg_info_dir_parts)
        try:
            with tar_archive.extractfile(pkg_info_path) as tar_file:
                metadata_file_contents = tar_file.read().decode('utf-8')
        except KeyError:  # no cov
            app.abort(f'Could not find file: {pkg_info_path}')
        else:
            data = parse_headers(metadata_file_contents)
            data['filetype'] = 'sdist'
            data['pyversion'] = 'source'

            return data
def parse_headers(metadata_file_contents):
    """Convert an email-style metadata document into upload form fields.

    Multiple-use fields are collected into lists, renamed fields use the
    form's expected key, and the message body becomes ``description``.
    """
    import email

    message = email.message_from_string(metadata_file_contents)
    headers = {'description': message.get_payload()}
    for raw_name, value in message.items():
        normalized = raw_name.lower().replace('-', '_')
        field = RENAMED_METADATA_FIELDS.get(normalized, normalized)
        if normalized not in MULTIPLE_USE_METADATA_FIELDS:
            headers[field] = value
        elif field in headers:
            headers[field].append(value)
        else:
            headers[field] = [value]
    return headers
def recurse_artifacts(artifacts: list, root) -> Generator[Path, None, None]:
    """Yield artifact files, resolving relative entries against *root*
    and expanding directories one level deep."""
    for entry in artifacts:
        path = Path(entry)
        if not path.is_absolute():
            path = root / path
        if path.is_file():
            yield path
        elif path.is_dir():
            yield from path.iterdir()
def normalize_project_name(name):
    """Normalize a project name per PEP 503.

    https://www.python.org/dev/peps/pep-0503/#normalized-names
    """
    return re.sub(r'[-_.]+', '-', name.lower())
def parse_artifacts(artifact_payload):
    """Yield artifact file names from a simple-index HTML page."""
    yield from re.findall(r'<a [^>]+>([^<]+)</a>', artifact_payload)
class CachedUserFile:
    """Persisted mapping of repository URL to the last working username."""

    def __init__(self, cache_dir: 'Path'):
        self.path = cache_dir / 'previous_working_users.json'
        self._data = None

    def get_user(self, repo: str):
        """Return the cached username for *repo*, or None."""
        return self.data.get(repo)

    def set_user(self, repo: str, user: str):
        """Record *user* as the working username for *repo* and persist."""
        import json

        self.data[repo] = user
        self.path.ensure_parent_dir_exists()
        self.path.write_text(json.dumps(self.data))

    @property
    def data(self):
        """Cache contents as a dict, loaded lazily from disk."""
        if self._data is None:
            self._data = self._load()
        return self._data

    def _load(self):
        # Missing or empty file means no cached users yet.
        if not self.path.is_file():
            return {}
        contents = self.path.read_text()
        if not contents:  # no cov
            return {}
        import json
        return json.loads(contents)
| |
from __future__ import (absolute_import, print_function)
from base64 import b64decode, b64encode
import cherrypy
from datetime import datetime
import dozer.dao as dao
import dozer.filesystem as fs
import dozer.jsonrpc as jsonrpc
from dozer.exception import (
FileNotFoundError, InvalidParameterError, LoginDeniedError,
PermissionDeniedError)
from functools import partial
from httplib import METHOD_NOT_ALLOWED
from logging import getLogger
from mako.lookup import TemplateLookup
from mako.runtime import Context
from mako.template import Template
from os.path import abspath, dirname, exists, isfile
import sqlalchemy.orm.exc
from sqlite3 import Connection
from sys import exit
from urllib import quote_plus as quote_url
log = getLogger("dozer.app")
@jsonrpc.expose
class DozerAPI(object):
    """JSON-RPC API surface for the Dozer filesystem.

    Methods are exposed as JSON-RPC endpoints via ``jsonrpc.expose``.
    """

    @jsonrpc.expose
    def create_folder(self, node_name=None, inherit_permissions=True):
        """Create a folder node named *node_name*."""
        return fs.create_folder(folder_name=node_name,
                                inherit_permissions=inherit_permissions)

    @jsonrpc.expose
    def create_notepage(self, node_name=None, inherit_permissions=True):
        """Create a notepage node named *node_name*."""
        return fs.create_notepage(notepage_name=node_name,
                                  inherit_permissions=inherit_permissions)

    @jsonrpc.expose
    def create_note(self, notepage_id=None, pos_um=None, size_um=None):
        """Create a note on the notepage with id *notepage_id*.

        ``pos_um``/``size_um`` are presumably micrometre coordinates
        (per the ``_um`` suffix) — units are defined in dozer.filesystem.
        """
        notepage = fs.FilesystemNode.get_node_by_id(notepage_id)
        if not isinstance(notepage, fs.Notepage):
            raise InvalidParameterError(
                "notepage_id %r does not refer to a notepage", notepage_id)
        return notepage.create_note(pos_um=pos_um, size_um=size_um)

    @jsonrpc.expose
    def update_notepage(self, notepage_id=None, updates=None):
        """Apply a batch of updates to a notepage.

        :param updates: list of update dicts, each with an ``action`` key
        :returns: dict with the new notepage revision id and a per-update
            result list
        """
        if not isinstance(updates, (list, tuple)):
            raise InvalidParameterError(
                "updates must be a list of update objects")
        notepage = fs.FilesystemNode.get_node_by_id(notepage_id)
        if not isinstance(notepage, fs.Notepage):
            raise InvalidParameterError(
                "notepage_id %r does not refer to a notepage", notepage_id)

        # Running change log.
        changes = []

        # Updated notes
        results = []

        for update_id, update in enumerate(updates):
            action = update.get('action')
            if action is None:
                raise InvalidParameterError(
                    "update %d does not have an action", update_id)
            # Tag each update with its index so error messages and
            # handlers can refer back to it.
            update['update_id'] = update_id
            if action == "edit_note":
                change, result = self._edit_note(notepage, update)
                changes.append(change)
                results.append(result)
            else:
                raise InvalidParameterError(
                    "update %d has invalid action %r", update_id, action)
        # end for

        notepage.update(changes)

        return {
            'notepage_revision_id': notepage.revision_id,
            'results': results
        }

    @jsonrpc.expose
    def list_folder(self, node_name=None):
        """Return the children of the node named *node_name*."""
        return fs.get_node(node_name).children

    def _edit_note(self, notepage, update):
        """Apply a single ``edit_note`` update to a note.

        :param notepage: the containing notepage (not referenced here)
        :param update: update dict; must contain ``note_id`` and
            ``revision_id``
        :returns: (change, result) tuple — *change* records [old, new]
            value pairs for the notepage change log, *result* echoes the
            new values plus the note's new revision id
        """
        change = {}
        result = {}
        update_id = update['update_id']

        note_id = update.get('note_id')
        if note_id is None:
            raise InvalidParameterError(
                "update %d action edit_note does not have a note_id",
                update_id)

        revision_id = update.get('revision_id')
        if revision_id is None:
            raise InvalidParameterError(
                "update %d action edit_note does not have a "
                "revision_id", update_id)

        note = fs.FilesystemNode.get_node_by_id(note_id)
        if not isinstance(note, fs.Note):
            raise InvalidParameterError(
                "update %d action edit_note node_id %d does not refer "
                "to a note", update_id, note_id)

        change['action'] = 'edit_note'
        change['note_id'] = note_id
        result['note_id'] = note_id

        # For each optional field: record [old, new] in the change log,
        # echo the new value in the result, and apply it to the note.
        pos_um = update.get('pos_um')
        if pos_um is not None:
            change['pos_um'] = [note.pos_um, pos_um]
            result['pos_um'] = pos_um
            note.pos_um = pos_um

        size_um = update.get('size_um')
        if size_um is not None:
            change['size_um'] = [note.size_um, size_um]
            result['size_um'] = size_um
            note.size_um = size_um

        z_index = update.get('z_index')
        if z_index is not None:
            change['z_index'] = [note.z_index, z_index]
            result['z_index'] = z_index
            note.z_index = z_index

        contents_markdown = update.get('contents_markdown')
        if contents_markdown is not None:
            change['contents_markdown'] = [
                note.contents_markdown, contents_markdown]
            result['contents_markdown'] = contents_markdown
            note.contents_markdown = contents_markdown

        note.update()
        result['revision_id'] = note.revision_id
        return change, result
class DreadfulBulldozer(object):
    def __init__(self, server_root):
        """Create the application.

        :param server_root: filesystem path of the server installation;
            Mako templates are loaded from ``<server_root>/pages``
        """
        super(DreadfulBulldozer, self).__init__()
        self.server_root = server_root
        self.template_dir = self.server_root + "/pages"
        self.template_lookup = TemplateLookup(directories=[self.template_dir])
        self.jsonrpc = jsonrpc.JSONRPC()
        # Mount the JSON-RPC API under the app's /jsonrpc handler.
        self.jsonrpc.dozer = DozerAPI()
        return
    @cherrypy.expose
    def index(self, *args, **kw):
        """Render the landing page."""
        page = Template(filename=self.template_dir + "/index.html",
                        lookup=self.template_lookup,
                        strict_undefined=True)
        cherrypy.serving.response.headers['Content-Type'] = "text/html"
        return page.render(app=self)
    @cherrypy.expose
    def login(self, username=None, password=None, redirect="/", logout=None,
              **kw):
        """Render the login form and process login/logout requests.

        :param redirect: URL to send the browser to after a successful login
        :param logout: when truthy, terminate the current session
        """
        request = cherrypy.serving.request
        response = cherrypy.serving.response
        error_msg = None

        if request.method in ("POST", "PUT"):
            # See if we have a username/password combination
            if username is not None and password is not None:
                try:
                    cherrypy.tools.user_session.local_login(
                        username=username, password=password)
                    # Successful login; bounce the browser onward.
                    raise cherrypy.HTTPRedirect(redirect, 303)
                except LoginDeniedError:
                    error_msg = "Invalid username/password"

        if logout:
            cherrypy.tools.user_session.logout()

        # Fall through to (re)rendering the login form, possibly with an
        # error message from a failed attempt.
        page = Template(filename=self.template_dir + "/login.html",
                        lookup=self.template_lookup,
                        strict_undefined=True)
        response.headers['Content-Type'] = "text/html"
        return page.render(app=self, redirect=redirect, error_msg=error_msg)
    @cherrypy.expose
    def browse(self, *args, **kw):
        """Redirect the user to their home folder under ``/files``."""
        request = cherrypy.serving.request
        response = cherrypy.serving.response

        if request.method in ("POST", "PUT"):
            raise cherrypy.HTTPError(METHOD_NOT_ALLOWED)

        # Make sure we have a valid session.
        if request.user_session is None:
            # Redirect to the login page.
            raise cherrypy.HTTPRedirect("/login?redirect=" +
                                        quote_url("/browse"))

        # Normalize the home folder to an absolute path, defaulting to /.
        home_folder = request.user.home_folder
        if home_folder is None:
            home_folder = "/"
        elif not home_folder.startswith("/"):
            home_folder = "/" + home_folder

        raise cherrypy.HTTPRedirect("/files" + home_folder)
@cherrypy.expose
def files(self, *args, **kw):
request = cherrypy.serving.request
response = cherrypy.serving.response
if request.method in ("POST", "PUT"):
# TODO: Handle file upload.
raise cherrypy.HTTPError(500, "Unable to handle uploads right now")
# Make sure we have a valid session.
if request.user_session is None:
# Redirect to the login page.
raise cherrypy.HTTPRedirect(
"/login?redirect=%s" % quote_url("/files/" + "/".join(args)))
try:
node = fs.get_node("/" + "/".join(args))
except FileNotFoundError as e:
raise cherrypy.HTTPError(404, str(e))
except PermissionDeniedError as e:
raise cherrypy.HTTPError(403, str(e))
if isinstance(node, fs.Folder):
template = "folder.html"
elif isinstance(node, fs.Notepage):
template = "notepage.html"
elif isinstance(node, fs.Note):
template = "note.html"
page = Template(filename=self.template_dir + "/" + template,
lookup=self.template_lookup,
strict_undefined=True)
response.headers['Content-Type'] = "text/html"
return page.render(app=self, node=node)
    @cherrypy.expose
    def notepage(self, *args, **kw):
        """Dispatch a request under ``/notepage/...`` to its document."""
        # path_info is "/notepage/<path...>"; splitting on "/" yields an
        # empty leading component which is dropped here.
        path = cherrypy.request.path_info.split("/")[1:]
        if len(path) <= 1:
            raise cherrypy.HTTPRedirect("/notepage/", 302)
        path = "/" + "/".join(path[1:])
        obj, remaining = dao.get_object_by_path(cherrypy.request.db_session, path)
        if obj is None:
            raise cherrypy.HTTPError(
                404, "Notepage %s does not exist" % (path,))
        if obj.document_type == "notepage":
            return self.handle_document(obj, remaining)
        elif obj.document_type == "folder":
            return self.handle_folder(obj)

        raise cherrypy.HTTPError(
            500, "Unknown document type %s" % (obj.document_type,))
def handle_document(self, doc, remaining):
remaining_elements = remaining.split("/")
if len(remaining_elements) == 0:
if cherrypy.request.method in ("GET", "HEAD"):
return self.fetch_document(doc)
elif cherrypy.request.method in ("POST", "PUT"):
return self.put_document(doc)
elif cherrypy.request.method in ("DELETE",):
return self.delete_document(doc)
else:
raise cherrypy.HTTPError(
400, "Invalid method %s" % cherrypy.serving.request.method)
    def fetch_document(self, doc):
        """Render a notepage document as HTML.

        :param doc: the document DAO object to render
        """
        page = Template(filename=self.template_dir + "/notepage.html",
                        lookup=self.template_lookup,
                        strict_undefined=True)
        cherrypy.response.headers['Content-Type'] = "text/html"
        return page.render(document=doc)
def get_session(self, session_token):
cherrypy.serving.request.user_session = None
cherrypy.serving.request.user = None
| |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.context.vm import servers_ext
from tests.unit import test
@ddt.ddt
class ServerGeneratorExtTestCase(test.ContextTestCase):
def test__get_servers_per_tenant(self):
context = {
"task": mock.Mock(),
"config": {
"servers_ext": {
"servers_per_tenant": 42
}
}
}
server_generator = servers_ext.ServerGeneratorExt(context)
retval = server_generator._get_servers_per_tenant()
self.assertEqual(42, retval)
    def test__get_servers_per_tenant_hypervisors(self):
        """The special value "hypervisors" counts the admin's hypervisors."""
        context = {
            "task": mock.Mock(),
            "admin": {
                "endpoint": "admin_endpoint"
            },
            "config": {
                "servers_ext": {
                    "servers_per_tenant": "hypervisors"
                }
            }
        }
        # The hypervisor count is taken via len() of the list() result.
        mock_list = self.admin_clients("nova").hypervisors.list
        mock_list.return_value.__len__.return_value = 42
        server_generator = servers_ext.ServerGeneratorExt(context)
        retval = server_generator._get_servers_per_tenant()
        self.assertEqual(42, retval)
        mock_list.return_value.__len__.assert_called_once_with()
def test__get_servers_per_tenant_incorrect(self):
context = {
"task": mock.Mock(),
"config": {
"servers_ext": {
"servers_per_tenant": "foobar"
}
}
}
server_generator = servers_ext.ServerGeneratorExt(context)
self.assertRaises(ValueError, server_generator._get_servers_per_tenant)
def test__get_server_group_none(self):
context = {
"task": mock.Mock(),
"config": {
"servers_ext": {
"placement_policy": None,
}
}
}
server_generator = servers_ext.ServerGeneratorExt(context)
retval = server_generator._get_server_group(None)
self.assertIsNone(retval)
    @mock.patch("rally.common.utils.generate_random_name")
    def test__get_server_group(self, mock_generate_random_name):
        """A configured placement policy creates a named server group."""
        context = {
            "task": mock.Mock(),
            "config": {
                "servers_ext": {
                    "placement_policy": "foobar",
                }
            }
        }
        mock_clients = mock.Mock(
            **{
                "nova.return_value.server_groups.create.return_value.id": "42"
            })
        mock_generate_random_name.return_value = "fooname"
        server_generator = servers_ext.ServerGeneratorExt(context)
        retval = server_generator._get_server_group(mock_clients)
        # The group name is randomized with a fixed prefix and the
        # configured policy is passed through to nova.
        mock_generate_random_name.assert_called_once_with(
            prefix="rally_server_group_")
        mock_create = mock_clients.nova.return_value.server_groups.create
        mock_create.assert_called_once_with(
            name="fooname", policies=["foobar"])
        self.assertEqual("42", retval)
def _get_context(self, servers_with_ips, config_override={}):
context = {
"task": mock.Mock(),
"config": {
"servers_ext": {
"placement_policy": "foobar",
"floating_ips": "once",
"image": "image",
"flavor": "flavor",
"floating_network": "floating_network",
"internal_network": "internal_network",
"userdata": "{servers_with_ips[0][1]}.{server_num}.foo"
}
}
}
if config_override:
context["config"]["servers_ext"].update(config_override)
server_generator = servers_ext.ServerGeneratorExt(context)
server_generator._get_server_group = mock.Mock(
return_value="server_group")
server_generator._get_servers_per_tenant = mock.Mock(
return_value=len(servers_with_ips))
server_generator._boot_one_server_with_ips = mock.Mock(
side_effect=servers_with_ips
)
return server_generator
    def test__boot_tenant_servers(self):
        """With floating_ips == "once" only the first server gets a FIP."""
        user = {
            "endpoint": "user_endpoint"
        }
        tenant = {}
        servers_with_ips = [("foo", 10), ("bar", 20)]
        server_generator = self._get_context(servers_with_ips)

        server_generator._boot_tenant_servers(user, tenant)

        self.assertEqual(
            {
                "group": "server_group",
                "servers_with_ips": servers_with_ips
            },
            tenant)
        common_kwargs = dict(
            flavor="flavor", image="image",
            floating_network="floating_network",
            internal_network="internal_network",
            scheduler_hints={
                "group": "server_group"
            },
            user=user
        )
        # From the second server on, the userdata template is apparently
        # rendered from the first server's IP and the server index
        # ("10.1.foo" from "{servers_with_ips[0][1]}.{server_num}.foo").
        self.assertEqual(
            [
                mock.call(
                    use_floating_ip=True,
                    userdata=None,
                    **common_kwargs
                ),
                mock.call(
                    use_floating_ip=False,
                    userdata="10.1.foo",
                    **common_kwargs
                ),
            ],
            server_generator._boot_one_server_with_ips.mock_calls)
    def test__boot_tenant_servers_floating_ips_none(self):
        """With the "none" policy no server gets a floating IP and, since
        there is no server group, no scheduler hints are passed."""
        user = {
            "endpoint": "user_endpoint"
        }
        tenant = {}
        servers_with_ips = [("foo", 10), ("bar", 20)]
        server_generator = self._get_context(
            servers_with_ips, {"floating_ips": "none"})
        server_generator._get_server_group.return_value = None
        server_generator._boot_tenant_servers(user, tenant)
        # No server group, so no "group" key in the tenant.
        self.assertEqual(
            {
                "servers_with_ips": servers_with_ips
            },
            tenant)
        common_kwargs = dict(
            flavor="flavor", image="image",
            floating_network="floating_network",
            internal_network="internal_network",
            user=user
        )
        self.assertEqual(
            [
                mock.call(
                    use_floating_ip=False,
                    userdata=None,
                    **common_kwargs
                ),
                mock.call(
                    use_floating_ip=False,
                    userdata="10.1.foo",
                    **common_kwargs
                ),
            ],
            server_generator._boot_one_server_with_ips.mock_calls)
    def test__boot_tenant_servers_floating_ips_each(self):
        """With the "each" policy every server gets a floating IP; userdata
        for the n-th server is rendered with server_num == n."""
        user = {
            "endpoint": "user_endpoint"
        }
        tenant = {}
        servers_with_ips = [("foo", 10), ("bar", 20), ("buzz", 42)]
        server_generator = self._get_context(
            servers_with_ips, {"floating_ips": "each"})
        server_generator._get_server_group.return_value = None
        server_generator._boot_tenant_servers(user, tenant)
        # No server group, so no "group" key in the tenant.
        self.assertEqual(
            {
                "servers_with_ips": servers_with_ips
            },
            tenant)
        common_kwargs = dict(
            flavor="flavor", image="image",
            floating_network="floating_network",
            internal_network="internal_network",
            user=user
        )
        self.assertEqual(
            [
                mock.call(
                    use_floating_ip=True,
                    userdata=None,
                    **common_kwargs
                ),
                mock.call(
                    use_floating_ip=True,
                    userdata="10.1.foo",
                    **common_kwargs
                ),
                mock.call(
                    use_floating_ip=True,
                    userdata="10.2.foo",
                    **common_kwargs
                ),
            ],
            server_generator._boot_one_server_with_ips.mock_calls)
    @ddt.unpack
    @ddt.data(
        # Case 1: fip given, fixed address found in the server's
        # address list.  Case 2: no addresses; the fixed IP comes from
        # the fip dict itself.
        (
            [
                [{
                    "OS-EXT-IPS:type": "floating",
                }],
                [{
                    "OS-EXT-IPS:type": "fixed",
                    "addr": "foo_addr"
                }],
            ],
            {"is_floating": True},
            ({"is_floating": True}, "foo_addr")
        ),
        (
            [],
            {"is_floating": False, "ip": "bar_addr"},
            (None, "bar_addr")
        )
    )
    def test__boot_one_server_with_ips(self, addresses, fip, expected):
        """_boot_one_server_with_ips forwards its kwargs to
        _boot_server_for_user and returns (server_id, fip, fixed_ip)."""
        context = {
            "task": mock.Mock(),
        }
        server_generator = servers_ext.ServerGeneratorExt(context)
        mock_server = mock.Mock(
            **{
                "addresses.values.return_value": addresses,
                "id": "foo_server"
            })
        server_generator._boot_server_for_user = mock.Mock(
            return_value=(mock.Mock(), mock_server, fip)
        )
        retval = server_generator._boot_one_server_with_ips(
            foo_arg="foo_value")
        server_generator._boot_server_for_user.assert_called_once_with(
            foo_arg="foo_value"
        )
        # First element is always the server id; the rest depends on the
        # fip/address data supplied by ddt.
        self.assertEqual("foo_server", retval[0])
        self.assertEqual(expected, retval[1:])
def test_setup(self):
context = {
"task": mock.MagicMock(uuid="foo"),
"users": [
{"tenant_id": "tenant_id0"},
{"tenant_id": "tenant_id1"},
{"tenant_id": "tenant_id2"}
],
"tenants": {
"tenant_id0": {"foo": 0},
"tenant_id1": {"foo": 1},
"tenant_id2": {"foo": 2}
}
}
server_generator = servers_ext.ServerGeneratorExt(context)
server_generator._boot_tenant_servers = mock.Mock()
server_generator.setup()
self.assertEqual(
[
mock.call({"tenant_id": "tenant_id0"}, {"foo": 0}),
mock.call({"tenant_id": "tenant_id1"}, {"foo": 1}),
mock.call({"tenant_id": "tenant_id2"}, {"foo": 2}),
],
server_generator._boot_tenant_servers.mock_calls)
    def test__cleanup_one_tenant(self):
        """Servers with a fip are deleted via _delete_server_with_fip,
        the rest via _delete_server; the server group is removed too."""
        user = {
            "endpoint": "user_endpoint"
        }
        tenant = {
            "group": "foobar",
            "servers_with_ips": [
                ("server_id_1", "fip", "fixed_ip_1"),
                ("server_id_2", None, "fixed_ip_2")
            ]
        }
        mock_servers_get = self.clients("nova").servers.get
        mock_servers_get.side_effect = ["server_1", "server_2"]
        server_generator = servers_ext.ServerGeneratorExt(
            {"task": mock.Mock()})
        server_generator._delete_server = mock.Mock()
        server_generator._delete_server_with_fip = mock.Mock()
        server_generator._cleanup_one_tenant(user, tenant)
        # server_1 has a fip and must be deleted together with it.
        server_generator._delete_server_with_fip.assert_called_once_with(
            "server_1", "fip")
        server_generator._delete_server.assert_called_once_with("server_2")
        # Both servers were looked up by id, in order.
        self.assertEqual(
            [
                mock.call("server_id_1"),
                mock.call("server_id_2"),
            ],
            mock_servers_get.mock_calls)
        self.clients("nova").server_groups.delete.assert_called_once_with(
            "foobar")
def test_cleanup(self):
context = {
"task": mock.MagicMock(uuid="foo"),
"users": [
{"tenant_id": "tenant_id0"},
{"tenant_id": "tenant_id1"},
{"tenant_id": "tenant_id2"}
],
"tenants": {
"tenant_id0": {"foo": 0},
"tenant_id1": {"foo": 1},
"tenant_id2": {"foo": 2}
}
}
server_generator = servers_ext.ServerGeneratorExt(context)
server_generator._cleanup_one_tenant = mock.Mock()
server_generator.cleanup()
self.assertEqual(
[
mock.call({"tenant_id": "tenant_id0"}, {"foo": 0}),
mock.call({"tenant_id": "tenant_id1"}, {"foo": 1}),
mock.call({"tenant_id": "tenant_id2"}, {"foo": 2}),
],
server_generator._cleanup_one_tenant.mock_calls)
| |
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lunr.storage.helper.utils import directio, ProcessError, execute
from timeit import default_timer as Timer
from struct import unpack_from
from lunr.common import logger
from tempfile import mkdtemp
from shutil import rmtree
from mmap import mmap
import time
import os
import re
log = logger.get_logger()
class ScrubError(RuntimeError):
    """Raised for any unrecoverable failure while scrubbing a cow/volume."""
    pass
class Scrub(object):
    """Zero out ("scrub") LVM snapshot COW devices and whole volumes.

    NOTE: this is Python 2 code (``except X, e`` syntax, ``xrange``).
    """
    def __init__(self, conf):
        # Report-only mode: log what would be scrubbed without writing.
        self._display_only = conf.bool('scrub', 'display-only', False)
        self._display_exceptions = conf.bool('scrub',
                                             'display-exceptions', False)
        # Throttle speed is in MB/s
        self._throttle_speed = conf.float('scrub', 'throttle_speed', 0)
        # Chunk-sized NUL buffer; sized properly once scrub_cow() has
        # read the chunk size from the cow header.
        self.scrub_buf = ''
    def run(self, cmd, *args, **kwargs):
        """Execute *cmd*, retrying up to 3 times; raise ScrubError after."""
        for attempts in range(0, 3):
            try:
                return execute(cmd, *args, **kwargs)
            except ProcessError, e:
                log.error("Command '%s' returned non-zero exit status" % e.cmd)
                log.error("stdout: %s" % e.out)
                log.error("stderr: %s" % e.err)
        raise ScrubError("Aborted after 3 attempts to execute '%s'" % cmd)
    def write(self, fd, offset, buf):
        """Write *buf* at *offset* in *fd*; wrap failures in ScrubError."""
        # Seek to the offset
        if fd.seek(offset, os.SEEK_SET) == -1:
            raise ScrubError("Unable to seek to offset '%d'" % offset)
        try:
            return fd.write(buf)
        except (OSError, IOError), e:
            raise ScrubError("Write on '%s' offset '%d' failed with '%s'"
                             % (fd.raw.path, offset, e))
    def read(self, fd, offset, length):
        """Read *length* bytes at *offset* from *fd*; wrap failures."""
        # Seek to the offset
        if fd.seek(offset, os.SEEK_SET) == -1:
            raise ScrubError("Unable to seek to offset '%d'" % offset)
        try:
            return fd.read(length)
        except (OSError, IOError), e:
            raise ScrubError("Read on '%s' failed: %s" % (fd.raw.path, e))
    def read_exception_metadata(self, fd, chunk_size, index):
        """Yield the cow offsets of all exceptions in metadata store *index*.

        Unless in display-only mode, the metadata chunk itself is
        overwritten with ``self.scrub_buf`` after being read.
        """
        # exception = { uint64 old_chunk, uint64 new_chunkc }
        # if the size of each exception metadata is 16 bytes,
        # exceptions_per_chunk is how many exceptions can fit in one chunk
        exceptions_per_chunk = chunk_size / 16
        # Offset where the exception metadata store begins
        # 1 + for the header chunk, then + 1 to take into
        # account the exception metadata chunk
        store_offset = 1 + ((exceptions_per_chunk + 1) * index)
        # seek to the begining of the exception metadata store
        # and read the entire store
        store = self.read(fd, chunk_size * store_offset, chunk_size)
        if not self._display_only:
            log.debug("Scrubbing metadata at %d" % (chunk_size * store_offset))
            self.write(fd, chunk_size * store_offset, self.scrub_buf)
        exception = 0
        while exception < exceptions_per_chunk:
            # Unpack 1 exception metadata from the store
            (old_chunk, new_chunk) = unpack_from('<QQ', store, exception * 16)
            # Yields the offset where the exception exists in the cow
            yield new_chunk * chunk_size
            # Increment to the next exception in the metatdata store
            exception = exception + 1
    def read_header(self, fd):
        """Validate the snapshot cow header; return the chunk size in bytes."""
        SECTOR_SHIFT = 9
        SNAPSHOT_DISK_MAGIC = 0x70416e53
        SNAPSHOT_DISK_VERSION = 1
        SNAPSHOT_VALID_FLAG = 1
        # Read the cow metadata: magic, valid flag, version, chunk size
        # (in 512-byte sectors).
        header = unpack_from("<IIII", self.read(fd, 0, 16))
        if header[0] != SNAPSHOT_DISK_MAGIC:
            raise ScrubError(
                "Invalid COW device; header magic doesn't match")
        if header[1] != SNAPSHOT_VALID_FLAG:
            # Not fatal: an inactive cow is only logged.
            log.warning(
                "Inactive COW device; valid flag not set '%d' got '%d'"
                % (SNAPSHOT_VALID_FLAG, header[1]))
        if header[2] != SNAPSHOT_DISK_VERSION:
            raise ScrubError(
                "Unknown metadata version; expected '%d' got '%d' "
                % (SNAPSHOT_DISK_VERSION, header[2]))
        log.info("Magic: %X" % header[0])
        log.info("Valid: %d" % header[1])
        log.info("Version: %d" % header[2])
        log.info("Chunk Size: %d" % header[3])
        header = list(header)
        # Chunk size is byte aligned to 512 bytes
        # (0 << SECTOR_SHIFT) == 512
        return header[3] << SECTOR_SHIFT
    def scrub_cow(self, cow_path):
        """Zero every exception chunk (and metadata store) in the cow."""
        try:
            log.info("Opening Cow '%s'" % cow_path)
            # Open the cow block device
            fd = directio.open(cow_path, mode='+o', buffered=32768)
        except OSError, e:
            raise ScrubError("Failed to open cow '%s'" % e)
        # Read the meta data header
        chunk_size = self.read_header(fd)
        if not self._display_only:
            log.debug("Scrubbing cow header")
            self.write(fd, 0, '\0' * 16)
        # Create a buffer of nulls the size of the chunk
        self.scrub_buf = '\0' * chunk_size
        store, count = (0, 0)
        while True:
            # Iterate through all the exceptions
            for offset in self.read_exception_metadata(fd, chunk_size, store):
                # zero means we reached the last exception
                if offset == 0:
                    if self._display_only:
                        log.info("Counted '%d' exceptions" % count)
                    else:
                        log.info("Scrubbed '%d' exceptions" % count)
                    return fd.close()
                if self._display_exceptions:
                    log.debug("Exception: %s",
                              self.read(fd, offset, chunk_size))
                count = count + 1
                if not self._display_only:
                    # Write a chunk full of NULL's at 'offset'
                    self.write(fd, offset, self.scrub_buf)
            # Seek the next store
            store = store + 1
    def _dash(self, value):
        """ When dev-mapper creates symlinks in /dev/mapper it
            replaces all occurances of '-' with '--'. Presumably
            to make parsing the pattern 'name-volume-type' easier
        """
        return re.sub('-', '--', value)
    def get_writable_cow(self, snapshot, volume):
        """Remove the COWing from the volume so we can scrub it.
        Change the vg-vol device from snapshot-origin to linear.
        Remove the vg-snapshot device.
        Remove the vg-vol-real.
        Only the vg-vol linear and vg-snap-cow linear devices remain.
        """
        # Split /dev/<vg>/<vol> into its components.
        path, vol = os.path.split(snapshot['path'])
        path, vg = os.path.split(path)
        path, dev = os.path.split(path)
        snap_name = "%s-%s" % (self._dash(vg), self._dash(vol))
        snap_path = os.path.join(os.sep, dev, 'mapper', snap_name)
        cow_name = snap_name + "-cow"
        cow_path = os.path.join(os.sep, dev, 'mapper', cow_name)
        if self._display_only:
            return (cow_name, cow_path)
        if not os.path.exists(cow_path):
            raise ScrubError(
                "non-existant cow '%s'; invalid snapshot volume?" % cow_path)
        # If the snap device is gone, we've already been here before.
        if not os.path.exists(snap_path):
            return (cow_name, cow_path)
        path, vol = os.path.split(volume['path'])
        path, vg = os.path.split(path)
        path, dev = os.path.split(path)
        vol_name = "%s-%s" % (self._dash(vg), self._dash(vol))
        vol_real_name = vol_name + "-real"
        try:
            real_table = self.run(
                '/sbin/dmsetup', 'table', vol_real_name).rstrip()
            log.info("real_table: %s" % real_table)
        except ProcessError, e:
            raise ScrubError("dmsetup failed '%s'; not running as root?" % e)
        # dmsetup load reads the new table from a file; build it in a
        # throw-away temp dir so cleanup is a single rmtree.
        tmpdir = mkdtemp()
        tmpfile = os.path.join(tmpdir, 'table')
        try:
            with open(tmpfile, 'w') as f:
                f.write(real_table)
            self.run('/sbin/dmsetup', 'suspend', vol_name)
            try:
                self.run('/sbin/dmsetup', 'load', vol_name, tmpfile)
            finally:
                self.run('/sbin/dmsetup', 'resume', vol_name)
            self.run('/sbin/dmsetup', 'remove', snap_name)
            self.run('/sbin/dmsetup', 'remove', vol_real_name)
        finally:
            rmtree(tmpdir)
        return (cow_name, cow_path)
    def remove_cow(self, cow_name):
        """Force-remove the cow device-mapper device."""
        # NOTE(review): missing space in the log message ("cow'%s'");
        # left untouched because it is runtime output.
        log.info("Removing cow'%s'" % cow_name)
        self.run('/sbin/dmsetup', 'remove', cow_name, '-f')
    def scrub_snapshot(self, snapshot, volume):
        """Scrub a snapshot's cow and, unless display-only, remove it."""
        (cow_name, cow_path) = self.get_writable_cow(snapshot, volume)
        if not os.path.exists(cow_path):
            raise ScrubError("snapshot '%s' has no cow" % snapshot['name'])
        # scrub the cow
        self.scrub_cow(cow_path)
        if not self._display_only:
            self.remove_cow(cow_name)
    def scrub_volume(self, volume, byte='\x00'):
        """Overwrite the whole block device at *volume* with *byte*."""
        CHUNKSIZE = 4 * 1024 ** 2  # 4 MB
        chunk = mmap(-1, CHUNKSIZE)
        chunk.write(byte * CHUNKSIZE)
        log.debug('Chunk Size: %d' % CHUNKSIZE)
        # O_DIRECT/O_SYNC: bypass the page cache so the scrub actually
        # hits the device.
        fd = os.open(volume, os.O_DIRECT | os.O_SYNC | os.O_WRONLY)
        try:
            # Get the size of the block device
            size = os.lseek(fd, 0, os.SEEK_END)
            # Seek back to the beginning of the device
            os.lseek(fd, 0, os.SEEK_SET)
            sample_size = 15
            target_sleep, start = 0, Timer()
            # If config included a throttle speed for scrubbing
            if self._throttle_speed > 0:
                # microseconds it takes to transfer 1 MB at our
                # throttle speed, multiplied by the MB in our sample size
                target_sleep = (1 / self._throttle_speed) * \
                    ((sample_size * CHUNKSIZE) / 1048576.0)
            # TODO: this math only works if CHUNKSIZE is == lvm PE size
            # NOTE(review): block_num advances in CHUNKSIZE-byte steps,
            # so `block_num % sample_size` tests a byte offset, not a
            # block count — confirm the sampling cadence is intended.
            for block_num in xrange(0, size, CHUNKSIZE):
                os.write(fd, chunk)
                # Sample scrub progress every 'sample_size' ( in blocks )
                if (block_num % sample_size) == 0 and block_num != 0:
                    elapsed = (Timer() - start)
                    log.debug("Throughput %.3fMB/s POS: %s (%d%%)" %
                              (float(((sample_size * CHUNKSIZE) / 1048576.0) /
                               elapsed), block_num,
                               float(block_num) / size * 100))
                    # If we are throttling our scrub
                    if target_sleep != 0:
                        # Calculate how long we must sleep to
                        # achieve our target throughput
                        time.sleep(abs(target_sleep - elapsed))
                    start = Timer()
        finally:
            os.fsync(fd)
            os.close(fd)
| |
from ZODB.DB import DB
import BTrees.IOBTree
import BTrees.LOBTree
import BTrees.OIBTree
import BTrees.OLBTree
import BTrees.OOBTree
import ZODB.FileStorage
import ZODB.POSException
import argparse
import collections
import logging
import pdb # noqa
import transaction
import persistent
import zodbpickle
log = logging.getLogger(__name__)
def wake_object(obj):
    """Wake *obj* so that its ``__dict__`` gets populated by ZODB."""
    try:
        # Any attribute access forces the persistent object to load.
        getattr(obj, 'some_attribute', None)
    except ZODB.POSException.POSKeyError as err:
        # For example if a ZODB Blob was not found.
        log.error('POSKeyError: %s', err)
def is_container(obj):
    """True if *obj* is a BTree or a persistent mapping/list container."""
    container_types = (
        BTrees.IOBTree.IOBTree,
        BTrees.LOBTree.LOBTree,
        BTrees.OIBTree.OIBTree,
        BTrees.OLBTree.OLBTree,
        BTrees.OOBTree.OOBTree,
        persistent.mapping.PersistentMapping,
        persistent.list.PersistentList,
    )
    return isinstance(obj, container_types)
def is_treeset(obj):
    """True if *obj* is one of the BTrees TreeSet variants."""
    treeset_types = (
        BTrees.IOBTree.IOTreeSet,
        BTrees.LOBTree.LOTreeSet,
        BTrees.OIBTree.OITreeSet,
        BTrees.OLBTree.OLTreeSet,
        BTrees.OOBTree.OOTreeSet,
    )
    return isinstance(obj, treeset_types)
def get_data(obj):
    """Return data of object. Return `None` if not possible.

    Containers are returned as-is; tree sets become a dict of their
    keys; anything else falls back to its ``__dict__`` via ``vars``.
    """
    if is_container(obj):
        return obj
    if is_treeset(obj):
        return dict.fromkeys(obj.keys())
    try:
        return vars(obj)
    except TypeError:
        # Objects without __dict__ (e.g. slotted/builtin types).
        return None
def find_binary(value):
    """Return type if value is or contains binary strings. None otherwise."""
    if isinstance(value, persistent.Persistent):
        # Avoid duplicate analysis of the same object and circular references
        return None
    if isinstance(value, zodbpickle.binary):
        # Already marked as binary, skip.
        return None
    if isinstance(value, str):
        # NOTE(review): relies on Python 2 semantics where `str` is a
        # byte string with a .decode method — confirm before running on
        # Python 3.
        try:
            value.decode('ascii')
        except UnicodeDecodeError:
            return 'string'
        else:
            return None
    elif isinstance(value, collections.Mapping):
        # NOTE(review): collections.Mapping was removed from `collections`
        # in Python 3.10 (moved to collections.abc) — confirm target
        # Python version.
        for k, v in value.items():
            if find_binary(k) or find_binary(v):
                return 'dict'
    elif hasattr(value, '__iter__'):
        try:
            # Recurse into arbitrary iterables.
            for v in value:
                if find_binary(v):
                    return 'iterable'
        except TypeError:
            # e. g. <type 'tuple'> has __iter__ but as it is a class it can
            # not be called successfully.
            pass
    return None
def get_classname(obj):
    """Return the dotted ``module.classname`` of *obj*'s class."""
    klass = obj.__class__
    return '%s.%s' % (klass.__module__, klass.__name__)
def get_items(obj):
    """Get the items of a dict-like or list-like object."""
    if hasattr(obj, 'items'):
        return obj.items()
    # List-like: pair each element with its index.
    return enumerate(obj)
def find_obj_with_binary_content(
        storage, errors, start_at=None, limit=None, watermark=10000):
    """Generator which finds objects in `storage` having binary content.
    Yields tuple: (object, data, key-name, value, type)
    `type` can be one of 'string', 'dict', 'iterable', 'key'.

    `errors` is a mapping of classname -> count, incremented for every
    object whose data could not be obtained.  `limit` stops the scan
    after that many analyzed objects; a progress line is logged every
    `watermark` objects.
    """
    db = DB(storage)
    connection = db.open()
    if start_at is not None:
        # NOTE(review): only ZODB.FileStorage/ZODB.POSException are
        # imported at the top of this file — confirm ZODB.utils is
        # importable at this point.
        next = ZODB.utils.repr_to_oid(start_at)
    else:
        next = None  # first OID in storage
    len_storage = len(storage)
    log.warn('Analyzing about %s objects.', len_storage)
    count = 0
    run = True
    # NOTE(review): `next` and `run` shadow builtins; renaming would be a
    # code change, so they are left as-is.
    while run:
        oid, tid, data, next = storage.record_iternext(next)
        if next is None:
            # Last record: process it, then leave the loop.
            run = False
        obj = connection.get(oid)
        klassname = get_classname(obj)
        wake_object(obj)
        data = get_data(obj)
        if data is None:
            errors[klassname] += 1
            continue
        for key, value in get_items(data):
            try:
                # Check the value, then the key, for binary content.
                type_ = find_binary(value)
                if type_ is not None:
                    yield obj, data, key, value, type_
                type_ = find_binary(key)
                if type_ is not None:
                    yield obj, data, key, key, 'key'
            except Exception:
                log.error('Could not execute %r', value, exc_info=True)
                continue
        count += 1
        if count % watermark == 0:
            # Periodic progress report plus cache housekeeping.
            log.warn('%s of about %s objects analyzed.', count, len_storage)
            transaction.savepoint()
            connection.cacheMinimize()
        if limit is not None and count >= limit:
            return
def get_format_string(obj, display_type=False, verbose=False):
    """Build the report format string for a binary field of *obj*.

    Containers/tree sets render as subscript access, other objects as
    attribute access; `display_type` appends the detected type and,
    with `verbose`, a truncated repr of the value.
    """
    if is_treeset(obj) or is_container(obj):
        fmt = '{klassname}[{key!r}]'
    else:
        fmt = '{klassname}.{key}'
    if display_type:
        suffix = ': {value!r:.30}' if verbose else ''
        fmt += ' is {type_}%s' % suffix
    return fmt
def print_results(result, errors, verb, verbose):
    """Print the analysis results.

    NOTE: uses Python 2 ``print`` statements.  `result` and `errors` are
    mappings of name -> occurrence count; `verb` prefixes the summary
    line (e.g. "Found").
    """
    if verbose:
        # Verbose mode first lists the classes that had no __dict__.
        print ("Found {} classes whose objects do not have __dict__: "
               "(number of occurrences)".format(len(errors)))
        for key, value in sorted_by_key(errors):
            print "{} ({})".format(key, value)
        print
        print "# ########################################################### #"
        print
    print "{} {} binary fields: (number of occurrences)".format(
        verb, len(result))
    for key, value in sorted_by_key(result):
        print "{} ({})".format(key, value)
def sorted_by_key(dict):
    """Yield (key, value) pairs of *dict* in ascending key order."""
    # NOTE: the parameter shadows the builtin `dict`; renaming it would
    # break keyword callers, so the name is kept.
    for key in sorted(dict.keys()):
        yield key, dict[key]
def get_argparse_parser(description):
    """Return an ArgumentParser with the default configuration."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        'zodb_path', help='Path to Data.fs', metavar='Data.fs')
    general = parser.add_argument_group('General options')
    # (flags, kwargs) specs for the shared options, added in order.
    option_specs = [
        (('-b', '--blob-dir'),
         dict(default=None,
              help='Path to the blob directory if ZODB blobs are used.')),
        (('-v', '--verbose'),
         dict(action='store_true', help='Be more verbose in output')),
        (('--pdb',),
         dict(action='store_true',
              help='Drop into a debugger on an error')),
    ]
    for flags, kwargs in option_specs:
        general.add_argument(*flags, **kwargs)
    return parser
def run(parser, callable, *arg_names, **kw):
    """Parse the command line args and feed them to `callable`.
    *arg_names ... command line arguments which should be used as arguments of
    the `callable`.
    **kw ... Only the key `args` is allowed here to override the command line
    arguments in tests.
    """
    # NOTE(review): the parameter shadows the `callable` builtin; renaming
    # it would break keyword callers, so it is left untouched.
    logging.basicConfig(level=logging.INFO)
    args = kw.pop('args', None)
    assert not kw, \
        "Don't know how to handle the following kwargs: {!r}".format(kw)
    args = parser.parse_args(args)
    try:
        storage = ZODB.FileStorage.FileStorage(
            args.zodb_path, blob_dir=args.blob_dir)
        # Pull the requested parsed arguments off the namespace, in order.
        callable_args = [getattr(args, x) for x in arg_names]
        return callable(storage, *callable_args)
    except Exception:
        # Optionally drop into the post-mortem debugger before re-raising.
        if args.pdb:
            pdb.post_mortem()
        raise
| |
#!/usr/bin/env python
import MKDatabase
from MKFlowMessage import FBconvertLong
# Main Class
class MKFlowCommunication():
    """Registry of MKFlowNode objects keyed by their node number."""
    def __init__(self):
        # Sentinel returned for lookups of unknown node numbers.
        self.nullNode = MKFlowNode(-1)
        self.node_numbers = []
        self.nodes = []
    def addNode(self, number):
        """Register a new node unless *number* is already known."""
        if self.isNode(number):
            return
        self.node_numbers.append(number)
        self.nodes.append(MKFlowNode(number))
    def isNode(self, number):
        """Whether a node with this number has been registered."""
        return number in self.node_numbers
    def getNode(self, number):
        """Return the node for *number*, or the null node if unknown."""
        if not self.isNode(number):
            return self.nullNode
        return self.nodes[self.node_numbers.index(number)]
    def Node(self, number):
        """Get-or-create accessor for the node numbered *number*."""
        if not self.isNode(number):
            self.addNode(number)
        return self.getNode(number)
class MKFlowNode():
    """A flow-bus node holding its communication sequences."""
    def __init__(self, node):
        self.node = node
        # Sentinel returned for lookups of unknown sequence numbers.
        self.nullSequence = MKFlowSequence(-1)
        self.sequence_numbers = []
        self.sequences = []
    def getNumber(self):
        """Return this node's number."""
        return self.node
    def addSequence(self, number):
        """Register a new sequence unless *number* is already known."""
        if self.isSequence(number):
            return
        self.sequence_numbers.append(number)
        self.sequences.append(MKFlowSequence(number))
    def isSequence(self, number):
        """Whether a sequence with this number has been registered."""
        return number in self.sequence_numbers
    def getSequence(self, number):
        """Return the sequence for *number*, or the null sequence."""
        if not self.isSequence(number):
            return self.nullSequence
        return self.sequences[self.sequence_numbers.index(number)]
    def Sequence(self, number):
        """Get-or-create accessor for the sequence numbered *number*."""
        if not self.isSequence(number):
            self.addSequence(number)
        return self.getSequence(number)
class MKFlowSequence():
    """One request/answer exchange on the flow bus plus its parameters.

    NOTE: Python 2 code (``print`` statements in save/stdout).
    """
    def __init__(self, sequence):
        self.sequence = sequence
        # Sentinel returned for lookups of unknown parameter indices.
        self.nullChild = MKFlowModbus(-1)
        self.reset()
    def reset(self):
        """Clear all parameters and state flags so the object can be reused."""
        self.parameter_ids = []
        self.parameters = []
        self.hasAnswer = False
        self.hasRequest = False
        self.RequestHasValue = False
        self.isAnalysed = False
        self.isStatus = False
        self.isError = False
        self.isValid = False
    def setReadRequest(self, Message):
        """Record *Message* as a read request for this sequence."""
        self.Request = Message.getSubType()
        self.hasRequest = True
        self.hasAnswer = False
        self.RequestHasValue = False
        self.timeRequest = Message.getSeconds()
    def setWriteRequest(self, Message):
        """Record *Message* as a write request (a request carrying a value)."""
        self.setReadRequest(Message)
        self.RequestHasValue = True
    def setStatus(self, Message):
        """Record *Message* as a status answer."""
        self.setAnswer(Message)
        self.isStatus = True
    def setError(self, Message):
        """Record *Message* as an error answer."""
        self.setAnswer(Message)
        self.isError = True
    def setAnswer(self, Message):
        """Record *Message* as a plain answer, clearing status/error flags."""
        self.Answer = Message.getSubType()
        self.timeAnswer = Message.getSeconds()
        self.hasAnswer = True
        self.isStatus = False
        self.isError = False
    def check(self):
        """True when request and answer both exist and are <= 10 s apart."""
        if self.hasAnswer and self.hasRequest:
            if abs(self.timeAnswer - self.timeRequest) > 10:
                return False
            else:
                return True
        else:
            return False
    def addParameter(self, index):
        """Register a parameter at *index* unless already present."""
        if not self.isParameter(index):
            self.parameter_ids += [index]
            Parameter = MKFlowModbus(index)
            self.parameters += [Parameter]
    def isParameter(self, index):
        """Whether a parameter with this index has been registered."""
        return index in self.parameter_ids
    def getParameter(self, index):
        """Return the parameter at *index*, or the null parameter."""
        if self.isParameter(index):
            id = self.parameter_ids.index(index)
            Parameter = self.parameters[id]
            return Parameter
        else:
            return self.nullChild
    def Parameter(self, index):
        """Get-or-create accessor for the parameter at *index*."""
        if not self.isParameter(index):
            self.addParameter(index)
        return self.getParameter(index)
    def analyse(self):
        """Merge request/answer data into the parameters and validate."""
        if self.check():
            # Process Request
            for process in self.Request.process:
                for parameter in process.Parameter:
                    self.Parameter(parameter.getIndex()).setNumber(parameter.getNumber())
                    self.Parameter(parameter.getIndex()).setProcess(parameter.getProcess())
                    self.Parameter(parameter.getIndex()).setName(parameter.getHuman())
                    self.Parameter(parameter.getIndex()).setLength(parameter.getLength())
                    # Write requests carry the value themselves.
                    if self.RequestHasValue:
                        self.Parameter(parameter.getIndex()).setValue(parameter.getValue())
                        self.Parameter(parameter.getIndex()).setDataType(parameter.getDataType())
            # Process Answer
            if not self.RequestHasValue and not self.isStatus and not self.isError:
                for process in self.Answer.process:
                    for parameter in process.Parameter:
                        self.Parameter(parameter.getIndex()).setValue(parameter.getValue())
                        self.Parameter(parameter.getIndex()).setDataType(parameter.getDataType())
            # Answer with Status or Error and set valid
            self.valid = True
            self.analyseStatus()
            self.analyseError()
            self.isAnalysed = True
    def analyseStatus(self):
        """Interpret a status answer, flagging the offending parameter."""
        if self.isStatus:
            if self.Answer.getStatus() == 0:
                # no error
                self.valid = True
            elif self.Answer.getStatus() > 3 and self.Answer.getStatus() < 8:
                # Parameter Error
                where = self.Answer.getIndex()
                count = 4
                for index in self.parameter_ids:
                    Parameter = self.getParameter(index)
                    if not self.RequestHasValue:
                        Parameter.setInvalid()
                    # The reported index points at the offending parameter.
                    if where == count:
                        self.error = "Status: %s\t Parameter: %s" % (self.Answer.getHuman(), Parameter.getName())
                        Parameter.setError(self.Answer.getHuman())
                    count += int(Parameter.getLength())
            else:
                self.error = self.Answer.getHuman()
                self.valid = False
    def analyseError(self):
        """Interpret an error answer and propagate errors to all parameters."""
        if self.isError:
            self.error = self.Answer.getText()
            self.valid = False
        if not self.valid:
            for index in self.parameter_ids:
                Parameter = self.getParameter(index)
                Parameter.setError(self.error)
    def output(self):
        """Print every parameter; dump the sequence and re-raise on failure."""
        if self.check():
            if not self.isAnalysed:
                self.analyse()
            for index in self.parameter_ids:
                Parameter = self.getParameter(index)
                try:
                    Parameter.stdout()
                except:
                    self.stdout()
                    raise ValueError("error in MKFlowCommunication ModbusClass stdout")
    def save(self, Database, instrument = 0):
        """Store all valid parameters into *Database*; reset on success."""
        if self.check():
            reset = True
            if not self.isAnalysed:
                self.analyse()
            for index in self.parameter_ids:
                Parameter = self.getParameter(index)
                try:
                    if not Parameter.isInvalid():
                        # NOTE(review): `valid` is assigned but never read.
                        valid = True
                        proc = Parameter.getProcess()
                        fbnr = Parameter.getNumber()
                        name = Parameter.getName()
                        value = Parameter.getValue()
                        dataType = Parameter.getDataType()
                        time = self.timeAnswer
                        parameter = Parameter.getName()
                        reset = Database.setFlowbus(instrument, proc, fbnr, dataType, value, time, parameter)
                except:
                    self.stdout()
                    print "error storing parameter."
                    reset = False
            if reset:
                self.reset()
            else:
                print "Sequence not cleared."
    def stdout(self):
        """Dump the sequence, request and answer to stdout."""
        print "--- sequence: %i ---" % self.sequence
        print "---- parameters: %s ----" % self.parameter_ids
        if self.hasRequest:
            print "---- request ----"
            self.Request.stdout()
        if self.hasAnswer:
            print "---- answer ----"
            self.Answer.stdout()
class MKFlowModbus():
    """Value holder for a single flow-bus parameter.

    NOTE: Python 2 code (``print`` statement in stdout).
    """
    def __init__(self, index):
        self.index = index
        # Set when an error/status marks this parameter unusable.
        self.invalid = False
        self.error = ''
        self.value = None
        # Human-readable parameter name.
        self.human = ''
        self.dataType = 'invalid' # readybility. store as string
        self.length = 0
    def setProcess(self, process):
        self.process = process
    def getProcess(self):
        return self.process
    def setNumber(self, number):
        self.number = number
    def getNumber(self):
        return self.number
    def setValue(self, value):
        self.value = value
    def getValue(self):
        return self.value
    def setDataType(self, dataType):
        self.dataType = dataType
    def getDataType(self):
        return self.dataType
    def setName(self, string):
        self.human = string
    def getName(self):
        return self.human
    def setInvalid(self):
        """Mark this parameter as unusable."""
        self.invalid = True
    def setLength(self, length):
        self.length = length
    def getLength(self):
        return self.length
    def setError(self, error):
        """Attach an error message and mark the parameter invalid."""
        self.error = error
        self.setInvalid()
    def isInvalid(self):
        if self.invalid:
            return True
        else:
            return False
    def stdout(self):
        """Print a tab-separated summary line for this parameter."""
        returnarray = [self.isInvalid(), self.getProcess(), self.getNumber(), self.getName()]
        if not self.invalid:
            # Valid parameters print their converted value ...
            returnarray += [FBconvertLong(self.getProcess(), self.getNumber(), self.getValue())]
        else:
            # ... invalid ones print the stored error message.
            returnarray += [self.error]
        print '\t'.join(str(i) for i in returnarray)
| |
"""Z-Wave Constants."""
DOMAIN = "zwave"
ATTR_NODE_ID = "node_id"
ATTR_TARGET_NODE_ID = "target_node_id"
ATTR_ASSOCIATION = "association"
ATTR_INSTANCE = "instance"
ATTR_GROUP = "group"
ATTR_VALUE_ID = "value_id"
ATTR_OBJECT_ID = "object_id"
ATTR_NAME = "name"
ATTR_SCENE_ID = "scene_id"
ATTR_BASIC_LEVEL = "basic_level"
ATTR_CONFIG_PARAMETER = "parameter"
ATTR_CONFIG_SIZE = "size"
ATTR_CONFIG_VALUE = "value"
NETWORK_READY_WAIT_SECS = 30
DISCOVERY_DEVICE = 'device'
SERVICE_CHANGE_ASSOCIATION = "change_association"
SERVICE_ADD_NODE = "add_node"
SERVICE_ADD_NODE_SECURE = "add_node_secure"
SERVICE_REMOVE_NODE = "remove_node"
SERVICE_CANCEL_COMMAND = "cancel_command"
SERVICE_HEAL_NETWORK = "heal_network"
SERVICE_SOFT_RESET = "soft_reset"
SERVICE_TEST_NETWORK = "test_network"
SERVICE_SET_CONFIG_PARAMETER = "set_config_parameter"
SERVICE_PRINT_CONFIG_PARAMETER = "print_config_parameter"
SERVICE_PRINT_NODE = "print_node"
SERVICE_REMOVE_FAILED_NODE = "remove_failed_node"
SERVICE_REPLACE_FAILED_NODE = "replace_failed_node"
SERVICE_SET_WAKEUP = "set_wakeup"
SERVICE_STOP_NETWORK = "stop_network"
SERVICE_START_NETWORK = "start_network"
SERVICE_RENAME_NODE = "rename_node"
SERVICE_REFRESH_ENTITY = "refresh_entity"
SERVICE_REFRESH_NODE = "refresh_node"
EVENT_SCENE_ACTIVATED = "zwave.scene_activated"
EVENT_NODE_EVENT = "zwave.node_event"
EVENT_NETWORK_READY = "zwave.network_ready"
EVENT_NETWORK_COMPLETE = "zwave.network_complete"
EVENT_NETWORK_START = "zwave.network_start"
EVENT_NETWORK_STOP = "zwave.network_stop"
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_ANTITHEFT = 93
COMMAND_CLASS_APPLICATION_CAPABILITY = 87
COMMAND_CLASS_APPLICATION_STATUS = 34
COMMAND_CLASS_ASSOCIATION = 133
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION = 155
COMMAND_CLASS_ASSOCIATION_GRP_INFO = 89
COMMAND_CLASS_BARRIER_OPERATOR = 102
COMMAND_CLASS_BASIC = 32
COMMAND_CLASS_BASIC_TARIFF_INFO = 54
COMMAND_CLASS_BASIC_WINDOW_COVERING = 80
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_CENTRAL_SCENE = 91
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE = 70
COMMAND_CLASS_CLOCK = 129
COMMAND_CLASS_CONFIGURATION = 112
COMMAND_CLASS_CONTROLLER_REPLICATION = 33
COMMAND_CLASS_CRC_16_ENCAP = 86
COMMAND_CLASS_DCP_CONFIG = 58
COMMAND_CLASS_DCP_MONITOR = 59
COMMAND_CLASS_DEVICE_RESET_LOCALLY = 90
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_DOOR_LOCK_LOGGING = 76
COMMAND_CLASS_ENERGY_PRODUCTION = 144
COMMAND_CLASS_ENTRY_CONTROL = 111
COMMAND_CLASS_FIRMWARE_UPDATE_MD = 122
COMMAND_CLASS_GEOGRAPHIC_LOCATION = 140
COMMAND_CLASS_GROUPING_NAME = 123
COMMAND_CLASS_HAIL = 130
COMMAND_CLASS_HRV_CONTROL = 57
COMMAND_CLASS_HRV_STATUS = 55
COMMAND_CLASS_HUMIDITY_CONTROL_MODE = 109
COMMAND_CLASS_HUMIDITY_CONTROL_OPERATING_STATE = 110
COMMAND_CLASS_HUMIDITY_CONTROL_SETPOINT = 100
COMMAND_CLASS_INDICATOR = 135
COMMAND_CLASS_IP_ASSOCIATION = 92
COMMAND_CLASS_IP_CONFIGURATION = 14
COMMAND_CLASS_IRRIGATION = 107
COMMAND_CLASS_LANGUAGE = 137
COMMAND_CLASS_LOCK = 118
COMMAND_CLASS_MAILBOX = 105
COMMAND_CLASS_MANUFACTURER_PROPRIETARY = 145
COMMAND_CLASS_MANUFACTURER_SPECIFIC = 114
COMMAND_CLASS_MARK = 239
COMMAND_CLASS_METER = 50
COMMAND_CLASS_METER_PULSE = 53
COMMAND_CLASS_METER_TBL_CONFIG = 60
COMMAND_CLASS_METER_TBL_MONITOR = 61
COMMAND_CLASS_METER_TBL_PUSH = 62
COMMAND_CLASS_MTP_WINDOW_COVERING = 81
COMMAND_CLASS_MULTI_CHANNEL = 96
COMMAND_CLASS_MULTI_CHANNEL_ASSOCIATION = 142
COMMAND_CLASS_MULTI_COMMAND = 143
COMMAND_CLASS_NETWORK_MANAGEMENT_BASIC = 77
COMMAND_CLASS_NETWORK_MANAGEMENT_INCLUSION = 52
# Z-Wave command class identifiers (numeric command-class codes).
# COMMAND_CLASS_WHATEVER (None) acts as a wildcard that matches any class.
COMMAND_CLASS_NETWORK_MANAGEMENT_PRIMARY = 84
COMMAND_CLASS_NETWORK_MANAGEMENT_PROXY = 82
COMMAND_CLASS_NO_OPERATION = 0
COMMAND_CLASS_NODE_NAMING = 119
COMMAND_CLASS_NON_INTEROPERABLE = 240
COMMAND_CLASS_NOTIFICATION = 113
COMMAND_CLASS_POWERLEVEL = 115
COMMAND_CLASS_PREPAYMENT = 63
COMMAND_CLASS_PREPAYMENT_ENCAPSULATION = 65
COMMAND_CLASS_PROPRIETARY = 136
COMMAND_CLASS_PROTECTION = 117
COMMAND_CLASS_RATE_TBL_CONFIG = 72
COMMAND_CLASS_RATE_TBL_MONITOR = 73
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE = 124
COMMAND_CLASS_REMOTE_ASSOCIATION = 125
COMMAND_CLASS_SCENE_ACTIVATION = 43
COMMAND_CLASS_SCENE_ACTUATOR_CONF = 44
COMMAND_CLASS_SCENE_CONTROLLER_CONF = 45
COMMAND_CLASS_SCHEDULE = 83
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK = 78
COMMAND_CLASS_SCREEN_ATTRIBUTES = 147
COMMAND_CLASS_SCREEN_MD = 146
COMMAND_CLASS_SECURITY = 152
# NOTE: unlike the one-byte class codes above, this is a two-byte marker value
COMMAND_CLASS_SECURITY_SCHEME0_MARK = 61696
COMMAND_CLASS_SENSOR_ALARM = 156
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_CONFIGURATION = 158
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_SILENCE_ALARM = 157
COMMAND_CLASS_SIMPLE_AV_CONTROL = 148
COMMAND_CLASS_SUPERVISION = 108
COMMAND_CLASS_SWITCH_ALL = 39
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SWITCH_COLOR = 51
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_SWITCH_TOGGLE_BINARY = 40
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL = 41
COMMAND_CLASS_TARIFF_TBL_CONFIG = 74
COMMAND_CLASS_TARIFF_TBL_MONITOR = 75
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_THERMOSTAT_FAN_STATE = 69
COMMAND_CLASS_THERMOSTAT_MODE = 64
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE = 66
COMMAND_CLASS_THERMOSTAT_SETBACK = 71
COMMAND_CLASS_THERMOSTAT_SETPOINT = 67
COMMAND_CLASS_TIME = 138
COMMAND_CLASS_TIME_PARAMETERS = 139
COMMAND_CLASS_TRANSPORT_SERVICE = 85
COMMAND_CLASS_USER_CODE = 99
COMMAND_CLASS_VERSION = 134
COMMAND_CLASS_WAKE_UP = 132
COMMAND_CLASS_ZIP = 35
COMMAND_CLASS_ZIP_NAMING = 104
COMMAND_CLASS_ZIP_ND = 88
COMMAND_CLASS_ZIP_6LOWPAN = 79
COMMAND_CLASS_ZIP_GATEWAY = 95
COMMAND_CLASS_ZIP_PORTAL = 97
COMMAND_CLASS_ZWAVEPLUS_INFO = 94
COMMAND_CLASS_WHATEVER = None # Match ALL
COMMAND_CLASS_WINDOW_COVERING = 106
# Z-Wave device class identifiers.  Each GENERIC_TYPE_* is followed by the
# SPECIFIC_TYPE_* values defined under that generic type; specific-type codes
# are only unique within their generic type.  None values are wildcards.
GENERIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_NOT_USED = 0 # Available in all Generic types
GENERIC_TYPE_AV_CONTROL_POINT = 3
SPECIFIC_TYPE_DOORBELL = 18
SPECIFIC_TYPE_SATELLITE_RECIEVER = 4
SPECIFIC_TYPE_SATELLITE_RECIEVER_V2 = 17
GENERIC_TYPE_DISPLAY = 4
SPECIFIC_TYPE_SIMPLE_DISPLAY = 1
GENERIC_TYPE_ENTRY_CONTROL = 64
SPECIFIC_TYPE_DOOR_LOCK = 1
SPECIFIC_TYPE_ADVANCED_DOOR_LOCK = 2
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK = 3
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK_DEADBOLT = 4
SPECIFIC_TYPE_SECURE_DOOR = 5
SPECIFIC_TYPE_SECURE_GATE = 6
SPECIFIC_TYPE_SECURE_BARRIER_ADDON = 7
SPECIFIC_TYPE_SECURE_BARRIER_OPEN_ONLY = 8
SPECIFIC_TYPE_SECURE_BARRIER_CLOSE_ONLY = 9
SPECIFIC_TYPE_SECURE_LOCKBOX = 10
SPECIFIC_TYPE_SECURE_KEYPAD = 11
GENERIC_TYPE_GENERIC_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_PORTABLE_INSTALLER_TOOL = 3
SPECIFIC_TYPE_REMOTE_CONTROL_AV = 4
SPECIFIC_TYPE_REMOTE_CONTROL_SIMPLE = 6
GENERIC_TYPE_METER = 49
SPECIFIC_TYPE_SIMPLE_METER = 1
SPECIFIC_TYPE_ADV_ENERGY_CONTROL = 2
SPECIFIC_TYPE_WHOLE_HOME_METER_SIMPLE = 3
GENERIC_TYPE_METER_PULSE = 48
GENERIC_TYPE_NON_INTEROPERABLE = 255
GENERIC_TYPE_REPEATER_SLAVE = 15
SPECIFIC_TYPE_REPEATER_SLAVE = 1
SPECIFIC_TYPE_VIRTUAL_NODE = 2
GENERIC_TYPE_SECURITY_PANEL = 23
SPECIFIC_TYPE_ZONED_SECURITY_PANEL = 1
GENERIC_TYPE_SEMI_INTEROPERABLE = 80
SPECIFIC_TYPE_ENERGY_PRODUCTION = 1
GENERIC_TYPE_SENSOR_ALARM = 161
SPECIFIC_TYPE_ADV_ZENSOR_NET_ALARM_SENSOR = 5
SPECIFIC_TYPE_ADV_ZENSOR_NET_SMOKE_SENSOR = 10
SPECIFIC_TYPE_BASIC_ROUTING_ALARM_SENSOR = 1
SPECIFIC_TYPE_BASIC_ROUTING_SMOKE_SENSOR = 6
SPECIFIC_TYPE_BASIC_ZENSOR_NET_ALARM_SENSOR = 3
SPECIFIC_TYPE_BASIC_ZENSOR_NET_SMOKE_SENSOR = 8
SPECIFIC_TYPE_ROUTING_ALARM_SENSOR = 2
SPECIFIC_TYPE_ROUTING_SMOKE_SENSOR = 7
SPECIFIC_TYPE_ZENSOR_NET_ALARM_SENSOR = 4
SPECIFIC_TYPE_ZENSOR_NET_SMOKE_SENSOR = 9
SPECIFIC_TYPE_ALARM_SENSOR = 11
GENERIC_TYPE_SENSOR_BINARY = 32
SPECIFIC_TYPE_ROUTING_SENSOR_BINARY = 1
GENERIC_TYPE_SENSOR_MULTILEVEL = 33
SPECIFIC_TYPE_ROUTING_SENSOR_MULTILEVEL = 1
SPECIFIC_TYPE_CHIMNEY_FAN = 2
GENERIC_TYPE_STATIC_CONTROLLER = 2
SPECIFIC_TYPE_PC_CONTROLLER = 1
SPECIFIC_TYPE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_STATIC_INSTALLER_TOOL = 3
SPECIFIC_TYPE_SET_TOP_BOX = 4
SPECIFIC_TYPE_SUB_SYSTEM_CONTROLLER = 5
SPECIFIC_TYPE_TV = 6
SPECIFIC_TYPE_GATEWAY = 7
GENERIC_TYPE_SWITCH_BINARY = 16
SPECIFIC_TYPE_POWER_SWITCH_BINARY = 1
SPECIFIC_TYPE_SCENE_SWITCH_BINARY = 3
SPECIFIC_TYPE_POWER_STRIP = 4
SPECIFIC_TYPE_SIREN = 5
SPECIFIC_TYPE_VALVE_OPEN_CLOSE = 6
SPECIFIC_TYPE_COLOR_TUNABLE_BINARY = 2
SPECIFIC_TYPE_IRRIGATION_CONTROLLER = 7
GENERIC_TYPE_SWITCH_MULTILEVEL = 17
SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL = 5
SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL = 6
SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL = 7
SPECIFIC_TYPE_MOTOR_MULTIPOSITION = 3
SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL = 1
SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL = 4
SPECIFIC_TYPE_FAN_SWITCH = 8
SPECIFIC_TYPE_COLOR_TUNABLE_MULTILEVEL = 2
GENERIC_TYPE_SWITCH_REMOTE = 18
SPECIFIC_TYPE_REMOTE_BINARY = 1
SPECIFIC_TYPE_REMOTE_MULTILEVEL = 2
SPECIFIC_TYPE_REMOTE_TOGGLE_BINARY = 3
SPECIFIC_TYPE_REMOTE_TOGGLE_MULTILEVEL = 4
GENERIC_TYPE_SWITCH_TOGGLE = 19
SPECIFIC_TYPE_SWITCH_TOGGLE_BINARY = 1
SPECIFIC_TYPE_SWITCH_TOGGLE_MULTILEVEL = 2
GENERIC_TYPE_THERMOSTAT = 8
SPECIFIC_TYPE_SETBACK_SCHEDULE_THERMOSTAT = 3
SPECIFIC_TYPE_SETBACK_THERMOSTAT = 5
SPECIFIC_TYPE_SETPOINT_THERMOSTAT = 4
SPECIFIC_TYPE_THERMOSTAT_GENERAL = 2
SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2 = 6
SPECIFIC_TYPE_THERMOSTAT_HEATING = 1
GENERIC_TYPE_VENTILATION = 22
SPECIFIC_TYPE_RESIDENTIAL_HRV = 1
GENERIC_TYPE_WINDOWS_COVERING = 9
SPECIFIC_TYPE_SIMPLE_WINDOW_COVERING = 1
GENERIC_TYPE_ZIP_NODE = 21
SPECIFIC_TYPE_ZIP_ADV_NODE = 2
SPECIFIC_TYPE_ZIP_TUN_NODE = 1
GENERIC_TYPE_WALL_CONTROLLER = 24
SPECIFIC_TYPE_BASIC_WALL_CONTROLLER = 1
GENERIC_TYPE_NETWORK_EXTENDER = 5
SPECIFIC_TYPE_SECURE_EXTENDER = 1
GENERIC_TYPE_APPLIANCE = 6
SPECIFIC_TYPE_GENERAL_APPLIANCE = 1
SPECIFIC_TYPE_KITCHEN_APPLIANCE = 2
SPECIFIC_TYPE_LAUNDRY_APPLIANCE = 3
GENERIC_TYPE_SENSOR_NOTIFICATION = 7
SPECIFIC_TYPE_NOTIFICATION_SENSOR = 1
# Value genre labels (None = wildcard).
GENRE_WHATEVER = None
GENRE_USER = "User"
GENRE_SYSTEM = "System"
# Value data-type labels (None = wildcard).
TYPE_WHATEVER = None
TYPE_BYTE = "Byte"
TYPE_BOOL = "Bool"
TYPE_DECIMAL = "Decimal"
TYPE_INT = "Int"
TYPE_LIST = "List"
TYPE_STRING = "String"
# Dictionary keys used in discovery/matching specifications.
DISC_COMMAND_CLASS = "command_class"
DISC_COMPONENT = "component"
DISC_GENERIC_DEVICE_CLASS = "generic_device_class"
DISC_GENRE = "genre"
DISC_INDEX = "index"
DISC_INSTANCE = "instance"
DISC_LABEL = "label"
DISC_NODE_ID = "node_id"
DISC_OPTIONAL = "optional"
DISC_PRIMARY = "primary"
DISC_READONLY = "readonly"
DISC_SPECIFIC_DEVICE_CLASS = "specific_device_class"
DISC_TYPE = "type"
DISC_VALUES = "values"
DISC_WRITEONLY = "writeonly"
| |
'''
Jared Smith
PyMiner, Version 0.1
DatabaseUtilities.py
Authored under MIT License.
'''
# Built-in Modules
import logging
import urllib2
import re
# 3rd-party imports
import MySQLdb
from django.utils.encoding import smart_str, smart_unicode
# SQL class handles all database operations
class SQL:
# Initialize database connection and parameters for processing tweets into database
def __init__(self, host, db_user, db_pass, shorteners_file = None):
# Default url shorteners, to be used for url parsing or tweet urls
self.shorteners = ['bit.ly', 'goo.gl', 'tiny.cc',
't.co', 'tinyurl.com', 'fb.me']
# Load URL shorteners from file if there is one
if shorteners_file is not None:
regex = re.compile(' +|,+|\n')
with open(file) as shorteners_file:
ids = regex.split(shorteners_file.read().rstrip())
ids = filter(None, ids)
self.logger = logging.getLogger('TwitterCollector')
# Try to connect to the database, connect any exceptions and exit if connection fails
try:
self.db_con = MySQLdb.Connect(host, db_user, db_pass)
self.db_con.autocommit(True)
except Exception, e:
print e, "\nCheck DB password/user in config file.\nExiting..."
self.logger.error("Error: %s\nCheck DB pass/user in config file.\nExiting..." % str(e))
exit(0)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
START: Database Insert Methods
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def __insert_user(self, tweet):
''' Insert user into db.
Args:
tweet: Tweet object containing parsed tweet
'''
try:
sql = "INSERT INTO users (user_id, created_at) VALUES"\
"(%d, '%s')"\
%(
tweet['user']['user_id'],
tweet['user']['created_at'].strftime('%Y-%m-%d %H:%M:%S')
)
#exec stms
self.cursor.execute(sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.self.print_err('__insert_user', str(e))
def __insert_user_info(self, tweet):
''' Insert user info into db.
Args:
tweet: Tweet object containing parsed tweet
'''
try:
sql = "INSERT INTO user_info VALUES"\
"(%d, '%s', '%s', %d, %d, '%s', '%s', '%s', '%s')"\
%(
tweet['user']['user_id'],
MySQLdb.escape_string(tweet['user']['screen_name'].encode('utf-8').strip()),
MySQLdb.escape_string(tweet['user']['name'].encode('utf-8').strip()),
tweet['user']['followers_count'],
tweet['user']['friends_count'],
MySQLdb.escape_string(tweet['user']['description'].encode('utf-8').strip()),
MySQLdb.escape_string(tweet['user']['image_url'].encode('utf-8').strip()),
tweet['user']['created_at'].strftime('%Y-%m-%d %H:%M:%S'),
MySQLdb.escape_string(tweet['user']['location'].encode('utf-8').strip())
)
self.cursor.execute(sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.print_err('__insert_info', str(e))
def __insert_tweet(self, tweet):
''' Insert tweet into db.
Args:
tweet: Tweet object containing parsed tweet
'''
try:
sql = "INSERT INTO tweets VALUES"\
"(%d, '%s', '%s', %d, %d, %d, '%s', %d, %d)"\
%(
tweet['tweet']['tweet_id'],
MySQLdb.escape_string(tweet['tweet']['tweet_text'].encode('utf-8').strip()),
tweet['tweet']['created_at'].strftime('%Y-%m-%d %H:%M:%S'),
tweet['tweet']['geo_lat'],
tweet['tweet']['geo_long'],
tweet['user']['user_id'],
MySQLdb.escape_string(tweet['tweet']['tweet_url'].encode('utf-8').strip()),
tweet['tweet']['retweet_count'],
tweet['tweet']['original_tweet_id']
)
self.cursor.execute(sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.print_err('__insert_tweet', str(e))
def __insert_hashtags(self, tweet):
''' Insert hashtags into db.
Args:
tweet: Tweet object containing parsed tweet
'''
try:
sql = "INSERT INTO tweet_hashtags VALUES(%d, '%s', '%s', %d)"
for hashtag in tweet['tweet']['hashtags']:
tmp_sql = sql\
%(
tweet['tweet']['tweet_id'],
#MySQLdb.escape_string(hashtag['text']).encode('utf-8').strip(),
MySQLdb.escape_string(smart_str(hashtag['text'])).strip(),
tweet['tweet']['created_at'].strftime('%Y-%m-%d %H:%M:%S'),
tweet['user']['user_id']
)
self.cursor.execute(tmp_sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.print_err('__insert_hashtags', str(e))
def __insert_mentions(self, tweet):
''' Insert mentions info into db.
Args:
tweet: Tweet object containing parsed tweet
'''
sql = "INSERT INTO tweet_mentions VALUES(%d, %d, %d)"
#insert mentions
try:
for mention in tweet['tweet']['mentions']:
tmp_sql = sql\
%(
tweet['tweet']['tweet_id'],
tweet['user']['user_id'],
mention['id']
)
self.cursor.execute(tmp_sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.print_err('__insert_mentions', str(e))
def __insert_urls(self, tweet):
''' Insert urls into db.
Args:
tweet: Tweet object containing parsed tweet
'''
sql = "INSERT INTO tweet_links VALUES(%d, %d, '%s', '%s', '%s')"
#insert urls
try:
for url in tweet['tweet']['urls']:
if url is None or url == '':
continue
tmp_sql = sql\
%(
tweet['tweet']['tweet_id'],
tweet['user']['user_id'],
MySQLdb.escape_string(self.__expand_url(url['expanded_url'])),
MySQLdb.escape_string(url['url']),
tweet['tweet']['created_at'].strftime('%Y-%m-%d %H:%M:%S')
)
self.cursor.execute(tmp_sql)
except Exception, e:
if "or read-only" not in str(e) and "1062" not in str(e):
self.print_err('__insert_urls', str(e))
def __insert_raw_JSON(self, tweet):
if tweet['raw_json'] is not None:
insert_raw_json = "INSERT INTO tweet_json_cache VALUES(%d, \"%s\")"\
%(tweet['tweet']['tweet_id'],
MySQLdb.escape_string(tweet['raw_json']
.encode('utf-8').strip()))
self.cursor.execute(insert_raw_json)
def insert_into(self, db, tweet):
''' Insert tweet info into db.
Args:
db: Name of db to use
tweet: Tweet object containing parsed tweet
'''
try:
#select db
self.db_con.select_db(db)
self.cursor = self.db_con.cursor()
except Exception, e:
self.self.print_err('insert_into', str(e))
self.__insert_user(tweet)
self.__insert_user_info(tweet)
self.__insert_tweet(tweet)
self.__insert_hashtags(tweet)
self.__insert_mentions(tweet)
self.__insert_urls(tweet)
self.__insert_raw_JSON(tweet)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
END: Database Insert Methods
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Method to print error if error occurs in SQL execution
def print_err(self, func, msg):
err = "\nDB insert error\n@ %s IN SQL.py\nMSG( %s )" % (func, msg)
print err;
self.logger.error(err)
def testDB(self, db):
try:
# Select Database
self.db_con.select_db(db)
self.cursor = self.db_con.cursor()
# Test Database
sql = "SELECT * FROM users LIMIT 1"
self.cursor.execute(sql)
# Catch exception in config file
except Exception, e:
print e, "\nCheck DB name in config file.\nExiting..."
self.logger.error("Error: %s\nCheck DB name in config file.\nExiting..." % str(e))
exit(0)
# Method to expand shortened url to valid full url
# Uses urllib2 library
def __expand_url(self, url):
if url is None:
return "N/A"
url = url.replace("https://", "")
url = url.replace("http://", "")
url = url.replace("www.", "")
# Check for known shorteners
isShort = [shortener for shortener in self.shorteners
if shortener.lower() in url.lower()]
if len(isShort) == 0 \
and len(url[:url.index(".")]) > 7:
return url
url = "http://"+url
try:
# Send request using short url
req = urllib2.Request(url)
res = urllib2.urlopen(req)
# Return final url
url = res.geturl()
if "http://" not in url and "https://" not in url:
url = "http://"+url
return url
except Exception, e:
return "404/Invalid URL(",url,")"
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import re
from .saferscanner import SaferScanner
class LexingError(Exception):
    """Error raised when input text cannot be tokenized.

    Records the 1-based line and character position of the failure.
    """

    @classmethod
    def from_text(cls, rulestr, unmatched, msg='Lexing error'):
        """Raise a LexingError located where *unmatched* begins in *rulestr*."""
        consumed = rulestr[:len(rulestr) - len(unmatched)]
        line = consumed.count('\n') + 1
        col = len(consumed.rsplit('\n', 1)[-1]) + 1
        raise cls(line, col, msg)

    def __init__(self, linenum, charnum, msg='Lexing error'):
        self.linenum = linenum
        self.charnum = charnum
        self.msg = msg
        self.args = (linenum, charnum, msg)

    def __str__(self):
        return '%s at line %d, char %d' % (self.msg, self.linenum, self.charnum)
class Hint:
    """A completion hint: placeholder text shown instead of a literal completion."""

    def __init__(self, text):
        self.text = text

    def __hash__(self):
        # hash on the class identity plus text, so equal hints hash equally
        return hash((id(self.__class__), self.text))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other.text == self.text

    def __repr__(self):
        return '%s(%r)' % (self.__class__, self.text)
def is_hint(x):
    """Return True if *x* is a completion Hint rather than literal text."""
    return isinstance(x, Hint)
class ParseContext:
    """
    Immutable-by-convention snapshot of a parse in progress: the ruleset,
    current name bindings, tokens matched so far, tokens remaining, and the
    name of the production currently being matched.  All mutating operations
    return a new ParseContext.
    """

    def __init__(self, ruleset, bindings, matched, remainder, productionname):
        self.ruleset = ruleset
        self.bindings = bindings
        self.matched = matched
        self.remainder = remainder
        self.productionname = productionname

    def get_production_by_name(self, name):
        """Look up a production rule by name."""
        return self.ruleset[name]

    def get_completer(self, symname):
        """Look up the completer registered for (current production, symname)."""
        return self.ruleset[(self.productionname, symname)]

    def get_binding(self, name, default=None):
        """Return the value bound to *name*, or *default* if unbound."""
        return self.bindings.get(name, default)

    def with_binding(self, name, val):
        """Return a copy of this context with *name* bound to *val*."""
        updated = dict(self.bindings)
        updated[name] = val
        return self.__class__(self.ruleset, updated, self.matched,
                              self.remainder, self.productionname)

    def with_match(self, num):
        """Return a copy with the next *num* remaining tokens moved to matched."""
        consumed, left = self.remainder[:num], self.remainder[num:]
        return self.__class__(self.ruleset, self.bindings,
                              self.matched + consumed, left,
                              self.productionname)

    def with_production_named(self, newname):
        """Return a copy whose current production name is *newname*."""
        return self.__class__(self.ruleset, self.bindings, self.matched,
                              self.remainder, newname)

    def __repr__(self):
        return '<%s matched=%r remainder=%r prodname=%r>' % (
            self.__class__.__name__, self.matched, self.remainder,
            self.productionname)
class matcher:
    """Base class for grammar matchers.

    A matcher consumes tokens from a ParseContext and returns a list of new
    contexts, one per way the match can succeed (empty list = no match).
    When *completions* is a set, matchers add candidate completions to it
    at end-of-input.
    """

    def __init__(self, arg):
        self.arg = arg

    def match(self, ctxt, completions):
        raise NotImplementedError

    def match_with_results(self, ctxt, completions):
        # Pair each resulting context with the tokens it consumed beyond
        # what was already matched on entry.
        matched_before = len(ctxt.matched)
        newctxts = self.match(ctxt, completions)
        return [(newctxt, newctxt.matched[matched_before:]) for newctxt in newctxts]

    @staticmethod
    def try_registered_completion(ctxt, symname, completions):
        """Invoke a registered completer for *symname*, if any.

        Returns True only when a completer existed and ran successfully;
        its results are added to *completions*.  Only applies at
        end-of-input and when completions are actually being collected.
        """
        if ctxt.remainder or completions is None:
            return False
        try:
            completer = ctxt.get_completer(symname)
        except KeyError:
            return False
        try:
            new_compls = completer(ctxt)
        except Exception:
            # a broken completer is treated as "no completer registered";
            # its traceback is shown only in debug mode
            if ctxt.get_binding('*DEBUG*', False):
                import traceback
                traceback.print_exc()
            return False
        completions.update(new_compls)
        return True

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.arg)
class choice(matcher):
    """Matcher that succeeds along every alternative that matches."""

    def match(self, ctxt, completions):
        # gather the contexts produced by every alternative
        return [newctxt
                for alternative in self.arg
                for newctxt in alternative.match(ctxt, completions)]
class one_or_none(matcher):
    """Matcher for an optional element: matches zero or one occurrence."""

    def match(self, ctxt, completions):
        # the zero-occurrence result (ctxt unchanged) always succeeds
        results = [ctxt]
        results.extend(self.arg.match(ctxt, completions))
        return results
class repeat(matcher):
    """Matcher for zero-or-more repetitions of the wrapped matcher."""

    def match(self, ctxt, completions):
        # breadth-first expansion: each pass tries to extend every context
        # from the previous pass by one more repetition
        accumulated = [ctxt]
        frontier = [ctxt]
        while frontier:
            extended = []
            for candidate in frontier:
                extended.extend(self.arg.match(candidate, completions))
            accumulated.extend(extended)
            frontier = extended
        return accumulated
class rule_reference(matcher):
    """Matcher that delegates to a named production rule in the ruleset."""

    def match(self, ctxt, completions):
        outername = ctxt.productionname
        try:
            production = ctxt.get_production_by_name(self.arg)
        except KeyError:
            raise ValueError("Can't look up production rule named %r" % (self.arg,))
        # match under the referenced rule's name, then restore ours
        matched = production.match(ctxt.with_production_named(self.arg), completions)
        return [m.with_production_named(outername) for m in matched]
class rule_series(matcher):
    """Matcher for a fixed sequence of sub-matchers, applied in order."""

    def match(self, ctxt, completions):
        frontier = [ctxt]
        for piece in self.arg:
            advanced = []
            for candidate in frontier:
                advanced.extend(piece.match(candidate, completions))
            # if no context survives this piece, the whole series fails
            if not advanced:
                return ()
            frontier = advanced
        return frontier
class named_symbol(matcher):
    """Matcher that binds the text matched by its sub-matcher to a name."""

    def __init__(self, name, arg):
        matcher.__init__(self, arg)
        self.name = name

    def match(self, ctxt, completions):
        compls = completions
        if self.try_registered_completion(ctxt, self.name, completions):
            # a registered completer already supplied completions;
            # swallow any others in a throwaway set
            compls = set()
        matches = self.arg.match_with_results(ctxt, compls)
        return [newctxt.with_binding(self.name, tokens_to_text(toks))
                for (newctxt, toks) in matches]

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
class named_collector(named_symbol):
    """Like named_symbol, but accumulates each match into a tuple binding."""

    def match(self, ctxt, completions):
        compls = completions
        if self.try_registered_completion(ctxt, self.name, completions):
            # a registered completer already supplied completions;
            # swallow any others in a throwaway set
            compls = set()
        collected = []
        for newctxt, toks in self.arg.match_with_results(ctxt, compls):
            sofar = newctxt.get_binding(self.name, ())
            collected.append(
                newctxt.with_binding(self.name, sofar + (tokens_to_text(toks),)))
        return collected
class terminal_matcher(matcher):
    """Base class for matchers of single tokens; subclasses supply pattern()."""

    def pattern(self):
        # regex source for this terminal, used when building the lexer
        raise NotImplementedError
class regex_rule(terminal_matcher):
    """Terminal matcher that tests a single token against a regular expression."""

    def __init__(self, pat):
        terminal_matcher.__init__(self, pat)
        self.regex = pat
        # anchor at the end so the entire token text must match
        self.re = re.compile(pat + '$', re.I | re.S)

    def match(self, ctxt, completions):
        if not ctxt.remainder:
            # out of tokens: offer the production name as a hint
            if completions is not None:
                completions.add(Hint('<%s>' % ctxt.productionname))
            return []
        if self.re.match(ctxt.remainder[0][1]):
            return [ctxt.with_match(1)]
        return []

    def pattern(self):
        return self.regex
class text_match(terminal_matcher):
    """Terminal matcher for literal text, compared case-insensitively."""

    alpha_re = re.compile(r'[a-zA-Z]')

    def __init__(self, text):
        try:
            # the rule spec quotes literals; eval() unquotes/unescapes them.
            # NOTE(review): eval is safe only while rule specs are
            # developer-authored, never user-supplied.
            terminal_matcher.__init__(self, eval(text))
        except SyntaxError:
            # NOTE(review): on bad syntax self.arg is never set, so any later
            # use of this matcher raises AttributeError -- confirm intent
            print "bad syntax %r" % (text,)

    def match(self, ctxt, completions):
        if ctxt.remainder:
            # case-insensitive comparison against the next token's text
            if self.arg.lower() == ctxt.remainder[0][1].lower():
                return [ctxt.with_match(1)]
        elif completions is not None:
            # out of tokens: the literal itself is the completion
            completions.add(self.arg)
        return []

    def pattern(self):
        # can't use (?i) here- Scanner component regex flags won't be applied
        def ignorecaseify(matchobj):
            c = matchobj.group(0)
            return '[%s%s]' % (c.upper(), c.lower())
        return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
class case_match(text_match):
    """Terminal matcher for literal text, compared case-sensitively."""

    def match(self, ctxt, completions):
        if not ctxt.remainder:
            # out of tokens: the literal itself is the completion
            if completions is not None:
                completions.add(self.arg)
            return []
        if ctxt.remainder[0][1] == self.arg:
            return [ctxt.with_match(1)]
        return []

    def pattern(self):
        return re.escape(self.arg)
def tokens_to_text(toks):
    """Join the text fields (second element) of a token sequence with spaces."""
    return ' '.join(tok[1] for tok in toks)
class ParsingRuleSet:
    """Holds a grammar: named production rules plus terminal definitions.

    Rules are parsed from a BNF-like rule-spec language (tokenized by
    RuleSpecScanner) and then used to lex, parse, and complete input text.
    """

    # Scanner for the rule-spec language itself (not for input text).
    # Each pattern maps a rule-spec token to a tagged tuple or plain string.
    RuleSpecScanner = SaferScanner([
        (r'::=', lambda s,t: t),
        (r'\[[a-z0-9_]+\]=', lambda s,t: ('named_collector', t[1:-2])),
        (r'[a-z0-9_]+=', lambda s,t: ('named_symbol', t[:-1])),
        (r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s,t: ('regex', t[1:-1].replace(r'\/', '/'))),
        (r'"([^"]|\\.)*"', lambda s,t: ('litstring', t)),
        (r'<[^>]*>', lambda s,t: ('reference', t[1:-1])),
        (r'\bJUNK\b', lambda s,t: ('junk', t)),
        (r'[@()|?*;]', lambda s,t: t),
        (r'\s+', None),
        (r'#[^\n]*', None),
    ], re.I | re.S)

    def __init__(self):
        # name -> production matcher; also (rulename, symname) -> completer
        self.ruleset = {}
        # lazily (re)built terminal lexer; see make_lexer()/lex()
        self.scanner = None
        self.terminals = []

    @classmethod
    def from_rule_defs(cls, rule_defs):
        """Build a ParsingRuleSet from rule-spec source text."""
        prs = cls()
        prs.ruleset, prs.terminals = cls.parse_rules(rule_defs)
        return prs

    @classmethod
    def parse_rules(cls, rulestr):
        """Scan and parse rule-spec text into (rules dict, terminals list)."""
        tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
        if unmatched:
            raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
        rules = {}
        terminals = []
        tokeniter = iter(tokens)
        for t in tokeniter:
            # each definition starts with <name> or JUNK, then '::='
            if isinstance(t, tuple) and t[0] in ('reference', 'junk'):
                # NOTE(review): Python 2 iterator protocol (.next())
                assign = tokeniter.next()
                if assign != '::=':
                    raise ValueError('Unexpected token %r; expected "::="' % (assign,))
                name = t[1]
                production = cls.read_rule_tokens_until(';', tokeniter)
                rules[name] = production
                # terminal productions also feed the lexer
                if isinstance(production, terminal_matcher):
                    terminals.append((name, production))
            else:
                raise ValueError('Unexpected token %r; expected name' % (t,))
        return rules, terminals

    @staticmethod
    def mkrule(pieces):
        # collapse one-element sequences; wrap longer ones in rule_series
        if isinstance(pieces, (tuple, list)):
            if len(pieces) == 1:
                return pieces[0]
            return rule_series(pieces)
        return pieces

    @classmethod
    def read_rule_tokens_until(cls, endtoks, tokeniter):
        """Consume tokens and build a matcher tree.

        *endtoks* is either a terminator token (or tuple of them) or an
        int giving an exact number of tokens to consume.
        """
        # NOTE(review): basestring is Python 2 only
        if isinstance(endtoks, basestring):
            endtoks = (endtoks,)
        counttarget = None
        if isinstance(endtoks, int):
            counttarget = endtoks
            endtoks = ()
        countsofar = 0
        myrules = []
        # one piece-list per '|' alternative
        mybranches = [myrules]
        for t in tokeniter:
            countsofar += 1
            if t in endtoks:
                if len(mybranches) == 1:
                    return cls.mkrule(mybranches[0])
                return choice(map(cls.mkrule, mybranches))
            if isinstance(t, tuple):
                if t[0] == 'reference':
                    t = rule_reference(t[1])
                elif t[0] == 'litstring':
                    t = text_match(t[1])
                elif t[0] == 'regex':
                    t = regex_rule(t[1])
                elif t[0] == 'named_collector':
                    t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
                elif t[0] == 'named_symbol':
                    t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
            elif t == '(':
                t = cls.read_rule_tokens_until(')', tokeniter)
            elif t == '?':
                # postfix operators wrap the piece just parsed
                t = one_or_none(myrules.pop(-1))
            elif t == '*':
                t = repeat(myrules.pop(-1))
            elif t == '@':
                # '@' marks a case-sensitive literal
                x = tokeniter.next()
                if not isinstance(x, tuple) or x[0] != 'litstring':
                    raise ValueError("Unexpected token %r following '@'" % (x,))
                t = case_match(x[1])
            elif t == '|':
                # start a new alternative branch
                myrules = []
                mybranches.append(myrules)
                continue
            else:
                raise ValueError('Unparseable rule token %r after %r' % (t, myrules[-1]))
            myrules.append(t)
            if countsofar == counttarget:
                if len(mybranches) == 1:
                    return cls.mkrule(mybranches[0])
                return choice(map(cls.mkrule, mybranches))
        raise ValueError('Unexpected end of rule tokens')

    def append_rules(self, rulestr):
        """Parse more rule definitions and merge them into this ruleset."""
        rules, terminals = self.parse_rules(rulestr)
        self.ruleset.update(rules)
        self.terminals.extend(terminals)
        if terminals:
            self.scanner = None  # recreate it if/when necessary

    def register_completer(self, func, rulename, symname):
        """Register *func* to generate completions for symname within rulename."""
        self.ruleset[(rulename, symname)] = func

    def make_lexer(self):
        """Build a scanner over all terminal patterns; JUNK tokens are dropped."""
        def make_handler(name):
            if name == 'JUNK':
                return None
            return lambda s, t: (name, t)
        regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
        return SaferScanner(regexes, re.I | re.S).scan

    def lex(self, text):
        """Tokenize *text* with the terminal lexer; raise LexingError on leftovers."""
        if self.scanner is None:
            self.scanner = self.make_lexer()
        tokens, unmatched = self.scanner(text)
        if unmatched:
            raise LexingError.from_text(text, unmatched, 'text could not be lexed')
        return tokens

    def parse(self, startsymbol, tokens, init_bindings=None):
        """Match *tokens* against *startsymbol*; return all resulting contexts."""
        if init_bindings is None:
            init_bindings = {}
        ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
        pattern = self.ruleset[startsymbol]
        return pattern.match(ctxt, None)

    def whole_match(self, startsymbol, tokens):
        """Return the first parse that consumes every token, or None."""
        newctxts = [c for c in self.parse(startsymbol, tokens) if not c.remainder]
        if newctxts:
            return newctxts[0]

    def lex_and_parse(self, text, startsymbol='Start'):
        """Convenience: lex *text* then parse from *startsymbol*."""
        return self.parse(startsymbol, self.lex(text))

    def complete(self, startsymbol, tokens, init_bindings=None):
        """Return the set of completions possible after *tokens*."""
        if init_bindings is None:
            init_bindings = {}
        ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
        pattern = self.ruleset[startsymbol]
        # in debug mode, use a set that traces where each completion came from
        if init_bindings.get('*DEBUG*', False):
            completions = Debugotron(stream=sys.stderr)
        else:
            completions = set()
        pattern.match(ctxt, completions)
        return completions
import sys, traceback
class Debugotron(set):
    """A set that logs a stack trace each time an item is added.

    Substituted for the completions set when the '*DEBUG*' binding is on,
    so you can see which matcher contributed each completion.
    """
    # number of stack frames to print per addition
    depth = 10

    def __init__(self, initializer=(), stream=sys.stdout):
        set.__init__(self, initializer)
        self.stream = stream

    def add(self, item):
        self._note_addition(item)
        set.add(self, item)

    def _note_addition(self, foo):
        self.stream.write("\nitem %r added by:\n" % (foo,))
        # skip this frame and the add()/update() frame that called us
        frame = sys._getframe().f_back.f_back
        for i in range(self.depth):
            name = frame.f_code.co_name
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            if 'self' in frame.f_locals:
                clsobj = frame.f_locals['self']
                # NOTE(review): `cls` is assigned but never used
                cls = clsobj.__class__
                line = '%s.%s() (%s:%d)' % (clsobj, name, filename, lineno)
            else:
                line = '%s (%s:%d)' % (name, filename, lineno)
            self.stream.write(' %s\n' % (line,))
            frame = frame.f_back

    def update(self, items):
        # trace only non-empty updates, then defer to the set implementation
        if items:
            self._note_addition(items)
        set.update(self, items)
| |
from __future__ import division
from Constant import Constant
from Moment import Moment
from Team import Team
plt = None
from sportvu import data
import numpy as np
class EventException(Exception):
    """Raised when an event's moment data is malformed or unusable."""
    pass
def format_pbp(pbp):
    """Render play-by-play annotations as a human-readable string.

    Args:
        pbp: DataFrame-like object with HOMEDESCRIPTION, VISITORDESCRIPTION,
            PCTIMESTRING and event_str columns (one row per event).

    Returns:
        A multi-line string with one banner plus description line per event.
    """
    event_str = "Play-By-Play Annotations\n"
    # BUGFIX: removed unused `g = pbp.iterrows()` (a second, never-consumed
    # iterator over the same frame)
    for eind, pp in pbp.iterrows():
        event_str += '------------Event: %i ---------------\n' % eind
        # NOTE(review): the last field reads column 'event_str' -- same name
        # as the accumulator variable; confirm the column name is intended
        event_str += str(pp['HOMEDESCRIPTION'])+ " , " +\
            str(pp['VISITORDESCRIPTION'])+ " , "+\
            str(pp['PCTIMESTRING'])+ " , "+\
            str(pp['event_str']) + '\n'
    return event_str
class Event:
    """A class for handling and showing (animating) one SportVU event.

    Wraps the raw event dict: its tracking moments, the player roster of
    both teams, and the play-by-play rows attached to the event.
    """

    def __init__(self, event, gameid = ''):
        """Build moments and the playerid -> (name, jersey) lookup.

        Args:
            event: raw event dict with 'moments', 'home', 'visitor' and
                'playbyplay' entries
            gameid: optional game id; when given, the home basket side is
                resolved from the hardcoded table in _resolve_home_basket
        """
        self.gameid = gameid
        moments = event['moments']
        self.moments = [Moment(moment) for moment in moments]
        if gameid != '':
            self._resolve_home_basket()
        self.home_team_id = event['home']['teamid']
        home_players = event['home']['players']
        guest_players = event['visitor']['players']
        players = home_players + guest_players
        player_ids = [player['playerid'] for player in players]
        player_names = [" ".join([player['firstname'],
                        player['lastname']]) for player in players]
        player_jerseys = [player['jersey'] for player in players]
        values = list(zip(player_names, player_jerseys))
        # Example: 101108: ['Chris Paul', '3']
        self.player_ids_dict = dict(zip(player_ids, values))
        self.pbp = event['playbyplay']

    def _resolve_home_basket(self):
        """
        hardcoded for the 3 games labelled
        '0021500357' q1 home: 0
        '0021500150' q1 home: 1
        '0021500278' q1 home: 0
        """
        hard_code = {'0021500357':0, '0021500150':1, '0021500278':0}
        # teams swap baskets after halftime, hence the quarter > 2 flip
        self.home_basket = (hard_code[self.gameid] + (self.moments[0].quarter > 2))%2

    def is_home_possession(self, moment):
        """Return True if the ball is on the home team's basket side."""
        # court x runs 0..~94; x > 50 picks the right-hand half
        ball_basket = int(moment.ball.x > 50)
        if ball_basket == self.home_basket: # HOME possession
            return True
        else: # VISITOR possession
            return False

    def truncate_by_following_event(self, event2):
        """
        use the given event to truncate the current (i.e. do not include the
        trailing frames shown in a later event)
        """
        # trunctate: drop moments at or past event2's starting game clock
        end_time_from_e2 = event2['moments'][0][2]
        last_idx = -1
        for idx, moment in enumerate(self.moments):
            if moment.game_clock < end_time_from_e2:
                last_idx = idx
                break
        if last_idx != -1:
            self.moments = self.moments[:last_idx]

    def sequence_around_t(self, T_a, tfr, seek_last=True):
        """
        segment [T_a - tfr, T_a + tfr]
        note: when seek_last = True, seek for the last T_a
        (this detail becomes important when game-clock stops within one Event)
        """
        # NOTE(review): seek_last is accepted but never read in this body;
        # the loop always takes the FIRST moment past T_a -- confirm intent
        T_a_index = -1
        for idx, moment in enumerate(self.moments):
            if moment.game_clock < T_a:
                T_a_index = idx
                break
        if T_a_index == -1:
            # print ('EventException')
            raise EventException('bad T_a, or bad event')
        start_ind = np.max([0, T_a_index-tfr])
        end_ind = np.min([len(self.moments)-1, T_a_index + tfr])
        # require a full window of 2*tfr frames around T_a
        if end_ind - start_ind != 2*tfr:
            raise EventException('incorrect length')
        self.moments = self.moments[start_ind:end_ind]

    def update_radius(self, i, player_circles, ball_circle, annotations, clock_info, lines, pred_lines):
        """Animation callback: update artists for frame *i*.

        Moves player/ball circles, jersey annotations and the clock text;
        when frame i has stored futures, also updates the trajectory lines.
        Returns the artists that changed.
        """
        line = lines[0]
        ret = [player_circles, ball_circle, line]
        # self.futures: [frame indices, ground-truth xy, sampled prediction xy]
        if i in self.futures[0]:
            frame_ind = self.futures[0].index(i)
            for sample_idx, l in enumerate(pred_lines):
                l.set_ydata(self.futures[2][frame_ind, sample_idx,:,1])
                l.set_xdata(self.futures[2][frame_ind, sample_idx,:,0])
                ret.append(l)
            line.set_ydata(self.futures[1][frame_ind, :, 1])
            line.set_xdata(self.futures[1][frame_ind, :, 0])
        moment = self.moments[i]
        for j, circle in enumerate(player_circles):
            try:
                circle.center = moment.players[j].x, moment.players[j].y
            except:
                # NOTE(review): bare except converts ANY failure (e.g. missing
                # player j in this moment) into EventException
                raise EventException()
            annotations[j].set_position(circle.center)
        clock_test = 'Quarter {:d}\n {:02d}:{:02d}\n {:03.1f}'.format(
            moment.quarter,
            int(moment.game_clock) % 3600 // 60,
            int(moment.game_clock) % 60,
            moment.shot_clock)
        clock_info.set_text(clock_test)
        ball_circle.center = moment.ball.x, moment.ball.y
        ball_circle.radius = moment.ball.radius / Constant.NORMALIZATION_COEF
        # NOTE(review): the next four values are computed but never used;
        # `moment.players[7]` will also raise IndexError for short rosters,
        # so removing them is not purely cosmetic -- confirm before cleanup
        x = np.arange(Constant.X_MIN, Constant.X_MAX, 1)
        court_center_x = Constant.X_MAX /2
        court_center_y = Constant.Y_MAX /2
        player_of_interest = moment.players[7]
        return ret

    def show(self, save_path='', futures=None):
        """Animate the event with matplotlib; show it or save to *save_path*.

        Args:
            save_path: when non-empty, write the animation to this file via
                ffmpeg instead of displaying it
            futures: optional (frame indices, ground truth, predictions)
                triple used to draw trajectory overlays
        """
        global plt
        # matplotlib is imported lazily so headless users of this module
        # don't pay for (or crash on) a display backend
        if plt is None:
            import matplotlib.pyplot as plt
            from matplotlib import animation
            from matplotlib.patches import Circle, Rectangle, Arc
            # NOTE(review): cPickle (Python 2 only) is imported but unused
            import cPickle as pkl
        # Leave some space for inbound passes
        ax = plt.axes(xlim=(Constant.X_MIN,
                            Constant.X_MAX),
                      ylim=(Constant.Y_MIN,
                            Constant.Y_MAX))
        ax.axis('off')
        fig = plt.gcf()
        ax.grid(False)  # Remove grid
        try:
            start_moment = self.moments[0]
        except IndexError as e:
            raise EventException()
        player_dict = self.player_ids_dict
        clock_info = ax.annotate('', xy=[Constant.X_CENTER, Constant.Y_CENTER],
                                 color='black', horizontalalignment='center',
                                 verticalalignment='center')
        annotations = [ax.annotate(self.player_ids_dict[player.id][1], xy=[0, 0], color='w',
                                   horizontalalignment='center',
                                   verticalalignment='center', fontweight='bold')
                       for player in start_moment.players]
        # NOTE(review): `x` is computed but never used
        x = np.arange(Constant.X_MIN, Constant.X_MAX, 1)
        if futures is not None:
            self.futures = futures
        pred_lines = []
        # one faint line per predicted trajectory sample
        for _ in range(self.futures[2].shape[1]):
            l, = ax.plot([],[], color='k', alpha=1./self.futures[2].shape[1])
            pred_lines.append(l)
        line, = ax.plot([], [], color='w')
        # Prepare table: first 5 sorted players are home, last 5 are guests
        sorted_players = sorted(start_moment.players, key=lambda player: player.team.id)
        home_player = sorted_players[0]
        guest_player = sorted_players[5]
        column_labels = tuple([home_player.team.name, guest_player.team.name])
        column_colours = tuple([home_player.team.color, guest_player.team.color])
        cell_colours = [column_colours for _ in range(5)]
        home_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in sorted_players[:5]]
        guest_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in sorted_players[5:]]
        players_data = list(zip(home_players, guest_players))
        try:
            table = plt.table(cellText=players_data,
                              colLabels=column_labels,
                              colColours=column_colours,
                              colWidths=[Constant.COL_WIDTH, Constant.COL_WIDTH],
                              loc='bottom',
                              cellColours=cell_colours,
                              fontsize=Constant.FONTSIZE,
                              cellLoc='center')
        except ValueError as e:
            raise EventException() ### unknown error, probably malformed sequence
        else:
            pass
        finally:
            pass
        table.scale(1, Constant.SCALE)
        table_cells = table.properties()['child_artists']
        for cell in table_cells:
            cell._text.set_color('white')
        player_circles = [plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE, color=player.color)
                          for player in start_moment.players]
        ball_circle = plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE,
                                 color=start_moment.ball.color)
        for circle in player_circles:
            ax.add_patch(circle)
        ax.add_patch(ball_circle)
        anim = animation.FuncAnimation(
            fig, self.update_radius,
            fargs=(player_circles, ball_circle, annotations, clock_info, [line], pred_lines),
            frames=len(self.moments), interval=Constant.INTERVAL)
        court = plt.imread(data.constant.court_path)
        plt.imshow(court, zorder=0, extent=[Constant.X_MIN, Constant.X_MAX - Constant.DIFF,
                                            Constant.Y_MAX, Constant.Y_MIN])
        plt.title(format_pbp(self.pbp))
        if save_path == '':
            plt.show()
        else:
            plt.ioff()
            Writer = animation.writers['ffmpeg']
            writer = Writer(fps=25, metadata=dict(artist='Me'), bitrate=1800)
            anim.save(save_path, writer)
            plt.clf()
| |
import pickle
from kafka_base_monitor import KafkaBaseMonitor
class InfoMonitor(KafkaBaseMonitor):
    '''
    Answers `info:<spiderid>:<appid>[:<crawlid>]` action requests by
    summarizing pending work in the Redis spider queues and sending the
    summary back out through Kafka.
    '''
    regex = "info:*:*"

    def setup(self, settings):
        '''
        Setup kafka
        '''
        KafkaBaseMonitor.setup(self, settings)

    def handle(self, key, value):
        '''
        Processes a valid action info request

        @param key: The key that matched the request
        @param value: The value associated with the key
        '''
        # the master dict to return
        master = {}
        master['uuid'] = value
        master['total_pending'] = 0
        master['server_time'] = int(self.get_current_time())

        # break down key (local renamed from `dict` so it no longer
        # shadows the builtin)
        elements = key.split(":")
        details = {}
        details['spiderid'] = elements[1]
        details['appid'] = elements[2]

        # log we received the info message
        extras = self.get_log_dict('info', details['appid'],
                                   details['spiderid'], master['uuid'])
        if len(elements) == 4:
            # crawlid-specific request: rebuild the log dict with the crawlid
            details['crawlid'] = elements[3]
            extras = self.get_log_dict('info', details['appid'],
                                       details['spiderid'], master['uuid'],
                                       elements[3])
        self.logger.info('Received info request', extra=extras)

        # generate the information requested
        if 'crawlid' in details:
            master = self._build_crawlid_info(master, details)
        else:
            master = self._build_appid_info(master, details)

        if self._send_to_kafka(master):
            extras['success'] = True
            self.logger.info('Sent info to kafka', extra=extras)
        else:
            extras['success'] = False
            self.logger.error('Failed to send info to kafka',
                              extra=extras)

    def _get_bin(self, key):
        '''
        Returns a binned dictionary based on redis zscore

        @return: dict mapping score -> list of unpickled queue items
        '''
        # zscan_iter doesn't return items in order, so bin them by score
        sortedDict = {}
        for item in self.redis_conn.zscan_iter(key):
            my_item = pickle.loads(item[0])
            # score is negated in redis
            my_score = -item[1]
            sortedDict.setdefault(my_score, []).append(my_item)
        return sortedDict

    def _build_appid_info(self, master, dict):
        '''
        Builds the appid info object

        @param master: the master dict
        @param dict: the request details (parameter name kept for backwards
            compatibility even though it shadows the builtin)
        @return: the appid info object
        '''
        master['total_crawlids'] = 0
        master['total_pending'] = 0
        master['total_domains'] = 0
        master['crawlids'] = {}
        master['appid'] = dict['appid']
        master['spiderid'] = dict['spiderid']

        # used for finding total count of domains
        domain_dict = {}

        # get all domain queues
        match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
        for key in self.redis_conn.scan_iter(match=match_string):
            domain = key.split(":")[1]
            sortedDict = self._get_bin(key)
            # now iterate through binned dict
            for score in sortedDict:
                for item in sortedDict[score]:
                    if 'meta' in item:
                        item = item['meta']
                    if item['appid'] != dict['appid']:
                        continue
                    crawlid = item['crawlid']
                    # add new crawlid to master dict
                    if crawlid not in master['crawlids']:
                        master['crawlids'][crawlid] = {
                            'total': 0,
                            'domains': {},
                            'distinct_domains': 0,
                        }
                        timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(
                            sid=dict['spiderid'],
                            aid=dict['appid'],
                            cid=crawlid)
                        # expiry is only looked up the first time the
                        # crawlid is seen (matches original behavior)
                        if self.redis_conn.exists(timeout_key):
                            master['crawlids'][crawlid]['expires'] = self.redis_conn.get(timeout_key)
                        master['total_crawlids'] += 1
                    crawl = master['crawlids'][crawlid]
                    crawl['total'] += 1
                    if domain not in crawl['domains']:
                        crawl['domains'][domain] = {
                            'total': 0,
                            'high_priority': -9999,
                            'low_priority': 9999,
                        }
                        crawl['distinct_domains'] += 1
                        domain_dict[domain] = True
                    dom = crawl['domains'][domain]
                    dom['total'] += 1
                    if item['priority'] > dom['high_priority']:
                        dom['high_priority'] = item['priority']
                    if item['priority'] < dom['low_priority']:
                        dom['low_priority'] = item['priority']
                    master['total_pending'] += 1

        master['total_domains'] = len(domain_dict)
        return master

    def _build_crawlid_info(self, master, dict):
        '''
        Builds the crawlid info object

        @param master: the master dict
        @param dict: the request details (parameter name kept for backwards
            compatibility even though it shadows the builtin)
        @return: the crawlid info object
        '''
        master['total_pending'] = 0
        master['total_domains'] = 0
        master['appid'] = dict['appid']
        master['crawlid'] = dict['crawlid']
        master['spiderid'] = dict['spiderid']
        master['domains'] = {}
        timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'],
                                                         aid=dict['appid'],
                                                         cid=dict['crawlid'])
        if self.redis_conn.exists(timeout_key):
            master['expires'] = self.redis_conn.get(timeout_key)

        # get all domain queues
        match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
        for key in self.redis_conn.scan_iter(match=match_string):
            domain = key.split(":")[1]
            sortedDict = self._get_bin(key)
            # now iterate through binned dict
            for score in sortedDict:
                for item in sortedDict[score]:
                    if 'meta' in item:
                        item = item['meta']
                    if item['appid'] != dict['appid'] or item['crawlid'] != dict['crawlid']:
                        continue
                    if domain not in master['domains']:
                        master['domains'][domain] = {
                            'total': 0,
                            'high_priority': -9999,
                            'low_priority': 9999,
                        }
                        master['total_domains'] += 1
                    dom = master['domains'][domain]
                    dom['total'] += 1
                    if item['priority'] > dom['high_priority']:
                        dom['high_priority'] = item['priority']
                    if item['priority'] < dom['low_priority']:
                        dom['low_priority'] = item['priority']
                    master['total_pending'] += 1
        return master
| |
"""FLASK'S VIEW FILE"""
from functools import wraps
from flask import render_template, request, session, redirect, url_for, flash
from app import app, usr_account, shopn_list, shopn_items
from app import list_table_creator, item_table_creator
@app.route('/')
def index():
    """Render the public landing page."""
    return render_template("index.html")
def login_required(f):
    """Decorator that bounces anonymous visitors to the login page."""
    @wraps(f)
    def wrap(*args, **kwargs):
        """Run the wrapped view only for logged-in sessions."""
        if 'username' not in session:
            return render_template("login.html", response="Please login")
        return f(*args, **kwargs)
    return wrap
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new user account from the signup form."""
    if request.method != 'POST':
        return render_template("signup.html")
    form = request.form
    msg = usr_account.registration(form['username'], form['email'],
                                   form['password'], form['password_confirm'])
    success_messages = (
        "Your account is now registered please proceed to login",
        "Your Account Already Active. Proceed to login",
    )
    if msg in success_messages:
        return render_template("login.html", response=msg)
    return render_template("signup.html", response=msg)
@app.route('/dashboard/<old_name>', methods=['GET', 'POST'])
@app.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard(old_name=" "):
    """Render the dashboard with the current user's shopping lists.

    `old_name` is forwarded to the template so the edit modal can be
    pre-filled when redirected here from edit_list.
    (Removed the no-op `old_name = old_name` self-assignment.)
    """
    user = session['username']
    table_response = list_table_creator.ItemTable(shopn_list.users_list(user))
    return render_template("dashboard.html",
                           table_out=table_response,
                           user=str(user).capitalize(),
                           old_name=old_name)
@app.route('/details/<list_name>/<item_name>', methods=['GET', 'POST'])
@app.route('/details/<list_name>/', methods=['GET', 'POST'])
@login_required
def details(list_name, item_name=""):
    """Render the details page for one list, optionally pre-selecting an item.

    (Removed the no-op `item_name = item_name` self-assignment.)
    """
    # only the items that belong to the requested list
    specific_list_items = [entry
                           for entry in shopn_items.list_of_shopping_list_items
                           if entry['list'] == list_name]
    table_response = item_table_creator.ItemTable(specific_list_items)
    return render_template("details.html",
                           table_out=table_response,
                           list_name=str(list_name).capitalize(),
                           item_name=item_name)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate the user and render the dashboard on success."""
    if request.method == 'POST':
        email = request.form['email']
        pwd = request.form['password']
        msg = usr_account.login(email, pwd)
        if msg == "Success!":
            session['email'] = email
            session['username'] = usr_account.get_uname_by_email(email)
            table_response = list_table_creator.ItemTable(
                shopn_list.users_list(session['username']))
            # BUG FIX: .capitalize() was applied to the rendered page
            # (the return value of render_template), not to the username
            return render_template('dashboard.html',
                                   response=msg,
                                   table_out=table_response,
                                   user=str(session['username']).capitalize())
        else:
            return render_template('signup.html', response=msg, alert_type="alert-danger")
    return render_template("login.html")
@app.route('/logout')
@login_required
def log_out():
    """Clear the session and send the user back to the landing page."""
    departing_user = str(session['username']).capitalize()
    session.clear()
    flash("Goodbye {}".format(departing_user), 'alert-warning')
    return render_template('index.html',
                           response="You are now logged Out",
                           alert_type="alert-info")
@app.route('/dashboard/add_list', methods=['GET', 'POST'])
@login_required
def add():
    """Create a new shopping list for the current user."""
    user = session['username']
    if request.method == 'POST':
        list_name = str(request.form['list_name']).lower()
        add_response = shopn_list.create(user, list_name)
        # every branch renders the same refreshed table, so build it once
        table_response = list_table_creator.ItemTable(shopn_list.users_list(user))
        if isinstance(add_response, list):
            if add_response == shopn_list.list_of_shopping_lists:
                # BUG FIX: .capitalize() was applied to the rendered page
                # (the return value of render_template), not to the username
                return render_template('dashboard.html',
                                       response="Successfully added",
                                       table_out=table_response,
                                       user=str(user).capitalize())
            else:
                # BUG FIX: the failure text was computed but never passed
                # to the template
                return render_template('dashboard.html',
                                       response="Unable to add the List Please Try again",
                                       table_out=table_response,
                                       user=str.capitalize(user),
                                       alert_type="alert-danger")
        else:
            return render_template('dashboard.html',
                                   response=add_response,
                                   table_out=table_response,
                                   user=str.capitalize(user),
                                   alert_type='alert-danger')
    return redirect(url_for('dashboard'))
@app.route('/dashboard/edit_list/<old_name>', methods=['GET', 'POST'])
@app.route('/dashboard/edit_list', methods=['GET', 'POST'])
@login_required
def edit_list(old_name=""):
    """Rename one of the current user's shopping lists."""
    user = session['username']
    if old_name:
        # GET with a name: reopen the dashboard with the edit modal showing
        return redirect(url_for('dashboard', old_name=old_name) + '#editModal')
    elif request.method == 'POST':
        new_name = str(request.form['new_name']).lower()
        old_name = str(request.form['old_name']).lower()
        if new_name and old_name:
            edit_response = shopn_list.edit(new_name, old_name, user)
            # every branch renders the same refreshed table, so build it once
            table_response = list_table_creator.ItemTable(shopn_list.users_list(user))
            if isinstance(edit_response, list):
                if edit_response == shopn_list.list_of_shopping_lists:
                    text_out = "Successfully Changed {} to {}".format(old_name, new_name)
                    # BUG FIX: .capitalize() was applied to the rendered
                    # page (the return value of render_template), not to
                    # the username
                    return render_template('dashboard.html',
                                           response=text_out,
                                           table_out=table_response,
                                           user=str(user).capitalize())
                else:
                    # NOTE(review): the failure text (edit_response) is not
                    # passed to the template, matching original behavior
                    return render_template('dashboard.html',
                                           table_out=table_response,
                                           user=str.capitalize(user),
                                           alert_type='alert-danger')
            else:
                return render_template('dashboard.html',
                                       response=edit_response,
                                       table_out=table_response,
                                       user=str.capitalize(user),
                                       alert_type='alert-danger')
        else:
            flash("New name cannot be blank", 'alert-warning')
    return redirect(url_for('dashboard'))
@app.route('/dashboard/del_list/<list_name>', methods=['GET', 'POST'])
@login_required
def del_list(list_name):
    """Delete one of the current user's shopping lists."""
    owner = session['username']
    if list_name:
        outcome = shopn_list.delete(list_name, owner)
        # both branches render the refreshed table
        table_response = list_table_creator.ItemTable(shopn_list.users_list(owner))
        if outcome == shopn_list.list_of_shopping_lists:
            # NOTE(review): the success path also styles the message with
            # alert-danger — confirm this is intended
            return render_template('dashboard.html',
                                   response="{} has been removed".format(list_name),
                                   table_out=table_response,
                                   user=str.capitalize(owner),
                                   alert_type='alert-danger')
        return render_template('dashboard.html',
                               response=outcome,
                               table_out=table_response,
                               user=str.capitalize(owner),
                               alert_type='alert-danger')
    return redirect(url_for('dashboard'))
@app.route('/details/<list_name>/add_item', methods=['GET', 'POST'])
@login_required
def add_item(list_name):
    """Add an item to a shopping list, then return to its details page."""
    user = session['username']
    if request.method == 'POST':
        item_name = str(request.form['item_name']).lower()
        add_response = shopn_items.add(list_name, item_name, user)
        # BUG FIX: the old condition was
        #   add_response == list_of_shopping_list_items or get_user_items(...)
        # whose second operand is truthy whenever the list already has ANY
        # items, so failures were reported as successes.
        if add_response == shopn_items.list_of_shopping_list_items \
                or add_response == shopn_items.get_user_items(user, list_name):
            flash('Sucessfully added.', 'alert-success')
        elif add_response == "Added 1 more " + item_name:
            flash(add_response, 'alert-success')
        else:
            flash(add_response, 'alert-danger')
    return redirect(url_for('details', list_name=list_name))
@app.route('/details/<list_name>/edit_item/<item_name>', methods=['GET', 'POST'])
@app.route('/details/<list_name>/edit_item', methods=['GET', 'POST'])
@login_required
def edit_item(list_name, item_name=""):
    """Rename an item on a shopping list.

    Fixes: removed the no-op `list_name = list_name`; a plain GET without
    an item name now redirects to the details page instead of falling off
    the function and returning None.
    """
    user = session['username']
    if item_name:
        # GET with a name: reopen the details page with the edit modal showing
        return redirect(url_for('details',
                                list_name=list_name,
                                item_name=item_name) + '#editModal')
    elif request.method == 'POST':
        new_name = str(request.form['new_name']).lower()
        old_name = str(request.form['old_name']).lower()
        if new_name and old_name:
            edit_response = shopn_items.edit(new_name, old_name, list_name, user)
            if edit_response == shopn_items.get_user_items(user, list_name):
                flash("Successfully changed {} to {}".format(old_name, new_name),
                      'alert-success')
            else:
                flash(edit_response, 'alert-danger')
        else:
            flash("New Item Name cannot be blank", 'alert-warning')
    return redirect(url_for('details', list_name=list_name))
@app.route('/del_item/<list_name>/<item_name>', methods=['GET', 'POST'])
@login_required
def del_item(item_name, list_name):
    """Delete an item from a shopping list."""
    if item_name:
        del_response = shopn_items.delete(item_name, list_name)
        if del_response is None:
            text_out = "Successfully Removed {}.".format(item_name)
            flash(text_out)
            return render_template('details.html',
                                   response=text_out,
                                   list_name=str.capitalize(list_name),
                                   alert_type='alert-success')
        elif isinstance(del_response, list):
            flash("Successfully Removed {}.".format(item_name), 'alert-success')
            return redirect(url_for('details', list_name=list_name))
        else:
            table_response = item_table_creator.ItemTable(shopn_items.list_of_shopping_list_items)
            # BUG FIX: alert class was misspelled 'alert-dander'
            return render_template('details.html',
                                   response=del_response,
                                   table_out=table_response,
                                   list_name=str.capitalize(list_name),
                                   alert_type='alert-danger')
    # BUG FIX: url_for('details') without list_name raised a BuildError
    # because the details route requires <list_name>
    return redirect(url_for('details', list_name=list_name))
@app.route('/details/<list_name>/share_list', methods=['GET', 'POST'])
@login_required
def share_list(list_name):
    """Share a list with one or more comma-separated usernames."""
    if request.method == 'POST':
        if request.form['share_with']:
            share_with = request.form['share_with'].lower()
            user = session['username']
            shared_with_list = share_with.split(',')
            # hoisted loop-invariant lookups (the old comprehensions also
            # shadowed the loop variable `item`)
            already_shared = [entry['shared_with']
                              for entry in shopn_list.users_list(user)
                              if entry['name'] == list_name]
            known_users = [acct['uname'] for acct in usr_account.list_of_accounts]
            # BUG FIX: iterate over a copy — the original removed elements
            # from shared_with_list while iterating it, skipping entries
            for candidate in list(shared_with_list):
                if candidate == user:
                    flash("You cannot share to yourself",
                          'alert-danger')
                    shared_with_list.remove(candidate)
                elif candidate in already_shared:
                    flash('You had already shared with {}'.format(candidate),
                          'alert-warning')
                    shared_with_list.remove(candidate)
                elif candidate not in known_users:
                    flash('{} does not exist'.format(candidate),
                          'alert-warning')
                    shared_with_list.remove(candidate)
            share_response = shopn_list.share_list(list_name, user, shared_with_list)
            if isinstance(share_response, list):
                flash("Thank you for sharing \"{}\" with {}"
                      .format(str.capitalize(list_name),
                              str.capitalize(share_with)),
                      'alert-success')
                # BUG FIX: the redirect was built but never returned
                return redirect(url_for('dashboard'))
            else:
                flash(share_response, 'alert-danger')
        else:
            flash('Please specify who to share with', 'alert-danger')
    return redirect(url_for('details', list_name=list_name))
| |
#!/usr/bin/python2.7 -tt
# Baka-Tsuki EPUB downloader GUI (Python 2 + wxPython classic).
from __future__ import unicode_literals
import sys
# bail out early on interpreters older than 2.7
if sys.version_info < (2, 7):
    print "Must use python 2.7 or greater\n"
    sys.exit()
# wxPython is a hard requirement; point the user at the download page
# instead of dying with a bare ImportError
try:
    import wx
except ImportError:
    print "You do not appear to have wxpython installed.\n"
    print "Without wxpython, this program cannot run.\n"
    print "You can download wxpython at: http://www.wxpython.org/download.php#stable \n"
    sys.exit()
import os
from os.path import expanduser, isfile, join
from threading import Thread
from epub import epubParser
# wx menu-item / command IDs
FILE_IMPORT = 650
FILE_EXPORT = 651
FILE_SAVE = 661
FILE_CLOSE = 666
FILE_PARSE_FIRST = 662
FILE_PARSE_LAST = 663
FILE_PARSE_ALL = 664
FILE_CLEAR_FIRST = 668
FILE_CLEAR_LAST = 669
FILE_CLEAR_ALL = 670
# NOTE(review): SETTING_ORDER is reused as the id of every radio menu item
# in ConstructMenu; the items are addressed via GetId() so it works, but
# distinct ids (or wx.ID_ANY) would be safer.
SETTING_ORDER = 680
SETTING_ORDER_MENU = 681
SETTING_VERSION_MENU = 682
SETTING_FULLIMAGES_MENU = 683
SETTING_COMBINE_MENU = 684
HOME_DIR = expanduser("~")
# on Windows, save under the user's Documents folder instead of the profile root
if os.name == 'nt':
    HOME_DIR = join(HOME_DIR, "Documents")
# the URL queue is persisted here between runs
SAVE_FILE = join(HOME_DIR, "epublist.txt")
# initial / minimum window geometry
WIDTH_MIN = 500
WIDTH_INITIAL = 500
HEIGHT_MIN = 400
HEIGHT_INITIAL = 400
class epubThread(Thread):
    """Worker thread that runs the EPUB parser off the UI thread.

    pType selects the job: 0 = parse every queued line, 1 = parse the
    last line only, anything else = parse the first line only.
    """

    def __init__(self, pType, lines, frame, order=True):
        Thread.__init__(self)
        self.pType = pType
        self.lines = lines
        self.parser = epubParser()
        self.frame = frame
        self.order = order
        # fire-and-forget: the thread starts itself immediately
        self.start()

    def run(self):
        wx.CallAfter(self.frame.SetLocked, True)
        if self.pType == 0:
            is_first = True
            combine = self.frame.menuItemSettingsCombineTrue.IsChecked()
            for entry in self.lines:
                if self.parser.cancel:
                    break
                if self.order:
                    self.ParseFirstThread(entry, is_first, combine)
                else:
                    self.ParseLastThread(entry, is_first, combine)
                is_first = False
        elif self.pType == 1:
            self.ParseLastThread(self.lines, True, False)
        else:
            self.ParseFirstThread(self.lines, True, False)
        wx.CallAfter(self.frame.SetLocked, False)
        if self.parser.cancel:
            # wipe any progress text left in the status bar
            wx.CallAfter(self.frame.UiPrint, '')

    def ParseFirstThread(self, line, first, combine):
        # parse, then remove the first queue line unless cancelled
        self.ParseLine(line, first, combine)
        if not self.parser.cancel:
            wx.CallAfter(self.frame.UiClear, False)

    def ParseLastThread(self, line, first, combine):
        # parse, then remove the last queue line unless cancelled
        self.ParseLine(line, first, combine)
        if not self.parser.cancel:
            wx.CallAfter(self.frame.UiClear, True)

    def ParseLine(self, line, first, combine):
        # HOME_DIR is module-level; read-only access needs no `global`
        self.parser.start(
            line, HOME_DIR,
            self.frame.menuItemSettingsFullImagesTrue.IsChecked(),
            3 if self.frame.menuItemSettingsVersion3.IsChecked() else 2,
            first,
            combine
        )
class epubFrame(wx.Frame):
    """Main window: a URL queue with parse/clear controls and settings menus."""

    def __init__(self, *args, **kwargs):
        super(epubFrame, self).__init__(*args, **kwargs)
        self.Bind(wx.EVT_CLOSE, self.Exit)
        self.SetTitle("Baka-Tsuki EPUB Downloader")
        #self.SetIcon(wx.Icon('jr.png', wx.BITMAP_TYPE_PNG))
        self.SetSize((WIDTH_INITIAL, HEIGHT_INITIAL))
        self.SetMinSize((WIDTH_MIN, HEIGHT_MIN))
        self.InitUI()
        # background worker; created on demand by the Parse* handlers
        self.thread = None

    def InitUI(self):
        """Build the input row, button column and URL list."""
        self.ConstructMenu()
        panel = wx.Panel(self)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        fgs = wx.FlexGridSizer(2, 2, 9, 25)
        title = wx.StaticText(panel, label="URL:")
        self.inputText = wx.TextCtrl(panel)
        self.URLList = wx.TextCtrl(panel, style=wx.TE_MULTILINE|wx.TE_DONTWRAP)
        self.URLList.SetEditable(False)
        self.btnBox = self.ConstructButtons(panel)
        fgs.AddMany([(title), (self.inputText, 1, wx.EXPAND), self.btnBox, (self.URLList, 2, wx.EXPAND)])
        fgs.AddGrowableRow(1, 1)
        fgs.AddGrowableCol(1, 1)
        hbox.Add(fgs, proportion=1, flag=wx.ALL|wx.EXPAND, border=5)
        panel.SetSizer(hbox)
        self.LoadListFromFile()
        self.Show(True)

    def ConstructMenu(self):
        """Create the File/Parse/Clear/Settings menus and bind their handlers."""
        menubar = wx.MenuBar()
        menuFile = wx.Menu()
        menuParse = wx.Menu()
        menuClear = wx.Menu()
        menuSettings = wx.Menu()
        menuItemImport = wx.MenuItem(menuFile, FILE_IMPORT, '&Import\tCtrl+I')
        menuItemExport = wx.MenuItem(menuFile, FILE_EXPORT, '&Export\tCtrl+E')
        menuItemSave = wx.MenuItem(menuFile, FILE_SAVE, '&Save\tCtrl+S')
        # BUG FIX: accelerator read 'Strl+Q', so Ctrl+Q never worked
        menuItemQuit = wx.MenuItem(menuFile, FILE_CLOSE, '&Quit\tCtrl+Q')
        menuItemParseFirst = wx.MenuItem(menuParse, FILE_PARSE_FIRST, 'Parse &First')
        menuItemParseLast = wx.MenuItem(menuParse, FILE_PARSE_LAST, 'Parse &Last')
        menuItemParseAll = wx.MenuItem(menuParse, FILE_PARSE_ALL, 'Parse &All')
        menuItemClearFirst = wx.MenuItem(menuClear, FILE_CLEAR_FIRST, 'Clear &First')
        menuItemClearLast = wx.MenuItem(menuClear, FILE_CLEAR_LAST, 'Clear &Last')
        menuItemClearAll = wx.MenuItem(menuClear, FILE_CLEAR_ALL, 'Clear &All')
        # NOTE(review): all the radio items below share the SETTING_ORDER id;
        # they are addressed through GetId() so this works, but distinct ids
        # would be safer.
        menuSettingsOrder = wx.Menu()
        self.menuItemSettingsOrderNew = menuSettingsOrder.AppendRadioItem(SETTING_ORDER, 'Newest First')
        self.menuItemSettingsOrderOld = menuSettingsOrder.AppendRadioItem(SETTING_ORDER, 'Oldest First')
        menuSettingsOrder.Check(self.menuItemSettingsOrderNew.GetId(), True)
        menuSettingsVersion = wx.Menu()
        self.menuItemSettingsVersion2 = menuSettingsVersion.AppendRadioItem(SETTING_ORDER, 'Version 2')
        self.menuItemSettingsVersion3 = menuSettingsVersion.AppendRadioItem(SETTING_ORDER, 'Version 3')
        menuSettingsVersion.Check(self.menuItemSettingsVersion2.GetId(), True)
        menuSettingsFullImages = wx.Menu()
        self.menuItemSettingsFullImagesFalse = menuSettingsFullImages.AppendRadioItem(SETTING_ORDER, 'No')
        self.menuItemSettingsFullImagesTrue = menuSettingsFullImages.AppendRadioItem(SETTING_ORDER, 'Yes')
        menuSettingsFullImages.Check(self.menuItemSettingsFullImagesFalse.GetId(), True)
        menuSettingsCombine = wx.Menu()
        self.menuItemSettingsCombineFalse = menuSettingsCombine.AppendRadioItem(SETTING_ORDER, 'Leave separate')
        self.menuItemSettingsCombineTrue = menuSettingsCombine.AppendRadioItem(SETTING_ORDER, 'Combine Epubs')
        # BUG FIX: copy-paste error checked the FullImages item here
        menuSettingsCombine.Check(self.menuItemSettingsCombineFalse.GetId(), True)
        #menuItemOpen.SetBitmap(wx.Bitmap('file.png'))
        menuFile.AppendItem(menuItemImport)
        menuFile.AppendItem(menuItemExport)
        menuFile.AppendSeparator()
        menuFile.AppendItem(menuItemSave)
        menuFile.AppendSeparator()
        menuFile.AppendItem(menuItemQuit)
        menuParse.AppendItem(menuItemParseFirst)
        menuParse.AppendItem(menuItemParseLast)
        menuParse.AppendItem(menuItemParseAll)
        menuClear.AppendItem(menuItemClearFirst)
        menuClear.AppendItem(menuItemClearLast)
        menuClear.AppendItem(menuItemClearAll)
        menuSettings.AppendMenu(SETTING_ORDER_MENU, '&Parse All Order', menuSettingsOrder)
        menuSettings.AppendMenu(SETTING_VERSION_MENU, '&Version Of EPUB', menuSettingsVersion)
        menuSettings.AppendMenu(SETTING_FULLIMAGES_MENU, '&Download Full Images', menuSettingsFullImages)
        menuSettings.AppendMenu(SETTING_COMBINE_MENU, '&Combine Downloaded Epubs', menuSettingsCombine)
        menubar.Append(menuFile, '&File')
        menubar.Append(menuParse, '&Parse')
        menubar.Append(menuClear, '&Clear')
        menubar.Append(menuSettings, '&Settings')
        self.Bind(wx.EVT_MENU, self.Import, id=FILE_IMPORT)
        self.Bind(wx.EVT_MENU, self.Export, id=FILE_EXPORT)
        self.Bind(wx.EVT_MENU, self.Save, id=FILE_SAVE)
        self.Bind(wx.EVT_MENU, self.Exit, id=FILE_CLOSE)
        self.Bind(wx.EVT_MENU, self.ParseFirst, id=FILE_PARSE_FIRST)
        self.Bind(wx.EVT_MENU, self.ParseLast, id=FILE_PARSE_LAST)
        self.Bind(wx.EVT_MENU, self.ParseAll, id=FILE_PARSE_ALL)
        self.Bind(wx.EVT_MENU, self.ClearFirst, id=FILE_CLEAR_FIRST)
        self.Bind(wx.EVT_MENU, self.ClearLast, id=FILE_CLEAR_LAST)
        self.Bind(wx.EVT_MENU, self.ClearAll, id=FILE_CLEAR_ALL)
        self.statusbar = self.CreateStatusBar()
        self.SetMenuBar(menubar)

    def ConstructButtons(self, panel):
        """Create the action buttons; Cancel starts disabled."""
        btnBox = wx.BoxSizer(wx.VERTICAL)
        self.btn1 = wx.Button(panel, label='Add URL')
        self.btn2 = wx.Button(panel, label='Parse First')
        self.btn3 = wx.Button(panel, label='Parse Last')
        self.btn4 = wx.Button(panel, label='Parse All')
        self.btn5 = wx.Button(panel, label='Clear First')
        self.btn6 = wx.Button(panel, label='Clear Last')
        self.btn7 = wx.Button(panel, label='Clear All')
        self.btn8 = wx.Button(panel, label='Cancel')
        self.btn1.Bind(wx.EVT_BUTTON, self.AddURL)
        self.btn2.Bind(wx.EVT_BUTTON, self.ParseFirst)
        self.btn3.Bind(wx.EVT_BUTTON, self.ParseLast)
        self.btn4.Bind(wx.EVT_BUTTON, self.ParseAll)
        self.btn5.Bind(wx.EVT_BUTTON, self.ClearFirst)
        self.btn6.Bind(wx.EVT_BUTTON, self.ClearLast)
        self.btn7.Bind(wx.EVT_BUTTON, self.ClearAll)
        self.btn8.Bind(wx.EVT_BUTTON, self.Cancel)
        self.btn8.Disable()
        btnBox.AddMany([(self.btn1, 1, wx.EXPAND), (self.btn2, 1, wx.EXPAND), (self.btn3, 1, wx.EXPAND), (self.btn4, 1, wx.EXPAND), (self.btn5, 1, wx.EXPAND), (self.btn6, 1, wx.EXPAND), (self.btn7, 1, wx.EXPAND), (self.btn8, 1, wx.EXPAND)])
        return btnBox

    def Import(self, e):
        """Load the URL list from a user-chosen text file."""
        openFileDialog = wx.FileDialog(self, "Open Text file", "", "", "Text files (*.txt)|*.txt", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if openFileDialog.ShowModal() == wx.ID_CANCEL:
            return
        self.URLList.LoadFile(openFileDialog.GetPath())

    def Export(self, e):
        """Save the URL list to a user-chosen text file."""
        saveFileDialog = wx.FileDialog(self, "Save Text file", "", "", "Text file (*.txt)|*.txt", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if saveFileDialog.ShowModal() == wx.ID_CANCEL:
            return
        try:
            self.URLList.SaveFile(saveFileDialog.GetPath())
        except Exception:
            wx.MessageDialog(None, 'Error saving file', 'Error', wx.OK | wx.ICON_ERROR).ShowModal()

    def Save(self, e):
        """Persist the URL list to the default save file."""
        self.URLList.SaveFile(SAVE_FILE)

    def Exit(self, e):
        wx.Exit()

    def AddURL(self, e):
        """Append the typed URL to the queue (must look like a URL)."""
        line = self.inputText.GetLineText(0)
        if line[:4] == "http" or line[:4] == "www.":
            self.inputText.Clear()
            self.DirectlyAddURL(line)
            self.Save(e)

    def DirectlyAddURL(self, line):
        # separate entries with newlines once the control is non-empty
        if len(self.URLList.GetLineText(0)) > 0:
            self.URLList.AppendText("\n")
        self.URLList.AppendText(line)

    def ParseFirst(self, e):
        """Parse the first URL in the queue on a worker thread."""
        totalLines = self.UiGetNumberOfLines()
        if totalLines > 0:
            line = self.URLList.GetLineText(0)
            self.thread = epubThread(2, line, self)

    def ParseLast(self, e):
        """Parse the last URL in the queue on a worker thread."""
        totalLines = self.UiGetNumberOfLines()
        if totalLines > 0:
            line = self.URLList.GetLineText(totalLines - 1)
            self.thread = epubThread(1, line, self)

    def ParseAll(self, e):
        """Parse every URL; order follows the 'Parse All Order' setting."""
        totalLines = self.UiGetNumberOfLines()
        if totalLines > 0:
            lines = []
            oldOrder = self.menuItemSettingsOrderOld.IsChecked()
            if oldOrder:
                for count in range(totalLines):
                    lines.append(self.URLList.GetLineText(count))
            else:
                # BUG FIX: this loop previously ran from totalLines down to 1,
                # reading one out-of-range line and never reading line 0
                for count in range(totalLines - 1, -1, -1):
                    lines.append(self.URLList.GetLineText(count))
            self.thread = epubThread(0, lines, self, oldOrder)

    def Cancel(self, e):
        """Ask the running parser to stop."""
        if self.thread is not None:
            self.thread.parser.Cancel(True)
            self.btn8.Disable()

    def EnableCancel(self, enable):
        if enable:
            self.btn8.Enable()
        else:
            self.btn8.Disable()

    def ClearFirst(self, e):
        """Remove the first line from the queue and re-save."""
        end = self.URLList.GetLineLength(0) + 1
        self.URLList.Remove(0, end)
        self.Save(e)

    def ClearLast(self, e):
        """Remove the last line from the queue and re-save."""
        totalLines = self.UiGetNumberOfLines()
        if totalLines < 2:
            self.ClearAll(e)
        else:
            length = self.URLList.GetLineLength(totalLines - 1) + 1
            end = self.URLList.GetLastPosition()
            start = end - length
            self.URLList.Remove(start, end)
            self.Save(e)

    def ClearAll(self, e):
        self.URLList.Clear()
        self.Save(e)

    def LoadListFromFile(self):
        """Restore the queue from the save file if one exists."""
        if isfile(SAVE_FILE):
            self.URLList.LoadFile(SAVE_FILE)

    def SetLocked(self, lock):
        """Toggle the UI between the 'parsing' (locked) and idle states."""
        buttons = (self.btn1, self.btn2, self.btn3, self.btn4,
                   self.btn5, self.btn6, self.btn7)
        if lock:
            for btn in buttons:
                btn.Disable()
            self.btn8.Enable()
        else:
            for btn in buttons:
                btn.Enable()
            self.btn8.Disable()
            # reset any pending cancel request for the next run
            self.thread.parser.Cancel(False)

    def UiPrint(self, text):
        # called from the worker thread via wx.CallAfter
        self.statusbar.SetStatusText(text)

    def UiClear(self, last):
        if last:
            self.ClearLast(None)
        else:
            self.ClearFirst(None)

    def UiGetLine(self, lineNum):
        return self.URLList.GetLineText(lineNum)

    def UiGetNumberOfLines(self):
        """Line count of the queue; 0 when the control is empty."""
        if self.URLList.GetValue() == '':
            return 0
        return self.URLList.GetNumberOfLines()
| |
""" Sample showing chunked execution of expressions
This sample constructs an expression to be executed in chunks.
Different approaches are tested, and it is compared with the
equivalent expression written in numpy.
"""
import blaze
import blaze.blir as blir
import numpy as np
from time import time
import math
# ================================================================
def _to_blir_type_string(anObject):
    """Map a value to the blir type name used in generated signatures."""
    if isinstance(anObject, blaze.Array):
        params = anObject.datashape.parameters
        assert(len(params) == 2)
        # element type is hardcoded to float in this sample
        return 'array[%s]' % 'float'
    return 'float'  # scalars are always floats here
def _gen_blir_decl(name, obj):
    """Render a single `name: type` blir parameter declaration."""
    return '%s: %s' % (name, _to_blir_type_string(obj))
def _gen_blir_signature(terms):
    """Render the comma-separated blir parameter list for all terms."""
    decls = [_gen_blir_decl(term_name, term_obj)
             for term_obj, term_name in terms.iteritems()]
    return ',\n\t'.join(decls)
# ================================================================
# Support code to build expresions and convert them to a blir
# function to be executed on each chunk
class Operation(object):
    """Interior node of the expression AST: a binary op over two subtrees."""

    def __init__(self, op, lhs, rhs):
        # op is a blir operator symbol ('+', '-', '*') or the 'dot' reduction
        self.op = op
        self.lhs = lhs
        self.rhs = rhs

    # ------------------------------------------------------------
    # operators - used to build an AST of the expression
    def __add__(self, rhs):
        return Operation('+', self, rhs)

    def __sub__(self, rhs):
        return Operation('-', self, rhs)

    def __mul__(self, rhs):
        return Operation('*', self, rhs)

    def dot(self, rhs):
        return Operation('dot', self, rhs)

    # ------------------------------------------------------------
    # repr
    def __repr__(self):
        return ('Operation(' + repr(self.op) + ', '
                + repr(self.lhs) + ', '
                + repr(self.rhs) + ')')

    # ------------------------------------------------------------
    # support functions to generate blir code
    def make_terms(self, terms):
        # recursively collect every array operand referenced by the tree
        self.lhs.make_terms(terms)
        self.rhs.make_terms(terms)
        return terms

    def gen_blir_expr(self, terms):
        # parenthesize both sides to preserve the original evaluation order
        a = self.lhs.gen_blir_expr(terms)
        b = self.rhs.gen_blir_expr(terms)
        return '(' + a + self.op + b + ')'

    def gen_blir(self):
        # only a top-level 'dot' reduction can be compiled into a kernel
        assert(self.op == 'dot')
        term_array = self.make_terms(set())
        # name each array operand in0, in1, ... in the generated signature
        terms = { obj: 'in%d' % i for i, obj in
                  enumerate(term_array)}
        code = """
        def main(%s, n: int) -> float {
            var float accum = 0.0;
            var int i = 0;
            for i in range(n) {
                accum = accum + (%s*%s);
            }
            return accum;
        }
        """ % (_gen_blir_signature(terms),
               self.lhs.gen_blir_expr(terms),
               self.rhs.gen_blir_expr(terms))
        return term_array, code
class Terminal(object):
    """Leaf node of the expression AST: wraps a blaze array or a scalar."""

    def __init__(self, src):
        self.source = src

    # ------------------------------------------------------------
    # operators - used to build an AST of the expression
    def __add__(self, rhs):
        return Operation('+', self, rhs)

    def __sub__(self, rhs):
        return Operation('-', self, rhs)

    def __mul__(self, rhs):
        return Operation('*', self, rhs)

    def dot(self, rhs):
        return Operation('dot', self, rhs)

    # ------------------------------------------------------------
    def __repr__(self):
        return 'Terminal(' + repr(self.source) + ')'

    # ------------------------------------------------------------
    def make_terms(self, terms):
        # collect array operands only; scalars are inlined in the kernel.
        # FIX: now returns `terms` like Operation.make_terms does, so the
        # two node types are interchangeable for callers.
        if isinstance(self.source, blaze.Array):
            terms.add(self.source)
        return terms

    def gen_blir_expr(self, terms):
        # arrays are indexed per-element inside the generated loop;
        # scalars are emitted as literals
        if isinstance(self.source, blaze.Array):
            return terms[self.source] + '[i]'
        else:
            return repr(self.source)
# ================================================================
def _temp_for(aScalarOrArray, chunk_size):
    """Return a chunk-sized numpy buffer for an array operand.

    Scalars need no buffer and are passed through unchanged.
    """
    if not isinstance(aScalarOrArray, blaze.Array):
        return aScalarOrArray  # a scalar
    dtype = aScalarOrArray.datashape.parameters[-1].to_dtype()
    return np.empty((chunk_size,), dtype=dtype)
def _dimension(operand_list):
    """Return the common length of all array operands."""
    dims = [operand.datashape.shape[-1].val
            for operand in operand_list
            if isinstance(operand, blaze.Array)]
    # every array operand must have the same length
    assert (dims.count(dims[0]) == len(dims))
    return dims[0]
def chunked_eval(blz_expr, chunk_size=32768):
    """Evaluate a dot-reduction expression AST chunk by chunk with blir.

    Compiles the generated kernel once, then streams `chunk_size`-sized
    slices of every array operand through reusable buffers, accumulating
    the partial dot products.

    @param blz_expr: an Operation whose top-level op is 'dot'
    @param chunk_size: number of elements copied per kernel invocation
    @return: the accumulated float result
    """
    operands, code = blz_expr.gen_blir()
    total_size = _dimension(operands)
    # one reusable chunk-sized buffer per array operand (scalars pass through)
    temps = [_temp_for(i, chunk_size) for i in operands]
    # (buffer, operand) pairs that actually need per-chunk copying
    temp_op = [i for i in zip(temps, operands) if isinstance(i[1], blaze.Array)]
    offset = 0
    accum = 0.0
    _, env = blir.compile(code)
    ctx = blir.Context(env)
    while offset < total_size:
        # the final chunk may be shorter than chunk_size
        curr_chunk_size = min(total_size - offset, chunk_size)
        slice_chunk = slice(0, curr_chunk_size)
        slice_src = slice(offset, offset+curr_chunk_size)
        for temp, op in temp_op:
            temp[slice_chunk] = op[slice_src]
        accum += blir.execute(ctx, args=temps + [curr_chunk_size], fname='main')
        offset = slice_src.stop
    ctx.destroy()
    return accum
# ================================================================
# on-disk blaze containers created/consumed by this sample
_persistent_array_names = ['chunk_sample_x.blz',
                           'chunk_sample_y.blz',
                           'chunk_sample_z.blz',
                           'chunk_sample_w.blz']
def _create_persistent_array(name, n, clevel=9):
    # Persist numpy data `n` to disk as a compressed blaze container `name`.
    print 'creating ' + name + '...'
    blaze.array(n, params=blaze.params(storage=name, clevel=clevel))
def _delete_persistent_array(name):
from shutil import rmtree
rmtree(name)
def create_persistent_arrays(elements, clevel):
dshape = str(elements) + ', float64'
try:
dshape = blaze.dshape(dshape)
except:
print elements + ' is not a valid size for the arrays'
return
for name in _persistent_array_names:
# First create a numpy container
shape, dtype = blaze.to_numpy(dshape)
n = np.sin(np.linspace(0, 10*math.pi, shape[0]))
_create_persistent_array(name, n, clevel)
def delete_persistent_arrays():
    """Remove every persisted sample container from disk."""
    for container in _persistent_array_names:
        _delete_persistent_array(container)
def run_test(in_memory, args):
    """Time the sample expression evaluated with BLIR and with numpy.

    in_memory -- if True, copy the on-disk arrays into in-memory blaze
                 arrays before evaluating
    args      -- leftover CLI tokens; the token 'print_expr' dumps the
                 generated BLIR source
    """
    T = Terminal
    print 'opening blaze arrays...'
    x = blaze.open(_persistent_array_names[0])
    y = blaze.open(_persistent_array_names[1])
    z = blaze.open(_persistent_array_names[2])
    w = blaze.open(_persistent_array_names[3])
    shape, dtype = blaze.to_numpy(x.datashape)
    print "***nelements:", shape[0]
    if in_memory:
        print 'getting an in-memory version of blaze arrays...'
        params = blaze.params(clevel=9)
        t0 = time()
        x = blaze.array(x[:], params=params)
        y = blaze.array(y[:], params=params)
        z = blaze.array(z[:], params=params)
        w = blaze.array(w[:], params=params)
        print "conversion to blaze in-memory: %.3f" % (time() - t0)
    print 'datashape is:', x.datashape
    print 'evaluating expression with blir...'
    # (x + y) . (2z + 2w) built as a deferred Terminal/Operation tree
    expr = (T(x)+T(y)).dot(T(2.0)*T(z) + T(2.0)*T(w))
    if 'print_expr' in args:
        print expr.gen_blir()[1]
    t_ce = time()
    result_ce = chunked_eval(expr, chunk_size=50000)
    t_ce = time() - t_ce
    print 'blir chunked result is : %s in %f s' % (result_ce, t_ce)
    print '***blir time: %.3f' % t_ce
    # Same expression in numpy, for comparison.
    t0 = time()
    x = x[:]
    y = y[:]
    z = z[:]
    w = w[:]
    print "conversion to numpy in-memory: %.3f" % (time() - t0)
    print 'evaluating expression with numpy...'
    t_np = time()
    result_np = np.dot(x+y, 2.0*z + 2.0*w)
    t_np = time() - t_np
    print 'numpy result is : %s in %f s' % (result_np, t_np)
    print '***numpy time: %.3f' % t_np
    # machine-readable summary line: nelements, blir time, numpy time
    print '**** %d, %.5f, %.5f' % (shape[0], t_ce, t_np)
def usage(sname):
    # Print the one-line CLI synopsis; `sname` is the invoked script name.
    print sname + ' [--create elements [--clevel lvl]|--run [--in_memory]|--delete|--bench [--max_log]]'
def main(args):
import sys,getopt
command = args[1] if len(args) > 1 else 'help'
try:
opts, args = getopt.getopt(sys.argv[1:], "hric:dl:bm:",
["help", "create=", "delete",
"run", "in_memory", "clevel=",
"bench", "max_log"])
except getopt.GetoptError:
usage(args[0])
sys.exit(2)
create = False
delete = False
run = False
bench = False
clevel = 9
elements = 1000000
max_log = 7
in_memory = False
for opt, arg in opts:
if opt in ("-h", "--help"):
usage(args[0])
sys.exit()
elif opt in ("-c", "--create"):
elements = int(arg)
create = True
elif opt in ("-d", "--delete"):
delete_persistent_arrays()
elif opt in ("-l", "--clevel"):
clevel = int(arg)
elif opt in ("-r", "--run"):
run = True
elif opt in ("-i", "--in_memory"):
in_memory = True
elif opt in ("-b", "--bench"):
bench = True
elif opt in ("-m", "--max_log"):
max_log = int(arg)
if create:
create_persistent_arrays(elements, clevel)
elif run:
run_test(in_memory, args)
elif delete:
delete_persistent_arrays()
elif bench:
print "max_log:", max_log
for i in xrange(max_log):
delete_persistent_arrays()
create_persistent_arrays(10**i, clevel)
run_test(in_memory, args)
if __name__ == '__main__':
    # Script entry point: forward the full argv to main().
    from sys import argv
    main(argv)
## Local Variables:
## mode: python
## coding: utf-8
## python-indent: 4
## tab-width: 4
## fill-column: 66
## End:
| |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch Wav2Vec2 model. """
import math
import unittest
from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
from transformers import is_torch_available
from transformers.testing_utils import require_datasets, require_soundfile, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, _config_zero_init
if is_torch_available():
import torch
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2Model, Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
class Wav2Vec2ModelTester:
    """Builds small Wav2Vec2 configs/inputs and shared checks used by the
    test classes below. Default hyper-parameters are tiny so tests run fast."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=1024,  # speech is longer
        is_training=False,
        hidden_size=16,
        feat_extract_norm="group",
        feat_extract_dropout=0.0,
        feat_extract_activation="gelu",
        conv_dim=(32, 32, 32),
        conv_stride=(4, 4, 4),
        conv_kernel=(8, 8, 8),
        conv_bias=False,
        num_conv_pos_embeddings=16,
        num_conv_pos_embedding_groups=2,
        num_hidden_layers=4,
        num_attention_heads=2,
        hidden_dropout_prob=0.1,  # this is most likely not correctly set yet
        intermediate_size=20,
        layer_norm_eps=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        vocab_size=32,
        do_stable_layer_norm=False,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_dropout = feat_extract_dropout
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = conv_dim
        self.conv_stride = conv_stride
        self.conv_kernel = conv_kernel
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.intermediate_size = intermediate_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.scope = scope
        # Each conv layer shortens the sequence: L' = (L - (kernel - 1)) / stride.
        output_seq_length = self.seq_length
        for kernel, stride in zip(self.conv_kernel, self.conv_stride):
            output_seq_length = (output_seq_length - (kernel - 1)) / stride
        self.output_seq_length = int(math.ceil(output_seq_length))
        self.encoder_seq_length = self.output_seq_length
    def prepare_config_and_inputs(self):
        # Raw waveform-like float inputs plus a random attention mask.
        input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = Wav2Vec2Config(
            hidden_size=self.hidden_size,
            feat_extract_norm=self.feat_extract_norm,
            feat_extract_dropout=self.feat_extract_dropout,
            feat_extract_activation=self.feat_extract_activation,
            conv_dim=self.conv_dim,
            conv_stride=self.conv_stride,
            conv_kernel=self.conv_kernel,
            conv_bias=self.conv_bias,
            num_conv_pos_embeddings=self.num_conv_pos_embeddings,
            num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            hidden_dropout_prob=self.hidden_dropout_prob,
            intermediate_size=self.intermediate_size,
            layer_norm_eps=self.layer_norm_eps,
            hidden_act=self.hidden_act,
            initializer_range=self.initializer_range,
            vocab_size=self.vocab_size,
        )
        return config, input_values, attention_mask
    def create_and_check_model(self, config, input_values, attention_mask):
        # Forward pass shape check on the bare encoder model.
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values, attention_mask=attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
        )
    def create_and_check_batch_inference(self, config, input_values, *args):
        # Padded-batch inference must match per-sample inference.
        # test does not pass for models making use of `group_norm`
        # check: https://github.com/pytorch/fairseq/issues/3227
        model = Wav2Vec2Model(config=config)
        model.to(torch_device)
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0.0
        batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
        for i in range(input_values.shape[0]):
            input_slice = input_values[i : i + 1, : input_lengths[i]]
            output = model(input_slice).last_hidden_state
            batch_output = batch_outputs[i : i + 1, : output.shape[1]]
            self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
    def check_ctc_loss(self, config, input_values, *args):
        # "sum" loss must equal batch_size * seq_len * "mean" loss.
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        # make sure that dropout is disabled
        model.eval()
        input_values = input_values[:3]
        attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            attention_mask[i, input_lengths[i] :] = 0
        model.config.ctc_loss_reduction = "sum"
        sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss
        model.config.ctc_loss_reduction = "mean"
        mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss
        self.parent.assertTrue(abs(labels.shape[0] * labels.shape[1] * mean_loss.item() - sum_loss.item()) < 1e-3)
    def check_training(self, config, input_values, *args):
        # A training step with frozen feature extractor must yield a finite loss.
        config.ctc_zero_infinity = True
        model = Wav2Vec2ForCTC(config=config)
        model.to(torch_device)
        model.train()
        # freeze feature encoder
        model.freeze_feature_extractor()
        input_values = input_values[:3]
        input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
        max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
        labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
        # pad input
        for i in range(len(input_lengths)):
            input_values[i, input_lengths[i] :] = 0.0
            if max_length_labels[i] < labels.shape[-1]:
                # it's important that we make sure that target lenghts are at least
                # one shorter than logit lenghts to prevent -inf
                labels[i, max_length_labels[i] - 1 :] = -100
        loss = model(input_values, labels=labels).loss
        self.parent.assertFalse(torch.isinf(loss).item())
        loss.backward()
    def prepare_config_and_inputs_for_common(self):
        # Adapter for the ModelTesterMixin common-test entry points.
        config, input_values, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class Wav2Vec2ModelTest(ModelTesterMixin, unittest.TestCase):
    """Common model tests for the base (group-norm) Wav2Vec2 variants."""

    all_model_classes = (
        (
            Wav2Vec2ForCTC,
            Wav2Vec2Model,
            Wav2Vec2ForMaskedLM,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = Wav2Vec2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_training(*config_and_inputs)

    # Wav2Vec2 has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # `input_ids` is renamed to `input_values`
    def test_forward_signature(self):
        pass

    # Wav2Vec2 cannot resize token embeddings
    # since it has no tokens embeddings
    def test_resize_tokens_embeddings(self):
        pass

    # Wav2Vec2 has no inputs_embeds
    # and thus the `get_input_embeddings` fn
    # is not implemented
    def test_model_common_attributes(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        # Gradients must flow back to the first hidden state / attention map.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        # set layer drop to 0
        model.config.layerdrop = 0.0
        input_values = inputs_dict["input_values"]
        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)
        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels
        outputs = model(**inputs_dict)
        output = outputs[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        hidden_states.retain_grad()
        attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if "conv.weight" in name or "masked_spec_embed" in name:
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        # Fix: guard on weight_g itself (weight-norm parametrization),
        # not on module.weight, which may be absent/None here.
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)

    @slow
    def test_model_from_pretrained(self):
        model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsNotNone(model)
@require_torch
class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase):
    """Common model tests for the robust (layer-norm, stable-layer-norm) variant."""

    all_model_classes = (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM) if is_torch_available() else ()
    test_pruning = False
    test_headmasking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = Wav2Vec2ModelTester(
            self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
        )
        self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_batched_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_batch_inference(*config_and_inputs)

    def test_ctc_loss_inference(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_ctc_loss(*config_and_inputs)

    def test_train(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.check_training(*config_and_inputs)

    # Wav2Vec2 has no inputs_embeds
    def test_inputs_embeds(self):
        pass

    # `input_ids` is renamed to `input_values`
    def test_forward_signature(self):
        pass

    # Wav2Vec2 cannot resize token embeddings
    # since it has no tokens embeddings
    def test_resize_tokens_embeddings(self):
        pass

    # Wav2Vec2 has no inputs_embeds
    # and thus the `get_input_embeddings` fn
    # is not implemented
    def test_model_common_attributes(self):
        pass

    def test_retain_grad_hidden_states_attentions(self):
        # Gradients must flow back to the first hidden state / attention map.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        # set layer drop to 0
        model.config.layerdrop = 0.0
        input_values = inputs_dict["input_values"]
        input_lengths = torch.tensor(
            [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
        )
        output_lengths = model._get_feat_extract_output_lengths(input_lengths)
        labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
        inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
        inputs_dict["labels"] = labels
        outputs = model(**inputs_dict)
        output = outputs[0]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        attentions = outputs.attentions[0]
        hidden_states.retain_grad()
        attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if "conv.weight" in name or "masked_spec_embed" in name:
                        self.assertTrue(
                            -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0,
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )

    # overwrite from test_modeling_common
    def _mock_init_weights(self, module):
        if hasattr(module, "weight") and module.weight is not None:
            module.weight.data.fill_(3)
        # Fix: guard on weight_g itself (weight-norm parametrization),
        # not on module.weight, which may be absent/None here.
        if hasattr(module, "weight_g") and module.weight_g is not None:
            module.weight_g.data.fill_(3)
        if hasattr(module, "bias") and module.bias is not None:
            module.bias.data.fill_(3)

    @slow
    def test_model_from_pretrained(self):
        model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsNotNone(model)
@require_torch
class Wav2Vec2UtilsTest(unittest.TestCase):
    """Unit tests for the stand-alone SpecAugment mask helper."""

    def test_compute_mask_indices(self):
        # With a span of 1 the number of masked positions per row is
        # exactly mask_prob * sequence_length.
        bsz, seq_len = 4, 60
        prob, span = 0.5, 1
        mask = _compute_mask_indices((bsz, seq_len), prob, span, torch_device)
        expected = [prob * seq_len] * bsz
        self.assertListEqual(mask.sum(axis=-1).tolist(), expected)

    def test_compute_mask_indices_overlap(self):
        # Longer spans may overlap, so only a range of per-row totals is
        # guaranteed.
        bsz, seq_len = 4, 60
        prob, span = 0.5, 4
        mask = _compute_mask_indices((bsz, seq_len), prob, span, torch_device)
        lo = int(prob // span * seq_len)
        hi = int(prob * seq_len)
        for row_total in mask.sum(axis=-1):
            self.assertIn(int(row_total), list(range(lo, hi)))
@require_torch
@slow
@require_datasets
@require_soundfile
class Wav2Vec2ModelIntegrationTest(unittest.TestCase):
    """End-to-end ASR checks against pretrained checkpoints and a dummy
    LibriSpeech split (network + soundfile required, hence @slow)."""

    def _load_datasamples(self, num_samples):
        # Load the first `num_samples` validation clips (by id) as raw speech.
        from datasets import load_dataset

        import soundfile as sf

        ids = [f"1272-141231-000{i}" for i in range(num_samples)]

        # map files to raw
        def map_to_array(batch):
            speech, _ = sf.read(batch["file"])
            batch["speech"] = speech
            return batch

        ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
        ds = ds.filter(lambda x: x["id"] in ids).sort("id").map(map_to_array)
        return ds["speech"][:num_samples]

    def test_inference_ctc_normal(self):
        # Single-clip greedy CTC decoding with the base 960h checkpoint.
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True)
        input_speech = self._load_datasamples(1)
        input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device)
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)
        EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_normal_batched(self):
        # Two padded clips decoded in one batch.
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        model.to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True)
        input_speech = self._load_datasamples(2)
        inputs = processor(input_speech, return_tensors="pt", padding=True, truncation=True)
        input_values = inputs.input_values.to(torch_device)
        with torch.no_grad():
            logits = model(input_values).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)
        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)

    def test_inference_ctc_robust_batched(self):
        # Four clips with explicit attention mask on the large lv60 checkpoint.
        model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device)
        processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True)
        input_speech = self._load_datasamples(4)
        inputs = processor(input_speech, return_tensors="pt", padding=True, truncation=True)
        input_values = inputs.input_values.to(torch_device)
        attention_mask = inputs.attention_mask.to(torch_device)
        with torch.no_grad():
            logits = model(input_values, attention_mask=attention_mask).logits
        predicted_ids = torch.argmax(logits, dim=-1)
        predicted_trans = processor.batch_decode(predicted_ids)
        EXPECTED_TRANSCRIPTIONS = [
            "a man said to the universe sir i exist",
            "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
            "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
            "his instant panic was followed by a small sharp blow high on his chest",
        ]
        self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
| |
#!/usr/bin/env python
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will do:
1. Setup src's git initialization.
2. Place .gclient file outside of src.
3. Call gclient sync outside of src.
"""
import optparse
import os
import pprint
import re
import sys
from utils import TryAddDepotToolsToPythonPath
try:
import gclient_utils
except ImportError:
TryAddDepotToolsToPythonPath()
try:
import gclient_utils
import gclient_scm
import subprocess2
from third_party.repo.progress import Progress
except ImportError:
sys.stderr.write("Can't find gclient_utils, please add your depot_tools "\
"to PATH or PYTHONPATH\n")
# Matches git progress output lines and captures the percentage digits.
percent_re = re.compile('.* ([0-9]{1,2})% .*')
def _GitFilter(line):
    # Output filter for git: strip the ESC-based line-clear sequence and
    # echo non-progress lines plus progress lines at every 10% step.
    # git uses an escape sequence to clear the line; elide it.
    esc = line.find(unichr(033))  # 033 (octal) == ESC, 0x1b
    if esc > -1:
        line = line[:esc]
    match = percent_re.match(line)
    if not match or not int(match.group(1)) % 10:
        print '%s' % line
class FetchingError(Exception):
    # Raised when the DEPS file is missing or required entries are absent.
    pass
class FolderExistGitWrapper(gclient_scm.GitWrapper):
    """Handle the case that we need to initial git environment
    when the folder is already there.
    We need to do:
    1. git init
    2. git remote add
    3. git fetch
    4. git checkout
    Then we can let gclient sync to handle the rest of its life.
    """
    def __init__(self, url=None, root_dir=None, relpath=None):
        gclient_scm.GitWrapper.__init__(self, url, root_dir, relpath)
        # (base_url, revision-or-None) split of the solution URL
        self._split_url = gclient_utils.SplitUrlRevision(self.url)
    def _Fetch(self, remote, options):
        # Fetch from `remote`, retrying up to 3 times on git error 128
        # (typically transient network failures).
        fetch_cmd = ['fetch', remote, '--progress']
        if options.verbose:
            fetch_cmd.append('--verbose')
        for _ in range(3):
            try:
                # git fetch for chromium will take a looooong time for the
                # first time, so set timeout for 30 minutes
                self._Run(fetch_cmd, options, cwd=self.checkout_path,
                          git_filter=True, nag_timer=30, nag_max=60)
                break
            except subprocess2.CalledProcessError, e:
                if e.returncode == 128:
                    print(str(e))
                    print('Retrying...')
                    continue
                raise e
    def _DoCheckOut(self, options):
        # Check out the revision encoded in the URL: a hash directly, a
        # branch via `checkout -b`, or origin/master when unspecified.
        revision = self._split_url[1]
        if revision:
            if revision.startswith('refs/heads/'):
                revision = revision.replace('refs/heads/', 'origin/')
                rev_type = "branch"
            elif revision.startswith('origin/'):
                rev_type = "branch"
            else:
                rev_type = 'hash'
            if rev_type == 'hash':
                co_args = [revision]
            else:
                branch = revision[len('origin/'):]
                branches = self._Capture(['branch'])
                if branch.strip() in [br.strip() for br in branches.split('\n')]:
                    # NOTE(review): print called with ('%s...', branch) prints a
                    # tuple in py2 — '%' formatting was probably intended.
                    print('branch %s already exist, skip checkout', branch)
                    return
                else:
                    co_args = ['-b', branch, revision]
        else:
            co_args = ['-b', 'master', 'origin/master']
        self._Run(['checkout'] + co_args, options, cwd=self.checkout_path,
                  git_filter=True)
    def DoInitAndCheckout(self, options):
        # Initialize the repo in-place (init + remote + fetch + checkout).
        # Do git init if necessary
        if not os.path.exists(os.path.join(self.checkout_path, '.git')):
            print('_____ initialize %s to be a git repo' % self.relpath)
            self._Capture(['init'])
        # Find out remote origin exists or not
        remotes = self._Capture(['remote']).strip().splitlines()
        if 'origin' not in [remote.strip() for remote in remotes]:
            print('_____ setting remote for %s' % self.relpath)
            self._Capture(['remote', 'add', 'origin', self._split_url[0]])
        else:
            current_url = self._Capture(['config', 'remote.origin.url'])
            if current_url != self._split_url[0]:
                print('_____ switching %s to a new upstream' % self.relpath)
                # Switch over to the new upstream
                self._Run(['remote', 'set-url', 'origin', self._split_url[0]], options)
        self._Fetch('origin', options)
        self._DoCheckOut(options)
class DepsFetcher(gclient_utils.WorkItem):
    """Work item that checks out 'src' and writes a .gclient-xwalk file
    describing the remaining chromium dependencies."""

    def __init__(self, name, options):
        gclient_utils.WorkItem.__init__(self, name)
        self._options = options
        # xwalk dir is the parent of this script's directory
        self._xwalk_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))
        if options.deps:
            self._deps_file = options.deps
        else:
            self._deps_file = os.path.join(self._xwalk_dir, 'DEPS.xwalk')
        self._deps = None
        self._chromium_version = None
        self._ParseDepsFile()
        if not 'src' in self._deps:
            raise FetchingError("'src' not specified in deps file(%s)" % options.deps)
        self._src_dep = self._deps['src']
        # self should be at src/xwalk/tools/fetch_deps.py
        # so src is at self/../../../
        self._src_dir = os.path.dirname(self._xwalk_dir)
        self._root_dir = os.path.dirname(self._src_dir)
        self._new_gclient_file = os.path.join(self._root_dir,
                                              '.gclient-xwalk')
        self._src_git = FolderExistGitWrapper(self._src_dep, self._root_dir, 'src')
    def _ParseDepsFile(self):
        # Execute the DEPS file and pull out the expected globals.
        # NOTE(review): execfile runs untrusted-looking code; assumed the
        # deps file is trusted project input.
        if not os.path.exists(self._deps_file):
            raise FetchingError('Deps file does not exist (%s).' % self._deps_file)
        exec_globals = {}
        execfile(self._deps_file, exec_globals)
        self._deps = exec_globals['deps_xwalk']
        self._chromium_version = exec_globals['chromium_version']
    @property
    # pylint: disable=R0201
    def requirements(self):
        # No requirements at all
        return set()
    def run(self, work_queue):
        # Entry point invoked by the gclient ExecutionQueue.
        self._src_git.DoInitAndCheckout(self._options)
        self.PrepareGclient()
        return 0
    def AddIgnorePathFromEnv(self):
        """Read paths from environ XWALK_SYNC_IGNORE.
        Set the path with None value to ignore it when syncing chromium.
        If environ not set, will ignore the ones upstream wiki recommended
        by default.
        """
        ignores_str = os.environ.get("XWALK_SYNC_IGNORE")
        if not ignores_str:
            ignores = ['src/webkit/data/layout_tests/LayoutTests',
                       'src/third_party/WebKit/LayoutTests',
                       'src/content/test/data/layout_tests/LayoutTests',
                       'src/chrome/tools/test/reference_build/chrome_win',
                       'src/chrome_frame/tools/test/reference_build/chrome_win',
                       'src/chrome/tools/test/reference_build/chrome_linux',
                       'src/chrome/tools/test/reference_build/chrome_mac',
                       'src/third_party/hunspell_dictionaries']
        else:
            # accept ':' as a separator too
            ignores_str = ignores_str.replace(':', ';')
            ignores = ignores_str.split(';')
        for ignore in ignores:
            self._deps[ignore] = None
    def PrepareGclient(self):
        """It is very important here to know if the based chromium is trunk
        or versioned.
        If it's trunk, we must use .DEPS.git. Because if we use DEPS, gclient
        will try to find all repos under the same url we host chromium-crosswalk.
        And we need to remove 'src' from custom deps, because 'src' will be the
        main subject for the gclient sync.
        Otherwise, we must use DEPS, and we can find the DEPS at
        http://src.chromium.org/svn/releases/<version>
        In this case, we need to keep 'src' in custom deps.
        """
        solution = {}
        if self._chromium_version == 'Trunk':
            solution['name'] = 'src'
            solution['url'] = self._src_dep
            solution['deps_file'] = '.DEPS.git'
            del(self._deps['src'])
        else:
            solution['name'] = self._chromium_version
            solution['url'] = \
                'http://src.chromium.org/svn/releases/%s' % self._chromium_version
        self.AddIgnorePathFromEnv()
        solution['custom_deps'] = self._deps
        solutions = [solution]
        # NOTE(review): file handle is never closed/flushed explicitly;
        # relies on interpreter exit to flush.
        gclient_file = open(self._new_gclient_file, 'w')
        print "Place %s with solutions:\n%s" % (self._new_gclient_file, solutions)
        gclient_file.write('solutions = %s' % pprint.pformat(solutions))
        # Check whether the target OS is Android.
        if os.environ.get('XWALK_OS_ANDROID'):
            target_os = ['android']
            gclient_file.write('\n')
            gclient_file.write('target_os = %s' % target_os)
    def DoGclientSyncForChromium(self):
        # Run `gclient sync` against the generated .gclient-xwalk file.
        gclient_cmd = ['gclient', 'sync', '--verbose', '--reset',
                       '--force', '--with_branch_heads']
        gclient_cmd.append('--gclientfile=%s' % self._new_gclient_file)
        gclient_utils.CheckCallAndFilterAndHeader(gclient_cmd,
            always=self._options.verbose, cwd=self._root_dir)
        # CheckCallAndFilterAndHeader will raise exception if return
        # value is not 0. So we can easily return 0 here.
        return 0
def main():
    # Parse CLI options, check out 'src' via the work queue, then run
    # `gclient sync` for the remaining chromium dependencies.
    option_parser = optparse.OptionParser()
    option_parser.add_option('--deps', default=None,
        help='The deps file contains the dependencies path and url')
    option_parser.add_option('-v', '--verbose', action='count', default=0,
        help='Produces additional output for diagnostics. Can be '
             'used up to three times for more logging info.')
    # pylint: disable=W0612
    options, args = option_parser.parse_args()
    # Following code copied from gclient_utils.py
    try:
        # Make stdout auto-flush so buildbot doesn't kill us during lengthy
        # operations. Python as a strong tendency to buffer sys.stdout.
        sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
        # Make stdout annotated with the thread ids.
        sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout)
    except (gclient_utils.Error, subprocess2.CalledProcessError), e:
        print >> sys.stderr, 'Error: %s' % str(e)
        return 1
    pm = Progress('Syncing chromium-crosswalk', 1)
    # single-threaded queue: just the one DepsFetcher work item
    work_queue = gclient_utils.ExecutionQueue(1, pm, None)
    deps_fetcher = DepsFetcher('fetching', options)
    work_queue.enqueue(deps_fetcher)
    work_queue.flush()
    sys.exit(deps_fetcher.DoGclientSyncForChromium())
if __name__ == '__main__':
    # Script entry point.
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
suse network helper module
"""
# SuSE network configuration uses:
# - 1 network configuration file per interface
# - 1 IP per interface
# - routes are per interface
# - gateways are per interface
# - DNS is global (/etc/sysconfig/network/config)
import os
import time
import glob
import subprocess
import logging
from cStringIO import StringIO
import commands.network
HOSTNAME_FILE = "/etc/HOSTNAME"
DNS_CONFIG_FILE = "/etc/sysconfig/network/config"
NETCONFIG_DIR = "/etc/sysconfig/network"
INTERFACE_FILE = "ifcfg-%s"
ROUTE_FILE = "ifroute-%s"
def configure_network(hostname, interfaces):
    """Write SUSE network configuration, set the hostname, restart networking.

    Parameters:
        hostname: new system hostname
        interfaces: dict of interface name -> interface description dict

    Returns a (status, message) tuple: (0, "") on success, (500, reason)
    on failure.
    """
    # Generate new interface files
    update_files, remove_files = process_interface_files(interfaces)
    # Update nameservers (DNS configuration is global on SUSE)
    if os.path.exists(DNS_CONFIG_FILE):
        infile = open(DNS_CONFIG_FILE)
    else:
        infile = StringIO()
    try:
        dns = commands.network.get_nameservers(interfaces)
        data = get_nameservers_file(infile, dns)
    finally:
        # BUG FIX: the config file handle was previously never closed
        infile.close()
    update_files[DNS_CONFIG_FILE] = data
    # Generate new hostname file
    update_files[HOSTNAME_FILE] = get_hostname_file(hostname)
    # Generate new /etc/hosts file
    filepath, data = commands.network.get_etc_hosts(interfaces, hostname)
    update_files[filepath] = data
    # Write out new files
    commands.network.update_files(update_files, remove_files)
    pipe = subprocess.PIPE
    # Set hostname
    try:
        commands.network.sethostname(hostname)
    except Exception as e:
        logging.error("Couldn't sethostname(): %s" % str(e))
        return (500, "Couldn't set hostname: %s" % str(e))
    # Restart network
    logging.debug('executing /etc/init.d/network restart')
    p = subprocess.Popen(["/etc/init.d/network", "restart"],
            stdin=pipe, stdout=pipe, stderr=pipe, env={})
    logging.debug('waiting on pid %d' % p.pid)
    # os-encoded exit status; non-zero means the restart failed
    status = os.waitpid(p.pid, 0)[1]
    logging.debug('status = %d' % status)
    if status != 0:
        return (500, "Couldn't restart network: %d" % status)
    return (0, "")
def get_hostname_file(hostname):
    """Return the contents for /etc/HOSTNAME: the hostname plus a newline."""
    return ''.join((hostname, '\n'))
def get_nameservers_file(infile, dns):
    """Rewrite the sysconfig network config with the static DNS server list.

    Parameters:
        infile: iterable of existing config lines (file object or StringIO)
        dns: list of nameserver address strings

    Returns the new file contents as a string.  If *dns* is empty an empty
    string is returned.
    """
    outfile = StringIO()
    if not dns:
        # BUG FIX: previously returned the StringIO object itself rather
        # than a string like every other return path; callers store the
        # result directly as file data.
        return outfile.getvalue()
    found = False
    for line in infile:
        line = line.strip()
        if '=' not in line:
            outfile.write(line + '\n')
            continue
        k = line.split('=', 1)[0].strip()
        if k == 'NETCONFIG_DNS_STATIC_SERVERS':
            # replace the existing assignment with our server list
            outfile.write(
                'NETCONFIG_DNS_STATIC_SERVERS="%s"\n' % ' '.join(dns))
            found = True
        else:
            outfile.write(line + '\n')
    if not found:
        outfile.write('NETCONFIG_DNS_STATIC_SERVERS="%s"\n' % ' '.join(dns))
    return outfile.getvalue()
def _get_file_data(ifname, interface):
"""
Return data for (sub-)interfaces and routes
"""
label = interface['label']
ip4s = interface['ip4s']
ip6s = interface['ip6s']
gateway4 = interface['gateway4']
gateway6 = interface['gateway6']
ifnum = None
iface_data = "# Automatically generated, do not edit\n\n"
if label:
iface_data += "# Label %s\n" % label
iface_data += "BOOTPROTO='static'\n"
for ip in ip4s:
if ifnum is None:
iface_data += "IPADDR='%s'\n" % ip['address']
iface_data += "NETMASK='%s'\n" % ip['netmask']
ifnum = 0
else:
iface_data += "IPADDR_%s='%s'\n" % (ifnum, ip['address'])
iface_data += "NETMASK_%s='%s'\n" % (ifnum, ip['netmask'])
iface_data += "LABEL_%s='%s'\n" % (ifnum, ifnum)
ifnum += 1
for ip in ip6s:
if ifnum is None:
iface_data += "IPADDR='%s'\n" % ip['address']
iface_data += "PREFIXLEN='%s'\n" % ip['prefixlen']
ifnum = 0
else:
iface_data += "IPADDR_%s='%s'\n" % (ifnum, ip['address'])
iface_data += "PREFIXLEN_%s='%s'\n" % (ifnum, ip['prefixlen'])
iface_data += "LABEL_%s='%s'\n" % (ifnum, ifnum)
ifnum += 1
iface_data += "STARTMODE='auto'\n"
iface_data += "USERCONTROL='no'\n"
route_data = ''
for route in interface['routes']:
network = route['network']
netmask = route['netmask']
gateway = route['gateway']
route_data += '%s %s %s %s\n' % (network, gateway, netmask, ifname)
if gateway4:
route_data += 'default %s - -\n' % gateway4
if gateway6:
route_data += 'default %s - -\n' % gateway6
return (iface_data, route_data)
def get_interface_files(interfaces):
    """Render per-interface ifcfg/ifroute file contents.

    Returns a dict mapping bare file name (not full path) -> contents; the
    ifroute file is only emitted when there is route data.
    """
    rendered = {}
    for ifname, interface in interfaces.items():
        iface_data, route_data = _get_file_data(ifname, interface)
        rendered[INTERFACE_FILE % ifname] = iface_data
        if route_data:
            rendered[ROUTE_FILE % ifname] = route_data
    return rendered
def process_interface_files(interfaces):
    """Compute interface config files to write and stale ones to remove.

    Returns (update_files, remove_files): update_files maps absolute path
    -> new file contents; remove_files is a set of absolute paths of stale
    configuration files.
    """
    # Enumerate all of the existing ifcfg-* files; anything we do not
    # regenerate below is considered stale.  Paths containing '.' are
    # skipped (backup / rpmnew style files).
    remove_files = set()
    for filepath in glob.glob(NETCONFIG_DIR + "/ifcfg-*"):
        if '.' not in filepath:
            remove_files.add(filepath)
    # NOTE(review): routes are written as "ifroute-<if>" (ROUTE_FILE) but
    # this glob only matches "route-*" files, so stale ifroute-* files are
    # never cleaned up — confirm whether "ifroute-*" should be globbed too.
    for filepath in glob.glob(NETCONFIG_DIR + "/route-*"):
        if '.' not in filepath:
            remove_files.add(filepath)
    route_file = os.path.join(NETCONFIG_DIR, 'routes')
    if os.path.exists(route_file):
        remove_files.add(route_file)
    # We never write config for lo interface, but it should stay
    remove_files.discard(os.path.join(NETCONFIG_DIR, INTERFACE_FILE % 'lo'))
    update_files = {}
    for filename, data in get_interface_files(interfaces).items():
        filepath = os.path.join(NETCONFIG_DIR, filename)
        update_files[filepath] = data
        # files we are about to rewrite must not be removed
        remove_files.discard(filepath)
    return update_files, remove_files
| |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import sys
import time
import click
import hashlib
import gnupg
import toolz
import pygit2
import github3
import jira.client
from io import StringIO
from pathlib import Path
from textwrap import dedent
from datetime import datetime
from jinja2 import Template, StrictUndefined
from setuptools_scm import get_version
from ruamel.yaml import YAML
CWD = Path(__file__).parent.absolute()
NEW_FEATURE = 'New Features and Improvements'
BUGFIX = 'Bug Fixes'
def md(template, *args, **kwargs):
    """Wrap ``str.format`` with naive Markdown escaping of the arguments."""
    specials = ('*', '#', '_', '~', '`', '>')

    def _escape(text):
        # backslash-escape every Markdown control character
        for ch in specials:
            text = text.replace(ch, '\\' + ch)
        return text

    escaped_args = [_escape(value) for value in args]
    escaped_kwargs = {key: _escape(value) for key, value in kwargs.items()}
    return template.format(*escaped_args, **escaped_kwargs)
class JiraChangelog:
    """Generates changelog entries from resolved ARROW issues in JIRA."""

    def __init__(self, version, username, password,
                 server='https://issues.apache.org/jira'):
        """Query JIRA for every issue resolved as Fixed/Done in *version*.

        Parameters
        ----------
        version : str
            Version string; only the first three dot-separated components
            are used as the JIRA fixVersion.
        username, password : str
            JIRA basic-auth credentials.
        server : str
            Base URL of the JIRA instance.
        """
        self.server = server
        # clean version to the first numbers
        self.version = '.'.join(version.split('.')[:3])
        query = ("project=ARROW "
                 "AND fixVersion='{0}' "
                 "AND status = Resolved "
                 "AND resolution in (Fixed, Done) "
                 "ORDER BY issuetype DESC").format(self.version)
        self.client = jira.client.JIRA({'server': server},
                                       basic_auth=(username, password))
        self.issues = self.client.search_issues(query, maxResults=9999)

    def format_markdown(self):
        """Render the changelog as Markdown, grouped by JIRA issue type."""
        out = StringIO()
        issues_by_type = toolz.groupby(lambda i: i.fields.issuetype.name,
                                       self.issues)
        for typename, issues in sorted(issues_by_type.items()):
            issues.sort(key=lambda x: x.key)
            out.write(md('## {}\n\n', typename))
            for issue in issues:
                out.write(md('* {} - {}\n', issue.key, issue.fields.summary))
            out.write('\n')
        return out.getvalue()

    def format_website(self):
        """Render the changelog for the website: features then bug fixes."""
        # jira category => website category mapping
        categories = {
            'New Feature': 'feature',
            'Improvement': 'feature',
            'Wish': 'feature',
            'Task': 'feature',
            'Test': 'bug',
            'Bug': 'bug',
            'Sub-task': 'feature'
        }
        # BUG FIX: the bug-fix title was previously keyed as 'bugfix',
        # which never matched the 'bug' category produced above and made
        # titles[category] raise KeyError.  Reuse the module constants.
        titles = {
            'feature': NEW_FEATURE,
            'bug': BUGFIX
        }
        issues_by_category = toolz.groupby(
            lambda issue: categories[issue.fields.issuetype.name],
            self.issues
        )
        out = StringIO()
        for category in ('feature', 'bug'):
            title = titles[category]
            # a release may have no issues of one category
            issues = issues_by_category.get(category, [])
            issues.sort(key=lambda x: x.key)
            out.write(md('## {}\n\n', title))
            for issue in issues:
                link = md('[{0}]({1}/browse/{0})', issue.key, self.server)
                out.write(md('* {} - {}\n', link, issue.fields.summary))
            out.write('\n')
        return out.getvalue()

    def render(self, old_changelog, website=False):
        """Insert the newly generated entries after the license header of
        *old_changelog* and return the merged document."""
        old_changelog = old_changelog.splitlines()
        if website:
            new_changelog = self.format_website()
        else:
            new_changelog = self.format_markdown()
        out = StringIO()
        # Apache license header
        out.write('\n'.join(old_changelog[:18]))
        # Newly generated changelog
        today = datetime.today().strftime('%d %B %Y')
        out.write(md('\n\n# Apache Arrow {} ({})\n\n', self.version, today))
        out.write(new_changelog)
        # NOTE(review): line index 18 of the old changelog is skipped here
        # (presumably a blank separator line) — confirm.
        out.write('\n'.join(old_changelog[19:]))
        return out.getvalue().strip()
class GitRemoteCallbacks(pygit2.RemoteCallbacks):
    """Remote callbacks that supply a GitHub OAuth token for pushes and
    bail out after a few failed credential attempts."""

    def __init__(self, token):
        # GitHub personal access token, sent as the username below
        self.token = token
        # number of credential requests seen so far
        self.attempts = 0
        super().__init__()

    def push_update_reference(self, refname, message):
        pass

    def update_tips(self, refname, old, new):
        pass

    def credentials(self, url, username_from_url, allowed_types):
        # it's a libgit2 bug, that it infinitely retries the authentication
        self.attempts += 1
        if self.attempts >= 5:
            # pygit2 doesn't propagate the exception properly
            msg = 'Wrong oauth personal access token'
            print(msg)
            raise ValueError(msg)
        if allowed_types & pygit2.credentials.GIT_CREDTYPE_USERPASS_PLAINTEXT:
            # token as username, per GitHub's token-auth scheme
            return pygit2.UserPass(self.token, 'x-oauth-basic')
        else:
            return None
class Repo:
    """Base class for interaction with local git repositories

    A high level wrapper used for both reading revision information from
    arrow's repository and pushing continuous integration tasks to the queue
    repository.
    """

    def __init__(self, path, github_token=None):
        self.path = Path(path)
        self.repo = pygit2.Repository(str(self.path))
        self.github_token = github_token
        # refs (branches/tags) created locally but not yet pushed to origin
        self._updated_refs = []

    def __str__(self):
        tpl = dedent('''
            Repo: {remote}@{branch}
            Commit: {head}
        ''')
        return tpl.format(
            remote=self.remote_url,
            branch=self.branch.branch_name,
            head=self.head
        )

    @property
    def origin(self):
        """The remote named 'origin'."""
        return self.repo.remotes['origin']

    def fetch(self):
        """Fetch all branches from origin into refs/remotes/origin."""
        refspec = '+refs/heads/*:refs/remotes/origin/*'
        self.origin.fetch([refspec])

    def push(self):
        """Push all locally created refs to origin and clear the pending list."""
        callbacks = GitRemoteCallbacks(self.github_token)
        self.origin.push(self._updated_refs, callbacks=callbacks)
        # BUG FIX: previously assigned to `self.updated_refs` (missing
        # underscore), so the pending list was never cleared and subsequent
        # push() calls re-pushed the same refs.
        self._updated_refs = []

    @property
    def head(self):
        """Currently checked out commit's sha"""
        # NOTE(review): this returns the HEAD reference object, not a sha
        # string; callers use `.head.target` for the sha — confirm intent.
        return self.repo.head

    @property
    def branch(self):
        """Currently checked out branch"""
        return self.repo.branches[self.repo.head.shorthand]

    @property
    def remote(self):
        """Currently checked out branch's remote counterpart"""
        return self.repo.remotes[self.branch.upstream.remote_name]

    @property
    def remote_url(self):
        """
        Currently checked out branch's remote counterpart URL

        If an SSH github url is set, it will be replaced by the https
        equivalent.
        """
        return self.remote.url.replace(
            'git@github.com:', 'https://github.com/')

    @property
    def email(self):
        """The user.email value from the repository's git config."""
        return next(self.repo.config.get_multivar('user.email'))

    @property
    def signature(self):
        """A pygit2 signature for the configured user at the current time."""
        name = next(self.repo.config.get_multivar('user.name'))
        return pygit2.Signature(name, self.email, int(time.time()))

    def create_branch(self, branch_name, files, parents=[], message=''):
        """Create a branch whose tree contains exactly *files* and register
        it for the next push().

        NOTE: the mutable default for *parents* is kept for interface
        compatibility; it is never mutated here.
        """
        # 1. create tree
        builder = self.repo.TreeBuilder()
        for filename, content in files.items():
            # insert the file and creating the new filetree
            blob_id = self.repo.create_blob(content)
            builder.insert(filename, blob_id, pygit2.GIT_FILEMODE_BLOB)
        tree_id = builder.write()
        # 2. create commit with the tree created above
        author = committer = self.signature
        commit_id = self.repo.create_commit(None, author, committer, message,
                                            tree_id, parents)
        commit = self.repo[commit_id]
        # 3. create branch pointing to the previously created commit
        branch = self.repo.create_branch(branch_name, commit)
        # append to the pushable references
        self._updated_refs.append('refs/heads/{}'.format(branch_name))
        return branch

    def create_tag(self, tag_name, commit_id, message=''):
        """Create an annotated tag at *commit_id* and register it for push."""
        tag_id = self.repo.create_tag(tag_name, commit_id,
                                      pygit2.GIT_OBJ_COMMIT, self.signature,
                                      message)
        # append to the pushable references
        self._updated_refs.append('refs/tags/{}'.format(tag_name))
        return self.repo[tag_id]

    def file_contents(self, commit_id, file):
        """Return the raw bytes of *file* in the tree of *commit_id*."""
        commit = self.repo[commit_id]
        entry = commit.tree[file]
        blob = self.repo[entry.id]
        return blob.data

    def _parse_github_user_repo(self):
        # extract "<user>/<repo>" from the remote url
        m = re.match(r'.*\/([^\/]+)\/([^\/\.]+)(\.git)?$', self.remote_url)
        user, repo = m.group(1), m.group(2)
        return user, repo

    def as_github_repo(self):
        """Converts it to a repository object which wraps the GitHub API"""
        username, reponame = self._parse_github_user_repo()
        gh = github3.login(token=self.github_token)
        return gh.repository(username, reponame)
class Queue(Repo):
    """Git repository used as the build queue.

    Each submitted job is a branch; each task of the job gets its own
    branch plus a tag, whose CI run uploads artifacts to the tag's GitHub
    release.
    """

    def _next_job_id(self, prefix):
        """Auto increments the branch's identifier based on the prefix"""
        pattern = re.compile(r'[\w\/-]*{}-(\d+)'.format(prefix))
        matches = list(filter(None, map(pattern.match, self.repo.branches)))
        if matches:
            latest = max(int(m.group(1)) for m in matches)
        else:
            latest = 0
        return '{}-{}'.format(prefix, latest + 1)

    def get(self, job_name):
        """Load a previously submitted Job from its branch's job.yml."""
        branch_name = 'origin/{}'.format(job_name)
        branch = self.repo.branches[branch_name]
        content = self.file_contents(branch.target, 'job.yml')
        buffer = StringIO(content.decode('utf-8'))
        return yaml.load(buffer)

    def put(self, job, prefix='build'):
        """Submit *job*: one branch + tag per task, plus the job branch."""
        # TODO(kszucs): more verbose error handling
        assert isinstance(job, Job)
        assert job.branch is None
        assert len(job.tasks) > 0
        # auto increment and set next job id, e.g. build-85
        job.branch = self._next_job_id(prefix)
        # create tasks' branches
        for task_name, task in job.tasks.items():
            task.branch = '{}-{}'.format(job.branch, task_name)
            files = task.render_files(job=job, arrow=job.target)
            branch = self.create_branch(task.branch, files=files)
            self.create_tag(task.tag, branch.target)
            task.commit = str(branch.target)
        # create job's branch with its description
        return self.create_branch(job.branch, files=job.render_files())

    def github_statuses(self, job):
        """Map task name -> combined GitHub commit status for *job*."""
        repo = self.as_github_repo()
        return {name: repo.commit(task.commit).status()
                for name, task in job.tasks.items()}

    def github_assets(self, task):
        """Map each artifact pattern of *task* to its uploaded release asset.

        Patterns with no matching asset are omitted; a pattern matching
        more than one asset raises ValueError.
        """
        repo = self.as_github_repo()
        try:
            release = repo.release_from_tag(task.tag)
        except github3.exceptions.NotFoundError:
            return {}
        assets = {a.name: a for a in release.assets()}
        artifacts = {}
        for artifact in task.artifacts:
            # artifact can be a regex pattern
            pattern = re.compile(artifact)
            matches = list(filter(None, map(pattern.match, assets.keys())))
            num_matches = len(matches)
            # validate artifact pattern matches single asset
            if num_matches > 1:
                # BUG FIX: the format string has two placeholders but was
                # previously given a single argument (and tried to join
                # match objects), raising IndexError/TypeError instead of
                # this message.
                raise ValueError(
                    'Only a single asset should match pattern `{}`, there are '
                    'multiple ones: {}'.format(
                        artifact,
                        ', '.join(m.group(0) for m in matches)
                    )
                )
            elif num_matches == 1:
                artifacts[artifact] = assets[matches[0].group(0)]
        return artifacts

    def upload_assets(self, job, files, content_type):
        """Upload local *files* as assets of the job branch's release,
        replacing any asset of the same name."""
        repo = self.as_github_repo()
        release = repo.release_from_tag(job.branch)
        assets = {a.name: a for a in release.assets()}
        for path in files:
            if path.name in assets:
                # remove already uploaded asset
                assets[path.name].delete()
            with path.open('rb') as fp:
                release.upload_asset(name=path.name, asset=fp,
                                     content_type=content_type)
class Target:
    """Target repository and revision that the builds run against.

    A serializable data container describing arrow's git remote, branch,
    head sha and version, plus an optional notification email address.
    """

    def __init__(self, head, branch, remote, version, email=None):
        self.head = head
        self.branch = branch
        self.remote = remote
        self.email = email
        self.version = version
        # version with any trailing "-rcN" suffix stripped
        self.no_rc_version = re.sub(r'-rc\d+\Z', '', version)

    @classmethod
    def from_repo(cls, repo, version=None):
        """Build a Target from the currently checked out state of *repo*."""
        assert isinstance(repo, Repo)
        if version is None:
            version = get_version(repo.path, local_scheme=lambda v: '')
        fields = dict(head=str(repo.head.target),
                      email=repo.email,
                      branch=repo.branch.branch_name,
                      remote=repo.remote_url,
                      version=version)
        return cls(**fields)
class Task:
    """A single build task: the data needed to render one CI configuration.

    A task is materialized as a git commit/branch holding the rendered
    template (an appveyor.yml or .travis.yml).  Tasks cannot be submitted
    directly; they belong to a Job, and their branch name (the unique
    identifier) is assigned when the job is put on a queue.
    """

    def __init__(self, platform, template, artifacts=None, params=None):
        assert platform in {'win', 'osx', 'linux'}
        self.platform = platform
        self.template = template
        self.artifacts = artifacts or []
        self.params = params or {}
        # both filled in once the task is submitted to a queue
        self.branch = None
        self.commit = None

    def render_files(self, **extra_params):
        """Render the CI template with the task params plus *extra_params*."""
        template_path = CWD / self.template
        merged = dict(self.params)
        merged.update(extra_params)
        tmpl = Template(template_path.read_text(), undefined=StrictUndefined)
        return {self.filename: tmpl.render(task=self, **merged)}

    @property
    def tag(self):
        # the release tag shares its name with the task branch
        return self.branch

    @property
    def ci(self):
        # windows builds run on appveyor, everything else on travis
        return 'appveyor' if self.platform == 'win' else 'travis'

    @property
    def filename(self):
        return 'appveyor.yml' if self.ci == 'appveyor' else '.travis.yml'
class Job:
    """A set of tasks to run against a single target repository."""

    def __init__(self, target, tasks):
        assert isinstance(target, Target)
        assert all(isinstance(task, Task) for task in tasks.values())
        self.target = target
        self.tasks = tasks
        # assigned once the job is submitted to a queue
        self.branch = None

    def render_files(self):
        """Serialize this job to YAML, keyed by its on-branch filename."""
        with StringIO() as sink:
            yaml.dump(self, sink)
            payload = sink.getvalue()
        return {'job.yml': payload}

    @property
    def email(self):
        # the CROSSBOW_EMAIL environment variable wins over the target email
        return os.environ.get('CROSSBOW_EMAIL', self.target.email)
# configure yaml serializer
yaml = YAML()
# register the data containers so Job instances round-trip through job.yml
yaml.register_class(Job)
yaml.register_class(Task)
yaml.register_class(Target)

# state color mapping to highlight console output
COLORS = {'ok': 'green',
          'error': 'red',
          'missing': 'red',
          'failure': 'red',
          'pending': 'yellow',
          'success': 'green'}

# define default paths
DEFAULT_CONFIG_PATH = CWD / 'tasks.yml'  # task definitions next to this script
DEFAULT_ARROW_PATH = CWD.parents[1]  # arrow repository root
DEFAULT_QUEUE_PATH = CWD.parents[2] / 'crossbow'  # sibling queue repository
@click.group()
@click.option('--github-token', '-t', default=None,
              help='OAuth token for GitHub authentication')
@click.option('--arrow-path', '-a',
              type=click.Path(exists=True), default=DEFAULT_ARROW_PATH,
              help='Arrow\'s repository path. Defaults to the repository of '
                   'this script')
@click.option('--queue-path', '-q',
              type=click.Path(exists=True), default=DEFAULT_QUEUE_PATH,
              help='The repository path used for scheduling the tasks. '
                   'Defaults to crossbow directory placed next to arrow')
@click.pass_context
def crossbow(ctx, github_token, arrow_path, queue_path):
    """Schedule and monitor Arrow packaging tasks on a queue repository."""
    # the token may also arrive via the CROSSBOW_GITHUB_TOKEN environment
    # variable (see auto_envvar_prefix in the __main__ guard)
    if github_token is None:
        raise click.ClickException(
            'Could not determine GitHub token. Please set the '
            'CROSSBOW_GITHUB_TOKEN environment variable to a '
            'valid GitHub access token or pass one to --github-token.'
        )
    # shared state for the subcommands
    ctx.obj['arrow'] = Repo(Path(arrow_path))
    ctx.obj['queue'] = Queue(Path(queue_path), github_token=github_token)
@crossbow.command()
@click.option('--changelog-path', '-c', type=click.Path(exists=True),
              default=DEFAULT_ARROW_PATH / 'CHANGELOG.md',
              help='Path of changelog to update')
@click.option('--arrow-version', '-v', default=None,
              help='Set target version explicitly')
@click.option('--is-website', '-w', default=False)
@click.option('--jira-username', '-u', default=None, help='JIRA username')
@click.option('--jira-password', '-P', default=None, help='JIRA password')
@click.option('--dry-run/--write', default=False,
              help='Just display the new changelog, don\'t write it')
@click.pass_context
def changelog(ctx, changelog_path, arrow_version, is_website, jira_username,
              jira_password, dry_run):
    """Regenerate the changelog from the resolved JIRA issues."""
    changelog_path = Path(changelog_path)
    target = Target.from_repo(ctx.obj['arrow'])
    # the repository's version is used unless one is given explicitly
    version = arrow_version or target.version
    changelog = JiraChangelog(version, username=jira_username,
                              password=jira_password)
    new_content = changelog.render(changelog_path.read_text(),
                                   website=is_website)
    if dry_run:
        click.echo(new_content)
    else:
        changelog_path.write_text(new_content)
        # BUG FIX: the implicitly concatenated literals previously lacked a
        # space and rendered as "...see git diff for thechanges"
        click.echo('New changelog successfully generated, see git diff for '
                   'the changes')
def load_tasks_from_config(config_path, task_names, group_names):
    """Read the task configuration file and resolve the requested names.

    Expands each requested group into its member tasks, adds the explicitly
    requested tasks, validates everything against the config, and returns a
    dict of task name -> task configuration.
    """
    with Path(config_path).open() as fp:
        config = yaml.load(fp)

    groups, tasks = config['groups'], config['tasks']
    valid_groups = set(groups.keys())
    valid_tasks = set(tasks.keys())

    requested_groups = set(group_names)
    invalid_groups = requested_groups - valid_groups
    if invalid_groups:
        raise click.ClickException('Invalid group(s) {!r}. Must be one of {!r}'
                                   .format(invalid_groups, valid_groups))

    # expand groups into their member tasks, then add the explicit tasks
    requested_tasks = set(task_names)
    for group in group_names:
        requested_tasks.update(groups[group])

    invalid_tasks = requested_tasks - valid_tasks
    if invalid_tasks:
        raise click.ClickException('Invalid task(s) {!r}. Must be one of {!r}'
                                   .format(invalid_tasks, valid_tasks))

    return {t: tasks[t] for t in requested_tasks}
@crossbow.command()
@click.argument('task', nargs=-1, required=False)
@click.option('--group', '-g', multiple=True,
              help='Submit task groups as defined in task.yml')
@click.option('--job-prefix', default='build',
              help='Arbitrary prefix for branch names, e.g. nightly')
@click.option('--config-path', '-c',
              type=click.Path(exists=True), default=DEFAULT_CONFIG_PATH,
              help='Task configuration yml. Defaults to tasks.yml')
@click.option('--arrow-version', '-v', default=None,
              help='Set target version explicitly')
@click.option('--arrow-repo', '-r', default=None,
              help='Set Github repo name explicitly, e.g. apache/arrow, '
                   'kszucs/arrow, this repository is going to be cloned on '
                   'the CI services. Note, that no validation happens locally '
                   'and potentially --arrow-branch and --arrow-sha must be '
                   'defined as well')
@click.option('--arrow-branch', '-b', default='master',
              help='Give the branch name explicitly, e.g. master, ARROW-1949.'
                   'Only available if --arrow-repo is set.')
@click.option('--arrow-sha', '-t', default='HEAD',
              help='Set commit SHA or Tag name explicitly, e.g. f67a515, '
                   'apache-arrow-0.11.1. Only available if both --arrow-repo '
                   '--arrow-branch are set.')
@click.option('--dry-run/--push', default=False,
              help='Just display the rendered CI configurations without '
                   'submitting them')
@click.pass_context
def submit(ctx, task, group, job_prefix, config_path, arrow_version,
           arrow_repo, arrow_branch, arrow_sha, dry_run):
    """Render the requested tasks and submit them as a job to the queue."""
    queue, arrow = ctx.obj['queue'], ctx.obj['arrow']
    if arrow_repo is not None:
        values = {'version': arrow_version,
                  'branch': arrow_branch,
                  'sha': arrow_sha}
        for k, v in values.items():
            if not v:
                raise ValueError('Must pass --arrow-{} argument'.format(k))
        # Set repo url, branch and sha explicitly - this aims to make release
        # procedure a bit simpler.
        # Note, that the target revision's crossbow templates must be
        # compatible with the locally checked out version of crossbow (which is
        # in case of the release procedure), because the templates still
        # contain some business logic (dependency installation, deployments)
        # which will be reduced to a single command in the future.
        remote = 'https://github.com/{}'.format(arrow_repo)
        target = Target(head=arrow_sha, branch=arrow_branch, remote=remote,
                        version=arrow_version)
    else:
        # instantiate target from the locally checked out repository and branch
        target = Target.from_repo(arrow, version=arrow_version)
    # parameters substituted into the artifact name patterns below
    params = {
        'version': target.version,
        'no_rc_version': target.no_rc_version,
    }
    # task and group variables are lists, containing multiple values
    tasks = {}
    task_configs = load_tasks_from_config(config_path, task, group)
    for name, task in task_configs.items():
        # replace version number and create task instance from configuration
        artifacts = task.pop('artifacts', None) or []  # because of yaml
        artifacts = [fn.format(**params) for fn in artifacts]
        tasks[name] = Task(artifacts=artifacts, **task)
    # create job instance, doesn't mutate git data yet
    job = Job(target=target, tasks=tasks)
    if dry_run:
        # print the job and every rendered CI config instead of pushing
        yaml.dump(job, sys.stdout)
        delimiter = '-' * 79
        for task_name, task in job.tasks.items():
            files = task.render_files(job=job, arrow=job.target)
            for filename, content in files.items():
                click.echo('\n\n')
                click.echo(delimiter)
                click.echo('{:<29}{:>50}'.format(task_name, filename))
                click.echo(delimiter)
                click.echo(content)
    else:
        queue.fetch()
        queue.put(job, prefix=job_prefix)
        queue.push()
        yaml.dump(job, sys.stdout)
        click.echo('Pushed job identifier is: `{}`'.format(job.branch))
@crossbow.command()
@click.argument('job-name', required=True)
@click.pass_context
def status(ctx, job_name):
    """Show CI status and artifact upload progress for a submitted job."""
    queue = ctx.obj['queue']
    queue.fetch()
    # console table layout: [status] branch  artifacts
    tpl = '[{:>7}] {:<49} {:>20}'
    header = tpl.format('status', 'branch', 'artifacts')
    click.echo(header)
    click.echo('-' * len(header))
    job = queue.get(job_name)
    statuses = queue.github_statuses(job)
    for task_name, task in sorted(job.tasks.items()):
        status = statuses[task_name]
        assets = queue.github_assets(task)
        uploaded = 'uploaded {} / {}'.format(
            sum(a in assets for a in task.artifacts),
            len(task.artifacts)
        )
        leadline = tpl.format(status.state.upper(), task.branch, uploaded)
        click.echo(click.style(leadline, fg=COLORS[status.state]))
        for artifact in task.artifacts:
            try:
                asset = assets[artifact]
            except KeyError:
                # no uploaded asset matched this artifact pattern yet
                state = 'pending' if status.state == 'pending' else 'missing'
                filename = '{:>70} '.format(artifact)
            else:
                state = 'ok'
                filename = '{:>70} '.format(asset.name)
            statemsg = '[{:>7}]'.format(state.upper())
            click.echo(filename + click.style(statemsg, fg=COLORS[state]))
def hashbytes(bytes, algoname):
    """Digest *bytes* with the hashlib algorithm named *algoname*.

    Parameters
    ----------
    bytes : bytes
        The data to digest.
    algoname : str
        Name of a constructor in the hashlib standard library module.

    Returns
    -------
    str
        Hexadecimal digest of the data.
    """
    # NOTE: the parameter name shadows the builtin ``bytes``; kept as-is
    # for backward compatibility with keyword callers.
    hasher = getattr(hashlib, algoname)()
    hasher.update(bytes)
    return hasher.hexdigest()
@crossbow.command()
@click.argument('job-name', required=True)
@click.option('-g', '--gpg-homedir', default=None,
              type=click.Path(exists=True, file_okay=False, dir_okay=True),
              help=('Full pathname to directory containing the public and '
                    'private keyrings. Default is whatever GnuPG defaults to'))
@click.option('-t', '--target-dir', default=DEFAULT_ARROW_PATH / 'packages',
              type=click.Path(file_okay=False, dir_okay=True),
              help='Directory to download the build artifacts')
@click.option('-a', '--algorithm',
              default=['sha256', 'sha512'],
              show_default=True,
              type=click.Choice(sorted(hashlib.algorithms_guaranteed)),
              multiple=True,
              help=('Algorithm(s) used to generate checksums. Pass multiple '
                    'algorithms by passing -a/--algorithm multiple times'))
@click.pass_context
def sign(ctx, job_name, gpg_homedir, target_dir, algorithm):
    """Download and sign build artifacts from github releases"""
    gpg = gnupg.GPG(gnupghome=gpg_homedir)
    # fetch the queue repository
    queue = ctx.obj['queue']
    queue.fetch()
    # query the job's artifacts
    job = queue.get(job_name)
    # downloads land under <target-dir>/<job-name>/<task-name>/
    target_dir = Path(target_dir).absolute() / job_name
    target_dir.mkdir(parents=True, exist_ok=True)
    click.echo('Download {}\'s artifacts to {}'.format(job_name, target_dir))
    tpl = '{:<10} {:>73}'
    task_items = sorted(job.tasks.items())
    ntasks = len(task_items)
    for i, (task_name, task) in enumerate(task_items, start=1):
        assets = queue.github_assets(task)
        artifact_dir = target_dir / task_name
        artifact_dir.mkdir(exist_ok=True)
        basemsg = 'Downloading and signing assets for task {}'.format(
            click.style(task_name, bold=True)
        )
        # the invisible ANSI style codes still count toward len(); the size
        # arithmetic compensates so the progress counter stays right-aligned
        click.echo(
            '\n{} {:>{size}}' .format(
                basemsg,
                click.style('{}/{}'.format(i, ntasks), bold=True),
                size=89 - (len(basemsg) + 1) + 2 * len(
                    click.style('', bold=True))
            )
        )
        click.echo('-' * 89)
        for artifact in task.artifacts:
            try:
                asset = assets[artifact]
            except KeyError:
                # the release has no asset matching this artifact pattern
                msg = click.style('[{:>13}]'.format('MISSING'),
                                  fg=COLORS['missing'])
                click.echo(tpl.format(msg, artifact))
            else:
                click.echo(click.style(artifact, bold=True))
                # download artifact
                artifact_path = artifact_dir / asset.name
                asset.download(artifact_path)
                # sign the artifact
                signature_path = Path(str(artifact_path) + '.asc')
                with artifact_path.open('rb') as fp:
                    gpg.sign_file(fp, detach=True, clearsign=False,
                                  binary=False,
                                  output=str(signature_path))
                # compute checksums for the artifact
                artifact_bytes = artifact_path.read_bytes()
                for algo in algorithm:
                    suffix = '.{}'.format(algo)
                    checksum_path = Path(str(artifact_path) + suffix)
                    checksum = '{} {}'.format(
                        hashbytes(artifact_bytes, algo), artifact_path.name
                    )
                    checksum_path.write_text(checksum)
                    msg = click.style(
                        '[{:>13}]'.format('{} HASHED'.format(algo)),
                        fg='blue'
                    )
                    click.echo(tpl.format(msg, checksum_path.name))
                msg = click.style('[{:>13}]'.format('SIGNED'), fg=COLORS['ok'])
                click.echo(tpl.format(msg, str(signature_path.name)))
if __name__ == '__main__':
    # auto_envvar_prefix lets click read defaults from CROSSBOW_* env vars
    crossbow(obj={}, auto_envvar_prefix='CROSSBOW')
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id: test_platypus_general.py 3959 2012-09-27 14:39:39Z robin $ '''
#tests and documents Page Layout API
__doc__="""This is not obvious so here's a brief explanation. This module is both
the test script and user guide for layout. Each page has two frames on it:
one for commentary, and one for demonstration objects which may be drawn in
various esoteric ways. The two functions getCommentary() and getExamples()
return the 'story' for each. The run() function gets the stories, then
builds a special "document model" in which the frames are added to each page
and drawn into.
"""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import string, copy, sys, os
from reportlab.pdfgen import canvas
from reportlab import platypus
from reportlab.platypus import BaseDocTemplate, PageTemplate, Flowable, FrameBreak
from reportlab.platypus import Paragraph, Preformatted
from reportlab.lib.units import inch, cm
from reportlab.lib.styles import PropertySet, getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from reportlab.rl_config import defaultPageSize
from reportlab.lib.utils import haveImages, _RL_DIR, rl_isfile, open_for_read
import unittest
from reportlab.lib.testutils import testsFolder
# Locate optional image fixtures; fall back to None when the files are
# missing so the test can still run without them.
if haveImages:
    _GIF = os.path.join(testsFolder,'pythonpowered.gif')
    if not rl_isfile(_GIF): _GIF = None
else:
    _GIF = None
_JPG = os.path.join(testsFolder,'..','docs','images','lj8100.jpg')
if not rl_isfile(_JPG): _JPG = None
def getFurl(fn):
    """Convert a filesystem path into a file:// URL.

    On win32, drive letters are rewritten in the old Netscape style
    (``C:/x`` becomes ``C|/x``).
    """
    url_path = fn.replace(os.sep, '/')
    if sys.platform == 'win32' and url_path[1] == ':':
        url_path = '%s|%s' % (url_path[0], url_path[2:])
    if url_path[0] != '/':
        url_path = '/' + url_path
    return 'file://' + url_path
PAGE_HEIGHT = defaultPageSize[1]  # page height in points, for absolute drawing

#################################################################
#
#  first some drawing utilities
#
#
################################################################
BASEFONT = ('Times-Roman', 10)  # default (font name, size) for the examples
def framePage(canvas,doc):
    """Draw the fixed page furniture: red margin rule, header and footer.

    NOTE(review): *doc* is unused here but the (canvas, doc) signature is
    what platypus onPage callbacks receive — confirm before changing.
    """
    #canvas.drawImage("snkanim.gif", 36, 36)
    canvas.saveState()
    # red vertical rule down the left margin
    canvas.setStrokeColorRGB(1,0,0)
    canvas.setLineWidth(5)
    canvas.line(66,72,66,PAGE_HEIGHT-72)

    # running header
    canvas.setFont('Times-Italic',12)
    canvas.drawRightString(523, PAGE_HEIGHT - 56, "Platypus User Guide and Test Script")

    # page-number footer
    canvas.setFont('Times-Roman',12)
    canvas.drawString(4 * inch, 0.75 * inch,
                        "Page %d" % canvas.getPageNumber())
    canvas.restoreState()
def getParagraphs(textBlock):
    """Within the script, it is useful to whack out a page in triple
    quotes containing separate paragraphs. This breaks one into its
    constituent paragraphs, using blank lines as the delimiter.

    Returns a list of paragraph strings (lines re-joined with newlines).
    """
    paras = []
    current = []
    for line in textBlock.split('\n'):
        if not line.strip():
            # blank line: flush the paragraph collected so far
            if current:
                paras.append('\n'.join(current))
                current = []
        else:
            current.append(line)
    # Flush the trailing paragraph.  BUG FIX: the original called
    # currentPara.join('\n') (join on the list, not the separator), which
    # raised AttributeError whenever the text didn't end with a blank line.
    if current:
        paras.append('\n'.join(current))
    return paras
def getCommentary():
    """Returns the story for the commentary - all the paragraphs.

    Builds the upper-frame story: headings, body text and code samples
    (FrameBreak flowables mark the page/frame boundaries).
    """
    styleSheet = getSampleStyleSheet()
    story = []
    story.append(Paragraph("""
PLATYPUS User Guide and Test Script
""", styleSheet['Heading1']))
    spam = """
Welcome to PLATYPUS!
Platypus stands for "Page Layout and Typography Using Scripts". It is a high
level page layout library which lets you programmatically create complex
documents with a minimum of effort.
This document is both the user guide & the output of the test script.
In other words, a script used platypus to create the document you are now
reading, and the fact that you are reading it proves that it works. Or
rather, that it worked for this script anyway. It is a first release!
Platypus is built 'on top of' PDFgen, the Python library for creating PDF
documents. To learn about PDFgen, read the document testpdfgen.pdf.
"""
    # one BodyText paragraph per blank-line-delimited chunk
    for text in getParagraphs(spam):
        story.append(Paragraph(text, styleSheet['BodyText']))
    story.append(Paragraph("""
What concepts does PLATYPUS deal with?
""", styleSheet['Heading2']))
    story.append(Paragraph("""
The central concepts in PLATYPUS are Flowable Objects, Frames, Flow
Management, Styles and Style Sheets, Paragraphs and Tables. This is
best explained in contrast to PDFgen, the layer underneath PLATYPUS.
PDFgen is a graphics library, and has primitive commans to draw lines
and strings. There is nothing in it to manage the flow of text down
the page. PLATYPUS works at the conceptual level fo a desktop publishing
package; you can write programs which deal intelligently with graphic
objects and fit them onto the page.
""", styleSheet['BodyText']))
    story.append(Paragraph("""
How is this document organized?
""", styleSheet['Heading2']))
    story.append(Paragraph("""
Since this is a test script, we'll just note how it is organized.
the top of each page contains commentary. The bottom half contains
example drawings and graphic elements to whicht he commentary will
relate. Down below, you can see the outline of a text frame, and
various bits and pieces within it. We'll explain how they work
on the next page.
""", styleSheet['BodyText']))
    story.append(FrameBreak())
    #######################################################################
    # Commentary Page 2
    #######################################################################
    story.append(Paragraph("""
Flowable Objects
""", styleSheet['Heading2']))
    spam = """
The first and most fundamental concept is that of a 'Flowable Object'.
In PDFgen, you draw stuff by calling methods of the canvas to set up
the colors, fonts and line styles, and draw the graphics primitives.
If you set the pen color to blue, everything you draw after will be
blue until you change it again. And you have to handle all of the X-Y
coordinates yourself.
A 'Flowable object' is exactly what it says. It knows how to draw itself
on the canvas, and the way it does so is totally independent of what
you drew before or after. Furthermore, it draws itself at the location
on the page you specify.
The most fundamental Flowable Objects in most documents are likely to be
paragraphs, tables, diagrams/charts and images - but there is no
restriction. You can write your own easily, and I hope that people
will start to contribute them. PINGO users - we provide a "PINGO flowable" object to let
you insert platform-independent graphics into the flow of a document.
When you write a flowable object, you inherit from Flowable and
must implement two methods. object.wrap(availWidth, availHeight) will be called by other parts of
the system, and tells you how much space you have. You should return
how much space you are going to use. For a fixed-size object, this
is trivial, but it is critical - PLATYPUS needs to figure out if things
will fit on the page before drawing them. For other objects such as paragraphs,
the height is obviously determined by the available width.
The second method is object.draw(). Here, you do whatever you want.
The Flowable base class sets things up so that you have an origin of
(0,0) for your drawing, and everything will fit nicely if you got the
height and width right. It also saves and restores the graphics state
around your calls, so you don;t have to reset all the properties you
changed.
Programs which actually draw a Flowable don't
call draw() this directly - they call object.drawOn(canvas, x, y).
So you can write code in your own coordinate system, and things
can be drawn anywhere on the page (possibly even scaled or rotated).
"""
    for text in getParagraphs(spam):
        story.append(Paragraph(text, styleSheet['BodyText']))
    story.append(FrameBreak())
    #######################################################################
    # Commentary Page 3
    #######################################################################
    story.append(Paragraph("""
Available Flowable Objects
""", styleSheet['Heading2']))
    story.append(Paragraph("""
Platypus comes with a basic set of flowable objects. Here we list their
class names and tell you what they do:
""", styleSheet['BodyText']))
    #we can use the bullet feature to do a definition list
    story.append(Paragraph("""
<para color=green bcolor=red bg=pink>This is a <font bgcolor=yellow color=red>contrived</font> object to give an example of a Flowable -
just a fixed-size box with an X through it and a centred string.</para>""",
        styleSheet['Definition'],
        bulletText='XBox ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is the basic unit of a document. Paragraphs can be finely
tuned and offer a host of properties through their associated
ParagraphStyle.""",
        styleSheet['Definition'],
        bulletText='Paragraph ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is used for printing code and other preformatted text.
There is no wrapping, and line breaks are taken where they occur.
Many paragraph style properties do not apply. You may supply
an optional 'dedent' parameter to trim a number of characters
off the front of each line.""",
        styleSheet['Definition'],
        bulletText='Preformatted ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is a straight wrapper around an external image file. By default
the image will be drawn at a scale of one pixel equals one point, and
centred in the frame. You may supply an optional width and height.""",
        styleSheet['Definition'],
        bulletText='Image ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is a table drawing class; it is intended to be simpler
than a full HTML table model yet be able to draw attractive output,
and behave intelligently when the numbers of rows and columns vary.
Still need to add the cell properties (shading, alignment, font etc.)""",
        styleSheet['Definition'],
        bulletText='Table ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is a 'null object' which merely takes up space on the page.
Use it when you want some extra padding betweene elements.""",
        styleSheet['Definition'],
        bulletText='Spacer ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
A FrameBreak causes the document to call its handle_frameEnd method.""",
        styleSheet['Definition'],
        bulletText='FrameBreak ' #hack - spot the extra space after
        ))
    story.append(Paragraph("""
This is in progress, but a macro is basically a chunk of Python code to
be evaluated when it is drawn. It could do lots of neat things.""",
        styleSheet['Definition'],
        bulletText='Macro ' #hack - spot the extra space after
        ))
    story.append(FrameBreak())
    story.append(Paragraph(
        "The next example uses a custom font",
        styleSheet['Italic']))
    # local helper: emit txt as a Code-styled Preformatted flowable
    def code(txt,story=story,styleSheet=styleSheet):
        story.append(Preformatted(txt,styleSheet['Code']))
    # the embedded-font sample below is shown as source text; the live
    # version of the same code runs in getExamples()
    code('''import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
fontDir = os.path.join(_RL_DIR,'fonts')
face = pdfmetrics.EmbeddedType1Face(os.path.join(fontDir,'DarkGardenMK.afm'),
os.path.join(fontDir,'DarkGardenMK.pfb'))
faceName = face.name # should be 'DarkGardenMK'
pdfmetrics.registerTypeFace(face)
font = pdfmetrics.Font(faceName, faceName, 'WinAnsiEncoding')
pdfmetrics.registerFont(font)
# put it inside a paragraph.
story.append(Paragraph(
"""This is an ordinary paragraph, which happens to contain
text in an embedded font:
<font name="DarkGardenMK">DarkGardenMK</font>.
Now for the real challenge...""", styleSheet['Normal']))
styRobot = ParagraphStyle('Robot', styleSheet['Normal'])
styRobot.fontSize = 16
styRobot.leading = 20
styRobot.fontName = 'DarkGardenMK'
story.append(Paragraph(
"This whole paragraph is 16-point DarkGardenMK.",
styRobot))''')
    story.append(FrameBreak())
    if _GIF:
        story.append(Paragraph("""We can use images via the file name""", styleSheet['BodyText']))
        code(''' story.append(platypus.Image('%s'))'''%_GIF)
        story.append(Paragraph("""They can also be used with a file URI or from an open python file!""", styleSheet['BodyText']))
        code(''' story.append(platypus.Image('%s'))'''% getFurl(_GIF))
        code(''' story.append(platypus.Image(open_for_read('%s','b')))''' % _GIF)
        story.append(FrameBreak())
        story.append(Paragraph("""Images can even be obtained from the internet.""", styleSheet['BodyText']))
        code(''' img = platypus.Image('http://www.reportlab.com/rsrc/encryption.gif')
story.append(img)''')
        story.append(FrameBreak())
    if _JPG:
        story.append(Paragraph("""JPEGs are a native PDF image format. They should be available even if PIL cannot be used.""", styleSheet['BodyText']))
        story.append(FrameBreak())
    return story
def getExamples():
    """Returns all the example flowable objects.

    Builds the lower-frame story: live demonstrations of paragraphs,
    XBox, tables, embedded fonts and images (FrameBreak flowables mark
    the frame boundaries).
    """
    styleSheet = getSampleStyleSheet()
    story = []
    #make a style with indents and spacing
    sty = ParagraphStyle('obvious', None)
    sty.leftIndent = 18
    sty.rightIndent = 18
    sty.firstLineIndent = 18
    sty.spaceBefore = 6
    sty.spaceAfter = 6
    story.append(Paragraph("""Now for some demo stuff - we need some on this page,
even before we explain the concepts fully""", styleSheet['BodyText']))
    p = Paragraph("""
Platypus is all about fitting objects into frames on the page. You
are looking at a fairly simple Platypus paragraph in Debug mode.
It has some gridlines drawn around it to show the left and right indents,
and the space before and after, all of which are attributes set in
the style sheet. To be specific, this paragraph has left and
right indents of 18 points, a first line indent of 36 points,
and 6 points of space before and after itself. A paragraph
object fills the width of the enclosing frame, as you would expect.""", sty)
    p.debug = 1 #show me the borders
    story.append(p)
    story.append(Paragraph("""Same but with justification 1.5 extra leading and green text.""", styleSheet['BodyText']))
    p = Paragraph("""
<para align=justify leading=+1.5 fg=green><font color=red>Platypus</font> is all about fitting objects into frames on the page. You
are looking at a fairly simple Platypus paragraph in Debug mode.
It has some gridlines drawn around it to show the left and right indents,
and the space before and after, all of which are attributes set in
the style sheet. To be specific, this paragraph has left and
right indents of 18 points, a first line indent of 36 points,
and 6 points of space before and after itself. A paragraph
object fills the width of the enclosing frame, as you would expect.</para>""", sty)
    p.debug = 1 #show me the borders
    story.append(p)
    story.append(platypus.XBox(4*inch, 0.75*inch,
        'This is a box with a fixed size'))
    story.append(Paragraph("""
All of this is being drawn within a text frame which was defined
on the page. This frame is in 'debug' mode so you can see the border,
and also see the margins which it reserves. A frame does not have
to have margins, but they have been set to 6 points each to create
a little space around the contents.
""", styleSheet['BodyText']))
    story.append(FrameBreak())
    #######################################################################
    # Examples Page 2
    #######################################################################
    story.append(Paragraph("""
Here's the base class for Flowable...
""", styleSheet['Italic']))
    code = '''class Flowable:
"""Abstract base class for things to be drawn. Key concepts:
1. It knows its size
2. It draws in its own coordinate system (this requires the
base API to provide a translate() function.
"""
def __init__(self):
self.width = 0
self.height = 0
self.wrapped = 0
def drawOn(self, canvas, x, y):
"Tell it to draw itself on the canvas. Do not override"
self.canv = canvas
self.canv.saveState()
self.canv.translate(x, y)
self.draw() #this is the bit you overload
self.canv.restoreState()
del self.canv
def wrap(self, availWidth, availHeight):
"""This will be called by the enclosing frame before objects
are asked their size, drawn or whatever. It returns the
size actually used."""
return (self.width, self.height)
'''
    story.append(Preformatted(code, styleSheet['Code'], dedent=4))
    story.append(FrameBreak())
    #######################################################################
    # Examples Page 3
    #######################################################################
    story.append(Paragraph(
        "Here are some examples of the remaining objects above.",
        styleSheet['Italic']))
    story.append(Paragraph("This is a bullet point", styleSheet['Bullet'], bulletText='O'))
    story.append(Paragraph("Another bullet point", styleSheet['Bullet'], bulletText='O'))
    story.append(Paragraph("""Here is a Table, which takes all kinds of formatting options...""",
        styleSheet['Italic']))
    story.append(platypus.Spacer(0, 12))
    # sample table: data, column widths, row heights
    g = platypus.Table(
        (('','North','South','East','West'),
        ('Quarter 1',100,200,300,400),
        ('Quarter 2',100,200,300,400),
        ('Total',200,400,600,800)),
        (72,36,36,36,36),
        (24, 16,16,18)
        )
    style = platypus.TableStyle([('ALIGN', (1,1), (-1,-1), 'RIGHT'),
        ('ALIGN', (0,0), (-1,0), 'CENTRE'),
        ('GRID', (0,0), (-1,-1), 0.25, colors.black),
        ('LINEBELOW', (0,0), (-1,0), 2, colors.black),
        ('LINEBELOW',(1,-1), (-1, -1), 2, (0.5, 0.5, 0.5)),
        ('TEXTCOLOR', (0,1), (0,-1), colors.black),
        ('BACKGROUND', (0,0), (-1,0), (0,0.7,0.7))
        ])
    g.setStyle(style)
    story.append(g)
    story.append(FrameBreak())
    #######################################################################
    # Examples Page 4 - custom fonts
    #######################################################################
    # custom font with LettError-Robot font
    import reportlab.rl_config
    reportlab.rl_config.warnOnMissingFontGlyphs = 0
    from reportlab.pdfbase import pdfmetrics
    fontDir = os.path.join(_RL_DIR,'fonts')
    face = pdfmetrics.EmbeddedType1Face(os.path.join(fontDir,'DarkGardenMK.afm'),os.path.join(fontDir,'DarkGardenMK.pfb'))
    faceName = face.name # should be 'DarkGardenMK'
    pdfmetrics.registerTypeFace(face)
    font = pdfmetrics.Font(faceName, faceName, 'WinAnsiEncoding')
    pdfmetrics.registerFont(font)
    # put it inside a paragraph.
    story.append(Paragraph(
        """This is an ordinary paragraph, which happens to contain
text in an embedded font:
<font name="DarkGardenMK">DarkGardenMK</font>.
Now for the real challenge...""", styleSheet['Normal']))
    styRobot = ParagraphStyle('Robot', styleSheet['Normal'])
    styRobot.fontSize = 16
    styRobot.leading = 20
    styRobot.fontName = 'DarkGardenMK'
    story.append(Paragraph(
        "This whole paragraph is 16-point DarkGardenMK.",
        styRobot))
    story.append(FrameBreak())
    if _GIF:
        story.append(Paragraph("Here is an Image flowable obtained from a string filename.",styleSheet['Italic']))
        story.append(platypus.Image(_GIF))
        story.append(Paragraph( "Here is an Image flowable obtained from a utf8 filename.", styleSheet['Italic']))
        story.append(Paragraph("Here is an Image flowable obtained from a string file url.",styleSheet['Italic']))
        story.append(platypus.Image(getFurl(_GIF)))
        story.append(Paragraph("Here is an Image flowable obtained from an open file.",styleSheet['Italic']))
        story.append(platypus.Image(open_for_read(_GIF,'b')))
        story.append(FrameBreak())
        try:
            img = platypus.Image('http://www.reportlab.com/rsrc/encryption.gif')
            story.append(Paragraph("Here is an Image flowable obtained from a string http url.",styleSheet['Italic']))
            story.append(img)
        except:
            # deliberately best-effort: no network access must not fail the build
            story.append(Paragraph("The image could not be obtained from a string http url.",styleSheet['Italic']))
        story.append(FrameBreak())
    if _JPG:
        img = platypus.Image(_JPG)
        story.append(Paragraph("Here is an JPEG Image flowable obtained from a filename.",styleSheet['Italic']))
        story.append(img)
        story.append(Paragraph("Here is an JPEG Image flowable obtained from an open file.",styleSheet['Italic']))
        img = platypus.Image(open_for_read(_JPG,'b'))
        story.append(img)
        story.append(FrameBreak())
    return story
class AndyTemplate(BaseDocTemplate):
    """Two-frame document template used by this test: frame 'F1' (upper)
    and frame 'F2' (lower, boundary shown).  build() interleaves two
    stories, filling one frame from each story per iteration."""
    _invalidInitArgs = ('pageTemplates',)

    def __init__(self, filename, **kw):
        # Upper frame (F1) and lower frame (F2, boundary visible).
        frame1 = platypus.Frame(inch, 5.6*inch, 6*inch, 5.2*inch,id='F1')
        frame2 = platypus.Frame(inch, inch, 6*inch, 4.5*inch, showBoundary=1,id='F2')
        self.allowSplitting = 0
        BaseDocTemplate.__init__(self,filename,**kw)
        self.addPageTemplates(PageTemplate('normal',[frame1,frame2],framePage))

    def fillFrame(self,flowables):
        """Feed flowables (consumed in place) into the current frame until
        the document moves on to another frame or the list is exhausted."""
        f = self.frame
        while len(flowables)>0 and f is self.frame:
            self.handle_flowable(flowables)

    def build(self, flowables1, flowables2):
        """Build the document, alternating the two stories frame by frame.

        :param flowables1: story for the first frame (consumed in place)
        :param flowables2: story for the second frame (consumed in place)
        """
        assert [x for x in flowables1 if not isinstance(x, Flowable)]==[], "flowables1 argument error"
        assert [x for x in flowables2 if not isinstance(x, Flowable)]==[], "flowables2 argument error"
        self._startBuild()
        # BUG FIX: the original condition tested len(flowables1) twice, so
        # any flowables2 items remaining after flowables1 was exhausted
        # were silently dropped.  Loop until BOTH stories are empty.
        while (len(flowables1) > 0 or len(flowables2) > 0):
            self.clean_hanging()
            self.fillFrame(flowables1)
            self.fillFrame(flowables2)
        self._endBuild()
def showProgress(pageNo):
    """Demonstration page callback: report build progress to stdout."""
    message = 'CALLBACK SAYS: page %d' % pageNo
    print(message)
def run():
    """Build test_platypus_general.pdf from the two stories: commentary
    in the upper frame, examples in the lower frame."""
    doc = AndyTemplate(outputfile('test_platypus_general.pdf'), subject='test0')
    #doc.setPageCallBack(showProgress)
    upper_story = getCommentary()
    lower_story = getExamples()
    doc.build(upper_story, lower_story)
class PlatypusTestCase(unittest.TestCase):
    """Make documents with lots of Platypus features."""

    def test0(self):
        """Build the full platypus demo/test document."""
        run()

    def test1(self):
        """Regression test contributed by Wietse Jacobs: a Drawing
        following a keepWithNext heading must build without error."""
        from reportlab.lib.styles import ParagraphStyle
        from reportlab.graphics.shapes import Drawing, Rect
        from reportlab.platypus import SimpleDocTemplate
        base = ParagraphStyle(name='Normal', fontName='Helvetica', fontSize=8.5, leading=11)
        heading = ParagraphStyle(name='Heading1', parent=base, fontSize=14, leading=19,
                                 spaceAfter=6, keepWithNext=1)
        drawing = Drawing(400, 200)
        drawing.add(Rect(50, 50, 300, 100))
        story = [Paragraph("The section header", heading), drawing]
        doc = SimpleDocTemplate(outputfile('test_drawing_keepwithnext.pdf'))
        doc.build(story)
def makeSuite():
    """Assemble the unittest suite covering PlatypusTestCase."""
    suite = makeSuiteForClasses(PlatypusTestCase)
    return suite
#noruntests
if __name__ == "__main__":
    # "-debug" builds the document directly; otherwise run the test suite.
    debug_requested = '-debug' in sys.argv
    if debug_requested:
        run()
    else:
        runner = unittest.TextTestRunner()
        runner.run(makeSuite())
        printLocation()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Type variable for the deserialized result threaded through ClsType.
T = TypeVar('T')
# Alias for arbitrary JSON-serializable request bodies.
JSONType = Any
# Optional per-call hook: a callable receiving the pipeline response, the
# deserialized object and a Dict[str, Any]; its return value replaces the
# operation's result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-wide serializer for path/query/header parameters; client-side
# validation is disabled here.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing SSH public keys across a subscription.

    :param subscription_id: Subscription id substituted into the URL.
    :keyword template_url: Optional override of the URL template (used to
        follow paging next links).
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameter into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: JSON responses only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing SSH public keys in one resource group.

    :param resource_group_name: Resource group substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :keyword template_url: Optional override of the URL template (used to
        follow paging next links).
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: JSON responses only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request creating an SSH public key resource.

    :param resource_group_name: Resource group substituted into the URL.
    :param ssh_public_key_name: Key name substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :keyword json: JSON-serializable request body.
    :keyword content: Raw request body (alternative to ``json``).
    :keyword content_type: Body media type; header sent only when given.
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        sshPublicKeyName=_SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body format was chosen.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request updating an SSH public key resource.

    :param resource_group_name: Resource group substituted into the URL.
    :param ssh_public_key_name: Key name substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :keyword json: JSON-serializable request body.
    :keyword content: Raw request body (alternative to ``json``).
    :keyword content_type: Body media type; header sent only when given.
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        sshPublicKeyName=_SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body format was chosen.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for an SSH public key resource.

    Unlike the other builders in this module, no Accept header is set.

    :param resource_group_name: Resource group substituted into the URL.
    :param ssh_public_key_name: Key name substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    api_version = "2021-04-01"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        sshPublicKeyName=_SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request retrieving one SSH public key resource.

    :param resource_group_name: Resource group substituted into the URL.
    :param ssh_public_key_name: Key name substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        sshPublicKeyName=_SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: JSON responses only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_generate_key_pair_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request invoking the generateKeyPair action.

    :param resource_group_name: Resource group substituted into the URL.
    :param ssh_public_key_name: Key name substituted into the URL.
    :param subscription_id: Subscription id substituted into the URL.
    :return: An HttpRequest; unconsumed kwargs are forwarded to it.
    """
    api_version = "2021-04-01"
    accept = "application/json"

    # Expand the path parameters into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair')
    url = _format_url_section(
        url,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        sshPublicKeyName=_SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: JSON responses only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class SshPublicKeysOperations(object):
"""SshPublicKeysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> Iterable["_models.SshPublicKeysGroupListResult"]:
"""Lists all of the SSH public keys in the subscription. Use the nextLink property in the response
to get the next page of SSH public keys.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_04_01.models.SshPublicKeysGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeysGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys'} # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.SshPublicKeysGroupListResult"]:
        """Lists all of the SSH public keys in the specified resource group. Use the nextLink property in
        the response to get the next page of SSH public keys.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_04_01.models.SshPublicKeysGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SshPublicKeysGroupListResult"]
        # Default mapping of ARM failure codes to typed exceptions; callers can
        # extend/override it with an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the request for one page: the first page uses this
            # operation's URL template, continuation pages use the
            # service-provided nextLink as the template instead.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Continuation requests are always plain GETs.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Convert a page response into (continuation token, iterator of items)
            # as required by ItemPaged.
            deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than 200 is mapped to a
            # typed error or raised as HttpResponseError.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys'}  # type: ignore
@distributed_trace
def create(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: "_models.SshPublicKeyResource",
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Creates a new SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to create the SSH public key.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SshPublicKeyResource')
request = build_create_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
ssh_public_key_name: str,
parameters: "_models.SshPublicKeyUpdateResource",
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Updates a new SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:param parameters: Parameters supplied to update the SSH public key.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyUpdateResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SshPublicKeyUpdateResource')
request = build_update_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> None:
"""Delete an SSH public key.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> "_models.SshPublicKeyResource":
"""Retrieves information about an SSH public key.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyResource, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
@distributed_trace
def generate_key_pair(
self,
resource_group_name: str,
ssh_public_key_name: str,
**kwargs: Any
) -> "_models.SshPublicKeyGenerateKeyPairResult":
"""Generates and returns a public/private key pair and populates the SSH public key resource with
the public key. The length of the key will be 3072 bits. This operation can only be performed
once per SSH public key resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ssh_public_key_name: The name of the SSH public key.
:type ssh_public_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SshPublicKeyGenerateKeyPairResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.SshPublicKeyGenerateKeyPairResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyGenerateKeyPairResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generate_key_pair_request(
resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
subscription_id=self._config.subscription_id,
template_url=self.generate_key_pair.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SshPublicKeyGenerateKeyPairResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_key_pair.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair'} # type: ignore
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Custom filters for use in openshift-master
'''
import copy
import sys
# pylint import-error disabled because pylint cannot find the package
# when installed in a virtualenv
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
# ansible.compat.six goes away with Ansible 2.4
try:
from ansible.compat.six import string_types, u
except ImportError:
from ansible.module_utils.six import string_types, u
import yaml
class IdentityProviderBase(object):
    """ IdentityProviderBase

        Base class for translating an identity-provider dict from the Ansible
        inventory into the structure OpenShift's master-config expects.
        Subclasses declare which keys are required/optional and whether
        unrecognized keys are allowed; ``set_provider_items`` then moves
        values from the raw idp dict into ``self.provider``.

        Attributes:
            name (str): Identity provider Name
            login (bool): Is this identity provider a login provider?
            challenge (bool): Is this identity provider a challenge provider?
            provider (dict): Provider specific config
            _idp (dict): internal copy of the IDP dict passed in
            _required (list): List of lists of strings for required attributes
            _optional (list): List of lists of strings for optional attributes
            _allow_additional (bool): Does this provider support attributes
                not in _required and _optional

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    # disabling this check since the number of instance attributes are
    # necessary for this class
    # pylint: disable=too-many-instance-attributes
    def __init__(self, api_version, idp):
        # Only the v1 master-config schema is supported.
        if api_version not in ['v1']:
            raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
        # Work on a deep copy so the caller's dict is never mutated by pop().
        self._idp = copy.deepcopy(idp)
        if 'name' not in self._idp:
            raise errors.AnsibleFilterError("|failed identity provider missing a name")
        if 'kind' not in self._idp:
            raise errors.AnsibleFilterError("|failed identity provider missing a kind")
        self.name = self._idp.pop('name')
        self.login = ansible_bool(self._idp.pop('login', False))
        self.challenge = ansible_bool(self._idp.pop('challenge', False))
        self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
        # Accept both camelCase and snake_case spellings; pop both so neither
        # is left behind as an "unknown" key (snake_case wins if both exist).
        mm_keys = ('mappingMethod', 'mapping_method')
        mapping_method = None
        for key in mm_keys:
            if key in self._idp:
                mapping_method = self._idp.pop(key)
        if mapping_method is None:
            mapping_method = self.get_default('mappingMethod')
        self.mapping_method = mapping_method
        valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
        if self.mapping_method not in valid_mapping_methods:
            raise errors.AnsibleFilterError("|failed unknown mapping method "
                                            "for provider {0}".format(self.__class__.__name__))
        # Subclasses append their own key specs to these in __init__.
        self._required = []
        self._optional = []
        self._allow_additional = True
    @staticmethod
    def validate_idp_list(idp_list, openshift_version, deployment_type):
        ''' validates a list of idps '''
        # Enterprise deployments before 3.2 and origin before 1.2 only
        # support a single login provider.
        login_providers = [x.name for x in idp_list if x.login]
        multiple_logins_unsupported = False
        if len(login_providers) > 1:
            if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
                if LooseVersion(openshift_version) < LooseVersion('3.2'):
                    multiple_logins_unsupported = True
            if deployment_type in ['origin']:
                if LooseVersion(openshift_version) < LooseVersion('1.2'):
                    multiple_logins_unsupported = True
        if multiple_logins_unsupported:
            raise errors.AnsibleFilterError("|failed multiple providers are "
                                            "not allowed for login. login "
                                            "providers: {0}".format(', '.join(login_providers)))
        # Provider names must be unique across the whole list.
        names = [x.name for x in idp_list]
        if len(set(names)) != len(names):
            raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
        for idp in idp_list:
            idp.validate()
    def validate(self):
        ''' validate an instance of this idp class '''
        # Base class accepts anything; subclasses override with real checks.
        pass
    @staticmethod
    def get_default(key):
        ''' get a default value for a given key '''
        if key == 'mappingMethod':
            return 'claim'
        else:
            return None
    def set_provider_item(self, items, required=False):
        ''' set a provider item based on the list of item names provided. '''
        # items[0] is the canonical (camelCase) key written into the provider
        # config; later entries are accepted aliases (e.g. snake_case).
        for item in items:
            provider_key = items[0]
            if item in self._idp:
                self.provider[provider_key] = self._idp.pop(item)
                break
        else:
            # for/else: no spelling was present in the idp dict, so fall back
            # to a default value, or fail when the key is required.
            default = self.get_default(provider_key)
            if default is not None:
                self.provider[provider_key] = default
            elif required:
                raise errors.AnsibleFilterError("|failed provider {0} missing "
                                                "required key {1}".format(self.__class__.__name__, provider_key))
    def set_provider_items(self):
        ''' set the provider items for this idp '''
        for items in self._required:
            self.set_provider_item(items, True)
        for items in self._optional:
            self.set_provider_item(items)
        if self._allow_additional:
            # BUGFIX: set_provider_item() pops from self._idp, so iterate a
            # snapshot of the keys -- iterating the live view raises
            # "dictionary changed size during iteration" on Python 3.
            for key in list(self._idp.keys()):
                self.set_provider_item([key])
        else:
            if len(self._idp) > 0:
                raise errors.AnsibleFilterError("|failed provider {0} "
                                                "contains unknown keys "
                                                "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
    def to_dict(self):
        ''' translate this idp to a dictionary '''
        return dict(name=self.name, challenge=self.challenge,
                    login=self.login, mappingMethod=self.mapping_method,
                    provider=self.provider)
class LDAPPasswordIdentityProvider(IdentityProviderBase):
    """ LDAPPasswordIdentityProvider

        Identity provider backed by LDAP bind authentication.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)
        self._allow_additional = False
        self._required.extend([['attributes'], ['url'], ['insecure']])
        self._optional.extend([['ca'],
                               ['bindDN', 'bind_dn'],
                               ['bindPassword', 'bind_password']])
        # Normalize 'insecure' to a real boolean (YAML often delivers strings).
        self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
        # Accept the snake_case spelling inside the attributes mapping.
        try:
            attrs = self._idp['attributes']
        except KeyError:
            pass
        else:
            if 'preferred_username' in attrs:
                attrs['preferredUsername'] = attrs.pop('preferred_username')
    def validate(self):
        ''' validate this idp instance '''
        attributes = self.provider['attributes']
        if not isinstance(attributes, dict):
            raise errors.AnsibleFilterError("|failed attributes for provider "
                                            "{0} must be a dictionary".format(self.__class__.__name__))
        known = ['id', 'email', 'name', 'preferredUsername']
        # Each configured attribute must map to a list of LDAP attributes.
        for attr in known:
            if attr in attributes and not isinstance(attributes[attr], list):
                raise errors.AnsibleFilterError("|failed {0} attribute for "
                                                "provider {1} must be a list".format(attr, self.__class__.__name__))
        extra = set(attributes.keys()) - set(known)
        if len(extra) > 0:
            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
                                            "attributes: {1}".format(self.__class__.__name__, ', '.join(extra)))
class KeystonePasswordIdentityProvider(IdentityProviderBase):
    """ KeystoneIdentityProvider

        Identity provider that validates credentials against OpenStack Keystone.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)
        # Keystone accepts no keys beyond the declared required/optional set.
        self._allow_additional = False
        self._required.extend([['url'], ['domainName', 'domain_name']])
        self._optional.extend([['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']])
class RequestHeaderIdentityProvider(IdentityProviderBase):
    """ RequestHeaderIdentityProvider

        Identity provider that trusts identities supplied in request headers.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
        self._allow_additional = False
        self._required.extend([['headers']])
        # Each optional entry lists the canonical key first, aliases after.
        self._optional.extend([['challengeURL', 'challenge_url'],
                               ['loginURL', 'login_url'],
                               ['clientCA', 'client_ca'],
                               ['clientCommonNames', 'client_common_names'],
                               ['emailHeaders', 'email_headers'],
                               ['nameHeaders', 'name_headers'],
                               ['preferredUsernameHeaders', 'preferred_username_headers']])
    def validate(self):
        ''' validate this idp instance '''
        headers = self.provider['headers']
        if not isinstance(headers, list):
            raise errors.AnsibleFilterError("|failed headers for provider {0} "
                                            "must be a list".format(self.__class__.__name__))
class AllowAllPasswordIdentityProvider(IdentityProviderBase):
    """ AllowAllPasswordIdentityProvider

        Development/test provider that accepts any username and password.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
        # This provider takes no configuration of its own.
        self._allow_additional = False
class DenyAllPasswordIdentityProvider(IdentityProviderBase):
    """ DenyAllPasswordIdentityProvider

        Provider that rejects every credential; useful for disabling logins.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
        # This provider takes no configuration of its own.
        self._allow_additional = False
class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
    """ HTPasswdPasswordIdentity

        Identity provider backed by an htpasswd file on the master.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
        self._allow_additional = False
        # Accept several spellings for the htpasswd file path.
        self._required.extend([['file', 'filename', 'fileName', 'file_name']])
    @staticmethod
    def get_default(key):
        # Supply a sensible default path for 'file'; defer everything else
        # to the base class defaults.
        if key == 'file':
            return '/etc/origin/htpasswd'
        return IdentityProviderBase.get_default(key)
class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
    """ BasicAuthPasswordIdentityProvider

        Identity provider that forwards credentials to a remote basic-auth URL.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
        self._allow_additional = False
        self._required.extend([['url']])
        self._optional.extend([['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']])
class IdentityProviderOauthBase(IdentityProviderBase):
    """ IdentityProviderOauthBase

        Shared base for OAuth-style providers (client id/secret pair).

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        super(IdentityProviderOauthBase, self).__init__(api_version, idp)
        self._allow_additional = False
        self._required.extend([['clientID', 'client_id'], ['clientSecret', 'client_secret']])
    def validate(self):
        ''' validate this idp instance '''
        # OAuth redirect flows cannot satisfy challenge authentication.
        if self.challenge:
            raise errors.AnsibleFilterError("|failed provider {0} does not "
                                            "allow challenge authentication".format(self.__class__.__name__))
class OpenIDIdentityProvider(IdentityProviderOauthBase):
    """ OpenIDIdentityProvider

        OpenID Connect identity provider: requires claims and endpoint URLs
        on top of the OAuth client id/secret.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        IdentityProviderOauthBase.__init__(self, api_version, idp)
        self._required += [['claims'], ['urls']]
        self._optional += [['ca'],
                           ['extraScopes'],
                           ['extraAuthorizeParameters']]
        # Translate snake_case aliases nested inside claims/urls to the
        # camelCase keys OpenShift expects.
        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
            pref_user = self._idp['claims'].pop('preferred_username')
            self._idp['claims']['preferredUsername'] = pref_user
        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
            user_info = self._idp['urls'].pop('user_info')
            self._idp['urls']['userInfo'] = user_info
        if 'extra_scopes' in self._idp:
            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
        if 'extra_authorize_parameters' in self._idp:
            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
        # include_granted_scopes usually arrives as a string from YAML;
        # coerce it to a proper boolean.
        if 'extraAuthorizeParameters' in self._idp:
            if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
                val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
                self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
    def validate(self):
        ''' validate this idp instance '''
        IdentityProviderOauthBase.validate(self)
        if not isinstance(self.provider['claims'], dict):
            raise errors.AnsibleFilterError("|failed claims for provider {0} "
                                            "must be a dictionary".format(self.__class__.__name__))
        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
            if var in self.provider and not isinstance(self.provider[var], var_type):
                # BUGFIX: was var_type.__class__.__name__, which is always
                # 'type'; var_type.__name__ yields the intended 'list'/'dict'.
                raise errors.AnsibleFilterError("|failed {1} for provider "
                                                "{0} must be a {2}".format(self.__class__.__name__,
                                                                           var,
                                                                           var_type.__name__))
        required_claims = ['id']
        optional_claims = ['email', 'name', 'preferredUsername']
        all_claims = required_claims + optional_claims
        # Every required claim must be present.
        for claim in required_claims:
            if claim not in self.provider['claims']:
                raise errors.AnsibleFilterError("|failed {0} claim missing "
                                                "for provider {1}".format(claim, self.__class__.__name__))
        # Any configured claim must map to a list of token claim names.
        for claim in all_claims:
            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
                raise errors.AnsibleFilterError("|failed {0} claims for "
                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
        if len(unknown_claims) > 0:
            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
        if not isinstance(self.provider['urls'], dict):
            raise errors.AnsibleFilterError("|failed urls for provider {0} "
                                            "must be a dictionary".format(self.__class__.__name__))
        required_urls = ['authorize', 'token']
        optional_urls = ['userInfo']
        all_urls = required_urls + optional_urls
        for url in required_urls:
            if url not in self.provider['urls']:
                raise errors.AnsibleFilterError("|failed {0} url missing for "
                                                "provider {1}".format(url, self.__class__.__name__))
        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
        if len(unknown_urls) > 0:
            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
class GoogleIdentityProvider(IdentityProviderOauthBase):
    """ GoogleIdentityProvider

        Google OAuth identity provider.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        IdentityProviderOauthBase.__init__(self, api_version, idp)
        # Optionally restrict logins to a single hosted domain.
        self._optional.extend([['hostedDomain', 'hosted_domain']])
class GitHubIdentityProvider(IdentityProviderOauthBase):
    """ GitHubIdentityProvider

        GitHub OAuth identity provider.

        Args:
            api_version(str): OpenShift config version
            idp (dict): idp config dict

        Raises:
            AnsibleFilterError:
    """
    def __init__(self, api_version, idp):
        IdentityProviderOauthBase.__init__(self, api_version, idp)
        # Optionally restrict logins to specific organizations or teams.
        self._optional.extend([['organizations'],
                               ['teams']])
class FilterModule(object):
    ''' Custom ansible filters for use by the openshift_master role'''
    @staticmethod
    def translate_idps(idps, api_version, openshift_version, deployment_type):
        ''' Translates a list of dictionaries into a valid identityProviders config '''
        if not isinstance(idps, list):
            raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
        module = sys.modules[__name__]
        instances = []
        for entry in idps:
            if not isinstance(entry, dict):
                raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
            # Look up the provider class by its 'kind'; unknown kinds fall
            # back to the generic base class.
            provider_cls = getattr(module, entry['kind'], None)
            if provider_cls is None:
                provider_cls = IdentityProviderBase
            instance = provider_cls(api_version, entry)
            instance.set_provider_items()
            instances.append(instance)
        IdentityProviderBase.validate_idp_list(instances, openshift_version, deployment_type)
        return u(yaml.dump([inst.to_dict() for inst in instances],
                           allow_unicode=True,
                           default_flow_style=False,
                           width=float("inf"),
                           Dumper=AnsibleDumper))
    @staticmethod
    def validate_pcs_cluster(data, masters=None):
        ''' Validates output from "pcs status", ensuring that each master
            provided is online.
            Ex: data = ('...',
                        'PCSD Status:',
                        'master1.example.com: Online',
                        'master2.example.com: Online',
                        'master3.example.com: Online',
                        '...')
                masters = ['master1.example.com',
                           'master2.example.com',
                           'master3.example.com']
               returns True
        '''
        if not issubclass(type(data), string_types):
            raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
        if not issubclass(type(masters), list):
            raise errors.AnsibleFilterError("|failed expects masters is a list")
        # Every master must show an "<host>: Online" line in the pcs output.
        return all("{0}: Online".format(master) in data for master in masters)
    @staticmethod
    def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
        ''' Return certificates to synchronize based on facts. '''
        if not issubclass(type(hostvars), dict):
            raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
        certs = ['admin.crt',
                 'admin.key',
                 'admin.kubeconfig',
                 'master.kubelet-client.crt',
                 'master.kubelet-client.key']
        if bool(include_ca):
            certs.extend(['ca.crt', 'ca.key', 'ca-bundle.crt'])
        if bool(include_keys):
            certs.extend(['serviceaccounts.private.key',
                          'serviceaccounts.public.key'])
        # Version-gated certificates, keyed off the recorded openshift facts.
        facts = hostvars['openshift']['common']
        if bool(facts['version_gte_3_1_or_1_1']):
            certs.extend(['master.proxy-client.crt',
                          'master.proxy-client.key'])
        if not bool(facts['version_gte_3_2_or_1_2']):
            certs.extend(['openshift-master.crt',
                          'openshift-master.key',
                          'openshift-master.kubeconfig'])
        if bool(facts['version_gte_3_3_or_1_3']):
            certs.extend(['service-signer.crt',
                          'service-signer.key'])
        if not bool(facts['version_gte_3_5_or_1_5']):
            certs.extend(['openshift-registry.crt',
                          'openshift-registry.key',
                          'openshift-registry.kubeconfig',
                          'openshift-router.crt',
                          'openshift-router.key',
                          'openshift-router.kubeconfig'])
        return certs
    @staticmethod
    def oo_htpasswd_users_from_file(file_contents):
        ''' return a dictionary of htpasswd users from htpasswd file contents '''
        if not isinstance(file_contents, string_types):
            raise errors.AnsibleFilterError("failed, expects to filter on a string")
        htpasswd_entries = {}
        for line in file_contents.splitlines():
            if not line:
                continue
            user = passwd = None
            if ':' in line:
                user, passwd = line.split(':', 1)
            # Both fields must be present and non-empty.
            if not user or not passwd:
                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
                raise errors.AnsibleFilterError(error_msg)
            htpasswd_entries[user] = passwd
        return htpasswd_entries
    def filters(self):
        ''' returns a mapping of filters to methods '''
        return dict(translate_idps=self.translate_idps,
                    validate_pcs_cluster=self.validate_pcs_cluster,
                    certificates_to_synchronize=self.certificates_to_synchronize,
                    oo_htpasswd_users_from_file=self.oo_htpasswd_users_from_file)
| |
"""
Views for the ``django-frequently`` application.
"""
from math import fsum
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, DetailView, ListView
from django_libs.views_mixins import AccessMixin
from .forms import EntryForm
from .models import Entry, EntryCategory, Feedback
class EntryMixin(object):
    """
    Mixin to handle and arrange the entry list.
    """
    def get_ordered_entries(self, queryset=False):
        """
        Custom ordering. First we get the average views and rating for
        the categories's entries. Second we created a rank by multiplying
        both. Last, we sort categories by this rank from top to bottom.
        Example:
        - Cat_1
            - Entry_1 (500 Views, Rating 2)
            - Entry_2 (200 Views, Rating -4)
            - Entry_3 (100 Views, Rating 3)
        - Cat_2
            - Entry_1 (200 Views, Rating 7)
            - Entry_2 (50 Views, Rating 2)
        Result:
        Cat_1 has a rank by: 88.88 (avg. views: 266.66, avg. rating: 0.33)
        Cat_2 has a rank by: 562.5 (avg. views: 125, avg. rating: 4.5)
        Cat_2 will be displayed at the top. The algorithm is quality-oriented,
        as you can see.
        """
        if queryset:
            self.queryset = queryset
        else:
            self.queryset = EntryCategory.objects.all()
        if self.queryset:
            for category in self.queryset:
                entries = category.get_entries()
                if entries:
                    # Rank = (average views per entry) * (average rating
                    # per entry); higher rank sorts first.
                    amount_list = [e.amount_of_views for e in entries]
                    rating_list = [e.rating() for e in entries]
                    views_per_entry = fsum(amount_list) / len(amount_list)
                    rating_per_entry = fsum(rating_list) / len(rating_list)
                    category.last_rank = views_per_entry * rating_per_entry
                    category.save()
                else:
                    # Categories without any entries are hidden entirely.
                    self.queryset = self.queryset.exclude(pk=category.pk)
            self.queryset = sorted(self.queryset, key=lambda c: c.last_rank,
                                   reverse=True)
        return self.queryset

    def post(self, request, *args, **kwargs):
        """
        Dispatches the different form/AJAX actions encoded in POST keys:

        * ``get_answer`` - render an entry's answer and count the view
        * ``rating_id`` - return an entry's current rating (AJAX only)
        * ``up<pk>`` / ``down<pk>`` - register a vote and create a Feedback
        * ``feedback<pk>`` - store the free-text remark on a Feedback

        Falls through to ``get()`` when no AJAX response was returned.
        """
        # "Show answer": bump the view counter and render only the answer.
        if "get_answer" in request.POST.keys():
            entry = Entry.objects.get(pk=request.POST['get_answer'])
            entry.last_view_date = timezone.now()
            entry.amount_of_views += 1
            entry.save()
            return TemplateResponse(
                request,
                'frequently/partials/answer.html',
                {
                    'entry': entry,
                    'rated_entries': self.request.session.get(
                        'rated_entries', False),
                },
            )
        self.feedback = Feedback()
        # Attach the posting user to the feedback, if a valid id was sent.
        if "user_id" in request.POST.keys():
            try:
                user_id = int(request.POST.get('user_id'))
                try:
                    self.feedback.user = User.objects.get(pk=user_id)
                except User.DoesNotExist:
                    pass
            except ValueError:
                pass
        # Plain rating lookup: respond with the entry's current rating.
        if 'rating_id' in request.POST.keys() and request.is_ajax():
            try:
                entry_id = int(request.POST.get('rating_id').replace(
                    'rating_id', ''))
                try:
                    entry = Entry.objects.get(pk=entry_id)
                    return HttpResponse(entry.rating())
                except Entry.DoesNotExist:
                    raise Http404
            except ValueError:
                raise Http404
        for key in request.POST.keys():
            if key.startswith('up') or key.startswith('down'):
                # Vote keys encode direction and entry pk, e.g. "up42".
                try:
                    entry_id = int(key.replace('up', '').replace('down', ''))
                    try:
                        entry = Entry.objects.get(pk=entry_id)
                    except Entry.DoesNotExist:
                        raise Http404
                except ValueError:
                    raise Http404
                # Remember voted entries in the session so templates can
                # hide the voting controls for already-rated entries.
                if not request.session.get('rated_entries', False):
                    request.session['rated_entries'] = []
                if entry.pk not in request.session['rated_entries']:
                    request.session['rated_entries'].append(entry.pk)
                request.session.modified = True
                self.feedback.entry = entry
                if key.startswith('up'):
                    entry.upvotes += 1
                    self.feedback.validation = "P"
                if key.startswith('down'):
                    entry.downvotes += 1
                    self.feedback.validation = "N"
                entry.save()
                self.feedback.save()
                if request.is_ajax():
                    return TemplateResponse(
                        request,
                        'frequently/partials/feedback_form.html',
                        {
                            'feedback_entry': entry.pk,
                            'feedback': self.feedback,
                        },
                    )
            elif key.startswith('feedback'):
                # Store the remark on an existing Feedback object.
                try:
                    feedback_id = int(key.replace('feedback', ''))
                    try:
                        self.feedback = Feedback.objects.get(pk=feedback_id)
                    except Feedback.DoesNotExist:
                        raise Http404
                except ValueError:
                    raise Http404
                self.feedback.remark = request.POST.get("remark")
                self.feedback.save()
                if request.is_ajax():
                    return TemplateResponse(
                        request,
                        'frequently/partials/feedback_form.html',
                        {'feedback_send': True},
                    )
        # BUGFIX: ``self.get`` is a bound method, so ``self`` must not be
        # passed again. The previous ``self.get(self, request, ...)`` call
        # shifted the view instance into the ``request`` parameter and the
        # real request into ``*args``.
        return self.get(request, *args, **kwargs)
class EntryCategoryListView(AccessMixin, EntryMixin, ListView):
    """
    Main view to display all categories and their entries.
    """
    model = EntryCategory
    template_name = "frequently/entry_list.html"
    access_mixin_setting_name = 'FREQUENTLY_ALLOW_ANONYMOUS'

    def get_queryset(self):
        """
        Customized to get the ordered categories and entries from the Mixin.
        """
        base_queryset = super(EntryCategoryListView, self).get_queryset()
        self.queryset = base_queryset
        return self.get_ordered_entries(base_queryset)
class EntryDetailView(AccessMixin, EntryMixin, DetailView):
    """
    Main view to display one entry.
    """
    model = Entry
    template_name = "frequently/entry_list.html"
    access_mixin_setting_name = 'FREQUENTLY_ALLOW_ANONYMOUS'

    def get_object(self, **kwargs):
        """Fetch the entry and register one more view on it."""
        entry = super(EntryDetailView, self).get_object(**kwargs)
        entry.last_view_date = timezone.now()
        entry.amount_of_views += 1
        entry.save()
        return entry

    def get_context_data(self, **kwargs):
        """Add rating state and the ordered category list to the context."""
        context = super(EntryDetailView, self).get_context_data(**kwargs)
        context.update({
            'rated_entries': self.request.session.get('rated_entries', False),
            'object_list': self.get_ordered_entries(),
        })
        for key in self.request.POST.keys():  # pragma: nocover
            if key.startswith('down') or key.startswith('up'):
                entry_pk = int(key.replace('up', '').replace('down', ''))
                context.update({
                    'feedback_entry': entry_pk,
                    'feedback': self.feedback,
                })
                # Stop after the first vote key; only one is expected.
                return context
        return context
class EntryCreateView(AccessMixin, CreateView):
    """
    Feedback submission form view.
    """
    model = Entry
    form_class = EntryForm
    access_mixin_setting_name = 'FREQUENTLY_ALLOW_ANONYMOUS'

    def form_valid(self, form):
        """Show a success message once the question has been saved."""
        messages.add_message(self.request, messages.SUCCESS, _(
            'Your question has been posted. Our team will review it as soon'
            ' as possible and get back to you with an answer.'))
        return super(EntryCreateView, self).form_valid(form)

    def get_form_kwargs(self):
        """Pass the current user to the form as ``owner``, if logged in."""
        kwargs = super(EntryCreateView, self).get_form_kwargs()
        # BUGFIX: ``is_authenticated`` is a property since Django 1.10 (this
        # file already requires >=1.10 via ``django.urls``); calling it as a
        # method raises TypeError on modern Django. Property access is
        # backward-compatible on older versions as well.
        if self.request.user.is_authenticated:
            kwargs.update({
                'owner': self.request.user,
            })
        return kwargs

    def get_success_url(self):
        """Redirect back to the FAQ list after a successful submission."""
        return reverse('frequently_list')
| |
"""
Implements the PSLQ algorithm for integer relation detection,
and derivative algorithms for constant recognition.
"""
from .libmp.backend import xrange
from .libmp import int_types, sqrt_fixed
# Round a fixed-point number (scaled by 2**prec) to the nearest integer
# multiple of 2**prec; ties round towards +infinity, i.e. floor(v + 0.5).
def round_fixed(x, prec):
    half = 1 << (prec - 1)
    return ((x + half) >> prec) << prec
class IdentificationMethods(object):
    # Empty mixin class; pslq, findpoly and identify are attached to it at
    # the bottom of this module so that a context class inheriting from it
    # exposes them as methods (ctx.pslq, ctx.findpoly, ctx.identify).
    pass
def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that
    .. math ::
        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}
    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.
    **Examples**
    Find rational approximations for `\pi`::
        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> pslq([-1, pi], tol=0.01)
        [22, 7]
        >>> pslq([-1, pi], tol=0.001)
        [355, 113]
        >>> mpf(22)/7; mpf(355)/113; +pi
        3.14285714285714
        3.14159292035398
        3.14159265358979
    Pi is not a rational number with denominator less than 1000::
        >>> pslq([-1, pi])
        >>>
    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::
        >>> p, q = pslq([-1, pi], maxcoeff=10**12)
        >>> print(p); print(q)
        238410049439
        75888275702
        >>> mpf(p)/q
        3.14159265358979
    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::
        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]
    **Machin formulas**
    A famous formula for `\pi` is Machin's,
    .. math ::
        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239
    There are actually infinitely many formulas of this type. Two
    others are
    .. math ::
        \frac{\pi}{4} = \operatorname{acot} 1
        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443
    We can easily verify the formulas using the PSLQ algorithm::
        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]
    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::
        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]
    We get better luck by removing linearly dependent terms::
        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]
    In other words, we found the following formula::
        >>> 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> +pi
        3.14159265358979323846264338328
    **Algorithm**
    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html
    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """
    n = len(x)
    if n < 2:
        raise ValueError("n cannot be less than 2")
    # At too low precision, the algorithm becomes meaningless
    prec = ctx.prec
    if prec < 53:
        raise ValueError("prec cannot be less than 53")
    if verbose and prec // max(2,n) < 5:
        print("Warning: precision for PSLQ may be too low")
    # Default tolerance: 3/4 of the working precision (in bits).
    target = int(prec * 0.75)
    if tol is None:
        tol = ctx.mpf(2)**(-target)
    else:
        tol = ctx.convert(tol)
    # Extra guard bits for the internal fixed-point arithmetic.
    extra = 60
    prec += extra
    if verbose:
        print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol)))
    tol = ctx.to_fixed(tol, prec)
    assert tol
    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x]
    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol//100:
        if verbose:
            print("STOPPING: (one number is too small)")
        return None
    # gamma = sqrt(4/3), the standard PSLQ parameter, in fixed-point.
    g = sqrt_fixed((4<<prec)//3, prec)
    # A and B accumulate the integer transformation matrices; H is the
    # lower-trapezoidal matrix from Bailey's pseudocode. All are stored as
    # dicts keyed by 1-based (row, column) pairs of fixed-point values.
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1
    for i in xrange(1, n+1):
        for j in xrange(1, n+1):
            A[i,j] = B[i,j] = (i==j) << prec
            H[i,j] = 0
    # step 2: partial norms s[k] = sqrt(sum of x[j]^2 for j >= k)
    s = [None] + [0] * n
    for k in xrange(1, n+1):
        t = 0
        for j in xrange(k, n+1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    t = s[1]
    # Normalize x (into y) and s by the full norm.
    y = x[:]
    for k in xrange(1, n+1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3: construct the initial H matrix
    for i in xrange(1, n+1):
        for j in xrange(i+1, n):
            H[i,j] = 0
        if i <= n-1:
            if s[i]:
                H[i,i] = (s[i+1] << prec) // s[i]
            else:
                H[i,i] = 0
        for j in range(1, i):
            sjj1 = s[j]*s[j+1]
            if sjj1:
                H[i,j] = ((-y[i]*y[j])<<prec)//sjj1
            else:
                H[i,j] = 0
    # step 4: initial full reduction of H, updating y, A, B accordingly
    for i in xrange(2, n+1):
        for j in xrange(i-1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j,j]:
                t = round_fixed((H[i,j] << prec)//H[j,j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t*y[i] >> prec)
            for k in xrange(1, j+1):
                H[i,k] = H[i,k] - (t*H[j,k] >> prec)
            for k in xrange(1, n+1):
                A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                B[k,j] = B[k,j] + (t*B[k,i] >> prec)
    # Main algorithm
    for REP in range(maxsteps):
        # Step 1: choose the pivot row m maximizing gamma^i * |H[i,i]|
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i,i]
            sz = (g**i * abs(h)) >> (prec*(i-1))
            if sz > szmax:
                m = i
                szmax = sz
        # Step 2: swap rows/columns m and m+1
        y[m], y[m+1] = y[m+1], y[m]
        for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i]
        for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i]
        for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]
        # Step 3: remove the corner element with a Givens-style rotation
        if m <= n - 2:
            t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m,m] << prec) // t0
            t2 = (H[m,m+1] << prec) // t0
            for i in xrange(m, n+1):
                t3 = H[i,m]
                t4 = H[i,m+1]
                H[i,m] = (t1*t3+t2*t4) >> prec
                H[i,m+1] = (-t2*t3+t1*t4) >> prec
        # Step 4: partial reduction of H below the pivot
        for i in xrange(m+1, n+1):
            for j in xrange(min(i-1, m+1), 0, -1):
                try:
                    t = round_fixed((H[i,j] << prec)//H[j,j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t*y[i]) >> prec)
                for k in xrange(1, j+1):
                    H[i,k] = H[i,k] - (t*H[j,k] >> prec)
                for k in xrange(1, n+1):
                    A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                    B[k,j] = B[k,j] + (t*B[k,i] >> prec)
        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff<<prec
        for i in xrange(1, n+1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print("FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1)))
                    return vec
            best_err = min(err, best_err)
        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = ctx.inf
        if verbose:
            print("%i/%i: Error: %8s Norm: %s" % \
                (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm))
        # Once the norm bound exceeds maxcoeff, no acceptable relation
        # can exist, so give up.
        if norm >= maxcoeff:
            break
    if verbose:
        print("CANCELLING after step %i/%i." % (REP, maxsteps))
        print("Could not find an integer relation. Norm bound: %s" % norm)
    return None
def findpoly(ctx, x, n=1, **kwargs):
    r"""
    ``findpoly(x, n)`` returns the coefficients of an integer
    polynomial `P` of degree at most `n` such that `P(x) \approx 0`.
    If no polynomial having `x` as a root can be found,
    :func:`~mpmath.findpoly` returns ``None``.
    :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with
    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
    `[1, x, x^2, .., x^n]` as input. Keyword arguments given to
    :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In
    particular, you can specify a tolerance for `P(x)` with ``tol``
    and a maximum permitted coefficient size with ``maxcoeff``.
    For large values of `n`, it is recommended to run :func:`~mpmath.findpoly`
    at high precision; preferably 50 digits or more.
    **Examples**
    By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear
    polynomial with a rational root::
        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> findpoly(0.7)
        [-10, 7]
    The generated coefficient list is valid input to ``polyval`` and
    ``polyroots``::
        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
        -2.0e-16
        >>> for r in polyroots(findpoly(phi, 2)):
        ...     print(r)
        ...
        -0.618033988749895
        1.61803398874989
    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
    solutions to quadratic equations. As we find here, `1+\sqrt 2`
    is a root of the polynomial `x^2 - 2x - 1`::
        >>> findpoly(1+sqrt(2), 2)
        [1, -2, -1]
        >>> findroot(lambda x: x**2 - 2*x - 1, 1)
        2.4142135623731
    Despite only containing square roots, the following number results
    in a polynomial of degree 4::
        >>> findpoly(sqrt(2)+sqrt(3), 4)
        [1, 0, -10, 0, 1]
    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of
    lower degree having `r` as a root does not exist. Given sufficient
    precision, :func:`~mpmath.findpoly` will usually find the correct
    minimal polynomial of a given algebraic number.
    **Non-algebraic numbers**
    If :func:`~mpmath.findpoly` fails to find a polynomial with given
    coefficient size and tolerance constraints, that means no such
    polynomial exists.
    We can verify that `\pi` is not an algebraic number of degree 3 with
    coefficients less than 1000::
        >>> mp.dps = 15
        >>> findpoly(pi, 3)
        >>>
    It is always possible to find an algebraic approximation of a number
    using one (or several) of the following methods:
        1. Increasing the permitted degree
        2. Allowing larger coefficients
        3. Reducing the tolerance
    One example of each method is shown below::
        >>> mp.dps = 15
        >>> findpoly(pi, 4)
        [95, -545, 863, -183, -298]
        >>> findpoly(pi, 3, maxcoeff=10000)
        [836, -1734, -2658, -457]
        >>> findpoly(pi, 3, tol=1e-7)
        [-4, 22, -29, -2]
    It is unknown whether Euler's constant is transcendental (or even
    irrational). We can use :func:`~mpmath.findpoly` to check that if is
    an algebraic number, its minimal polynomial must have degree
    at least 7 and a coefficient of magnitude at least 1000000::
        >>> mp.dps = 200
        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
        >>>
    Note that the high precision and strict tolerance is necessary
    for such high-degree runs, since otherwise unwanted low-accuracy
    approximations will be detected. It may also be necessary to set
    maxsteps high to prevent a premature exit (before the coefficient
    bound has been reached). Running with ``verbose=True`` to get an
    idea what is happening can be useful.
    """
    x = ctx.mpf(x)
    if n < 1:
        raise ValueError("n cannot be less than 1")
    # Zero is trivially the root of P(x) = x.
    if x == 0:
        return [1, 0]
    # Build the vector [1, x, x^2, ..., x^n] and look for a relation.
    powers = [ctx.mpf(1)]
    for degree in range(1, n+1):
        powers.append(x**degree)
    relation = ctx.pslq(powers, **kwargs)
    if relation is None:
        return None
    # pslq returns constant-term-first; polyval expects leading-term-first.
    return relation[::-1]
def fracgcd(p, q):
    """Reduce the fraction p/q to lowest terms; return a plain int when
    the reduced denominator is 1, otherwise the tuple (p, q)."""
    # Euclidean algorithm for gcd(p, q).
    a, b = p, q
    while b:
        a, b = b, a % b
    if a != 1:
        p //= a
        q //= a
    if q == 1:
        return p
    return (p, q)
def pslqstring(r, constants):
    """Format a PSLQ relation r = [q, p_1, ..., p_k] as the string for
    sum_i (-p_i/q)*constants[i], with each fraction in lowest terms."""
    q = r[0]
    terms = []
    for coeff, constant in zip(r[1:], constants):
        if not coeff:
            continue
        frac = fracgcd(-coeff, q)
        name = constant[1]
        # The dummy constant '1' contributes no symbolic factor.
        suffix = '' if name == '1' else '*' + name
        if isinstance(frac, int_types):
            if frac > 0:
                term = str(frac) + suffix
            else:
                term = ("(%s)" % frac) + suffix
        else:
            term = ("(%s/%s)" % frac) + suffix
        terms.append(term)
    out = ' + '.join(terms)
    # Parenthesize compound expressions so they nest safely.
    if '+' in out or '*' in out:
        out = '(' + out + ')'
    return out or '0'
def prodstring(r, constants):
    """Format a PSLQ relation over logarithms as a product/quotient of
    fractional powers of the constants; returns None if all terms vanish."""
    q = r[0]
    num = []
    den = []
    for coeff, constant in zip(r[1:], constants):
        if not coeff:
            continue
        expo = fracgcd(-coeff, q)
        base = constant[1]
        if isinstance(expo, int_types):
            factor = base if abs(expo) == 1 else '%s**%s' % (base, abs(expo))
            negative = expo < 0
        else:
            factor = '%s**(%s/%s)' % (base, abs(expo[0]), expo[1])
            negative = expo[0] < 0
        # Negative exponents go into the denominator.
        (den if negative else num).append(factor)
    num = '*'.join(num)
    den = '*'.join(den)
    if num and den:
        return "(%s)/(%s)" % (num, den)
    if num:
        return num
    if den:
        return "1/(%s)" % den
def quadraticstring(ctx,t,a,b,c):
    # Return a closed-form string for the root of a + b*x + c*x**2 = 0
    # that lies closest to t (a, b, c come from a degree-2 PSLQ run).
    # Normalize the sign of the leading coefficient; the roots of the
    # negated polynomial are identical.
    if c < 0:
        a,b,c = -a,-b,-c
    # Both roots via the quadratic formula: (-b +/- sqrt(b^2-4ac))/(2c).
    u1 = (-b+ctx.sqrt(b**2-4*a*c))/(2*c)
    u2 = (-b-ctx.sqrt(b**2-4*a*c))/(2*c)
    # Emit the formula for whichever root numerically matches t; the b == 0
    # case drops the redundant "0 +/-" prefix.
    if abs(u1-t) < abs(u2-t):
        if b:  s = '((%s+sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
        else:  s = '(sqrt(%s)/%s)' % (-4*a*c,2*c)
    else:
        if b:  s = '((%s-sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
        else:  s = '(-sqrt(%s)/%s)' % (-4*a*c,2*c)
    return s
# Transformation y = f(x,c), with inverse function x = f(y,c)
# The third entry indicates whether the transformation is
# redundant when c = 1
# Each entry is (f, inverse_template, redundant_flag). identify() applies f
# to its input and, on a match, substitutes the matched expression for $y
# and the constant's name for $c in the template to build the output string.
transforms = [
  (lambda ctx,x,c: x*c, '$y/$c', 0),
  (lambda ctx,x,c: x/c, '$c*$y', 1),
  (lambda ctx,x,c: c/x, '$c/$y', 0),
  (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0),
  (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1),
  (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0),
  (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
  (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
  (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
  (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0),
  (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1),
  (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0),
  (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1),
  (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1),
  (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1),
  (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0),
  (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1),
  (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0),
  (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1),
  (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0),
  (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0),
  (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1),
  (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0),
  (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1),
  (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0),
]
def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False,
    verbose=False):
    r"""
    Given a real number `x`, ``identify(x)`` attempts to find an exact
    formula for `x`. This formula is returned as a string. If no match
    is found, ``None`` is returned. With ``full=True``, a list of
    matching formulas is returned.
    As a simple example, :func:`~mpmath.identify` will find an algebraic
    formula for the golden ratio::
        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> identify(phi)
        '((1+sqrt(5))/2)'
    :func:`~mpmath.identify` can identify simple algebraic numbers and simple
    combinations of given base constants, as well as certain basic
    transformations thereof. More specifically, :func:`~mpmath.identify`
    looks for the following:
        1. Fractions
        2. Quadratic algebraic numbers
        3. Rational linear combinations of the base constants
        4. Any of the above after first transforming `x` into `f(x)` where
           `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either
           directly or with `x` or `f(x)` multiplied or divided by one of
           the base constants
        5. Products of fractional powers of the base constants and
           small integers
    Base constants can be given as a list of strings representing mpmath
    expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical
    values and use the original strings for the output), or as a dict of
    formula:value pairs.
    In order not to produce spurious results, :func:`~mpmath.identify` should
    be used with high precision; preferably 50 digits or more.
    **Examples**
    Simple identifications can be performed safely at standard
    precision. Here the default recognition of rational, algebraic,
    and exp/log of algebraic numbers is demonstrated::
        >>> mp.dps = 15
        >>> identify(0.22222222222222222)
        '(2/9)'
        >>> identify(1.9662210973805663)
        'sqrt(((24+sqrt(48))/8))'
        >>> identify(4.1132503787829275)
        'exp((sqrt(8)/2))'
        >>> identify(0.881373587019543)
        'log(((2+sqrt(8))/2))'
    By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard
    precision it finds a not too useful approximation. At slightly
    increased precision, this approximation is no longer accurate
    enough and :func:`~mpmath.identify` more correctly returns ``None``::
        >>> identify(pi)
        '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))'
        >>> mp.dps = 30
        >>> identify(pi)
        >>>
    Numbers such as `\pi`, and simple combinations of user-defined
    constants, can be identified if they are provided explicitly::
        >>> identify(3*pi-2*e, ['pi', 'e'])
        '(3*pi + (-2)*e)'
    Here is an example using a dict of constants. Note that the
    constants need not be "atomic"; :func:`~mpmath.identify` can just
    as well express the given number in terms of expressions
    given by formulas::
        >>> identify(pi+e, {'a':pi+2, 'b':2*e})
        '((-2) + 1*a + (1/2)*b)'
    Next, we attempt some identifications with a set of base constants.
    It is necessary to increase the precision a bit.
        >>> mp.dps = 50
        >>> base = ['sqrt(2)','pi','log(2)']
        >>> identify(0.25, base)
        '(1/4)'
        >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base)
        '(2*sqrt(2) + 3*pi + (5/7)*log(2))'
        >>> identify(exp(pi+2), base)
        'exp((2 + 1*pi))'
        >>> identify(1/(3+sqrt(2)), base)
        '((3/7) + (-1/7)*sqrt(2))'
        >>> identify(sqrt(2)/(3*pi+4), base)
        'sqrt(2)/(4 + 3*pi)'
        >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base)
        '5**(1/3)*pi*log(2)**2'
    An example of an erroneous solution being found when too low
    precision is used::
        >>> mp.dps = 15
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))'
        >>> mp.dps = 50
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '1/(3*pi + (-4)*e + 2*sqrt(2))'
    **Finding approximate solutions**
    The tolerance ``tol`` defaults to 3/4 of the working precision.
    Lowering the tolerance is useful for finding approximate matches.
    We can for example try to generate approximations for pi::
        >>> mp.dps = 15
        >>> identify(pi, tol=1e-2)
        '(22/7)'
        >>> identify(pi, tol=1e-3)
        '(355/113)'
        >>> identify(pi, tol=1e-10)
        '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))'
    With ``full=True``, and by supplying a few base constants,
    ``identify`` can generate almost endless lists of approximations
    for any number (the output below has been truncated to show only
    the first few)::
        >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True):
        ...     print(p)
        ...    # doctest: +ELLIPSIS
        e/log((6 + (-4/3)*e))
        (3**3*5*e*catalan**2)/(2*7**2)
        sqrt(((-13) + 1*e + 22*catalan))
        log(((-6) + 24*e + 4*catalan)/e)
        exp(catalan*((-1/5) + (8/15)*e))
        catalan*(6 + (-6)*e + 15*catalan)
        sqrt((5 + 26*e + (-3)*catalan))/e
        e*sqrt(((-27) + 2*e + 25*catalan))
        log(((-1) + (-11)*e + 59*catalan))
        ((3/20) + (21/20)*e + (3/20)*catalan)
        ...
    The numerical values are roughly as close to `\pi` as permitted by the
    specified tolerance:
        >>> e/log(6-4*e/3)
        3.14157719846001
        >>> 135*e*catalan**2/98
        3.14166950419369
        >>> sqrt(e-13+22*catalan)
        3.14158000062992
        >>> log(24*e-6+4*catalan)-1
        3.14158791577159
    **Symbolic processing**
    The output formula can be evaluated as a Python expression.
    Note however that if fractions (like '2/3') are present in
    the formula, Python's :func:`~mpmath.eval()` may erroneously perform
    integer division. Note also that the output is not necessarily
    in the algebraically simplest form::
        >>> identify(sqrt(2))
        '(sqrt(8)/2)'
    As a solution to both problems, consider using SymPy's
    :func:`~mpmath.sympify` to convert the formula into a symbolic expression.
    SymPy can be used to pretty-print or further simplify the formula
    symbolically::
        >>> from sympy import sympify # doctest: +SKIP
        >>> sympify(identify(sqrt(2))) # doctest: +SKIP
        2**(1/2)
    Sometimes :func:`~mpmath.identify` can simplify an expression further than
    a symbolic algorithm::
        >>> from sympy import simplify # doctest: +SKIP
        >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)') # doctest: +SKIP
        >>> x # doctest: +SKIP
        (3/2 - 5**(1/2)/2)**(-1/2)
        >>> x = simplify(x) # doctest: +SKIP
        >>> x # doctest: +SKIP
        2/(6 - 2*5**(1/2))**(1/2)
        >>> mp.dps = 30 # doctest: +SKIP
        >>> x = sympify(identify(x.evalf(30))) # doctest: +SKIP
        >>> x # doctest: +SKIP
        1/2 + 5**(1/2)/2
    (In fact, this functionality is available directly in SymPy as the
    function :func:`~mpmath.nsimplify`, which is essentially a wrapper for
    :func:`~mpmath.identify`.)
    **Miscellaneous issues and limitations**
    The input `x` must be a real number. All base constants must be
    positive real numbers and must not be rationals or rational linear
    combinations of each other.
    The worst-case computation time grows quickly with the number of
    base constants. Already with 3 or 4 base constants,
    :func:`~mpmath.identify` may require several seconds to finish. To search
    for relations among a large number of constants, you should
    consider using :func:`~mpmath.pslq` directly.
    The extended transformations are applied to x, not the constants
    separately. As a result, ``identify`` will for example be able to
    recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but
    not ``2*exp(pi)+3``. It will be able to recognize the latter if
    ``exp(pi)`` is given explicitly as a base constant.
    """
    solutions = []
    # Collect every matching formula (and report it in verbose mode).
    def addsolution(s):
        if verbose: print("Found: ", s)
        solutions.append(s)
    x = ctx.mpf(x)
    # Further along, x will be assumed positive
    if x == 0:
        if full: return ['0']
        else: return '0'
    # Negative input: identify |x| and wrap the result in a negation.
    if x < 0:
        sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose)
        if sol is None:
            return sol
        if full:
            return ["-(%s)"%s for s in sol]
        else:
            return "-(%s)" % sol
    if tol:
        tol = ctx.mpf(tol)
    else:
        tol = ctx.eps**0.7
    M = maxcoeff
    # Normalize the constants into a list of (value, name) pairs. String
    # constants are eval'd in a namespace built from the context's
    # attributes so e.g. 'pi' resolves to ctx.pi.
    if constants:
        if isinstance(constants, dict):
            constants = [(ctx.mpf(v), name) for (name, v) in sorted(constants.items())]
        else:
            namespace = dict((name, getattr(ctx,name)) for name in dir(ctx))
            constants = [(eval(p, namespace), p) for p in constants]
    else:
        constants = []
    # We always want to find at least rational terms
    if 1 not in [value for (name, value) in constants]:
        constants = [(ctx.mpf(1), '1')] + constants
    # PSLQ with simple algebraic and functional transformations
    for ft, ftn, red in transforms:
        for c, cn in constants:
            # Skip transforms that are redundant for the dummy constant 1.
            if red and cn == '1':
                continue
            t = ft(ctx,x,c)
            # Prevent exponential transforms from wreaking havoc
            if abs(t) > M**2 or abs(t) < tol:
                continue
            # Linear combination of base constants
            r = ctx.pslq([t] + [a[0] for a in constants], tol, M)
            s = None
            if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
                s = pslqstring(r, constants)
            # Quadratic algebraic numbers
            else:
                q = ctx.pslq([ctx.one, t, t**2], tol, M)
                if q is not None and len(q) == 3 and q[2]:
                    aa, bb, cc = q
                    if max(abs(aa),abs(bb),abs(cc)) <= M:
                        s = quadraticstring(ctx,t,aa,bb,cc)
            if s:
                # Substitute the match into the inverse-transform template.
                if cn == '1' and ('/$c' in ftn):
                    s = ftn.replace('$y', s).replace('/$c', '')
                else:
                    s = ftn.replace('$y', s).replace('$c', cn)
                addsolution(s)
                if not full: return solutions[0]
        if verbose:
            print(".")
    # Check for a direct multiplicative formula
    if x != 1:
        # Allow fractional powers of fractions
        ilogs = [2,3,5,7]
        # Watch out for existing fractional powers of fractions
        logs = []
        for a, s in constants:
            if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs):
                logs.append((ctx.ln(a), s))
        logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs
        # A relation among logarithms corresponds to a product formula.
        r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M)
        if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
            addsolution(prodstring(r, logs))
            if not full: return solutions[0]
    if full:
        return sorted(solutions, key=len)
    else:
        return None
# Attach the algorithms to the mixin so that context classes inheriting
# from IdentificationMethods expose them as bound methods.
IdentificationMethods.pslq = pslq
IdentificationMethods.findpoly = findpoly
IdentificationMethods.identify = identify
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
| |
""".. Ignore pydocstyle D400.
.. autoclass:: resolwe.flow.executors.docker.run.FlowExecutor
:members:
"""
# pylint: disable=logging-format-interpolation
import asyncio
import json
import logging
import os
import shlex
import tempfile
import time
from asyncio import subprocess
from . import constants
from ..global_settings import PROCESS_META, SETTINGS
from ..local.run import FlowExecutor as LocalFlowExecutor
from ..protocol import ExecutorFiles
from .seccomp import SECCOMP_POLICY
# Startup timeout for the Docker container, presumably in seconds — it is
# not referenced in this portion of the module; TODO confirm usage.
DOCKER_START_TIMEOUT = 60
# Limits of containers' access to memory. We set the limit to ensure
# processes are stable and do not get killed by OOM signal.
DOCKER_MEMORY_HARD_LIMIT_BUFFER = 100  # megabytes added on top of the process memory limit
DOCKER_MEMORY_SWAP_RATIO = 2  # swap limit = process memory limit * this ratio (megabytes)
DOCKER_MEMORY_SWAPPINESS = 1  # low swappiness keeps swap usage (and slowdown) minimal
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
class FlowExecutor(LocalFlowExecutor):
"""Docker executor."""
name = 'docker'
def __init__(self, *args, **kwargs):
"""Initialize attributes."""
super().__init__(*args, **kwargs)
self.container_name_prefix = None
self.tools_volumes = None
self.temporary_files = []
self.command = SETTINGS.get('FLOW_DOCKER_COMMAND', 'docker')
def _generate_container_name(self):
"""Generate unique container name."""
return '{}_{}'.format(self.container_name_prefix, self.data_id)
async def start(self):
"""Start process execution."""
# arguments passed to the Docker command
command_args = {
'command': self.command,
'container_image': self.requirements.get('image', constants.DEFAULT_CONTAINER_IMAGE),
}
# Get limit defaults.
limit_defaults = SETTINGS.get('FLOW_PROCESS_RESOURCE_DEFAULTS', {})
# Set resource limits.
limits = []
# Each core is equivalent to 1024 CPU shares. The default for Docker containers
# is 1024 shares (we don't need to explicitly set that).
limits.append('--cpu-shares={}'.format(int(self.process['resource_limits']['cores']) * 1024))
# Some SWAP is needed to avoid OOM signal. Swappiness is low to prevent
# extensive usage of SWAP (this would reduce the performance).
memory = self.process['resource_limits']['memory'] + DOCKER_MEMORY_HARD_LIMIT_BUFFER
memory_swap = int(self.process['resource_limits']['memory'] * DOCKER_MEMORY_SWAP_RATIO)
limits.append('--memory={}m'.format(memory))
limits.append('--memory-swap={}m'.format(memory_swap))
limits.append('--memory-reservation={}m'.format(self.process['resource_limits']['memory']))
limits.append('--memory-swappiness={}'.format(DOCKER_MEMORY_SWAPPINESS))
# Set ulimits for interactive processes to prevent them from running too long.
if self.process['scheduling_class'] == PROCESS_META['SCHEDULING_CLASS_INTERACTIVE']:
# TODO: This is not very good as each child gets the same limit.
limits.append('--ulimit cpu={}'.format(limit_defaults.get('cpu_time_interactive', 30)))
command_args['limits'] = ' '.join(limits)
# set container name
self.container_name_prefix = SETTINGS.get('FLOW_EXECUTOR', {}).get('CONTAINER_NAME_PREFIX', 'resolwe')
command_args['container_name'] = '--name={}'.format(self._generate_container_name())
if 'network' in self.resources:
# Configure Docker network mode for the container (if specified).
# By default, current Docker versions use the 'bridge' mode which
# creates a network stack on the default Docker bridge.
network = SETTINGS.get('FLOW_EXECUTOR', {}).get('NETWORK', '')
command_args['network'] = '--net={}'.format(network) if network else ''
else:
# No network if not specified.
command_args['network'] = '--net=none'
# Security options.
security = []
# Generate and set seccomp policy to limit syscalls.
policy_file = tempfile.NamedTemporaryFile(mode='w')
json.dump(SECCOMP_POLICY, policy_file)
policy_file.file.flush()
if not SETTINGS.get('FLOW_DOCKER_DISABLE_SECCOMP', False):
security.append('--security-opt seccomp={}'.format(policy_file.name))
self.temporary_files.append(policy_file)
# Drop all capabilities and only add ones that are needed.
security.append('--cap-drop=all')
command_args['security'] = ' '.join(security)
# Setup Docker volumes.
def new_volume(kind, base_dir_name, volume, path=None, read_only=True):
"""Generate a new volume entry.
:param kind: Kind of volume, which is used for getting extra options from
settings (the ``FLOW_DOCKER_VOLUME_EXTRA_OPTIONS`` setting)
:param base_dir_name: Name of base directory setting for volume source path
:param volume: Destination volume mount point
:param path: Optional additional path atoms appended to source path
:param read_only: True to make the volume read-only
"""
if path is None:
path = []
path = [str(atom) for atom in path]
options = set(SETTINGS.get('FLOW_DOCKER_VOLUME_EXTRA_OPTIONS', {}).get(kind, '').split(','))
options.discard('')
# Do not allow modification of read-only option.
options.discard('ro')
options.discard('rw')
if read_only:
options.add('ro')
else:
options.add('rw')
return {
'src': os.path.join(SETTINGS['FLOW_EXECUTOR'].get(base_dir_name, ''), *path),
'dest': volume,
'options': ','.join(options),
}
volumes = [
new_volume('data', 'DATA_DIR', constants.DATA_VOLUME, [self.data_id], read_only=False),
new_volume('data_all', 'DATA_DIR', constants.DATA_ALL_VOLUME),
new_volume('upload', 'UPLOAD_DIR', constants.UPLOAD_VOLUME, read_only=False),
new_volume('secrets', 'RUNTIME_DIR', constants.SECRETS_VOLUME, [self.data_id, ExecutorFiles.SECRETS_DIR]),
]
# Generate dummy passwd and create mappings for it. This is required because some tools
# inside the container may try to lookup the given UID/GID and will crash if they don't
# exist. So we create minimal user/group files.
passwd_file = tempfile.NamedTemporaryFile(mode='w')
passwd_file.write('root:x:0:0:root:/root:/bin/bash\n')
passwd_file.write('user:x:{}:{}:user:/:/bin/bash\n'.format(os.getuid(), os.getgid()))
passwd_file.file.flush()
self.temporary_files.append(passwd_file)
group_file = tempfile.NamedTemporaryFile(mode='w')
group_file.write('root:x:0:\n')
group_file.write('user:x:{}:user\n'.format(os.getgid()))
group_file.file.flush()
self.temporary_files.append(group_file)
volumes += [
new_volume('users', None, '/etc/passwd', [passwd_file.name]),
new_volume('users', None, '/etc/group', [group_file.name]),
]
# Create volumes for tools.
# NOTE: To prevent processes tampering with tools, all tools are mounted read-only
self.tools_volumes = []
for index, tool in enumerate(self.get_tools_paths()):
self.tools_volumes.append(new_volume(
'tools',
None,
os.path.join('/usr/local/bin/resolwe', str(index)),
[tool]
))
volumes += self.tools_volumes
# Create volumes for runtime (all read-only).
runtime_volume_maps = SETTINGS.get('RUNTIME_VOLUME_MAPS', None)
if runtime_volume_maps:
for src, dst in runtime_volume_maps.items():
volumes.append(new_volume(
'runtime',
'RUNTIME_DIR',
dst,
[self.data_id, src],
))
# Add any extra volumes verbatim.
volumes += SETTINGS.get('FLOW_DOCKER_EXTRA_VOLUMES', [])
# Create Docker --volume parameters from volumes.
command_args['volumes'] = ' '.join(['--volume="{src}":"{dest}":{options}'.format(**volume)
for volume in volumes])
# Set working directory to the data volume.
command_args['workdir'] = '--workdir={}'.format(constants.DATA_VOLUME)
# Change user inside the container.
command_args['user'] = '--user={}:{}'.format(os.getuid(), os.getgid())
# A non-login Bash shell should be used here (a subshell will be spawned later).
command_args['shell'] = '/bin/bash'
pull_command = '{command} pull {container_image}'.format(**command_args)
logger.info("Pulling docker image: {}".format(command_args['container_image']))
pull_proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member
*shlex.split(pull_command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
_, stderr = await pull_proc.communicate()
if pull_proc.returncode != 0:
error_msg = "Docker failed to pull {} image.".format(command_args['container_image'])
if stderr:
error_msg = '\n'.join([error_msg, stderr.decode('utf-8')])
raise RuntimeError(error_msg)
docker_command = (
'{command} run --rm --interactive {container_name} {network} {volumes} {limits} '
'{security} {workdir} {user} {container_image} {shell}'.format(**command_args)
)
logger.info("Starting docker container with command: {}".format(docker_command))
start_time = time.time()
# Workaround for pylint issue #1469
# (https://github.com/PyCQA/pylint/issues/1469).
self.proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member
*shlex.split(docker_command),
limit=4 * (2 ** 20), # 4MB buffer size for line buffering
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout = []
async def wait_for_container():
"""Wait for Docker container to start to avoid blocking the code that uses it."""
self.proc.stdin.write(('echo PING' + os.linesep).encode('utf-8'))
await self.proc.stdin.drain()
while True:
line = await self.proc.stdout.readline()
stdout.append(line)
if line.rstrip() == b'PING':
break
if self.proc.stdout.at_eof():
raise RuntimeError()
try:
await asyncio.wait_for(wait_for_container(), timeout=DOCKER_START_TIMEOUT)
except (asyncio.TimeoutError, RuntimeError):
error_msg = "Docker container has not started for {} seconds.".format(DOCKER_START_TIMEOUT)
stdout = ''.join([line.decode('utf-8') for line in stdout if line])
if stdout:
error_msg = '\n'.join([error_msg, stdout])
raise RuntimeError(error_msg)
end_time = time.time()
logger.info("It took {:.2f}s for Docker container to start".format(end_time - start_time))
self.stdout = self.proc.stdout
async def run_script(self, script):
    """Execute the script and save results."""
    # Make every mounted tool directory reachable from the script via PATH.
    path_export = 'export PATH=$PATH:{}'.format(
        ':'.join(volume["dest"] for volume in self.tools_volumes))
    # Spawn a nested login Bash (sources /etc/profile) so the script does not
    # run as PID 1, which has special signal handling (cannot be SIGKILL-ed
    # from inside the container).
    stdin = self.proc.stdin
    stdin.write(('/bin/bash --login; exit $?' + os.linesep).encode('utf-8'))
    await stdin.drain()
    # Prefix the payload with tracing ('set -x') and brace-expansion off
    # ('set +B'), then the PATH export, then the user script itself.
    payload = os.linesep.join(['set -x', 'set +B', path_export, script]) + os.linesep
    stdin.write(payload.encode('utf-8'))
    await stdin.drain()
    # Closing stdin signals end-of-input to the nested shell.
    stdin.close()
async def end(self):
    """End process execution and return the container's exit code."""
    try:
        await self.proc.wait()
    finally:
        # Always release the temporary files (seccomp policy, passwd/group
        # stubs), even if waiting on the process raised.
        for handle in self.temporary_files:
            handle.close()
        self.temporary_files = []
    return self.proc.returncode
async def terminate(self):
    """Terminate a running script."""
    # Force-remove the container; waiting on our subprocess alone is not
    # enough because Docker keeps the container alive independently.
    # Workaround for pylint issue #1469
    # (https://github.com/PyCQA/pylint/issues/1469).
    removal = '{} rm -f {}'.format(self.command, self._generate_container_name())
    remover = await subprocess.create_subprocess_exec(  # pylint: disable=no-member
        *shlex.split(removal)
    )
    await remover.wait()
    await self.proc.wait()
    await super().terminate()
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
# Copyright (c) 2012, Perceptive Automation, LLC. All rights reserved.
# http://www.perceptiveautomation.com
#
# Note that python version 2.5.6 is in use by Indigo.
#
import indigo
import os
import sys
import time
from Protocol import *
from Magiccolor import *
# Note the "indigo" module is automatically imported and made available inside
# our global name space by the host process.
################################################################################
class Plugin(indigo.PluginBase):
    """Indigo plugin exposing a Magiccolor LED controller as a dimmer/relay.

    The dimmer's brightness value is deliberately overloaded to select the
    controller's program number so the standard Indigo brightness slider
    can drive program selection.

    Note: targets the Python 2.5 runtime embedded in Indigo.
    """
    ########################################
    def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
        indigo.PluginBase.__init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
        self.debug = True

    def __del__(self):
        indigo.PluginBase.__del__(self)

    ########################################
    def startup(self):
        self.debugLog(u"startup called - python version %s " % sys.version)

    def shutdown(self):
        self.debugLog(u"shutdown called")

    ########################################
    def validateDeviceConfigUi(self, valuesDict, typeId, devId):
        # No device-specific validation required; accept the dialog values.
        return (True, valuesDict)

    ########################################
    # Relay / Dimmer Action callback
    ######################
    def actionControlDimmerRelay(self, action, dev):
        """Dispatch on/off/toggle/brightness actions to the controller."""
        m = Magiccolor()
        m.connect()
        p = Protocol()
        ###### TURN ON ######
        if action.deviceAction == indigo.kDimmerRelayAction.TurnOn:
            # Command hardware module (dev) to turn ON here:
            p.keyNum = p.MODE_ON
            p.keyValue = p.findProgram("WHITE")
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                # If success then log that the command was successfully sent.
                indigo.server.log(u"sent \"%s\" %s" % (dev.name, "on"))
                # And then tell the Indigo Server to update the state.
                dev.updateStateOnServer("onOffState", True)
            else:
                # Else log failure but do NOT update state on Indigo Server.
                indigo.server.log(u"send \"%s\" %s failed" % (dev.name, "on"), isError=True)
        ###### TURN OFF ######
        elif action.deviceAction == indigo.kDimmerRelayAction.TurnOff:
            # Command hardware module (dev) to turn OFF here:
            p.keyNum = p.MODE_OFF
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                indigo.server.log(u"sent \"%s\" (%s) %s" % (dev.name, dev.address, "off") )
                dev.updateStateOnServer("onOffState", False)
            else:
                indigo.server.log(u"send \"%s\" %s failed" % (dev.name, "off"), isError=True)
        ###### TOGGLE ######
        elif action.deviceAction == indigo.kDimmerRelayAction.Toggle:
            # Command hardware module (dev) to toggle here:
            newOnState = not dev.onState
            if newOnState:
                p.keyNum = p.MODE_ON
                p.keyValue = p.findProgram("WHITE")
            else:
                p.keyNum = p.MODE_OFF
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                indigo.server.log(u"sent \"%s\" %s" % (dev.name, "toggle"))
                dev.updateStateOnServer("onOffState", newOnState)
            else:
                indigo.server.log(u"send \"%s\" %s failed" % (dev.name, "toggle"), isError=True)
        ###### SET BRIGHTNESS ######
        elif action.deviceAction == indigo.kDimmerRelayAction.SetBrightness:
            # We're kinda lame here. We actually use brightness as a program
            # number so that the slider works.
            newBrightness = action.actionValue
            p.keyNum = p.MODE_ON
            p.keyValue = newBrightness
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "set brightness", newBrightness))
                dev.updateStateOnServer("brightnessLevel", newBrightness)
            else:
                indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "set brightness", newBrightness), isError=True)
        ###### BRIGHTEN BY ######
        elif action.deviceAction == indigo.kDimmerRelayAction.BrightenBy:
            # Command hardware module (dev) to do a relative brighten here.
            # BUG FIX: clamp to 100 BEFORE sending, so the hardware never
            # receives an out-of-range value (mirrors the DimBy branch,
            # which already clamps before sending).
            newBrightness = dev.brightness + action.actionValue
            if newBrightness > 100:
                newBrightness = 100
            p.keyNum = p.MODE_ON
            p.keyValue = newBrightness
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "brighten", newBrightness))
                dev.updateStateOnServer("brightnessLevel", newBrightness)
            else:
                indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "brighten", newBrightness), isError=True)
        ###### DIM BY ######
        elif action.deviceAction == indigo.kDimmerRelayAction.DimBy:
            # Command hardware module (dev) to do a relative dim here:
            newBrightness = dev.brightness - action.actionValue
            if newBrightness < 0:
                newBrightness = 0
            p.keyNum = p.MODE_ON
            p.keyValue = newBrightness
            m.sendMsg(p)
            sendSuccess = True  # Set to False if it failed.
            if sendSuccess:
                indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "dim", newBrightness))
                dev.updateStateOnServer("brightnessLevel", newBrightness)
            else:
                indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "dim", newBrightness), isError=True)

    ########################################
    # General Action callback
    ######################
    def actionControlGeneral(self, action, dev):
        """Log unsupported general actions (beep, energy, status)."""
        ###### BEEP ######
        if action.deviceAction == indigo.kDeviceGeneralAction.Beep:
            indigo.server.log(u"does not support \"%s\" %s" % (dev.name, "beep request"))
        ###### ENERGY UPDATE ######
        elif action.deviceAction == indigo.kDeviceGeneralAction.EnergyUpdate:
            indigo.server.log(u"does not support \"%s\" %s" % (dev.name, "energy update request"))
        ###### ENERGY RESET ######
        elif action.deviceAction == indigo.kDeviceGeneralAction.EnergyReset:
            indigo.server.log(u"does not support \"%s\" %s" % (dev.name, "energy reset request"))
        ###### STATUS REQUEST ######
        elif action.deviceAction == indigo.kDeviceGeneralAction.RequestStatus:
            indigo.server.log(u"does not support \"%s\" %s" % (dev.name, "status request"))

    ########################################
    # Custom Plugin Action callbacks (defined in Actions.xml)
    ######################
    def setProgramNumber(self, pluginAction, dev):
        """Select a controller program number from an Indigo action."""
        try:
            newValue = int(pluginAction.props.get(u"programNumber", 100))
        except ValueError:
            # The int() cast above might fail if the user didn't enter a number:
            indigo.server.log(u"set program Number to device \"%s\" -- invalid value" % (dev.name), isError=True)
            return
        m = Magiccolor()
        m.connect()
        p = Protocol()
        p.keyNum = p.MODE_ON
        p.keyValue = newValue
        m.sendMsg(p)
        sendSuccess = True  # Set to False if it failed.
        if sendSuccess:
            indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "set program number", newValue))
            dev.updateStateOnServer("programNumber", newValue)
        else:
            indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "set program number", newValue), isError=True)

    def setSpeed(self, pluginAction, dev):
        """Set the controller's animation speed from an Indigo action."""
        try:
            newValue = int(pluginAction.props.get(u"speed", 100))
        except ValueError:
            # The int() cast above might fail if the user didn't enter a number:
            indigo.server.log(u"set speed to device \"%s\" -- invalid value" % (dev.name), isError=True)
            return
        m = Magiccolor()
        m.connect()
        p = Protocol()
        p.keyNum = p.MODE_SPEED
        p.keyValue = newValue
        m.sendMsg(p)
        sendSuccess = True  # Set to False if it failed.
        if sendSuccess:
            indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "set speed", newValue))
            # NOTE(review): updates "programNumber", not a speed state --
            # looks like a copy-paste from setProgramNumber; confirm against
            # the Devices.xml state list before changing.
            dev.updateStateOnServer("programNumber", newValue)
        else:
            indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "set speed", newValue), isError=True)

    def setPause(self, pluginAction, dev):
        """Pause or resume the running controller program."""
        newValue = pluginAction.props.get(u"pauseRun", False)
        m = Magiccolor()
        m.connect()
        p = Protocol()
        p.keyNum = p.MODE_PAUSE
        # NOTE(review): debug trace of the raw checkbox value.
        indigo.server.log("%d" % newValue)
        if newValue:
            p.keyValue = 1
        else:
            p.keyValue = 0
        m.sendMsg(p)
        sendSuccess = True  # Set to False if it failed.
        if sendSuccess:
            indigo.server.log(u"sent \"%s\" %s to %d" % (dev.name, "pause", newValue))
            dev.updateStateOnServer("pauseRun", newValue)
        else:
            indigo.server.log(u"send \"%s\" %s to %d failed" % (dev.name, "set pause", newValue), isError=True)
| |
"""plotting module for orbital
This implementation was inspired by poliastro (c) 2012 Juan Luis Cano (BSD License)
"""
# encoding: utf-8
from __future__ import absolute_import, division, print_function
from copy import copy
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d.axes3d import Axes3D
from numpy import cos, sin
from scipy.constants import kilo, pi
from .maneuver import TimeOperation
from .utilities import (
lookahead, orbit_radius, saved_state, uvw_from_elements)
__all__ = [
'plot2d',
'plot3d',
'plot',
'Plotter2D',
'Plotter3D',
]
def plot2d(orbit, title='', maneuver=None, animate=False, speedup=5000):
    """Convenience function to 2D plot orbit in a new figure."""
    plotter = Plotter2D()
    if not animate:
        # Still plot; nothing to hand back to the caller.
        plotter.plot(orbit, title=title, maneuver=maneuver)
        return None
    # Animated plot: the FuncAnimation must be returned so it stays alive.
    return plotter.animate(orbit, title=title, speedup=speedup)
def plot3d(orbit, title='', maneuver=None, animate=False, speedup=5000):
    """Convenience function to 3D plot orbit in a new figure."""
    plotter = Plotter3D()
    if not animate:
        # Still plot; nothing to hand back to the caller.
        plotter.plot(orbit, title=title, maneuver=maneuver)
        return None
    # Animated plot: the FuncAnimation must be returned so it stays alive.
    return plotter.animate(orbit, title=title, speedup=speedup)
# Plain ``plot`` defaults to the 2D variant.
plot = plot2d
class Plotter2D():
    """2D Plotter.

    Handles still and animated plots of an orbit, drawn in the orbit's
    perifocal (p, q) frame with distances converted to kilometres.
    """
    def __init__(self, axes=None, num_points=100):
        # Reuse the caller's axes (and parent figure) if given, otherwise
        # create a new figure with a single subplot.
        if axes:
            self.fig = axes.get_figure()
        else:
            self.fig = plt.figure()
            axes = self.fig.add_subplot(111)
        self.axes = axes
        self.axes.set_aspect(1)
        self.axes.set_xlabel("$p$ [km]")
        self.axes.set_ylabel("$q$ [km]")
        # Angular sampling density used when tracing orbit arcs
        # (num_points points per full revolution).
        self.points_per_rad = num_points / (2 * pi)
    def plot(self, orbit, maneuver=None, title=''):
        """Draw the body, the orbit, and (optionally) each maneuver stage."""
        self._plot_body(orbit)
        if maneuver is None:
            self._plot_orbit(orbit)
            self.pos_dot = self._plot_position(orbit)
        else:
            self._plot_orbit(orbit, label='Initial orbit')
            self.propagate_counter = 1
            # Pair each (orbit, operation) with the following operation so an
            # operation can see what comes next when drawing itself.
            states = lookahead(
                orbit.apply_maneuver(maneuver, iter=True, copy=True),
                fillvalue=(None, None))
            # saved_state restores the orbit after plotting mutates it.
            with saved_state(orbit):
                for (orbit, operation), (_, next_operation) in states:
                    with saved_state(orbit):
                        operation.plot(orbit, self, next_operation)
            self.axes.legend()
        self.axes.set_title(title)
    def animate(self, orbit, speedup=5000, title=''):
        """Return a FuncAnimation of one orbital period, sped up by *speedup*."""
        # Copy orbit, because it will be modified in the animation callback.
        orbit = copy(orbit)
        self.plot(orbit)
        # Semi-latus rectum of the (fixed) conic.
        p = orbit.a * (1 - orbit.e ** 2)
        def fpos(f):
            # Perifocal position at true anomaly f, converted to km.
            pos = np.array([cos(f), sin(f), 0 * f]) * p / (1 + orbit.e * cos(f))
            pos /= kilo
            return pos
        # One period lasts T/speedup wall-clock seconds at ~30 fps.
        time_per_orbit = orbit.T / speedup
        interval = 1000 / 30
        times = np.linspace(orbit.t, orbit.t + orbit.T, int(time_per_orbit * 30))
        def animate(i):
            # Advance the orbit to the i-th sample time and move the marker.
            orbit.t = times[i - 1]
            pos = fpos(orbit.f)
            self.pos_dot.set_data(pos[0], pos[1])
            return self.pos_dot
        self.axes.set_title(title)
        # blit=True causes an error on OS X, disable for now.
        ani = animation.FuncAnimation(
            self.fig, animate, len(times), interval=interval, blit=False)
        return ani
    @staticmethod
    def _perifocal_coords(orbit, f):
        # Conic equation r = p / (1 + e*cos(f)) in the perifocal frame, in km.
        p = orbit.a * (1 - orbit.e ** 2)
        pos = np.array([cos(f), sin(f), 0 * f]) * p / (1 + orbit.e * cos(f))
        pos /= kilo
        return pos
    def _plot_orbit(self, orbit, f1=0, f2=2 * pi, label=None):
        """Trace the orbit arc from true anomaly f1 to f2 as a dashed line."""
        if f2 < f1:
            f2 += 2 * pi
        num_points = self.points_per_rad * (f2 - f1)
        f = np.linspace(f1, f2, int(num_points))
        pos = self._perifocal_coords(orbit, f)
        self.axes.plot(pos[0, :], pos[1, :], '--', linewidth=1, label=label)
    def _plot_position(self, orbit, f=None, propagated=False, label=None):
        """Mark the position at true anomaly *f* (default: orbit's current f)."""
        if f is None:
            f = orbit.f
        pos = self._perifocal_coords(orbit, f)
        if propagated:
            # Propagated markers are auto-numbered; a caller-supplied label
            # would conflict with that.
            if label is not None:
                raise TypeError('propagated flag sets label automatically')
            label = 'Propagated position {}'.format(self.propagate_counter)
            self.propagate_counter += 1
        pos_dot, = self.axes.plot(
            pos[0], pos[1], 'o', label=label)
        return pos_dot
    def _plot_body(self, orbit):
        """Draw the central body as a filled circle at the focus."""
        color = '#EBEBEB'
        if orbit.body.plot_color is not None:
            color = orbit.body.plot_color
        self.axes.add_patch(Circle((0, 0), orbit.body.mean_radius / kilo,
                                   linewidth=0, color=color))
class Plotter3D(object):
    """3D Plotter.

    Handles still and animated plots of an orbit in inertial (x, y, z)
    coordinates, with distances converted to kilometres.
    """
    def __init__(self, axes=None, num_points=100):
        # Reuse the caller's 3D axes (and parent figure) if given, otherwise
        # create a new figure with a single 3D subplot.
        if axes:
            self.fig = axes.get_figure()
        else:
            self.fig = plt.figure()
            axes = self.fig.add_subplot(111, projection='3d')
        self.axes = axes
        self.axes.set_xlabel("$x$ [km]")
        self.axes.set_ylabel("$y$ [km]")
        self.axes.set_zlabel("$z$ [km]")
        # These are used to fix aspect ratio of final plot.
        # See Plotter3D._force_aspect()
        self._coords_x = np.array(0)
        self._coords_y = np.array(0)
        self._coords_z = np.array(0)
        # Angular sampling density used when tracing orbit arcs.
        self.points_per_rad = num_points / (2 * pi)
    def plot(self, orbit, maneuver=None, title=''):
        """Draw the body, the orbit, and (optionally) each maneuver stage."""
        self._plot_body(orbit)
        if maneuver is None:
            self._plot_orbit(orbit)
            self.pos_dot = self._plot_position(orbit)
        else:
            self._plot_orbit(orbit, label='Initial orbit')
            self.propagate_counter = 1
            # Pair each (orbit, operation) with the following operation so an
            # operation can see what comes next when drawing itself.
            states = lookahead(
                orbit.apply_maneuver(maneuver, iter=True, copy=True),
                fillvalue=(None, None))
            # saved_state restores the orbit after plotting mutates it.
            with saved_state(orbit):
                for (orbit, operation), (_, next_operation) in states:
                    with saved_state(orbit):
                        operation.plot(orbit, self, next_operation)
            self.axes.legend()
        self.axes.set_title(title)
        self._force_aspect()
    def animate(self, orbit, speedup=5000, title=''):
        """Return a FuncAnimation of one orbital period, sped up by *speedup*."""
        # Copy orbit, because it will be modified in the animation callback.
        orbit = copy(orbit)
        self.plot(orbit)
        # NOTE(review): num_points and f below are computed but never used
        # in this method (fpos is always called with orbit.f) -- looks like
        # leftovers; confirm before removing.
        num_points = self.points_per_rad * 2 * pi
        f = np.linspace(0, 2 * pi, int(num_points))
        def fpos(f):
            # Inertial position at true anomaly f, converted to km.
            U, _, _ = uvw_from_elements(orbit.i, orbit.raan, orbit.arg_pe, f)
            pos = orbit_radius(orbit.a, orbit.e, f) * U
            pos /= kilo
            return pos[0], pos[1], pos[2]
        # One period lasts T/speedup wall-clock seconds at ~30 fps.
        time_per_orbit = orbit.T / speedup
        interval = 1000 / 30
        times = np.linspace(orbit.t, orbit.t + orbit.T, int(time_per_orbit * 30))
        def animate(i):
            # Advance the orbit to the i-th sample time and move the marker.
            orbit.t = times[i - 1]
            x, y, z = fpos(orbit.f)
            self.pos_dot.set_data([x], [y])
            self.pos_dot.set_3d_properties([z])
            return self.pos_dot
        self.axes.set_title(title)
        # blit=True causes an error on OS X, disable for now.
        ani = animation.FuncAnimation(
            self.fig, animate, len(times), interval=interval, blit=False)
        return ani
    @staticmethod
    def _xyz_coords(orbit, f):
        # Rotate the radial unit vector U into the inertial frame and scale
        # by the orbit radius; result in km.
        U, _, _ = uvw_from_elements(orbit.i, orbit.raan, orbit.arg_pe, f)
        pos = orbit_radius(orbit.a, orbit.e, f) * U
        pos /= kilo
        return pos
    def _plot_orbit(self, orbit, f1=0, f2=2 * pi, label=None):
        """Trace the orbit arc from true anomaly f1 to f2 as a dashed line."""
        if f2 < f1:
            f2 += 2 * pi
        num_points = self.points_per_rad * (f2 - f1)
        f = np.linspace(f1, f2, int(num_points))
        pos = self._xyz_coords(orbit, f)
        x, y, z = pos[0, :], pos[1, :], pos[2, :]
        self.axes.plot(x, y, z, '--', linewidth=1, label=label)
        self._append_coords_for_aspect(x, y, z)
    def _plot_position(self, orbit, f=None, propagated=False, label=None):
        """Mark the position at true anomaly *f* (default: orbit's current f)."""
        if f is None:
            f = orbit.f
        pos = self._xyz_coords(orbit, f)
        x, y, z = pos[0], pos[1], pos[2]
        if propagated:
            # Propagated markers are auto-numbered; a caller-supplied label
            # would conflict with that.
            if label is not None:
                raise TypeError('propagated flag sets label automatically')
            label = 'Propagated position {}'.format(self.propagate_counter)
            self.propagate_counter += 1
        pos_dot, = self.axes.plot(
            [x], [y], [z], 'o', label=label)
        return pos_dot
    def _plot_body(self, orbit):
        """Draw the central body as a shaded sphere at the origin."""
        color = '#EBEBEB'
        if orbit.body.plot_color is not None:
            color = orbit.body.plot_color
        # Parametric sphere of the body's mean radius, in km.
        u = np.linspace(0, 2 * np.pi, 100)
        v = np.linspace(0, np.pi, 50)
        cx = orbit.body.mean_radius * np.outer(np.cos(u), np.sin(v))
        cy = orbit.body.mean_radius * np.outer(np.sin(u), np.sin(v))
        cz = orbit.body.mean_radius * np.outer(np.ones(np.size(u)), np.cos(v))
        cx, cy, cz = cx / kilo, cy / kilo, cz / kilo
        self.axes.plot_surface(cx, cy, cz, rstride=5, cstride=5, color=color,
                               edgecolors='#ADADAD', shade=False)
    def _append_coords_for_aspect(self, x, y, z):
        # Accumulate all plotted coordinates so _force_aspect() can compute
        # a bounding cube covering everything drawn so far.
        self._coords_x = np.append(self._coords_x, x)
        self._coords_y = np.append(self._coords_y, y)
        self._coords_z = np.append(self._coords_z, z)
    def _force_aspect(self):
        # Thanks to the following SO answer, we can make sure axes are equal
        # http://stackoverflow.com/a/13701747/2093785
        # Create cubic bounding box to simulate equal aspect ratio
        x = self._coords_x
        y = self._coords_y
        z = self._coords_z
        max_range = np.array([x.max() - x.min(),
                              y.max() - y.min(),
                              z.max() - z.min()]).max()
        Xb = (0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() +
              0.5 * (x.max() + x.min()))
        Yb = (0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() +
              0.5 * (y.max() + y.min()))
        Zb = (0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() +
              0.5 * (z.max() + z.min()))
        # Plot invisible (white) corner points to stretch the axes limits.
        for xb, yb, zb in zip(Xb, Yb, Zb):
            self.axes.plot([xb], [yb], [zb], 'w')
| |
##
## MCMC sampler for Mixture-of-Isoforms (MISO) model
##
## Yarden Katz <yarden@mit.edu>
##
## The sampler uses a Metropolis-Hastings sampling scheme, combined with
## a Gibbs sampling step.
##
import scipy
import misopy
from misopy.reads_utils import count_aligned_reads, \
count_isoform_assignments
from misopy.read_simulator import simulate_reads, print_reads_summary, \
read_counts_to_read_list, \
get_reads_summary
import misopy.hypothesis_test as ht
from misopy.Gene import Gene, Exon
from misopy.py2c_gene import *
# C MISO interface
import pysplicing
from scipy import *
from numpy import *
import cPickle as pickle
from scipy.stats import mode
import math
import time
from numpy import numarray
import os
import sys
from collections import defaultdict
import glob
import logging
import logging.handlers
# Cache of already-created loggers, keyed by name, so repeated calls do
# not attach duplicate handlers.
loggers = {}

def get_logger(logger_name, log_outdir,
               level=logging.WARNING,
               include_stdout=True):
    """
    Return a logging object.

    Loggers are cached in the module-level ``loggers`` dict; a repeated
    call with the same name returns the same object without touching the
    filesystem or attaching another handler.
    """
    global loggers
    # Check the cache first: avoids redundantly re-creating the output
    # directory on every call (the original did makedirs before the
    # cache check) and avoids a double dict lookup.
    cached = loggers.get(logger_name)
    if cached:
        return cached
    # Avoid race-conditions when several processes create the directory.
    try:
        os.makedirs(log_outdir)
    except OSError:
        pass
    logger = logging.getLogger(logger_name)
    formatter = \
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                          datefmt='%m/%d/%Y %I:%M:%S %p')
    # File logging is intentionally disabled; output goes to stdout only.
    logging.root.setLevel(level)
    # Optionally add handler that streams all logs
    # to stdout
    if include_stdout:
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(level)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    logger.info("Created logger %s" %(logger_name))
    loggers.update({logger_name: logger})
    return logger
##
## Helper statistics/linear algebra functions
##
def set_diag(a, v):
    """Set each diagonal entry a[i, i] of the numpy-style matrix *a* to *v*.

    Mutates *a* in place and also returns it for convenience.
    """
    for row_idx in range(len(a)):
        a[row_idx, row_idx] = v
    return a
def maxi(l):
    """
    Return the index of the first occurrence of the maximum of *l*.

    Raises ValueError on an empty sequence (as ``max`` does).
    """
    # Single pass instead of max() followed by a linear scan; ``max``
    # keeps the first element among equal keys, so ties resolve to the
    # lowest index exactly as the original loop did.
    return max(enumerate(l), key=lambda pair: pair[1])[0]
def mini(l):
    """
    Return the index of the first occurrence of the minimum of *l*.

    Raises ValueError on an empty sequence (as ``min`` does).
    """
    # Single pass instead of min() followed by a linear scan; ``min``
    # keeps the first element among equal keys, so ties resolve to the
    # lowest index exactly as the original loop did.
    return min(enumerate(l), key=lambda pair: pair[1])[0]
def exp_logsumexp(a):
    """Return exp(a - logsumexp(a)), i.e. *a* exponentiated and normalized
    to sum to one, computed stably in log-space."""
    total = logsumexp(a)
    return exp(a - total)
def vect_logsumexp(a, axis=None):
    """Numerically stable log-sum-exp of *a*, optionally along *axis*."""
    if axis is None:
        # Use the scipy.maxentropy version.
        return logsumexp(a)
    arr = asarray(a)
    # Keep the reduced axis as size 1 so the per-axis max broadcasts back
    # against the original array.
    reduced_shape = list(arr.shape)
    reduced_shape[axis] = 1
    peak = arr.max(axis=axis)
    # Classic shift-by-max trick: log(sum(exp(a))) = max + log(sum(exp(a - max))).
    return peak + log(exp(arr - peak.reshape(reduced_shape)).sum(axis=axis))
def print_assignment_summary(assignments):
    """
    Print how many reads were assigned to each isoform.

    :param assignments: iterable of isoform indices, one per read.
    """
    counts = defaultdict(int)
    for a in assignments:
        counts[a] += 1
    # items() (not the Python-2-only iteritems()) and a parenthesized
    # single-argument print keep this working under both Python 2 and 3.
    for k, v in counts.items():
        print("Total of %d in isoform %d" %(v, k))
def float_array_to_str(array_of_floats):
    """
    Convert a float numpy array to a string for printing purposes.

    Each value is rendered with three decimal places, space-separated,
    inside square brackets.
    """
    rendered = ['%.3f' %(val) for val in array_of_floats]
    return '[' + ' '.join(rendered) + ']'
def get_paired_end_sampler_params(num_isoforms,
                                  mean_frag_len,
                                  frag_variance,
                                  read_len,
                                  overhang_len=1):
    """
    Return parameters for MISO sampler, in paired-end mode.

    :param num_isoforms: number of isoforms (sets the proposal dimension).
    :param mean_frag_len: mean of the insert-length distribution.
    :param frag_variance: variance of the insert-length distribution.
    :param read_len: read length in bases.
    :param overhang_len: minimum junction overhang length.
    """
    # Diagonal variance of the drift-proposal covariance matrix.
    # (An unused local `hyperparameters` array was removed from the
    # original implementation.)
    proposal_diag = 0.05
    sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
                     proposal_diag)
    sampler_params = {'read_len': read_len,
                      'overhang_len': overhang_len,
                      'uniform_proposal': False,
                      'sigma_proposal': sigma,
                      'mean_frag_len': mean_frag_len,
                      'frag_variance': frag_variance}
    return sampler_params
def get_single_end_sampler_params(num_isoforms,
                                  read_len,
                                  overhang_len=1):
    """
    Return parameters for MISO sampler, in single-end mode.

    :param num_isoforms: number of isoforms (sets the proposal dimension).
    :param read_len: read length in bases.
    :param overhang_len: minimum junction overhang length.
    """
    # Diagonal variance of the drift-proposal covariance matrix.
    # (An unused local `hyperparameters` array was removed from the
    # original implementation.)
    proposal_diag = 0.05
    sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
                     proposal_diag)
    sampler_params = {'read_len': read_len,
                      'overhang_len': overhang_len,
                      'uniform_proposal': False,
                      'sigma_proposal': sigma}
    return sampler_params
class MISOSampler:
def __init__(self, params,
             paired_end=False,
             log_dir=None):
    """
    Make a sampler with the given parameters.

    :param params: dict of sampler parameters; must contain
        ``mean_frag_len`` and ``frag_variance`` in paired-end mode.
    :param paired_end: True when sampling from paired-end reads.
    :param log_dir: directory under which a ``logs`` subdirectory is
        created for this sampler's logger.
    """
    self.params = params
    self.paired_end = paired_end
    # set default fragment length distribution parameters
    if self.paired_end:
        if ((not 'mean_frag_len' in self.params) or \
            (not 'frag_variance' in self.params)):
            # Parenthesized raise keeps this valid under Python 2 and 3
            # (the original used the Python-2-only `raise Exc, msg` form).
            raise Exception("Must set mean_frag_len and frag_variance when "
                            "running in sampler on paired-end data.")
        self.mean_frag_len = self.params['mean_frag_len']
        self.frag_variance = self.params['frag_variance']
    if log_dir is not None:
        # Bug fix: build the 'logs' path from the expanded absolute path.
        # The original computed abspath(expanduser(log_dir)) and then
        # immediately discarded it by joining the raw log_dir instead.
        self.log_dir = os.path.abspath(os.path.expanduser(log_dir))
        self.log_dir = os.path.join(self.log_dir, 'logs')
        # Avoid race-conditions
        try:
            os.makedirs(self.log_dir)
        except OSError:
            pass
    # NOTE(review): if log_dir is None, self.log_dir is never set and this
    # call raises AttributeError -- matches the visible original behavior;
    # confirm callers always pass log_dir.
    self.miso_logger = get_logger('miso_logger', self.log_dir)
    self.miso_logger.info("Instantiated sampler.")
def run_sampler(self, num_iters, reads, gene, hyperparameters, params,
                output_file,
                num_chains=6,
                burn_in=1000,
                lag=2,
                prior_params=None,
                # By default, use sampler with read classes (collapsed)
                # to get speed boost for single-end reads
                # (To revert to old reassigning sampler, use
                # pysplicing.MISO_ALGO_REASSIGN)
                algorithm=pysplicing.MISO_ALGO_CLASSES,
                start_cond=pysplicing.MISO_START_AUTO,
                stop_cond=pysplicing.MISO_STOP_FIXEDNO,
                verbose=True):
    """
    Fast version of MISO MCMC sampler.

    Calls C version (pysplicing) and returns results; samples are written
    to ``output_file`` + ".miso" via output_miso_results().

    :param num_iters: number of MCMC iterations.
    :param reads: (read_positions, read_cigars) pair.
    :param gene: Gene object whose isoforms are sampled over.
    :param hyperparameters: unused here; prior comes from prior_params.
    :param params: sampler parameter dict (see get_*_sampler_params).
    :param prior_params: Dirichlet prior; defaults to all-ones (symmetric).
    """
    num_isoforms = len(gene.isoforms)
    self.num_isoforms = num_isoforms
    if prior_params == None:
        # Default: symmetric Dirichlet prior over isoforms.
        prior_params = (1.0,) * num_isoforms
    read_positions = reads[0]
    read_cigars = reads[1]
    # NOTE(review): leftover debug output -- prints every read position.
    print read_positions
    self.num_reads = len(read_positions)
    if self.num_reads == 0:
        # Nothing to sample from.
        print "No reads for gene: %s" %(gene.label)
        return
    output_file = output_file + ".miso"
    # If output filename exists, don't run sampler
    if os.path.isfile(os.path.normpath(output_file)):
        print "Output filename %s exists, not running MISO." \
            %(output_file)
        return None
    # Record the run configuration on the shared params dict.
    self.params['iters'] = num_iters
    self.params['burn_in'] = burn_in
    self.params['lag'] = lag
    # Define local variables related to reads and overhang
    self.overhang_len = self.params['overhang_len']
    self.read_len = self.params['read_len']
    t1 = 0
    t2 = 0
    if verbose:
        t1 = time.time()
    #self.miso_logger.info("Running sampler...")
    #self.miso_logger.info("  - num_iters: " + str(num_iters))
    #self.miso_logger.info("  - burn-in: " + str(burn_in))
    #self.miso_logger.info("  - lag: " + str(lag))
    #self.miso_logger.info("  - paired-end? " + str(self.paired_end))
    #self.miso_logger.info("  - gene: " + str(gene))
    rejected_proposals = 0
    accepted_proposals = 0
    psi_vectors = []
    all_psi_proposals = []
    # proposal_type is recorded in the output header for provenance.
    if params['uniform_proposal']:
        self.miso_logger.debug("UNIFORM independent proposal being used.")
        proposal_type = "unif"
    else:
        self.miso_logger.debug("Non-uniform proposal being used.")
        self.miso_logger.debug("  - sigma_proposal: " + str(params['sigma_proposal']))
        proposal_type = "drift"
    init_psi = ones(num_isoforms)/float(num_isoforms)
    # Do not process genes with one isoform
    if num_isoforms == 1:
        one_iso_msg = "Gene %s has only one isoform; skipping..." \
            %(gene.label)
        self.miso_logger.warning(one_iso_msg)
        return
    # Convert Python Gene object to C
    c_gene = py2c_gene(gene)
    ##
    ## Run C MISO
    ##
    # The C layer expects 1-based read positions.
    read_positions = tuple([r+1 for r in read_positions])
    if self.paired_end:
        # Number of standard deviations in insert length
        # distribution to consider when assigning reads
        # to isoforms
        num_sds = 4L
        # Run paired-end
        miso_results = pysplicing.MISOPaired(c_gene, 0L,
                                             read_positions,
                                             read_cigars,
                                             long(self.read_len),
                                             float(self.mean_frag_len),
                                             float(self.frag_variance),
                                             float(num_sds),
                                             long(num_iters),
                                             long(burn_in),
                                             long(lag),
                                             prior_params,
                                             long(self.overhang_len),
                                             long(num_chains),
                                             start_cond,
                                             stop_cond)
    else:
        # Run single-end
        miso_results = pysplicing.MISO(c_gene,
                                       0L,
                                       read_positions,
                                       read_cigars,
                                       long(self.read_len),
                                       long(num_iters),
                                       long(burn_in),
                                       long(lag),
                                       prior_params,
                                       long(self.overhang_len),
                                       long(num_chains),
                                       start_cond,
                                       stop_cond,
                                       algorithm)
    # Unpack the positional result tuple from the C sampler.
    # Psi samples
    psi_vectors = transpose(array(miso_results[0]))
    # Log scores of accepted samples
    kept_log_scores = transpose(array(miso_results[1]))
    # Read classes
    read_classes = miso_results[2]
    # Read class statistics
    read_class_data = miso_results[3]
    # Assignments of reads to isoforms
    assignments = miso_results[4]
    # Statistics and parameters about sampler run
    run_stats = miso_results[5]
    # Assignments of reads to classes.
    # read_classes[n] represents the read class that has
    # read_assignments[n]-many reads.
    reads_data = (read_classes, read_class_data)
    assignments = array(assignments)
    # Skip events where all reads are incompatible with the annotation;
    # do not output a file for those.
    if all(assignments == -1):
        print "All reads incompatible with annotation, skipping..."
        return
    accepted_proposals = run_stats[4]
    rejected_proposals = run_stats[5]
    percent_acceptance = (float(accepted_proposals)/(accepted_proposals + \
        rejected_proposals)) * 100
    #self.miso_logger.info("Percent acceptance (including burn-in): %.4f" %(percent_acceptance))
    #self.miso_logger.info("Number of iterations recorded: %d" %(len(psi_vectors)))
    # Write MISO output to file
    print "Outputting samples to: %s..." %(output_file)
    self.miso_logger.info("Outputting samples to: %s" %(output_file))
    self.output_miso_results(output_file, gene, reads_data, assignments,
                             psi_vectors, kept_log_scores, num_iters,
                             burn_in, lag, percent_acceptance,
                             proposal_type)
    if verbose:
        t2 = time.time()
        print "Event took %.2f seconds" %(t2 - t1)
def output_miso_results(self, output_file, gene, reads_data, assignments,
                        psi_vectors, kept_log_scores, num_iters, burn_in,
                        lag, percent_acceptance, proposal_type):
    """
    Output results of MISO to a file.

    Writes a single '#'-prefixed header line describing the isoforms, exon
    lengths, sampler settings (iters/burn-in/lag/acceptance/proposal), read
    class counts and per-isoform assigned counts, followed by one
    tab-separated line per kept sample: the comma-joined psi vector and its
    log score.

    Parameters:
      output_file: path of the file to (over)write.
      gene: gene object; its isoforms/parts/chrom/strand feed the header.
      reads_data: (read_classes, read_class_counts) pair.
      assignments: per-read isoform assignments (used via
                   count_isoform_assignments for the header).
      psi_vectors: kept psi samples, one vector per recorded iteration.
      kept_log_scores: log score of each kept sample (parallel to psi_vectors).
      num_iters, burn_in, lag: sampler settings, echoed into the header.
      percent_acceptance: acceptance rate, echoed into the header.
      proposal_type: proposal label, echoed into the header.
    """
    output = open(output_file, 'w')
    # Get a string representation of the isoforms - use '_'
    # in the delimiter regardless
    iso_delim = '_'
    # iso.desc may be a list of labels or a single string; either way
    # produce a Python-list-like string of quoted isoform names.
    if type(gene.isoforms[0].desc) == list:
        str_isoforms = '[' + ",".join(["\'" + iso_delim.join(iso.desc) + "\'" \
                                       for iso in gene.isoforms]) + ']'
    else:
        str_isoforms = '[' + ",".join(["\'" + iso.desc + "\'" \
                                       for iso in gene.isoforms]) + ']'
    num_isoforms = len(gene.isoforms)
    # And of the exon lengths, rendered as ('label',len) pairs
    exon_lens = ",".join(["(\'%s\',%d)" %(p.label, p.len) \
                          for p in gene.parts])
    ## Compile header with information about isoforms and internal parameters used
    ## by the sampler, and also information about read counts and number of
    ## reads assigned to each isoform.
    read_classes, read_class_counts = reads_data
    read_counts_list = []
    for class_num, class_type in enumerate(read_classes):
        class_counts = read_class_counts[class_num]
        # Get the read class type in string format (no spaces in the tuple)
        class_str = str(tuple([int(c) for c in class_type])).replace(" ", "")
        # Get the read class counts in string format
        class_counts_str = "%s" %(int(read_class_counts[class_num]))
        # Put class and counts together as "class:count"
        curr_str = "%s:%s" %(class_str,
                             class_counts_str)
        read_counts_list.append(curr_str)
    # Get a summary of the raw read counts supporting each isoform
    read_counts_str = ",".join(read_counts_list)
    assigned_counts = count_isoform_assignments(assignments)
    # Get number of reads assigned to each isoform, as "isoform:count"
    assigned_counts_str = ",".join(["%d:%d" %(c[0], c[1]) \
                                    for c in assigned_counts])
    # coordinates where mRNAs start and end, one entry per isoform
    mRNA_starts = []
    mRNA_ends = []
    for iso in gene.isoforms:
        mRNA_starts.append(iso.genomic_start)
        mRNA_ends.append(iso.genomic_end)
    mRNA_start_coords = ",".join([str(start) for start in mRNA_starts])
    mRNA_end_coords = ",".join([str(end) for end in mRNA_ends])
    # Missing chrom/strand information is recorded as "NA"
    chrom = gene.chrom
    if chrom == None:
        chrom = "NA"
    strand = gene.strand
    if strand == None:
        strand = "NA"
    header = "#isoforms=%s\texon_lens=%s\titers=%d\tburn_in=%d\tlag=%d\t" \
             "percent_accept=%.2f\tproposal_type=%s\t" \
             "counts=%s\tassigned_counts=%s\tchrom=%s\tstrand=%s\tmRNA_starts=%s\tmRNA_ends=%s\n" \
             %(str_isoforms, exon_lens, num_iters, burn_in, lag,
               percent_acceptance, proposal_type, read_counts_str,
               assigned_counts_str,
               # Fields related to gene/event
               chrom,
               strand,
               mRNA_start_coords,
               mRNA_end_coords)
    output.write(header)
    # Output samples and their associated log scores, as well as read counts
    results_fields = ["sampled_psi", "log_score"]
    results_header = "%s\n" %("\t".join(results_fields))
    output.write(results_header)
    # One line per kept sample: psi values to 4 decimals, score to 2
    for psi_sample, curr_log_score in zip(psi_vectors, kept_log_scores):
        psi_sample_str = ",".join(["%.4f" %(psi) for psi in psi_sample])
        output_line = "%s\t%.2f\n" %(psi_sample_str, curr_log_score)
        output.write(output_line)
    output.close()
    print "Completed outputting."
    # return [percent_acceptance, array(psi_vectors), array(kept_log_scores)]
def run_sampler_on_event(gene, ni, ne, nb, read_len, overhang_len, num_iters,
output_dir, confidence_level=.95):
"""
Run sampler on a two-isoform gene event.
"""
print "Running sampler on a two-isoform event..."
print " - Gene label: ", gene.label, gene
print " - NI, NE, NB: %d, %d, %d" %(ni, ne, nb)
print "Using default sampler parameters."
if gene.chrom != None:
# Index output by chromosome
print "Indexing by chromosome..."
output_dir = os.path.join(output_dir, gene.chrom)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_filename = os.path.join(output_dir, gene.label)
samples = []
cred_interval = []
num_isoforms = len(gene.isoforms)
burn_in = 500
lag = 10
hyperparameters = ones(num_isoforms)
proposal_diag = 0.05
sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
proposal_diag)
sampler_params = {'read_len': read_len,
'overhang_len': overhang_len,
'uniform_proposal': False,
'sigma_proposal': sigma}
sampler = MISOSampler(sampler_params, log_dir=output_dir)
reads = read_counts_to_read_list(ni, ne, nb)
t1 = time.time()
sampler_results = sampler.run_sampler(num_iters, reads, gene, hyperparameters,
sampler_params, output_filename, burn_in=burn_in,
lag=lag)
if not sampler_results:
return (samples, cred_interval)
samples = sampler_results[1]
# Compute credible intervals
cred_interval = ht.compute_credible_intervals(samples, confidence_level=confidence_level)
t2 = time.time()
print " - Sampler run took %s seconds." %(str(t2-t1))
# return samples and credible intervals
return (samples, cred_interval)
def profile_miso():
from Gene import make_gene
gene = make_gene([150, 100, 150], [[1, 2, 3], [1, 3]])
read_len = 36
overhang_len = 4
output_dir = "profiler-test"
for x in range(10):
print "x = %d" %(x)
a, b = run_sampler_on_event(gene, 500, 50, 40, read_len, overhang_len,
10000, output_dir)
def main():
    """No-op entry point.

    The commented block below profiles profile_miso() with cProfile and
    prints cumulative stats; re-enable it when profiling is needed.
    """
    return
    # import cProfile as profile
    # import pstats
    # output_file = "profile"
    # profile.run('profile_miso()', output_file)
    # p = pstats.Stats(output_file)
    # p.sort_stats('cumulative').print_stats(20)


if __name__ == '__main__':
    main()
| |
import time
import json
from threading import Thread
from random import sample, randint, choice, random
from plugincon import bot_command, easy_bot_command, get_message_target, get_bot_nickname
# Per-channel game state, keyed by IRC channel name (e.g. "#chan").  Each
# value is a dict holding "timebomb_running", "target", "wire_colors" and
# "current_wire".
channel_data = {}
def percent_chance(percentage):
    """Return True with (approximately) the given percent probability.

    A roll of 1-100 is drawn uniformly; the caller wins when the roll is
    at or below ``percentage``.  ``percentage <= 0`` never succeeds and
    ``percentage >= 100`` always succeeds.
    """
    # randint(1, 100) gives exactly `percentage` winning outcomes out of 100.
    # The original used randint(0, 100) -- 101 outcomes, skewing the odds --
    # and also leaked a debug print of the roll to stdout.
    return randint(1, 100) <= percentage
def timebomb_explode(index, message, user, connector):
    """Detonate the bomb: announce it, clear the channel flag, kick `user`."""
    def announce(text):
        connector.send_message(index, get_message_target(connector, message, index), text)
    announce("BOOOOOOM!!!!!! There goes {}!".format(user))
    # Mark the game finished before kicking so no second detonation fires.
    channel_data[message["channel"]]["timebomb_running"] = False
    connector.send_command(index, "KICK {} {} :Exploded into pieces!".format(message["channel"], user))
def wait_for_timebomb(time_fuse, index, message, user, connector):
print "Called!"
while time.time() < time_fuse:
time.sleep(0.25)
print "Time gone!"
if channel_data[message["channel"]]["timebomb_running"]:
timebomb_explode(index, message, user, connector)
@easy_bot_command("timebomb_reset", True)
def reset_timebomb(message, raw):
    """Admin command: forget all per-channel timebomb state."""
    if raw:
        return
    # BUG FIX: the original did `channel_data = {}`, which only bound a new
    # local variable and left the module-level state untouched.  Mutate the
    # shared dict in place so every reference sees the reset.
    channel_data.clear()
@bot_command("timebomb_cutwire")
def cut_wire_for_timebomb(message, connector, index, raw):
    """Let the targeted user try to defuse the running timebomb.

    Expects the wire color as the first command argument.  Only the
    targeted user may cut; a wrong color detonates the bomb immediately.
    """
    def msg(mesg):
        connector.send_message(index, get_message_target(connector, message, index), mesg)
    def hlmsg(mesg):
        msg("{}: {}".format(message["nickname"], mesg))
    if raw:
        return
    if not message["channel"].startswith("#"):
        msg("Run this command on a channel!")
        return
    if not message["channel"] in channel_data.keys() or not channel_data[message["channel"]]["timebomb_running"]:
        hlmsg("This channel doesn't have any timebomb running yet!")
        return
    if channel_data[message["channel"]]["target"] != message["nickname"]:
        hlmsg("Don't worry, you are not being targeted for this timebomb! :)")
        return
    # BUG FIX: the original compared the argument *list* itself against 2
    # (`message["arguments"] < 2`), which is always False in Python 2, so a
    # missing wire color crashed with IndexError below instead of warning.
    if len(message["arguments"]) < 2:
        hlmsg("Insert the color of the wire as an argument!")
        return
    if message["arguments"][1] == channel_data[message["channel"]]["current_wire"]:
        channel_data[message["channel"]]["timebomb_running"] = False
        hlmsg("Congratulations! You have cut the timebomb!")
        return
    # Wrong wire: the bomb goes off right away (in its own thread).
    hlmsg("Wrong wire! :P")
    Thread(target=timebomb_explode, args=(index, message, message["nickname"], connector)).start()
@bot_command("shoot")
def shoot_at(message, connector, index, raw):
    """Shoot at one or more targets; shooter or target may get kicked."""
    if raw:
        return
    shooter = message["nickname"]
    def msg(text):
        connector.send_message(index, get_message_target(connector, message, index), text)
    def hlmsg(text):
        msg("{}: {}".format(shooter, text))
    if not message["channel"].startswith("#"):
        msg("Run this command on a channel!")
        return
    if len(message["arguments"]) < 2:
        hlmsg("Argument syntax: shoot <target>")
        return
    for target in message["arguments"][1:]:
        # The bot itself is off-limits: kick the shooter instead.
        if target == get_bot_nickname(connector, index):
            hlmsg("Nobody can kill me!")
            connector.send_command(index, "KICK {} {} :Trying to kill ME?!?".format(message["channel"], shooter))
            continue
        msg("{} tries to shoot at {}! SHOTS FIRED!!".format(shooter, target))
        # 15% chance of a self-inflicted hit, then 30% chance of a kill.
        if percent_chance(15):
            msg("{} accidentally shoots at himself!".format(shooter))
            connector.send_command(index, "KICK {} {} :Shot down!".format(message["channel"], shooter))
            continue
        if percent_chance(30):
            msg("{} shot {} down!".format(shooter, target))
            connector.send_command(index, "KICK {} {} :Shot down!".format(message["channel"], target))
            continue
        msg("He missed the shot!")
@bot_command("timebomb")
def timebomb_user(message, connector, index, raw):
    """Strap a timebomb onto a user.

    Arguments: timebomb <user> [fuse seconds = 30] [number of wire colors;
    default is a random count between 4 and 20].  The victim must cut the
    right wire via timebomb_cutwire before the fuse runs out.
    """
    global wire_colors_chooseable
    # Pool of candidate wire colors, one per line of randomcolors.txt.
    # (Use a context manager; the original leaked the file handle.)
    with open("randomcolors.txt") as color_file:
        wire_colors_chooseable = [line.strip("\n") for line in color_file.readlines()]
    if raw:
        return
    def msg(mesg):
        connector.send_message(index, get_message_target(connector, message, index), mesg)
    def hlmsg(mesg):
        msg("{}: {}".format(message["nickname"], mesg))
    def hlmsg2(user, mesg):
        msg("{}: {}".format(user, mesg))
    if not message["channel"].startswith("#"):
        msg("Run this command on a channel!")
        # BUG FIX: the original fell through here and armed a bomb keyed on
        # a private-message "channel".
        return
    if not message["channel"] in channel_data.keys():
        channel_data[message["channel"]] = {}
    if "timebomb_running" in channel_data[message["channel"]].keys() and channel_data[message["channel"]]["timebomb_running"]:
        msg("The timebomb is already running!")
        return
    if len(message["arguments"]) < 2:
        hlmsg("Argument syntax: timebomb <user> [counter = 30 seconds] [no. of wire colors = random between 4 and 20]")
        return
    channel_data[message["channel"]]["target"] = message["arguments"][1]
    channel_data[message["channel"]]["timebomb_running"] = True
    if len(message["arguments"]) < 3:
        hlmsg("Using 30 seconds for time fuse!")
        time_fuse = 30
    else:
        time_fuse = int(message["arguments"][2])
    if len(message["arguments"]) < 4:
        # No explicit color count: pick between 4 and 20 colors.
        channel_data[message["channel"]]["wire_colors"] = sample(wire_colors_chooseable, randint(4, 20))
    else:
        if int(message["arguments"][3]) > len(wire_colors_chooseable):
            hlmsg("Not so many colors can be chosen!")
            channel_data[message["channel"]]["timebomb_running"] = False
            return
        channel_data[message["channel"]]["wire_colors"] = sample(wire_colors_chooseable, int(message["arguments"][3]))
    # Common tail for both branches (the original duplicated these lines,
    # and also leaked the winning wire to stdout via debug prints).
    channel_data[message["channel"]]["current_wire"] = choice(channel_data[message["channel"]]["wire_colors"])
    hlmsg("Using {} wire colors: {}".format(len(channel_data[message["channel"]]["wire_colors"]), ", ".join(channel_data[message["channel"]]["wire_colors"])))
    hlmsg2(message["arguments"][1], "A timebomb was implanted on your chest! To escape it, cut the right wire between the following {} colors: {}. Use ||timebomb_cutwire! Be quick, since you only have {} seconds!".format(len(channel_data[message["channel"]]["wire_colors"]), ", ".join(channel_data[message["channel"]]["wire_colors"]), time_fuse))
    Thread(target=wait_for_timebomb, args=(time_fuse + time.time(), index, message, message["arguments"][1], connector)).start()
@easy_bot_command("timebomb_delete", True)
def remove_timebomb_from_channel(message, raw):
    """Admin command: drop all timebomb state for the current channel."""
    if raw:
        return
    if not message["channel"].startswith("#"):
        return ["In a channel, please!"]
    # Use the `del` statement instead of calling __delitem__ directly, and
    # guard the lookup: the original raised an uncaught KeyError when the
    # channel had no state yet.
    if message["channel"] in channel_data:
        del channel_data[message["channel"]]
    return ["Success removing timebomb from channel!"]
# Comparing meters
#
# Persisted results of the "comparemeters" command, loaded once at import
# time.  An absent meters.json simply means there is no history yet.
try:
    # Use a context manager; the original leaked the file handle.
    with open("meters.json") as meters_file:
        meter_info = json.load(meters_file)
except IOError:
    meter_info = {}
@easy_bot_command("comparemeters")
def compare_meters(message, raw):
    """Rate each listed person on <subject> with a pseudo-random percentage.

    Results are persisted in meters.json, so asking again about the same
    subject and the same list of people returns the original verdict.
    """
    if raw:
        return
    if len(message["arguments"]) < 3:
        return ["Error: Not enough arguments!", "Syntax: comparemeters <subject> <list of persons to test for percentage in subject>"]
    comparisees = message["arguments"][2:]
    subject = message["arguments"][1]
    # Return the cached verdict when exactly this group was already rated.
    if subject in meter_info and comparisees in [x[0] for x in meter_info[subject]]:
        for result in meter_info[subject]:
            if result[0] == comparisees:
                these_results = result
                break
        return ["It was already found out ({}):".format(subject)] + ["{}: {}%".format(person, value) for person, value in these_results[1].items()]
    # Fresh verdict: a two-decimal percentage per person, clamped to 100.
    these_results = [comparisees]
    trv = {}
    for person in comparisees:
        trv[person] = float(randint(0, 100)) + float(randint(0, 100)) / 10 + float(randint(0, 100)) / 100
        if trv[person] > 100.0:
            trv[person] = 100.0
    these_results.append(trv)
    if subject in meter_info:
        meter_info[subject].append(these_results)
    else:
        meter_info[subject] = [these_results]
    # BUG FIX: write through a context manager; the original passed an
    # unclosed file object to json.dump and never closed it, risking an
    # unflushed meters.json.
    with open("meters.json", "w") as meters_file:
        json.dump(meter_info, meters_file)
    return ["It was found out ({}):".format(subject)] + ["{}: {}%".format(person, value) for person, value in these_results[1].items()]
@easy_bot_command("flushmeters", True)
def flush_meters(message, raw):
    """Admin command: discard all in-memory comparemeters results.

    Note: only the in-memory dict is cleared; an existing meters.json on
    disk is left as-is until the next comparemeters write.
    """
    if raw:
        return
    global meter_info
    meter_info = {}
    # BUG FIX: the format string uses the *named* field {nick}, but the
    # original passed the value positionally, which raises KeyError at
    # runtime.
    return ["{nick}: Success flushing all the comparing meters!".format(nick=message["nickname"])]
| |
import pieces, board, random, copy
gray = (100, 100, 100)  # RGB color of an empty board square
# Used for generating the list of upcoming pieces
class PieceBag:
    """FIFO queue of upcoming piece ids (0-6), refilled seven at a time."""

    def __init__(self):
        self.bag = []
        self.refill_bag()

    def refill_bag(self):
        """Append seven freshly drawn piece ids and reset the counter."""
        self.bag.extend(random.randint(0, 6) for _ in range(7))
        self.remaining = 7

    def get_next_piece(self):
        """Pop and return the oldest queued piece id."""
        self.remaining -= 1
        return self.bag.pop(0)

    def remaining_pieces(self):
        """Number of pieces left before a refill is needed."""
        return self.remaining
# Contains the array of blocks. Handles game logic. Model in MVC structure.
class BoardModel:
    # Enum for describing the type of collision reported by _will_collide.
    # NOTE(review): wall and pieceSide share the value 0, so code comparing
    # against CollisionTypeEnum.wall (see act_on_piece) also matches
    # pieceSide collisions -- confirm this aliasing is intentional.
    class CollisionTypeEnum:
        wall = 0
        pieceSide = 0
        floor = 1
        pieceBelow = 2

    def __init__(self, width, height):
        """Create an empty width x height board (all squares gray)."""
        self.score = 0
        self.width = width
        self.height = height
        self.pieceBag = PieceBag()
        # boardSquares[row][col]; every square starts out gray (empty).
        self.boardSquares = []
        for i in range(height):
            self.boardSquares.append([])
            for j in range(width):
                self.boardSquares[i].append(board.Square(i, j, gray))
        self.activePiece = False   # True while a piece is still falling
        self.currentPiece = None   # piece object currently in play
        self.pieceRow = 0          # top-left row of the piece's block array
        self.pieceCol = 0          # top-left column of the piece's block array
        self.nextPiece = None      # piece previewed for the next drop

    def get_square(self, x, y):
        # Note the argument order: x is the column, y is the row.
        return self.boardSquares[y][x]

    # Grab the next piece to be dropped
    def next_piece(self):
        if not self.pieceBag.remaining:
            self.pieceBag.refill_bag()
        # Promote the preview and draw a fresh preview from the bag.
        self.currentPiece = self.nextPiece
        self.nextPiece = pieces.get_piece(self.pieceBag.get_next_piece())

    def new_piece(self):
        """Activate a new falling piece at the spawn position."""
        # On the very first call currentPiece is None, so pull twice to
        # prime both the current piece and the preview.
        while self.currentPiece == None:
            self.next_piece()
        self.next_piece()
        self.activePiece = True
        # Spawn near the middle of the top row.
        self.pieceCol = 3
        self.pieceRow = 0

    # Hard drop function
    def hard_drop(self):
        # Step the piece down one row at a time until a collision
        # deactivates it.
        while self.activePiece:
            self.act_on_piece(0, 1)

    # check if rotation is possible
    def check_rotate(self):
        """Return True when rotating the current piece would collide
        (with existing colored squares or a wall); False when safe."""
        #makes a copy of self.currentPiece so that changing rotatedPiece doesn't mess up self.currentPiece
        rotatedPiece = copy.deepcopy(self.currentPiece)
        #rotates rotatedPiece so that we have a copy of what spaces self.currentPiece will rotate into
        rotatedPiece.blockArray = self.currentPiece.rotate()
        #returns True if a block from the rotate piece will rotate into an existing colored block below it
        for i in range(rotatedPiece.height):
            for j in range(rotatedPiece.width):
                if rotatedPiece.blockArray[i][j] and self.boardSquares[i + self.pieceRow][j + self.pieceCol].color != gray and not self.currentPiece.blockArray[i][j]:
                    return True
                #checks if the rotation will cause the piece to collide with the wall
                # NOTE(review): operator precedence makes this condition
                # (A and B) or C, so the right-wall test is evaluated even
                # for empty cells -- confirm the intent.
                elif rotatedPiece.blockArray[i][j] and j + self.pieceCol < 0 or j + self.pieceCol > self.width - 1:
                    return True
        #returns False if there are only gray squares below the rotating piece and there is not a wall blocking the rotation
        return False

    # Rotate a the current piece
    def rotate_piece(self):
        #checks if rotating the piece will cause it to overlap with an existing colored square
        #if rotation will cause an overlap, then the piece does not rotate
        if not self.check_rotate():
            self.clear_piece() #Try a different way to do this
            self.currentPiece.blockArray = self.currentPiece.rotate()
            self.draw_piece()

    # Check for collision
    def _will_collide(self, dx, dy):
        """Check whether moving the active piece by (dx, dy) collides.

        Returns a (willCollide, collisionType, endGame) triple;
        collisionType is a CollisionTypeEnum value (or None) and endGame
        is True when a locked piece reaches the top rows.
        """
        for i in range(self.currentPiece.height):
            for j in range(self.currentPiece.width):
                if self.currentPiece.blockArray[i][j]:
                    # Case 1: the destination cell is still inside the
                    # piece's own block array and is empty there.
                    if i + dy < self.currentPiece.height and i + dy > -1 and j + dx < self.currentPiece.width and j + dx >= -1 and not self.currentPiece.blockArray[i + dy][j + dx]:
                        if j + self.pieceCol + dx < 0 or j + self.pieceCol + dx > self.width - 1: #should be 'or', not 'and'?
                            return True, self.CollisionTypeEnum.wall, False
                        elif i + self.pieceRow + dy > self.height - 1:
                            return True, self.CollisionTypeEnum.floor, False
                        elif self.boardSquares[i + self.pieceRow + dy][j + self.pieceCol + dx].color != gray and dy == 1:
                            #checks to see if the user has lost the game
                            # (any colored square in the top three rows)
                            for row in range(3):
                                for col in range(len(self.boardSquares[row])):
                                    if self.boardSquares[row][col].color != gray:
                                        return True, self.CollisionTypeEnum.pieceBelow, True
                            #if the user has not lost the game, then returns a normal pieceBelow collision
                            return True, self.CollisionTypeEnum.pieceBelow, False
                        elif self.boardSquares[i + self.pieceRow][j + self.pieceCol + dx].color != gray and abs(dx) == 1:
                            return True, self.CollisionTypeEnum.pieceSide, False
                    #These additional elif statements are used for the case in which there is a colored square on the edge of the
                    #piece's 3x3 or 4x4
                    elif i + dy == self.currentPiece.height and i + dy > -1:
                        if i + self.pieceRow + dy > self.height - 1:
                            return True, self.CollisionTypeEnum.floor, False
                        elif self.boardSquares[i + self.pieceRow + dy][j + self.pieceCol + dx].color != gray:
                            return True, self.CollisionTypeEnum.pieceBelow, False
                    # This elif statement checks for collisions on the leftmost and rightmost columns of blocks in a piece
                    elif j + dx == self.currentPiece.width or j + dx == -1:
                        if j + self.pieceCol + dx < 0 or j + self.pieceCol + dx > self.width - 1: #should be 'or', not 'and'?
                            return True, self.CollisionTypeEnum.wall, False
                        elif self.boardSquares[i + self.pieceRow + dy][j + self.pieceCol + dx].color != gray:
                            return True, self.CollisionTypeEnum.pieceSide, False
        # No blocking square or wall found: the move is free.
        return False, None, False

    # Act on piece given an dx and dy
    def act_on_piece(self, dx, dy):
        """Try to move the active piece by (dx, dy).

        Returns the endGame flag from the collision check (True when the
        stack has reached the top rows).
        """
        endGame = False
        if self.activePiece:
            willCollide, collisionType, endGame = self._will_collide(dx, dy)
            if willCollide:
                # Any collision other than a wall hit locks the piece and
                # resolves completed lines.  (pieceSide aliases wall -- see
                # CollisionTypeEnum -- so side bumps don't lock either.)
                if collisionType != self.CollisionTypeEnum.wall:
                    self.activePiece = False
                    lines = self.check_lines()
                    self.drop_lines(lines)
            else:
                # Erase at the old position, shift, redraw at the new one.
                self.clear_piece()
                self.pieceRow += dy
                self.pieceCol += dx
                self.draw_piece()
        return endGame

    def draw_piece(self):
        """Paint the active piece's cells onto the board in its color."""
        for i in range(self.currentPiece.height):
            for j in range(self.currentPiece.width):
                if self.currentPiece.blockArray[i][j]:
                    self.boardSquares[i + self.pieceRow][j + self.pieceCol].set_color(self.currentPiece.color)

    def clear_piece(self):
        """Erase the active piece's cells from the board (back to gray)."""
        for i in range(self.currentPiece.height):
            for j in range(self.currentPiece.width):
                if self.currentPiece.blockArray[i][j]:
                    self.boardSquares[i + self.pieceRow][j + self.pieceCol].set_color(gray)

    def clear_board(self, height, width):
        """Reset the given height x width region of the board to gray."""
        for i in range(height):
            for j in range(width):
                self.boardSquares[i][j].set_color(gray)

    ##################
    # Line completion#
    ##################
    def check_lines(self):
        """Return completed rows, each as [row_index, square, square, ...]."""
        lines = []
        for i in range(self.height):
            line = [i]
            colors = []
            for j in range(self.width):
                line.append(self.boardSquares[i][j])
                colors.append(self.boardSquares[i][j].color)
            # A row with no gray square is complete.
            if gray not in colors:
                lines.append(line)
            else:
                pass
        return lines

    def clear_line(self, line):
        """Score one point and gray out every square of a completed row."""
        self.score += 1
        for square in line[1:]:
            square.set_color(gray)

    def drop_lines(self, lines):
        """Clear each completed row and shift everything above it down."""
        for line in lines:
            self.clear_line(line)
            # Row above the cleared one; everything from there up moves down.
            Row = line.pop(0) - 1
            if Row >= self.height - 1:
                pass
            else:
                for i in range(Row, 0, -1):
                    for j in range(self.width):
                        self.boardSquares[i + 1][j].set_color(self.boardSquares[i][j].color)
                        self.boardSquares[i][j].set_color(gray)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import pathlib
import sys

import pytest
import numpy as np
import onnx
from PIL import Image

import tvm
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from tvm.relay.testing import byoc
from tvm.contrib import utils
from tvm.micro.testing import check_tune_log

import test_utils

_LOG = logging.getLogger(__name__)
def _make_sess_from_op(
    temp_dir, model, zephyr_board, west_cmd, op_name, sched, arg_bufs, build_config
):
    """Build a single TE operator for `model` and open a device session."""
    crt_runtime = Runtime("crt", {"system-lib": True})
    micro_target = tvm.target.target.micro(model)
    micro_target = tvm.target.Target(target=micro_target, host=micro_target)
    # Vectorization must be disabled for the C runtime.
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        op_mod = tvm.build(sched, arg_bufs, target=micro_target, runtime=crt_runtime, name=op_name)
    return _make_session(temp_dir, zephyr_board, west_cmd, op_mod, build_config)
def _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config):
    """Generate, build and flash a host-driven Zephyr project for `mod`;
    return an open tvm.micro.Session to the device."""
    # QEMU boards need a larger main stack for the host-driven runtime.
    stack_size = 1536 if test_utils.qemu_boards(zephyr_board) else None
    options = {
        "project_type": "host_driven",
        "west_cmd": west_cmd,
        "verbose": bool(build_config.get("debug")),
        "zephyr_board": zephyr_board,
    }
    if stack_size is not None:
        options["config_main_stack_size"] = stack_size
    project = tvm.micro.generate_project(
        str(test_utils.TEMPLATE_PROJECT_DIR),
        mod,
        temp_dir / "project",
        options,
    )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())
def _make_add_sess(temp_dir, model, zephyr_board, west_cmd, build_config, dtype="int8"):
    """Schedule C[i] = A[i] + B[0] (A: 2 elems, B: 1 elem) and open a session."""
    a_buf = tvm.te.placeholder((2,), dtype=dtype)
    b_buf = tvm.te.placeholder((1,), dtype=dtype)
    c_buf = tvm.te.compute(a_buf.shape, lambda i: a_buf[i] + b_buf[0], name="C")
    schedule = tvm.te.create_schedule(c_buf.op)
    return _make_sess_from_op(
        temp_dir, model, zephyr_board, west_cmd, "add", schedule, [a_buf, b_buf, c_buf], build_config
    )
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
def test_add_uint(temp_dir, board, west_cmd, tvm_debug):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def run_add(sess):
        lhs = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (lhs.numpy() == np.array([2, 3])).all()
        rhs = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (rhs.numpy() == np.array([4])).all()
        out = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (out.numpy() == np.array([0, 0])).all()
        sess.get_system_lib().get_function("add")(lhs, rhs, out)
        assert (out.numpy() == np.array([6, 7])).all()

    with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:
        run_add(sess)
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
def test_add_float(temp_dir, board, west_cmd, tvm_debug):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    # float32 math needs hardware floating point support.
    if not test_utils.has_fpu(board):
        pytest.skip(f"FPU not enabled for {board}")
    build_config = {"debug": tvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def run_float_add(sess):
        lhs = tvm.nd.array(np.array([2.5, 3.5], dtype="float32"), device=sess.device)
        assert (lhs.numpy() == np.array([2.5, 3.5])).all()
        rhs = tvm.nd.array(np.array([4.5], dtype="float32"), device=sess.device)
        assert (rhs.numpy() == np.array([4.5])).all()
        out = tvm.nd.array(np.array([0, 0], dtype="float32"), device=sess.device)
        assert (out.numpy() == np.array([0, 0])).all()
        sess.get_system_lib().get_function("add")(lhs, rhs, out)
        assert (out.numpy() == np.array([7, 8])).all()

    with _make_add_sess(temp_dir, model, board, west_cmd, build_config, dtype="float32") as sess:
        run_float_add(sess)
@tvm.testing.requires_micro
def test_platform_timer(temp_dir, board, west_cmd, tvm_debug):
    """Test compiling the on-device runtime."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def run_timed_add(sess):
        lhs = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (lhs.numpy() == np.array([2, 3])).all()
        rhs = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (rhs.numpy() == np.array([4])).all()
        out = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (out.numpy() == np.array([0, 0])).all()
        # Time the kernel using the device-side platform timer.
        timer = sess.get_system_lib().time_evaluator(
            "add", sess.device, number=20, repeat=3, min_repeat_ms=40
        )
        report = timer(lhs, rhs, out)
        assert (out.numpy() == np.array([6, 7])).all()
        assert report.mean > 0
        assert len(report.results) == 3

    with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:
        run_timed_add(sess)
@tvm.testing.requires_micro
def test_relay(temp_dir, board, west_cmd, tvm_debug):
    """Testing a simple relay graph"""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}
    shape = (10,)
    dtype = "int8"

    # Relay program computing x * x + 1 elementwise.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    body = relay.add(relay.multiply(x, x), relay.const(np.ones(shape=shape, dtype=dtype)))
    ir_mod = tvm.IRModule.from_expr(relay.Function([x], body))

    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=Runtime("crt", {"system-lib": True}))

    with _make_session(temp_dir, board, west_cmd, mod, build_config) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**mod.get_params())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).numpy()
        # The input buffer must round-trip unchanged, and the output must
        # match the host-side reference computation.
        tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)
@tvm.testing.requires_micro
def test_onnx(temp_dir, board, west_cmd, tvm_debug):
    """Testing a simple ONNX model."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    mnist_testdata = pathlib.Path(os.path.dirname(__file__)).parent / "testdata" / "mnist"

    def load_digit(filename):
        # 28x28 float32 grayscale image with a leading batch axis.
        img = Image.open(mnist_testdata / filename).resize((28, 28))
        return np.expand_dims(np.asarray(img).astype("float32"), axis=0)

    digit_2 = load_digit("digit-2.jpg")
    digit_9 = load_digit("digit-9.jpg")

    # Load ONNX model and convert to Relay.
    onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
    relay_mod, params = relay.frontend.from_onnx(
        onnx_model, shape={"Input3": (1, 1, 28, 28)}, freeze_params=True
    )
    relay_mod = relay.transform.DynamicToStatic()(relay_mod)

    # We add the link-params=True option to ensure the model parameters are compiled in.
    # There is currently a bug preventing the host_driven environment from receiving
    # the model weights when set using graph_mod.set_input().
    # See: https://github.com/apache/tvm/issues/7567
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered = relay.build(
            relay_mod,
            target,
            params=params,
            executor=Executor("graph", {"link-params": True}),
            runtime=Runtime("crt", {"system-lib": True}),
        )
        graph = lowered.get_graph_json()

    with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            graph, session.get_system_lib(), session.device
        )
        # Each digit image must classify to its labeled digit.
        for image, expected in ((digit_2, 2), (digit_9, 9)):
            graph_mod.set_input("Input3", tvm.nd.array(image))
            graph_mod.run()
            assert np.argmax(graph_mod.get_output(0).numpy()) == expected
def check_result(
    temp_dir, relay_mod, model, zephyr_board, west_cmd, map_inputs, out_shape, result, build_config
):
    """Helper function to verify results.

    Builds `relay_mod` for the micro target, runs it on the device with the
    named tensors in `map_inputs`, and asserts every output matches
    `result` (a single array or a list) within TOL.
    """
    TOL = 1e-5
    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.target.target.micro(model)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)

    with _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config) as session:
        rt_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        # Set the user inputs, then the compiled parameters.  (The original
        # called set_input(**mod.get_params()) twice -- before and after the
        # loop; once is enough.)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.set_input(**mod.get_params())
        rt_mod.run()

        # Normalize single-output calls to the list form.
        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]
        for idx, shape in enumerate(out_shapes):
            out = rt_mod.get_output(idx, tvm.nd.empty(shape, device=session.device))
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)
@tvm.testing.requires_micro
def test_byoc_microtvm(temp_dir, board, west_cmd, tvm_debug):
    """This is a simple test case to check BYOC capabilities of microTVM"""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    x = relay.var("x", shape=(10, 10))
    w = [relay.var("w{}".format(i), shape=(10, 10)) for i in range(8)]

    # Two (add -> subtract -> multiply) chains handled by the C compiler...
    q0 = relay.multiply(relay.subtract(relay.add(x, w[0]), w[1]), w[2])
    q1 = relay.multiply(relay.subtract(relay.add(x, w[3]), w[4]), w[5])
    # ...and one chain kept on TVM.
    q2 = relay.subtract(relay.add(x, w[6]), w[7])
    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x] + w, r)

    mod = tvm.IRModule()
    mod["main"] = byoc.CcompilerAnnotator().visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    x_data = np.random.rand(10, 10).astype("float32")
    w_data = [np.random.rand(10, 10).astype("float32") for _ in range(8)]
    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    check_result(
        temp_dir=temp_dir,
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        model=model,
        zephyr_board=board,
        west_cmd=west_cmd,
        build_config=build_config,
    )
def _make_add_sess_with_shape(temp_dir, model, zephyr_board, west_cmd, shape, build_config):
    """Build an int8 `add` operator (C[i] = A[i] + A[i]) for `shape` and open a session.

    Thin wrapper over _make_sess_from_op (defined elsewhere in this file),
    which owns the flash/session semantics.
    """
    A = tvm.te.placeholder(shape, dtype="int8")
    C = tvm.te.compute(A.shape, lambda i: A[i] + A[i], name="C")
    sched = tvm.te.create_schedule(C.op)
    return _make_sess_from_op(
        temp_dir, model, zephyr_board, west_cmd, "add", sched, [A, C], build_config
    )
@pytest.mark.parametrize(
    "shape,",
    [
        pytest.param((1 * 1024,), id="(1*1024)"),
        pytest.param((4 * 1024,), id="(4*1024)"),
        pytest.param((16 * 1024,), id="(16*1024)"),
    ],
)
@tvm.testing.requires_micro
def test_rpc_large_array(temp_dir, board, west_cmd, tvm_debug, shape):
    """Test large RPC array transfer."""
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_tensors(sess):
        # Round-trip an int8 array over RPC and check it comes back intact.
        a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")

        A_data = tvm.nd.array(a_np, device=sess.device)
        assert (A_data.numpy() == a_np).all()
        C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.zeros(shape)).all()

    with _make_add_sess_with_shape(temp_dir, model, board, west_cmd, shape, build_config) as sess:
        test_tensors(sess)
@pytest.mark.xfail(strict=False, reason="See https://github.com/apache/tvm/issues/10297")
@tvm.testing.requires_micro
def test_autotune_conv2d(temp_dir, board, west_cmd, tvm_debug):
    """Test AutoTune for microTVM Zephyr.

    Extracts autotvm tasks from a small conv2d Relay model, tunes each with a
    GATuner on the device, then verifies a build applying the tuning log
    matches the untuned build's output.
    """
    if board != "qemu_x86":
        pytest.xfail(f"Autotune fails on {board}.")

    runtime = Runtime("crt", {"system-lib": True})
    model = test_utils.ZEPHYR_BOARDS[board]
    build_config = {"debug": tvm_debug}

    # Create a Relay model
    data_shape = (1, 3, 16, 16)
    weight_shape = (8, 3, 5, 5)
    data = relay.var("data", relay.TensorType(data_shape, "float32"))
    weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        kernel_layout="OIHW",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], y)
    mod = tvm.IRModule.from_expr(f)
    mod = relay.transform.InferType()(mod)

    data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
        "float32"
    )
    weight_sample = np.random.rand(
        weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
    ).astype("float32")
    # Bind the weight as a parameter; `data` stays a runtime input.
    params = {mod["main"].params[1].name_hint: weight_sample}

    target = tvm.target.target.micro(model)
    pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
    with pass_context:
        tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
    assert len(tasks) > 0

    config_main_stack_size = None
    # QEMU boards get a larger main stack (value used below in project options).
    if test_utils.qemu_boards(board):
        config_main_stack_size = 1536

    project_options = {
        "zephyr_board": board,
        "west_cmd": west_cmd,
        "verbose": 1,
        "project_type": "host_driven",
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size

    module_loader = tvm.micro.AutoTvmModuleLoader(
        template_project_dir=test_utils.TEMPLATE_PROJECT_DIR,
        project_options=project_options,
    )

    timeout = 200
    builder = tvm.autotvm.LocalBuilder(
        timeout=timeout,
        n_parallel=1,
        build_kwargs={"build_option": {"tir.disable_vectorize": True}},
        do_fork=True,
        build_func=tvm.micro.autotvm_build_func,
        runtime=runtime,
    )
    runner = tvm.autotvm.LocalRunner(
        number=1, repeat=1, timeout=timeout, module_loader=module_loader
    )
    measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)

    # Start from a clean tuning log each run.
    log_path = pathlib.Path("zephyr_autotune.log")
    if log_path.exists():
        log_path.unlink()

    n_trial = 10
    for task in tasks:
        tuner = tvm.autotvm.tuner.GATuner(task)
        tuner.tune(
            n_trial=n_trial,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.log_to_file(str(log_path)),
                tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
            ],
            si_prefix="M",
        )
        assert tuner.best_flops > 0

    check_tune_log(log_path)

    # Build without tuning
    with pass_context:
        lowered = tvm.relay.build(mod, target=target, runtime=runtime, params=params)

    temp_dir = utils.tempdir()
    with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            lowered.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**lowered.get_params())
        graph_mod.run(data=data_sample)
        expected_output = graph_mod.get_output(0).numpy()
        del graph_mod

    # Build using autotune logs
    with tvm.autotvm.apply_history_best(str(log_path)):
        with pass_context:
            lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params)

    temp_dir = utils.tempdir()
    with _make_session(temp_dir, board, west_cmd, lowered_tuned, build_config) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            lowered_tuned.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**lowered_tuned.get_params())
        graph_mod.run(data=data_sample)
        output = graph_mod.get_output(0).numpy()
        del graph_mod

    # Tuned and untuned builds must agree numerically.
    tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
    # Forward any CLI arguments to pytest so specific tests can be selected.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| |
import logging
from copy import deepcopy #for abusively making wrappers
class BlockReinit:
    """
    Mixin that absorbs __init__ calls without forwarding them.

    wrapper() inserts this class into the MRO just before the wrapped
    object's original class, so re-running __init__ on the mixins cannot
    re-initialize (and clobber) the already-constructed base object.
    """

    def __init__(self, *args, **kwargs):
        # Deliberately does NOT call super().__init__(): the chain stops here.
        logging.debug("blocking reinitialization of super() of %r; extra args: *%s, **%s" % (self, args, kwargs))  # DEBUG
def wrapper(*cls, clone=True):
    """
    Dynamically mix in classes before a given obj's type.
    (actually returns a callable which will do the mixing)
    In some cases you cannot or do not want to use
    ```
    class Mixin(): ...
    class Mixed(Mixin, Base): ...
    o = Mixed()
    ```
    mainly when Base is constructed deep in some library routine,
    With wrapper, you can do
    ```
    class Mixin(): ...
    b = lib.blue_submarine.chug()
    O = wrapper(Mixin)(b)
    ```
    You can also do this if you just don't want to for the sake of
    composability: if you have a lot of mixins it's a nuisance to
    prepare an exponential number of combinations:
    class MixedABC(A,B,C,Base): pass
    class MixedAC(A,C,Base): pass
    ...
    rather, at the point of need, you can say
    o = wrapper(A,B)(o)
    to prepend classes A and B to o's search path
    This has been coded so that Mixin can be used either normally or under wrap()
    However, since the wrapped version does not receive the construction arguments
    (i.e. Base.__init__() doesn't happen a second time and the arguments to the original are lost)
    Mixin needs to tolerate not either receiving or not receiving the init args.
    Use this idiom for greatest compatibility:
    class Mixin(Base):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            del args, kwargs #to ensure no code below can rely on these
            # ...
    (if your class has no specific base you can elide that part)
    Luckily, most mixins fit this mold.
    To avoid side-effects, a deep copy is made of obj.
    If you want to save the effort, and *you know obj is immutable* or
    *you are replacing the only reference to obj* you can pass clone=False.
    o = wrapper(A,B, clone=False)(o)
    *CAVEAT*: copy() demands obj be pickleable. *In particular*, if an
    ancestor of obj defines __setstate__/__getstate__, but a more child
    ancestor (including self) does not, any child class-specific attributes
    will be mishandled at wrapping time and you will have only confusion bacon.
    """
    if not isinstance(clone, bool): raise TypeError("clone should be a bool")

    # Implementation:
    #A typical wrapper uses {g,s}etattr() overloading, but why do that
    #when you can just hack up what class the object thinks it is?
    #As far as I can tell, this has the exact same effect:
    #all the methods defined in the wrapper class get added to the object's search path
    # this could also be solved as a metaclass problem
    def __new__(obj):
        logging.debug("wrapper.__new__(cls=*%s, obj=%s)" % (cls, obj))
        # MRO of the synthesized class W: requested mixins first, then
        # BlockReinit (which stops the __init__ chain), then obj's own class.
        _cls = cls + (BlockReinit, type(obj))
        class W(*_cls): pass
        if clone:
            logging.debug("wrapper.__new__(cls=%s, obj=%s): cloning" % (cls, obj))
            obj = deepcopy(obj)
        # Swap the object's class so the mixins join its method lookup path,
        # then run the mixins' __init__ argument-free (see docstring idiom).
        obj.__class__ = W
        obj.__init__()
        return obj
    return __new__
def test_wrap():
    """Self-test for wrapper(): exercises a mixin both wrapped and unwrapped.

    FIX: the original compared strings with `is`, which relies on CPython
    string interning (implementation-defined; SyntaxWarning on 3.8+); use
    `==` for value comparison instead.
    """
    class X:
        def __init__(self, *args, **kwargs):
            logging.debug("X.__init__(*%s, **%s)" % (args, kwargs))
            self.args = args
            self.__dict__.update(kwargs)

    class W(X):
        "test wrap()-able class"
        def __init__(self, *args, **kwargs):
            logging.debug("W.__init__(*%s,**%s)" % (args, kwargs))
            super().__init__(*args, **kwargs); del args, kwargs
            self.antelope = "deer"

    def test_wrapped(clone=False):
        logging.debug("---- wrapped test (clone=%s)" % (clone,))
        logging.debug("Constructing original object")
        # mock "library" construction that we "can't" control
        o = X(6, 7, 8, happy="sad", pacha="ziggurat", antelope="monkeyman")
        logging.debug(o.__class__)
        assert o.antelope == "monkeyman"
        logging.debug("Wrapping original object (clone=%s)" % (clone,))
        o = wrapper(W, clone=clone)(o)
        # W.__init__ ran again under the wrap and overwrote the attribute.
        assert o.antelope == "deer"

    def test_nonwrapped():
        logging.debug("---- Non-wrapped test")
        logging.debug("Constructing original (and only) object")
        o = W(sandy_hills="in arabia")
        assert o.sandy_hills == "in arabia"
        assert not hasattr(o, 'happy')
        assert o.antelope == "deer"

    test_nonwrapped()
    test_wrapped(True)
    test_wrapped(False)
import os
def query(ask, options=["Y","N"], catch="N"):
    """Prompt the user with `ask` and a set of one-letter options.

    The response is uppercased; an empty response selects options[0] (the
    default), and any response not in `options` falls back to `catch`.
    NOTE: the mutable default `options` is safe here because it is copied
    (list(options)) and never mutated.
    """
    options = list(options)
    assert catch in options, "Catchall case should be in your list of valid options, or else what are you doing with your life?"
    R = input("%s [%s] " % (ask, "/".join(options))).upper()
    if not R: R = options[0]
    if R not in options: R = catch
    return R
def ask(ask):
    """Ask a Y/N question; defaults to *no* on errors.

    FIX: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit, making the prompt impossible to break out of with Ctrl-C;
    catch only Exception so those still propagate.
    """
    try:
        return query(ask) == "Y"
    except Exception:
        return False
from itertools import chain
def list_ret(g):
    """
    Exhaust an iterable to a list, and additionally return its generator
    return value, which normally you need to catch in an exception handler.

    returns: (list(g), return_value)
    As with regular functions, return_value will be None if there isn't one
    and/or if g isn't actually a generator.
    TODO: is this in stdlib somewhere?

    FIX: call iter(g) first so plain iterables (lists, tuples, ...) work as
    the docstring promises, instead of raising TypeError on next().
    """
    g = iter(g)
    L = []
    while True:
        try:
            L.append(next(g))
        except StopIteration as stop:
            # PEP 380: a generator's `return x` travels on StopIteration.value.
            return L, stop.value
from itertools import islice
def window(g, n):
    """
    Moving-window generator.

    Given a sequence g = [1,2,3,4,...] and window size n=2, yields the
    tuples (1,2), (2,3), (3,4), ...; sequences shorter than n yield nothing.
    """
    it = iter(g)
    # Pre-fill with the first n-1 elements; each iteration completes a window.
    buf = list(islice(it, n - 1))
    for item in it:
        buf.append(item)
        yield tuple(buf)
        del buf[0]
def pairs(g):
    """Yield consecutive overlapping pairs of g, i.e. a moving window of size 2."""
    return window(g, 2)
def chomp(s):
    """Remove a single trailing newline from s.

    Assumes universal newlines mode (i.e. the only terminator is "\\n").
    FIX: the original had a second bare string after the docstring, which is
    a dead expression statement, not documentation; merged into one docstring.
    """
    if s.endswith("\n"):
        s = s[:-1]
    return s
import os
from shutil import rmtree
def rm(path):
    """rm -r: remove a file, or recursively remove a directory.

    NOTE: os.path.isdir() follows symlinks, so the choice of remover is
    based on what the path resolves to.
    """
    remover = rmtree if os.path.isdir(path) else os.unlink
    remover(path)
def flatten(L):
    """
    Flatten a nested iterable by one level, returning a list.
    """
    return [element for sublist in L for element in sublist]
def parse_american_int(c):
    """
    Parse an integer, possibly an American-style comma-separated integer.

    Raises:
        TypeError: if c is not a str (FIX: now with an explanatory message
            instead of a bare, message-less TypeError).
        ValueError: if the de-commaed string is not a valid integer.
    """
    if not isinstance(c, str):
        raise TypeError("expected a str, got %s" % type(c).__name__)
    #dirty hack; also what SO decided on: http://stackoverflow.com/questions/2953746/python-parse-comma-separated-number-into-int
    return int(c.replace(",",""))
if __name__ == '__main__':
    # Run the module self-tests with debug logging visible.
    logging.root.setLevel(logging.DEBUG)
    test_wrap()
    print("%s tests passed" %(__file__))
| |
import functools
import hashlib
import json
import random
import uuid
from operator import attrgetter
from django import http
from django.conf import settings
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_list_or_404, get_object_or_404, redirect
from django.utils.translation import ugettext as _
from django.utils.cache import patch_cache_control
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
import caching.base as caching
import jinja2
import commonware.log
import session_csrf
import waffle
from elasticsearch_dsl import Search
from mobility.decorators import mobilized, mobile_template
from rest_framework import serializers
from rest_framework.decorators import detail_route
from rest_framework.exceptions import ParseError
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
from session_csrf import anonymous_csrf_exempt
from olympia import amo
from olympia.access import acl
from olympia.amo import messages
from olympia.amo.decorators import post_required
from olympia.amo.forms import AbuseForm
from olympia.amo.utils import randslice, render
from olympia.amo.models import manual_order
from olympia.amo import urlresolvers
from olympia.amo.urlresolvers import reverse
from olympia.abuse.models import send_abuse_report
from olympia.bandwagon.models import Collection
from olympia.constants.payments import PAYPAL_MAX_COMMENT_LENGTH
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia import paypal
from olympia.api.paginator import ESPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowReadOnlyIfPublic, AllowRelatedObjectPermissions,
AllowReviewer, AllowReviewerUnlisted, AnyOf, GroupPermission)
from olympia.reviews.forms import ReviewForm
from olympia.reviews.models import Review, GroupedRating
from olympia.search.filters import (
AddonAppFilterParam, AddonCategoryFilterParam, AddonTypeFilterParam,
ReviewedContentFilter, SearchParameterFilter, SearchQueryFilter,
SortingFilter)
from olympia.stats.models import Contribution
from olympia.translations.query import order_by_translation
from olympia.versions.models import Version
from .decorators import addon_view_factory
from .forms import ContributionForm
from .indexers import AddonIndexer
from .models import Addon, Persona, FrozenAddon
from .serializers import (
AddonEulaPolicySerializer, AddonFeatureCompatibilitySerializer,
AddonSerializer, AddonSerializerWithUnlistedData, ESAddonSerializer,
VersionSerializer, StaticCategorySerializer)
from .utils import get_creatured_ids, get_featured_ids
# Loggers: one for general add-on views, one dedicated to PayPal events.
log = commonware.log.getLogger('z.addons')
paypal_log = commonware.log.getLogger('z.paypal')

# View decorators resolving an add-on from the URL; the second variant also
# admits disabled and pending add-ons (used by the detail dispatcher).
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
    qs=Addon.objects.valid_and_disabled_and_pending)
def author_addon_clicked(f):
    """Decorator redirecting clicks on "Other add-ons by author"."""
    @functools.wraps(f)
    def decorated(request, *args, **kwargs):
        selected = request.GET.get('addons-author-addons-select', None)
        if not selected:
            # No selection in the query string: fall through to the view.
            return f(request, *args, **kwargs)
        try:
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[int(selected)]))
        except ValueError:
            return http.HttpResponseBadRequest('Invalid add-on ID.')
    return decorated
@addon_valid_disabled_pending_view
@non_atomic_requests
def addon_detail(request, addon):
    """Add-ons details page dispatcher.

    Routes to the persona or extension detail view, renders a 404-status
    "disabled" page for disabled add-ons, or redirects to an app that
    supports the add-on's type.
    """
    if addon.is_deleted or (addon.is_pending() and not addon.is_persona()):
        # Allow pending themes to be listed.
        raise http.Http404
    if addon.is_disabled:
        # Dedicated template, but still served with a 404 status code.
        return render(request, 'addons/impala/disabled.html',
                      {'addon': addon}, status=404)

    # addon needs to have a version and be valid for this app.
    if addon.type in request.APP.types:
        if addon.type == amo.ADDON_PERSONA:
            return persona_detail(request, addon)
        else:
            if not addon.current_version:
                raise http.Http404
            return extension_detail(request, addon)
    else:
        # Redirect to an app that supports this type.
        try:
            new_app = [a for a in amo.APP_USAGE if addon.type
                       in a.types][0]
        except IndexError:
            raise http.Http404
        else:
            prefixer = urlresolvers.get_url_prefix()
            prefixer.app = new_app.short
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[addon.slug]))
@vary_on_headers('X-Requested-With')
@non_atomic_requests
def extension_detail(request, addon):
    """Extensions details page."""
    # If current version is incompatible with this app, redirect.
    comp_apps = addon.compatible_apps
    if comp_apps and request.APP not in comp_apps:
        prefixer = urlresolvers.get_url_prefix()
        # NOTE: indexing .keys() directly is a Python 2 idiom (this module
        # predates py3; dict views are not indexable there).
        prefixer.app = comp_apps.keys()[0].short
        return redirect('addons.detail', addon.slug, permanent=True)

    # Popular collections this addon is part of.
    collections = Collection.objects.listed().filter(
        addons=addon, application=request.APP.id)

    ctx = {
        'addon': addon,
        'src': request.GET.get('src', 'dp-btn-primary'),
        'version_src': request.GET.get('src', 'dp-btn-version'),
        'tags': addon.tags.not_denied(),
        'grouped_ratings': GroupedRating.get(addon.id),
        'review_form': ReviewForm(),
        'reviews': Review.without_replies.all().filter(
            addon=addon, is_latest=True),
        'get_replies': Review.get_replies,
        'collections': collections.order_by('-subscribers')[:3],
        'abuse_form': AbuseForm(request=request),
    }

    # details.html just returns the top half of the page for speed. The bottom
    # does a lot more queries we don't want on the initial page load.
    if request.is_ajax():
        # Other add-ons/apps from the same author(s).
        ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
        return render(request, 'addons/impala/details-more.html', ctx)
    else:
        return render(request, 'addons/impala/details.html', ctx)
@mobilized(extension_detail)
@non_atomic_requests
def extension_detail(request, addon):
    """Mobile extension details page; flags Firefox-for-iOS user agents."""
    user_agent = request.META.get('HTTP_USER_AGENT')
    ios_user = bool(user_agent) and 'FxiOS' in user_agent
    return render(request, 'addons/mobile/details.html',
                  {'addon': addon, 'ios_user': ios_user})
def _category_personas(qs, limit):
    """Return a cached random slice (up to `limit`) of the personas in qs."""
    def fetch():
        return randslice(qs, limit=limit)
    cache_key = 'cat-personas:' + qs.query_key()
    return caching.cached(fetch, cache_key)
@mobile_template('addons/{mobile/}persona_detail.html')
@non_atomic_requests
def persona_detail(request, addon, template=None):
    """Details page for Personas (themes); `template` is set by the decorator."""
    if not (addon.is_public() or addon.is_pending()):
        raise http.Http404

    persona = addon.persona

    # This persona's categories.
    categories = addon.categories.all()
    category_personas = None
    if categories.exists():
        # Cached random sample of other personas from the first category.
        qs = Addon.objects.public().filter(categories=categories[0])
        category_personas = _category_personas(qs, limit=6)

    data = {
        'addon': addon,
        'persona': persona,
        'categories': categories,
        'author_personas': persona.authors_other_addons()[:3],
        'category_personas': category_personas,
    }

    # Link to the first author's gallery, or None when there are no authors.
    try:
        author = addon.authors.all()[0]
    except IndexError:
        author = None
    else:
        author = author.get_url_path(src='addon-detail')
    data['author_gallery'] = author

    if not request.MOBILE:
        # tags — desktop-only extras (reviews, tags, abuse form).
        dev_tags, user_tags = addon.tags_partitioned_by_developer
        data.update({
            'dev_tags': dev_tags,
            'user_tags': user_tags,
            'review_form': ReviewForm(),
            'reviews': Review.without_replies.all().filter(
                addon=addon, is_latest=True),
            'get_replies': Review.get_replies,
            'search_cat': 'themes',
            'abuse_form': AbuseForm(request=request),
        })

    return render(request, template, data)
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.

    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view.

    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """

    def __init__(self, request, base, key, default, model=Addon):
        self.opts_dict = dict(self.opts)
        # ``extras`` is an optional second set of (key, title) pairs.
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)

    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            title = self.extras_dict[opt]
        return opt, title

    def all(self):
        """Get a full mapping of {option: queryset}."""
        return dict((field, self.filter(field)) for field in dict(self.opts))

    def filter(self, field):
        """Get the queryset for the given field via its filter_<field> method."""
        return getattr(self, 'filter_{0}'.format(field))()

    def filter_featured(self):
        # Random featured ids, then force that ordering at the SQL level.
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.base_queryset, ids, 'addons.id')

    def filter_free(self):
        # Addon's manager needs the app; other models' top_free() does not.
        if self.model == Addon:
            return self.base_queryset.top_free(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_free(listed=False)

    def filter_paid(self):
        if self.model == Addon:
            return self.base_queryset.top_paid(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_paid(listed=False)

    def filter_popular(self):
        return self.base_queryset.order_by('-weekly_downloads')

    def filter_downloads(self):
        # Alias: "downloads" sorts the same way as "popular".
        return self.filter_popular()

    def filter_users(self):
        return self.base_queryset.order_by('-average_daily_users')

    def filter_created(self):
        return self.base_queryset.order_by('-created')

    def filter_updated(self):
        return self.base_queryset.order_by('-last_updated')

    def filter_rating(self):
        return self.base_queryset.order_by('-bayesian_rating')

    def filter_hotness(self):
        return self.base_queryset.order_by('-hotness')

    def filter_name(self):
        # Name sorting must go through the translations table.
        return order_by_translation(self.base_queryset.all(), 'name')
class ESBaseFilter(BaseFilter):
    """BaseFilter that uses elasticsearch."""

    def __init__(self, request, base, key, default):
        super(ESBaseFilter, self).__init__(request, base, key, default)

    def filter(self, field):
        """Map the UI sort key to its elasticsearch sort field and apply it."""
        es_sorts = {
            'name': 'name_sort',
            'created': '-created',
            'updated': '-last_updated',
            'popular': '-weekly_downloads',
            'users': '-average_daily_users',
            'rating': '-bayesian_rating',
        }
        return self.base_queryset.order_by(es_sorts[field])
@non_atomic_requests
def home(request):
    """Desktop homepage: featured/up-and-coming extensions, themes, collections."""
    # Add-ons.
    base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)
    # This is lame for performance. Kill it with ES.
    frozen = list(FrozenAddon.objects.values_list('addon', flat=True))

    # We want to display 6 Featured Extensions, Up & Coming Extensions and
    # Featured Themes.
    featured = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_EXTENSION)[:6]
    hotness = base.exclude(id__in=frozen).order_by('-hotness')[:6]
    personas = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_PERSONA)[:6]

    # Most Popular extensions is a simple links list, we display slightly more.
    popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]

    # We want a maximum of 6 Featured Collections as well (though we may get
    # fewer than that).
    collections = Collection.objects.filter(listed=True,
                                            application=request.APP.id,
                                            type=amo.COLLECTION_FEATURED)[:6]

    return render(request, 'addons/home.html',
                  {'popular': popular, 'featured': featured,
                   'hotness': hotness, 'personas': personas,
                   'src': 'homepage', 'collections': collections})
@mobilized(home)
@non_atomic_requests
def home(request):
    """Mobile homepage: 3 random featured and 3 random popular add-ons."""
    # Shuffle the list and get 3 items.
    def rand(xs):
        # random.shuffle() mutates in place and returns None, so the `or`
        # always evaluates to xs[:3] of the now-shuffled list.
        return random.shuffle(xs) or xs[:3]

    # Get some featured add-ons with randomness.
    featured = Addon.featured_random(request.APP, request.LANG)[:3]
    # Get 10 popular add-ons, then pick 3 at random.
    qs = list(Addon.objects.listed(request.APP)
              .filter(type=amo.ADDON_EXTENSION)
              .order_by('-average_daily_users')
              .values_list('id', flat=True)[:10])
    popular = rand(qs)

    # Do one query and split up the add-ons.
    addons = (Addon.objects.filter(id__in=featured + popular)
              .filter(type=amo.ADDON_EXTENSION))
    featured = [a for a in addons if a.id in featured]
    popular = sorted([a for a in addons if a.id in popular],
                     key=attrgetter('average_daily_users'), reverse=True)

    if not request.META.get('HTTP_USER_AGENT'):
        ios_user = False
    else:
        ios_user = 'FxiOS' in request.META.get('HTTP_USER_AGENT')

    return render(request, 'addons/mobile/home.html',
                  {'featured': featured, 'popular': popular,
                   'ios_user': ios_user})
@non_atomic_requests
def homepage_promos(request):
    """Serve the discovery-pane promo modules for the homepage."""
    from olympia.legacy_discovery.views import promos
    version = request.GET.get('version')
    platform = request.GET.get('platform')
    # With neither parameter present there is nothing sensible to render.
    if not version and not platform:
        raise http.Http404
    return promos(request, 'home', version, platform)
@addon_view
@non_atomic_requests
def eula(request, addon, file_id=None):
    """Show an add-on's EULA; bounce to the detail page when there is none."""
    if not addon.eula:
        return http.HttpResponseRedirect(addon.get_url_path())
    version = (get_object_or_404(addon.versions, files__id=file_id)
               if file_id else addon.current_version)
    context = {'addon': addon, 'version': version}
    return render(request, 'addons/eula.html', context)
@addon_view
@non_atomic_requests
def privacy(request, addon):
    """Show an add-on's privacy policy; redirect when it has none."""
    if not addon.privacy_policy:
        return http.HttpResponseRedirect(addon.get_url_path())
    context = {'addon': addon}
    return render(request, 'addons/privacy.html', context)
@addon_view
@non_atomic_requests
def developers(request, addon, page):
    """"Meet the developers" page; `page` selects which tracking srcs apply."""
    if addon.is_persona():
        raise http.Http404()
    if 'src' in request.GET:
        # An explicit ?src= overrides both values.
        contribution_src = src = request.GET['src']
    else:
        page_srcs = {
            'developers': ('developers', 'meet-developers'),
            'installed': ('meet-the-developer-post-install', 'post-download'),
            'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),
        }

        # Download src and contribution_src are different.
        # NOTE(review): an unknown `page` makes .get() return None and the
        # unpacking raise TypeError rather than 404 — presumably the URL conf
        # constrains `page` to these keys; confirm.
        src, contribution_src = page_srcs.get(page)
    return render(request, 'addons/impala/developers.html',
                  {'addon': addon, 'page': page, 'src': src,
                   'contribution_src': contribution_src})
@addon_view
@anonymous_csrf_exempt
@post_required
@non_atomic_requests
def contribute(request, addon):
    """Start a PayPal contribution for an add-on (or its charity).

    Validates the amount/comment, requests a paykey from PayPal, records a
    Contribution row on success, and answers with either a redirect into
    the PayPal flow or a JSON payload for AJAX callers.
    NOTE(review): Python 2 era code (u'' literals elsewhere in this module);
    reusing `error` after the `except ... as error` block would be a
    NameError under Python 3.
    """
    # Enforce paypal-imposed comment length limit
    commentlimit = PAYPAL_MAX_COMMENT_LENGTH

    contrib_type = request.POST.get('type', 'suggested')
    is_suggested = contrib_type == 'suggested'
    source = request.POST.get('source', '')
    comment = request.POST.get('comment', '')

    # 'suggested' uses the add-on's configured amount; 'onetime' takes the
    # amount from the POST body; anything else falls through to the default.
    amount = {
        'suggested': addon.suggested_amount,
        'onetime': request.POST.get('onetime-amount', '')
    }.get(contrib_type, '')
    if not amount:
        amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION

    form = ContributionForm({'amount': amount})
    if len(comment) > commentlimit or not form.is_valid():
        return http.HttpResponse(json.dumps({'error': 'Invalid data.',
                                             'status': '', 'url': '',
                                             'paykey': ''}),
                                 content_type='application/json')

    contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()

    if addon.charity:
        # TODO(andym): Figure out how to get this in the addon authors
        # locale, rather than the contributors locale.
        name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
                           addon.charity.paypal)
    else:
        name, paypal_id = addon.name, addon.paypal_id
    # l10n: {0} is the addon name
    contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))

    paykey, error, status = '', '', ''
    try:
        paykey, status = paypal.get_paykey(
            dict(amount=amount,
                 email=paypal_id,
                 ip=request.META.get('REMOTE_ADDR'),
                 memo=contrib_for,
                 pattern='addons.paypal',
                 slug=addon.slug,
                 uuid=contribution_uuid))
    except paypal.PaypalError as error:
        log.error(
            'Error getting paykey, contribution for addon '
            '(addon: %s, contribution: %s)'
            % (addon.pk, contribution_uuid), exc_info=True)

    if paykey:
        # Only persist the contribution once PayPal has issued a paykey.
        contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
                               amount=amount, source=source,
                               source_locale=request.LANG,
                               annoying=addon.annoying,
                               uuid=str(contribution_uuid),
                               is_suggested=is_suggested,
                               suggested_amount=addon.suggested_amount,
                               comment=comment, paykey=paykey)
        contrib.save()

    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.GET.get('result_type') == 'json' or request.is_ajax():
        # If there was an error getting the paykey, then JSON will
        # not have a paykey and the JS can cope appropriately.
        return http.HttpResponse(json.dumps({'url': url,
                                             'paykey': paykey,
                                             'error': str(error),
                                             'status': status}),
                                 content_type='application/json')
    return http.HttpResponseRedirect(url)
@csrf_exempt
@addon_view
@non_atomic_requests
def paypal_result(request, addon, status):
    """Landing page PayPal returns the user to after a contribution attempt."""
    # Renamed local: the original shadowed the imported `uuid` module.
    contribution_uuid = request.GET.get('uuid')
    if not contribution_uuid:
        raise http.Http404()
    if status == 'cancel':
        log.info('User cancelled contribution: %s' % contribution_uuid)
    else:
        log.info('User completed contribution: %s' % contribution_uuid)
    response = render(request, 'addons/paypal_result.html',
                      {'addon': addon, 'status': status})
    # Allow this page to be embedded in the PayPal flow's frame.
    response['x-frame-options'] = 'allow'
    return response
@addon_view
@non_atomic_requests
def license(request, addon, version=None):
    """Show the license for a specific (or the current) listed version.

    NOTE: the name shadows the `license` builtin; kept because the URL conf
    references this view by name.
    """
    if version is not None:
        # Restrict to listed versions with valid files before matching.
        qs = addon.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED,
                                   files__status__in=amo.VALID_FILE_STATUSES)
        version = get_list_or_404(qs, version=version)[0]
    else:
        version = addon.current_version
    if not (version and version.license):
        raise http.Http404
    return render(request, 'addons/impala/license.html',
                  dict(addon=addon, version=version))
@non_atomic_requests
def license_redirect(request, version):
    """Permanently redirect a bare license URL to its versioned license page."""
    version_obj = get_object_or_404(Version.objects, pk=version)
    return redirect(version_obj.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
@non_atomic_requests
def report_abuse(request, addon):
    """Display and process the abuse-report form for an add-on."""
    form = AbuseForm(request.POST or None, request=request)
    # Guard clause: anything but a valid POST just renders the form page.
    if request.method != "POST" or not form.is_valid():
        return render(request, 'addons/report_abuse_full.html',
                      {'addon': addon, 'abuse_form': form})
    send_abuse_report(request, addon, form.cleaned_data['text'])
    messages.success(request, _('Abuse reported.'))
    return http.HttpResponseRedirect(addon.get_url_path())
@cache_control(max_age=60 * 60 * 24)
@non_atomic_requests
def persona_redirect(request, persona_id):
    """Redirect a legacy getpersonas id to the add-on detail page (cached 24h)."""
    if persona_id == 0:
        # Newer themes have persona_id == 0, doesn't mean anything.
        # NOTE(review): if the URL conf passes persona_id as a str, this int
        # comparison never matches — confirm it is converted upstream.
        return http.HttpResponseNotFound()
    persona = get_object_or_404(Persona.objects, persona_id=persona_id)
    try:
        to = reverse('addons.detail', args=[persona.addon.slug])
    except Addon.DoesNotExist:
        # Would otherwise throw 500. Something funky happened during GP
        # migration which caused some Personas to be without Addons (problem
        # with cascading deletes?). Tell GoogleBot these are dead with a 404.
        return http.HttpResponseNotFound()
    return http.HttpResponsePermanentRedirect(to)
@non_atomic_requests
def icloud_bookmarks_redirect(request):
    """Divert the icloud-bookmarks detail page while its waffle switch is on."""
    if not waffle.switch_is_active('icloud_bookmarks_redirect'):
        return addon_detail(request, 'icloud-bookmarks')
    return redirect('/blocked/i1214/', permanent=False)
class AddonViewSet(RetrieveModelMixin, GenericViewSet):
    """Detail API endpoint for add-ons, looked up by pk, slug or guid."""
    permission_classes = [
        AnyOf(AllowReadOnlyIfPublic, AllowAddonAuthor,
              AllowReviewer, AllowReviewerUnlisted),
    ]
    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData
    # Permission classes disallow access to non-public/unlisted add-ons unless
    # logged in as a reviewer/addon owner/admin, so we don't have to filter the
    # base queryset here.
    queryset = Addon.objects.all()
    lookup_value_regex = '[^/]+'  # Allow '.' for email-like guids.

    def get_queryset(self):
        # Special case: admins - and only admins - can see deleted add-ons.
        # This is handled outside a permission class because that condition
        # would pollute all other classes otherwise.
        if self.request.user.is_authenticated() and self.request.user.is_staff:
            return Addon.unfiltered.all()
        return super(AddonViewSet, self).get_queryset()

    def get_serializer_class(self):
        # Override serializer to use serializer_class_with_unlisted_data if
        # we are allowed to access unlisted data.
        # FIX: getattr() now has a None default — the original 2-arg form
        # raised AttributeError whenever this ran before get_object() had
        # set self.instance.
        obj = getattr(self, 'instance', None)
        request = self.request
        if (acl.check_unlisted_addons_reviewer(request) or
                (obj and request.user.is_authenticated() and
                 obj.authors.filter(pk=request.user.pk).exists())):
            return self.serializer_class_with_unlisted_data
        return self.serializer_class

    def get_lookup_field(self, identifier):
        """Return which model field ('pk', 'guid' or 'slug') matches identifier."""
        lookup_field = 'pk'
        if identifier and not identifier.isdigit():
            # If the identifier contains anything other than a digit, it's
            # either a slug or a guid. guids need to contain either {} or @,
            # which are invalid in a slug.
            if amo.ADDON_GUID_PATTERN.match(identifier):
                lookup_field = 'guid'
            else:
                lookup_field = 'slug'
        return lookup_field

    def get_object(self):
        # Route the generic 'pk' URL kwarg to whichever field it looks like,
        # and cache the resolved instance for get_serializer_class().
        identifier = self.kwargs.get('pk')
        self.lookup_field = self.get_lookup_field(identifier)
        self.kwargs[self.lookup_field] = identifier
        self.instance = super(AddonViewSet, self).get_object()
        return self.instance

    @detail_route()
    def feature_compatibility(self, request, pk=None):
        """Serialize the add-on's feature compatibility record."""
        obj = self.get_object()
        serializer = AddonFeatureCompatibilitySerializer(
            obj.feature_compatibility,
            context=self.get_serializer_context())
        return Response(serializer.data)

    @detail_route()
    def eula_policy(self, request, pk=None):
        """Serialize the add-on's EULA and privacy policy."""
        obj = self.get_object()
        serializer = AddonEulaPolicySerializer(
            obj, context=self.get_serializer_context())
        return Response(serializer.data)
class AddonChildMixin(object):
    """Mixin giving nested viewsets access to their parent add-on."""

    def get_addon_object(self, permission_classes=None):
        """Return (and cache) the parent Addon object for this view.

        The add-on is looked up from the `addon_pk` URL kwarg. By default
        the parent AddonViewSet.permission_classes are enforced while
        loading it; pass `permission_classes` to enforce a different set
        instead.
        """
        try:
            # Already resolved for this view instance: reuse it.
            return self.addon_object
        except AttributeError:
            pass
        if permission_classes is None:
            permission_classes = AddonViewSet.permission_classes
        parent_viewset = AddonViewSet(
            request=self.request,
            permission_classes=permission_classes,
            kwargs={'pk': self.kwargs['addon_pk']})
        self.addon_object = parent_viewset.get_object()
        return self.addon_object
class AddonVersionViewSet(AddonChildMixin, RetrieveModelMixin,
                          ListModelMixin, GenericViewSet):
    """Retrieve/list endpoint for the versions of a given add-on.

    Permissions and the base queryset both vary with the optional
    `filter` GET parameter; see check_permissions() and get_queryset().
    """
    # Permissions are always checked against the parent add-on in
    # get_addon_object() using AddonViewSet.permission_classes so we don't need
    # to set any here. Some extra permission classes are added dynamically
    # below in check_permissions() and check_object_permissions() depending on
    # what the client is requesting to see.
    permission_classes = []
    # By default, we rely on queryset filtering to hide non-public/unlisted
    # versions. get_queryset() might override this if we are asked to see
    # non-valid, deleted and/or unlisted versions explicitly.
    queryset = Version.objects.filter(
        files__status=amo.STATUS_PUBLIC,
        channel=amo.RELEASE_CHANNEL_LISTED).distinct()
    serializer_class = VersionSerializer

    def check_permissions(self, request):
        """Install filter-dependent permission classes, then check them
        against the parent add-on (for list) or normally (otherwise)."""
        requested = self.request.GET.get('filter')
        if self.action == 'list':
            if requested == 'all_with_deleted':
                # To see deleted versions, you need Admin:%.
                self.permission_classes = [GroupPermission('Admin', '%')]
            elif requested == 'all_with_unlisted':
                # To see unlisted versions, you need to be add-on author or
                # unlisted reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewerUnlisted, AllowAddonAuthor)]
            elif requested == 'all_without_unlisted':
                # To see all listed versions (not just public ones) you need to
                # be add-on author or reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewer, AllowAddonAuthor)]
            # When listing, we can't use AllowRelatedObjectPermissions() with
            # check_permissions(), because AllowAddonAuthor needs an author to
            # do the actual permission check. To work around that, we call
            # super + check_object_permission() ourselves, passing down the
            # addon object directly.
            return super(AddonVersionViewSet, self).check_object_permissions(
                request, self.get_addon_object())
        super(AddonVersionViewSet, self).check_permissions(request)

    def check_object_permissions(self, request, obj):
        """Check permissions against a single Version instance, tightening
        the permission classes for deleted/unlisted/non-public versions."""
        # If the instance is marked as deleted and the client is not allowed to
        # see deleted instances, we want to return a 404, behaving as if it
        # does not exist.
        if (obj.deleted and
                not GroupPermission('Admin', '%').has_object_permission(
                    request, self, obj)):
            raise http.Http404

        if obj.channel == amo.RELEASE_CHANNEL_UNLISTED:
            # If the instance is unlisted, only allow unlisted reviewers and
            # authors.
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewerUnlisted, AllowAddonAuthor)])
            ]
        elif not obj.is_public():
            # If the instance is disabled, only allow reviewers and authors.
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewer, AllowAddonAuthor)])
            ]
        super(AddonVersionViewSet, self).check_object_permissions(request, obj)

    def get_queryset(self):
        """Return the right base queryset depending on the situation.

        Raises a ValidationError for a `filter` parameter that is unknown
        or used outside of the list action.
        """
        requested = self.request.GET.get('filter')
        valid_filters = (
            'all_with_deleted',
            'all_with_unlisted',
            'all_without_unlisted',
            'only_beta'
        )

        if requested is not None:
            if self.action != 'list':
                raise serializers.ValidationError(
                    'The "filter" parameter is not valid in this context.')
            elif requested not in valid_filters:
                raise serializers.ValidationError(
                    'Invalid "filter" parameter specified.')

        # By default we restrict to valid, listed versions. Some filtering
        # options are available when listing, and in addition, when returning
        # a single instance, we don't filter at all.
        if requested == 'all_with_deleted' or self.action != 'list':
            self.queryset = Version.unfiltered.all()
        elif requested == 'all_with_unlisted':
            self.queryset = Version.objects.all()
        elif requested == 'all_without_unlisted':
            self.queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED)
        elif requested == 'only_beta':
            self.queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED,
                files__status=amo.STATUS_BETA).distinct()

        # Now that the base queryset has been altered, call super() to use it.
        qs = super(AddonVersionViewSet, self).get_queryset()

        # Filter with the add-on.
        return qs.filter(addon=self.get_addon_object())
class AddonSearchView(ListAPIView):
    """Public, anonymous add-on search endpoint backed by Elasticsearch."""
    authentication_classes = []
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        SortingFilter,
    ]
    pagination_class = ESPageNumberPagination
    permission_classes = []
    serializer_class = ESAddonSerializer

    def get_queryset(self):
        """Build a fresh elasticsearch-dsl Search over the add-on index."""
        es_connection = amo.search.get_es()
        return Search(
            using=es_connection,
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name())

    @classmethod
    def as_view(cls, **kwargs):
        """Exempt the view from the per-request database transaction."""
        return non_atomic_requests(
            super(AddonSearchView, cls).as_view(**kwargs))
class AddonFeaturedView(GenericAPIView):
    """Return a random selection of featured add-ons."""
    authentication_classes = []
    permission_classes = []
    serializer_class = AddonSerializer
    # We accept the 'page_size' parameter but we do not allow pagination for
    # this endpoint since the order is random.
    pagination_class = None

    queryset = Addon.objects.valid()

    def get(self, request, *args, **kwargs):
        """Serialize the filtered queryset as pagination-like results,
        without an actual paginator."""
        filtered = self.filter_queryset(self.get_queryset())
        data = self.get_serializer(filtered, many=True).data
        return Response({'results': data})

    @classmethod
    def as_view(cls, **kwargs):
        """Exempt the view from the per-request database transaction."""
        return non_atomic_requests(
            super(AddonFeaturedView, cls).as_view(**kwargs))

    def filter_queryset(self, queryset):
        """Restrict `queryset` to the featured ids matching the request."""
        # An optional lang parameter can be forwarded to get_creatured_ids()
        # or get_featured_ids() to obtain locale-specific results on top of
        # the generic ones.
        lang = self.request.GET.get('lang')
        if 'category' in self.request.GET:
            # With a category, the app and type parameters become mandatory:
            # category slugs are not unique, so AddonCategoryFilterParam
            # parses the request to pin down the exact category constant to
            # pass to get_creatured_ids().
            try:
                category = AddonCategoryFilterParam(self.request).get_value()
            except ValueError:
                raise ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_creatured_ids(category, lang)
        else:
            # Without a category, only the app parameter is mandatory - it
            # tells get_featured_ids() which collection to pick add-ons
            # from. Type is an optional extra filter.
            try:
                app = AddonAppFilterParam(
                    self.request).get_object_from_reverse_dict()
                if 'type' in self.request.GET:
                    type_ = AddonTypeFilterParam(self.request).get_value()
                else:
                    type_ = None
            except ValueError:
                raise ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_featured_ids(app, lang=lang, type=type_)
        # ids is a random list; slice it to the requested size before
        # manual_order(), which feeds the ids into an id__in filter.
        try:
            page_size = int(
                self.request.GET.get('page_size', api_settings.PAGE_SIZE))
        except ValueError:
            raise ParseError('Invalid page_size parameter')
        return manual_order(queryset, ids[:page_size], 'addons.id')
class StaticCategoryView(ListAPIView):
    """List every static category constant, cached client-side."""
    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = StaticCategorySerializer

    def get_queryset(self):
        """Return the category constants ordered by id."""
        categories = list(CATEGORIES_BY_ID.values())
        categories.sort(key=lambda category: category.id)
        return categories

    @classmethod
    def as_view(cls, **kwargs):
        """Exempt the view from the per-request database transaction."""
        return non_atomic_requests(
            super(StaticCategoryView, cls).as_view(**kwargs))

    def finalize_response(self, request, response, *args, **kwargs):
        """Stamp a 6-hour Cache-Control max-age on the finalized response."""
        response = super(StaticCategoryView, self).finalize_response(
            request, response, *args, **kwargs)
        patch_cache_control(response, max_age=60 * 60 * 6)
        return response
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.