gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/3/13
###Purpose: visualize results of time-based epidemic simulations when aligned by epidemic time, which is defined as aligning tsteps at which simulation attained 5% of cumulative infections during the epidemic
#### pairs with age_perc_T_time.py
###Import data:
###Command Line: python age_perc_T_epitime_viz.py
##############################################
####### notes #######
### codebook of age class codes
# '1' - Toddlers: 0-2
# '2' - Preschool: 3-4
# '3' - Children: 5-18
# '4' - Adults: 19-64
# '5' - Seniors: 65+ (community)
# '6' - Elders: 65+ (nursing home)
# There are only 94 "elders" in the Vancouver network, and they all reside in one nursing home, so they can be combined with the seniors for analysis purposes (all_elderly).
### packages/modules ###
import matplotlib.pyplot as plt
import numpy as np
import pretty_print as pp
from collections import defaultdict
import zipfile
import percolations as perc
from time import clock
### plotting settings ###
# one color per beta value when multiple betas are drawn on one plot
colorvec = ['black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
### data processing parameters ###
# fraction of cumulative epidemic infections that defines the alignment tstep
align_prop = 0.05
### pickled data parameters ###
numsims = 1000 # number of simulations
size_epi = 515 # threshold value that designates an epidemic in the network (5% of network)
# gamma = probability of recovery at each time step
# on avg, assume 5 days till recovery
gamma = 0.2
# assume T ranges from 0.0 to 0.2, gamma = 1/5 and T = beta / (beta + gamma)
T1, T2 = 0.0, 0.2
# T1, T2 = 0.075, 0.075
# T1, T2 = 0.0643, 0.0643
# invert T = beta / (beta + gamma) to solve for beta at each T endpoint
b1, b2 = (-T1 * gamma)/(T1 - 1), (-T2 * gamma)/(T2 - 1) # 0, .05
blist = np.linspace(b1, b2, num=11, endpoint=True) # probability of transmission
# data structures
# d_node_age[str(node)] = age class
d_node_age = {}
### ziparchive to read and write results ###
zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/beta_time_%ssims_beta%.3f-%.3f_vax0.zip' %(numsims, b1, b2)
#############################################
# age data processing
# Each line of the CSV holds whitespace-separated "node,ageclass" tokens;
# parse them into the node->ageclass dictionary.
# Fixes: file handle is now closed via a context manager, and the inner
# loop no longer shadows the outer loop variable `line`.
with open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_ages_Sarah.csv') as graph_ages: # node number and age class
    for line in graph_ages:
        for entry in line.split():
            node, age = entry.split(',')
            d_node_age[node] = age # node-ageclass dictionary
# define network size
N = len(d_node_age)
# create binary lists to indicate children and adults
# (nodes are numbered 1..N; '3' = children 5-18, '4' = adults 19-64)
ch = [1 if d_node_age[str(node)] == '3' else 0 for node in xrange(1, int(N) + 1)]
ad = [1 if d_node_age[str(node)] == '4' else 0 for node in xrange(1, int(N) + 1)]
##############################################
# data processing - convert tstep info into dictionaries
# (structure of each dictionary documented in the commented codebook below)
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = defaultdict(list), defaultdict(list), {}, defaultdict(list), defaultdict(list)
# # dict_epiincid[(beta, simnumber, 'T', 'C' or 'A')] = [T, C or A incid at tstep 0, T, C or A incid at tstep 1...], where incidence is simply number of new cases (raw)
# dict_epiincid = defaultdict(list)
# # dict_epiAR[(beta, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per population size
# dict_epiAR = defaultdict(list)
# # dict_epiOR[(beta, simnumber)] = [OR at tstep0, OR at tstep1...]
# dict_epiOR = defaultdict(list)
# # dict_epiOR_filt[(beta, simnum)] = [OR for each time step for epidemics only where OR is nan when we want to exclude the time point due to small infected numbers]
# dict_epiOR_filt = defaultdict(list)
# # dict_epiresults[(beta, simnumber)] = (episize, c_episize, a_episize)
# dict_epiresults = {}
for beta in blist:
    processing = clock()  # wall-clock timer for this beta's processing
    # reference filenames in zipfolder
    Itstep_file = 'Results/Itstep_beta_time_%ssims_beta%.3f_vax0.txt' %(numsims, beta)
    Rtstep_file = 'Results/Rtstep_beta_time_%ssims_beta%.3f_vax0.txt' %(numsims, beta)
    # recreate epidata from zip archive
    d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = perc.recreate_epidata(Itstep_file, Rtstep_file, zipname, beta, size_epi, ch, ad, d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt)
    print beta, "processed", clock() - processing
# grab unique list of betas that produced at least one epidemic
# (dictionary keys are (beta, simnumber, ...) tuples, so key[0] is beta)
beta_epi = list(set([key[0] for key in d_epiincid]))
##############################################
### plot filtered and aligned OR by time for each beta value ###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line, each beta is a diff color on one plot
for beta in beta_epi:
    ORonly = clock()  # timer for this beta's OR-only plot
    # PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
    # d_dummyalign_tstep[beta] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
    d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, beta, align_prop)
    # TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
    avg_align_tstep = 0
    # plot aligned data
    # zip beta, episim number, and tstep for 5% cum-inf for sims where (beta, episim number) is the key for d_epiOR_filt
    for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[beta]):
        # one grey line per simulation, truncated to start at its 5% tstep
        plt.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
    # horizontal reference line at OR = 1 (no child:adult difference)
    plt.plot(xrange(250), [1] * len(xrange(250)), marker = 'None', color = 'red', linewidth = 2)
    plt.xlabel('epidemic time step, beta: ' + str(beta) + ', 5-95% cum infections')
    plt.ylabel('OR, child:adult')
    plt.ylim([0, 8])
    plt.xlim([-1, 100])
    print "OR only", beta, clock() - ORonly
    # save plot
    figname = 'Figures/epiORalign_beta_time_%ssims_beta%.3f_vax0.png' %(numsims, beta)
    plt.savefig(figname)
    plt.close()
    pp.compress_to_ziparchive(zipname, figname)
    # plt.show()
##############################################
### plot filtered and aligned OR by time for each beta value ###
### secondary axis with child and adult incidence ###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line, each beta is a diff color on one plot
for beta in beta_epi:
ORincid = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
# d_dummyalign_tstep[beta] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, beta, align_prop)
# TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# PROCESS YAX_AR:
# call upon d_epiAR dictionary
# dict_epiAR[(beta, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per 100 individuals
# plot data
# create two y-axes
fig, yax_OR = plt.subplots()
yax_AR = yax_OR.twinx()
# zip beta, episim number, and tstep for 5% cum-inf for sims where (beta, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[beta]):
## OR y-axis
OR, = yax_OR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
## AR y-axis
child, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'C')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'C')][t5:]], marker = 'None', color = 'red')
adult, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'A')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'A')][t5:]], marker = 'None', color = 'blue')
# plot settings
lines = [OR, child, adult]
yax_OR.legend(lines, ['Odds Ratio', 'Child Incidence', 'Adult Incidence'], loc = 'upper right')
yax_OR.set_ylabel('OR, child:adult')
yax_OR.set_ylim([0, 8])
yax_OR.set_xlim([-1, 100])
yax_OR.set_xlabel('epidemic time step, beta: ' + str(beta) + ', 5-95% cum infections')
yax_AR.set_ylabel('Incidence per 100')
yax_AR.set_ylim([0, 8])
print "ORincid", beta, clock() - ORonly
# save plot
figname = 'Figures/epiORincid_beta_time_%ssims_beta%.3f_vax0.png' %(numsims, beta)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
|
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# builtin
import os
import itertools
import collections
import functools
import StringIO
# external
import xlrd
from lxml import etree
# internal
from sdv import errors, utils, xmlconst
# relative
from . import common
from .. import schematron
# Rule worksheet columns
COL_FIELD_NAME = 0
COL_OCCURRENCE = 1
COL_XSI_TYPES = 3
COL_ALLOWED_VALUES = 4
# Instance Mapping worksheet columns
COL_LABEL = 0
COL_SELECTORS = 1
COL_TYPE_NAMESPACE = 2
# Namespace worksheet columns
COL_NAMESPACE = 0
COL_ALIAS = 1
# Occurrence values
# Each tuple lists the accepted (case-insensitive) spellings of an
# occurrence keyword as it may appear in the profile worksheet.
OCCURRENCE_PROHIBITED = ('prohibited', 'must not')
OCCURRENCE_REQUIRED = ('required', 'must')
OCCURRENCE_OPTIONAL = ('optional', 'may')
OCCURRENCE_SUGGESTED = ('suggested', 'should')
OCCURRENCE_DISCOURAGED = ('should not',)
# Occurrences that do not generate presence rules on their own.
ALL_OPTIONAL_OCCURRENCES = tuple(
    itertools.chain(
        OCCURRENCE_OPTIONAL,
        OCCURRENCE_SUGGESTED,
        OCCURRENCE_DISCOURAGED
    )
)
# Every occurrence keyword recognized by the parser.
ALLOWED_OCCURRENCES = tuple(
    itertools.chain(
        OCCURRENCE_OPTIONAL,
        OCCURRENCE_PROHIBITED,
        OCCURRENCE_DISCOURAGED,
        OCCURRENCE_REQUIRED,
        OCCURRENCE_SUGGESTED
    )
)
# Used by profile schematron for reporting error line numbers.
SAXON_LINENO = '[<value-of select="saxon:line-number()"/>]'
# Used to get the name of the context node.
NAME = '<value-of select="name()"/>'
class InstanceMapping(object):
    """Contains information about an entry in the Instance Mapping worksheet
    of a Profile.

    Args:
        nsmap: A dictionary representation of the Namespaces worksheet.

    Attributes:
        selectors: A list of instance selectors for an Instance Mapping entry.
        namespace: The type namespace for an Instance Mapping entry.
        ns_alias: The namespace alias for the `namespace` to be used in the
            output profile schematron.
    """
    def __init__(self, nsmap):
        self._nsmap = nsmap       # {namespace: alias} lookup table
        self._ns_alias = None
        self.label = None         # context label for this entry
        self.selectors = None     # property setter normalizes to []
        self.namespace = None     # property setter clears namespace/alias

    @property
    def selectors(self):
        return self._selectors

    @selectors.setter
    def selectors(self, value):
        """Parses the cell value found in the Excel STIX profile for Instance
        Mapping selectors.

        Args:
            value: An single selector, list of selectors, or a
                comma-delimited string of selectors.
        """
        if not value:
            self._selectors = []
        elif isinstance(value, basestring):
            # Double quotes would break the schematron attribute quoting.
            self._selectors = [x.strip().replace('"', "'") for x in value.split(",")]
        elif hasattr(value, "__iter__"):
            self._selectors = [str(x) for x in value]
        else:
            self._selectors = [value]

    @property
    def namespace(self):
        return self._namespace

    @namespace.setter
    def namespace(self, value):
        """Sets the namespace and ns_alias properties.

        Raises:
            .ProfileParseError: if `value` is not found in the internal
                namespace dictionary.
        """
        if not value:
            self._namespace = None
            self._ns_alias = None
        elif value in self._nsmap:
            self._namespace = value
            self._ns_alias = self._nsmap[value]
        else:
            err = "Unable to map namespace '{ns}' to namespace alias"
            raise errors.ProfileParseError(err.format(ns=value))

    @property
    def ns_alias(self):
        return self._ns_alias

    def validate(self):
        """Checks that this is a valid InstanceMapping instance.

        Raises:
            errors.ProfileParseError: If ``namespace`` is ``None`` or
                any of the selector values are empty.
        """
        if not self.label:
            err = "Missing type label in Instance Mapping"
            raise errors.ProfileParseError(err)
        if not self.namespace:
            # BUG FIX: error message previously contained a stray doubled
            # apostrophe after the label ("...'{label}''...").
            err = "Missing namespace for '{label}' in Instance Mapping worksheet"
            raise errors.ProfileParseError(err.format(label=self.label))
        if not (self.selectors and all(self.selectors)):
            err = ("Empty selector for '{label}' in Instance Mapping worksheet. "
                   "Look for extra commas in field.")
            raise errors.ProfileParseError(err.format(label=self.label))
class Profile(collections.MutableSequence):
    """A mutable sequence of profile rules that serializes to a Schematron
    schema document.

    Args:
        namespaces: A dictionary of {namespace: alias} entries used to
            build the Schematron ``<ns>`` declarations.
    """
    def __init__(self, namespaces):
        self.id = "STIX_Schematron_Profile"
        # Every profile implicitly requires a STIX_Package document root.
        self._rules = [RootRule(namespaces)]
        self._namespaces = namespaces

    def insert(self, idx, value):
        # Empty/None rules are silently dropped.
        if not value:
            return
        self._rules.insert(idx, value)

    def __getitem__(self, key):
        return self._rules.__getitem__(key)

    def __setitem__(self, key, value):
        self._rules.__setitem__(key, value)

    def __delitem__(self, key):
        self._rules.__delitem__(key)

    def __len__(self):
        return len(self._rules)

    def __nonzero__(self):
        # Python 2 truth-value hook: a Profile is truthy if it has rules.
        return bool(self._rules)

    def _collect_rules(self):
        """Builds and returns a dictionary of ``BaseProfileRule``
        implementations from the internal storage. The key is the Rule context
        (e.g., "/", "stix:Indicator", "stix:STIX_Header/stix:Package_Intent").

        Determining the context of a profile rule is done by examining the
        following properties of the rule:

        * If the rule is a Prohibits or Requires occurrence check, the
          context is pulled directly from the _BaseProfileRule instance's
          ``context`` property. This value is derived from the context
          label associated with the rule entry in the profile worksheet.
        * If the rule checks for allowed values or implementations of an
          element the context will be a selector pointing directly to the
          element. This is done to cut down on validation noise (otherwise a
          missing element would raise errors for a required element being
          missing AND the element not containing an allowed value because it
          wasn't found at all).
        * If the rule checks for allowed values of an attribute, the rule
          context will pulled directly from the _BaseProfileRule instance's
          ``context`` property. This should probably follow the rules
          described above, but doesn't for no good reason.

        Returns:
            A dictionary of lists of rules associated by ``<rule>`` context.
        """
        collected = collections.defaultdict(list)
        for rule in self:
            collected[rule.context_selector].append(rule)
        return collected

    @property
    def rules(self):
        """Builds and returns a dictionary of ``BaseProfileRule``
        implementations. The key is the Rule context.
        """
        notype = schematron.make_pattern("no-type")
        typed = schematron.make_pattern("xsi-typed")
        rules = [notype, typed]
        collected = self._collect_rules()
        for ctx, profile_rules in collected.iteritems():
            rule = schematron.make_rule(ctx)
            rule.extend(x.as_etree() for x in profile_rules)
            # Contexts that test xsi:type belong to the "xsi-typed" pattern.
            if "@xsi:type=" in utils.strip_whitespace(ctx):
                typed.append(rule)
            else:
                notype.append(rule)
        return rules

    @property
    def namespaces(self):
        """Returns a list of etree Elements that represent Schematron
        ``<ns prefix='foo' uri='bar'>`` elements.
        """
        namespaces = []
        for ns, prefix in self._namespaces.iteritems():
            ns = schematron.make_ns(prefix, ns)
            namespaces.append(ns)
        return namespaces

    def as_etree(self):
        """Returns an etree Schematron document for this ``Profile``."""
        schema = schematron.make_schema()
        schema.extend(self.namespaces)
        schema.extend(self.rules)
        return schema
class _BaseProfileRule(object):
    """Base class for profile rules.

    Attributes:
        context: The context selector for this rule. This is determined by
            linking the rule context label to a selector.
        field: The name of the element or attribute for which this rule
            applies.

    Args:
        context: The context selector for this rule. This is determined by
            linking the rule context label to a selector.
        field: Tne name of the element or attribute for which this rule
            applies.
    """
    # Schematron test flavors: <report> fires when its test matches,
    # <assert> fires when its test fails.
    TYPE_REPORT = "report"
    TYPE_ASSERT = "assert"

    def __init__(self, field, instance_mapping):
        self._instance_mapping = instance_mapping
        self._type = None
        self._role = "error"
        # Union of all selectors mapped to the rule's context label.
        self._context = utils.union(instance_mapping.selectors)
        self.field = field

    def _validate(self):
        """Perform validation/sanity checks on the input values."""
        pass

    @property
    def field(self):
        return self._field

    @field.setter
    def field(self, value):
        # Attributes ('@x') and already-qualified names ('ns:x') pass
        # through untouched; bare element names get the instance mapping's
        # namespace alias prepended.
        if value.startswith("@"):
            self._field = value
        elif ":" in value:
            self._field = value
        else:
            prefix = self._instance_mapping.ns_alias
            self._field = "%s:%s" % (prefix, value)

    def typens(self):
        # NOTE(review): plain method, not a @property like the accessors
        # below -- confirm callers invoke it as typens().
        return self._instance_mapping.namespace

    @property
    def role(self):
        """Returns the Schematron assertion role for this rule."""
        return self._role

    @property
    def type(self):
        """The type of Schematron test: ``report`` or ``assert``."""
        return self._type

    @property
    def is_attr(self):
        """Returns ``True`` if this rule is defined for an attribute field."""
        return self.field.startswith("@")

    @property
    def message(self):
        """Returns the error message to be displayed if this rule does not
        evaluate successfully.
        """
        raise NotImplementedError()

    @property
    def test(self):
        """The xpath test to evaluate against a node."""
        raise NotImplementedError()

    @property
    def context_selector(self):
        """Returns the schematron rule context selector to be used for this
        schematron assert/report 'rule'.
        """
        raise NotImplementedError()

    @property
    def path(self):
        """Returns the fully qualified ``context/field`` path to the XML node
        for which this assert/report applies.
        """
        return "{0}/{1}".format(self._context, self.field)

    def as_etree(self):
        """Returns a Schematron ``<assert>`` or ``<report>`` for this
        profile rule.
        """
        kwargs = {
            'type': self.type,            # 'assert' or 'report'
            'ns': xmlconst.NS_SCHEMATRON, # schematron namespace
            'test': self.test,            # test selector
            'role': self.role,            # "error"
            'message': self.message,      # error message
            'line': SAXON_LINENO          # line number function
        }
        xml = '<{type} xmlns="{ns}" test="{test}" role="{role}">{message} {line}</{type}>'
        rule = etree.XML(xml.format(**kwargs))
        return rule
class RequiredRule(_BaseProfileRule):
    """Represents a profile rule which requires the presence of an element
    or attribute.

    This serializes to a Schematron ``<assert>`` directive as
    it will raise an error if the field is **not** found in the instance
    document.
    """
    def __init__(self, field, instance_mapping):
        super(RequiredRule, self).__init__(field, instance_mapping)
        self._type = self.TYPE_ASSERT

    @_BaseProfileRule.test.getter
    def test(self):
        return self.field

    @_BaseProfileRule.context_selector.getter
    def context_selector(self):
        return self._context

    # BUG FIX: was decorated @_BaseProfileRule.test.getter. That only
    # worked by accident (property.getter returns a fresh property bound
    # to the assigned name); use message.getter like the sibling classes.
    @_BaseProfileRule.message.getter
    def message(self):
        msg = "{parent}/{field} is required by this profile."
        return msg.format(parent=NAME, field=self.field)
class ProhibitedRule(_BaseProfileRule):
    """A profile rule forbidding the presence of a particular attribute
    or field.

    Serialized as a Schematron ``<report>`` directive: the error fires
    whenever the field **is found** in the instance document.
    """
    def __init__(self, field, instance_mapping):
        super(ProhibitedRule, self).__init__(field, instance_mapping)
        self._type = self.TYPE_REPORT

    @_BaseProfileRule.test.getter
    def test(self):
        # The field's mere presence triggers the report.
        return self.field

    @_BaseProfileRule.context_selector.getter
    def context_selector(self):
        return self._context

    @_BaseProfileRule.message.getter
    def message(self):
        template = "{parent}/{field} is prohibited by this profile."
        return template.format(parent=NAME, field=self.field)
class AllowedValuesRule(_BaseProfileRule):
    """Represents a profile rule which requires that a field value be one
    of a defined set of allowed values.

    This serializes to a schematron ``<assert>`` directive.
    """
    def __init__(self, field, instance_mapping, required=True, values=None):
        super(AllowedValuesRule, self).__init__(field, instance_mapping)
        self._type = self.TYPE_ASSERT
        self.is_required = required
        self.values = values

    @property
    def values(self):
        return self._values

    @values.setter
    def values(self, value):
        """Parses the cell value found in the Excel STIX profile for allowable
        values.

        Args:
            value: An allowed value, list of allowed values, or a
                comma-delimited string of allowed values.
        """
        if not value:
            self._values = []
        elif isinstance(value, basestring):
            self._values = [x.strip() for x in value.split(',')]
        elif hasattr(value, "__iter__"):
            # CONSISTENCY FIX: previously tested "__getitem__", unlike the
            # selectors/impls setters; that wrapped sets and generators as
            # a single value instead of expanding them.
            self._values = [str(x) for x in value]
        else:
            self._values = [value]

    @_BaseProfileRule.context_selector.getter
    def context_selector(self):
        return self._context

    @_BaseProfileRule.message.getter
    def message(self):
        msg = "The allowed values for {parent}/{field} are {values}."
        return msg.format(parent=NAME, field=self.field, values=self.values)

    @_BaseProfileRule.test.getter
    def test(self):
        """Returns a test to check that a field is equal to one of the
        allowable values.

        This expects the ``<assert>`` directive to be places within a rule
        where the selector is the field name if this rule applies to an
        element name.

        If the resulting ``<assert>`` applies to an attribute, this assumes
        that the ``<rule>`` context will point to a parent element.
        """
        test = " or ".join("%s='%s'" % (self.field, x) for x in self.values)
        if not self.is_required:
            # Optional fields: only check values when the field is present.
            test = "not({field}) or {values}".format(field=self.field, values=test)
        return test
class AllowedImplsRule(_BaseProfileRule):
    """A profile rule restricting a field's xsi:type implementations to a
    defined set. Serializes to a schematron ``<assert>`` directive.
    """
    def __init__(self, field, instance_mapping, required=True, impls=None):
        super(AllowedImplsRule, self).__init__(field, instance_mapping)
        self._type = self.TYPE_ASSERT
        self.is_required = required
        self.impls = impls

    def _validate(self):
        # Attributes cannot carry xsi:type implementations, so the rule is
        # only legal for element fields.
        if not self.is_attr:
            return
        err = ("Implementation rules cannot be applied to attribute fields: "
               "{0}".format(self.path))
        raise errors.ProfileParseError(err)

    @property
    def impls(self):
        return self._impls

    @impls.setter
    def impls(self, value):
        """Parses the cell value found in the Excel STIX profile for allowable
        implementations.

        Args:
            value: An allowed implementation value, list of allowed
                implementations, or a comma-delimited string of allowed
                implementations.
        """
        if not value:
            self._impls = []
        elif isinstance(value, basestring):
            self._impls = [x.strip() for x in value.split(',')]
        elif hasattr(value, "__iter__"):
            self._impls = [str(x) for x in value]
        else:
            self._impls = [value]

    @_BaseProfileRule.context_selector.getter
    def context_selector(self):
        return self._context

    @_BaseProfileRule.message.getter
    def message(self):
        msg = "The allowed implementations for {parent}/{field} are {types}"
        return msg.format(parent=NAME, field=self.field, types=self.impls)

    @_BaseProfileRule.test.getter
    def test(self):
        """Returns a test to check that a field implementation is set to
        one of the allowable values.

        This expects the ``<assert>`` directive to be places within a rule
        where the selector is the field name if this rule applies to an
        element name.
        """
        # A field with no xsi:type at all passes the check.
        notype = "not({field}/@xsi:type)".format(field=self.field)
        types = " or ".join("%s/@xsi:type='%s'" % (self.field, x) for x in self.impls)
        test = "{notype} or {types}".format(notype=notype, types=types)
        if not self.is_required:
            # Optional fields: only check types when the field is present.
            test = "not({field}) or {impls}".format(field=self.field, impls=test)
        return test
class RootRule(RequiredRule):
    """Requires that the validated document's root element is a
    ``stix:STIX_Package`` instance.
    """
    def __init__(self, nsmap):
        # Build an ad-hoc mapping anchored at the document root.
        mapping = InstanceMapping(nsmap=nsmap)
        mapping.namespace = "http://stix.mitre.org/stix-1"
        mapping.selectors = "/"
        super(RootRule, self).__init__(field="stix:STIX_Package", instance_mapping=mapping)

    @_BaseProfileRule.test.getter
    def test(self):
        return self.field

    @_BaseProfileRule.context_selector.getter
    def context_selector(self):
        return self._context

    @_BaseProfileRule.message.getter
    def message(self):
        return "The root element must be a STIX_Package instance"
class ProfileError(schematron.SchematronError):
    """Represents STIX profile validation error.

    Args:
        doc: The instance document which was validated and produced this error.
        error: The ``svrl:failed-assert`` or ``svrl:successful-report``
            ``etree._Element`` instance.

    Attributes:
        message: The STIX Profile validation error message.
    """
    def __init__(self, doc, error):
        super(ProfileError, self).__init__(doc, error)
        self._line = self._parse_line(error.node)

    def _parse_line(self, error):
        """Errors are reported as ``<error msg> [line number]``.

        This method parses the line number out of the error message.

        Returns:
            A string line number for the `error`.
        """
        text = super(ProfileError, self)._parse_message(error)
        if not text:
            return None
        # Split the string on whitespace.
        # Get the last item.
        # Strip the leading '[' and trailing ']'.
        line = text.split()[-1][1:-1]
        return line

    def __unicode__(self):
        # Pass-through override; behavior identical to the parent class.
        return super(ProfileError, self).__unicode__()

    def __str__(self):
        # Pass-through override; behavior identical to the parent class.
        return super(ProfileError, self).__str__()

    def _parse_message(self, error):
        """Parses the message component from the SVRL report error message.

        Profile error messages are formatted as follows:
        ``<Error message text> [<line number>]``.

        This method returns everything left of the line number marker `` [``.
        """
        text = super(ProfileError, self)._parse_message(error)
        if not text:
            return None
        return text[:text.rfind(' [')]
class ProfileValidationResults(schematron.SchematronValidationResults):
    """Represents STIX profile validation results. This is returned from
    the :meth:`STIXProfileValidator.validate` method.

    Args:
        is_valid: ``True`` if the document was valid and ``False`` otherwise.
        doc: The document that was validated. This is an instance of
            lxml._Element.
        svrl_report: The SVRL report. This is an instance of
            ``lxml.isoschematron.Schematron.validation_report``

    Attributes:
        errors: A list of :class:`ProfileError` instances representing
            errors found in the `svrl_report`.
    """
    def __init__(self, is_valid, doc=None, svrl_report=None):
        super(ProfileValidationResults, self).__init__(
            is_valid=is_valid,
            doc=doc,
            svrl_report=svrl_report
        )

    def _parse_errors(self, svrl_report):
        # Wrap each raw SVRL error element in a ProfileError.
        errors = self._get_errors(svrl_report)
        return [ProfileError(self._doc, x) for x in errors]
class STIXProfileValidator(schematron.SchematronValidator):
"""Performs STIX Profile validation.
Args:
profile_fn: The filename of a ``.xlsx`` STIX Profile document.
"""
def __init__(self, profile_fn):
profile = self._parse_profile(profile_fn)
super(STIXProfileValidator, self).__init__(schematron=profile.as_etree())
def _build_rules(self, info, field, occurrence, types, values):
"""Builds a ``_BaseProfileRule`` implementation list for the rule
parameters.
Each rule can be broken up into the following components:
* Context Label: Any label that can be mapped to one or more instance
document selectors. For example: 'indicator:Indicator' which could
be mapped ('//indicator:Indicator', '//stixCommon:Indicator',
'//stix:Indicator'). The context label does not need to refer to
a schema data type, but often does.
* Field Name: An element or attribute name held by structure pointed
to by the context label. For example, if the context label is
'indicator:Indicator' a field name could be '@version' or
'Title'. Attributes are prefaced by '@'.
* Occurrence: These are typically, 'prohibited', 'required', 'optional'
or 'suggested'. Rules are only created for 'required' and
'prohibited' occurrence entries.
* Implementation Type(s): These are allowed implementations of a
``Field Name``. This is often used to define controlled vocabulary
or CybOX Object requirements. Example:
``stixVocabs:IndicatorType``. Multiple entries are comma delimited.
* Allowed Value(s): Allowable values for a ``Field Name``. Examples
are allowable `@version` values, or controlled vocabulary terms.
Entries marked as ``Required`` may also have ``Allowed Value`` and
``Implementation Types`` tests applied to the field as well.
Entries marked as ``Optional`` or ``Suggested`` are skipped unless
there are associated allowed values/fields. Generated rules will
validate values/implementations if the fields are found in the document.
Entries marked as ``Prohibited`` are only checked for presence. Any
values found in the ``Implementation Types` or ``Allowed Values``
fields will be ignored.
Returns:
A list of ``_BaseProfileRule`` implementations for the given
rule parameters.
"""
is_required = False
rules = []
if occurrence in OCCURRENCE_REQUIRED:
is_required = True
elif occurrence in OCCURRENCE_PROHIBITED:
rule = ProhibitedRule(field, info)
rules.append(rule)
elif occurrence in ALL_OPTIONAL_OCCURRENCES:
pass
else:
return rules
if types:
rule = AllowedImplsRule(field, info, is_required, types)
rules.append(rule)
if values:
rule = AllowedValuesRule(field, info, is_required, values)
rules.append(rule)
# Allowed value/impl rules will check for existence if the field is
# required, so we don't need an explicit existence check as well.
if is_required and not(types or values):
rule = RequiredRule(field, info)
rules.append(rule)
return rules
def _parse_worksheet_rules(self, worksheet, instance_map):
"""Parses the rules from the profile sheet `workheet`.
Args:
worksheet: A profile worksheet containing rules.
instance_map: A dictionary representation of the ``Instance
Mapping`` worksheet.
Returns:
A list of ``_BaseProfileRule`` implementations for the rules
defined in the `worksheet`.
Raises:
.ProfileParseError: If a rule context label has no associated
entry in `instance_map`.
"""
value = functools.partial(self._get_value, worksheet)
is_empty_row = functools.partial(self._is_empty_row, worksheet)
def check_label(label):
if label not in instance_map:
err = (
"Worksheet '{0}' context label '{1}' has no Instance "
"Mapping entry."
)
raise errors.ProfileParseError(
err.format(worksheet.name, label)
)
all_rules = []
for i in xrange(1, worksheet.nrows):
if is_empty_row(i):
continue
if not value(i, COL_OCCURRENCE):
ctx_label = value(i, COL_FIELD_NAME)
check_label(ctx_label)
continue
field = value(i, COL_FIELD_NAME)
occurrence = value(i, COL_OCCURRENCE).lower()
types = value(i, COL_XSI_TYPES)
values = value(i, COL_ALLOWED_VALUES)
if occurrence not in ALLOWED_OCCURRENCES:
err = "Found unknown occurrence '{0}' in worksheet '{1}'."
raise errors.ProfileParseError(
err.format(occurrence, worksheet.name)
)
rules = self._build_rules(
info=instance_map[ctx_label],
field=field,
occurrence=occurrence,
types=types,
values=values
)
all_rules.extend(rules)
return all_rules
def _parse_namespace_worksheet(self, worksheet):
"""Parses the Namespaces worksheet of a STIX profile. Returns a
dictionary representation.
``d = { <namespace> : <namespace alias> }``
By default, libxml2-required Saxon namespace is added to the return
dictionary.
"""
value = functools.partial(self._get_value, worksheet)
is_empty = functools.partial(self._is_empty_row, worksheet)
nsmap = {xmlconst.NS_SAXON: 'saxon'}
def check_namespace(ns, alias):
if ns and alias:
return
err = ("Missing namespace or alias: unable to parse Namespaces "
"worksheet")
raise errors.ProfileParseError(err)
for row in xrange(1, worksheet.nrows): # skip the first row
if is_empty(row):
continue
ns = value(row, COL_NAMESPACE)
alias = value(row, COL_ALIAS)
check_namespace(ns, alias)
nsmap[ns] = alias
return nsmap
def _parse_instance_mapping_worksheet(self, worksheet, nsmap):
"""Parses the supplied Instance Mapping worksheet and returns a
dictionary representation.
Args:
worksheet: The instance mapping worksheet of the profile.
nsmap: The namespace dictionary derived from the ``Namespace``
worksheet of the profile.
Returns:
A dictionary where the key is a Profile rule context label and the
value is an instance of the :class:`InstanceMapping`.
"""
value = functools.partial(self._get_value, worksheet)
is_empty = functools.partial(self._is_empty_row, worksheet)
instance_map = {}
def check_label(label):
if not label:
err = "Found empty type label in Instance Mapping worksheet"
raise errors.ProfileParseError(err)
if label not in instance_map:
return
err = ("Found duplicate type label in Instance Mapping worksheet: "
"'{label}'")
raise errors.ProfileParseError(err.format(label=label))
for row in xrange(1, worksheet.nrows):
if is_empty(row):
continue
label = value(row, COL_LABEL)
check_label(label)
mapping = InstanceMapping(nsmap)
mapping.label = label
mapping.namespace = value(row, COL_TYPE_NAMESPACE)
mapping.selectors = value(row, COL_SELECTORS)
mapping.validate()
instance_map[label] = mapping
return instance_map
def _parse_workbook_rules(self, workbook, instance_map):
    """Collect profile rules from every rule-bearing worksheet.

    The 'Overview', 'Namespaces', and 'Instance Mapping' worksheets do not
    contain rules and are skipped.

    Args:
        workbook: The profile Excel workbook.
        instance_map: A dictionary representation of the
            ``Instance Mapping`` worksheet.

    Returns:
        A list of ``_BaseProfileRule`` implementations containing every
        rule in the `workbook` profile.
    """
    non_rule_sheets = ("Overview", "Namespaces", "Instance Mapping")
    rules = []
    for worksheet in workbook.sheets():
        if worksheet.name not in non_rule_sheets:
            rules.extend(self._parse_worksheet_rules(worksheet, instance_map))
    return rules
def _parse_profile(self, profile_fn):
    """Converts the supplied STIX profile into a Schematron representation.
    The Schematron schema is returned as a etree._Element instance.

    Args:
        profile_fn: Filename of the STIX profile Excel workbook.

    Returns:
        A Schematron ``etree._Element`` instance.

    Raises:
        .ProfileParseError: If `profile_fn` does not point to a valid
        STIX profile or an error occurs while parsing the STIX profile.
    """
    workbook = self._open_workbook(profile_fn)
    ws = workbook.sheet_by_name
    try:
        # The Namespaces sheet is parsed first: the instance mapping and
        # rule worksheets resolve prefixes against it.
        namespaces = self._parse_namespace_worksheet(ws("Namespaces"))
        instance_mapping = self._parse_instance_mapping_worksheet(
            worksheet=ws("Instance Mapping"),
            nsmap=namespaces
        )
        rules = self._parse_workbook_rules(workbook, instance_mapping)
        profile = Profile(namespaces)
        profile.extend(rules)
        return profile
    except xlrd.XLRDError as ex:
        # Re-raise xlrd-level failures as ProfileParseError so callers only
        # need to handle one exception type.
        err = "Error occurred while parsing STIX Profile: %s" % str(ex)
        raise errors.ProfileParseError(err)
    finally:
        # Always release the worksheet data loaded by xlrd.
        self._unload_workbook(workbook)
def _unload_workbook(self, workbook):
    """Releases the sheet data held by the xlrd `workbook`."""
    for name in (sheet.name for sheet in workbook.sheets()):
        workbook.unload_sheet(name)
def _is_empty_row(self, worksheet, row):
    """Return True when no column of `row` in `worksheet` holds a value."""
    for col in xrange(worksheet.ncols):
        if self._get_value(worksheet, row, col):
            return False
    return True
def _get_value(self, worksheet, row, col):
    """Return the string form of the cell value at (`row`, `col`).

    Raises:
        .ProfileParseError: If `worksheet` is falsy (e.g. ``None``).
    """
    if not worksheet:
        raise errors.ProfileParseError("worksheet value was NoneType")
    cell = worksheet.cell_value(row, col)
    return str(cell)
def _open_workbook(self, filename):
    """Opens `filename` with ``xlrd.open_workbook()``.

    Args:
        filename: Path to a ``.xlsx`` STIX profile document.

    Returns:
        An ``xlrd`` workbook instance.

    Raises:
        .ProfileParseError: If `filename` does not have an ``.xlsx``
            extension, does not exist, or cannot be opened as a workbook.
    """
    if not filename.lower().endswith(".xlsx"):
        err = "Profile must have .XLSX extension. Filename provided: '{fn}'"
        raise errors.ProfileParseError(err.format(fn=filename))
    if not os.path.exists(filename):
        err = "The profile document '{fn}' does not exist"
        raise errors.ProfileParseError(err.format(fn=filename))
    try:
        return xlrd.open_workbook(filename)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should become parse errors.
        err = ("Error occurred while opening '{fn}'. File may be an invalid "
               "or corrupted XSLX document.")
        raise errors.ProfileParseError(err.format(fn=filename))
@schematron.SchematronValidator.xslt.getter
def xslt(self):
    """Returns an lxml.etree._ElementTree representation of the ISO
    Schematron skeleton generated XSLT translation of a STIX profile.

    The STIXProfileValidator uses the extension function
    saxon:line-number() for reporting line numbers. This function is
    stripped along with any references to the Saxon namespace from the
    exported XSLT. This is due to compatibility issues between
    Schematron/XSLT processing libraries. For example, SaxonPE/EE expects
    the Saxon namespace to be "http://saxon.sf.net/" while libxslt expects
    it to be "http://icl.com/saxon". The freely distributed SaxonHE
    library does not support Saxon extension functions at all.

    Returns:
        An ``etree._ElementTree`` XSLT document.
    """
    if not self._schematron:
        return None
    # Serialize the generated XSLT, strip the Saxon line-number artifacts
    # with literal string replacement, then re-parse. The replaced strings
    # must match the serialized output byte-for-byte, so do not reformat.
    s = etree.tostring(self._schematron.validator_xslt)
    s = s.replace(' [<axsl:text/><axsl:value-of select="saxon:line-number()"/><axsl:text/>]', '')
    s = s.replace('xmlns:saxon="http://icl.com/saxon"', '')
    s = s.replace('<svrl:ns-prefix-in-attribute-values uri="http://icl.com/saxon" prefix="saxon"/>', '')
    # Re-parse the cleaned text back into an lxml tree.
    parser = utils.get_xml_parser()
    return etree.parse(StringIO.StringIO(s), parser=parser)
@schematron.SchematronValidator.schematron.getter
def schematron(self):
    """Returns an lxml.etree._ElementTree representation of the
    ISO Schematron translation of a STIX profile.

    The STIXProfileValidator uses the extension function
    saxon:line-number() for reporting line numbers. This function is
    stripped along with any references to the Saxon namespace from the
    exported XSLT. This is due to compatibility issues between
    Schematron/XSLT processing libraries. For example, SaxonPE/EE expects
    the Saxon namespace to be "http://saxon.sf.net/" while libxslt expects
    it to be "http://icl.com/saxon". The freely distributed SaxonHE
    library does not support Saxon extension functions at all.

    Returns:
        An ``etree._ElementTree`` Schematron document.
    """
    # Strip the per-rule Saxon line-number suffix and the Saxon namespace
    # declaration via literal string replacement, then re-parse.
    to_replace = ' %s' % SAXON_LINENO
    s = etree.tostring(self._schematron.schematron)
    s = s.replace(to_replace, '')
    s = s.replace('<ns prefix="saxon" uri="http://icl.com/saxon"/>', '')
    parser = utils.get_xml_parser()
    return etree.parse(StringIO.StringIO(s), parser=parser)
@common.check_stix
def validate(self, doc):
    """Validates an XML instance document against a STIX profile.

    Args:
        doc: The STIX document. This can be a filename, file-like object,
            ``etree._Element``, or ``etree._ElementTree`` instance.

    Returns:
        An instance of :class:`.ProfileValidationResults`.

    Raises:
        .ValidationError: If there are any issues parsing `doc`.
    """
    root = utils.get_etree_root(doc)
    # Run schematron validation first; the SVRL report is only available
    # after validate() has executed.
    is_valid = self._schematron.validate(root)
    return ProfileValidationResults(
        is_valid, root, self._schematron.validation_report
    )
# Public API of this module.
__all__ = [
    'STIXProfileValidator',
    'ProfileError',
    'ProfileValidationResults'
]
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution Strategy-related dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops.options import ExternalStatePolicy
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util.tf_export import tf_export
# Sentinel constant exported publicly as `tf.data.experimental.SHARD_HINT`.
SHARD_HINT = -1
tf_export("data.experimental.SHARD_HINT").export_constant(
    __name__, "SHARD_HINT")
class _AutoShardDataset(dataset_ops.UnaryDataset):
    """A `Dataset` that shards the `Dataset` automatically.

    This dataset takes in an existing dataset and tries to automatically figure
    out how to shard the dataset in a multi-worker scenario using graph rewrites.

    If the AutoShardPolicy is set to FILE, it walks up the dataset graph until
    it finds a reader dataset, then inserts a ShardDataset op before that node
    so that each worker only sees some files.

    If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the
    end of the input pipeline, before any terminal PrefetchDataset if there is
    one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it
    is written to legacy RebatchDataset for correctness reasons, since
    RebatchDatasetV2 is incompatible with data sharding.

    If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding.
    If it cannot find a reader dataset, it falls back to doing data-based
    sharding.

    If the AutoShardPolicy is set to OFF, it does nothing.

    Attributes:
        num_workers: Total number of workers to shard this dataset across.
        index: The current worker index (out of the total number of workers) this
            dataset is for.
        num_replicas: The total number of replicas across all workers. This is used
            only when sharding by data (either DATA or AUTO) in order to rewrite
            RebatchDatasetV2 to RebatchDataset.

    Raises:
        NotFoundError: If we cannot find a suitable reader dataset to begin
            automatically sharding the dataset.
    """

    def __init__(self, input_dataset, num_workers, index, num_replicas=None):
        self._input_dataset = input_dataset
        # Sharding does not change element structure, so the spec is taken
        # directly from the input dataset.
        self._element_spec = input_dataset.element_spec
        variant_tensor = ged_ops.auto_shard_dataset(
            self._input_dataset._variant_tensor,  # pylint: disable=protected-access
            num_workers=num_workers,
            index=index,
            # The policy is read from the input dataset's options and passed
            # to the rewrite as its integer enum value.
            auto_shard_policy=int(
                input_dataset.options().experimental_distribute.auto_shard_policy),
            num_replicas=num_replicas,
            **self._flat_structure)
        super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)

    @property
    def element_spec(self):
        return self._element_spec
def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None):  # pylint: disable=invalid-name
    """Wraps `_AutoShardDataset` in a `DatasetV1Adapter` for the TF1 API."""
    sharded = _AutoShardDataset(input_dataset, num_workers, index, num_replicas)
    return dataset_ops.DatasetV1Adapter(sharded)
class _RebatchDataset(dataset_ops.UnaryDataset):
    """A `Dataset` that rebatches elements from its input into new batch sizes.

    `_RebatchDataset(input_dataset, batch_sizes)` is functionally equivalent to
    `input_dataset.unbatch().batch(N)`, where the value of N cycles through the
    `batch_sizes` input list. The elements produced by this dataset have the same
    rank as the elements of the input dataset.

    For example:

    ```python
    ds = tf.data.Dataset.range(8)
    ds = ds.batch(4)
    ds = _RebatchDataset(ds, batch_sizes=[2, 1, 1])
    for elem in ds:
      print(elem)
    >> [0, 1], [2], [3], [4, 5], [6], [7]

    ds = tf.data.Dataset.range(16)
    ds = ds.batch(4)
    ds = _RebatchDataset(ds, batch_sizes=[6])
    for elem in ds:
      print(elem)
    >> [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15]
    ```
    """

    def __init__(self, input_dataset, batch_sizes, drop_remainder=False):
        """Creates a _RebatchDataset.

        Args:
            input_dataset: `Dataset` to rebatch.
            batch_sizes: A `tf.int64` scalar or vector, representing the size of
                batches to produce. If this argument is a vector, these values are
                cycled through in order.
            drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
                whether the last batch should be dropped in the case it has fewer than
                `batch_sizes[cycle_index] elements; the default behavior is not to drop
                the smaller batch.
        """
        self._input_dataset = input_dataset
        self._batch_sizes = ops.convert_to_tensor(
            batch_sizes, dtype=dtypes.int64, name="batch_sizes")
        self._drop_remainder = ops.convert_to_tensor(
            drop_remainder, dtype=dtypes.bool, name="drop_remainder")
        new_batch_dim = self._compute_static_batch_dim()
        # Rebuild each component spec with the (possibly unknown) new batch
        # dimension: _unbatch() strips the old leading dim, _batch() adds the
        # new one.
        # pylint: disable=protected-access
        self._element_spec = nest.map_structure(
            lambda ts: ts._unbatch()._batch(new_batch_dim),
            dataset_ops.get_structure(input_dataset))
        # pylint: enable=protected-access
        input_dataset = dataset_ops.normalize_to_dense(input_dataset)
        variant_tensor = ged_ops.rebatch_dataset_v2(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            batch_sizes=batch_sizes,
            drop_remainder=drop_remainder,
            **self._flat_structure)
        super(_RebatchDataset, self).__init__(input_dataset, variant_tensor)

    def _compute_static_batch_dim(self):
        """Computes the static batch dimension of a dataset if it can be determined.

        Given the _RebatchDataset parameters, determines the batch dimension of this
        dataset statically. Returns None if this cannot be determined or is
        variable.

        Returns:
            An integer representing the batch dimension of the dataset. If it cannot
            be determined statically, returns None.

        Raises:
            ValueError: The batch_sizes parameter is malformed, input_dataset is
                not batched, or input_dataset batch sizes are incompatible with each
                other.
        """
        new_batch_dim = tensor_util.constant_value(self._batch_sizes)
        if new_batch_dim is None:
            # batch_sizes is not a compile-time constant.
            return None
        if isinstance(new_batch_dim, np.ndarray):
            if len(new_batch_dim.shape) == 1:
                # A vector of batch sizes only yields a static dimension when
                # every entry is identical.
                if np.all(new_batch_dim == new_batch_dim[0]):
                    new_batch_dim = new_batch_dim[0]
                else:
                    return None
            elif len(new_batch_dim.shape) > 1:
                raise ValueError("Expected batch_sizes to be a scalar or vector.")
        # A possible partial (smaller) final batch makes the dim non-static.
        if self._may_form_partial_batches(new_batch_dim):
            return None
        return new_batch_dim

    def _may_form_partial_batches(self, desired_batch_size):
        """Returns whether this dataset may form partial batches."""
        if tensor_util.constant_value(self._drop_remainder):
            # Remainders are dropped, so all produced batches are full.
            return False

        def get_batch_dim(type_spec):
            # Leading (batch) dimension of one component, or None if unknown.
            shape = type_spec._to_legacy_output_shapes()  # pylint: disable=protected-access
            if not isinstance(shape, tensor_shape.TensorShape):
                return None
            if shape.rank is None:
                return None
            if len(shape) < 1:
                raise ValueError("Expected a dataset whose elements have rank >= 1 "
                                 "but found a dataset whose elements are scalars. "
                                 "You can fix the issue by adding the `batch` "
                                 "transformation to the dataset.")
            return shape.dims[0].value

        input_batch_dims = [
            get_batch_dim(ts)
            for ts in nest.flatten(dataset_ops.get_structure(self._input_dataset))
        ]
        known_input_batch_dims = [d for d in input_batch_dims if d is not None]
        if not known_input_batch_dims:
            # No static information at all: conservatively assume partial
            # batches are possible.
            return True
        known_input_batch_dims = np.asarray(known_input_batch_dims)
        if not np.all(known_input_batch_dims == known_input_batch_dims[0]):
            raise ValueError("Batch dimensions of input dataset are not compatible.")
        # Partial batches only occur when the input batch size is not an
        # exact multiple of the desired size.
        return known_input_batch_dims[0] % desired_batch_size != 0

    @property
    def element_spec(self):
        return self._element_spec
class _LegacyRebatchDataset(dataset_ops.UnaryDataset):
    """A `Dataset` that divides its input batches into `num_replicas` sub-batches.

    For each batch in the input dataset, _LegacyRebatchDataset will produce
    `num_replicas` smaller batches whose sizes add up to the original batch size.

    For example:

    ```python
    ds = tf.data.Dataset.range(8)
    ds = ds.batch(4)
    ds = _LegacyRebatchDataset(ds, num_replicas=3)
    for elem in ds:
      print(elem)
    >> [0, 1], [2, 3], [], [4, 5], [6, 7], []
    ```
    """

    def __init__(self, input_dataset, num_replicas):
        """Creates a _LegacyRebatchDataset.

        Args:
            input_dataset: `Dataset` to rebatch.
            num_replicas: A `tf.int64` scalar, representing the number of sub-batches
                to split each batch from `input_dataset` into.
        """

        def recalculate_batch_size(type_spec):
            """Recalculates the output_shape after dividing it by num_replicas."""
            output_shape = type_spec._to_legacy_output_shapes()  # pylint: disable=protected-access
            if not isinstance(output_shape, tensor_shape.TensorShape):
                return None
            # If the output shape is unknown, we set the batch dimension to unknown.
            if output_shape.rank is None:
                return None
            if len(output_shape) < 1:
                raise ValueError("Expected a dataset whose elements have rank >= 1 "
                                 "but found a dataset whose elements are scalars. "
                                 "You can fix the issue by adding the `batch` "
                                 "transformation to the dataset.")
            output_dims = [d.value for d in output_shape.dims]
            if output_dims[0] is not None and output_dims[0] % num_replicas == 0:
                return output_dims[0] // num_replicas
            # Set the batch dimension to unknown. If the global batch size does not
            # divide num_replicas evenly, the minibatches may have different sizes.
            return None

        def rebatch(type_spec):
            # Rebuild the spec with the recalculated (possibly unknown)
            # batch dimension.
            # pylint: disable=protected-access
            batch_size = recalculate_batch_size(type_spec)
            return type_spec._unbatch()._batch(batch_size)
            # pylint: enable=protected-access

        self._element_spec = nest.map_structure(
            rebatch, dataset_ops.get_structure(input_dataset))
        input_dataset = dataset_ops.normalize_to_dense(input_dataset)
        variant_tensor = ged_ops.rebatch_dataset(
            input_dataset._variant_tensor,  # pylint: disable=protected-access
            num_replicas=num_replicas,
            **self._flat_structure)
        super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor)

    @property
    def element_spec(self):
        return self._element_spec
class _RemoteDataset(dataset_ops.DatasetSource):
    """A dataset deserialized from `graph_def` and placed on `device`."""

    def __init__(self, graph_def, device, element_spec):
        self._elem_spec = element_spec
        # Creating the op under a device scope materializes the dataset on
        # that device.
        with ops.device(device):
            variant_tensor = ged_ops.dataset_from_graph(graph_def)
        super(_RemoteDataset, self).__init__(variant_tensor)

    @property
    def element_spec(self):
        return self._elem_spec
def replicate(dataset, devices):
    """A transformation that replicates `dataset` onto a list of devices.

    Args:
        dataset: A `tf.data.Dataset` object.
        devices: A list of devices to replicate the dataset on.

    Returns:
        A dictionary mapping device name to a dataset on that device.

    Raises:
        TypeError: If `dataset` is not a `tf.data.Dataset`.
    """
    if not isinstance(dataset, dataset_ops.DatasetV2):
        raise TypeError("`dataset` must be a `tf.data.Dataset` object.")

    # pylint: disable=protected-access
    dataset_device = dataset._variant_tensor.device
    # Fast path: replicating onto the dataset's own (single) device is a
    # no-op, so reuse the existing dataset object.
    if len(devices) == 1 and devices[0] == dataset_device:
        return {devices[0]: dataset}

    # Serialize the pipeline once, then rebuild it on each target device.
    with ops.colocate_with(dataset._variant_tensor):
        dataset = dataset._apply_debug_options()
        graph_def = dataset._as_serialized_graph(
            strip_device_assignment=True,
            external_state_policy=ExternalStatePolicy.WARN)
    datasets = {
        device: _RemoteDataset(graph_def, device, dataset.element_spec)
        for device in devices
    }
    return datasets
def batch_sizes_for_worker(global_batch_size, num_workers,
                           num_replicas_per_worker, worker_index):
    """Determines how to rebatch a dataset for the given worker.

    Given the global batch size, number of workers, number of replicas per
    worker, and worker index, returns the correct batch sizes for rebatching a
    dataset on worker `worker_index` of `num_workers`, such that each global
    step (across all workers and replicas) will consume global_batch_size
    elements. The returned value should be passed as the `batch_sizes` input
    parameter to `tf.data.experimental.rebatch()`. The returned batch sizes
    meet the following constraints:

    Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker
    (A) for any worker, len(batch_sizes) = W * R
    (B) for any worker, sum(batch_sizes) == G
    (C) for any global step (i.e. R iterations on each worker), the sum of
        batches consumed by replicas across all workers is G.
    (D) any two batch sizes of any two replicas differs by at most one.

    For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two
    files which each contain 7 elements:

    ```python
    # WORKER 0
    batch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size,
                                           num_workers=2,
                                           num_replicas_per_worker=2,
                                           worker_index=0)
    print(batch_sizes_0)
    >> [2, 2, 2, 1]

    dataset_0 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
    dataset_0 = dataset_0.shard(num_shards, index=0)
    dataset_0 = dataset_0.batch(7)
    dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0))
    for elem in dataset_0:
      print(elem)
    >> [[A0, A1], [A2, A3], [A4, A5], [A6]]

    # WORKER 1
    batch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size,
                                           num_workers=2,
                                           num_replicas_per_worker=2,
                                           worker_index=1)
    print(batch_sizes_1)
    >> [2, 1, 2, 2]

    dataset_1 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
    dataset_1 = dataset_1.shard(num_shards, index=1)
    dataset_1 = dataset_1.batch(7)
    dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1))
    for elem in dataset_1:
      print(elem)
    >> [[B0, B1], [B2], [B3, B4], [B5, B6]]
    ```

    The above example will produce the following elements:

    Step 1:
      Worker 0 Replica 0: [A0, A1]
      Worker 0 Replica 1: [A2, A3]
      Worker 1 Replica 0: [B0, B1]
      Worker 1 Replica 1: [B2]
    Total batch size = 7

    Step 2:
      Worker 0 Replica 0: [A4, A5]
      Worker 0 Replica 1: [A6]
      Worker 1 Replica 0: [B3, B4]
      Worker 1 Replica 1: [B5, B6]
    Total batch size = 7

    Args:
        global_batch_size: A `tf.int64` scalar, representing the global batch
            size.
        num_workers: An integer representing the number of workers the dataset
            will be distributed across.
        num_replicas_per_worker: An integer representing the number of replicas
            per worker. All workers are assumed to have the same number of
            replicas.
        worker_index: An integer index of the worker to be rebatched.

    Returns:
        A `tf.int64` vector, representing the batch sizes to rebatch the
        dataset into.
    """
    # Constraint (A): each worker cycles through W * R sub-batch sizes.
    num_subbatches = num_workers * num_replicas_per_worker
    # Worker k's schedule is worker 0's schedule rotated by R * k (C).
    offset = worker_index * num_replicas_per_worker

    const_value = tensor_util.constant_value(global_batch_size)
    if const_value is not None:
        # Use the constant global batch size for further calculations.
        global_batch_size = const_value

    # Let N = W * R. Constraints (B) and (D) jointly mean each sub-batch has
    # size floor(B/N) or ceil(B/N); exactly B - N * floor(B/N) of the N
    # sub-batches get the larger size.
    small_size = global_batch_size // num_subbatches
    num_large = global_batch_size - (num_subbatches * small_size)

    if const_value is not None:
        # Known-constant batch size: return a constant tensor directly instead
        # of manipulating it with TF ops, which allows for better downstream
        # shape inference. Worker 0 takes the larger sub-batches first.
        schedule = [small_size + 1] * num_large
        schedule += [small_size] * (num_subbatches - num_large)
        return ops.convert_to_tensor(
            schedule[offset:] + schedule[:offset],
            dtype=dtypes.int64,
            name="batch_sizes")

    # Symbolic batch size: build the same rotated schedule with TF ops.
    schedule = array_ops.ones(num_subbatches, dtype=dtypes.int64)
    schedule = small_size * schedule + array_ops.concat(
        [
            array_ops.ones(num_large, dtype=dtypes.int64),
            array_ops.zeros(num_subbatches - num_large, dtype=dtypes.int64)
        ],
        axis=0)
    return array_ops.concat([schedule[offset:], schedule[:offset]], axis=0)
def compute_batch_size(dataset):
    """An operation that returns the batch size of the dataset.

    This op tries to infer the batch size statically by walking up the dataset
    tree from the final dataset node and returning the batch size of the first
    batching dataset (such as from .batch() and .padded_batch()) that it
    encounters. This differs from using the `element_spec` of a dataset in that
    it does not account for partial batches.

    This operation may fail if it encounters contradictory batch sizes (for
    example, if the dataset is created by zipping together two datasets with
    different batch sizes), if there are no explicit batching transformations,
    or if there are operations downstream from the batching transformation that
    may modify its batch size. In these cases, it returns a -1.

    Args:
        dataset: A `tf.data.Dataset` object.

    Returns:
        A `tf.int64` Tensor representing the batch size of the dataset sans
        partial batches. If this cannot be inferred statically, the value of
        this tensor will be -1.
    """

    def static_batch_dim(type_spec):
        # Leading dimension of one component's legacy shape, or None if the
        # rank itself is unknown.
        shape = type_spec._to_legacy_output_shapes()  # pylint: disable=protected-access
        if shape.rank is None:
            return None
        return shape.dims[0].value

    batch_dims = [
        static_batch_dim(ts)
        for ts in nest.flatten(dataset_ops.get_structure(dataset))
    ]

    if any(dim is None for dim in batch_dims):
        # If any batch dimensions are unknown, use compute_batch_size op.
        return ged_ops.compute_batch_size(dataset._variant_tensor)  # pylint: disable=protected-access

    # All batch dimensions are known: return them directly when they agree,
    # otherwise signal the contradiction with -1.
    if all(dim == batch_dims[0] for dim in batch_dims):
        batch_dim = batch_dims[0]
    else:
        batch_dim = -1
    return constant_op.constant(
        batch_dim, dtype=dtypes.int64, name="static_batch_size")
# The V1 wrapper shares the V2 implementation's docstring.
_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
|
|
# BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
import random
import sys
from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
from elasticapm.utils import compat, varmap
from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
from elasticapm.utils.logging import get_logger
from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
__all__ = ("BaseEvent", "Exception", "Message")
logger = get_logger("elasticapm.events")
class BaseEvent(object):
    """Base class for capturable event types (see ``Exception``/``Message``)."""

    @staticmethod
    def to_string(client, data):
        # Subclasses must render their event payload as a human-readable string.
        raise NotImplementedError

    @staticmethod
    def capture(client, **kwargs):
        # Subclasses build and return the event payload; default is empty.
        return {}
class Exception(BaseEvent):
    """
    Exceptions store the following metadata:

    - value: 'My exception value'
    - type: 'ClassName'
    - module '__builtin__' (i.e. __builtin__.TypeError)
    - frames: a list of serialized frames (see _get_traceback_frames)

    NOTE(review): this class shadows the builtin ``Exception`` at module
    level; see the handler comment in :meth:`capture`.
    """

    @staticmethod
    def to_string(client, data):
        # Render as "Type: value", or just "Type" when there is no value.
        exc = data["exception"]
        if exc["value"]:
            return "%s: %s" % (exc["type"], exc["value"])
        return exc["type"]

    @staticmethod
    def get_hash(data):
        # Grouping key: exception type plus module/function of each frame.
        exc = data["exception"]
        output = [exc["type"]]
        for frame in data["stacktrace"]["frames"]:
            output.append(frame["module"])
            output.append(frame["function"])
        return output

    @staticmethod
    def capture(client, exc_info=None, **kwargs):
        """Build an error event payload from `exc_info`.

        When `exc_info` is falsy or True, the currently handled exception
        from ``sys.exc_info()`` is used. On Python 3, chained exceptions
        (``__cause__``/``__context__``) are captured recursively up to
        EXCEPTION_CHAIN_MAX_DEPTH.
        """
        culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None
        new_exc_info = False
        if not exc_info or exc_info is True:
            # No explicit exception info given: use the active exception.
            new_exc_info = True
            exc_info = sys.exc_info()
            if exc_info == (None, None, None):
                raise ValueError("No exception found: capture_exception requires an active exception.")
        try:
            exc_type, exc_value, exc_traceback = exc_info
            frames = get_stack_info(
                iter_traceback_frames(exc_traceback, config=client.config),
                with_locals=client.config.collect_local_variables in ("errors", "all"),
                library_frame_context_lines=client.config.source_lines_error_library_frames,
                in_app_frame_context_lines=client.config.source_lines_error_app_frames,
                include_paths_re=client.include_paths_re,
                exclude_paths_re=client.exclude_paths_re,
                # Truncate collected local variables to the configured limits.
                locals_processor_func=lambda local_var: varmap(
                    lambda k, val: shorten(
                        val,
                        list_length=client.config.local_var_list_max_length,
                        string_length=client.config.local_var_max_length,
                        dict_length=client.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            )
            culprit = kwargs.get("culprit", None) or get_culprit(
                frames, client.config.include_paths, client.config.exclude_paths
            )
            if hasattr(exc_type, "__module__"):
                exc_module = exc_type.__module__
                exc_type = exc_type.__name__
            else:
                exc_module = None
                exc_type = exc_type.__name__
        finally:
            if new_exc_info:
                # Break reference cycles created by holding exc_info/traceback.
                try:
                    del exc_info
                    del exc_traceback
                except Exception as e:
                    # NOTE(review): ``Exception`` here resolves to this
                    # module-level class (which shadows the builtin and is not
                    # a BaseException subclass), so if the ``del`` ever failed
                    # this handler would itself raise TypeError — confirm, and
                    # consider aliasing the builtin before the class def.
                    logger.exception(e)
        if "message" in kwargs:
            message = kwargs["message"]
        else:
            message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
        data = {
            "id": "%032x" % random.getrandbits(128),
            "culprit": keyword_field(culprit),
            "exception": {
                "message": message,
                "type": keyword_field(str(exc_type)),
                "module": keyword_field(str(exc_module)),
                "stacktrace": frames,
            },
        }
        if hasattr(exc_value, "_elastic_apm_span_id"):
            # Link the error to the span it occurred in, then drop the marker.
            data["parent_id"] = exc_value._elastic_apm_span_id
            del exc_value._elastic_apm_span_id
        if compat.PY3:
            depth = kwargs.get("_exc_chain_depth", 0)
            if depth > EXCEPTION_CHAIN_MAX_DEPTH:
                return
            cause = exc_value.__cause__
            chained_context = exc_value.__context__
            # we follow the pattern of Python itself here and only capture the chained exception
            # if cause is not None and __suppress_context__ is False
            if chained_context and not (exc_value.__suppress_context__ and cause is None):
                if cause:
                    chained_exc_type = type(cause)
                    chained_exc_value = cause
                else:
                    chained_exc_type = type(chained_context)
                    chained_exc_value = chained_context
                chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__
                # NOTE(review): culprit is passed as the *string* "None", not
                # the value None — confirm this is intentional.
                chained_cause = Exception.capture(
                    client, exc_info=chained_exc_info, culprit="None", _exc_chain_depth=depth + 1
                )
                if chained_cause:
                    data["exception"]["cause"] = [chained_cause["exception"]]
        return data
class Message(BaseEvent):
    """
    Messages store the following metadata:

    - message: 'My message from %s about %s'
    - params: ('foo', 'bar')
    """

    @staticmethod
    def to_string(client, data):
        return data["log"]["message"]

    @staticmethod
    def get_hash(data):
        # Group messages by their (unformatted) parameterized template.
        return [data["param_message"]["message"]]

    @staticmethod
    def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):
        if message:
            # A plain message takes precedence over a parameterized one.
            param_message = {"message": message}
        template = param_message["message"]
        params = param_message.get("params")
        rendered = template % params if params else template
        data = kwargs.get("data", {})
        message_data = {
            "id": "%032x" % random.getrandbits(128),
            "log": {
                "level": keyword_field(level or "error"),
                "logger_name": keyword_field(logger_name or "__root__"),
                "message": rendered,
                "param_message": keyword_field(template),
            },
        }
        stacktrace = data.get("stacktrace")
        if isinstance(stacktrace, dict):
            message_data["log"]["stacktrace"] = stacktrace["frames"]
        exception = kwargs.get("exception")
        if exception:
            message_data["culprit"] = exception["culprit"]
            message_data["exception"] = exception["exception"]
        return message_data
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
from __future__ import print_function
import sys
import os.path
from arsoft.trac.plugins.commitupdater import *
import trac.env
import time, unittest
from trac import perm
from trac.util.datefmt import time_now, utc
from trac.core import ComponentManager
from trac.ticket.model import Component
from trac.ticket import TicketSystem
from trac.versioncontrol.api import Repository, Changeset, NoSuchChangeset
from trac.web.session import Session
from trac.wiki.web_ui import ReadonlyWikiPolicy
from tracopt.versioncontrol.git.git_fs import GitRepository
from trac.test import EnvironmentStub, Mock, MockRequest
class MockRepository(Repository):
    """Minimal in-memory `Repository` stub used by the commitupdater tests."""

    has_linear_changesets = True

    def get_youngest_rev(self):
        # Fixed youngest revision for deterministic tests.
        return 100

    def normalize_path(self, path):
        return path.strip('/') if path else ''

    def normalize_rev(self, rev):
        """Return `rev` as an int, or the youngest rev when empty/None."""
        if rev is None or rev == '':
            return self.youngest_rev
        try:
            nrev = int(rev)
        except (TypeError, ValueError):
            # Was a bare ``except:`` (which also swallowed SystemExit and
            # KeyboardInterrupt); only conversion failures mean an unknown
            # changeset.
            raise NoSuchChangeset(rev)
        else:
            if not (1 <= nrev <= self.youngest_rev):
                raise NoSuchChangeset(rev)
            return nrev

    def get_node(self, path, rev):
        assert rev % 3 == 1  # allow only 3n + 1
        assert path in ('file', 'file-old')
        return MockNode(self, path, rev, Node.FILE)

    def get_changeset(self, rev):
        assert rev % 3 == 1  # allow only 3n + 1
        return MockChangeset(self, rev, 'message-%d' % rev, 'author-%d' % rev,
                             datetime(2001, 1, 1, tzinfo=utc) +
                             timedelta(seconds=rev))

    def previous_rev(self, rev, path=''):
        return rev - 1 if rev > 0 else None

    def rev_older_than(self, rev1, rev2):
        return self.normalize_rev(rev1) < self.normalize_rev(rev2)

    def close(self):
        pass

    def _not_implemented(self, *args, **kwargs):
        raise NotImplementedError

    # The test suite never calls these; fail loudly if it ever does.
    get_changes = _not_implemented
    get_oldest_rev = _not_implemented
    next_rev = _not_implemented
class test_commitupdater(unittest.TestCase):
    """Integration tests for CommitTicketUpdater.

    Each test crafts a commit message containing a ticket command (e.g.
    "closes #1"), feeds it through the updater against a stub Trac
    environment and verifies the resulting ticket transitions.
    """
    def _test_authenticated_session(self, username, fullname, email):
        """Create and persist an authenticated session for *username*."""
        req = MockRequest(self.env, authname=username)
        req.incookie['trac_session'] = '123456'
        session = Session(self.env, req)
        self.assertEqual(username, session.sid)
        session['email'] = email
        session['name'] = fullname
        session.save()
    def setUp(self):
        self.env = \
            EnvironmentStub(enable=['trac.attachment.LegacyAttachmentPolicy',
                                    'trac.perm.*',
                                    'trac.wiki.web_ui.ReadonlyWikiPolicy',
                                    'trac.ticket.*'])
        self.policy = ReadonlyWikiPolicy(self.env)
        store = perm.DefaultPermissionStore(self.env)
        self.perm_sys = perm.PermissionSystem(self.env)
        users = [('user1', 'User C', 'user1@example.org'),
                 ('user2', 'User A', 'user2@example.org'),
                 ('user3', 'User D', 'user3@example.org'),
                 ('user4', 'User B', 'user4@example.org')]
        self.env.insert_users(users)
        store.grant_permission('user1', 'TICKET_MODIFY')
        store.grant_permission('user2', 'TICKET_VIEW')
        store.grant_permission('user3', 'TICKET_MODIFY')
        store.grant_permission('user4', 'TICKET_MODIFY')
        for (username, fullname, email) in users:
            self._test_authenticated_session(username, fullname, email)
        self.repo = Mock(MockRepository, 'testrepo',
                         {'name': 'testrepo', 'id': 4321}, None)
        # Map every supported command verb onto its updater action.
        config = self.env.config
        config.set("ticket","commit_ticket_update_commands.close","close closed closes fix fixed fixes")
        config.set("ticket","commit_ticket_update_commands.implements","implement implements implemented impl")
        config.set("ticket","commit_ticket_update_commands.invalidate","invalid invalidate invalidated invalidates")
        config.set("ticket","commit_ticket_update_commands.refs","addresses re references refs see")
        config.set("ticket","commit_ticket_update_commands.rejects","reject rejects rejected")
        config.set("ticket","commit_ticket_update_commands.worksforme","worksforme")
        config.set("ticket","commit_ticket_update_commands.alreadyimplemented","alreadyimplemented already_implemented")
        config.set("ticket","commit_ticket_update_commands.reopen","reopen reopens reopened")
        config.set("ticket","commit_ticket_update_commands.testready","testready test_ready ready_for_test rft")
        config.set("ticket","commit_ticket_update_allowed_domains","example.org mydomain.net")
        self._add_component('component3', 'user3')
        self.ticket = Ticket(self.env)
        self.ticket.populate({
            'reporter': 'user1',
            'summary': 'the summary',
            'component': 'component3',
            'owner': 'user3',
            'status': 'new',
        })
        self.tkt_id = self.ticket.insert()
        self.ticket2 = Ticket(self.env)
        self.ticket2.populate({
            'reporter': 'user2',
            'summary': 'the summary',
            'component': 'component3',
            'owner': 'user2',
            'status': 'new',
        })
        self.tkt2_id = self.ticket2.insert()
        # The stub environment does not know the custom resolution used by
        # the "alreadyimplemented" command, so register it directly.
        with self.env.db_transaction as db:
            db("INSERT INTO enum VALUES ('resolution', 'already_implemented', 6)")
        self._committicketupdater = CommitTicketUpdater(self.env)
    def noop(self):
        # Placeholder test method; lets the class be instantiated for
        # interactive debugging without running a real test.
        pass
    def tearDown(self):
        self.env.reset_db()
    def _add_component(self, name='test', owner='owner1'):
        """Insert a ticket component with the given name and owner."""
        component = Component(self.env)
        component.name = name
        component.owner = owner
        component.insert()
    def build_comment(self,changeset):
        """Return the wiki markup the updater is expected to post."""
        revstring = str(changeset.rev)
        drev = str(self.repo.display_rev(changeset.rev))
        if self.repo.name:
            revstring += '/' + self.repo.name
            drev += '/' + self.repo.name
        return """In [changeset:"%s" %s]:
{{{
#!CommitTicketReference repository="%s" revision="%s"
%s
}}}""" % (revstring, drev, self.repo.name, changeset.rev, changeset.message)
    def check_ticket_comment(self,changeset):
        """Assert the generated ticket comment matches the expected markup."""
        self.assertEqual(self._committicketupdater.make_ticket_comment(self.repo,changeset), self.build_comment(changeset))
    def test_check_closes(self):
        message = "Fixed some stuff. closes #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        # Check the parser found the expected ticket and action.
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_close])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['owner'], 'user1')
        self.assertEqual(ticket['resolution'], 'fixed')
    def test_check_implements(self):
        message = "Fixed some stuff. implements #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_implements])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'implemented')
        self.assertEqual(ticket['owner'], 'user1')
    def test_check_invalidate(self):
        message = "Fixed some stuff. invalid #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_invalidate])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['resolution'], 'invalid')
    def test_check_rejects(self):
        message = "Fixed some stuff. reject #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_rejects])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'rejected')
    def test_check_worksforme(self):
        message = "Fixed some stuff. worksforme #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_worksforme])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['resolution'], 'worksforme')
    def test_check_alreadyimplemented(self):
        message = "Fixed some stuff. alreadyimplemented #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_alreadyimplemented])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['resolution'], 'already_implemented')
    def test_check_already_implemented(self):
        message = "Fixed some stuff. already_implemented #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_alreadyimplemented])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['resolution'], 'already_implemented')
    def test_check_reopens(self):
        # Close the ticket first so the reopen command has a closed ticket
        # to act on.
        message = "Fixed some stuff. worksforme #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        message = "Fixed some stuff. reopen #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_reopens])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'reopened')
    def test_check_testready(self):
        message = "Fixed some stuff. ready_for_test #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_testready])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'test_ready')
    def test_allowed_domains(self):
        """Commits from authors outside the allowed domains are ignored."""
        message = "Fixed some stuff. reopen #%i" % self.tkt_id
        test_changeset_declined = Mock(Changeset, self.repo, 42, message,
                                       "test_person <me@gohome.now>", None)
        self.assertEqual(self._committicketupdater._is_author_allowed(test_changeset_declined.author), False)
        test_changeset_allowed = Mock(Changeset, self.repo, 42, message,
                                      "test_person <me@mydomain.net>", None)
        self.assertEqual(self._committicketupdater._is_author_allowed(test_changeset_allowed.author), True)
        test_changeset_no_domain = Mock(Changeset, self.repo, 42, message,
                                        "test_person", None)
        self.assertEqual(self._committicketupdater._is_author_allowed(test_changeset_no_domain.author), False)
        message = "Fixed some stuff. fixed #%i" % self.tkt_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'test_person <me@gohome.now>', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        # The author is not allowed, so the ticket must remain untouched.
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'new')
    def test_check_closes_multiple(self):
        message = "Fixed some stuff. closes #%i, #%i" % (self.tkt_id, self.tkt2_id)
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id, self.tkt2_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_close])
        self.assertEqual(tickets.get(self.tkt2_id), [self._committicketupdater.cmd_close])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['owner'], 'user1')
        self.assertEqual(ticket['resolution'], 'fixed')
        (cmds, ticket2) = ret[self.tkt2_id]
        self.assertEqual(ticket2['status'], 'closed')
        self.assertEqual(ticket2['owner'], 'user1')
        self.assertEqual(ticket2['resolution'], 'fixed')
    def test_check_closes_non_existing_ticket(self):
        non_existing_ticket_id = 12345
        message = "Fixed some stuff. closes #%i" % non_existing_ticket_id
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'user1@example.org', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [non_existing_ticket_id])
        self.assertEqual(tickets.get(non_existing_ticket_id), [self._committicketupdater.cmd_close])
        # A missing ticket yields a None entry rather than an exception.
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[non_existing_ticket_id]
        self.assertEqual(ticket, None)
    def test_check_closes_with_full_email_addr(self):
        message = "Fixed some stuff. closes #%i" % (self.tkt_id)
        test_changeset = Mock(Changeset, self.repo, 42, message,
                              'User One <user1@example.org>', None)
        self.check_ticket_comment(test_changeset)
        tickets = self._committicketupdater._parse_message(message)
        self.assertEqual(sorted(tickets.keys()), [self.tkt_id])
        self.assertEqual(tickets.get(self.tkt_id), [self._committicketupdater.cmd_close])
        ret = self._committicketupdater.changeset_added_impl(self.repo, test_changeset)
        (cmds, ticket) = ret[self.tkt_id]
        self.assertEqual(ticket['status'], 'closed')
        self.assertEqual(ticket['owner'], 'user1')
        self.assertEqual(ticket['resolution'], 'fixed')
if __name__ == '__main__':
    # Run the full test suite when executed as a script.
    unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import logging
import threading
import time
import google
from google.appengine.tools.devappserver2 import errors
# Request-type constants passed as request_type to Instance.handle() and
# RuntimeProxy.handle().
NORMAL_REQUEST = 0
READY_REQUEST = 1  # A warmup request i.e. /_ah/warmup.
BACKGROUND_REQUEST = 2  # A request to create a background thread.
SHUTDOWN_REQUEST = 3  # A request to stop the module i.e. /_ah/stop.
# A request to send a command to the module for evaluation e.g. for use by
# interactive shells.
INTERACTIVE_REQUEST = 4
# Constants for use with FILE_CHANGE_INSTANCE_RESTART_POLICY. These constants
# determine whether an instance will be restarted if a file is changed in
# the application_root or any directory returned by
# InstanceFactory.get_restart_directories.
ALWAYS = 0  # Always restart instances.
AFTER_FIRST_REQUEST = 1  # Restart instances that have received >= 1 request.
NEVER = 2  # Never restart instances.
class CannotAcceptRequests(errors.Error):
  """An Instance cannot accept a request e.g. because it is quitting."""
class CannotQuitServingInstance(errors.Error):
  """An Instance cannot be quit e.g. because it is handling a request."""
class InvalidInstanceId(errors.Error):
  """The requested instance id is not serving."""
class RuntimeProxy(object):
  """Abstract base class for a subclass that manages a runtime process.
  Subclasses own the lifecycle of a single runtime process and forward
  individual requests to it.
  """
  def handle(self, environ, start_response, url_map, match, request_id,
             request_type):
    """Serve one request by forwarding it to the runtime process.
    Args:
      environ: WSGI environ dict for the request (PEP-333).
      start_response: WSGI start_response callable (PEP-333).
      url_map: appinfo.URLMap holding the configuration of the handler that
          matched this request.
      match: re.MatchObject produced by the matched URL pattern.
      request_id: Unique string id associated with the request.
      request_type: One of the *_REQUEST constants defined in this module.
    Yields:
      Strings that make up the body of the HTTP response.
    """
    raise NotImplementedError()
  def start(self):
    """Launch the runtime process and block until it is ready to serve."""
    raise NotImplementedError()
  def quit(self):
    """Terminate the runtime process."""
    raise NotImplementedError()
class Instance(object):
  """Handle requests through a RuntimeProxy.
  All of the mutable counters and flags below are protected by
  self._condition; hold the condition before reading or writing them.
  """
  def __init__(self,
               request_data,
               instance_id,
               runtime_proxy,
               max_concurrent_requests,
               max_background_threads=0,
               expect_ready_request=False):
    """Initializer for Instance.
    Args:
      request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
        with request information for use by API stubs.
      instance_id: A string or integer representing the unique (per module) id
        of the instance.
      runtime_proxy: A RuntimeProxy instance that will be used to handle
        requests.
      max_concurrent_requests: The maximum number of concurrent requests that
        the instance can handle. If the instance does not support concurrent
        requests then the value should be 1.
      max_background_threads: The maximum number of background threads that
        the instance can handle. If the instance does not support background
        threads then the value should be 0.
      expect_ready_request: If True then the instance will be sent a special
        request (i.e. /_ah/warmup or /_ah/start) before it can handle external
        requests.
    """
    self._request_data = request_data
    self._instance_id = instance_id
    self._max_concurrent_requests = max_concurrent_requests
    self._max_background_threads = max_background_threads
    self._runtime_proxy = runtime_proxy
    self._condition = threading.Condition()
    self._num_outstanding_requests = 0 # Protected by self._condition.
    self._num_running_background_threads = 0 # Protected by self._condition.
    self._total_requests = 0 # Protected by self._condition.
    self._started = False # Protected by self._condition.
    self._quitting = False # Protected by self._condition.
    self._quit = False # Protected by self._condition.
    self._last_request_end_time = time.time() # Protected by self._condition.
    self._expecting_ready_request = expect_ready_request
    self._expecting_shutdown_request = False
    # A deque containing (start_time, end_time) 2-tuples representing completed
    # requests. This is used to compute latency and qps statistics.
    self._request_history = collections.deque() # Protected by self._condition.
  def __repr__(self):
    # Summarize lifecycle flags and request counters for debugging/logging.
    statuses = []
    if not self._started:
      statuses.append('not started')
    if self._quitting:
      statuses.append('quitting')
    if self._quit:
      statuses.append('quit')
    if self._expecting_ready_request:
      statuses.append('handling ready request')
    if statuses:
      status = ' [%s]' % ' '.join(statuses)
    else:
      status = ''
    return '<Instance %s: %d/%d, total: %d%s>' % (
        self._instance_id,
        self._num_outstanding_requests,
        self._max_concurrent_requests,
        self._total_requests,
        status)
  @property
  def instance_id(self):
    """The unique string or integer id for the Instance."""
    return self._instance_id
  @property
  def total_requests(self):
    """The total number requests that the Instance has handled."""
    with self._condition:
      return self._total_requests
  @property
  def remaining_request_capacity(self):
    """The number of extra requests that the Instance can currently handle."""
    with self._condition:
      return self._max_concurrent_requests - self._num_outstanding_requests
  @property
  def remaining_background_thread_capacity(self):
    """The number of extra background threads the Instance can handle."""
    with self._condition:
      return self._max_background_threads - self._num_running_background_threads
  @property
  def num_outstanding_requests(self):
    """The number of requests that the Instance is currently handling."""
    with self._condition:
      return self._num_outstanding_requests
  @property
  def idle_seconds(self):
    """The number of seconds that the Instance has been idle.
    Will be 0.0 if the Instance has not started.
    """
    with self._condition:
      if self._num_outstanding_requests:
        return 0.0
      elif not self._started:
        return 0.0
      else:
        return time.time() - self._last_request_end_time
  @property
  def handling_ready_request(self):
    """True if the Instance is handling or will be sent a ready request."""
    return self._expecting_ready_request
  def get_latency_60s(self):
    """Returns the average request latency over the last 60s in seconds."""
    with self._condition:
      self._trim_request_history_to_60s()
      if not self._request_history:
        return 0.0
      else:
        total_latency = sum(
            end - start for (start, end) in self._request_history)
        return total_latency / len(self._request_history)
  def get_qps_60s(self):
    """Returns the average queries-per-second over the last 60 seconds."""
    with self._condition:
      self._trim_request_history_to_60s()
      if not self._request_history:
        return 0.0
      else:
        return len(self._request_history) / 60.0
  @property
  def has_quit(self):
    """True if the Instance has quit, is quitting or expects a shutdown."""
    with self._condition:
      return self._quit or self._quitting or self._expecting_shutdown_request
  @property
  def can_accept_requests(self):
    """True if .handle() will accept requests.
    Does not consider outstanding request volume.
    """
    with self._condition:
      return (not self._quit and
              not self._quitting and
              not self._expecting_ready_request and
              not self._expecting_shutdown_request and
              self._started)
  def _trim_request_history_to_60s(self):
    """Removes entries older than 60s from _request_history."""
    window_start = time.time() - 60
    with self._condition:
      while self._request_history:
        t, _ = self._request_history[0]
        if t < window_start:
          self._request_history.popleft()
        else:
          break
  def start(self):
    """Start the instance and the RuntimeProxy.
    Returns:
      True if the Instance was started or False, if the Instance has already
      been quit.
    """
    with self._condition:
      if self._quit:
        return False
    self._runtime_proxy.start()
    with self._condition:
      # Re-check under the lock: quit() may have been called while the
      # runtime proxy was starting.
      if self._quit:
        self._runtime_proxy.quit()
        return False
      self._last_request_end_time = time.time()
      self._started = True
    logging.debug('Started instance: %s', self)
    return True
  def quit(self, allow_async=False, force=False, expect_shutdown=False):
    """Quits the instance and the RuntimeProxy.
    Args:
      allow_async: Whether to enqueue the quit after all requests have completed
        if the instance cannot be quit immediately.
      force: Whether to force the instance to quit even if the instance is
        currently handling a request. This overrides allow_async if True.
      expect_shutdown: Whether the instance will be sent a shutdown request.
    Raises:
      CannotQuitServingInstance: if the Instance is currently handling a
        request and allow_async is False.
    """
    with self._condition:
      if self._quit:
        return
      if not self._started:
        self._quit = True
        return
      if expect_shutdown:
        self._expecting_shutdown_request = True
        return
      if (self._num_outstanding_requests or
          self._num_running_background_threads or
          self._expecting_shutdown_request):
        if not force:
          # NOTE(review): expect_shutdown is always False here because of the
          # early return above, so only allow_async is effective - confirm.
          if allow_async or expect_shutdown:
            self._quitting = True
            return
          raise CannotQuitServingInstance()
      self._quit = True
      self._runtime_proxy.quit()
      # Wake any threads blocked in wait() so they can observe the quit.
      self._condition.notify_all()
    logging.debug('Quit instance: %s', self)
  def reserve_background_thread(self):
    """Reserves a background thread slot.
    Raises:
      CannotAcceptRequests: if the Instance is already handling the maximum
        permissible number of background threads or is not in a state where it
        can handle background threads.
    """
    with self._condition:
      if self._quit:
        raise CannotAcceptRequests('Instance has been quit')
      if not self._started:
        raise CannotAcceptRequests('Instance has not started')
      if not self.remaining_background_thread_capacity:
        raise CannotAcceptRequests(
            'Instance has no additional background thread capacity')
      self._num_running_background_threads += 1
  def handle(self, environ, start_response, url_map, match, request_id,
             request_type):
    """Handles an HTTP request by forwarding it to the RuntimeProxy.
    Args:
      environ: An environ dict for the request as defined in PEP-333.
      start_response: A function with semantics defined in PEP-333.
      url_map: An appinfo.URLMap instance containing the configuration for the
        handler matching this request.
      match: A re.MatchObject containing the result of the matched URL pattern.
      request_id: A unique string id associated with the request.
      request_type: The type of the request. See *_REQUEST module constants.
    Returns:
      An iterable over strings containing the body of the HTTP response.
    Raises:
      CannotAcceptRequests: if the Instance has quit or is already handling the
        maximum permissible number of concurrent requests.
    """
    start_time = time.time()
    with self._condition:
      if self._quit:
        raise CannotAcceptRequests('Instance has been quit')
      if not self._started:
        raise CannotAcceptRequests('Instance has not started')
      # Background and shutdown requests bypass the normal capacity and
      # lifecycle checks below.
      if request_type not in (BACKGROUND_REQUEST, SHUTDOWN_REQUEST):
        if self._quitting:
          raise CannotAcceptRequests('Instance is shutting down')
        if self._expecting_ready_request and request_type != READY_REQUEST:
          raise CannotAcceptRequests('Instance is waiting for ready request')
        if not self.remaining_request_capacity:
          raise CannotAcceptRequests('Instance has no additional capacity')
        self._num_outstanding_requests += 1
        self._request_data.set_request_instance(request_id, self)
      self._total_requests += 1
    try:
      # Force the generator to complete so the code in the finally block runs
      # at the right time.
      return list(self._runtime_proxy.handle(environ,
                                             start_response,
                                             url_map,
                                             match,
                                             request_id,
                                             request_type))
    finally:
      logging.debug('Request handled by %s in %0.4fs',
                    self, time.time() - start_time)
      with self._condition:
        if request_type == READY_REQUEST:
          self._expecting_ready_request = False
        if request_type == BACKGROUND_REQUEST:
          self._num_running_background_threads -= 1
        elif request_type != SHUTDOWN_REQUEST:
          self._num_outstanding_requests -= 1
          self._last_request_end_time = time.time()
          self._trim_request_history_to_60s()
          self._request_history.append((start_time, self._last_request_end_time))
        if request_type == READY_REQUEST:
          # The instance just became usable; wake every possible waiter.
          self._condition.notify(self._max_concurrent_requests)
        elif request_type == SHUTDOWN_REQUEST:
          self._expecting_shutdown_request = False
          self.quit(allow_async=True)
        elif request_type == NORMAL_REQUEST:
          self._condition.notify()
        if (not self._num_outstanding_requests and
            not self._num_running_background_threads):
          if self._quitting:
            self.quit()
  def wait(self, timeout_time):
    """Wait for this instance to have capacity to serve a request.
    Args:
      timeout_time: A float containing a time in seconds since the epoch to wait
        until before timing out.
    Returns:
      True if the instance has request capacity or False if the timeout time was
      reached or the instance has been quit.
    """
    with self._condition:
      while (time.time() < timeout_time and not
             (self.remaining_request_capacity and self.can_accept_requests)
             and not self.has_quit):
        self._condition.wait(timeout_time - time.time())
      return bool(self.remaining_request_capacity and self.can_accept_requests)
class InstanceFactory(object):
  """An abstract factory that creates instances for an InstancePool.
  Attributes:
    max_concurrent_requests: The maximum number of concurrent requests that
      Instances created by this factory can handle. If the Instances do not
      support concurrent requests then the value should be 1.
    START_URL_MAP: An apinfo.URLMap that should be used as the default
      /_ah/start handler if no user-specified script handler matches.
    WARMUP_URL_MAP: An apinfo.URLMap that should be used as the default
      /_ah/warmup handler if no user-specified script handler matches.
  """
  START_URL_MAP = None
  WARMUP_URL_MAP = None
  # If True then the runtime supports interactive command evaluation e.g. for
  # use in interactive shells.
  SUPPORTS_INTERACTIVE_REQUESTS = False
  # Controls how instances are restarted when a file relevant to the application
  # is changed. Possible values: NEVER, AFTER_FIRST_RESTART, ALWAYS.
  FILE_CHANGE_INSTANCE_RESTART_POLICY = None
  def __init__(self, request_data, max_concurrent_requests,
               max_background_threads=0):
    """Initializer for InstanceFactory.
    Args:
      request_data: A wsgi_request_info.WSGIRequestInfo instance that will be
        populated with Instance data for use by the API stubs.
      max_concurrent_requests: The maximum number of concurrent requests that
        Instances created by this factory can handle. If the Instances do not
        support concurrent requests then the value should be 1.
      max_background_threads: The maximum number of background threads that
        the instance can handle. If the instance does not support background
        threads then the value should be 0.
    """
    self.max_concurrent_requests = max_concurrent_requests
    self.max_background_threads = max_background_threads
    self.request_data = request_data
  def get_restart_directories(self):
    """Return directories whose changes should trigger an instance restart.
    Returns:
      A list of directory paths; any file added, deleted or modified under
      one of them restarts all instances created by this factory. The base
      implementation watches nothing.
    """
    return []
  def files_changed(self):
    """Called when a file relevant to the factory *might* have changed."""
  def configuration_changed(self, config_changes):
    """Called when the configuration of the module has changed.
    Args:
      config_changes: A set containing the changes that occured. See the
        *_CHANGED constants in the application_configuration module.
    """
  def new_instance(self, instance_id, expect_ready_request=False):
    """Create and return a new Instance.
    Args:
      instance_id: A string or integer representing the unique (per module) id
        of the instance.
      expect_ready_request: If True then the instance will be sent a special
        request (i.e. /_ah/warmup or /_ah/start) before it can handle external
        requests.
    Returns:
      The newly created instance.Instance.
    """
    raise NotImplementedError()
|
|
#from out2_sup import *
import out2_sup as model_
# Hand-built sample object tree for the generateDS-style `out2_sup` model
# (presumably generated from an XML schema -- confirm against out2_sup).
# MixedContainer(category, content_type, name, value) interleaves text and
# <emp> child elements inside mixed-content comments.
rootObj = model_.rootTag(
    comments=[
        model_.comments(
            content_ = [
                model_.MixedContainer(1, 0, "", "1. This is a "),
                model_.MixedContainer(2, 2, "emp", "foolish"),
                model_.MixedContainer(1, 0, "", " comment. It is "),
                model_.MixedContainer(2, 2, "emp", "really"),
                model_.MixedContainer(1, 0, "", " important."),
            ],
            valueOf_ = """1. This is a comment. It is important.""",
        ),
        model_.comments(
            content_ = [
                model_.MixedContainer(1, 0, "", "2. This is a "),
                model_.MixedContainer(2, 2, "emp", "silly"),
                model_.MixedContainer(1, 0, "", " comment. It is "),
                model_.MixedContainer(2, 2, "emp", "very"),
                model_.MixedContainer(1, 0, "", " significant."),
            ],
            valueOf_ = """2. This is a comment. It is significant.""",
        ),
    ],
    # Plain persons, with optional agents and promoters (booster elements).
    person=[
        model_.person(
            ratio = 3.200000,
            id = 1,
            value = "abcd",
            name='Alberta',
            interest=[
                'gardening',
                'reading',
            ],
            category=5,
            agent=[
            ],
            promoter=[
            ],
        ),
        model_.person(
            id = 2,
            name='Bernardo',
            interest=[
                'programming',
            ],
            category=0,
            agent=[
                model_.agent(
                    firstname='Darren',
                    lastname='Diddly',
                    priority=4.500000,
                    info=model_.info(
                        rating = 5.330000,
                        type_ = 321,
                        name = "Albert Abasinian",
                    ),
                ),
            ],
            promoter=[
            ],
        ),
        model_.person(
            id = 3,
            name='Charlie',
            interest=[
                'people',
                'cats',
                'dogs',
            ],
            category=8,
            agent=[
            ],
            promoter=[
                model_.booster(
                    firstname='David',
                    lastname='Donaldson',
                    other_value=[
                    ],
                    type_=[
                    ],
                    client_handler=[
                    ],
                ),
                model_.booster(
                    firstname='Edward',
                    lastname='Eddleberry',
                    other_value=[
                    ],
                    type_=[
                    ],
                    client_handler=[
                    ],
                ),
            ],
        ),
        # minimal person: only the required id
        model_.person(
            id = 4,
        ),
    ],
    # programmer extends person with language/area and extra typed elements.
    programmer=[
        model_.programmer(
            language = "python",
            area = "xml",
            id = 2,
            name='Charles Carlson',
            interest=[
                'programming',
            ],
            category=2233,
            agent=[
                model_.agent(
                    firstname='Ernest',
                    lastname='Echo',
                    priority=3.800000,
                    info=model_.info(
                        rating = 5.330000,
                        type_ = 321,
                        name = "George Gregory",
                    ),
                ),
            ],
            promoter=[
            ],
            description='A very happy programmer',
            email='charles@happyprogrammers.com',
            # exercise the various restricted integer / date / token types
            elposint=14,
            elnonposint=0,
            elnegint=-12,
            elnonnegint=4,
            eldate='2005-04-26',
            eltoken='aa bb cc dd',
            elshort=123,
            ellong=13241234123412341234,
            elparam=model_.param(
                semantic = "a big semantic",
                name = "Davy",
                id = "id001",
                valueOf_ = """""",
            ),
        ),
    ],
    python_programmer=[
        model_.python_programmer(
            nick_name = "davy",
            language = "python",
            area = "xml",
            vegetable = "tomato",
            fruit = "peach",
            ratio = 8.700000,
            id = 232,
            value = "abcd",
            name='Darrel Dawson',
            interest=[
                'hang gliding',
            ],
            category=3344,
            agent=[
                model_.agent(
                    firstname='Harvey',
                    lastname='Hippolite',
                    priority=5.200000,
                    info=model_.info(
                        rating = 6.550000,
                        type_ = 543,
                        name = "Harvey Hippolite",
                    ),
                ),
            ],
            promoter=[
            ],
            description='An object-orientated programmer',
            email='darrel@happyprogrammers.com',
            favorite_editor='jed',
        ),
    ],
    # java_programmer intentionally mirrors python_programmer's data.
    java_programmer=[
        model_.java_programmer(
            nick_name = "davy",
            language = "python",
            area = "xml",
            vegetable = "tomato",
            fruit = "peach",
            ratio = 8.700000,
            id = 232,
            value = "abcd",
            name='Darrel Dawson',
            interest=[
                'hang gliding',
            ],
            category=3344,
            agent=[
                model_.agent(
                    firstname='Harvey',
                    lastname='Hippolite',
                    priority=5.200000,
                    info=model_.info(
                        rating = 6.550000,
                        type_ = 543,
                        name = "Harvey Hippolite",
                    ),
                ),
            ],
            promoter=[
            ],
            description='An object-orientated programmer',
            email='darrel@happyprogrammers.com',
            favorite_editor='jed',
        ),
    ],
)
|
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Benjamin Kampmann <ben.kampmann@googlemail.com>
"""
This is a Media Backend that allows you to access the Trailers from Apple.com
"""
from coherence.backend import BackendItem, BackendStore
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.web import client
from twisted.internet import task, reactor
from coherence.extern.et import parse_xml
DEFAULT_NAME = 'Apple Trailers'
# Feed of currently featured trailers.
XML_URL = "http://www.apple.com/trailers/home/xml/current.xml"
# Apple's servers only deliver trailer streams to QuickTime user agents.
USER_AGENT = 'QuickTime/7.6.2 (qtver=7.6.2;os=Windows NT 5.1Service Pack 3)'
ROOT_ID = 0
class AppleTrailerProxy(ReverseProxyUriResource):
    """Reverse proxy that rewrites the user-agent header to USER_AGENT
    on every forwarded request (Apple serves the streams only to
    QuickTime clients)."""

    def __init__(self, uri):
        ReverseProxyUriResource.__init__(self, uri)

    def render(self, request):
        # spoof QuickTime before handing off to the normal proxy render
        request.requestHeaders.setRawHeaders('user-agent', USER_AGENT)
        return ReverseProxyUriResource.render(self, request)
class Trailer(BackendItem):
    """A single movie trailer exposed as a UPnP video item.

    The item is served through AppleTrailerProxy so the upstream request
    carries the QuickTime user agent.
    """

    def __init__(self, parent_id, urlbase, id=None, name=None, cover=None,
                 url=None):
        BackendItem.__init__(self)
        self.parentid = parent_id
        self.id = id
        self.name = name
        self.cover = cover
        # ensure exactly one '/' between the base URL and the item id
        base = urlbase
        if len(base) and not base.endswith('/'):
            base = base + '/'
        self.url = base + str(self.id)
        self.location = AppleTrailerProxy(url)
        self.item = DIDLLite.VideoItem(id, parent_id, self.name)
        self.item.albumArtURI = self.cover

    def get_path(self):
        """Return the local (proxied) URL of this trailer."""
        return self.url
class Container(BackendItem):
    """A UPnP directory container holding the Trailer items."""

    logCategory = 'apple_trailers'

    def __init__(self, id, parent_id, name, store=None, \
                 children_callback=None):
        # store and children_callback are accepted for interface
        # compatibility but are not used by this container
        BackendItem.__init__(self)
        self.id = id
        self.parent_id = parent_id
        self.name = name
        self.mimetype = 'directory'
        self.update_id = 0
        self.children = []
        self.item = DIDLLite.Container(id, parent_id, self.name)
        self.item.childCount = None # self.get_child_count()

    def get_children(self, start=0, end=0):
        """Return a slice of children, capped at pages of 25 items.

        NOTE(review): "start - end == start" is only true when end == 0,
        so the condition collapses to "end is 0, negative range, or range
        wider than 25" -- all of which force end = start + 25. Confirm
        this paging behaviour is intended before simplifying.
        """
        if(end - start > 25 or
           start - end == start or
           end - start == 0):
            end = start + 25
        if end != 0:
            return self.children[start:end]
        # unreachable after the clamp above unless end stayed 0; kept as a
        # defensive fallback returning everything from start onward
        return self.children[start:]

    def get_child_count(self):
        return len(self.children)

    def get_item(self):
        return self.item

    def get_name(self):
        return self.name

    def get_id(self):
        return self.id
class AppleTrailersStore(BackendStore):
    """Coherence MediaServer backend for Apple's movie-trailer feed.

    Downloads current.xml, turns every <movieinfo> entry into a Trailer
    and re-fetches the feed every ``refresh`` hours.
    """

    logCategory = 'apple_trailers'
    implements = ['MediaServer']

    def __init__(self, server, *args, **kwargs):
        # kwargs: 'name' (display name), 'refresh' (hours between reloads)
        BackendStore.__init__(self, server, **kwargs)
        self.next_id = 1000
        self.name = kwargs.get('name', DEFAULT_NAME)
        # refresh is configured in hours; stored in seconds
        self.refresh = int(kwargs.get('refresh', 8)) * (60 * 60)
        self.trailers = {}
        # map the Windows-Media-Connect video root (id 15) onto our root
        self.wmc_mapping = {'15': 0}
        dfr = self.update_data()
        # first get the first bunch of data before sending init_completed
        dfr.addCallback(lambda x: self.init_completed())

    def queue_update(self, result):
        # schedule the next feed refresh; passes *result* through untouched
        reactor.callLater(self.refresh, self.update_data)
        return result

    def update_data(self):
        # download -> parse XML -> build Trailer items -> schedule next run
        dfr = client.getPage(XML_URL)
        dfr.addCallback(parse_xml)
        dfr.addCallback(self.parse_data)
        dfr.addCallback(self.queue_update)
        return dfr

    def parse_data(self, xml_data):
        # iterate the <movieinfo> entries cooperatively so the reactor
        # stays responsive while the feed is processed
        def iterate(root):
            for item in root.findall('./movieinfo'):
                trailer = self._parse_into_trailer(item)
                yield trailer
        root = xml_data.getroot()
        return task.coiterate(iterate(root))

    def _parse_into_trailer(self, item):
        """Build a Trailer plus its DIDL resources from one <movieinfo>
        node and register it in self.trailers (keyed by the feed id)."""
        data = {}
        data['id'] = item.get('id')
        data['name'] = item.find('./info/title').text
        data['cover'] = item.find('./poster/location').text
        data['url'] = item.find('./preview/large').text
        trailer = Trailer(ROOT_ID, self.urlbase, **data)
        # normalise the runtime to H:MM:SS; the feed uses ad-hoc formats
        duration = None
        try:
            hours = 0
            minutes = 0
            seconds = 0
            duration = item.find('./info/runtime').text
            try:
                hours, minutes, seconds = duration.split(':')
            except ValueError:
                try:
                    minutes, seconds = duration.split(':')
                except ValueError:
                    seconds = duration
            duration = "%d:%02d:%02d" % (int(hours), int(minutes), int(seconds))
        except:
            # NOTE(review): bare except silently drops any malformed or
            # missing runtime -- best-effort by design, but it also hides
            # unrelated errors
            pass
        try:
            trailer.item.director = item.find('./info/director').text
        except:
            pass
        try:
            trailer.item.description = item.find('./info/description').text
        except:
            pass
        res = DIDLLite.Resource(trailer.get_path(), 'http-get:*:video/quicktime:*')
        res.duration = duration
        try:
            res.size = item.find('./preview/large').get('filesize', None)
        except:
            pass
        trailer.item.res.append(res)
        if self.server.coherence.config.get('transcoding', 'no') == 'yes':
            # advertise an on-the-fly transcoded MP4 variant
            dlna_pn = 'DLNA.ORG_PN=AVC_TS_BL_CIF15_AAC'
            dlna_tags = DIDLLite.simple_dlna_tags[:]
            dlna_tags[2] = 'DLNA.ORG_CI=1'
            url = self.urlbase + str(trailer.id) + '?transcoded=mp4'
            new_res = DIDLLite.Resource(url,
                'http-get:*:%s:%s' % ('video/mp4', ';'.join([dlna_pn] + dlna_tags)))
            new_res.size = None
            res.duration = duration
            trailer.item.res.append(new_res)
            # and a JPEG thumbnail resource derived from the poster
            dlna_pn = 'DLNA.ORG_PN=JPEG_TN'
            dlna_tags = DIDLLite.simple_dlna_tags[:]
            dlna_tags[2] = 'DLNA.ORG_CI=1'
            dlna_tags[3] = 'DLNA.ORG_FLAGS=00f00000000000000000000000000000'
            url = self.urlbase + str(trailer.id) + '?attachment=poster&transcoded=thumb&type=jpeg'
            new_res = DIDLLite.Resource(url,
                'http-get:*:%s:%s' % ('image/jpeg', ';'.join([dlna_pn] + dlna_tags)))
            new_res.size = None
            #new_res.resolution = "160x160"
            trailer.item.res.append(new_res)
            if not hasattr(trailer.item, 'attachments'):
                trailer.item.attachments = {}
            trailer.item.attachments['poster'] = data['cover']
        self.trailers[trailer.id] = trailer

    def get_by_id(self, id):
        """Resolve an object id: 0 is the root container, anything else a
        trailer keyed by its feed id (string); unknown ids yield None."""
        try:
            if int(id) == 0:
                return self.container
            else:
                return self.trailers.get(id, None)
        except:
            return None

    def upnp_init(self):
        # announce the protocols we serve and build the (sorted) root
        if self.server:
            self.server.connection_manager_server.set_variable( \
                0, 'SourceProtocolInfo', ['http-get:*:video/quicktime:*', 'http-get:*:video/mp4:*'])
        self.container = Container(ROOT_ID, -1, self.name)
        trailers = self.trailers.values()
        # NOTE: Python 2 only -- sort(cmp=...) was removed in Python 3
        trailers.sort(cmp=lambda x, y: cmp(x.get_name().lower(), y.get_name().lower()))
        self.container.children = trailers

    def __repr__(self):
        return self.__class__.__name__
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import unittest, json, sys
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
import frappe.utils.scheduler
import cProfile, StringIO, pstats
unittest_runner = unittest.TextTestRunner
def xmlrunner_wrapper(output):
    """Convenience wrapper to keep method signature unchanged for
    XMLTestRunner and TextTestRunner; *output* is the stream the XML
    report is written to."""
    def _runner(*args, **kwargs):
        # force the report stream, forward everything else untouched
        kwargs = dict(kwargs, output=output)
        return xmlrunner.XMLTestRunner(*args, **kwargs)
    return _runner
def main(app=None, module=None, doctype=None, verbose=False, tests=(), force=False, profile=False, junit_xml_output=None):
    """Entry point of the test runner.

    Runs tests for a single doctype, a single module, or (default) every
    installed app. When junit_xml_output is a path, results are written as
    JUnit XML via xmlrunner instead of the plain text runner.
    Returns the unittest result object.
    """
    global unittest_runner
    xmloutput_fh = None
    if junit_xml_output:
        xmloutput_fh = open(junit_xml_output, 'w')
        unittest_runner = xmlrunner_wrapper(xmloutput_fh)
    else:
        unittest_runner = unittest.TextTestRunner
    try:
        frappe.flags.print_messages = verbose
        frappe.flags.in_test = True
        if not frappe.db:
            frappe.connect()
        # if not frappe.conf.get("db_name").startswith("test_"):
        #     raise Exception, 'db_name must start with "test_"'
        # workaround! since there is no separate test db
        frappe.clear_cache()
        # keep the scheduler from firing jobs mid-test-run
        frappe.utils.scheduler.disable_scheduler()
        set_test_email_config()
        if verbose:
            print 'Running "before_tests" hooks'
        for fn in frappe.get_hooks("before_tests", app_name=app):
            frappe.get_attr(fn)()
        if doctype:
            ret = run_tests_for_doctype(doctype, verbose, tests, force, profile)
        elif module:
            ret = run_tests_for_module(module, verbose, tests, profile)
        else:
            ret = run_all_tests(app, verbose, profile)
        frappe.db.commit()
        # workaround! since there is no separate test db
        frappe.clear_cache()
        return ret
    finally:
        # flush/close the XML report stream even when tests blow up
        if xmloutput_fh:
            xmloutput_fh.flush()
            xmloutput_fh.close()
def set_test_email_config():
    """Point outgoing-mail settings at harmless dummy values so tests
    never touch a real mail server."""
    dummy_config = {
        "auto_email_id": "test@example.com",
        "mail_server": "smtp.example.com",
        "mail_login": "test@example.com",
        "mail_password": "test",
        "admin_password": "admin",
    }
    frappe.conf.update(dummy_config)
def run_all_tests(app=None, verbose=False, profile=False):
    """Discover every test_*.py under *app* (or all installed apps) and
    run the combined suite; optionally profile it with cProfile.
    Returns the unittest result object."""
    import os
    apps = [app] if app else frappe.get_installed_apps()
    test_suite = unittest.TestSuite()
    for app in apps:
        for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
            # prune folders that can never contain tests
            for dontwalk in ('locals', '.git', 'public'):
                if dontwalk in folders:
                    folders.remove(dontwalk)
            for filename in files:
                filename = cstr(filename)
                if filename.startswith("test_") and filename.endswith(".py"):
                    _add_test(app, path, filename, verbose, test_suite=test_suite)
    if profile:
        pr = cProfile.Profile()
        pr.enable()
    out = unittest_runner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
    if profile:
        pr.disable()
        # dump cumulative-time stats to stdout
        s = StringIO.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats()
        print s.getvalue()
    return out
def run_tests_for_doctype(doctype, verbose=False, tests=(), force=False, profile=False):
    """Run the test module of a single doctype, creating its test records
    first. With force=True all existing rows of the doctype are deleted
    before the records are rebuilt. Exits the process on unknown doctype."""
    module = frappe.db.get_value("DocType", doctype, "module")
    if not module:
        print 'Invalid doctype {0}'.format(doctype)
        sys.exit(1)
    test_module = get_module_name(doctype, module, "test_")
    if force:
        # wipe existing documents so records are rebuilt from scratch
        for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
            frappe.delete_doc(doctype, name, force=True)
    make_test_records(doctype, verbose=verbose, force=force)
    module = frappe.get_module(test_module)
    return _run_unittest(module, verbose=verbose, tests=tests, profile=profile)
def run_tests_for_module(module, verbose=False, tests=(), profile=False):
    """Import *module* by dotted path, create records for its declared
    test_dependencies, then run its unittest cases."""
    imported = importlib.import_module(module)
    for dependency in getattr(imported, "test_dependencies", []):
        make_test_records(dependency, verbose=verbose)
    return _run_unittest(module=imported, verbose=verbose, tests=tests, profile=profile)
def _run_unittest(module, verbose=False, tests=(), profile=False):
    """Run *module*'s test cases; when *tests* names specific test methods,
    only those are added to the suite. Optionally profiles with cProfile.
    Returns the unittest result object."""
    test_suite = unittest.TestSuite()
    module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
    if tests:
        # cherry-pick the requested test methods out of every case class
        for each in module_test_cases:
            for test_case in each.__dict__["_tests"]:
                if test_case.__dict__["_testMethodName"] in tests:
                    test_suite.addTest(test_case)
    else:
        test_suite.addTest(module_test_cases)
    if profile:
        pr = cProfile.Profile()
        pr.enable()
    out = unittest_runner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
    if profile:
        pr.disable()
        # dump cumulative-time stats to stdout
        s = StringIO.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats()
        print s.getvalue()
    return out
def _add_test(app, path, filename, verbose, test_suite=None):
    """Load the test module behind *filename* and add its cases to
    *test_suite*; for doctype test files the doctype's test records are
    created first. Selenium-only modules are skipped unless enabled."""
    import os
    # never collect the boilerplate templates under doctype/doctype/
    if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
        # in /doctype/doctype/boilerplate/
        return
    relative_path = os.path.relpath(path, frappe.get_pymodule_path(app))
    if relative_path == '.':
        module_name = app
    else:
        module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
            relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
    module = frappe.get_module(module_name)
    if getattr(module, "selenium_tests", False) and not frappe.conf.run_selenium_tests:
        return
    if not test_suite:
        test_suite = unittest.TestSuite()
    if os.path.basename(os.path.dirname(path)) == "doctype":
        # test_foo.py sits next to foo.json -- read the doctype name from it
        json_file = os.path.join(path, filename[5:].replace(".py", ".json"))
        with open(json_file, 'r') as f:
            doctype = json.loads(f.read())["name"]
        make_test_records(doctype, verbose)
    test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
    """Recursively create test records for *doctype* and for every doctype
    it links to (see get_dependencies)."""
    if not frappe.db:
        frappe.connect()
    for options in get_dependencies(doctype):
        # placeholder option of Select fields, not a real doctype
        if options == "[Select]":
            continue
        if not options in frappe.local.test_objects:
            # NOTE(review): the inner check below can never be true inside
            # this "not in" branch, so the circular-reference warning is
            # dead code -- confirm intended behaviour before fixing.
            if options in frappe.local.test_objects:
                print "No test records or circular reference for {0}".format(options)
            # register first so recursive calls see this doctype as visited
            frappe.local.test_objects[options] = []
            make_test_records(options, verbose, force)
            make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
    """Return (module name, freshly reloaded test module or None) for
    *doctype*. A missing test module is reported as None."""
    module = frappe.db.get_value("DocType", doctype, "module")
    test_module = None
    try:
        test_module = load_doctype_module(doctype, module, "test_")
        if test_module:
            # pick up edits made since the module was first imported
            reload(test_module)
    except ImportError:
        test_module = None
    return module, test_module
def get_dependencies(doctype):
    """Collect the doctypes *doctype* depends on: all link fields
    (including those of its child tables), the doctype itself and the
    test module's declared test_dependencies, minus its test_ignore list."""
    module, test_module = get_modules(doctype)
    meta = frappe.get_meta(doctype)
    link_fields = meta.get_link_fields()
    for table_field in meta.get_table_fields():
        link_fields.extend(frappe.get_meta(table_field.options).get_link_fields())
    options_list = [df.options for df in link_fields] + [doctype]
    options_list += list(getattr(test_module, "test_dependencies", []))
    # de-duplicate before filtering
    options_list = list(set(options_list))
    for ignored in getattr(test_module, "test_ignore", []):
        if ignored in options_list:
            options_list.remove(ignored)
    return options_list
def make_test_records_for_doctype(doctype, verbose=0, force=False):
    """Create and register the test records for one doctype, preferring
    the test module's own factory (_make_test_records), then its inline
    test_records, then the records shipped with the app."""
    module, test_module = get_modules(doctype)
    if verbose:
        print "Making for " + doctype
    if hasattr(test_module, "_make_test_records"):
        frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
    elif hasattr(test_module, "test_records"):
        frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose)
    else:
        test_records = frappe.get_test_records(doctype)
        if test_records:
            frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose)
        elif verbose:
            # nothing to insert -- tell the developer what a record needs
            print_mandatory_fields(doctype)
def make_test_objects(doctype, test_records, verbose=None):
    """Insert *test_records* (list of dicts) as documents of *doctype* and
    return the inserted names. Skips entirely if records for the doctype
    were already registered this run."""
    records = []
    # if not frappe.get_meta(doctype).issingle:
    #     existing = frappe.get_all(doctype, filters={"name":("like", "_T-" + doctype + "-%")})
    #     if existing:
    #         return [d.name for d in existing]
    #
    #     existing = frappe.get_all(doctype, filters={"name":("like", "_Test " + doctype + "%")})
    #     if existing:
    #         return [d.name for d in existing]
    for doc in test_records:
        if not doc.get("doctype"):
            doc["doctype"] = doctype
        d = frappe.copy_doc(doc)
        if doc.get('name'):
            d.name = doc.get('name')
        if frappe.local.test_objects.get(d.doctype):
            # do not create test records, if already exists
            return []
        if d.meta.get_field("naming_series"):
            if not d.naming_series:
                d.naming_series = "_T-" + d.doctype + "-"
        # submit if docstatus is set to 1 for test record
        docstatus = d.docstatus
        d.docstatus = 0
        try:
            d.run_method("before_test_insert")
            d.insert()
            if docstatus == 1:
                d.submit()
        except frappe.NameError:
            # record with this name already exists -- fine for tests
            pass
        except Exception, e:
            # a test module may declare exceptions that are expected here
            if d.flags.ignore_these_exceptions_in_test and e.__class__ in d.flags.ignore_these_exceptions_in_test:
                pass
            else:
                raise
        records.append(d.name)
    frappe.db.commit()
    return records
def print_mandatory_fields(doctype):
    """Print the autoname rule and mandatory fields of *doctype* so a
    developer can author the missing test records."""
    print "Please setup make_test_records for: " + doctype
    print "-" * 60
    meta = frappe.get_meta(doctype)
    print "Autoname: " + (meta.autoname or "")
    print "Mandatory Fields: "
    for d in meta.get("fields", {"reqd":1}):
        print d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or "")
    print
|
|
from invoke import task
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
import datetime
from monty.os import cd
from pymatgen import __version__ as CURRENT_VER
# Release version stamp: today's date as YYYY.M.D (%-m/%-d: no zero padding).
NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d")

"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""

__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
@task
def make_doc(ctx):
    """Rebuild the HTML documentation.

    Refreshes docs_rst/latest_changes.rst from the top entry of CHANGES.rst,
    converts the example notebooks to HTML, regenerates the sphinx API rst
    files (stripping test modules), runs sphinx and copies the output into
    docs/ for GitHub Pages hosting.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # raw string: "\-" in a plain literal is an invalid escape sequence
    # (DeprecationWarning; a SyntaxWarning from Python 3.12)
    toks = re.split(r"\-{3,}", contents)
    n = len(toks[0].split()[-1])
    changes = [toks[0]]
    # drop the trailing (next-version) line of the latest entry
    changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
    changes = ("-" * n).join(changes)
    with open("docs_rst/latest_changes.rst", "w") as f:
        f.write(changes)
    with cd("examples"):
        ctx.run("jupyter nbconvert --to html *.ipynb")
        ctx.run("mv *.html ../docs_rst/_static")
    with cd("docs_rst"):
        ctx.run("cp ../CHANGES.rst change_log.rst")
        ctx.run("sphinx-apidoc --separate -d 6 -o . -f ../pymatgen")
        ctx.run("rm pymatgen*.tests.*rst")
        # strip "Subpackages"/tests sections from the generated rst files
        for f in glob.glob("*.rst"):
            if f.startswith('pymatgen') and f.endswith('rst'):
                newoutput = []
                suboutput = []
                subpackage = False
                with open(f, 'r') as fid:
                    for line in fid:
                        clean = line.strip()
                        if clean == "Subpackages":
                            subpackage = True
                        if not subpackage and not clean.endswith("tests"):
                            newoutput.append(line)
                        else:
                            if not clean.endswith("tests"):
                                suboutput.append(line)
                            if clean.startswith("pymatgen") and not clean.endswith("tests"):
                                newoutput.extend(suboutput)
                                subpackage = False
                                suboutput = []
                with open(f, 'w') as fid:
                    fid.write("".join(newoutput))
        ctx.run("make html")
        ctx.run("cp _static/* ../docs/html/_static")
    with cd("docs"):
        ctx.run("cp -r html/* .")
        ctx.run("rm -r html")
        ctx.run("rm -r doctrees")
        ctx.run("rm -r _sources")
        # This makes sure pymatgen.org works to redirect to the Gihub page
        ctx.run("echo \"pymatgen.org\" > CNAME")
        # Avoid the use of jekyll so that _dir works as intended.
        ctx.run("touch .nojekyll")
@task
def update_doc(ctx):
    """Rebuild the docs and push the result to GitHub."""
    make_doc(ctx)
    for cmd in ("git add .",
                "git commit -a -m \"Update docs\"",
                "git push"):
        ctx.run(cmd)
@task
def publish(ctx):
    """Build sdist + wheel and upload the release to PyPI via twine."""
    ctx.run("rm dist/*.*", warn=True)
    for cmd in ("python setup.py register sdist bdist_wheel",
                "twine upload dist/*"):
        ctx.run(cmd)
@task
def set_ver(ctx):
    """Stamp NEW_VER into pymatgen/__init__.py and setup.py."""
    init_lines = []
    with open("pymatgen/__init__.py", "rt") as f:
        for line in f:
            if "__version__" in line:
                init_lines.append('__version__ = "%s"' % NEW_VER)
            else:
                init_lines.append(line.rstrip())
    with open("pymatgen/__init__.py", "wt") as f:
        f.write("\n".join(init_lines))
    with open("setup.py", "rt") as f:
        setup_lines = [re.sub(r'version=([^,]+),', 'version="%s",' % NEW_VER,
                              line.rstrip())
                       for line in f]
    with open("setup.py", "wt") as f:
        f.write("\n".join(setup_lines))
@task
def update_coverage(ctx):
    """Regenerate the HTML coverage report and republish the docs."""
    with cd("docs/_build/html/"):
        ctx.run("git pull")
    ctx.run("nosetests --config=nose.cfg --cover-html --cover-html-dir=docs/_build/html/coverage")
    # bug fix: was update_doc() -- the invoke context is a required
    # positional argument, so the bare call raised TypeError
    update_doc(ctx)
@task
def merge_stable(ctx):
    """Tag NEW_VER on master, push tags, and fast-forward the stable
    branch to master before returning to master."""
    ctx.run("git tag -a v%s -m \"v%s release\"" % (NEW_VER, NEW_VER))
    for cmd in ("git push --tags",
                "git checkout stable",
                "git pull",
                "git merge master",
                "git push",
                "git checkout master"):
        ctx.run(cmd)
@task
def release_github(ctx):
    """Create a GitHub release tagged v{NEW_VER}, using the newest entry of
    CHANGES.rst as the release notes. Requires the GITHUB_RELEASES_TOKEN
    environment variable."""
    with open("CHANGES.rst") as f:
        contents = f.read()
    # raw string: "\-" in a plain literal is an invalid escape sequence
    # (DeprecationWarning; a SyntaxWarning from Python 3.12)
    toks = re.split(r"\-+", contents)
    desc = toks[1].strip()
    # drop the trailing (next-version header) line
    toks = desc.split("\n")
    desc = "\n".join(toks[:-1]).strip()
    payload = {
        "tag_name": "v" + NEW_VER,
        "target_commitish": "master",
        "name": "v" + NEW_VER,
        "body": desc,
        "draft": False,
        "prerelease": False
    }
    response = requests.post(
        "https://api.github.com/repos/materialsproject/pymatgen/releases",
        data=json.dumps(payload),
        headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
    print(response.text)
@task
def update_changelog(ctx):
    """Prepend the commit subjects since CURRENT_VER to CHANGES.rst as the
    draft entry for NEW_VER."""
    raw = subprocess.check_output(["git", "log", "--pretty=format:%s",
                                   "v%s..HEAD" % CURRENT_VER])
    bullets = ["* " + subject
               for subject in raw.decode("utf-8").strip().split("\n")]
    with open("CHANGES.rst") as f:
        contents = f.read()
    marker = "=========="
    parts = contents.split(marker)
    header = "\n\nv%s\n" % NEW_VER + "-" * (len(NEW_VER) + 1) + "\n"
    # insert the new entry just before the trailing section
    parts.insert(-1, header + "\n".join(bullets))
    with open("CHANGES.rst", "w") as f:
        f.write(parts[0] + marker + "".join(parts[1:]))
@task
def log_ver(ctx):
    """Record the release by dropping a marker file named after NEW_VER
    into the public Dropbox folder."""
    marker = os.path.join(os.environ["HOME"], "Dropbox", "Public",
                          "pymatgen", NEW_VER)
    with open(marker, "w") as f:
        f.write("Release")
@task
def release(ctx, notest=False):
    """Full release pipeline: clean, bump version, (optionally) test,
    publish to PyPI, log, rebuild docs, merge stable and cut the GitHub
    release."""
    ctx.run("rm -r dist build pymatgen.egg-info", warn=True)
    set_ver(ctx)
    if not notest:
        ctx.run("nosetests")
    for step in (publish, log_ver, update_doc, merge_stable, release_github):
        step(ctx)
@task
def open_doc(ctx):
    """Open the locally built documentation in the default web browser."""
    index_page = os.path.abspath("docs/_build/html/index.html")
    webbrowser.open("file://" + index_page)
|
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import matplotlib.finance as mf
from matplotlib.widgets import MultiCursor
import statsmodels.tsa.stattools as stt
# import scipy.signal as sgn
import statsmodels.api as sm
# from statsmodels.sandbox.regression.predstd import wls_prediction_std
# from matplotlib.mlab import PCA
from collections import defaultdict
#------------------------------------------------
'''Some time length'''
# Session lengths measured in ticks (4 ticks per second):
# night ~2.5h, morning ~2.25h, afternoon ~1.5h.
night_len = int(4*3600*2.5)
mor_len = int(4*3600*2.25)
aftn_len = int(4*3600*1.5)
# +4 extra ticks per day -- presumably session-boundary ticks; confirm.
day_len = night_len + mor_len + aftn_len + 4
#-----------------------------------------------
'''add columns'''
def AddCol(df):
    """Return *df* joined with three derived columns.

    Adds:
      vol_diff     -- tick-to-tick change of 'volume'
      openInt_diff -- tick-to-tick change of 'openInterest'
      midPrc       -- mid price, (askPrc_0 + bidPrc_0) / 2

    ``.ix`` was deprecated and then removed from pandas; the label-based
    accesses here are exactly ``.loc``, so behaviour is unchanged.
    """
    vol = df.loc[:, 'volume'].diff()
    # this addition is for the convenience of Log y scale plot
    # vol +=1
    vol = vol.rename('vol_diff')
    openint = df.loc[:, 'openInterest'].diff()
    # this addition is for the convenience of Log y scale plot
    # openint += 1
    openint = openint.rename('openInt_diff')
    mid = (df.loc[:, 'askPrc_0'] + df.loc[:, 'bidPrc_0']) / 2.
    mid = mid.rename('midPrc')
    ret = df.join([vol, openint, mid])
    return ret
# -------------------------------------------------
def ForwardDiff(df, n=1):
    """Calculate the difference of value after n rows.

    For each row this is value[i + n] - value[i]; rows whose forward
    value does not exist are dropped.

    Parameters
    ----------
    df : pandas DataFrame (or Series)
    n : int

    Returns
    -------
    ret : DataFrame.
    """
    forward = df.diff(periods=n).shift(periods=-1 * n)
    return forward.dropna()
def CutHighVar(df, length=200):
    '''
    Purpose: Cut a small period after opening in the morning and at night.
    Because this time range, the var of price is high, which harmd our model.
    df: pd.DataFrame or pd.Series. With datetime index
    length: int. the length you want to cut, counted in ticks (4 per
            second). Cannot be larger than 240
    '''
    idx = df.index
    # 21:00 is the night open, 09:00 the morning open
    at_open_hour = (idx.hour == 21) | (idx.hour == 9)
    in_cut_window = at_open_hour & (idx.minute == 0) & \
        (idx.second <= int(length // 4) - 1)
    return df[~in_cut_window]
def CutTail(df, length=60):
    '''
    Purpose: Cut a small period before market close (15:00).
    df: pd.DataFrame or pd.Series. With datetime index
    length: int. the length you want to cut, counted in ticks (4 per
            second). Cannot be larger than 240
    '''
    idx = df.index
    # the last length//4 seconds of the 14:59 minute
    final_seconds = (idx.hour == 14) & (idx.minute == 59) & \
        (idx.second >= 60 - int(length // 4))
    # this is the last tick
    closing_tick = idx.hour == 15
    return df[~(final_seconds | closing_tick)]
def DayChangeNum(ser, distance=7):
    '''
    Positions where the index hour jumps by more than *distance* hours,
    i.e. trading-day boundaries in the tick series.
    ser is price move series after process.
    distance counting in hours
    '''
    hours = ser.index.hour
    # note: the zero is inserted at position 1 (kept from the original
    # implementation) so the result aligns with ser's length
    jumps = np.insert(np.diff(hours), 1, 0)
    return np.where(np.abs(jumps) > distance)[0]
# def NormPriceMove(ser, daychgnum):
# ret = ser.copy()
# for i in range(len(daychgnum) - 1):
# mysamp = ret.iloc[daychgnum[i]: daychgnum[i+1]]
# #print mysamp
# mystd = mysamp.std()
# print mystd
# ret.iloc[daychgnum[i]: daychgnum[i+1]] /= mystd
# return ret
def CuthlLimit(df, forward=60, backward=100, how='all', depth=0):
    """Cut those reach high low Limit, including an extended length around them

    Parameters
    ----------
    df : Original DataFrame including all level quote infomation
    forward : forward_ticks of price move
    backward : sample length needed to generate an indicator
    how : only consider highLimit, lowLimit or allLimit
    depth : consider price which level quote reach high low Limit

    Returns
    -------
    ret : selected boolean array
    """
    # blank out this many ticks around every limit-locked tick
    extend_len = 2 * max([forward, backward]) + 1
    s1 = 'bidQty_' + str(depth)
    s2 = 'askQty_' + str(depth)
    # a zero quantity at this depth marks the book locked at a limit
    if how == 'all':
        arr1 = df.ix[:, s1] == 0
        arr2 = df[s2] == 0
        bool_arr = np.logical_or(arr1, arr2)
        #bool_arr = np.logical_or(df[s1] == 0, df[s2] == 0)
    elif how == 'bid':
        bool_arr = (df[s1] == 0)
    elif how == 'ask':
        bool_arr = (df[s2] == 0)
    else:
        # NOTE(review): bool_arr is undefined after this branch, so an
        # unknown *how* raises NameError below -- confirm intended.
        print 'ERROR!'
    # centered rolling mean > 0 marks every tick within extend_len of a
    # locked tick; NaN edges are treated as discarded (fillna(1.))
    float_arr = bool_arr.astype(float)
    float_arr_diffusion = pd.Series(data=float_arr).rolling(window=extend_len, center=True).mean()
    dicard_arr = float_arr_diffusion.fillna(value=1.).astype(bool)
    return np.logical_not(dicard_arr)
def GiveMePM(df, nforward=60, nbackward=100, lim=[0, 30], cutdepth=0, norm=False, high_var_length=200):
    """from original DataFrame calculate price move Series,
    including CutTail and CutHighVar.

    Parameters
    ----------
    df : the Original DataFrame.
    nforward : forward_ticks of price move
    nbackward : sample length needed to generate an indicator
    lim : can be like (0, 20), counting in days, or an int array of index.
    cutdepth : book depth passed to CuthlLimit
    norm : if True, normalize the price move using every day std
           (currently disabled, see commented block below).
    high_var_length : ticks to cut after each session open

    Returns
    -------
    ret : price move series.
    """
    global day_len
    # select the requested day range (or explicit row indices)
    if len(lim) == 2:
        samp = df.ix[day_len*lim[0]: day_len*lim[1], 'midPrc']
    else:
        samp = df.ix[lim, 'midPrc']
    #print 'samp'
    ret = ForwardDiff(samp, nforward)
    #print 'ForwardDiff'
    # ret = CuthlLimit(ret, how='all', depth=cutdepth).loc[:, 'midPrc']
    # #print 'CuthlLimit'
    ret = CutTail(ret, nforward)
    #print 'CutTail'
    # cut at least nbackward ticks so every kept tick has indicator history
    cut_head_length = max([high_var_length, nbackward])
    ret = CutHighVar(ret, length=cut_head_length)
    #print 'CutHighVar'
    # if norm:
    #     ret_daychangenum = DayChangeNum(ret)
    #     ret = NormPriceMove(ret, ret_daychangenum)
    # finally drop ticks near high/low price limits
    selected_arr = CuthlLimit(df, forward=nforward, backward=nbackward, how='all', depth=cutdepth)
    return ret[selected_arr].dropna()
def GiveMeIndex(arri, arro):
    '''
    Generate integer row indices for in-sample / out-of-sample day ranges.
    arri and arro are two dim ndarrays, each element a (start, end) time
    range counted in days; day_len converts days into tick rows.
    Returns (index_in, index_out).
    '''
    global day_len
    def expand(day_ranges):
        rows = []
        for rng in day_ranges:
            rows.extend(range(day_len * rng[0], day_len * rng[1]))
        return rows
    return expand(arri), expand(arro)
|
|
from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lasagne.layers import InputLayer, Conv2DLayer, Pool2DLayer
from lasagne.regularization import regularize_network_params, l2
VERBOSE = False      # extra diagnostic output
GRID_SEARCH = False  # whether to sweep hyper-parameters
NUM_ELECTRODES = 30 # 4 or 30 electrodes
def bootstrap(data, labels, boot_type="downsample"):
    """Balance the two classes (OT=0, MW=1) by resampling with replacement.

    data      : ndarray of examples, first axis indexes trials.
    labels    : ndarray of 0/1 labels aligned with data.
    boot_type : "downsample" shrinks the majority class to the minority
                size; "upsample" grows the minority class to match.
    Returns (balanced_data, balanced_labels).
    """
    print("Bootstrapping data...")
    ot_class = 0
    mw_class = 1
    ot_idx = np.where(labels == ot_class)
    mw_idx = np.where(labels == mw_class)
    ot_data, ot_labels = data[ot_idx], labels[ot_idx]
    print(" - OT (class: {}) | Data: {} | Labels: {}".format(ot_class, ot_data.shape, ot_labels.shape))
    mw_data, mw_labels = data[mw_idx], labels[mw_idx]
    print(" - MW (class: {}) | Data: {} | Labels: {}".format(mw_class, mw_data.shape, mw_labels.shape))
    # decide which class is over- and which under-represented
    if ot_data.shape[0] > mw_data.shape[0]:
        maj_class, maj_data, maj_labels = ot_class, ot_data, ot_labels
        min_class, min_data, min_labels = mw_class, mw_data, mw_labels
    else:
        maj_class, maj_data, maj_labels = mw_class, mw_data, mw_labels
        min_class, min_data, min_labels = ot_class, ot_data, ot_labels
    print(" - Majority class: {} (N = {}) | Minority class: {} (N = {})".format(maj_class, maj_data.shape[0],
                                                                                min_class, min_data.shape[0]))
    if boot_type == "upsample":
        print("Upsampling minority class...")
        num_to_boot = maj_data.shape[0] - min_data.shape[0]
        print(" - Number to upsample: {}".format(num_to_boot))
        picks = np.random.randint(min_data.shape[0], size=num_to_boot)
        final_data = np.concatenate((data, min_data[picks]), axis=0)
        final_labels = np.concatenate((labels, min_labels[picks]), axis=0)
    elif boot_type == "downsample":
        print("Downsampling majority class...")
        # Resample N = number of minority examples
        picks = np.random.randint(maj_data.shape[0], size=min_data.shape[0])
        final_data = np.concatenate((maj_data[picks], min_data), axis=0)
        final_labels = np.concatenate((maj_labels[picks], min_labels), axis=0)
    print("Final class balance: {} ({}) - {} ({})".format(
        maj_class, len(np.where(final_labels == maj_class)[0]),
        min_class, len(np.where(final_labels == min_class)[0])))
    return final_data, final_labels
# Load EEG data
# Data is expected in <repo_root>/data, two directory levels above this script.
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
# NOTE(review): indexing below implies data is 4-D with 5 frequency-band
# channels, 30 electrodes and 512 time samples per trial
# (trials, bands, electrodes, samples) -- confirm against the .npy files.
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_bands.npy'))
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_bands_labels.npy'))
# Keep only the second column of the label array as the class label.
data_labels = data_labels[:,1]
## EEG frequency bands were created directly from MATLAB data using:
#from scipy import signal
#
#FREQ_BANDS = ((0.1, 3), (4, 7), (8, 13), (16, 31)) # Delta, theta, alpha, beta
#SAMPLE_RATE = 256
#
#all_data = np.reshape(all_data, (-1, 1, 30, 512))
#
#filtered_data = [all_data]
#
#for band in FREQ_BANDS:
# lower = float(band[0])/(SAMPLE_RATE/2) # Proportion of Nyquist frequency
# upper = float(band[1])/(SAMPLE_RATE/2) # Proportion of Nyquist frequency
# b, a = signal.butter(1, [lower, upper], 'bandpass')
#
# output_signal = signal.filtfilt(b, a, all_data, axis=3)
#
# filtered_data.append(output_signal)
#
#all_data = np.concatenate(filtered_data, axis=1)
# Electrode Order (30 channels)
electrode_order_30 = ('Fp1','Fp2','Fz',
                      'F4','F8','FC6',
                      'C4','T8','CP6',
                      'P4','P8','P10',
                      'O2','Oz','O1',
                      'P9','P3','P7',
                      'CP5','C3','T7',
                      'FC5','F7','F3',
                      'FC1','FC2','Cz',
                      'CP1','CP2','Pz')
# Downsample to specific electrodes (4 channels)
electrode_order_4 = ('Fp1','Oz','T8','T7')
if NUM_ELECTRODES == 4:
    # Keep only Fp1/Oz/T8/T7; indices refer to electrode_order_30 above.
    data_temp = np.empty((data.shape[0], data.shape[1], 4, data.shape[3])).astype('float32')
    for i in range(data.shape[0]):
        data_temp[i,:,0,:] = data[i,:,0,:] # Fp1
        data_temp[i,:,1,:] = data[i,:,13,:] # Oz
        data_temp[i,:,2,:] = data[i,:,7,:] # T8
        data_temp[i,:,3,:] = data[i,:,20,:] # T7
    data = data_temp
    del data_temp
# Plot frequency bands
# Visual sanity check: first trial, first electrode, before standardization.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8))
ax1.title.set_text('Unstandardized')
ax1.grid(True)
ax1.plot(data[0,0,0,:], label="Raw")
ax1.plot(data[0,1,0,:], label="Delta")
ax1.plot(data[0,2,0,:], label="Theta")
ax1.plot(data[0,3,0,:], label="Alpha")
ax1.plot(data[0,4,0,:], label="Beta")
ax1.set_xlim([0, 512])
ax1.legend(loc='upper right')
# Standardize data per trial
# Significantly improves gradient descent
# (zero mean, unit std computed over electrodes x samples for each trial/band)
data = (data - data.mean(axis=(2,3),keepdims=1)) / data.std(axis=(2,3),keepdims=1)
ax2.title.set_text('Standardized')
ax2.grid(True)
ax2.plot(data[0,0,0,:], label="Raw")
ax2.plot(data[0,1,0,:], label="Delta")
ax2.plot(data[0,2,0,:], label="Theta")
ax2.plot(data[0,3,0,:], label="Alpha")
ax2.plot(data[0,4,0,:], label="Beta")
ax2.set_xlim([0, 512])
ax2.legend(loc='upper right')
plt.show()
# Up/downsample the data to balance classes
data, data_labels = bootstrap(data, data_labels, "downsample")
# Create train, validation, test sets
# Fixed seed keeps the split reproducible across runs.
rng = np.random.RandomState(5334) # Set random seed
indices = rng.permutation(data.shape[0])
# 60/20/20 split by shuffled index ranges.
split_train, split_val, split_test = .6, .2, .2
split_train = int(round(data.shape[0]*split_train))
split_val = split_train + int(round(data.shape[0]*split_val))
train_idx = indices[:split_train]
val_idx = indices[split_train:split_val]
test_idx = indices[split_val:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
test_data = data[test_idx,:]
test_labels = data_labels[test_idx]
def build_cnn(k_height=1, k_width=25, input_var=None):
    """Assemble the CNN: input -> conv -> pool -> dropout -> dense -> dropout -> softmax.

    k_height, k_width: convolution kernel size (electrodes x time samples).
    input_var: optional Theano variable to bind as the network input.
    Returns the output layer (2-way softmax).
    """
    # Build the stack by repeatedly rebinding `network`; shapes match the
    # prepared data: (batch, 5 bands, NUM_ELECTRODES, 512 samples).
    network = InputLayer(shape=(None, 5, NUM_ELECTRODES, 512),
                         input_var=input_var)
    network = Conv2DLayer(
        incoming=network, num_filters=8,
        filter_size=(k_height, k_width),
        stride=1, pad='same',
        W=lasagne.init.Normal(std=0.02),
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
    network = Pool2DLayer(incoming=network, pool_size=(2, 2), stride=(2, 2))
    # Heavy dropout (p=.75) to fight overfitting on the small dataset.
    network = lasagne.layers.dropout(network, p=.75)
    network = lasagne.layers.DenseLayer(
        network,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)
    network = lasagne.layers.dropout(network, p=.75)
    return lasagne.layers.DenseLayer(
        network,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield successive (inputs, targets) mini-batches of exactly `batchsize`.

    The trailing partial batch (len(inputs) % batchsize samples) is dropped.
    When `shuffle` is True, samples are drawn in a fresh random order.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize + 1
    # tqdm() can be removed if no visual progress bar is needed
    for start in tqdm(range(0, last_start, batchsize)):
        if order is not None:
            batch_sel = order[start:start + batchsize]
        else:
            batch_sel = slice(start, start + batchsize)
        yield inputs[batch_sel], targets[batch_sel]
def main(model='cnn', batch_size=500, num_epochs=500, k_height=1, k_width=25):
    """Build, train, validate, and test the CNN; return test accuracy (%).

    model: unused placeholder (only the CNN architecture is built here).
    batch_size: mini-batch size used for train/validation/test iteration.
    num_epochs: number of complete passes over the training set.
    k_height, k_width: convolution kernel size forwarded to build_cnn().
    Reads train/val/test arrays from module-level globals; shows a
    matplotlib training-curve plot before returning.
    """
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    network = build_cnn(k_height, k_width, input_var)
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # L2 penalty over all network parameters, weighted by 0.001.
    l2_reg = regularize_network_params(network, l2)
    loss += l2_reg * 0.001
    train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
                       dtype=theano.config.floatX)
    # Create update expressions for training
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01)
    #updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Per-epoch mean losses, for the learning-curve plot at the end.
    training_hist = []
    val_hist = []
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        print("Training epoch {}...".format(epoch+1))
        train_err = 0
        train_acc = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
            inputs, targets = batch
            err, acc = train_fn(inputs, targets)
            train_err += err
            train_acc += acc
            train_batches += 1
            if VERBOSE:
                print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
                    epoch+1,
                    train_batches,
                    train_data.shape[0]/batch_size,
                    time.time()-start_time))
        training_hist.append(train_err / train_batches)
        # And a full pass over the validation data:
        print("Validating epoch...")
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        val_hist.append(val_err / val_batches)
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  training accuracy:\t\t{:.2f} %".format(
            train_acc / train_batches * 100))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    # After training, we compute and print the test predictions/error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
        inputs, targets = batch
        # val_fn is reused here: deterministic pass, no weight updates.
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    test_perc = (test_acc / test_batches) * 100
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(test_perc))
    # Plot learning
    plt.plot(range(1, num_epochs+1), training_hist, label="Training")
    plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
    plt.grid(True)
    plt.title("Training Curve\nKernel size: ({},{}) - Test acc: {:.2f}%".format(k_height, k_width, test_perc))
    plt.xlim(1, num_epochs+1)
    plt.xlabel("Epoch #")
    plt.ylabel("Loss")
    plt.legend(loc='best')
    plt.show()
    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
    return test_perc
if GRID_SEARCH:
    # Exhaustive search over kernel sizes; each cell trains a short model.
    # Set filter sizes to search across (odd size only)
    if NUM_ELECTRODES == 4:
        search_heights = range(1, 5, 2) # Across spatial domain (electrodes)
    elif NUM_ELECTRODES == 30:
        search_heights = range(1, 16, 4) # Across spatial domain (electrodes)
    search_widths = range(1, 64, 4) # Across temporal domain (time samples)
    # Preallocate accuracy grid
    grid_accuracy = np.empty((len(search_heights), len(search_widths)))
    num_kernels = grid_accuracy.size
    cur_kernel = 0
    for i, h in enumerate(search_heights):
        for j, w in enumerate(search_widths):
            # Train with current kernel size
            # Short run (20 epochs) -- only relative accuracy matters here.
            cur_kernel += 1
            print("***** Kernel {}/{} | Size: ({},{}) *****".format(cur_kernel, num_kernels, h, w))
            cur_test_acc = main(batch_size=200, num_epochs=20, k_height=h, k_width=w)
            grid_accuracy[i, j] = cur_test_acc
    # Show accuracy heatmap
    fig, ax = plt.subplots(figsize=(10, 10))
    heatmap = ax.imshow(grid_accuracy, cmap = plt.cm.bone, interpolation = 'mitchell')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.2)
    cb = plt.colorbar(heatmap, orientation='vertical', cax=cax)
    cb.ax.set_title('Test Acc (%)', {'fontsize': 10, 'horizontalalignment': 'left'})
    ax.grid(True)
    ax.set_xlabel('Kernel Width', weight='bold')
    ax.set_ylabel('Kernel Height', weight='bold')
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    ax.set_xticks(range(grid_accuracy.shape[1])) # X element position
    ax.set_yticks(range(grid_accuracy.shape[0])) # Y element position
    ax.set_xticklabels(search_widths) # Labels for X axis
    ax.set_yticklabels(search_heights) # Labels for Y axis
    plt.show()
    # Get highest accuracy and associated kernel size:
    best_idx = np.unravel_index(grid_accuracy.argmax(), grid_accuracy.shape)
    print("Highest accuracy: {:.2f}%".format(np.max(grid_accuracy)))
    print("Best kernel size: ({},{})".format(search_heights[best_idx[0]],
                                             search_widths[best_idx[1]]))
    # Highest search accuracy (4 electrodes): NA
    # Best kernel size (4 electrodes): NA
    # Highest search accuracy (30 electrodes): NA
    # Best kernel size (30 electrodes): NA
    # Train model using ideal kernel size over more epochs
    cur_test_acc = main(batch_size=200, num_epochs=300,
                        k_height=search_heights[best_idx[0]],
                        k_width=search_widths[best_idx[1]])
    # Final test accuracy (4 electrodes): NA
    # Final test accuracy (30 electrodes): NA
else:
    # Use best filter sizes determined from previous 4/30 electrode models
    if NUM_ELECTRODES == 4:
        cur_test_acc = main(batch_size=200, num_epochs=400, k_height=3, k_width=17) # 66.00%
    elif NUM_ELECTRODES == 30:
        cur_test_acc = main(batch_size=200, num_epochs=400, k_height=1, k_width=25) # 67.50%
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
while True:
cmd_return_code = cmd_proc.poll()
if cmd_return_code != None:
break
if not cmd.endswith("&"):
while True:
line = cmd_proc.stdout.readline().strip("\r\n")
print line
if not line or line.find("daemon started") >= 0:
break
output.append(line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap xwalkctl invocations so they run as the `app` user with the
    DBus session environment set; any other command passes through as-is."""
    if "xwalkctl" not in cmd:
        return cmd
    return "su - app -c '%s;%s'" % (XW_ENV, cmd)
def getPKGID(pkg_name=None):
    """Return the package id that `xwalkctl` reports for `pkg_name`.

    Runs `xwalkctl` on the device (via sdb or ssh, per PARAMETERS.mode),
    then scans the output for a line whose second column equals
    `pkg_name`; the first column of that line is the id.
    Returns None when the command fails or no match is found.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('xwalkctl'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('xwalkctl'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_app_id = None
    for line in output:
        pkg_infos = line.split()
        # Single-column lines cannot hold an (id, name) pair; skip them.
        if len(pkg_infos) == 1:
            continue
        name = pkg_infos[1]
        if pkg_name == name:
            test_app_id = pkg_infos[0]
            print test_app_id
            break
    return test_app_id
def doRemoteCMD(cmd=None):
    """Execute `cmd` on the target device, via sdb shell or ssh depending
    on PARAMETERS.mode; returns doCMD's (exit_code, output) pair."""
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
    else:
        remote_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
    return doCMD(remote_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy `src` to `dest` on the device (sdb push or scp).

    Returns True on success, False on failure -- callers (instPKGs) use
    `if not doRemoteCopy(...)` to flag errors.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the device filesystem so the pushed file is visible.
    doRemoteCMD("sync")
    # BUG FIX: the return value was inverted -- a non-zero exit code
    # (failure) returned True, making callers treat failed copies as
    # successes and vice versa.
    if return_code != 0:
        return False
    else:
        return True
def uninstPKGs():
    """Uninstall every .xpk package found beside this script and remove
    the deployed test content directory from the device.

    Returns True when every step succeeded, False otherwise (keeps going
    after individual failures so cleanup is as complete as possible).
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".xpk"):
                # The package name is the .xpk filename without extension.
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -u %s" % pkg_id)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Also remove the pushed test-content directory.
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Install every .xpk beside this script onto the device and push the
    remaining test resources to PKG_SRC_DIR.

    Returns True when every step succeeded, False otherwise.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".xpk"):
                # Push the package, install it, then delete the pushed copy
                # (it is only needed for installation).
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -i %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Copy the remaining test resources (everything except .xpk and inst.py).
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith(".xpk"):
            continue
        elif item.endswith("inst.py"):
            continue
        else:
            item_name = os.path.basename(item)
            if not doRemoteCopy(item, PKG_SRC_DIR+"/"+item_name):
            #if not doRemoteCopy(item, PKG_SRC_DIR):
                action_status = False
    return action_status
def main():
    """Parse command-line options and install (-i) or uninstall (-u)
    the packages; exits with status 1 on any error."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # Default to sdb mode; anything else is treated as SSH.
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-detect: pick the first attached device from `sdb devices`.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # -i and -u are mutually exclusive; default action is install.
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    # Explicit success exit code for the calling test harness.
    sys.exit(0)
|
|
"""
wind.driver
~~~~~~~~~~~
Drivers for io multiplexing.
"""
import select
from itertools import chain
from wind.exceptions import PollError, WindException
def pick():
    """Pick best event driver depending on OS.

    `Select`, `Poll` are available in most OS.
    `Epoll` is available on Linux 2.5.44 and newer.
    `KQueue` is available on most BSD.

    Raises WindException when no driver is available.
    """
    # Candidates listed worst-to-best; the last one whose primitive exists
    # in the `select` module wins. An explicit name->class table replaces
    # the previous eval() on a constructed string -- same dispatch, no
    # dynamic code execution.
    candidates = [
        ('select', Select),
        ('poll', Poll),
        ('epoll', Epoll),
        ('kqueue', Kqueue),
    ]
    available = [cls for name, cls in candidates if hasattr(select, name)]
    try:
        return available[-1]().instance
    except IndexError:
        raise WindException('No available event driver')
class PollEvents:
    """Event bit masks used across all drivers; the values equal the
    standard select.POLLIN / POLLOUT / POLLERR constants."""
    READ = 0x001
    WRITE = 0x004
    ERROR = 0x008
class Events(object):
    """Accumulator mapping fd -> OR-ed event_mask.

    Centralizes the mask-merging bookkeeping so that individual drivers
    do not have to repeat it when building poll results.
    """
    def __init__(self):
        self._events = {}
    def add(self, fd, event_mask):
        # Merge the new mask into whatever is already recorded for this fd.
        if fd in self._events:
            self._events[fd] |= event_mask
        else:
            self._events[fd] = event_mask
    def pop(self, fd):
        # Deliberately no default: a missing fd raises KeyError.
        self._events.pop(fd)
    def items(self):
        return self._events.items()
class BaseDriver(object):
    """Forces implementation of select.epoll interface.

    Subclasses either wrap a kernel polling object (exposed through
    `instance`) or implement the interface themselves.
    """
    def __init__(self):
        self._driver = None
    def close(self):
        pass
    def fileno(self):
        pass
    def fromfd(self, fd):
        pass
    def modify(self, fd, event_mask):
        pass
    def register(self, fd, event_mask):
        # BUG FIX: `NotImplemented` is a comparison sentinel, not an
        # exception, and is not callable -- the original raised TypeError
        # instead of the intended error. Use NotImplementedError.
        raise NotImplementedError("Should implement `register` method")
    def unregister(self, fd):
        raise NotImplementedError("Should implement `unregister` method")
    def poll(self, poll_timeout):
        raise NotImplementedError("Should implement `poll` method")
    @property
    def instance(self):
        # The concrete driver object event loops should talk to.
        return self._driver
class Select(BaseDriver):
    """Wraps unix system call `select`.

    Used when there is no support for `epoll` or `kqueue` in kernel.
    """
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        # This class implements the driver interface itself.
        self._driver = self
    def register(self, fd, event_mask):
        """Start observing `fd` for the events in `event_mask`."""
        if fd in self.fds():
            raise PollError('Fd %d already registered' % fd)
        if event_mask & PollEvents.READ or event_mask & PollEvents.ERROR:
            self.read_fds.add(fd)
        elif event_mask & PollEvents.WRITE:
            self.write_fds.add(fd)
        else:
            raise PollError('Cannot register undefined event')
    def unregister(self, fd):
        """Stop observing `fd`; unknown fds are silently ignored."""
        self.read_fds.discard(fd)
        self.write_fds.discard(fd)
        self.error_fds.discard(fd)
    def modify(self, fd, event_mask):
        """Re-register `fd` with a new `event_mask`."""
        self.unregister(fd)
        try:
            self.register(fd, event_mask)
        except PollError as e:
            # BUG FIX: exception `args` is a tuple, so item assignment
            # (`e.args[0] = ...`) raised TypeError and masked the original
            # PollError. Rebuild the tuple instead.
            e.args = ('Cannot modify undefined event',) + e.args[1:]
            raise
    def poll(self, poll_timeout):
        """Returns `List` of (fd, event) pair

        @param poll_timeout: Value for select timeout.(sec)
            If timeout is `0`, it specifies a poll and never blocks.
        """
        read, write, error = select.select(
            self.read_fds, self.write_fds, self.error_fds, poll_timeout)
        events = Events()
        for fd in read:
            events.add(fd, PollEvents.READ)
        for fd in write:
            events.add(fd, PollEvents.WRITE)
        for fd in error:
            events.add(fd, PollEvents.ERROR)
        return events.items()
    def fds(self):
        """Returns all fds observed in this event reactor"""
        return set(chain(self.read_fds, self.write_fds, self.error_fds))
class Poll(BaseDriver):
    # `select.poll()` natively matches the interface BaseDriver describes,
    # so the kernel object is exposed directly via `instance`.
    def __init__(self):
        self._driver = select.poll()
class Epoll(BaseDriver):
    # `select.epoll()` (Linux 2.5.44+) natively implements the expected
    # interface, so it is exposed directly via `instance`.
    def __init__(self):
        self._driver = select.epoll()
class Kqueue(BaseDriver):
    """Wraps BSD `kqueue`, translating kevents to `PollEvents` masks."""
    def __init__(self):
        # This class implements the driver interface itself.
        self._driver = self
        # Saves observed fd, event_mask in `Dict`.
        self._events = {}
        # OS dependent `kqueue` implementation.
        self._kq = select.kqueue()
        self._kevent = Kevent()
        # Max number of events that will be returned from `kqueue.control`
        self._max_events = 200
    def close(self):
        self._events = {}
        self._kq.close()
    def fromfd(self, fd):
        return self._kq.fromfd(fd)
    def register(self, fd, event_mask):
        """Start observing `fd` for the events in `event_mask`."""
        if fd in self.fds():
            raise PollError('Fd %d already registered' % fd)
        self.control(fd, event_mask, select.KQ_EV_ADD)
        self._events[fd] = event_mask
    def unregister(self, fd):
        """Remove `fd`'s kevent and forget its recorded mask."""
        self.control(fd, self._events[fd], select.KQ_EV_DELETE)
        self._events.pop(fd, None)
    def modify(self, fd, event_mask):
        """Re-register `fd` with a new `event_mask`."""
        self.unregister(fd)
        try:
            self.register(fd, event_mask)
        except PollError as e:
            # BUG FIX: exception `args` is a tuple; item assignment raised
            # TypeError and masked the original PollError. Rebuild it.
            e.args = ('Cannot modify undefined event',) + e.args[1:]
            raise
    def control(self, fd, event_mask, flag):
        """Translate `event_mask` into a kevent and submit it with `flag`."""
        if event_mask & PollEvents.READ or event_mask & PollEvents.ERROR:
            kevent = self._kevent.read_events(fd, flag)
        elif event_mask & PollEvents.WRITE:
            kevent = self._kevent.write_events(fd, flag)
        # Calls low level interface to kevent.
        self._kq.control([kevent], 0, None)
    def poll(self, poll_timeout):
        """Returns `List` of (fd, event) pair

        @param poll_timeout: Value for select timeout.(sec)
            If timeout is `0`, it specifies a poll and never blocks.
        """
        events = Events()
        event_list = self._kq.control(
            None, self._max_events, poll_timeout)
        for event in event_list:
            fd = event.ident
            if event.filter == select.KQ_FILTER_READ:
                events.add(fd, PollEvents.READ)
            elif event.filter == select.KQ_FILTER_WRITE:
                if event.flags == select.KQ_EV_EOF:
                    events.add(fd, PollEvents.ERROR)
                else:
                    events.add(fd, PollEvents.WRITE)
            if event.flags == select.KQ_EV_ERROR:
                # BUG FIX: `events` is an `Events` instance, which supports
                # neither item assignment nor `.get` -- the original
                # `events[fd] = events.get(fd, 0) | ...` raised TypeError.
                # Use the Events.add API, which ORs masks itself.
                events.add(fd, PollEvents.ERROR)
        return events.items()
    def fds(self):
        return self._events.keys()
class Kevent(object):
    """Wraps `select.kevent` system call.

    Builds the kevent objects used to register interest in (or remove
    interest from) read/write readiness of a file descriptor.
    """
    def read_events(self, fd, flags):
        """kevent watching `fd` for readability, submitted with `flags`."""
        kev = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=flags)
        return kev
    def write_events(self, fd, flags):
        """kevent watching `fd` for writability, submitted with `flags`."""
        kev = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=flags)
        return kev
|
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import gdal
from skimage import filter
import numpy as np
from numpy.lib.function_base import histogram
import skimage
def CalMI(a,b):
    """
    Calculate the MI

    Returns the normalized mutual information NMI = 2*MI/(H(a)+H(b)) of two
    equally-sized images, built from 256-bin histograms.
    NOTE(review): assumes pixel values are integers in [0, 255] -- each
    (ref, query) pair is used directly as an index into the 256x256 joint
    histogram. Verify inputs are 8-bit before calling.
    """
    reference = np.array(a)
    query = np.array(b)
    L=256
    miMat = np.zeros((L,L))
    # Flatten both images so pixels can be paired element-wise.
    reference.shape = -1
    query.shape = -1
    miImg = zip(reference.tolist(),query.tolist())
    #print(miImg)
    # Joint histogram: each (ref_value, query_value) pair indexes one cell.
    for m in miImg:
        miMat[m] = miMat[m]+1
    miMat = np.array(miMat) / np.double(reference.size)
    # Marginal histograms, normalized to probabilities.
    refHist,temp = histogram(reference,256,range=(0,256))
    queHist,temp = histogram(query,256,range=(0,256))
    refHist = refHist / np.double(reference.size)
    queHist = queHist / np.double(query.size)
    # Marginal entropies; the tiny epsilon avoids log2(0), and zero-probability
    # bins are explicitly forced to contribute 0.
    r=-refHist*np.log2(refHist+0.000000000000000000000000000001)
    q=-queHist*np.log2(queHist+0.000000000000000000000000000001)
    r[refHist==0]=0
    q[queHist==0]=0
    r = np.sum(r)
    q = np.sum(q)
    # Outer product of the marginals gives the independence distribution.
    refHist.shape = refHist.size,1
    rq = (refHist*queHist)
    MI = miMat*np.log2((miMat)/(rq+0.000000000000000000000001)+0.000000000000000000000001)
    MI[miMat == 0]=0
    MI=np.sum(MI)
    NMI = 2*MI/(r+q)
    #print(r,q,MI)
    return NMI
def CalCC(a,b):
    """
    Calculate the CC: the absolute Pearson correlation coefficient
    between two equally-sized arrays.
    ---------------------------
    a: data
    b: data
    """
    flat_a = np.reshape(a, (1, -1))
    flat_b = np.reshape(b, (1, -1))
    # Off-diagonal entry of the 2x2 correlation matrix is corr(a, b).
    cc = np.corrcoef(flat_a, flat_b)[0, 1]
    return np.abs(cc)
def GetRectBuffer(edge,width):
    """
    Generate the Buffer With edge
    ----------------------------
    edge: the binary edge image (2-D array of 0/1)
    width: Buffer's width (must be odd)

    Dilates every edge pixel into a width x width square of ones and
    returns an image of the same shape; returns 0 if `width` is even.
    """
    if(width % 2 == 0):
        print("The Width Must be odd")
        return 0
    buf = edge.copy()
    # # XXX: Test
    # buf = np.ones((10,10))
    # width = 5
    imgH,imgW = buf.shape
    # BUG FIX: use floor division so halfwidth is an int on Python 3 as
    # well (plain `/` yields a float there, which breaks array sizing and
    # slicing below; `//` is identical on Python 2 ints).
    halfwidth = width//2
    # Expand buf with zeros so squares near the border fit without clipping.
    # DirY bottom
    imgBufferY= np.zeros((halfwidth,imgW))
    buf = np.concatenate((buf,imgBufferY))
    # DirY up
    buf = np.concatenate((imgBufferY,buf))
    #DirX Left
    imgBufferX= np.zeros((imgH+halfwidth*2,halfwidth))
    buf = np.concatenate((buf,imgBufferX),axis = 1)
    #DirX Right
    buf = np.concatenate((imgBufferX,buf),axis = 1)
    def __bufferForPoint(x,y):
        """
        Stamp a width x width square of ones centred on (x, y),
        given in original (un-padded) image coordinates.
        """
        # i for x, and j for y
        # FIXME: The speed of loop is slow
        for i in range(width):
            for j in range(width):
                buf[j+y,i+x] = 1
    for i in range(imgW):
        for j in range(imgH):
            if(edge[j,i] == 1):
                __bufferForPoint(i,j)
    # Crop the zero padding back off before returning.
    return buf[halfwidth:(halfwidth+imgH),halfwidth:(halfwidth+imgW)]
def ReadData(file):
    """
    Use GDAL to Read file as Numpy array
    ------------------------------------
    file: The file path
    """
    ds = gdal.Open(file)
    # NOTE(review): no error handling -- gdal.Open returns None for an
    # unreadable path, which would make this raise AttributeError.
    return ds.ReadAsArray()
def GetEdgeByData(data,s):
    """
    Use Skimage's filter canny to get canny edge (binary edge image)
    --------------------------------------------
    data: Image data
    s: Gaussian Blur's Sigma (Scale) for the Canny detector
    """
    return filter.canny(data,sigma = s)
def GetBlurData(data,sigma):
    """
    Use SKimage's Gaussian filter to get Blur image for generating the
    main edge using edges
    ------------------------------------
    data: Image Data
    sigma: Gaussian Blur's Sigma(scale)
    """
    return filter.gaussian_filter(data,sigma)
def GetBufferEdge(data,buffer):
    """
    Use the Buffer to Get Edge: keep `data` only where `buffer` is set.
    -----------------------------------
    data: blur image data
    buffer: edge Buffer data (0/1 mask)

    Returns a new array; `data` itself is left untouched.
    """
    # BUG FIX: operate on a copy. The original zeroed the caller's array
    # in place, silently corrupting it for any later use (e.g. opblur is
    # reused for FindSamePoint after this would have been called on it).
    masked = data.copy()
    masked[buffer==0]=0
    return masked
def readRefPoints(ptFile):
    """
    Read the Points from ptFile
    ----------------------------------
    ptFile: Points file. Contain column X and column Y
    """
    # Whitespace-separated text; per the docstring, one [x, y] row per point.
    return np.loadtxt(ptFile)
#def
def FindSamePoint(pts,searchWidth,ws,refImage,desImage):
    """
    Find the same points as pts in desImage
    -----------------------------------------------
    pts: points in refImage (array of [x, y] rows)
    searchWidth: Search width by pixels
    ws: Window Size
    refImage: Reference Image
    desImage: Image for looking points

    For every reference point, slides a ws x ws window over a
    searchWidth x searchWidth neighbourhood of desImage and keeps the
    offset with the highest correlation (CalCC).
    """
    # m for the number of pts
    m,n=pts.shape
    desPts=np.zeros(pts.shape)
    # BUG FIX: floor-divide (and cast coordinates to int below) so the
    # values are valid range() bounds / array indices on Python 3 as well;
    # `/` on ints already floors on Python 2, so behavior is unchanged
    # there. Also hoisted out of the loop -- they are loop-invariant.
    halfSearchWid = searchWidth//2
    halfWs = ws//2
    for k in range(m):
        # np.loadtxt yields floats; indices must be ints.
        x = int(pts[k,0])
        y = int(pts[k,1])
        temp=0
        tempi=0
        tempj=0
        # import ipdb; ipdb.set_trace()
        print(k)
        for i in range(-halfSearchWid,halfSearchWid+1):
            for j in range(-halfSearchWid,halfSearchWid+1):
                # re = CalMI(refImage[(y-halfWs):(y+halfWs+1),(x-halfWs):(x+halfWs+1)], \
                #     desImage[(j+y-halfWs):(j+y+halfWs+1),(i+x-halfWs):(i+x+halfWs+1)])
                re = CalCC(refImage[(y-halfWs):(y+halfWs+1),(x-halfWs):(x+halfWs+1)], \
                    desImage[(j+y-halfWs):(j+y+halfWs+1),(i+x-halfWs):(i+x+halfWs+1)])
                if(re>temp):
                    temp = re
                    tempi = i
                    tempj = j
        desPts[k,0]=x+tempi
        desPts[k,1]=y+tempj
    return desPts
def plotwithpixels(img,pts,outFile,textcolor):
    """
    plot the image with pts
    ----------------------------
    img: image
    pts: point([x,y])
    outFile: the path of outImageFile
    textcolor: TEXT Color
    """
    plt.imshow(img,plt.cm.gray)
    # Red crosses on every point.
    plt.plot(pts[:,0],pts[:,1],'r+',markersize = 15)
    h,w = img.shape
    n,temp = pts.shape
    # Number each point, offset so the label does not cover the marker.
    for i in range(n):
        plt.text(pts[i,0]+15,pts[i,1]-15,str(i+1),fontsize=15,color=textcolor)
    plt.xlim((0,w))
    # Inverted y-limits keep image coordinates (origin at top-left).
    plt.ylim((h,0))
    plt.axis('off')
    plt.savefig(outFile,dpi=300)
    plt.close()
def GetGradient(img):
    """
    Gradient magnitude image of `img`: sqrt((gy^2 + gx^2) / 2),
    computed with numpy's central-difference gradient.
    """
    gy, gx = np.gradient(np.double(img))
    magnitude = np.sqrt((gy ** 2 + gx ** 2) / 2)
    return magnitude
if __name__=='__main__':
    """
    test function
    """
    workspace = '/Users/kevin/Desktop/work-2-2/'
    # op_image
    opfile = workspace+'op.tif'
    # sar_image
    sarfile = workspace+'sar.tif'
    #point file(optical)
    oppts = workspace+'oppts.txt'
    opimg= ReadData(opfile)
    sarimg = ReadData(sarfile)
    pts = readRefPoints(oppts)
    #sigma
    # Edge-detection and blur scales (pixels).
    edgesigma = 4
    blursigma = 2
    # import ipdb; ipdb.set_trace()
    # Canny edges for both images (optical uses a fixed sigma of 3).
    opedge = GetEdgeByData(opimg,3)
    saredge = GetEdgeByData(sarimg,edgesigma)
    plt.imsave(workspace+'opedge.tif',opedge,cmap=plt.cm.gray)
    plt.imsave(workspace+'saredge.tif',saredge,cmap = plt.cm.gray)
    # import ipdb; ipdb.set_trace()
    opblur = GetBlurData(opimg,blursigma)
    sarblur = GetBlurData(sarimg,blursigma)
    plt.imsave(workspace+'opblur.tif',opblur,cmap=plt.cm.gray)
    plt.imsave(workspace+'sarblur.tif',sarblur,cmap = plt.cm.gray)
    #width
    # Dilate the edges into 15-pixel-wide buffers.
    bufwidth = 15
    opbuf = GetRectBuffer(opedge, bufwidth)
    sarbuf = GetRectBuffer(saredge,bufwidth)
    plt.imsave(workspace+'opbuf.tif',opbuf,cmap=plt.cm.gray)
    plt.imsave(workspace+'sarbuf.tif',sarbuf,cmap = plt.cm.gray)
    # opbufedge = GetBufferEdge(opblur,opbuf)
    # sarbufedge = GetBufferEdge(sarblur,sarbuf)
    # Gradient magnitude masked to the edge buffers.
    opbufedge = GetBufferEdge(GetGradient(opblur),opbuf)
    sarbufedge = GetBufferEdge(GetGradient(sarblur),sarbuf)
    # searchwidth
    searchwidth = 50
    # windows size
    ws = 49
    # repts = FindSamePoint(pts,searchwidth,ws,GetGradient(opimg),GetGradient(sarimg))
    # repts = FindSamePoint(pts,searchwidth,ws,opbufedge,sarbufedge)
    # Match the reference points into the SAR image via gradient correlation.
    repts = FindSamePoint(pts,searchwidth,ws,GetGradient(opblur),GetGradient(sarblur))
    # XXX: Test in Origin Image
    # repts = FindSamePoint(pts,searchwidth,ws,opimg,sarimg)
    plt.imsave(workspace+'opbufedge.tif',opbufedge,cmap=plt.cm.gray)
    plt.imsave(workspace+'sarbufedge.tif',sarbufedge,cmap = plt.cm.gray)
    plotwithpixels(opimg,pts,workspace+'opre.tif','yellow')
    plotwithpixels(sarimg,repts,workspace+'sarre.tif','red')
    np.savetxt(workspace+'test.txt',repts,fmt = '%.0d')
    print(repts)
|
|
from django.forms.fields import BooleanField
from django.test.client import RequestFactory
from django.utils.safestring import SafeText
from django.utils.translation import ugettext_lazy as _
import mock
from nose.tools import eq_, ok_
import mkt
import mkt.site.tests
from mkt.comm.models import CommunicationNote
from mkt.constants.features import APP_FEATURES
from mkt.developers.models import AppLog
from mkt.files.models import FileUpload
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.tests import user_factory
from mkt.submit import forms
from mkt.users.models import UserProfile
from mkt.webapps.models import AppFeatures, Webapp
class TestNewWebappForm(mkt.site.tests.TestCase):
    """Tests for the new-app submission form: free/paid platform choices,
    upload ownership checks, and packaged-app handling."""
    def setUp(self):
        # A valid upload owned by the request user, as the form requires.
        self.request = RequestFactory().get('/')
        self.request.user = user_factory()
        self.file = FileUpload.objects.create(valid=True)
        self.file.user = self.request.user
        self.file.save()
    def test_no_user(self):
        # An ownerless upload is rejected on both platform fields.
        self.file.user = None
        self.file.save()
        form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
                                    'upload': self.file.uuid},
                                   request=self.request)
        assert not form.is_valid()
        eq_(form.ERRORS['user'], form.errors['free_platforms'])
        eq_(form.ERRORS['user'], form.errors['paid_platforms'])
    def test_correct_user(self):
        form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
                                    'upload': self.file.uuid},
                                   request=self.request)
        assert form.is_valid(), form.errors
    def test_incorrect_user(self):
        # An upload owned by a different user is rejected.
        self.file.user = user_factory()
        self.file.save()
        form = forms.NewWebappForm({'upload': self.file.uuid},
                                   request=self.request)
        assert not form.is_valid()
        eq_(form.ERRORS['user'], form.errors['free_platforms'])
        eq_(form.ERRORS['user'], form.errors['paid_platforms'])
    def test_not_free_or_paid(self):
        # Selecting neither free nor paid platforms is invalid.
        form = forms.NewWebappForm({})
        assert not form.is_valid()
        eq_(form.ERRORS['none'], form.errors['free_platforms'])
        eq_(form.ERRORS['none'], form.errors['paid_platforms'])
    def test_paid(self):
        form = forms.NewWebappForm({'paid_platforms': ['paid-firefoxos'],
                                    'upload': self.file.uuid},
                                   request=self.request)
        assert form.is_valid()
        eq_(form.get_paid(), mkt.ADDON_PREMIUM)
    def test_free(self):
        form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
                                    'upload': self.file.uuid})
        assert form.is_valid()
        eq_(form.get_paid(), mkt.ADDON_FREE)
    def test_platform(self):
        # Platform form values must map to the expected device constants.
        mappings = (
            ({'free_platforms': ['free-firefoxos']}, [mkt.DEVICE_GAIA]),
            ({'paid_platforms': ['paid-firefoxos']}, [mkt.DEVICE_GAIA]),
            ({'free_platforms': ['free-firefoxos',
                                 'free-android-mobile']},
             [mkt.DEVICE_GAIA, mkt.DEVICE_MOBILE]),
            ({'free_platforms': ['free-android-mobile',
                                 'free-android-tablet']},
             [mkt.DEVICE_MOBILE, mkt.DEVICE_TABLET]),
        )
        for data, res in mappings:
            data['upload'] = self.file.uuid
            form = forms.NewWebappForm(data)
            assert form.is_valid(), form.errors
            self.assertSetEqual(res, form.get_devices())
    def test_both(self):
        # Free and paid platforms are mutually exclusive.
        form = forms.NewWebappForm({'paid_platforms': ['paid-firefoxos'],
                                    'free_platforms': ['free-firefoxos']},
                                   request=self.request)
        assert not form.is_valid()
        eq_(form.ERRORS['both'], form.errors['free_platforms'])
        eq_(form.ERRORS['both'], form.errors['paid_platforms'])
    def test_multiple(self):
        # Multiple free platforms together are allowed.
        form = forms.NewWebappForm({'free_platforms': ['free-firefoxos',
                                                       'free-desktop'],
                                    'upload': self.file.uuid})
        assert form.is_valid()
    def test_not_packaged(self):
        form = forms.NewWebappForm({'free_platforms': ['free-firefoxos'],
                                    'upload': self.file.uuid})
        assert form.is_valid(), form.errors
        assert not form.is_packaged()
    @mock.patch('mkt.submit.forms.parse_addon',
                lambda *args: {'version': None})
    def test_packaged_allowed_everywhere(self):
        # Packaged apps are accepted on every platform choice.
        for device in ('free-firefoxos',
                       'free-desktop',
                       'free-android-tablet',
                       'free-android-mobile'):
            form = forms.NewWebappForm({'free_platforms': [device],
                                        'upload': self.file.uuid,
                                        'packaged': True},
                                       request=self.request)
            assert form.is_valid(), form.errors
            assert form.is_packaged()
class TestNewWebappVersionForm(mkt.site.tests.TestCase):
    """Tests for NewWebappVersionForm upload validation and app-domain checks."""

    def setUp(self):
        self.request = RequestFactory().get('/')
        self.file = FileUpload.objects.create(valid=True)

    def test_no_upload(self):
        """The form is invalid when no upload is provided."""
        form = forms.NewWebappVersionForm(request=self.request,
                                          is_packaged=True)
        assert not form.is_valid(), form.errors

    @mock.patch('mkt.submit.forms.parse_addon',
                lambda *args: {"origin": "app://hy.fr"})
    @mock.patch('mkt.submit.forms.verify_app_domain')
    def test_verify_app_domain_called(self, _verify):
        """verify_app_domain is invoked when the unique-by-domain switch is on."""
        self.create_switch('webapps-unique-by-domain')
        form = forms.NewWebappVersionForm({'upload': self.file.uuid},
                                          request=self.request,
                                          is_packaged=True)
        assert form.is_valid(), form.errors
        assert _verify.called

    @mock.patch('mkt.submit.forms.parse_addon',
                lambda *args: {"origin": "app://hy.fr"})
    def test_verify_app_domain_exclude_same(self):
        """An app resubmitting its own domain is excluded from the uniqueness check."""
        app = mkt.site.tests.app_factory(app_domain='app://hy.fr')
        form = forms.NewWebappVersionForm(
            {'upload': self.file.uuid}, request=self.request, is_packaged=True,
            addon=app)
        assert form.is_valid(), form.errors

    @mock.patch('mkt.submit.forms.parse_addon',
                lambda *args: {"origin": "app://hy.fr"})
    def test_verify_app_domain_exclude_different(self):
        """A domain already claimed by a different app is rejected."""
        app = mkt.site.tests.app_factory(app_domain='app://yo.lo')
        mkt.site.tests.app_factory(app_domain='app://hy.fr')
        form = forms.NewWebappVersionForm(
            {'upload': self.file.uuid}, request=self.request, is_packaged=True,
            addon=app)
        assert not form.is_valid(), form.errors
        assert ('An app already exists on this domain; '
                'only one app per domain is allowed.' in form.errors['upload'])
class TestAppDetailsBasicForm(mkt.site.tests.TestCase):
    """Tests for AppDetailsBasicForm saving app metadata (slug, notes,
    publish type, offline flag, tags)."""

    fixtures = fixture('user_999', 'webapp_337141')

    def setUp(self):
        self.request = mock.Mock()
        self.request.user = UserProfile.objects.get(id=999)
        self.request.groups = ()

    def get_app(self):
        """Return the fixture webapp under test."""
        return Webapp.objects.get(pk=337141)

    def get_data(self, **kwargs):
        """Return valid default form data, with ``kwargs`` overriding fields."""
        default = {
            'app_slug': 'thisIsAslug',
            'description': '...',
            'privacy_policy': '...',
            'support_email': 'test@example.com',
            'notes': '',
            'publish_type': mkt.PUBLISH_IMMEDIATE,
        }
        default.update(kwargs)
        return default

    def test_slug(self):
        """Slugs are normalized to lower case on save."""
        app = self.get_app()
        form = forms.AppDetailsBasicForm(self.get_data(), request=self.request,
                                         instance=app)
        assert form.is_valid(), form.errors
        form.save()
        eq_(app.app_slug, 'thisisaslug')

    def test_comm_thread(self):
        """Saving with non-empty notes creates one CommunicationNote."""
        app = self.get_app()
        note_body = 'please approve this app'
        form = forms.AppDetailsBasicForm(self.get_data(notes=note_body),
                                         request=self.request, instance=app)
        assert form.is_valid(), form.errors
        form.save()
        notes = CommunicationNote.objects.all()
        eq_(notes.count(), 1)
        eq_(notes[0].body, note_body)

    def test_publish_type(self):
        """The selected publish type is persisted on the app."""
        app = self.get_app()
        form = forms.AppDetailsBasicForm(
            self.get_data(publish_type=mkt.PUBLISH_PRIVATE),
            request=self.request, instance=app)
        assert form.is_valid(), form.errors
        form.save()
        eq_(app.publish_type, mkt.PUBLISH_PRIVATE)

    def test_help_text_uses_safetext_and_includes_url(self):
        """Privacy-policy help text is SafeText with the MDN URL interpolated."""
        app = self.get_app()
        form = forms.AppDetailsBasicForm(
            self.get_data(publish_type=mkt.PUBLISH_PRIVATE),
            request=self.request, instance=app)
        help_text = form.base_fields['privacy_policy'].help_text
        eq_(type(help_text), SafeText)
        # '{url}' must already be substituted, not left as a placeholder.
        ok_('{url}' not in help_text)
        ok_(form.PRIVACY_MDN_URL in help_text)

    def test_is_offline_guess_false(self):
        """A False guess with no explicit value leaves is_offline False."""
        app = self.get_app()
        app.guess_is_offline = lambda: False
        assert not app.is_offline
        forms.AppDetailsBasicForm(
            self.get_data(),
            request=self.request,
            instance=app)
        assert not app.is_offline

    def test_is_offline_guess_false_override(self):
        """An explicit is_offline=True overrides a False guess."""
        app = self.get_app()
        app.guess_is_offline = lambda: False
        form = forms.AppDetailsBasicForm(
            self.get_data(is_offline=True),
            request=self.request,
            instance=app)
        assert form.is_valid(), form.errors
        form.save()
        eq_(app.is_offline, True)

    def test_is_offline_guess_true(self):
        """With is_offline=None, a True guess marks the app offline on init."""
        app = self.get_app()
        app.guess_is_offline = lambda: True
        assert not app.is_offline
        forms.AppDetailsBasicForm(
            self.get_data(is_offline=None),
            request=self.request,
            instance=app)
        assert app.is_offline

    def test_is_offline_guess_true_override(self):
        """An explicit is_offline=False overrides a True guess."""
        app = self.get_app()
        app.guess_is_offline = lambda: True
        form = forms.AppDetailsBasicForm(
            self.get_data(is_offline=False),
            request=self.request,
            instance=app)
        assert form.is_valid(), form.errors
        form.save()
        eq_(app.is_offline, False)

    def test_tags(self):
        """Comma-separated tags are split and saved individually."""
        app = self.get_app()
        form = forms.AppDetailsBasicForm(
            self.get_data(tags='card games, poker'), request=self.request,
            instance=app)
        assert form.is_valid(), form.errors
        form.save()
        eq_(app.tags.count(), 2)
        self.assertSetEqual(
            app.tags.values_list('tag_text', flat=True),
            ['card games', 'poker'])
class TestAppFeaturesForm(mkt.site.tests.TestCase):
    """Tests for AppFeaturesForm fields and its re-review side effects."""

    fixtures = fixture('user_999', 'webapp_337141')

    def setUp(self):
        mkt.set_user(UserProfile.objects.all()[0])
        self.form = forms.AppFeaturesForm()
        self.app = Webapp.objects.get(pk=337141)
        self.features = self.app.current_version.features

    def _check_log(self, action):
        """Assert an activity-log entry for ``action`` exists for the app."""
        assert AppLog.objects.filter(
            addon=self.app, activity_log__action=action.id).exists(), (
            "Didn't find `%s` action in logs." % action.short)

    def test_required(self):
        """No feature field is required: any all-True/all-False mix validates."""
        f_names = self.form.fields.keys()
        for value in (True, False):
            form = forms.AppFeaturesForm(dict((n, value) for n in f_names))
            eq_(form.is_valid(), True, form.errors)

    def test_correct_fields(self):
        """The form exposes exactly the AppFeatures boolean fields, no version."""
        fields = self.form.fields
        f_values = fields.values()
        assert 'version' not in fields
        assert all(isinstance(f, BooleanField) for f in f_values)
        self.assertSetEqual(fields, AppFeatures()._fields())

    def test_required_api_fields(self):
        """required_api_fields() is ordered by the feature display name."""
        fields = [f.help_text for f in self.form.required_api_fields()]
        eq_(fields, sorted(f['name'] for f in APP_FEATURES.values()))

    def test_required_api_fields_nonascii(self):
        """Sorting of required API fields copes with non-ASCII help text."""
        forms.AppFeaturesForm.base_fields['has_apps'].help_text = _(
            u'H\xe9llo')
        fields = [f.help_text for f in self.form.required_api_fields()]
        eq_(fields, sorted(f['name'] for f in APP_FEATURES.values()))

    def test_changes_mark_for_rereview(self):
        """Changing features logs the change and queues a re-review."""
        self.features.update(has_sms=True)
        data = {'has_apps': True}
        self.form = forms.AppFeaturesForm(instance=self.features, data=data)
        self.form.save()
        ok_(self.features.has_apps)
        # Fields absent from the submitted data are reset to False.
        ok_(not self.features.has_sms)
        ok_(not self.features.has_contacts)
        action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
        assert AppLog.objects.filter(addon=self.app,
                                     activity_log__action=action_id).exists()
        eq_(RereviewQueue.objects.count(), 1)

    def test_no_changes_not_marked_for_rereview(self):
        """Saving identical features does not log or queue a re-review."""
        self.features.update(has_sms=True)
        data = {'has_sms': True}
        self.form = forms.AppFeaturesForm(instance=self.features, data=data)
        self.form.save()
        ok_(not self.features.has_apps)
        ok_(self.features.has_sms)
        eq_(RereviewQueue.objects.count(), 0)
        action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
        assert not AppLog.objects.filter(
            addon=self.app,
            activity_log__action=action_id).exists()

    def test_changes_mark_for_rereview_bypass(self):
        """save(mark_for_rereview=False) applies changes without re-review."""
        self.features.update(has_sms=True)
        data = {'has_apps': True}
        self.form = forms.AppFeaturesForm(instance=self.features, data=data)
        self.form.save(mark_for_rereview=False)
        ok_(self.features.has_apps)
        ok_(not self.features.has_sms)
        eq_(RereviewQueue.objects.count(), 0)
        action_id = mkt.LOG.REREVIEW_FEATURES_CHANGED.id
        assert not AppLog.objects.filter(
            addon=self.app,
            activity_log__action=action_id).exists()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test
def _weights_type_combinations():
  """Test combinations covering every supported weights representation."""
  return combinations.combine(weights_type=["list", "tensor", "dataset"])
def _get_weights_of_type(weights_list, weights_type):
  """Convert `weights_list` into the representation named by `weights_type`.

  "list" returns the input unchanged, "tensor" converts it to a constant
  tensor, and any other value wraps it in an infinitely repeated dataset.
  """
  if weights_type == "list":
    converted = weights_list
  elif weights_type == "tensor":
    converted = ops.convert_to_tensor(weights_list, name="weights")
  else:
    converted = dataset_ops.Dataset.from_tensors(weights_list).repeat()
  return converted
class DirectedInterleaveDatasetTest(test_base.DatasetTestBase,
                                    parameterized.TestCase):
  """Tests for `_DirectedInterleaveDataset` and the public
  `sample_from_datasets` / `choose_from_datasets` APIs built on it."""

  @combinations.generate(test_base.default_test_combinations())
  def testBasic(self):
    """A round-robin selector interleaves the input datasets in order."""
    selector_dataset = dataset_ops.Dataset.range(10).repeat(100)
    input_datasets = [
        dataset_ops.Dataset.from_tensors(i).repeat(100) for i in range(10)
    ]
    dataset = dataset_ops._DirectedInterleaveDataset(selector_dataset,
                                                     input_datasets)
    next_element = self.getNext(dataset)
    for _ in range(100):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())

  def _normalize(self, vec):
    """Scale `vec` so its entries sum to 1 (a probability vector)."""
    return vec / vec.sum()

  def _chi2(self, expected, actual):
    """Return the chi-squared statistic of `actual` against `expected`."""
    actual = np.asarray(actual)
    expected = np.asarray(expected)
    diff = actual - expected
    chi2 = np.sum(diff * diff / expected, axis=0)
    return chi2

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _weights_type_combinations()))
  def testSampleFromDatasets(self, weights_type):
    """Sampled frequencies match the weights (chi-squared test)."""
    random_seed.set_random_seed(1619)
    num_samples = 5000
    rand_probs = self._normalize(np.random.random_sample((5,)))
    # Use chi-squared test to assert that the observed distribution matches the
    # expected distribution. Based on the implementation in
    # "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py".
    for probs in [[.85, .05, .1], rand_probs, [1.]]:
      weights = _get_weights_of_type(np.asarray(probs), weights_type)
      classes = len(probs)
      # Create a dataset that samples each integer in `[0, num_datasets)`
      # with probability given by `weights[i]`.
      dataset = dataset_ops.Dataset.sample_from_datasets([
          dataset_ops.Dataset.from_tensors(i).repeat() for i in range(classes)
      ], weights)
      dataset = dataset.take(num_samples)
      next_element = self.getNext(dataset)
      freqs = np.zeros([classes])
      for _ in range(num_samples):
        freqs[self.evaluate(next_element())] += 1
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element())
      self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _weights_type_combinations()))
  def testSampleFromDatasetsStoppingOnEmptyDataset(self, weights_type):
    """With stop_on_empty_dataset=True, output ends at the first exhaustion."""
    # Sampling stops when the first dataset is exhausted.
    weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
    datasets = [
        dataset_ops.Dataset.from_tensors(np.int64(-1)),
        dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
        dataset_ops.Dataset.range(10).repeat()
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=True)
    samples_list = self.getIteratorOutput(self.getNext(sample_dataset))
    # The single-element dataset contributes its one value exactly once.
    self.assertEqual(samples_list.count(-1), 1)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _weights_type_combinations()))
  def testSampleFromDatasetsSkippingEmptyDataset(self, weights_type):
    """With stop_on_empty_dataset=False, exhausted datasets are skipped."""
    # Sampling skips the first dataset after it becomes empty.
    weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
    datasets = [
        dataset_ops.Dataset.from_tensors(np.int64(-1)),
        dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
        dataset_ops.Dataset.range(10).repeat()
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=False).take(100)
    samples_list = self.getIteratorOutput(self.getNext(sample_dataset))
    self.assertLen(samples_list, 100)
    self.assertEqual(samples_list.count(-1), 1)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _weights_type_combinations()))
  def testSampleFromDatasetsWithZeroWeight(self, weights_type):
    """A zero-weight dataset is never drawn from."""
    # Sampling stops when the second dataset is exhausted.
    weights = _get_weights_of_type(np.asarray([0., 1.]), weights_type)
    datasets = [
        dataset_ops.Dataset.from_tensors(-1).repeat(2),
        dataset_ops.Dataset.from_tensors(1).repeat(2)
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=True)
    self.assertDatasetProduces(sample_dataset, [1, 1])

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         _weights_type_combinations()))
  def testSampleFromEmptyDataset(self, weights_type):
    """An empty dataset with full weight yields no elements when stopping."""
    weights = _get_weights_of_type(np.asarray([1., 0.]), weights_type)
    datasets = [
        dataset_ops.Dataset.range(0),
        dataset_ops.Dataset.range(1).repeat()
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=True)
    self.assertDatasetProduces(sample_dataset, [])

  @combinations.generate(test_base.default_test_combinations())
  def testSampleFromDatasetsSkippingDatasetsWithZeroWeight(self):
    """Zero-weight datasets are skipped entirely when not stopping on empty."""
    # Sampling skips the first dataset.
    weights = np.asarray([0., 1.])
    datasets = [
        dataset_ops.Dataset.from_tensors(-1).repeat(),
        dataset_ops.Dataset.from_tensors(1)
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=False)
    self.assertDatasetProduces(sample_dataset, [1])

  @combinations.generate(test_base.default_test_combinations())
  def testSampleFromDatasetsAllWeightsAreZero(self):
    """All-zero weights produce an empty dataset."""
    # Sampling skips both datasets.
    weights = np.asarray([0., 0.])
    datasets = [
        dataset_ops.Dataset.from_tensors(-1).repeat(),
        dataset_ops.Dataset.from_tensors(1).repeat()
    ]
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, weights=weights, stop_on_empty_dataset=False)
    self.assertDatasetProduces(sample_dataset, [])

  @combinations.generate(test_base.default_test_combinations())
  def testSampleFromDatasetsCardinality(self):
    """Sampling from infinite datasets reports infinite cardinality."""
    ds1 = dataset_ops.Dataset.from_tensors([1.0]).repeat()
    ds2 = dataset_ops.Dataset.from_tensors([2.0]).repeat()
    ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2])
    self.assertEqual(self.evaluate(ds.cardinality()), dataset_ops.INFINITE)

  @combinations.generate(test_base.default_test_combinations())
  def testSampleFromDatasetsNested(self):
    """Sampling works for datasets of datasets (windowed inputs)."""
    ds1 = dataset_ops.Dataset.range(10).window(2)
    ds2 = dataset_ops.Dataset.range(10, 20).window(2)
    ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2],
                                                  weights=[0.3, 0.7])
    ds = ds.flat_map(lambda x: x)
    next_element = self.getNext(ds)
    # Only checks that an element can be produced without error.
    self.evaluate(next_element())

  @combinations.generate(test_base.default_test_combinations())
  def testChooseFromDatasets(self):
    """choose_from_datasets follows the choice dataset deterministically."""
    words = [b"foo", b"bar", b"baz"]
    datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
    choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
    choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
    dataset = dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset)
    next_element = self.getNext(dataset)
    for i in choice_array:
      self.assertEqual(words[i], self.evaluate(next_element()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())

  @combinations.generate(test_base.default_test_combinations())
  def testChooseFromDatasetsStoppingOnEmptyDataset(self):
    """With stop_on_empty_dataset=True, output stops at the first exhaustion."""
    datasets = [
        dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
        dataset_ops.Dataset.from_tensors(b"bar").repeat(),
        dataset_ops.Dataset.from_tensors(b"baz").repeat(),
    ]
    choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
    choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
    dataset = dataset_ops.Dataset.choose_from_datasets(
        datasets, choice_dataset, stop_on_empty_dataset=True)
    self.assertDatasetProduces(dataset, [b"foo", b"foo"])

  @combinations.generate(test_base.default_test_combinations())
  def testChooseFromDatasetsSkippingEmptyDatasets(self):
    """With stop_on_empty_dataset=False, exhausted choices are skipped."""
    datasets = [
        dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
        dataset_ops.Dataset.from_tensors(b"bar").repeat(),
        dataset_ops.Dataset.from_tensors(b"baz").repeat(),
    ]
    choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
    choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
    dataset = dataset_ops.Dataset.choose_from_datasets(
        datasets, choice_dataset, stop_on_empty_dataset=False)
    # Chooses 2 elements from the first dataset while the selector specifies 3.
    self.assertDatasetProduces(
        dataset,
        [b"foo", b"foo", b"bar", b"bar", b"bar", b"baz", b"baz", b"baz"])

  @combinations.generate(test_base.default_test_combinations())
  def testChooseFromDatasetsChoiceDatasetIsEmpty(self):
    """An empty choice dataset yields an empty result."""
    datasets = [
        dataset_ops.Dataset.from_tensors(b"foo").repeat(),
        dataset_ops.Dataset.from_tensors(b"bar").repeat(),
        dataset_ops.Dataset.from_tensors(b"baz").repeat(),
    ]
    dataset = dataset_ops.Dataset.choose_from_datasets(
        datasets,
        choice_dataset=dataset_ops.Dataset.range(0),
        stop_on_empty_dataset=False)
    self.assertDatasetProduces(dataset, [])

  @combinations.generate(test_base.default_test_combinations())
  def testChooseFromDatasetsNested(self):
    """choose_from_datasets works for datasets of datasets (windows)."""
    ds1 = dataset_ops.Dataset.range(10).window(2)
    ds2 = dataset_ops.Dataset.range(10, 20).window(2)
    choice_dataset = dataset_ops.Dataset.range(2).repeat(5)
    ds = dataset_ops.Dataset.choose_from_datasets([ds1, ds2], choice_dataset)
    ds = ds.flat_map(lambda x: x)
    # Alternating choices flatten the i-th window of each range in turn.
    expected = []
    for i in range(5):
      for j in range(2):
        expected.extend([10*j + 2*i, 10*j + 2*i + 1])
    self.assertDatasetProduces(ds, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testErrors(self):
    """Invalid arguments raise the documented error types and messages."""
    with self.assertRaisesRegex(ValueError, r"should have the same length"):
      dataset_ops.Dataset.sample_from_datasets(
          [dataset_ops.Dataset.range(10),
           dataset_ops.Dataset.range(20)],
          weights=[0.25, 0.25, 0.25, 0.25])
    with self.assertRaisesRegex(TypeError, "`tf.float32` or `tf.float64`"):
      dataset_ops.Dataset.sample_from_datasets(
          [dataset_ops.Dataset.range(10),
           dataset_ops.Dataset.range(20)],
          weights=[1, 1])
    with self.assertRaisesRegex(TypeError, "must have compatible"):
      dataset_ops.Dataset.sample_from_datasets([
          dataset_ops.Dataset.from_tensors(0),
          dataset_ops.Dataset.from_tensors(0.0)
      ])
    with self.assertRaisesRegex(
        ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
      dataset_ops.Dataset.sample_from_datasets(datasets=[], weights=[])
    with self.assertRaisesRegex(TypeError, "tf.int64"):
      dataset_ops.Dataset.choose_from_datasets(
          [
              dataset_ops.Dataset.from_tensors(0),
              dataset_ops.Dataset.from_tensors(1)
          ],
          choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
    with self.assertRaisesRegex(TypeError, "scalar"):
      dataset_ops.Dataset.choose_from_datasets(
          [
              dataset_ops.Dataset.from_tensors(0),
              dataset_ops.Dataset.from_tensors(1)
          ],
          choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
    with self.assertRaisesRegex(errors.InvalidArgumentError, "out of range"):
      dataset = dataset_ops.Dataset.choose_from_datasets(
          [dataset_ops.Dataset.from_tensors(0)],
          choice_dataset=dataset_ops.Dataset.from_tensors(
              constant_op.constant(1, dtype=dtypes.int64)))
      next_element = self.getNext(dataset)
      self.evaluate(next_element())
    with self.assertRaisesRegex(
        ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
      dataset_ops.Dataset.choose_from_datasets(
          datasets=[], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
    with self.assertRaisesRegex(
        TypeError, r"`choice_dataset` should be a `tf.data.Dataset`"):
      datasets = [dataset_ops.Dataset.range(42)]
      dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset=None)
class SampleFromDatasetsCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                                       parameterized.TestCase):
  """Checkpoint/restore tests for `sample_from_datasets` pipelines."""

  def _build_dataset(self, probs, num_samples):
    """Build a seeded sampling pipeline yielding `num_samples` elements."""
    datasets = [
        dataset_ops.Dataset.from_tensors(i).repeat(None)
        for i in range(len(probs))
    ]
    # Fixed seed so the sequence is reproducible across save/restore.
    dataset = dataset_ops.Dataset.sample_from_datasets(
        datasets, probs, seed=1813)
    return dataset.take(num_samples)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def test(self, verify_fn):
    verify_fn(
        self, lambda: self._build_dataset([0.5, 0.5], 100), num_outputs=100)
# Standard TF test entry point when the file is run directly.
if __name__ == "__main__":
  test.main()
|
|
"""The tests for the UniFi device tracker platform."""
from copy import copy
from datetime import timedelta
from aiounifi.controller import SIGNAL_CONNECTION_STATE
from aiounifi.websocket import STATE_DISCONNECTED, STATE_RUNNING
from asynctest import patch
from homeassistant import config_entries
from homeassistant.components import unifi
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.unifi.const import (
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
)
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .test_controller import ENTRY_CONFIG, setup_unifi_integration
# Wireless client on SSID "ssid".
CLIENT_1 = {
    "essid": "ssid",
    "hostname": "client_1",
    "ip": "10.0.0.1",
    "is_wired": False,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:01",
}
# Wired client with an explicit name (entity becomes "wired_client").
CLIENT_2 = {
    "hostname": "client_2",
    "ip": "10.0.0.2",
    "is_wired": True,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:02",
    "name": "Wired Client",
}
# Wireless client on a different SSID, used by the SSID-filter tests.
CLIENT_3 = {
    "essid": "ssid2",
    "hostname": "client_3",
    "ip": "10.0.0.3",
    "is_wired": False,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:03",
}
# Reported as wired but registered as a known wireless client — exercises
# the "wireless client goes wired" UniFi bug handling.
CLIENT_4 = {
    "essid": "ssid",
    "hostname": "client_4",
    "ip": "10.0.0.4",
    "is_wired": True,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:04",
}
# Active switch device (state 1).
DEVICE_1 = {
    "board_rev": 3,
    "device_id": "mock-id",
    "has_fan": True,
    "fan_level": 0,
    "ip": "10.0.1.1",
    "last_seen": 1562600145,
    "mac": "00:00:00:00:01:01",
    "model": "US16P150",
    "name": "device_1",
    "overheating": True,
    "state": 1,
    "type": "usw",
    "upgradable": True,
    "version": "4.0.42.10433",
}
# Same device in an inactive state (state 0), no last_seen.
DEVICE_2 = {
    "board_rev": 3,
    "device_id": "mock-id",
    "has_fan": True,
    "ip": "10.0.1.1",
    "mac": "00:00:00:00:01:01",
    "model": "US16P150",
    "name": "device_1",
    "state": 0,
    "type": "usw",
    "version": "4.0.42.10433",
}
async def test_platform_manually_configured(hass):
    """Test that nothing happens when configuring unifi through device tracker platform."""
    config = {device_tracker.DOMAIN: {"platform": "unifi"}}
    setup_result = await async_setup_component(hass, device_tracker.DOMAIN, config)
    # Legacy platform-style setup must be refused outright.
    assert setup_result is False
    assert unifi.DOMAIN not in hass.data
async def test_no_clients(hass):
    """Test the update_clients function when no clients are found."""
    await setup_unifi_integration(hass)
    # NOTE(review): one state remains even with no clients — presumably an
    # entity created by the integration setup itself; confirm against setup.
    assert len(hass.states.async_all()) == 1
async def test_tracked_devices(hass):
    """Test the update_items function with some clients.

    Covers initial "not_home" states, state changes via websocket sync
    events, and a disabled device becoming unavailable.
    """
    # Fresh last_seen so CLIENT_4's wired-bug handling (not staleness) decides.
    client_4_copy = copy(CLIENT_4)
    client_4_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    controller = await setup_unifi_integration(
        hass,
        options={CONF_SSID_FILTER: ["ssid"]},
        clients_response=[CLIENT_1, CLIENT_2, CLIENT_3, client_4_copy],
        devices_response=[DEVICE_1, DEVICE_2],
        known_wireless_clients=(CLIENT_4["mac"],),
    )
    assert len(hass.states.async_all()) == 6
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    assert client_1.state == "not_home"
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    assert client_2.state == "not_home"
    client_3 = hass.states.get("device_tracker.client_3")
    assert client_3 is not None
    assert client_3.state == "not_home"
    # Wireless client with wired bug, if bug active on restart mark device away
    client_4 = hass.states.get("device_tracker.client_4")
    assert client_4 is not None
    assert client_4.state == "not_home"
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    assert device_1.state == "not_home"
    # State change signalling works
    client_1_copy = copy(CLIENT_1)
    client_1_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    event = {"meta": {"message": "sta:sync"}, "data": [client_1_copy]}
    controller.api.message_handler(event)
    device_1_copy = copy(DEVICE_1)
    device_1_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    event = {"meta": {"message": "device:sync"}, "data": [device_1_copy]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == "home"
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == "home"
    # Disabled device is unavailable
    device_1_copy = copy(DEVICE_1)
    device_1_copy["disabled"] = True
    event = {"meta": {"message": "device:sync"}, "data": [device_1_copy]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == STATE_UNAVAILABLE
async def test_controller_state_change(hass):
    """Verify entities state reflect on controller becoming unavailable."""
    controller = await setup_unifi_integration(
        hass, clients_response=[CLIENT_1], devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 3
    # Controller unavailable: all tracker entities go unavailable.
    controller.async_unifi_signalling_callback(
        SIGNAL_CONNECTION_STATE, STATE_DISCONNECTED
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == STATE_UNAVAILABLE
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == STATE_UNAVAILABLE
    # Controller available: entities return to their tracked state.
    controller.async_unifi_signalling_callback(SIGNAL_CONNECTION_STATE, STATE_RUNNING)
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == "not_home"
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == "not_home"
async def test_option_track_clients(hass):
    """Test the tracking of clients can be turned off."""
    controller = await setup_unifi_integration(
        hass, clients_response=[CLIENT_1, CLIENT_2], devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 4
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    # Disable client tracking: client entities removed, device stays.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_CLIENTS: False},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    # Re-enable client tracking: client entities come back.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_CLIENTS: True},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
async def test_option_track_wired_clients(hass):
    """Test the tracking of wired clients can be turned off."""
    controller = await setup_unifi_integration(
        hass, clients_response=[CLIENT_1, CLIENT_2], devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 4
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    # Disable wired-client tracking: only the wired client disappears.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_WIRED_CLIENTS: False},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    # Re-enable wired-client tracking: the wired client returns.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_WIRED_CLIENTS: True},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
async def test_option_track_devices(hass):
    """Test the tracking of devices can be turned off."""
    controller = await setup_unifi_integration(
        hass, clients_response=[CLIENT_1, CLIENT_2], devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 4
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    # Disable device tracking: only the device entity disappears.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_DEVICES: False},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is None
    # Re-enable device tracking: the device entity returns.
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_TRACK_DEVICES: True},
    )
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
async def test_option_ssid_filter(hass):
    """Test the SSID filter works."""
    # CLIENT_3 is on "ssid2", which the filter excludes from "home" tracking.
    controller = await setup_unifi_integration(
        hass, options={CONF_SSID_FILTER: ["ssid"]}, clients_response=[CLIENT_3],
    )
    assert len(hass.states.async_all()) == 2
    # SSID filter active
    client_3 = hass.states.get("device_tracker.client_3")
    assert client_3.state == "not_home"
    client_3_copy = copy(CLIENT_3)
    client_3_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    event = {"meta": {"message": "sta:sync"}, "data": [client_3_copy]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    # SSID filter active even though time stamp should mark as home
    client_3 = hass.states.get("device_tracker.client_3")
    assert client_3.state == "not_home"
    # Remove SSID filter
    hass.config_entries.async_update_entry(
        controller.config_entry, options={CONF_SSID_FILTER: []},
    )
    event = {"meta": {"message": "sta:sync"}, "data": [client_3_copy]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    # SSID no longer filtered
    client_3 = hass.states.get("device_tracker.client_3")
    assert client_3.state == "home"
async def test_wireless_client_go_wired_issue(hass):
    """Test the solution to catch wireless device go wired UniFi issue.

    UniFi has a known issue that when a wireless device goes away it sometimes gets marked as wired.
    """
    client_1_client = copy(CLIENT_1)
    client_1_client["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    controller = await setup_unifi_integration(hass, clients_response=[client_1_client])
    assert len(hass.states.async_all()) == 2
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    assert client_1.state == "home"
    # Known-wireless client reported as wired: treated as still home.
    client_1_client["is_wired"] = True
    client_1_client["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    event = {"meta": {"message": "sta:sync"}, "data": [client_1_client]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == "home"
    # After enough time passes while still "wired", the client is marked away.
    with patch.object(
        unifi.device_tracker.dt_util,
        "utcnow",
        return_value=(dt_util.utcnow() + timedelta(minutes=5)),
    ):
        event = {"meta": {"message": "sta:sync"}, "data": [client_1_client]}
        controller.api.message_handler(event)
        await hass.async_block_till_done()
        client_1 = hass.states.get("device_tracker.client_1")
        assert client_1.state == "not_home"
    # Client reports wireless again: back to home.
    client_1_client["is_wired"] = False
    client_1_client["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    event = {"meta": {"message": "sta:sync"}, "data": [client_1_client]}
    controller.api.message_handler(event)
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == "home"
async def test_restoring_client(hass):
    """Test the update_items function with some clients."""
    config_entry = config_entries.ConfigEntry(
        version=1,
        domain=unifi.DOMAIN,
        title="Mock Title",
        data=ENTRY_CONFIG,
        source="test",
        connection_class=config_entries.CONN_CLASS_LOCAL_POLL,
        system_options={},
        options={},
        entry_id=1,
    )

    registry = await entity_registry.async_get_registry(hass)
    # Pre-register both clients so their entities can be restored on setup.
    for client in (CLIENT_1, CLIENT_2):
        registry.async_get_or_create(
            device_tracker.DOMAIN,
            unifi.DOMAIN,
            "{}-site_id".format(client["mac"]),
            suggested_object_id=client["hostname"],
            config_entry=config_entry,
        )

    await setup_unifi_integration(
        hass,
        options={unifi.CONF_BLOCK_CLIENT: True},
        clients_response=[CLIENT_2],
        clients_all_response=[CLIENT_1],
    )
    assert len(hass.states.async_all()) == 3

    # CLIENT_1 is absent from the active clients response but is restored
    # from the registry because block-clients is enabled.
    assert hass.states.get("device_tracker.client_1") is not None
async def test_dont_track_clients(hass):
    """Test don't track clients config works."""
    await setup_unifi_integration(
        hass,
        options={unifi.controller.CONF_TRACK_CLIENTS: False},
        clients_response=[CLIENT_1],
        devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 2

    # Client tracking disabled: no client entity is created.
    assert hass.states.get("device_tracker.client_1") is None

    # Device tracking is unaffected by the client option.
    device_state = hass.states.get("device_tracker.device_1")
    assert device_state is not None
    assert device_state.state == "not_home"
async def test_dont_track_devices(hass):
    """Test don't track devices config works."""
    await setup_unifi_integration(
        hass,
        options={unifi.controller.CONF_TRACK_DEVICES: False},
        clients_response=[CLIENT_1],
        devices_response=[DEVICE_1],
    )
    assert len(hass.states.async_all()) == 2

    # Client tracking is unaffected by the device option.
    client_state = hass.states.get("device_tracker.client_1")
    assert client_state is not None
    assert client_state.state == "not_home"

    # Device tracking disabled: no device entity is created.
    assert hass.states.get("device_tracker.device_1") is None
async def test_dont_track_wired_clients(hass):
    """Test don't track wired clients config works."""
    await setup_unifi_integration(
        hass,
        options={unifi.controller.CONF_TRACK_WIRED_CLIENTS: False},
        clients_response=[CLIENT_1, CLIENT_2],
    )
    assert len(hass.states.async_all()) == 2

    # The wireless client is still tracked.
    wireless_state = hass.states.get("device_tracker.client_1")
    assert wireless_state is not None
    assert wireless_state.state == "not_home"

    # The wired client gets no entity when wired tracking is off.
    assert hass.states.get("device_tracker.client_2") is None
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .sub_resource_py3 import SubResource
from .backend_address_pool_py3 import BackendAddressPool
from .inbound_nat_rule_py3 import InboundNatRule
from .security_rule_py3 import SecurityRule
from .network_interface_dns_settings_py3 import NetworkInterfaceDnsSettings
from .network_interface_py3 import NetworkInterface
from .network_security_group_py3 import NetworkSecurityGroup
from .route_py3 import Route
from .route_table_py3 import RouteTable
from .public_ip_address_dns_settings_py3 import PublicIPAddressDnsSettings
from .public_ip_address_py3 import PublicIPAddress
from .ip_configuration_py3 import IPConfiguration
from .resource_navigation_link_py3 import ResourceNavigationLink
from .subnet_py3 import Subnet
from .network_interface_ip_configuration_py3 import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address_py3 import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool_py3 import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining_py3 import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings_py3 import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server_py3 import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings_py3 import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool_py3 import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health_py3 import ApplicationGatewayBackendHealth
from .application_gateway_sku_py3 import ApplicationGatewaySku
from .application_gateway_ssl_policy_py3 import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration_py3 import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate_py3 import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate_py3 import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration_py3 import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port_py3 import ApplicationGatewayFrontendPort
from .application_gateway_http_listener_py3 import ApplicationGatewayHttpListener
from .application_gateway_path_rule_py3 import ApplicationGatewayPathRule
from .application_gateway_probe_py3 import ApplicationGatewayProbe
from .application_gateway_request_routing_rule_py3 import ApplicationGatewayRequestRoutingRule
from .application_gateway_url_path_map_py3 import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway_py3 import ApplicationGateway
from .application_gateway_firewall_rule_py3 import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group_py3 import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set_py3 import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from .resource_py3 import Resource
from .dns_name_availability_result_py3 import DnsNameAvailabilityResult
from .express_route_circuit_authorization_py3 import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config_py3 import ExpressRouteCircuitPeeringConfig
from .route_filter_rule_py3 import RouteFilterRule
from .express_route_circuit_stats_py3 import ExpressRouteCircuitStats
from .express_route_circuit_peering_py3 import ExpressRouteCircuitPeering
from .route_filter_py3 import RouteFilter
from .ipv6_express_route_circuit_peering_config_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku_py3 import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties_py3 import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit_py3 import ExpressRouteCircuit
from .express_route_circuit_arp_table_py3 import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result_py3 import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table_py3 import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result_py3 import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary_py3 import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered_py3 import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider_py3 import ExpressRouteServiceProvider
from .frontend_ip_configuration_py3 import FrontendIPConfiguration
from .load_balancing_rule_py3 import LoadBalancingRule
from .probe_py3 import Probe
from .inbound_nat_pool_py3 import InboundNatPool
from .outbound_nat_rule_py3 import OutboundNatRule
from .load_balancer_py3 import LoadBalancer
from .error_details_py3 import ErrorDetails
from .error_py3 import Error
from .azure_async_operation_result_py3 import AzureAsyncOperationResult
from .effective_network_security_group_association_py3 import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule_py3 import EffectiveNetworkSecurityRule
from .effective_network_security_group_py3 import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result_py3 import EffectiveNetworkSecurityGroupListResult
from .effective_route_py3 import EffectiveRoute
from .effective_route_list_result_py3 import EffectiveRouteListResult
from .network_watcher_py3 import NetworkWatcher
from .topology_parameters_py3 import TopologyParameters
from .topology_association_py3 import TopologyAssociation
from .topology_resource_py3 import TopologyResource
from .topology_py3 import Topology
from .verification_ip_flow_parameters_py3 import VerificationIPFlowParameters
from .verification_ip_flow_result_py3 import VerificationIPFlowResult
from .next_hop_parameters_py3 import NextHopParameters
from .next_hop_result_py3 import NextHopResult
from .security_group_view_parameters_py3 import SecurityGroupViewParameters
from .network_interface_association_py3 import NetworkInterfaceAssociation
from .subnet_association_py3 import SubnetAssociation
from .security_rule_associations_py3 import SecurityRuleAssociations
from .security_group_network_interface_py3 import SecurityGroupNetworkInterface
from .security_group_view_result_py3 import SecurityGroupViewResult
from .packet_capture_storage_location_py3 import PacketCaptureStorageLocation
from .packet_capture_filter_py3 import PacketCaptureFilter
from .packet_capture_parameters_py3 import PacketCaptureParameters
from .packet_capture_py3 import PacketCapture
from .packet_capture_result_py3 import PacketCaptureResult
from .packet_capture_query_status_result_py3 import PacketCaptureQueryStatusResult
from .troubleshooting_parameters_py3 import TroubleshootingParameters
from .query_troubleshooting_parameters_py3 import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions_py3 import TroubleshootingRecommendedActions
from .troubleshooting_details_py3 import TroubleshootingDetails
from .troubleshooting_result_py3 import TroubleshootingResult
from .retention_policy_parameters_py3 import RetentionPolicyParameters
from .flow_log_status_parameters_py3 import FlowLogStatusParameters
from .flow_log_information_py3 import FlowLogInformation
from .connectivity_source_py3 import ConnectivitySource
from .connectivity_destination_py3 import ConnectivityDestination
from .connectivity_parameters_py3 import ConnectivityParameters
from .connectivity_issue_py3 import ConnectivityIssue
from .connectivity_hop_py3 import ConnectivityHop
from .connectivity_information_py3 import ConnectivityInformation
from .patch_route_filter_rule_py3 import PatchRouteFilterRule
from .patch_route_filter_py3 import PatchRouteFilter
from .bgp_community_py3 import BGPCommunity
from .bgp_service_community_py3 import BgpServiceCommunity
from .usage_name_py3 import UsageName
from .usage_py3 import Usage
from .virtual_network_peering_py3 import VirtualNetworkPeering
from .address_space_py3 import AddressSpace
from .dhcp_options_py3 import DhcpOptions
from .virtual_network_py3 import VirtualNetwork
from .ip_address_availability_result_py3 import IPAddressAvailabilityResult
from .virtual_network_usage_name_py3 import VirtualNetworkUsageName
from .virtual_network_usage_py3 import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration_py3 import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku_py3 import VirtualNetworkGatewaySku
from .vpn_client_root_certificate_py3 import VpnClientRootCertificate
from .vpn_client_revoked_certificate_py3 import VpnClientRevokedCertificate
from .vpn_client_configuration_py3 import VpnClientConfiguration
from .bgp_settings_py3 import BgpSettings
from .bgp_peer_status_py3 import BgpPeerStatus
from .gateway_route_py3 import GatewayRoute
from .virtual_network_gateway_py3 import VirtualNetworkGateway
from .vpn_client_parameters_py3 import VpnClientParameters
from .bgp_peer_status_list_result_py3 import BgpPeerStatusListResult
from .gateway_route_list_result_py3 import GatewayRouteListResult
from .tunnel_connection_health_py3 import TunnelConnectionHealth
from .local_network_gateway_py3 import LocalNetworkGateway
from .ipsec_policy_py3 import IpsecPolicy
from .virtual_network_gateway_connection_py3 import VirtualNetworkGatewayConnection
from .connection_reset_shared_key_py3 import ConnectionResetSharedKey
from .connection_shared_key_py3 import ConnectionSharedKey
except (SyntaxError, ImportError):
from .sub_resource import SubResource
from .backend_address_pool import BackendAddressPool
from .inbound_nat_rule import InboundNatRule
from .security_rule import SecurityRule
from .network_interface_dns_settings import NetworkInterfaceDnsSettings
from .network_interface import NetworkInterface
from .network_security_group import NetworkSecurityGroup
from .route import Route
from .route_table import RouteTable
from .public_ip_address_dns_settings import PublicIPAddressDnsSettings
from .public_ip_address import PublicIPAddress
from .ip_configuration import IPConfiguration
from .resource_navigation_link import ResourceNavigationLink
from .subnet import Subnet
from .network_interface_ip_configuration import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health import ApplicationGatewayBackendHealth
from .application_gateway_sku import ApplicationGatewaySku
from .application_gateway_ssl_policy import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port import ApplicationGatewayFrontendPort
from .application_gateway_http_listener import ApplicationGatewayHttpListener
from .application_gateway_path_rule import ApplicationGatewayPathRule
from .application_gateway_probe import ApplicationGatewayProbe
from .application_gateway_request_routing_rule import ApplicationGatewayRequestRoutingRule
from .application_gateway_url_path_map import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway import ApplicationGateway
from .application_gateway_firewall_rule import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result import ApplicationGatewayAvailableWafRuleSetsResult
from .resource import Resource
from .dns_name_availability_result import DnsNameAvailabilityResult
from .express_route_circuit_authorization import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config import ExpressRouteCircuitPeeringConfig
from .route_filter_rule import RouteFilterRule
from .express_route_circuit_stats import ExpressRouteCircuitStats
from .express_route_circuit_peering import ExpressRouteCircuitPeering
from .route_filter import RouteFilter
from .ipv6_express_route_circuit_peering_config import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit import ExpressRouteCircuit
from .express_route_circuit_arp_table import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider import ExpressRouteServiceProvider
from .frontend_ip_configuration import FrontendIPConfiguration
from .load_balancing_rule import LoadBalancingRule
from .probe import Probe
from .inbound_nat_pool import InboundNatPool
from .outbound_nat_rule import OutboundNatRule
from .load_balancer import LoadBalancer
from .error_details import ErrorDetails
from .error import Error
from .azure_async_operation_result import AzureAsyncOperationResult
from .effective_network_security_group_association import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule import EffectiveNetworkSecurityRule
from .effective_network_security_group import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result import EffectiveNetworkSecurityGroupListResult
from .effective_route import EffectiveRoute
from .effective_route_list_result import EffectiveRouteListResult
from .network_watcher import NetworkWatcher
from .topology_parameters import TopologyParameters
from .topology_association import TopologyAssociation
from .topology_resource import TopologyResource
from .topology import Topology
from .verification_ip_flow_parameters import VerificationIPFlowParameters
from .verification_ip_flow_result import VerificationIPFlowResult
from .next_hop_parameters import NextHopParameters
from .next_hop_result import NextHopResult
from .security_group_view_parameters import SecurityGroupViewParameters
from .network_interface_association import NetworkInterfaceAssociation
from .subnet_association import SubnetAssociation
from .security_rule_associations import SecurityRuleAssociations
from .security_group_network_interface import SecurityGroupNetworkInterface
from .security_group_view_result import SecurityGroupViewResult
from .packet_capture_storage_location import PacketCaptureStorageLocation
from .packet_capture_filter import PacketCaptureFilter
from .packet_capture_parameters import PacketCaptureParameters
from .packet_capture import PacketCapture
from .packet_capture_result import PacketCaptureResult
from .packet_capture_query_status_result import PacketCaptureQueryStatusResult
from .troubleshooting_parameters import TroubleshootingParameters
from .query_troubleshooting_parameters import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions import TroubleshootingRecommendedActions
from .troubleshooting_details import TroubleshootingDetails
from .troubleshooting_result import TroubleshootingResult
from .retention_policy_parameters import RetentionPolicyParameters
from .flow_log_status_parameters import FlowLogStatusParameters
from .flow_log_information import FlowLogInformation
from .connectivity_source import ConnectivitySource
from .connectivity_destination import ConnectivityDestination
from .connectivity_parameters import ConnectivityParameters
from .connectivity_issue import ConnectivityIssue
from .connectivity_hop import ConnectivityHop
from .connectivity_information import ConnectivityInformation
from .patch_route_filter_rule import PatchRouteFilterRule
from .patch_route_filter import PatchRouteFilter
from .bgp_community import BGPCommunity
from .bgp_service_community import BgpServiceCommunity
from .usage_name import UsageName
from .usage import Usage
from .virtual_network_peering import VirtualNetworkPeering
from .address_space import AddressSpace
from .dhcp_options import DhcpOptions
from .virtual_network import VirtualNetwork
from .ip_address_availability_result import IPAddressAvailabilityResult
from .virtual_network_usage_name import VirtualNetworkUsageName
from .virtual_network_usage import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku import VirtualNetworkGatewaySku
from .vpn_client_root_certificate import VpnClientRootCertificate
from .vpn_client_revoked_certificate import VpnClientRevokedCertificate
from .vpn_client_configuration import VpnClientConfiguration
from .bgp_settings import BgpSettings
from .bgp_peer_status import BgpPeerStatus
from .gateway_route import GatewayRoute
from .virtual_network_gateway import VirtualNetworkGateway
from .vpn_client_parameters import VpnClientParameters
from .bgp_peer_status_list_result import BgpPeerStatusListResult
from .gateway_route_list_result import GatewayRouteListResult
from .tunnel_connection_health import TunnelConnectionHealth
from .local_network_gateway import LocalNetworkGateway
from .ipsec_policy import IpsecPolicy
from .virtual_network_gateway_connection import VirtualNetworkGatewayConnection
from .connection_reset_shared_key import ConnectionResetSharedKey
from .connection_shared_key import ConnectionSharedKey
from .application_gateway_paged import ApplicationGatewayPaged
from .express_route_circuit_authorization_paged import ExpressRouteCircuitAuthorizationPaged
from .express_route_circuit_peering_paged import ExpressRouteCircuitPeeringPaged
from .express_route_circuit_paged import ExpressRouteCircuitPaged
from .express_route_service_provider_paged import ExpressRouteServiceProviderPaged
from .load_balancer_paged import LoadBalancerPaged
from .network_interface_paged import NetworkInterfacePaged
from .network_security_group_paged import NetworkSecurityGroupPaged
from .security_rule_paged import SecurityRulePaged
from .network_watcher_paged import NetworkWatcherPaged
from .packet_capture_result_paged import PacketCaptureResultPaged
from .public_ip_address_paged import PublicIPAddressPaged
from .route_filter_paged import RouteFilterPaged
from .route_filter_rule_paged import RouteFilterRulePaged
from .route_table_paged import RouteTablePaged
from .route_paged import RoutePaged
from .bgp_service_community_paged import BgpServiceCommunityPaged
from .usage_paged import UsagePaged
from .virtual_network_paged import VirtualNetworkPaged
from .virtual_network_usage_paged import VirtualNetworkUsagePaged
from .subnet_paged import SubnetPaged
from .virtual_network_peering_paged import VirtualNetworkPeeringPaged
from .virtual_network_gateway_paged import VirtualNetworkGatewayPaged
from .virtual_network_gateway_connection_paged import VirtualNetworkGatewayConnectionPaged
from .local_network_gateway_paged import LocalNetworkGatewayPaged
from .network_management_client_enums import (
TransportProtocol,
IPAllocationMethod,
IPVersion,
SecurityRuleProtocol,
SecurityRuleAccess,
SecurityRuleDirection,
RouteNextHopType,
ApplicationGatewayProtocol,
ApplicationGatewayCookieBasedAffinity,
ApplicationGatewayBackendHealthServerHealth,
ApplicationGatewaySkuName,
ApplicationGatewayTier,
ApplicationGatewaySslProtocol,
ApplicationGatewayRequestRoutingRuleType,
ApplicationGatewayOperationalState,
ApplicationGatewayFirewallMode,
AuthorizationUseStatus,
ExpressRouteCircuitPeeringAdvertisedPublicPrefixState,
Access,
ExpressRouteCircuitPeeringType,
ExpressRouteCircuitPeeringState,
ExpressRouteCircuitSkuTier,
ExpressRouteCircuitSkuFamily,
ServiceProviderProvisioningState,
LoadDistribution,
ProbeProtocol,
NetworkOperationStatus,
EffectiveRouteSource,
EffectiveRouteState,
ProvisioningState,
AssociationType,
Direction,
Protocol,
NextHopType,
PcProtocol,
PcStatus,
PcError,
Origin,
Severity,
IssueType,
ConnectionStatus,
VirtualNetworkPeeringState,
VirtualNetworkGatewayType,
VpnType,
VirtualNetworkGatewaySkuName,
VirtualNetworkGatewaySkuTier,
BgpPeerState,
ProcessorArchitecture,
VirtualNetworkGatewayConnectionStatus,
VirtualNetworkGatewayConnectionType,
IpsecEncryption,
IpsecIntegrity,
IkeEncryption,
IkeIntegrity,
DhGroup,
PfsGroup,
)
# Public API of this auto-generated models package: model classes first,
# then paged-collection wrappers, then enum names. Do not edit by hand —
# this list is regenerated by AutoRest alongside the imports above.
__all__ = [
    'SubResource',
    'BackendAddressPool',
    'InboundNatRule',
    'SecurityRule',
    'NetworkInterfaceDnsSettings',
    'NetworkInterface',
    'NetworkSecurityGroup',
    'Route',
    'RouteTable',
    'PublicIPAddressDnsSettings',
    'PublicIPAddress',
    'IPConfiguration',
    'ResourceNavigationLink',
    'Subnet',
    'NetworkInterfaceIPConfiguration',
    'ApplicationGatewayBackendAddress',
    'ApplicationGatewayBackendAddressPool',
    'ApplicationGatewayConnectionDraining',
    'ApplicationGatewayBackendHttpSettings',
    'ApplicationGatewayBackendHealthServer',
    'ApplicationGatewayBackendHealthHttpSettings',
    'ApplicationGatewayBackendHealthPool',
    'ApplicationGatewayBackendHealth',
    'ApplicationGatewaySku',
    'ApplicationGatewaySslPolicy',
    'ApplicationGatewayIPConfiguration',
    'ApplicationGatewayAuthenticationCertificate',
    'ApplicationGatewaySslCertificate',
    'ApplicationGatewayFrontendIPConfiguration',
    'ApplicationGatewayFrontendPort',
    'ApplicationGatewayHttpListener',
    'ApplicationGatewayPathRule',
    'ApplicationGatewayProbe',
    'ApplicationGatewayRequestRoutingRule',
    'ApplicationGatewayUrlPathMap',
    'ApplicationGatewayFirewallDisabledRuleGroup',
    'ApplicationGatewayWebApplicationFirewallConfiguration',
    'ApplicationGateway',
    'ApplicationGatewayFirewallRule',
    'ApplicationGatewayFirewallRuleGroup',
    'ApplicationGatewayFirewallRuleSet',
    'ApplicationGatewayAvailableWafRuleSetsResult',
    'Resource',
    'DnsNameAvailabilityResult',
    'ExpressRouteCircuitAuthorization',
    'ExpressRouteCircuitPeeringConfig',
    'RouteFilterRule',
    'ExpressRouteCircuitStats',
    'ExpressRouteCircuitPeering',
    'RouteFilter',
    'Ipv6ExpressRouteCircuitPeeringConfig',
    'ExpressRouteCircuitSku',
    'ExpressRouteCircuitServiceProviderProperties',
    'ExpressRouteCircuit',
    'ExpressRouteCircuitArpTable',
    'ExpressRouteCircuitsArpTableListResult',
    'ExpressRouteCircuitRoutesTable',
    'ExpressRouteCircuitsRoutesTableListResult',
    'ExpressRouteCircuitRoutesTableSummary',
    'ExpressRouteCircuitsRoutesTableSummaryListResult',
    'ExpressRouteServiceProviderBandwidthsOffered',
    'ExpressRouteServiceProvider',
    'FrontendIPConfiguration',
    'LoadBalancingRule',
    'Probe',
    'InboundNatPool',
    'OutboundNatRule',
    'LoadBalancer',
    'ErrorDetails',
    'Error',
    'AzureAsyncOperationResult',
    'EffectiveNetworkSecurityGroupAssociation',
    'EffectiveNetworkSecurityRule',
    'EffectiveNetworkSecurityGroup',
    'EffectiveNetworkSecurityGroupListResult',
    'EffectiveRoute',
    'EffectiveRouteListResult',
    'NetworkWatcher',
    'TopologyParameters',
    'TopologyAssociation',
    'TopologyResource',
    'Topology',
    'VerificationIPFlowParameters',
    'VerificationIPFlowResult',
    'NextHopParameters',
    'NextHopResult',
    'SecurityGroupViewParameters',
    'NetworkInterfaceAssociation',
    'SubnetAssociation',
    'SecurityRuleAssociations',
    'SecurityGroupNetworkInterface',
    'SecurityGroupViewResult',
    'PacketCaptureStorageLocation',
    'PacketCaptureFilter',
    'PacketCaptureParameters',
    'PacketCapture',
    'PacketCaptureResult',
    'PacketCaptureQueryStatusResult',
    'TroubleshootingParameters',
    'QueryTroubleshootingParameters',
    'TroubleshootingRecommendedActions',
    'TroubleshootingDetails',
    'TroubleshootingResult',
    'RetentionPolicyParameters',
    'FlowLogStatusParameters',
    'FlowLogInformation',
    'ConnectivitySource',
    'ConnectivityDestination',
    'ConnectivityParameters',
    'ConnectivityIssue',
    'ConnectivityHop',
    'ConnectivityInformation',
    'PatchRouteFilterRule',
    'PatchRouteFilter',
    'BGPCommunity',
    'BgpServiceCommunity',
    'UsageName',
    'Usage',
    'VirtualNetworkPeering',
    'AddressSpace',
    'DhcpOptions',
    'VirtualNetwork',
    'IPAddressAvailabilityResult',
    'VirtualNetworkUsageName',
    'VirtualNetworkUsage',
    'VirtualNetworkGatewayIPConfiguration',
    'VirtualNetworkGatewaySku',
    'VpnClientRootCertificate',
    'VpnClientRevokedCertificate',
    'VpnClientConfiguration',
    'BgpSettings',
    'BgpPeerStatus',
    'GatewayRoute',
    'VirtualNetworkGateway',
    'VpnClientParameters',
    'BgpPeerStatusListResult',
    'GatewayRouteListResult',
    'TunnelConnectionHealth',
    'LocalNetworkGateway',
    'IpsecPolicy',
    'VirtualNetworkGatewayConnection',
    'ConnectionResetSharedKey',
    'ConnectionSharedKey',
    # Paged-collection wrappers for list operations.
    'ApplicationGatewayPaged',
    'ExpressRouteCircuitAuthorizationPaged',
    'ExpressRouteCircuitPeeringPaged',
    'ExpressRouteCircuitPaged',
    'ExpressRouteServiceProviderPaged',
    'LoadBalancerPaged',
    'NetworkInterfacePaged',
    'NetworkSecurityGroupPaged',
    'SecurityRulePaged',
    'NetworkWatcherPaged',
    'PacketCaptureResultPaged',
    'PublicIPAddressPaged',
    'RouteFilterPaged',
    'RouteFilterRulePaged',
    'RouteTablePaged',
    'RoutePaged',
    'BgpServiceCommunityPaged',
    'UsagePaged',
    'VirtualNetworkPaged',
    'VirtualNetworkUsagePaged',
    'SubnetPaged',
    'VirtualNetworkPeeringPaged',
    'VirtualNetworkGatewayPaged',
    'VirtualNetworkGatewayConnectionPaged',
    'LocalNetworkGatewayPaged',
    # Enumerations from network_management_client_enums.
    'TransportProtocol',
    'IPAllocationMethod',
    'IPVersion',
    'SecurityRuleProtocol',
    'SecurityRuleAccess',
    'SecurityRuleDirection',
    'RouteNextHopType',
    'ApplicationGatewayProtocol',
    'ApplicationGatewayCookieBasedAffinity',
    'ApplicationGatewayBackendHealthServerHealth',
    'ApplicationGatewaySkuName',
    'ApplicationGatewayTier',
    'ApplicationGatewaySslProtocol',
    'ApplicationGatewayRequestRoutingRuleType',
    'ApplicationGatewayOperationalState',
    'ApplicationGatewayFirewallMode',
    'AuthorizationUseStatus',
    'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
    'Access',
    'ExpressRouteCircuitPeeringType',
    'ExpressRouteCircuitPeeringState',
    'ExpressRouteCircuitSkuTier',
    'ExpressRouteCircuitSkuFamily',
    'ServiceProviderProvisioningState',
    'LoadDistribution',
    'ProbeProtocol',
    'NetworkOperationStatus',
    'EffectiveRouteSource',
    'EffectiveRouteState',
    'ProvisioningState',
    'AssociationType',
    'Direction',
    'Protocol',
    'NextHopType',
    'PcProtocol',
    'PcStatus',
    'PcError',
    'Origin',
    'Severity',
    'IssueType',
    'ConnectionStatus',
    'VirtualNetworkPeeringState',
    'VirtualNetworkGatewayType',
    'VpnType',
    'VirtualNetworkGatewaySkuName',
    'VirtualNetworkGatewaySkuTier',
    'BgpPeerState',
    'ProcessorArchitecture',
    'VirtualNetworkGatewayConnectionStatus',
    'VirtualNetworkGatewayConnectionType',
    'IpsecEncryption',
    'IpsecIntegrity',
    'IkeEncryption',
    'IkeIntegrity',
    'DhGroup',
    'PfsGroup',
]
|
|
from __future__ import division
import pytest
import numpy as np
from pandas import (
Index,
IntervalIndex,
interval_range,
CategoricalIndex,
Timestamp,
Timedelta,
NaT)
from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype
import pandas.util.testing as tm
class Base(object):
    """Tests common to IntervalIndex with any subtype"""

    def test_astype_idempotent(self, index):
        # Casting to 'interval' (or the index's own dtype) is a no-op.
        for target in ('interval', index.dtype):
            tm.assert_index_equal(index.astype(target), index)

    def test_astype_object(self, index):
        converted = index.astype(object)
        tm.assert_index_equal(converted, Index(index.values, dtype='object'))
        # An object-dtype index no longer compares equal to the original.
        assert not converted.equals(index)

    def test_astype_category(self, index):
        expected = CategoricalIndex(index.values)
        # The string alias and a default CategoricalDtype behave the same.
        tm.assert_index_equal(index.astype('category'), expected)
        tm.assert_index_equal(index.astype(CategoricalDtype()), expected)

        # non-default params
        categories = index.dropna().unique().values[:-1]
        ordered_dtype = CategoricalDtype(categories=categories, ordered=True)
        tm.assert_index_equal(
            index.astype(ordered_dtype),
            CategoricalIndex(index.values, categories=categories,
                             ordered=True))

    @pytest.mark.parametrize('dtype', [
        'int64', 'uint64', 'float64', 'complex128', 'period[M]',
        'timedelta64', 'timedelta64[ns]', 'datetime64', 'datetime64[ns]',
        'datetime64[ns, US/Eastern]'])
    def test_astype_cannot_cast(self, index, dtype):
        with tm.assert_raises_regex(TypeError,
                                    'Cannot cast IntervalIndex to dtype'):
            index.astype(dtype)

    def test_astype_invalid_dtype(self, index):
        with tm.assert_raises_regex(TypeError,
                                    'data type "fake_dtype" not understood'):
            index.astype('fake_dtype')
class TestIntSubtype(Base):
    """Tests specific to IntervalIndex with integer-like subtype"""

    indexes = [
        IntervalIndex.from_breaks(np.arange(-10, 11, dtype='int64')),
        IntervalIndex.from_breaks(
            np.arange(100, dtype='uint64'), closed='left'),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize('subtype', [
        'float64', 'datetime64[ns]', 'timedelta64[ns]'])
    def test_subtype_conversion(self, index, subtype):
        # converting the subtype converts both endpoints elementwise
        exp = IntervalIndex.from_arrays(index.left.astype(subtype),
                                        index.right.astype(subtype),
                                        closed=index.closed)
        res = index.astype(IntervalDtype(subtype))
        tm.assert_index_equal(res, exp)

    @pytest.mark.parametrize('subtype_start, subtype_end', [
        ('int64', 'uint64'), ('uint64', 'int64')])
    def test_subtype_integer(self, subtype_start, subtype_end):
        idx = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start))
        exp = IntervalIndex.from_arrays(idx.left.astype(subtype_end),
                                        idx.right.astype(subtype_end),
                                        closed=idx.closed)
        res = idx.astype(IntervalDtype(subtype_end))
        tm.assert_index_equal(res, exp)

    @pytest.mark.xfail(reason='GH 15832')
    def test_subtype_integer_errors(self):
        # int64 -> uint64 fails with negative values
        with pytest.raises(ValueError):
            interval_range(-10, 10).astype(IntervalDtype('uint64'))
class TestFloatSubtype(Base):
    """Tests specific to IntervalIndex with float subtype"""

    indexes = [
        interval_range(-10.0, 10.0, closed='neither'),
        IntervalIndex.from_arrays([-1.5, np.nan, 0., 0., 1.5],
                                  [-0.5, np.nan, 1., 1., 3.],
                                  closed='both'),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize('subtype', ['int64', 'uint64'])
    def test_subtype_integer(self, subtype):
        idx = interval_range(0.0, 10.0)
        dtype = IntervalDtype(subtype)
        exp = IntervalIndex.from_arrays(idx.left.astype(subtype),
                                        idx.right.astype(subtype),
                                        closed=idx.closed)
        tm.assert_index_equal(idx.astype(dtype), exp)

        # raises with NA
        with tm.assert_raises_regex(ValueError,
                                    'Cannot convert NA to integer'):
            idx.insert(0, np.nan).astype(dtype)

    @pytest.mark.xfail(reason='GH 15832')
    def test_subtype_integer_errors(self):
        # float64 -> uint64 fails with negative values
        with pytest.raises(ValueError):
            interval_range(-10.0, 10.0).astype(IntervalDtype('uint64'))

        # float64 -> integer-like fails with non-integer valued floats
        non_integer = interval_range(0.0, 10.0, freq=0.25)
        with pytest.raises(ValueError):
            non_integer.astype(IntervalDtype('int64'))
        with pytest.raises(ValueError):
            non_integer.astype(IntervalDtype('uint64'))

    @pytest.mark.parametrize('subtype', ['datetime64[ns]', 'timedelta64[ns]'])
    def test_subtype_datetimelike(self, index, subtype):
        # float intervals cannot become datetime-like intervals
        msg = 'Cannot convert .* to .*; subtypes are incompatible'
        with tm.assert_raises_regex(TypeError, msg):
            index.astype(IntervalDtype(subtype))
class TestDatetimelikeSubtype(Base):
    """Tests specific to IntervalIndex with datetime-like subtype"""

    indexes = [
        interval_range(Timestamp('2018-01-01'), periods=10, closed='neither'),
        interval_range(Timestamp('2018-01-01'), periods=10).insert(2, NaT),
        interval_range(Timestamp('2018-01-01', tz='US/Eastern'), periods=10),
        interval_range(Timedelta('0 days'), periods=10, closed='both'),
        interval_range(Timedelta('0 days'), periods=10).insert(2, NaT),
    ]

    @pytest.fixture(params=indexes)
    def index(self, request):
        return request.param

    @pytest.mark.parametrize('subtype', ['int64', 'uint64'])
    def test_subtype_integer(self, index, subtype):
        # datetime-like endpoints convert to their integer representation
        exp = IntervalIndex.from_arrays(index.left.astype(subtype),
                                        index.right.astype(subtype),
                                        closed=index.closed)
        res = index.astype(IntervalDtype(subtype))
        tm.assert_index_equal(res, exp)

    def test_subtype_float(self, index):
        msg = 'Cannot convert .* to .*; subtypes are incompatible'
        with tm.assert_raises_regex(TypeError, msg):
            index.astype(IntervalDtype('float64'))

    def test_subtype_datetimelike(self):
        msg = 'Cannot convert .* to .*; subtypes are incompatible'

        # datetime -> timedelta raises
        dtype = IntervalDtype('timedelta64[ns]')
        with tm.assert_raises_regex(TypeError, msg):
            interval_range(Timestamp('2018-01-01'), periods=10).astype(dtype)
        with tm.assert_raises_regex(TypeError, msg):
            interval_range(Timestamp('2018-01-01', tz='CET'),
                           periods=10).astype(dtype)

        # timedelta -> datetime raises
        dtype = IntervalDtype('datetime64[ns]')
        with tm.assert_raises_regex(TypeError, msg):
            interval_range(Timedelta('0 days'), periods=10).astype(dtype)
|
|
# swift_build_support/targets.py - Build target helpers -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import platform
class Platform(object):
    """
    Abstract representation of a platform Swift can run on.

    Each platform owns one Target per architecture; every target is also
    exposed as an attribute on the platform instance (e.g. ``linux.x86_64``).
    """

    def __init__(self, name, archs, sdk_name=None):
        """
        Create a platform with the given name and list of architectures.
        """
        self.name = name
        self.targets = [Target(self, arch) for arch in archs]
        # FIXME: Eliminate this argument; apparently the SDK names are
        # internally a private implementation detail of the build script, so we
        # should just make them the same as the platform name.
        self.sdk_name = name.upper() if sdk_name is None else sdk_name

        # Add a property for each arch.
        for target in self.targets:
            setattr(self, target.arch, target)

    @property
    def is_darwin(self):
        """Convenience function for checking if this is a Darwin platform."""
        return isinstance(self, DarwinPlatform)

    @property
    def supports_benchmark(self):
        # By default, we don't support benchmarks on most platforms.
        return False

    @property
    def uses_host_tests(self):
        """
        Check if this is a Darwin platform that needs a connected device
        for tests.
        """
        # By default, we don't use connected devices on most platforms.
        return False

    def contains(self, target_name):
        """
        Returns True if the given target name belongs to a one of this
        platform's targets.
        """
        # any() short-circuits on the first match, replacing the manual
        # search loop with the idiomatic membership test.
        return any(target.name == target_name for target in self.targets)
class DarwinPlatform(Platform):
    """A Darwin (Apple) platform, optionally a simulator variant."""

    def __init__(self, name, archs, sdk_name=None, is_simulator=False):
        # Record the simulator flag before the base class builds the targets.
        self.is_simulator = is_simulator
        super(DarwinPlatform, self).__init__(name, archs, sdk_name)

    @property
    def is_embedded(self):
        """Check if this is a Darwin platform for embedded devices."""
        return self.name not in ("macosx", "maccatalyst")

    @property
    def supports_benchmark(self):
        # Benchmarks are supported on every Darwin platform except
        # simulators.
        return not self.is_simulator

    @property
    def uses_host_tests(self):
        """
        Check if this is a Darwin platform that needs a connected device
        for tests.
        """
        return self.is_embedded and not self.is_simulator
class AndroidPlatform(Platform):
    @property
    def uses_host_tests(self):
        """
        Check if this platform needs a connected device for tests.

        Android tests always run on a connected device, so this is
        unconditionally True (the base class defaults to False).
        """
        return True
class Target(object):
    """
    Abstract representation of a target Swift can run on.

    A target is a (platform, architecture) pair.
    """

    def __init__(self, platform, arch):
        self.platform = platform
        self.arch = arch
        # Delegate to the platform, this is usually not arch specific.
        self.supports_benchmark = platform.supports_benchmark

    @property
    def name(self):
        """The canonical '<platform>-<arch>' spelling of this target."""
        return "%s-%s" % (self.platform.name, self.arch)
class StdlibDeploymentTarget(object):
    """
    Registry of all platforms/targets the Swift standard library can be
    deployed to, plus helpers for mapping the build host to a target.
    """
    OSX = DarwinPlatform("macosx", archs=["x86_64"],
                         sdk_name="OSX")
    iOS = DarwinPlatform("iphoneos", archs=["armv7", "armv7s", "arm64", "arm64e"],
                         sdk_name="IOS")
    iOSSimulator = DarwinPlatform("iphonesimulator", archs=["i386", "x86_64"],
                                  sdk_name="IOS_SIMULATOR",
                                  is_simulator=True)
    # Never build/test benchmarks on iOS armv7s.
    iOS.armv7s.supports_benchmark = False
    AppleTV = DarwinPlatform("appletvos", archs=["arm64"],
                             sdk_name="TVOS")
    AppleTVSimulator = DarwinPlatform("appletvsimulator", archs=["x86_64"],
                                      sdk_name="TVOS_SIMULATOR",
                                      is_simulator=True)
    AppleWatch = DarwinPlatform("watchos", archs=["armv7k"],
                                sdk_name="WATCHOS")
    AppleWatchSimulator = DarwinPlatform("watchsimulator", archs=["i386"],
                                         sdk_name="WATCHOS_SIMULATOR",
                                         is_simulator=True)
    Linux = Platform("linux", archs=[
        "x86_64",
        "i686",
        "armv6",
        "armv7",
        "aarch64",
        "powerpc64",
        "powerpc64le",
        "s390x"])
    FreeBSD = Platform("freebsd", archs=["x86_64"])
    OpenBSD = Platform("openbsd", archs=["amd64"])
    Cygwin = Platform("cygwin", archs=["x86_64"])
    Android = AndroidPlatform("android", archs=["armv7", "aarch64"])
    Windows = Platform("windows", archs=["x86_64"])
    Haiku = Platform("haiku", archs=["x86_64"])
    # The list of known platforms.
    known_platforms = [
        OSX,
        iOS, iOSSimulator,
        AppleTV, AppleTVSimulator,
        AppleWatch, AppleWatchSimulator,
        Linux,
        FreeBSD,
        OpenBSD,
        Cygwin,
        Android,
        Windows,
        Haiku]
    # Cache of targets by name.
    _targets_by_name = dict((target.name, target)
                            for platform in known_platforms
                            for target in platform.targets)

    @staticmethod
    def host_target():
        """
        Return the host target for the build machine, if it is one of
        the recognized targets. Otherwise, throw a NotImplementedError.
        """
        system = platform.system()
        machine = platform.machine()
        if system == 'Linux':
            # A Linux kernel with ANDROID_DATA set is treated as an
            # Android host rather than a plain Linux one.
            if 'ANDROID_DATA' in os.environ:
                if machine.startswith('armv7'):
                    return StdlibDeploymentTarget.Android.armv7
                elif machine == 'aarch64':
                    return StdlibDeploymentTarget.Android.aarch64
                raise NotImplementedError('Android System with architecture '
                                          '"%s" is not supported' % machine)
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Linux.x86_64
            elif machine == 'i686':
                return StdlibDeploymentTarget.Linux.i686
            elif machine.startswith('armv7'):
                # linux-armv7* is canonicalized to 'linux-armv7'
                return StdlibDeploymentTarget.Linux.armv7
            elif machine.startswith('armv6'):
                # linux-armv6* is canonicalized to 'linux-armv6'
                return StdlibDeploymentTarget.Linux.armv6
            elif machine == 'aarch64':
                return StdlibDeploymentTarget.Linux.aarch64
            elif machine == 'ppc64':
                return StdlibDeploymentTarget.Linux.powerpc64
            elif machine == 'ppc64le':
                return StdlibDeploymentTarget.Linux.powerpc64le
            elif machine == 's390x':
                return StdlibDeploymentTarget.Linux.s390x
        elif system == 'Darwin':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.OSX.x86_64
        elif system == 'FreeBSD':
            if machine == 'amd64':
                return StdlibDeploymentTarget.FreeBSD.x86_64
        elif system == 'OpenBSD':
            if machine == 'amd64':
                return StdlibDeploymentTarget.OpenBSD.amd64
        elif system == 'CYGWIN_NT-10.0':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Cygwin.x86_64
        elif system == 'Windows':
            if machine == "AMD64":
                return StdlibDeploymentTarget.Windows.x86_64
        elif system == 'Haiku':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Haiku.x86_64
        # Fall through: (system, machine) pair not recognized above.
        raise NotImplementedError('System "%s" with architecture "%s" is not '
                                  'supported' % (system, machine))

    @classmethod
    def get_target_for_name(cls, name):
        """Look up a Target by its '<platform>-<arch>' name (None if unknown)."""
        return cls._targets_by_name.get(name)

    @classmethod
    def get_targets_by_name(cls, names):
        """Map a list of target names to Target objects (None for unknowns)."""
        return [cls.get_target_for_name(name) for name in names]

    @classmethod
    def get_target_names(cls):
        """Return all known target names, sorted alphabetically."""
        return sorted([name for (name, target) in
                       cls._targets_by_name.items()])
def install_prefix():
    """
    Returns the default path at which built Swift products (like bin, lib,
    and include) will be installed, based on the host machine's operating
    system.
    """
    if platform.system() != 'Darwin':
        return '/usr'
    return ('/Applications/Xcode.app/Contents/Developer/Toolchains/'
            'XcodeDefault.xctoolchain/usr')
def darwin_toolchain_prefix(darwin_install_prefix):
    """
    Given the install prefix for a Darwin system (assumed to end in
    '.../usr' inside a .xctoolchain directory), return the path to the
    enclosing .xctoolchain directory by dropping the last path component.
    """
    return os.path.dirname(darwin_install_prefix)
def toolchain_path(install_destdir, install_prefix):
    """
    Return the path of the toolchain inside 'install_destdir' for the given
    install prefix.

    This toolchain is being populated during the build-script invocation.
    Downstream products can use products that were previously installed into
    this toolchain.
    """
    if platform.system() == 'Darwin':
        # The prefix is an absolute path, so concatenate without os.path.
        suffix = darwin_toolchain_prefix(install_prefix) + "/usr"
    else:
        suffix = install_prefix
    return install_destdir + suffix
|
|
"""distutils.dir_util
Utility functions for manipulating directories and directory trees."""
__revision__ = "$Id: dir_util.py 76956 2009-12-21 01:22:46Z tarek.ziade $"
import os
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0777, verbose=1, dry_run=0):
    """Create a directory and any missing ancestor directories.

    If the directory already exists (or if 'name' is the empty string, which
    means the current directory, which of course exists), then do nothing.
    Raise DistutilsFileError if unable to create some directory along the way
    (eg. some sub-path exists, but is a file rather than a directory).
    If 'verbose' is true, print a one-line summary of each mkdir to stdout.
    Return the list of directories actually created.

    NOTE(review): 'mode' is accepted but never passed to os.mkdir below, so
    new directories get the process umask default -- confirm this is
    intentional before relying on the parameter.
    """
    global _path_created
    # Detect a common bug -- name is None
    if not isinstance(name, basestring):
        raise DistutilsInternalError, \
              "mkpath: 'name' must be a string (got %r)" % (name,)
    # XXX what's the better way to handle verbosity? print as we create
    # each directory in the path (the current behaviour), or only announce
    # the creation of the whole path? (quite easy to do the latter since
    # we're not using a recursive algorithm)
    name = os.path.normpath(name)
    created_dirs = []
    # Nothing to do for an existing directory or the empty string.
    if os.path.isdir(name) or name == '':
        return created_dirs
    # Redundant-call cache: also suppresses duplicate "creating ..." messages
    # in dry-run mode.
    if _path_created.get(os.path.abspath(name)):
        return created_dirs
    (head, tail) = os.path.split(name)
    tails = [tail] # stack of lone dirs to create
    # Walk upwards until 'head' is an ancestor that already exists,
    # pushing each missing component onto the stack.
    while head and tail and not os.path.isdir(head):
        (head, tail) = os.path.split(head)
        tails.insert(0, tail)      # push next higher dir onto stack
    # now 'head' contains the deepest directory that already exists
    # (that is, the child of 'head' in 'name' is the highest directory
    # that does *not* exist)
    for d in tails:
        #print "head = %s, d = %s: " % (head, d),
        head = os.path.join(head, d)
        abs_head = os.path.abspath(head)
        if _path_created.get(abs_head):
            continue
        if verbose >= 1:
            log.info("creating %s", head)
        if not dry_run:
            try:
                os.mkdir(head)
                created_dirs.append(head)
            except OSError, exc:
                raise DistutilsFileError, \
                      "could not create '%s': %s" % (head, exc[-1])
        # Remember this directory so later calls are cheap no-ops.
        _path_created[abs_head] = 1
    return created_dirs
def create_tree(base_dir, files, mode=0777, verbose=1, dry_run=0):
    """Create all the empty directories under 'base_dir' needed to put 'files'
    there.

    'base_dir' is just the a name of a directory which doesn't necessarily
    exist yet; 'files' is a list of filenames to be interpreted relative to
    'base_dir'. 'base_dir' + the directory portion of every file in 'files'
    will be created if it doesn't already exist. 'mode', 'verbose' and
    'dry_run' flags are as for 'mkpath()'.
    """
    # First get the list of directories to create
    # (dict used as a set to deduplicate directory names)
    need_dir = {}
    for file in files:
        need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
    # Sort so parents are created before their children.
    # (Python 2 only: keys() returns a list with an in-place sort().)
    need_dirs = need_dir.keys()
    need_dirs.sort()
    # Now create them
    for dir in need_dirs:
        mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
              preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.

    Both 'src' and 'dst' must be directory names. If 'src' is not a
    directory, raise DistutilsFileError. If 'dst' does not exist, it is
    created with 'mkpath()'. The end result of the copy is that every
    file in 'src' is copied to 'dst', and directories under 'src' are
    recursively copied to 'dst'. Return the list of files that were
    copied or might have been copied, using their output name. The
    return value is unaffected by 'update' or 'dry_run': it is simply
    the list of all files under 'src', with the names changed to be
    under 'dst'.

    'preserve_mode' and 'preserve_times' are the same as for
    'copy_file'; note that they only apply to regular files, not to
    directories. If 'preserve_symlinks' is true, symlinks will be
    copied as symlinks (on platforms that support them!); otherwise
    (the default), the destination of the symlink will be copied.
    'update' and 'verbose' are the same as for 'copy_file'.
    """
    from distutils.file_util import copy_file
    if not dry_run and not os.path.isdir(src):
        raise DistutilsFileError, \
              "cannot copy tree '%s': not a directory" % src
    try:
        names = os.listdir(src)
    except os.error, (errno, errstr):
        # In dry-run mode 'src' may legitimately not exist yet.
        if dry_run:
            names = []
        else:
            raise DistutilsFileError, \
                  "error listing files in '%s': %s" % (src, errstr)
    if not dry_run:
        mkpath(dst, verbose=verbose)
    outputs = []
    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)
        if preserve_symlinks and os.path.islink(src_name):
            # Re-create the symlink instead of copying what it points at.
            link_dest = os.readlink(src_name)
            if verbose >= 1:
                log.info("linking %s -> %s", dst_name, link_dest)
            if not dry_run:
                os.symlink(link_dest, dst_name)
            outputs.append(dst_name)
        elif os.path.isdir(src_name):
            # Recurse into subdirectories, accumulating their outputs.
            outputs.extend(
                copy_tree(src_name, dst_name, preserve_mode,
                          preserve_times, preserve_symlinks, update,
                          verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_name, dst_name, preserve_mode,
                      preserve_times, update, verbose=verbose,
                      dry_run=dry_run)
            outputs.append(dst_name)
    return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
    """Recursively remove an entire directory tree.

    Any errors are ignored (apart from being reported to stdout if 'verbose'
    is true).
    """
    from distutils.util import grok_environment_error
    global _path_created
    if verbose >= 1:
        log.info("removing '%s' (and everything under it)", directory)
    if dry_run:
        return
    # Collect (callable, path) pairs bottom-up, then replay them.
    cmdtuples = []
    _build_cmdtuple(directory, cmdtuples)
    for cmd in cmdtuples:
        try:
            cmd[0](cmd[1])
            # remove dir from cache if it's already there
            abspath = os.path.abspath(cmd[1])
            if abspath in _path_created:
                del _path_created[abspath]
        except (IOError, OSError), exc:
            # Errors are only logged, never raised, per the docstring.
            log.warn(grok_environment_error(
                    exc, "error removing %s: " % directory))
def ensure_relative(path):
    """Take the full path 'path', and make it a relative path.

    This is useful to make 'path' the second argument to os.path.join().
    """
    drive, tail = os.path.splitdrive(path)
    if tail[:1] == os.sep:
        # Drop the leading separator but keep the drive portion.
        return drive + tail[1:]
    # No leading separator: the drive (if any) is dropped, matching the
    # original behaviour of returning the post-splitdrive remainder.
    return tail
|
|
# pythonpath modification to make hytra and empryonic available
# for import without requiring it to be installed
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
# standard imports
import configargparse as argparse
import logging
import numpy as np
import h5py
import vigra
from skimage.external import tifffile
import glob
import hytra.util.axesconversion
from hytra.util.skimage_tifffile_hack import hack
def find_splits(filename, start_frame):
    """
    Parse a CTC ``man_track.txt`` file and collect division (split) events.

    Each line has the form ``<track_id> <begin_frame> <end_frame> <parent_id>``;
    a parent id of 0 marks an appearance, while a positive parent id marks the
    start of a child track, i.e. a split.

    **Returns** a dict indexed by timestep (``begin_frame - start_frame``),
    whose values are dicts mapping parent track id to the list of child ids.
    """
    # store split events indexed by timestep, then parent
    split_events = {}
    num_splits = 0
    # reads the man_track.txt file
    with open(filename, 'rt') as tracks:
        for line in tracks:
            # list() so the row is indexable on Python 3 as well, where
            # map() returns a lazy iterator instead of a list
            track_start = list(map(int, line.split()))
            if track_start[3] > 0:
                num_splits += 1
                # parent = 0 is appearance, otherwise a split
                parent = track_start[3]
                timestep = track_start[1] - start_frame
                idx = track_start[0]
                if timestep not in split_events:
                    split_events[timestep] = {}
                if parent not in split_events[timestep]:
                    split_events[timestep][parent] = []
                split_events[timestep][parent].append(idx)
    logging.getLogger('ctc_gt_to_hdf5.py').info("Found number of splits: {}".format(num_splits))
    return split_events
def remap_label_image(label_image, mapping):
    """
    Given a label image and a mapping, create and return a new label image
    where each pixel value ``src`` in `mapping` becomes ``mapping[src]``.
    Pixel values not present in `mapping` remain zero in the result.
    """
    out = np.zeros_like(label_image)
    for old_label, new_label in mapping.items():
        out[label_image == old_label] = new_label
    return out
def remap_events(events, mappingA, mappingB=None):
    """
    Apply the given mapping dictionaries to a numpy.ndarray of `events`.

    Column 0 of every row is remapped via `mappingA`; all remaining columns
    via `mappingB`. For divisions or moves this means `mappingA` refers to
    the previous frame and `mappingB` to the current one. A 1-D input is
    treated as a single column.

    **Returns** the remapped events as numpy.ndarray
    """
    if events.ndim == 1:
        events = events[:, np.newaxis]
    remapped = np.zeros_like(events)
    # vectorize the dict lookup so it can be applied elementwise
    lookup_a = np.vectorize(lambda x: mappingA[x])
    remapped[:, 0] = np.apply_along_axis(lookup_a, 0, events[:, 0])
    if events.shape[1] > 1:
        if mappingB is None:
            raise AssertionError("Need two sets of mappings for division and move events!")
        lookup_b = np.vectorize(lambda x: mappingB[x])
        remapped[:, 1:] = np.apply_along_axis(lookup_b, 0, events[:, 1:])
    return remapped
def find_label_image_remapping(label_image):
    """
    Given any kind of label image, find a mapping that leaves the background at value
    zero, and all further object IDs such that they are consecutive and start at 1 (needed for VIGRA).

    **Returns** a dictionary that maps from label_image object ids to continuous labels,
    or None if the labels are already good
    """
    # check whether we already have continuous labels
    labels = list(np.unique(label_image))
    labels.sort()
    # NOTE(review): a list containing its own maximum can never equal
    # list(range(max(labels))), which excludes that maximum -- so this test
    # never succeeds and a mapping is always returned (the identity mapping
    # when labels are already continuous). The intended check was probably
    # list(range(len(labels))); confirm before changing, since callers index
    # the returned dict without a None check.
    if labels == list(range(max(labels))):
        return None
    # otherwise, create the appropriate mapping
    # NOTE(review): if background value 0 is absent from this frame, the
    # smallest object id gets mapped to 0 -- verify frames always contain
    # background pixels.
    continuous_labels = list(range(len(labels)))
    return dict(zip(labels, continuous_labels))
def save_label_image_for_frame(options, label_volume, out_h5, frame, mapping_per_frame=None):
    """
    Takes a specific frame or all frames from the `label_volume` and stores them in `out_h5`.

    **If** `options.single_frames == True` then the respective part is taken from the label_volume
    and stored in `/segmentation/labels` of `out_h5`, and a `mapping_per_frame` is applied if given.
    **Otherwise** the full volume is saved when `frame == 0`, and it does nothing for all further
    frames.

    `mapping_per_frame` must be a dictionary, with frames as keys, and the values are then again dictionaries
    from the indices of objects in a frame of `label_volume` to the output indices.
    """
    if options.single_frames:
        # slice out just this frame (drops the time axis, keeps channels)
        out_label_volume = label_volume[..., frame, :]
        if options.index_remapping and mapping_per_frame is not None:
            out_label_volume = remap_label_image(out_label_volume, mapping_per_frame[frame])
        out_label_volume = hytra.util.axesconversion.adjustOrder(out_label_volume, 'xyzc', options.groundtruth_axes)
        out_h5.create_dataset("segmentation/labels", data=out_label_volume, dtype='u2', compression='gzip')
    else:
        # NOTE(review): whole-volume export is disabled; everything after
        # this raise is unreachable dead code kept for reference.
        raise NotImplementedError
        out_label_volume = np.transpose(label_volume, axes=[2, 1, 0, 3])
        if options.index_remapping and mapping_per_frame is not None:
            # remap every frame in the volume individually
            for frame in range(label_volume.shape[2]):
                remapped_frame = remap_label_image(out_label_volume[..., frame, 0], mapping_per_frame[frame])
                out_label_volume[..., frame, 0] = remapped_frame
        out_h5.create_dataset("label_image", data=out_label_volume, dtype='u2', compression='gzip')
        out_label_volume = (out_label_volume.swapaxes(1, 2))[..., np.newaxis]
        out_h5.create_dataset("label_image_T", data=out_label_volume, dtype='u2', compression='gzip')
def create_label_volume(options):
    """
    Convert a CTC ground-truth tiff stack plus its man_track.txt into the
    HDF5 event format: either one volume file or one file per frame,
    depending on ``options.single_frames``.
    """
    # read image
    # label_volume = vigra.impex.readVolume('/export/home/lparcala/Fluo-N2DH-SIM/01_GT/TRA/man_track000.tif')
    # label_volume = tifffile.imread(options.input_tif) # this could work, if tifffile would work.
    path, files = hack(options.input_tif)
    os.chdir(path)
    label_volume = tifffile.imread(files)
    logging.getLogger('ctc_gt_to_hdf5.py').info("Found dataset of size {}".format(label_volume.shape))
    label_volume = hytra.util.axesconversion.adjustOrder(label_volume, options.tif_input_axes, 'xyztc')
    timeaxis = label_volume.shape[3]
    split_events = find_splits(options.input_track, options.start_frame)
    if not os.path.exists(options.output_file):
        #create new folder for gt files
        os.mkdir(options.output_file)
    else:
        # as h5py somehow appends the old file instead of overwriting, do it manually
        if os.path.isfile(options.output_file):
            os.remove(options.output_file)
        elif os.path.isdir(options.output_file):
            import shutil
            shutil.rmtree(options.output_file)
            os.mkdir(options.output_file)
    # store object ids per frame and generate mappings
    objects_per_frame = []
    mapping_per_frame = {}
    for frame in range(timeaxis):
        label_image = label_volume[..., frame, 0]
        mapping_per_frame[frame] = find_label_image_remapping(label_image)
    # handle frame zero
    if not options.single_frames:
        # one holistic volume file
        out_h5 = h5py.File(options.output_file, 'w')
        ids = out_h5.create_group('ids')
        tracking = out_h5.create_group('tracking')
        # create empty tracking group for first frame
        tracking_frame = tracking.create_group(format(0, options.filename_zero_padding))
    else:
        frame = 0
        out_h5 = h5py.File(options.output_file + format(frame, options.filename_zero_padding) + '.h5', 'w')
        tracking_frame = out_h5.create_group('tracking')
    save_label_image_for_frame(options, label_volume, out_h5, 0, mapping_per_frame)
    for frame in range(timeaxis):
        label_image = label_volume[..., frame, 0]
        objects = np.unique(label_image)
        objects_per_frame.append(set(objects))
        if not options.single_frames:
            ids.create_dataset(format(frame, options.filename_zero_padding), data=objects, dtype='u2')
    # handle all further frames
    for frame in range(1, timeaxis):
        if options.single_frames:
            out_h5 = h5py.File(options.output_file + format(frame, options.filename_zero_padding) + '.h5', 'w')
            tracking_frame = out_h5.create_group('tracking')
        else:
            tracking_frame = tracking.create_group(format(frame, options.filename_zero_padding))
        save_label_image_for_frame(options, label_volume, out_h5, frame, mapping_per_frame)
        # intersect track id sets of both frames, and place moves in HDF5 file
        # ('-' binds tighter than '&', so this is prev & (cur - {0}), i.e.
        # the intersection without the background label 0)
        tracks_in_both_frames = objects_per_frame[frame - 1] & objects_per_frame[frame] - set([0])
        # materialize as a list: on Python 3, zip() returns an iterator that
        # supports neither len() nor append() as used below
        moves = list(zip(list(tracks_in_both_frames), list(tracks_in_both_frames)))
        # add the found splits as both, mitosis and split events
        if frame in split_events.keys():
            splits_in_frame = split_events[frame]
            # make sure all splits have the same dimension
            splits = []
            for key, value in splits_in_frame.items():
                # drop children that are not present in the current frame
                value = [v for v in value if v in objects_per_frame[frame]]
                if key not in objects_per_frame[frame - 1]:
                    logging.getLogger('ctc_gt_to_hdf5.py').warning("Parent {} of split is not in previous frame {}. Ignored".format(key, frame - 1))
                    continue
                if len(value) > 1:
                    if len(value) > 2:
                        logging.getLogger('ctc_gt_to_hdf5.py').warning("Cutting off children of {} in timestep {}".format(key, frame))
                    # cut off divisions into more than 2
                    splits.append([key] + value[0:2])
                elif len(value) == 1:
                    # store as move
                    logging.getLogger('ctc_gt_to_hdf5.py').warning("Store move ({},{}) instead of split into one in timestep {}".format(key, value[0], frame))
                    moves.append((key, value[0]))
            if len(splits) > 0:
                splits = np.array(splits)
                if options.index_remapping:
                    splits = remap_events(splits, mapping_per_frame[frame - 1], mapping_per_frame[frame])
                tracking_frame.create_dataset("Splits", data=splits, dtype='u2')
                mitosis = [splits[i][0] for i in range(splits.shape[0])]
                tracking_frame.create_dataset("Mitosis", data=np.array(mitosis), dtype='u2')
        if len(moves) > 0:
            if options.index_remapping:
                moves = remap_events(np.array(moves), mapping_per_frame[frame - 1], mapping_per_frame[frame])
            tracking_frame.create_dataset("Moves", data=moves, dtype='u2')
if __name__ == "__main__":
    # Command-line entry point: build the argument parser, expand the input
    # tiff pattern, configure logging, and run the conversion.
    parser = argparse.ArgumentParser(
        description='Convert Cell Tracking Challenge Ground Truth to our HDF5 event format',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', is_config_file=True, help='config file path')
    # file paths
    parser.add_argument('--ctc-track-input-tif', type=str, dest='tif_input_file_pattern', required=True,
                        help='File pattern of Cell Tracking Challenge data: man_track*.tif')
    parser.add_argument('--ctc-track-input-axes', type=str, dest='tif_input_axes', default='txy',
                        help='Axes string defining the input shape. Separate tiff files are stacked along the first axes.')
    parser.add_argument('--ctc-track-input-txt', type=str, dest='input_track', required=True,
                        help='Path to Cell Tracking Challenge manual tracking file: man_track.txt')
    parser.add_argument('--groundtruth', type=str, dest='output_file', required=True,
                        help='Filename for the resulting HDF5 file/folder.')
    parser.add_argument("--groundtruth-axes", dest='groundtruth_axes', type=str, default='xyzc',
                        help="axes ordering to use when creating the ground truth segmentations per frame (no t!), e.g. xyzc")
    parser.add_argument('--start-frame', type=int, dest='start_frame', default=0,
                        help='First frame number (usually 0, but e.g. their rapoport starts at 150')
    parser.add_argument('--ctc-to-gt-single-frames', action='store_true', dest='single_frames',
                        help='output single frame h5 files instead of one volume. Filename is appended with numbers.')
    parser.add_argument('--h5-filename-zero-pad-length', type=str, dest='filename_zero_padding', default='04')
    parser.add_argument('--ctc-to-gt-index-remapping', action='store_true', dest='index_remapping',
                        help='Remap indices so that the objects in each frame have continuous ascending indices.')
    parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)
    # parse command line
    options, unknown = parser.parse_known_args()
    # expand the file pattern into a sorted list of per-frame tiff files
    options.input_tif = glob.glob(options.tif_input_file_pattern)
    options.input_tif.sort()
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger('ctc_gt_to_hdf5.py').debug("Ignoring unknown parameters: {}".format(unknown))
    create_label_volume(options)
|
|
# coding: utf-8
from tw.api import WidgetsList
from tw.forms.fields import Form
from tribal.widgets.components import *
from tribal.model import *
from tribal.model.sample import *
#__all__=["search_form", "report_form", "SFSamplingWidget", "SFPrintoutWidget", "SF3DImageWidget"]
def getOptions(dbobj, orderField, user=None):
    """
    Return a zero-argument callable that builds a select-box options list
    of the form [("", ""), (value, label), ...] from all `dbobj` rows
    ordered by `orderField`.

    NOTE(review): when `user` is falsy the query filters on
    ``dbobj.active == 0`` -- presumably 0 means "active"; confirm against
    the model. When `user` is given, ``group_id`` is used as the option
    value instead of ``id``.
    """
    def returnFun():
        # evaluated lazily so the options reflect the database at render time
        rs=DBSession.query(dbobj).order_by(getattr(dbobj, orderField))
        if not user:
            rs = rs.filter(dbobj.active==0)
            return [("", "")]+[(str(r.id), str(r)) for r in rs]
        else:return [("", "")]+[(str(r.group_id), str(r)) for r in rs]
    return returnFun
class SearchForm(RPACForm):
    """Search criteria form for sample-development requests.

    Declarative field list; hidden fields back the Ajax-completed text
    inputs next to them (e.g. 'customer' holds the id chosen via
    'customer_name').
    """
    fields=[
        RPACSelect("project_own", label_text = "Region", options = getOptions(Region, "name")),
        # RPACSelect("customer", label_text = "Vendor/Customer", options = getOptions(Customer, "name")),
        RPACHidden('customer'),
        RPACAjaxText("customer_name", label_text = "Vendor/Customer"),
        RPACAjaxText("project_owner", label_text = "Project Owner"),
        # RPACSelect("program", label_text = "Program", options = getOptions(Program, "name")),
        RPACHidden('program'),
        RPACAjaxText("program_name", label_text = "Corporate Customer"),
        RPACSelect("contact_team", label_text = "Division Team", options = getOptions(Team, "name")),
        RPACSelect("project", label_text = "Brand", options = [("", ""), ]),
        RPACAjaxText("contact_person", label_text = "Contact Person"),
        RPACSelect("item_category", label_text = "Item Category", options = getOptions(ItemCategory, "name")),
        RPACText("item_description", label_text = "Item Description"),
        RPACSelect("team", label_text = "Request Person's Team", options = getOptions(Team, "name")),
        RPACText("item_code", label_text = "Item Code"),
        RPACAjaxText("create_by", label_text = "Request Person"),
        RPACCalendarPicker("create_time_from", label_text = "Request Date(from)"),
        RPACText("system_no", label_text = "Job Number"),
        RPACCalendarPicker("create_time_to", label_text = "Request Date(to)"),
        RPACText("reference_code", label_text = "Reference Code"),
        RPACSelect("status", label_text = "Status", options = [("", ""), (str(NEW_REQUEST), "New"),(str(UNDER_DEVELOPMENT), "Under Development"), (str(COMPLETED_REQUEST), "Complete"), (str(CANCELED_REQUEST), "Cancelled"), (str(DRAFT), "Draft")]),
        # NOTE(review): 'field'/'direction' look like sort state carried
        # across submissions — confirm against the controller.
        HiddenField("field"),
        HiddenField("direction"),
    ]
# Module-level singleton used by the controllers/templates.
search_form=SearchForm()
class ReportForm(RPACForm):
    """Report-criteria form: a request-date range plus the report type.

    Typo fix: the report-type label read "Report Tpe"; it now reads
    "Report Type". Long-dead commented-out fields were removed.
    """
    fields=[
        RPACCalendarPicker("create_time_from", label_text = "Request Date(from)"),
        RPACCalendarPicker("create_time_to", label_text = "Request Date(to)"),
        RPACSelect("report_type", label_text = "Report Type", options = [("weekly", "Weekly"), ("summary", "Summary")]),
    ]
# Module-level singleton used by the controllers/templates.
report_form=ReportForm()
##################################################################
#
# abstract widget for the sample development sub form
#
##################################################################
class AbstractWidget(Form):
    """Abstract base for the sample-development sub-form widgets.

    Subclasses declare `js_url`, `label` and `sub_template`; rendering is
    driven by the shared master template below.
    """
    template="tribal.templates.sample.sub_form_master"
    def __init__(self, id = None, parent = None, children = None, **kw):
        # BUG FIX: `children` previously defaulted to a shared mutable
        # list ([]); every instance now gets its own fresh list.
        if children is None:
            children = []
        # NOTE(review): super(Form, self) deliberately(?) skips
        # Form.__init__ in the MRO — preserved as-is; confirm before
        # changing to super(AbstractWidget, self).
        super(Form, self).__init__(id, parent, children, **kw)
##################################################################
#
# detail define for every form
#
##################################################################
#for Program -> Target
class SFTargetWidget(AbstractWidget):
    """Sub-form for the Program -> Target session."""
    js_url="/js/custom/sample/SFTargetWidget.js"
    label="Target"
    sub_template="session_target.mak"
#for Program -> Avon
class SFAvonWidget(AbstractWidget):
    """Sub-form for the Program -> Avon session."""
    js_url="/js/custom/sample/SFAvonWidget.js"
    label="Avon"
    sub_template="session_avon.mak"
#for Program -> Best Buy
class SFBestBuyWidget(AbstractWidget):
    """Sub-form for the Program -> Best Buy session."""
    js_url="/js/custom/sample/SFBestBuyWidget.js"
    label="Best Buy"
    sub_template="session_bestbuy.mak"
#for Structure -> box
class SFBoxWidget(AbstractWidget):
    """Sub-form for the Structure -> Boxes Design session."""
    js_url="/js/custom/sample/SFBoxWidget.js"
    label="Boxes Design"
    sub_template="session_box.mak"
#for Structure -> Tray
class SFTrayWidget(AbstractWidget):
    """Sub-form for the Structure -> Tray Design session."""
    js_url="/js/custom/sample/SFTrayWidget.js"
    label="Tray Design"
    sub_template="session_tray.mak"
#for Structure -> Floor
class SFFloorWidget(AbstractWidget):
    """Sub-form for the Structure -> Floor/Pallet Display/Sidekick session."""
    js_url="/js/custom/sample/SFFloorWidget.js"
    label="Floor/Pallet Display/Sidekick"
    sub_template="session_floor.mak"
#for Structure -> General
class SFGeneralWidget(AbstractWidget):
    """Sub-form for the Structure -> General packaging design session."""
    js_url="/js/custom/sample/SFGeneralWidget.js"
    label="General packaging design"
    sub_template="session_general.mak"
#for Artwork -> Label
class SFLabelWidget(AbstractWidget):
    """Sub-form for the Artwork -> Barcode Label session."""
    js_url="/js/custom/sample/SFLabelWidget.js"
    label="Barcode Label"
    sub_template="session_label.mak"
#for Artwork -> Artwork
class SFArtworkWidget(AbstractWidget):
    """Sub-form for the Artwork -> Artwork session."""
    js_url="/js/custom/sample/SFArtworkWidget.js"
    label="Artwork"
    sub_template="session_artwork.mak"
#for Output -> Printout
class SFPrintoutWidget(AbstractWidget):
    """Sub-form for the Output -> Printout session."""
    js_url="/js/custom/sample/SFPrintoutWidget.js"
    label="Printout"
    sub_template="session_printout.mak"
#for Output -> Sampling
class SFSamplingWidget(AbstractWidget):
    """Sub-form for the Output -> Sampling session."""
    js_url="/js/custom/sample/SFSamplingWidget.js"
    label="Sampling"
    sub_template="session_sampling.mak"
#for Output -> 3D images
class SF3DImageWidget(AbstractWidget):
    """Sub-form for the Output -> 3D Images session."""
    js_url="/js/custom/sample/SF3DImageWidget.js"
    label="3D Images"
    sub_template="session_3dimage.mak"
#for Output -> Assembly Sheet
class SFAssemblyWidget(AbstractWidget):
    """Sub-form for the Output -> Assembly Sheet session."""
    js_url="/js/custom/sample/SFAssemblyWidget.js"
    label="Assembly Sheet"
    sub_template="session_assembly.mak"
#for Output -> drop test Sheet
class SFDropWidget(AbstractWidget):
    """Sub-form for the Output -> Drop Test session."""
    js_url="/js/custom/sample/SFDropWidget.js"
    label="Drop Test"
    sub_template="session_drop.mak"
#for Output -> Upload / Download / File Checking Sheet
class SFUploadWidget(AbstractWidget):
    """Sub-form for the Output -> Upload/Download/File Checking session."""
    js_url="/js/custom/sample/SFUploadWidget.js"
    label="Upload/Download/File Checking"
    sub_template="session_upload.mak"
#for Output -> Container Loading Sheet
class SFContainerWidget(AbstractWidget):
    """Sub-form for the Output -> Container Loading session."""
    js_url="/js/custom/sample/SFContainerWidget.js"
    label="Container Loading"
    sub_template="session_container.mak"
#for Output -> File Convert Sheet
class SFFileConvertWidget(AbstractWidget):
    """Sub-form for the Output -> File Convert session."""
    js_url="/js/custom/sample/SFFileConvertWidget.js"
    label="File Convert"
    sub_template="session_fileconvert.mak"
#for Output -> Photo Shot
class SFPhotoWidget(AbstractWidget):
    """Sub-form for the Output -> Photo Shot session."""
    js_url="/js/custom/sample/SFPhotoWidget.js"
    label="Photo Shot"
    sub_template="session_photo.mak"
|
|
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import hashlib
import os
import re
import time
import urllib
import jinja2
# URL templates for deep links into the kubernetes/kubernetes GitHub repo:
# a file view pinned to (commit, path, line), and a commit page.
GITHUB_VIEW_TEMPLATE = 'https://github.com/kubernetes/kubernetes/blob/%s/%s#L%s'
GITHUB_COMMIT_TEMPLATE = 'https://github.com/kubernetes/kubernetes/commit/%s'
def do_timestamp(unix_time, css_class='timestamp', tmpl='%F %H:%M'):
    """Render an int Unix timestamp as an HTML span (client JS localizes it)."""
    when = datetime.datetime.utcfromtimestamp(unix_time)
    markup = '<span class="%s" data-epoch="%s">%s</span>' % (
        css_class, unix_time, when.strftime(tmpl))
    return jinja2.Markup(markup)
def do_dt_to_epoch(dt):
    """Convert a datetime (interpreted in local time) to Unix epoch seconds."""
    local_tuple = dt.timetuple()
    return time.mktime(local_tuple)
def do_shorttimestamp(unix_time):
    """Render a Unix timestamp as a compact 'day hour:minute' span."""
    when = datetime.datetime.utcfromtimestamp(unix_time)
    markup = '<span class="shorttimestamp" data-epoch="%s">%s</span>' % (
        unix_time, when.strftime('%d %H:%M'))
    return jinja2.Markup(markup)
def do_duration(seconds):
    """Convert a numeric duration in seconds into a human-readable string."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return '%dh%dm' % (hours, minutes)
    if minutes:
        return '%dm%ds' % (minutes, secs)
    # Sub-10-second durations get two decimals of precision.
    if secs < 10:
        return '%.2fs' % secs
    return '%ds' % secs
def do_slugify(inp):
    """Convert an arbitrary string into a url-safe slug."""
    # Drop everything except word chars, whitespace and dashes, then
    # collapse whitespace runs into single dashes.
    cleaned = re.sub(r'[^\w\s-]+', '', inp)
    return re.sub(r'\s+', '-', cleaned).lower()
def do_linkify_stacktrace(inp, commit):
    """Add links to a source code viewer for every mentioned source line.

    Args:
        inp: stack trace text (untrusted; HTML-escaped before use).
        commit: git SHA used to build GitHub links; falsy disables linking.
    Returns:
        jinja2.Markup (safe HTML).
    """
    # Python 2 `unicode` coerces the escaped Markup back to a plain
    # string so re.sub below operates on (and returns) ordinary text.
    inp = unicode(jinja2.escape(inp))
    if not commit:
        return jinja2.Markup(inp) # this was already escaped, mark it safe!
    def rep(m):
        # m.group(0) is the whole "/.../kubernetes/<path>:<line>" match.
        path, line = m.groups()
        return '<a href="%s">%s</a>' % (
            GITHUB_VIEW_TEMPLATE % (commit, path, line), m.group(0))
    # Only whole lines of the form /<anything>/kubernetes/<path>:<line>
    # are linkified (MULTILINE anchors ^/$ per line).
    return jinja2.Markup(re.sub(r'^/\S*/kubernetes/(\S+):(\d+)$', rep, inp,
                                flags=re.MULTILINE))
def do_github_commit_link(commit):
    """Render a short (8-char SHA) link to a kubernetes/kubernetes commit."""
    commit_url = jinja2.escape(GITHUB_COMMIT_TEMPLATE % commit)
    short_sha = commit[:8]
    return jinja2.Markup('<a href="%s">%s</a>' % (commit_url, short_sha))
def do_testcmd(name):
    """Return a shell command that reruns the named test locally."""
    if name.startswith('k8s.io/'):
        # Go unit test names look like "k8s.io/<pkg> <TestName>".
        try:
            pkg, short_name = name.split(' ')
        except ValueError:  # don't block the page render
            logging.error('Unexpected Go unit test name %r', name)
            return name
        return 'go test -v %s -run %s$' % (pkg, short_name)
    if name.startswith('//'):
        # Bazel target.
        return 'bazel test %s' % name
    # Otherwise an e2e (ginkgo) test: strip the [k8s.io] prefix and build
    # an anchored --ginkgo.focus regex.
    stripped = re.sub(r'^\[k8s\.io\] ', '', name)
    name_escaped = re.escape(stripped).replace('\\ ', '\\s')
    test_args = ('--ginkgo.focus=%s$' % name_escaped)
    return "go run hack/e2e.go -v -test --test_args='%s'" % test_args
def do_parse_pod_name(text):
    """Find the pod name from the failure and return the pod name."""
    match = re.search(r' pod (\S+)', text)
    if not match:
        return ""
    # Strip quoting/escaping punctuation that often clings to the name.
    return re.sub(r'[\'"\\:]', '', match.group(1))
def do_label_attr(labels, name):
    """Return the value part of the first 'name/value' label, or ''.

    >>> do_label_attr(['needs-rebase', 'size/XS'], 'size')
    'XS'
    """
    prefix = name + '/'
    for label in labels:
        if label.startswith(prefix):
            return label[len(prefix):]
    return ''
def do_classify_size(payload):
    """Determine the size class for a PR.

    Prefers an explicit 'size/*' label; otherwise derives the class from
    the total changed line count.
    """
    size = do_label_attr(payload['labels'], 'size')
    if size or 'additions' not in payload or 'deletions' not in payload:
        return size
    lines = payload['additions'] + payload['deletions']
    # based on mungegithub/mungers/size.go
    thresholds = [
        (10, 'XS'),
        (30, 'S'),
        (100, 'M'),
        (500, 'L'),
        (1000, 'XL'),
    ]
    for limit, label in thresholds:
        if lines < limit:
            return label
    return 'XXL'
def do_render_status(payload, user):
    """Render a PR's overall status cell (octicon + text) for the dashboard.

    Args:
        payload: dict with optional 'labels', 'attn' (user -> note), and
            'status' (context -> (state, url, description)) keys.
        user: login of the viewing user; picks their attention note.
    Returns:
        jinja2.Markup combining an optional state icon with status text.
    """
    states = set()
    text = 'Pending'
    if 'lgtm' in payload.get('labels', []):
        text = 'LGTM'
    elif user in payload.get('attn', {}):
        text = payload['attn'][user].title()
        if '#' in text: # strip start/end attn timestamps
            text = text[:text.index('#')]
    for ctx, (state, _url, desc) in payload.get('status', {}).items():
        if ctx == 'Submit Queue' and state == 'pending':
            if 'does not have lgtm' in desc.lower():
                # Don't show overall status as pending when Submit
                # won't continue without LGTM.
                continue
        if ctx == 'code-review/reviewable' and state == 'pending':
            # Reviewable isn't a CI, so we don't care if it's pending.
            # Its dashboard might replace all of this eventually.
            continue
        states.add(state)
    icon = ''
    # Precedence: any failure > any pending > any success.
    if 'failure' in states:
        icon = 'x'
        state = 'failure'
    elif 'pending' in states:
        icon = 'primitive-dot'
        state = 'pending'
    elif 'success' in states:
        icon = 'check'
        state = 'success'
    if icon:
        icon = '<span class="text-%s octicon octicon-%s"></span>' % (
            state, icon)
    return jinja2.Markup('%s%s' % (icon, text))
def do_get_latest(payload, user):
    """Return the int timestamp after the final '#' in the user's attn note.

    Attention notes are formatted 'text#start#latest'. Returns None when
    there is no note for the user or the note carries no timestamps.

    BUG FIX: the original only guarded against zero '#' characters; a
    note with exactly one '#' made the 3-way rsplit unpack raise
    ValueError. We now require at least two separators.
    """
    text = payload.get('attn', {}).get(user)
    if not text:
        return None
    if text.count('#') < 2:
        return None
    _text, _start, latest = text.rsplit('#', 2)
    return int(latest)
def do_ltrim(s, needle):
    """Strip one leading occurrence of *needle* from *s*, if present."""
    return s[len(needle):] if s.startswith(needle) else s
def do_select(seq, pred):
    """Filter *seq* down to elements where pred(x) is truthy.

    NOTE(review): under Python 2 (this file's target) `filter` returns a
    list; on Python 3 it would be a one-shot iterator — confirm templates
    iterate the result only once before porting.
    """
    return filter(pred, seq)
def do_tg_url(testgrid_query, test_name=''):
    """Build a testgrid dashboard URL, optionally filtered to one test."""
    query = testgrid_query
    if test_name:
        # Always include the 'Overall' row alongside the requested test.
        regex = '^Overall$|' + re.escape(test_name)
        query += '&include-filter-by-regex=%s' % urllib.quote(regex)
    return 'https://k8s-testgrid.appspot.com/%s' % query
def do_gcs_browse_url(gcs_path):
    """Return the gcsweb browse URL for a GCS path (slash-terminated)."""
    normalized = gcs_path if gcs_path.endswith('/') else gcs_path + '/'
    return 'http://gcsweb.k8s.io/gcs' + normalized
# Process-lifetime memo of filename -> short content hash.
static_hashes = {}
def do_static(filename):
    """Return a cache-busting URL ('/static/x?<sha1 prefix>') for an asset.

    The file's SHA-1 prefix is computed once per process and memoized in
    ``static_hashes``.

    BUG FIX: the original ``open(filename).read()`` leaked the file
    handle until garbage collection; a context manager closes it
    promptly.
    """
    filename = 'static/%s' % filename
    if filename not in static_hashes:
        with open(filename) as f:
            data = f.read()
        static_hashes[filename] = hashlib.sha1(data).hexdigest()[:10]
    return '/%s?%s' % (filename, static_hashes[filename])
# Direct aliases exposing stdlib helpers as template filters.
do_basename = os.path.basename
do_dirname = os.path.dirname
do_quote_plus = urllib.quote_plus  # Python 2 urllib; urllib.parse on Py3
def register(filters):
    """Register do_* functions in this module in a dictionary.

    Each filter is exposed under its name minus the 'do_' prefix.
    """
    prefix = 'do_'
    for name, func in globals().items():
        if name.startswith(prefix):
            filters[name[len(prefix):]] = func
|
|
import logging
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QMenu, QMenuBar, QToolBar, QAction
from gii.core import signals, app
from Menu import MenuManager
from gii.qt.IconCache import getIcon
class ToolBarItem(object):
    """One toolbar entry wrapping a QAction (or QWidgetAction).

    Recognized keyword options: label, priority, shortcut, command,
    command_args, group, icon, widget, menu_link, type ('check'/'radio'),
    on_click.
    """
    def __init__(self, name, **option):
        option = option or {}
        self.name = name.lower()
        self.label = option.get('label', name)
        self.priority = option.get('priority', 0)
        self.shortcut = option.get('shortcut', False)
        self.cmd = option.get('command', None)
        self.cmdArgs = option.get('command_args', None)
        self.groupId = option.get('group', None)
        iconName = option.get('icon', None)
        self.icon = iconName and getIcon(iconName) or None
        self.parent = None
        self.owner = None
        self.onClick = None
        self.signal = None
        self.itemType = False
        widget = option.get('widget', None)
        menuLink = option.get('menu_link')
        if widget:
            # Embed an arbitrary widget directly in the toolbar.
            self.qtAction = QtGui.QWidgetAction(None)
            self.qtAction.setDefaultWidget(widget)
        elif menuLink:
            # Reuse the QAction of an existing menu item.
            m = MenuManager.get().find(menuLink)
            if m and hasattr(m, 'qtAction'):
                self.qtAction = m.qtAction
            else:
                # BUG FIX: the original logged `self.menuLink`, an
                # attribute that is never assigned, so this error path
                # itself raised AttributeError. Use the local instead.
                logging.error('not valid menu link:' + menuLink)
                self.qtAction = QtGui.QAction(self.label, None)
        else:
            # Plain (optionally checkable) action.
            self.itemType = option.get('type', False)
            self.onClick = option.get('on_click', None)
            self.signal = None
            self.qtAction = QtGui.QAction(
                self.label, None,
                checkable = self.itemType == 'check',
                triggered = self.handleEvent,
                shortcut = self.shortcut
            )
        if self.icon:
            self.qtAction.setIcon(self.icon)

    def setEnabled(self, enabled=True):
        self.qtAction.setEnabled(enabled)

    def getName(self):
        return self.name

    def getAction(self):
        return self.qtAction

    def getValue(self):
        # Checkable items report their checked state; others are True.
        if self.itemType in ('check', 'radio'):
            return self.qtAction.isChecked()
        return True

    def setValue(self, value):
        if self.itemType in ('check', 'radio'):
            self.qtAction.setChecked(value and True or False)

    def getOwner(self):
        # Walk up to the nearest explicitly-set owner.
        if self.owner: return self.owner
        if self.parent: return self.parent.getOwner()
        return None

    def handleEvent(self):
        """Dispatch a click to owner.onTool, signal, on_click and command."""
        value = self.getValue()
        owner = self.getOwner()
        if owner and hasattr(owner, 'onTool'):
            owner.onTool(self)
        if self.signal:
            self.signal(value)
        if self.onClick != None:
            self.onClick(value)
        if self.cmd:
            args = self.cmdArgs or {}
            app.doCommand(self.cmd, **args)

    def trigger(self):
        if self.qtAction:
            self.qtAction.trigger()
class ToolBarNode(object):
    """Wrapper around a QToolBar that manages ToolBarItem entries and groups."""
    def __init__(self, name, qtToolbar, **option):
        self.name = name or ''
        assert isinstance(qtToolbar, QToolBar)
        self.qtToolbar = qtToolbar
        self.items = {}
        self.groups = {}
        self.owner = option.get('owner', None)
        # NOTE(review): '_icon_size' is only checked here, never assigned,
        # so this branch runs for every fresh QToolBar — confirm intent.
        if not hasattr(qtToolbar, '_icon_size'):
            size = option.get('icon_size', 16)
            qtToolbar.setIconSize(QtCore.QSize(size, size))

    def getQtToolbar(self):
        return self.qtToolbar

    def affirmGroup(self, id):
        """Return the action group for *id*, creating it on first use."""
        existing = self.groups.get(id, None)
        if existing:
            return existing
        created = QtGui.QActionGroup(self.qtToolbar)
        self.groups[id] = created
        return created

    def addTools(self, dataList):
        """Add a batch of tools: '----' separators or option dicts."""
        for entry in dataList:
            if entry == '----':
                self.addTool(entry)
            elif isinstance(entry, dict):
                toolName = entry.get('name', None)
                if toolName:
                    self.addTool(**entry)

    def addTool(self, name, **option):
        """Create a ToolBarItem (or a separator for '----') and attach it."""
        if name == '----':
            self.qtToolbar.addSeparator()
            return
        item = ToolBarItem(name, **option)
        self.items[name] = item
        self.qtToolbar.addAction(item.qtAction)
        item.parent = self
        if item.groupId:
            self.affirmGroup(item.groupId).addAction(item.qtAction)
        return item

    def addWidget(self, widget):
        return self.qtToolbar.addWidget(widget)

    def getQtToolBar(self):
        # Capitalization alias kept for backward compatibility.
        return self.qtToolbar

    def addSeparator(self):
        self.qtToolbar.addSeparator()

    def getTool(self, name):
        return self.items.get(name, None)

    def removeTool(self, name):
        tool = self.getTool(name)
        if tool:
            self.qtToolbar.removeAction(tool.qtAction)
            del self.items[name]

    def enableTool(self, name, enabled=True):
        tool = self.getTool(name)
        if tool:
            tool.setEnabled(enabled)

    def setEnabled(self, enabled=True):
        self.qtToolbar.setEnabled(enabled)

    def setValue(self, value):
        # No-op: a toolbar itself carries no value.
        pass

    def getOwner(self):
        return self.owner
class ToolBarManager(object):
    """Global registry of named ToolBarNodes, addressed by 'toolbar/tool' paths."""
    _singleton = None

    @staticmethod
    def get():
        """Return the singleton instance (created at module import time)."""
        return ToolBarManager._singleton

    def __init__(self):
        assert not ToolBarManager._singleton
        ToolBarManager._singleton = self
        self.toolbars = {}

    def addToolBar(self, name, toolbar, owner, **option):
        """Wrap *toolbar* in a ToolBarNode; register it when *name* is set."""
        tb = ToolBarNode(name, toolbar, **option)
        tb.owner = owner
        if name:
            self.toolbars[name] = tb
        return tb

    def find(self, path):
        """Look up 'toolbar' or 'toolbar/tool'; returns None when absent."""
        blobs = path.split('/')
        l = len(blobs)
        if l < 1 or l > 2:
            logging.error('invalid toolbar path' + path)
            return None
        toolbar = self.toolbars.get(blobs[0])
        if l == 2:
            return toolbar and toolbar.getTool(blobs[1]) or None
        return toolbar

    def addTool(self, path, option=None, owner=None):
        """Add a tool at 'toolbar/tool'; returns the new item or None.

        BUG FIX: *option* previously defaulted to a shared mutable dict
        ({}); it now defaults to None and a fresh dict is used per call.
        """
        option = option or {}
        blobs = path.split('/')
        if len(blobs) != 2:
            logging.error('invalid toolbar item path' + path)
            return None
        toolbar = self.find(blobs[0])
        if toolbar:
            tool = toolbar.addTool(blobs[1], **option)
            if tool: tool.owner = owner
            return tool
        logging.error('toolbar not found:' + blobs[0])
        return None

    def enableTool(self, path, enabled=True):
        tool = self.find(path)
        if tool:
            tool.setEnabled(enabled)
        else:
            logging.error('toolbar/tool not found:' + path)
def wrapToolBar(name, qtToolbar, **kwargs):
    """Wrap an existing QToolBar in a ToolBarNode without registering it."""
    return ToolBarNode(name, qtToolbar, **kwargs)
ToolBarManager()  # instantiate the module-level singleton at import time
|
|
# -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import (
AbstractNode,
Contributor,
DraftRegistration,
Institution,
Node,
NodeRelation,
OSFUser,
PreprintService,
PrivateLink,
)
from osf.utils import permissions as osf_permissions
from website.project.metadata.utils import is_prereg_admin
from api.base.utils import get_user_auth, is_deprecated
class ContributorOrPublic(permissions.BasePermission):
    """Reads allowed when the node is public/viewable; writes need edit rights."""
    def has_object_permission(self, request, view, obj):
        # Imported locally to avoid a circular import with api.nodes.views.
        from api.nodes.views import NodeProvider
        # Addon settings delegate to their owning object; providers and
        # preprints delegate to their node.
        if isinstance(obj, BaseAddonSettings):
            obj = obj.owner
        if isinstance(obj, (NodeProvider, PreprintService)):
            obj = obj.node
        assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node, NodeProvider, NodeRelation, PreprintService, or AddonSettings; got {}'.format(obj)
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            return obj.is_public or obj.can_view(auth)
        else:
            return obj.can_edit(auth)
class IsPublic(permissions.BasePermission):
    """Allow access whenever the node is public or viewable by the requester."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be an Node got {}'.format(obj)
        user_auth = get_user_auth(request)
        return obj.is_public or obj.can_view(user_auth)
class IsAdmin(permissions.BasePermission):
    """Require ADMIN permission on the node for every request method."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
        requester = get_user_auth(request)
        return obj.has_permission(requester.user, osf_permissions.ADMIN)
class IsContributor(permissions.BasePermission):
    """Reads require contributor status; writes require write permission."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            return obj.is_contributor(auth.user)
        # Consistency fix: use the osf_permissions.WRITE constant (as the
        # other classes in this module do) instead of the bare 'write'
        # string literal.
        return obj.has_permission(auth.user, osf_permissions.WRITE)
class IsAdminOrReviewer(permissions.BasePermission):
    """Allow prereg admins to update draft registrations; otherwise ADMIN only.

    DELETE is never granted via reviewer status.
    """
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
        auth = get_user_auth(request)
        reviewer_allowed = request.method != 'DELETE' and is_prereg_admin(auth.user)
        if reviewer_allowed:
            return True
        return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminOrPublic(permissions.BasePermission):
    """Reads follow public/visibility rules; writes require ADMIN."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, OSFUser, Institution, BaseAddonSettings, DraftRegistration, PrivateLink)), 'obj must be an Node, User, Institution, Draft Registration, PrivateLink, or AddonSettings; got {}'.format(obj)
        auth = get_user_auth(request)
        if request.method not in permissions.SAFE_METHODS:
            return obj.has_permission(auth.user, osf_permissions.ADMIN)
        return obj.is_public or obj.can_view(auth)
class ExcludeWithdrawals(permissions.BasePermission):
    """Deny all access to retracted (withdrawn) registrations."""
    def has_object_permission(self, request, view, obj):
        if isinstance(obj, Node):
            node = obj
        else:
            # Fall back to resolving the node from the URL kwargs.
            url_kwargs = request.parser_context['kwargs']
            node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        return not node.is_retracted
class ContributorDetailPermissions(permissions.BasePermission):
    """Permissions for the contributor detail endpoints."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, OSFUser, Contributor)), 'obj must be User, Contributor, or Node, got {}'.format(obj)
        auth = get_user_auth(request)
        url_kwargs = request.parser_context['kwargs']
        node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        target_user = OSFUser.load(url_kwargs['user_id'])
        if request.method in permissions.SAFE_METHODS:
            return node.is_public or node.can_view(auth)
        is_node_admin = node.has_permission(auth.user, osf_permissions.ADMIN)
        if request.method == 'DELETE':
            # A contributor may remove themselves without admin rights.
            return is_node_admin or auth.user == target_user
        return is_node_admin
class ContributorOrPublicForPointers(permissions.BasePermission):
    """Access rules for a single node link (pointer)."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node or NodeRelation, got {}'.format(obj)
        auth = get_user_auth(request)
        url_kwargs = request.parser_context['kwargs']
        parent_node = AbstractNode.load(url_kwargs['node_id'])
        pointer_node = NodeRelation.load(url_kwargs['node_link_id']).child
        if request.method not in permissions.SAFE_METHODS:
            # Mutations require edit rights on the parent.
            return parent_node.can_edit(auth)
        # Reads: a public target is always visible; otherwise the
        # requester must be able to view both parent and target.
        has_parent_auth = parent_node.can_view(auth)
        has_pointer_auth = pointer_node.can_view(auth)
        return pointer_node.is_public or (has_parent_auth and has_pointer_auth)
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
    """Access rules for the node-links relationship endpoint."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        parent_node = obj['self']
        if request.method in permissions.SAFE_METHODS:
            return parent_node.can_view(auth)
        if request.method == 'DELETE':
            return parent_node.can_edit(auth)
        # Creating/updating links: must be able to edit the parent and
        # view every node being pointed at.
        if not parent_node.can_edit(auth):
            return False
        pointer_nodes = []
        for pointer in request.data.get('data', []):
            node = AbstractNode.load(pointer['id'])
            if not node or node.is_collection:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
            pointer_nodes.append(node)
        return all(node.can_view(auth) for node in pointer_nodes)
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
    """Validate node-link lookups/deletions against registration state."""
    def has_object_permission(self, request, view, obj):
        url_kwargs = request.parser_context['kwargs']
        node_link = NodeRelation.load(url_kwargs['node_link_id'])
        node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        auth = get_user_auth(request)
        is_delete = request.method == 'DELETE'
        if is_delete and node.is_registration:
            raise exceptions.MethodNotAllowed(method=request.method)
        if node.is_collection or node.is_registration:
            raise exceptions.NotFound
        if node != node_link.parent:
            raise exceptions.NotFound
        if is_delete and not node.can_edit(auth):
            return False
        return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
    """Reads follow public/visibility rules; writes require WRITE permission."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        node = obj['self']
        if request.method not in permissions.SAFE_METHODS:
            return node.has_permission(auth.user, osf_permissions.WRITE)
        return node.is_public or node.can_view(auth)
class ReadOnlyIfRegistration(permissions.BasePermission):
    """Makes PUT and POST forbidden for registrations."""
    def has_object_permission(self, request, view, obj):
        if not isinstance(obj, AbstractNode):
            # Resolve the node from the URL kwargs when the view handed
            # us something else.
            obj = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
        assert isinstance(obj, AbstractNode), 'obj must be an Node'
        if not obj.is_registration:
            return True
        return request.method in permissions.SAFE_METHODS
class ShowIfVersion(permissions.BasePermission):
    """Hide an endpoint (404) when requested outside its API version window."""
    def __init__(self, min_version, max_version, deprecated_message):
        super(ShowIfVersion, self).__init__()
        self.min_version = min_version
        self.max_version = max_version
        self.deprecated_message = deprecated_message

    def has_object_permission(self, request, view, obj):
        deprecated = is_deprecated(request.version, self.min_version, self.max_version)
        if deprecated:
            raise exceptions.NotFound(detail=self.deprecated_message)
        return True
class NodeLinksShowIfVersion(ShowIfVersion):
    """Node-links endpoints exist only in API version 2.0."""
    def __init__(self):
        super(NodeLinksShowIfVersion, self).__init__(
            '2.0', '2.0', 'This feature is deprecated as of version 2.1')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
__all__ = ["LinearOperatorUDVHUpdate",]
class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorUDVHUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorUDVHUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.apply(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `apply` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorUDVHUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.apply(x)` on
`x` having `x.shape = [N, R]` with `O(L_apply*N*R)` complexity (and similarly
for `solve`, `determinant`. Then, if `x.shape = [N, R]`,
* `operator.apply(x)` is `O(L_apply*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_apply*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, diag_update_positive`
and `square`
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorUDVHUpdate"):
"""Initialize a `LinearOperatorUDVHUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]` real `float32` or `float64`
`LinearOperator`. This is `L` above.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
Default is `None`, unless `base_operator` is positive-definite
`v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case
this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
# TODO(langmore) support complex types.
# Complex types are not allowed due to tf.cholesky() requiring float.
# If complex dtypes are allowed, we update the following
# 1. is_diag_update_positive should still imply that `diag > 0`, but we need
# to remind the user that this implies diag is real. This is needed
# because if diag has non-zero imaginary part, it will not be
# self-adjoint positive definite.
dtype = base_operator.dtype
allowed_dtypes = [dtypes.float32, dtypes.float64]
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
# The condition for using a cholesky is sufficient for SPD, and
# we no weaker choice of these hints leads to SPD. Therefore,
# the following line reads "if hints indicate SPD..."
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = ops.convert_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = ops.convert_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = ops.convert_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorUDVHUpdate, self).__init__(
dtype=self._base_operator.dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
contrib_tensor_util.assert_same_float_dtype(
(base_operator, self.u, self.v, self._diag_update))
self._check_shapes()
# Pre-compute the so-called "capacitance" matrix
# C := D^{-1} + V^H L^{-1} U
self._capacitance = self._make_capacitance()
if self._use_cholesky:
self._chol_capacitance = linalg_ops.cholesky(self._capacitance)
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.get_shape(), self.v.get_shape())
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
self.base_operator.domain_dimension.assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
uv_shape[-1].assert_is_compatible_with(self._diag_update.get_shape()[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.get_shape()[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
1. / self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if self.u.get_shape()[-1].value is not None:
r = self.u.get_shape()[-1].value
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
self._diag_inv_operator = self._diag_operator
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.u.get_shape()[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
array_ops.shape(self.u)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _apply(self, x, adjoint=False, adjoint_arg=False):
u = self.u
v = self.v
l = self.base_operator
d = self.diag_operator
leading_term = l.apply(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.apply(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.apply(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
det_c = linalg_ops.matrix_determinant(self._capacitance)
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _log_abs_determinant(self):
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), reduction_indices=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._capacitance)
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
l = self.base_operator
if adjoint:
v = self.u
u = self.v
else:
v = self.v
u = self.u
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
self._chol_capacitance, vh_linv_rhs)
else:
capinv_vh_linv_rhs = linalg_ops.matrix_solve(
self._capacitance, vh_linv_rhs, adjoint=adjoint)
# U C^{-1} V^H M^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(self.u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True)
# D^{-1} + V^H L^{-1} V
capacitance = self._diag_inv_operator.add_to_tensor(vh_linv_u)
return capacitance
|
|
"""
Author: Timothy Moore
Created On: 31st August 2017
Defines a two-dimensional quadtree of arbitrary
depth and bucket size.
"""
import inspect
import math
from collections import deque
from pygorithm.geometry import (vector2, polygon2, rect2)
class QuadTreeEntity(object):
"""
This is the minimum information required for an object to
be usable in a quadtree as an entity. Entities are the
things that you are trying to compare in a quadtree.
:ivar aabb: the axis-aligned bounding box of this entity
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
def __init__(self, aabb):
"""
Create a new quad tree entity with the specified aabb
:param aabb: axis-aligned bounding box
:type aabb: :class:`pygorithm.geometry.rect2.Rect2`
"""
self.aabb = aabb
def __repr__(self):
"""
Create an unambiguous representation of this entity.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints quadtreeentity(aabb=rect2(width=5, height=5, mincorner=vector2(x=0, y=0)))
print(repr(_ent))
:returns: unambiguous representation of this quad tree entity
:rtype: string
"""
return "quadtreeentity(aabb={})".format(repr(self.aabb))
def __str__(self):
"""
Create a human readable representation of this entity
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
_ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5))
# prints entity(at rect(5x5 at <0, 0>))
print(str(_ent))
:returns: human readable representation of this entity
:rtype: string
"""
return "entity(at {})".format(str(self.aabb))
class QuadTree(object):
"""
A quadtree is a sorting tool for two-dimensional space, most
commonly used to reduce the number of required collision
calculations in a two-dimensional scene. In this context,
the scene is stepped without collision detection, then a
quadtree is constructed from all of the boundaries
.. caution::
Just because a quad tree has split does not mean entities will be empty. Any
entities which overlay any of the lines of the split will be included in the
parent of the quadtree.
.. tip::
It is important to tweak bucket size and depth to the problem, but a common error
is too small a bucket size. It is typically not reasonable to have a bucket size
smaller than 16; A good starting point is 64, then modify as appropriate. Larger
buckets reduce the overhead of the quad tree which could easily exceed the improvement
from reduced collision checks. The max depth is typically just a sanity check since
depth greater than 4 or 5 would either indicate a badly performing quadtree (too
dense objects, use an r-tree or kd-tree) or a very large world (where an iterative
quadtree implementation would be appropriate).
:ivar bucket_size: maximum number objects per bucket (before :py:attr:`.max_depth`)
:type bucket_size: int
:ivar max_depth: maximum depth of the quadtree
:type max_depth: int
:ivar depth: the depth of this node (0 being the topmost)
:type depth: int
:ivar location: where this quad tree node is situated
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:ivar entities: the entities in this quad tree and in NO OTHER related quad tree
:type entities: list of :class:`.QuadTreeEntity`
:ivar children: either None or the 4 :class:`.QuadTree` children of this node
:type children: None or list of :class:`.QuadTree`
"""
def __init__(self, bucket_size, max_depth, location, depth = 0, entities = None):
"""
Initialize a new quad tree.
.. warning::
Passing entities to this quadtree will NOT cause it to split automatically!
You must call :py:meth:`.think` for that. This allows for more predictable
performance per line.
:param bucket_size: the number of entities in this quadtree
:type bucket_size: int
:param max_depth: the maximum depth for automatic splitting
:type max_depth: int
:param location: where this quadtree is located
:type location: :class:`pygorithm.geometry.rect2.Rect2`
:param depth: the depth of this node
:type depth: int
:param entities: the entities to initialize this quadtree with
:type entities: list of :class:`.QuadTreeEntity` or None for empty list
"""
self.bucket_size = bucket_size
self.max_depth = max_depth
self.location = location
self.depth = depth
self.entities = entities if entities is not None else []
self.children = None
def think(self, recursive = False):
"""
Call :py:meth:`.split` if appropriate
Split this quad tree if it has not split already and it has more
entities than :py:attr:`.bucket_size` and :py:attr:`.depth` is
less than :py:attr:`.max_depth`.
If `recursive` is True, think is called on the :py:attr:`.children` with
recursive set to True after splitting.
:param recursive: if `think(True)` should be called on :py:attr:`.children` (if there are any)
:type recursive: bool
"""
if not self.children and self.depth < self.max_depth and len(self.entities) > self.bucket_size:
self.split()
if recursive:
if self.children:
for child in self.children:
child.think(True)
def split(self):
"""
Split this quadtree.
.. caution::
A call to split will always split the tree or raise an error. Use
:py:meth:`.think` if you want to ensure the quadtree is operating
efficiently.
.. caution::
This function will not respect :py:attr:`.bucket_size` or
:py:attr:`.max_depth`.
:raises ValueError: if :py:attr:`.children` is not empty
"""
if self.children:
raise ValueError("cannot split twice")
_cls = type(self)
def _cstr(r):
return _cls(self.bucket_size, self.max_depth, r, self.depth + 1)
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
self.children = [
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x + _halfwidth, _y + _halfheight))),
_cstr(rect2.Rect2(_halfwidth, _halfheight, vector2.Vector2(_x, _y + _halfheight))) ]
_newents = []
for ent in self.entities:
quad = self.get_quadrant(ent)
if quad < 0:
_newents.append(ent)
else:
self.children[quad].entities.append(ent)
self.entities = _newents
def get_quadrant(self, entity):
"""
Calculate the quadrant that the specified entity belongs to.
Touching a line is considered overlapping a line. Touching is
determined using :py:meth:`math.isclose`
Quadrants are:
- -1: None (it overlaps 2 or more quadrants)
- 0: Top-left
- 1: Top-right
- 2: Bottom-right
- 3: Bottom-left
.. caution::
This function does not verify the entity is contained in this quadtree.
This operation takes O(1) time.
:param entity: the entity to place
:type entity: :class:`.QuadTreeEntity`
:returns: quadrant
:rtype: int
"""
_aabb = entity.aabb
_halfwidth = self.location.width / 2
_halfheight = self.location.height / 2
_x = self.location.mincorner.x
_y = self.location.mincorner.y
if math.isclose(_aabb.mincorner.x, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.x + _aabb.width, _x + _halfwidth):
return -1
if math.isclose(_aabb.mincorner.y, _y + _halfheight):
return -1
if math.isclose(_aabb.mincorner.y + _aabb.height, _y + _halfheight):
return -1
_leftside_isleft = _aabb.mincorner.x < _x + _halfwidth
_rightside_isleft = _aabb.mincorner.x + _aabb.width < _x + _halfwidth
if _leftside_isleft != _rightside_isleft:
return -1
_topside_istop = _aabb.mincorner.y < _y + _halfheight
_botside_istop = _aabb.mincorner.y + _aabb.height < _y + _halfheight
if _topside_istop != _botside_istop:
return -1
_left = _leftside_isleft
_top = _topside_istop
if _left:
if _top:
return 0
else:
return 3
else:
if _top:
return 1
else:
return 2
def insert_and_think(self, entity):
"""
Insert the entity into this or the appropriate child.
This also acts as thinking (recursively). Using :py:meth:`.insert_and_think`
iteratively is slightly less efficient but has more predictable performance
than initializing with a large number of entities then thinking is slightly
faster but may hang. Both may exceed recursion depth if :py:attr:`.max_depth`
is too large.
:param entity: the entity to insert
:type entity: :class:`.QuadTreeEntity`
"""
if not self.children and len(self.entities) == self.bucket_size and self.depth < self.max_depth:
self.split()
quad = self.get_quadrant(entity) if self.children else -1
if quad < 0:
self.entities.append(entity)
else:
self.children[quad].insert_and_think(entity)
def retrieve_collidables(self, entity, predicate = None):
"""
Find all entities that could collide with the specified entity.
.. warning::
If entity is, itself, in the quadtree, it will be returned. The
predicate may be used to prevent this using your preferred equality
method.
The predicate takes 1 positional argument (the entity being considered)
and returns `False` if the entity should never be returned, even if it
might collide with the entity. It should return `True` otherwise.
:param entity: the entity to find collidables for
:type entity: :class:`.QuadTreeEntity`
:param predicate: the predicate
:type predicate: :class:`types.FunctionType` or None
:returns: potential collidables (never `None)
:rtype: list of :class:`.QuadTreeEntity`
"""
result = list(filter(predicate, self.entities))
quadrant = self.get_quadrant(entity) if self.children else -1
if quadrant >= 0:
result.extend(self.children[quadrant].retrieve_collidables(entity, predicate))
elif self.children:
for child in self.children:
touching, overlapping, alwaysNone = rect2.Rect2.find_intersection(entity.aabb, child.location, find_mtv=False)
if touching or overlapping:
result.extend(child.retrieve_collidables(entity, predicate))
return result
def _iter_helper(self, pred):
"""
Calls pred on each child and childs child, iteratively.
pred takes one positional argument (the child).
:param pred: function to call
:type pred: `types.FunctionType`
"""
_stack = deque()
_stack.append(self)
while _stack:
curr = _stack.pop()
if curr.children:
for child in curr.children:
_stack.append(child)
pred(curr)
def find_entities_per_depth(self):
"""
Calculate the number of nodes and entities at each depth level in this
quad tree. Only returns for depth levels at or equal to this node.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of entities
:rtype: dict int: int
"""
container = { 'result': {} }
def handler(curr, container=container):
container['result'][curr.depth] = container['result'].get(curr.depth, 0) + len(curr.entities)
self._iter_helper(handler)
return container['result']
def find_nodes_per_depth(self):
"""
Calculate the number of nodes at each depth level.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: dict of depth level to number of nodes
:rtype: dict int: int
"""
nodes_per_depth = {}
self._iter_helper(lambda curr, d=nodes_per_depth: d.update({ (curr.depth, d.get(curr.depth, 0) + 1) }))
return nodes_per_depth
def sum_entities(self, entities_per_depth=None):
"""
Sum the number of entities in this quad tree and all lower quad trees.
If `entities_per_depth` is not None, that array is used to calculate the sum
of entities rather than traversing the tree. Either way, this is implemented
iteratively. See :py:meth:`.__str__` for usage example.
:param entities_per_depth: the result of :py:meth:`.find_entities_per_depth`
:type entities_per_depth: `dict int: (int, int)` or None
:returns: number of entities in this and child nodes
:rtype: int
"""
if entities_per_depth is not None:
return sum(entities_per_depth.values())
container = { 'result': 0 }
def handler(curr, container=container):
container['result'] += len(curr.entities)
self._iter_helper(handler)
return container['result']
def calculate_avg_ents_per_leaf(self):
"""
Calculate the average number of entities per leaf node on this and child
quad trees.
In the ideal case, the average entities per leaf is equal to the bucket size,
implying maximum efficiency. Note that, as always with averages, this might
be misleading if this tree has reached its max depth.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:returns: average number of entities at each leaf node
:rtype: :class:`numbers.Number`
"""
container = { 'leafs': 0, 'total': 0 }
def handler(curr, container=container):
if not curr.children:
container['leafs'] += 1
container['total'] += len(curr.entities)
self._iter_helper(handler)
return container['total'] / container['leafs']
def calculate_weight_misplaced_ents(self, sum_entities=None):
"""
Calculate a rating for misplaced entities.
A misplaced entity is one that is not on a leaf node. That weight is multiplied
by 4*remaining maximum depth of that node, to indicate approximately how
many additional calculations are required.
The result is then divided by the total number of entities on this node (either
calculated using :py:meth:`.sum_entities` or provided) to get the approximate
cost of the misplaced nodes in comparison with the placed nodes. A value greater
than 1 implies a different tree type (such as r-tree or kd-tree) should probably be
used.
This is implemented iteratively. See :py:meth:`.__str__` for usage example.
:param sum_entities: the number of entities on this node
:type sum_entities: int or None
:returns: weight of misplaced entities
:rtype: :class:`numbers.Number`
"""
# this iteration requires more context than _iter_helper provides.
# we must keep track of parents as well in order to correctly update
# weights
nonleaf_to_max_child_depth_dict = {}
# stack will be (quadtree, list (of parents) or None)
_stack = deque()
_stack.append((self, None))
while _stack:
curr, parents = _stack.pop()
if parents:
for p in parents:
nonleaf_to_max_child_depth_dict[p] = max(nonleaf_to_max_child_depth_dict.get(p, 0), curr.depth)
if curr.children:
new_parents = list(parents) if parents else []
new_parents.append(curr)
for child in curr.children:
_stack.append((child, new_parents))
_weight = 0
for nonleaf, maxchilddepth in nonleaf_to_max_child_depth_dict.items():
_weight += len(nonleaf.entities) * 4 * (maxchilddepth - nonleaf.depth)
_sum = self.sum_entities() if sum_entities is None else sum_entities
return _weight / _sum
def __repr__(self):
"""
Create an unambiguous representation of this quad tree.
This is implemented iteratively.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(bucket_size=1, max_depth=5, location=rect2(width=100, height=100, mincorner=vector2(x=0, y=0)), depth=0, entities=[], children=[quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=5, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=95, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=50.0)), depth=1, entities=[], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=50.0)), depth=1, entities=[], children=None)])
:returns: unambiguous, recursive representation of this quad tree
:rtype: string
"""
return "quadtree(bucket_size={}, max_depth={}, location={}, depth={}, entities={}, children={})".format(self.bucket_size, self.max_depth, repr(self.location), self.depth, self.entities, self.children)
def __str__(self):
"""
Create a human-readable representation of this quad tree
.. caution::
Because of the complexity of quadtrees it takes a fair amount of calculation to
produce something somewhat legible. All returned statistics have paired functions.
This uses only iterative algorithms to calculate statistics.
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, rect2)
from pygorithm.data_structures import quadtree
# create a tree with a up to 2 entities in a bucket that
# can have a depth of up to 5.
_tree = quadtree.QuadTree(2, 5, rect2.Rect2(100, 100))
# add a few entities to the tree
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5))))
_tree.insert_and_think(quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5))))
# prints quadtree(at rect(100x100 at <0, 0>) with 0 entities here (2 in total); (nodes, entities) per depth: [ 0: (1, 0), 1: (4, 2) ] (allowed max depth: 5, actual: 1), avg ent/leaf: 0.5 (target 1), misplaced weight 0.0 (0 best, >1 bad)
print(_tree)
:returns: human-readable representation of this quad tree
:rtype: string
"""
nodes_per_depth = self.find_nodes_per_depth()
_ents_per_depth = self.find_entities_per_depth()
_nodes_ents_per_depth_str = "[ {} ]".format(', '.join("{}: ({}, {})".format(dep, nodes_per_depth[dep], _ents_per_depth[dep]) for dep in nodes_per_depth.keys()))
_sum = self.sum_entities(entities_per_depth=_ents_per_depth)
_max_depth = max(_ents_per_depth.keys())
_avg_ent_leaf = self.calculate_avg_ents_per_leaf()
_mispl_weight = self.calculate_weight_misplaced_ents(sum_entities=_sum)
return "quadtree(at {} with {} entities here ({} in total); (nodes, entities) per depth: {} (allowed max depth: {}, actual: {}), avg ent/leaf: {} (target {}), misplaced weight {} (0 best, >1 bad)".format(self.location, len(self.entities), _sum, _nodes_ents_per_depth_str, self.max_depth, _max_depth, _avg_ent_leaf, self.bucket_size, _mispl_weight)
@staticmethod
def get_code():
"""
Get the code for the QuadTree class
:returns: code for QuadTree
:rtype: string
"""
return inspect.getsource(QuadTree)
|
|
"""
Main Zencoder module
"""
import os
import httplib2
from urllib import urlencode
# Note: I've seen this pattern for dealing with json in different versions of
# python in a lot of modules -- if there's a better way, I'd love to use it.
try:
# python 2.6 and greater
import json
except ImportError:
try:
# python 2.5
import simplejson
json = simplejson
except ImportError:
# if we're in django or Google AppEngine land
# use this as a last resort
from django.utils import simplejson
json = simplejson
class ZencoderError(Exception):
pass
class ZencoderResponseError(Exception):
def __init__(self, http_response, content):
self.http_response = http_response
self.content = content
class HTTPBackend(object):
    """
    Abstracts out an HTTP backend, but defaults to httplib2.

    @FIXME: Build in support for supplying arbitrary backends
    """
    def __init__(self, base_url, api_key, as_xml=False, resource_name=None, timeout=None, test=False):
        """
        Creates an HTTPBackend object, which abstracts out some of the
        library specific HTTP stuff.

        @param base_url: root API url; resource_name (if given) is appended
        @param api_key: Zencoder API key, stored for subclasses to use
        @param as_xml: talk XML instead of JSON (XML paths raise
            NotImplementedError in encode/decode)
        @param timeout: socket timeout in seconds, passed to httplib2.Http
        @param test: enables integration-mode requests (used by Job)
        """
        self.base_url = base_url
        if resource_name:
            self.base_url = self.base_url + resource_name
        self.http = httplib2.Http(timeout=timeout)
        self.as_xml = as_xml
        self.api_key = api_key
        self.test = test
        # NOTE(review): 'Accepts' is not the standard header name ('Accept');
        # preserved as-is for wire compatibility with the existing API.
        if self.as_xml:
            self.headers = {'Content-Type': 'application/xml',
                            'Accepts': 'application/xml'}
        else:
            self.headers = {'Content-Type': 'application/json',
                            'Accepts': 'application/json'}

    def encode(self, data):
        """
        Encodes data as either JSON or XML, so that it can be passed onto
        the Zencoder API.

        Raises NotImplementedError when as_xml is True.
        """
        if not self.as_xml:
            return json.dumps(data)
        else:
            raise NotImplementedError('Encoding as XML is not supported.')

    def decode(self, raw_body):
        """
        Returns the raw_body as json (the default) or XML.

        Empty (or single-space) bodies decode to None instead of raising.
        """
        if not self.as_xml:
            # only parse json when it exists, else just return None
            if not raw_body or raw_body == ' ':
                return None
            else:
                return json.loads(raw_body)
        else:
            raise NotImplementedError('Decoding as XML is not supported.')

    def delete(self, url, params=None):
        """
        Executes an HTTP DELETE request for the given URL.

        params should be a urllib.urlencoded string; it is appended as a
        query string.
        """
        if params:
            url = '?'.join([url, params])
        response, content = self.http.request(url, method="DELETE",
                                              headers=self.headers)
        return self.process(response, content)

    def get(self, url, data=None):
        """
        Executes an HTTP GET request for the given URL.

        data should be a dictionary of url parameters.
        """
        if data:
            params = urlencode(data)
            url = '?'.join([url, params])
        response, content = self.http.request(url, method="GET",
                                              headers=self.headers)
        return self.process(response, content)

    def post(self, url, body=None):
        """
        Executes an HTTP POST request for the given URL.
        """
        response, content = self.http.request(url, method="POST",
                                              body=body,
                                              headers=self.headers)
        return self.process(response, content)

    def put(self, url, body=None):
        """
        Executes an HTTP PUT request for the given URL.

        Sends an explicit Content-Length header (0 for an empty body)
        on a copy of the shared headers, leaving self.headers untouched.
        """
        _headers = self.headers.copy()
        # FIX: previously the length was stored as str(len(body)) and then
        # wrapped in str() again; compute it once as an int instead.
        content_length = len(body) if body else 0
        _headers['Content-Length'] = str(content_length)
        response, content = self.http.request(url, method="PUT",
                                              body=body,
                                              headers=_headers)
        return self.process(response, content)

    def process(self, http_response, content):
        """
        Returns HTTP backend agnostic Response data.

        Raises ZencoderResponseError when the body fails to decode.
        """
        try:
            code = http_response.status
            body = self.decode(content)
            response = Response(code, body, content, http_response)
            return response
        except ValueError:
            raise ZencoderResponseError(http_response, content)
class Zencoder(object):
    """ This is the entry point to the Zencoder API """
    def __init__(self, api_key=None, api_version=None, as_xml=False, timeout=None, test=False):
        """
        Initializes Zencoder. You must have a valid API_KEY.

        You can pass in the api_key as an argument, or set
        'ZENCODER_API_KEY' as an environment variable, and it will use
        that, if api_key is unspecified.

        Set api_version='edge' to get the Zencoder development API.
        (defaults to 'v2')
        Set as_xml=True to get back xml data instead of the default json.
        """
        version = api_version or 'v2'
        self.base_url = 'https://app.zencoder.com/api/'
        # 'edge' talks to the unversioned development endpoint
        if version != 'edge':
            self.base_url = self.base_url + '%s/' % version
        if api_key:
            self.api_key = api_key
        else:
            try:
                self.api_key = os.environ['ZENCODER_API_KEY']
            except KeyError:
                raise ZencoderError('ZENCODER_API_KEY not set')
        self.test = test
        self.as_xml = as_xml
        # resource helpers share the same credentials and settings
        self.job = Job(self.base_url, self.api_key, self.as_xml, timeout=timeout, test=self.test)
        self.account = Account(self.base_url, self.api_key, self.as_xml, timeout=timeout)
        self.output = Output(self.base_url, self.api_key, self.as_xml, timeout=timeout)
class Response(object):
    """
    The Response object stores the details of an API request in an XML/JSON
    agnostic way.
    """
    def __init__(self, code, body, raw_body, raw_response):
        # code: HTTP status; body: decoded payload;
        # raw_body/raw_response: unparsed text and backend object,
        # kept for debugging.
        self.code, self.body = code, body
        self.raw_body, self.raw_response = raw_body, raw_response
class Account(HTTPBackend):
    """ Account object """
    def __init__(self, base_url, api_key=None, as_xml=False, timeout=None):
        """
        Initializes an Account object, rooted at the 'account' resource.
        """
        super(Account, self).__init__(base_url, api_key, as_xml, 'account', timeout=timeout)

    def create(self, email, tos=1, options=None):
        """
        Creates an account with Zencoder, no API Key necessary.
        """
        data = {'email': email, 'terms_of_service': str(tos)}
        data.update(options or {})
        return self.post(self.base_url, body=self.encode(data))

    def details(self):
        """
        Gets your account details.
        """
        return self.get(self.base_url, data={'api_key': self.api_key})

    def integration(self):
        """
        Puts your account into integration mode.
        """
        return self.get(self.base_url + '/integration',
                        data={'api_key': self.api_key})

    def live(self):
        """
        Puts your account into live mode.
        """
        return self.get(self.base_url + '/live',
                        data={'api_key': self.api_key})
class Output(HTTPBackend):
    """ Gets information regarding outputs """
    def __init__(self, base_url, api_key, as_xml=False, timeout=None):
        """
        Contains all API methods relating to Outputs, rooted at the
        'outputs' resource.
        """
        super(Output, self).__init__(base_url, api_key, as_xml, 'outputs', timeout=timeout)

    def progress(self, output_id):
        """
        Gets the given output id's progress.
        """
        url = self.base_url + '/%s/progress' % str(output_id)
        return self.get(url, data={'api_key': self.api_key})

    def details(self, output_id):
        """
        Gets the given output id's details.
        """
        url = self.base_url + '/%s' % str(output_id)
        return self.get(url, data={'api_key': self.api_key})
class Job(HTTPBackend):
    """
    Contains all API methods relating to transcoding Jobs.
    """
    def __init__(self, base_url, api_key, as_xml=False, timeout=None, test=False):
        """
        Initialize a job object, rooted at the 'jobs' resource.
        """
        super(Job, self).__init__(base_url, api_key, as_xml, 'jobs', timeout=timeout, test=test)

    def create(self, input, outputs=None, options=None):
        """
        Create a job.

        @param input: the input url as string
        @param outputs: a list of output dictionaries
        @param options: a dictionary of job options
        """
        data = {"api_key": self.api_key,
                "input": input,
                "test": int(self.test)}
        if outputs:
            data['outputs'] = outputs
        data.update(options or {})
        return self.post(self.base_url, body=self.encode(data))

    def list(self, page=1, per_page=50):
        """
        List jobs, paginated.
        """
        return self.get(self.base_url, data={"api_key": self.api_key,
                                             "page": page,
                                             "per_page": per_page})

    def details(self, job_id):
        """
        Gets details for the given job.
        """
        return self.get(self.base_url + '/%s' % str(job_id),
                        data={'api_key': self.api_key})

    def progress(self, job_id):
        """
        Gets the transcoding progress for the given job.
        """
        return self.get(self.base_url + '/%s/progress' % str(job_id),
                        data={'api_key': self.api_key})

    def resubmit(self, job_id):
        """
        Resubmits a job.
        """
        url = self.base_url + '/%s/resubmit?api_key=%s' % (str(job_id), self.api_key)
        return self.put(url)

    def cancel(self, job_id):
        """
        Cancels a job.
        """
        url = self.base_url + '/%s/cancel?api_key=%s' % (str(job_id), self.api_key)
        return self.put(url)

    def delete(self, job_id):
        """
        Deletes a job (alias for cancel).
        """
        return self.cancel(job_id)
|
|
import os
import sys
import shutil
import subprocess
import boto3
import botocore
import hashlib
import struct
from multiprocessing.pool import ThreadPool
from threading import Semaphore
import urllib2
from urllib import urlretrieve
from timeit import default_timer as now
import json
from collections import OrderedDict
import math
import mxnet as mx
import numpy as np
from PIL import Image
from io import BytesIO
import os.path
# ResNet-18 checkpoint file names; downloaded into Lambda's /tmp at run time
f_params = 'resnet-18-0000.params'
f_symbol = 'resnet-18-symbol.json'
f_params_file = '/tmp/' + f_params
f_symbol_file = '/tmp/' + f_symbol
# static decoder binary and local scratch directories (Lambda /tmp)
DECODER_PATH = '/tmp/DecoderAutomataCmd-static'
TEMP_OUTPUT_DIR = '/tmp/output'
LOCAL_INPUT_DIR = '/tmp/input'
# frames per chunk when driving the handler from __main__
WORK_PACKET_SIZE = 50
DEFAULT_LOG_LEVEL = 'warning'
DEFAULT_OUTPUT_BATCH_SIZE = 1
# when False, TEMP_OUTPUT_DIR is deleted at the end of each invocation
DEFAULT_KEEP_OUTPUT = False
MAX_PARALLEL_UPLOADS = 20
# default S3 key prefix for prediction output
DEFAULT_OUT_FOLDER = 'fused-decode-mxnet-output'
OUTPUT_FILE_EXT = 'jpg'
def list_output_files():
    """Return the names of decoded frame files (*.jpg) in TEMP_OUTPUT_DIR."""
    suffix = '.{0}'.format(OUTPUT_FILE_EXT)
    return [name for name in os.listdir(TEMP_OUTPUT_DIR)
            if name.endswith(suffix)]
def get_mxnet_input(startFrame):
    """
    Read the decoded frames frame{startFrame}..frame{startFrame+N-1}
    from TEMP_OUTPUT_DIR and return their raw JPEG bytes in frame order.

    Exits the process when an expected frame file is missing.
    """
    outputFiles = list_output_files()
    frames = []
    for idx in range(startFrame, startFrame + len(outputFiles)):
        fileName = 'frame{:d}.jpg'.format(idx)
        if fileName not in outputFiles:
            print('ERROR: Cannot find file: {:s}'.format(fileName))
            exit()
        with open(os.path.join(TEMP_OUTPUT_DIR, fileName), 'rb') as ifs:
            frames.append(ifs.read())
    return frames
def load_model(s_fname, p_fname):
    """
    Load an MXNet model checkpoint from file.

    :return: (symbol, arg_params, aux_params)
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.
    """
    symbol = mx.symbol.load(s_fname)
    save_dict = mx.nd.load(p_fname)
    # checkpoint keys look like 'arg:name' or 'aux:name'
    params = {'arg': {}, 'aux': {}}
    for key, value in save_dict.items():
        prefix, name = key.split(':', 1)
        if prefix in params:
            params[prefix][name] = value
    return symbol, params['arg'], params['aux']
def download_input_from_s3(bucketName, inputPrefix, startFrame):
    """
    Download the protobuf metadata file and the binary chunk for the
    given start frame from s3://bucketName/inputPrefix into
    LOCAL_INPUT_DIR. Returns (protoPath, binPath) of the local copies.
    """
    def download_s3(s3Path, localPath):
        # `s3` is resolved from the enclosing scope; it is bound below,
        # before this helper is first called
        try:
            s3.Bucket(bucketName).download_file(s3Path, localPath)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise
    protoFileName = 'decode_args{:d}.proto'.format(startFrame)
    binFileName = 'start_frame{:d}.bin'.format(startFrame)
    # NOTE: the backslash continuation is inside the string literal, so the
    # printed message contains the second line's leading whitespace
    print('Downloading files {:s} and {:s} for batch {:d} \
          from s3: {:s}/{:s}'.format(protoFileName, binFileName, startFrame,
                                     bucketName, inputPrefix))
    s3 = boto3.resource('s3')
    s3ProtoName = inputPrefix + '/' + protoFileName
    s3BinName = inputPrefix + '/' + binFileName
    protoPath = LOCAL_INPUT_DIR + '/' + protoFileName
    binPath = LOCAL_INPUT_DIR + '/' + binFileName
    download_s3(s3ProtoName, protoPath)
    download_s3(s3BinName, binPath)
    return protoPath, binPath
def upload_output_to_s3(bucketName, fileName, out):
    """
    Serialize `out` as JSON and upload it to s3://bucketName/fileName
    using REDUCED_REDUNDANCY storage.

    Client errors are printed and re-raised.
    """
    print('Uploading file {:s} to s3: {:s}'.format(fileName, bucketName))
    s3 = boto3.client('s3')
    try:
        s3.put_object(Body=json.dumps(out), Bucket=bucketName, Key=fileName,
                      StorageClass='REDUCED_REDUNDANCY')
    except botocore.exceptions.ClientError as e:
        # FIX: `print e` is a Python 2-only statement; print(e) behaves
        # identically on Python 2 (parenthesized expression) and Python 3,
        # matching the function-style prints used everywhere else here.
        print(e)
        raise
    print('Done: {:s}/{:s}'.format(bucketName, fileName))
def ensure_clean_state():
    """
    Reset local scratch state for an invocation: recreate the temporary
    output and input directories, then (re)download the static decoder
    binary and mark it executable.
    """
    for directory in (TEMP_OUTPUT_DIR, LOCAL_INPUT_DIR):
        if os.path.exists(directory):
            shutil.rmtree(directory)
        if not os.path.exists(directory):
            os.mkdir(directory)
    if os.path.exists(DECODER_PATH):
        os.remove(DECODER_PATH)
    # shutil.copy('DecoderAutomataCmd-static', DECODER_PATH)
    urlretrieve("https://s3-us-west-2.amazonaws.com/mxnet-params/DecoderAutomataCmd-static", DECODER_PATH)
    os.chmod(DECODER_PATH, 0o755)
def convert_to_jpegs(protoPath, binPath):
    """
    Run the static decoder binary to turn the protobuf/bin video chunk
    into individual JPEG frames under TEMP_OUTPUT_DIR.

    Returns True iff the decoder process exited with status 0.
    """
    assert(os.path.exists(TEMP_OUTPUT_DIR))
    cmd = [DECODER_PATH, protoPath, binPath, TEMP_OUTPUT_DIR]
    # NOTE(review): shell=True with a joined command string; the paths here
    # are trusted internal /tmp paths, but this pattern is unsafe for
    # untrusted input
    process = subprocess.Popen(
        ' '.join(cmd), shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out, err = process.communicate()
    rc = process.returncode
    # Python 2 multi-argument print statements (this file is Python 2)
    print 'stdout:', out
    print 'stderr:', err
    return rc == 0
def predict_batch(batch_size, data, mod):
    '''
    predict labels for a given batch of images

    data is a list of raw JPEG byte strings; mod is a bound MXNet module.
    Returns a hand-built JSON string mapping the relative frame id to a
    one-entry {category: confidence} object for the top-1 prediction.
    '''
    data_size = len(data)
    cnt = 0
    new_width, new_height = 224, 224
    out = "{"
    while cnt < data_size:
        # execute one batch
        img_list = []
        for frame in data[cnt:cnt+batch_size]:
            img = Image.open(BytesIO(frame))
            width, height = img.size # Get dimensions
            # center-crop to 224x224; Python 2 integer division keeps the
            # crop offsets integral (they would be floats on Python 3)
            left = (width - new_width)/2
            top = (height - new_height)/2
            right = (width + new_width)/2
            bottom = (height + new_height)/2
            img = img.crop((left, top, right, bottom))
            # convert to numpy.ndarray
            sample = np.asarray(img)
            # swap axes to make image from (224, 224, 3) to (3, 224, 224)
            sample = np.swapaxes(sample, 0, 2)
            img = np.swapaxes(sample, 1, 2)
            img_list.append(img)
        batch = mx.io.DataBatch([mx.nd.array(img_list)], [])
        mod.forward(batch)
        probs = mod.get_outputs()[0].asnumpy()
        # Python 2 print statement
        print probs.shape
        cnt_local = cnt
        # the output format is : first is the relative id of the frame
        # then the second.first is the category (num), second.second is the
        # probability / confidence of the category
        # Be aware that this is different from previous version!
        for prob in probs:
            prob = np.squeeze(prob)
            # indices of categories sorted by descending confidence
            a = np.argsort(prob)[::-1]
            if cnt_local == 0:
                out += '"0" : {{"{}" : "{}"}}'.format(a[0], prob[a[0]])
            else:
                out += ', "{:d}" : {{"{}" : "{}"}}'.format(cnt_local,
                                                           a[0], prob[a[0]])
            cnt_local += 1
        cnt += batch_size
    out += "}"
    return out
def handler(event, context):
    """
    Lambda entry point: download one protobuf/bin video chunk from S3,
    decode it into JPEG frames, classify every frame with ResNet-18 and
    upload the predictions as a single JSON object back to S3.

    Per-phase wall-clock timings are printed and collected in `timelist`.
    Returns a dict with statusCode and the processed startFrame /
    outputBatchSize.
    """
    timelist = OrderedDict()
    start = now()
    ensure_clean_state()
    end = now()
    print('Time to prepare decoder: {:.4f} s'.format(end - start))
    timelist["prepare-decoder"] = (end - start)
    # defaults; overridden by the event payload below
    inputBucket = 'vass-video-samples2'
    inputPrefix = 'protobin/example3_134'
    startFrame = 0
    outputBatchSize = 50
    outputBucket = "vass-video-samples2-results"
    outputPrefix = DEFAULT_OUT_FOLDER
    if 'inputBucket' in event:
        inputBucket = event['inputBucket']
        # derived default; an explicit 'outputBucket' below still wins
        outputBucket = inputBucket + '-results'
    else:
        print('Warning: using default input bucket: {:s}'.format(inputBucket))
    if 'outputBucket' in event:
        outputBucket = event['outputBucket']
    else:
        print('Warning: using default output bucket: {:s}'.format(outputBucket))
    if 'inputPrefix' in event:
        inputPrefix = event['inputPrefix']
    else:
        print('Warning: using default input prefix: {:s}'.format(inputPrefix))
    if 'startFrame' in event:
        startFrame = event['startFrame']
    else:
        print('Warning: default startFrame: {:d}'.format(startFrame))
    if 'outputBatchSize' in event:
        outputBatchSize = event['outputBatchSize']
    else:
        print('Warning: default batch size: {:d}'.format(outputBatchSize))
    if 'outputPrefix' in event:
        outputPrefix = event['outputPrefix']
    # NOTE(review): suffix applied unconditionally (also to the default
    # prefix) — confirm this matches the original indentation intent
    outputPrefix = outputPrefix + '/{}_{}'.format(inputPrefix.split('/')[-1],
                                                  outputBatchSize)
    start = now()
    protoPath, binPath = download_input_from_s3(inputBucket, inputPrefix,
                                                startFrame)
    end = now()
    print('Time to download input files: {:.4f} s'.format(end - start))
    timelist["download-input"] = (end - start)
    inputBatch = 0
    try:
        try:
            start = now()
            if not convert_to_jpegs(protoPath, binPath):
                raise Exception('Failed to decode video chunk {:d}'.format(startFrame))
            end = now()
            print('Time to decode: {:.4f} '.format(end - start))
            timelist["decode"] = (end - start)
        finally:
            # raw input chunk is no longer needed once decoded (or failed)
            shutil.rmtree(LOCAL_INPUT_DIR)
        # start = now()
        # if outputBatchSize > 1:
        #     inputBatch = combine_output_files(startFrame, outputBatchSize)
        # end = now()
        # print('Time to combine output files: {:.4f} '.format(end - start))
        # timelist["combine-output"] = (end - start)
        # start = now()
        # fileCount, totalSize = upload_output_to_s3(outputBucket, outputPrefix)
        # end = now()
        # if outputBatchSize == 1:
        #     inputBatch = fileCount
        # print('Time to upload output files: {:.4f} '.format(end - start))
        # timelist["upload-output"] = (end - start)
        # instead of uploading, now we start the MXNet directly!
        start = now()
        urlretrieve("https://s3-us-west-2.amazonaws.com/mxnet-params/resnet-18-0000.params", f_params_file)
        urlretrieve("https://s3-us-west-2.amazonaws.com/mxnet-params/resnet-18-symbol.json", f_symbol_file)
        end = now()
        print('Time to download MXNet model: {:.4f} s'.format(end - start))
        timelist["download-model"] = (end - start)
        start = now()
        data = get_mxnet_input(startFrame)
        # a single prediction batch spans all decoded frames
        outputBatchSize = len(data)
        end = now()
        print('Time to extract {:d} file: {:.4f} s'.format(outputBatchSize, end - start))
        timelist["extract"] = (end - start)
        start = now()
        sym, arg_params, aux_params = load_model(f_symbol_file, f_params_file)
        mod = mx.mod.Module(symbol=sym, label_names=None)
        mod.bind(for_training=False, data_shapes=[('data', (outputBatchSize,3,224,224))],
                 label_shapes=mod._label_shapes)
        mod.set_params(arg_params, aux_params, allow_missing=True)
        end = now()
        print('Time to prepare and load parameters: {:.4f} s'.format(end - start))
        timelist["load-model"] = end - start
        start = now()
        labels = predict_batch(outputBatchSize, data, mod)
        end = now()
        print('Time to predict the {:d} batch: {:.4f} s'.format(outputBatchSize,
                                                                end - start))
        timelist["predict"] = end - start
        start = now()
        outputKey = os.path.join(outputPrefix, 'frame{:d}-{:d}.out'.format(
            startFrame, outputBatchSize))
        out = {
            "results": labels
        }
        upload_output_to_s3(outputBucket, outputKey, out)
        end = now()
        print('Time to upload to s3 is: {:.4f} s'.format(end - start))
        timelist["upload-output"] = end - start
    finally:
        start = now()
        if not DEFAULT_KEEP_OUTPUT:
            shutil.rmtree(TEMP_OUTPUT_DIR)
        end = now()
        print('Time to clean output files: {:.4f} '.format(end - start))
        timelist["clean-output"] = (end - start)
    # timelist["input-batch"] = inputBatch
    timelist["output-batch"] = outputBatchSize
    # Python 2 print statement
    print 'Timelist:' + json.dumps(timelist)
    out = {
        'statusCode': 200,
        'body': {
            'startFrame': startFrame,
            'outputBatchSize': outputBatchSize
        }
    }
    return out
if __name__ == '__main__':
    # Local driver: replay the Lambda handler over a whole video in
    # WORK_PACKET_SIZE-frame chunks and report simulated Lambda billing.
    inputBucket = 'vass-video-samples2'
    inputPrefix = 'protobin/example3_138_50'
    startFrame = 0
    outputBatchSize = 50
    outputPrefix = 'fused-decode-mxnet-output'
    totalFrame = 6221
    if (len(sys.argv) > 1):
        # optional CLI argument caps the number of frames processed
        totalFrame = min(int(sys.argv[1]), totalFrame)
    for startFrame in xrange(0, totalFrame, WORK_PACKET_SIZE):
        event = {
            'inputBucket': inputBucket,
            'inputPrefix': inputPrefix,
            'startFrame': startFrame,
            'outputBatchSize': outputBatchSize,
            'outputPrefix': outputPrefix
        }
        start = now()
        result = handler(event, {})
        end = now()
        duration = (end - start) * 1000
        # Lambda bills in 100 ms increments
        billedDuration = math.ceil(duration / 100.0) * 100.0
        print('Duration: {:.2f} ms Billed Duration: {:.0f} ms Memory Size: 1536 MB Max Memory Used: 1536 MB'.format(duration, billedDuration))
|
|
" analytical test problem to validate 2D and 3D solvers "
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from collections import OrderedDict
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- define parameters ---
# (nanopores.add_params injects these as module-level names and into PARAMS)
add_params(
    bV = -0.1, # [V]
    rho = -0.05, # [C/m**2]
    h2D = .2,     # 2D mesh size
    h3D = .2,     # 3D mesh size
    Nmax = 1e5,
    damp = 1.,    # damping for the fixed-point iteration
    bulkcon = 300.,  # bulk concentration [mol/m**3]
    iterative = True,  # use iterative linear solvers in 3D
)
print PARAMS
# --- create 2D geometry ---
# axisymmetric half-section of a cylindrical channel, r in [0, R],
# z in [-Rz, Rz], with a thin crossection slab for current evaluation
Rz = 2. # [nm] length in z direction of channel part
R = 1. # [nm] pore radius
hcross = .2 # [nm] height of crossection
domain2D = Box([0., -Rz], [R, Rz])
cross = Box([0., 0.], [R, hcross])
domain2D.addsubdomains(
    main = domain2D - cross,
    cross = cross,
)
domain2D.addboundaries(
    lowerb = domain2D.boundary("bottom"),
    upperb = domain2D.boundary("top"),
    wall = domain2D.boundary("right"),
    cross = cross.boundary("bottom"),
    #center = domain2D.boundary("left")
)
# geometry is specified in nm; lscale converts to meters
domain2D.params["lscale"] = 1e9
domain2D.synonymes = dict(
    fluid = {"main", "cross"},
    pore = "fluid",
    #chargedmembraneb = "wall",
    noslip = "wall",
    nopressure = "center",
    bulk = {"upperb", "lowerb"},
    #nocbc = {"lowerb"},
)
geo2D = domain2D.create_geometry(lc=h2D)
print "Number of cells (2D):", geo2D.mesh.num_cells()
#domain2D.plot()
# --- create 3D geometry by rotating ---
# full cylinder obtained by rotating the 2D half-section around z
domain3D = rotate_z(domain2D)
geo3D = domain3D.create_geometry(lc=h3D)
print "Number of cells (3D):", geo3D.mesh.num_cells()
#domain3D.plot()
# --- create geometry for 1D crossection ---
# TODO: it would be cool if the 1D domain could be extracted from the 2D one
# (should be pretty easy)
domain1D = Interval(0., R)
domain1D.addsubdomain(domain1D, "fluid")
domain1D.addboundaries(
    wall = domain1D.boundary("right"),
    center = domain1D.boundary("left")
)
domain1D.params["lscale"] = 1e9
domain1D.synonymes = dict(
    pore = "fluid",
    chargedmembraneb = "wall",
)
# very fine 1D mesh: this is the "exact" reference solution
geo1D = domain1D.create_geometry(lc=.001)
# --- define physical parameters for 1D problem ---
phys_params = dict(
    Membraneqs = rho,  # wall surface charge [C/m**2]
    bulkcon = bulkcon,
    v0 = {}
)
phys = Physics("pore", geo1D, **phys_params)
# --- solve 1D problem for "exact" solution ---
pb = solve_pde(SimplePBProblem, geo1D, phys, cyl=True, iterative=False, tolnewton=1e-10)
# define expressions for interpolation into 2D/3D
phi = pb.solution      # radial PB potential phi(r)
UT = phys.UT           # thermal voltage
c0 = phys.bulkcon
D = phys.DPore         # ion diffusion constant in the pore
lscale = phys.lscale
E0 = -lscale*bV/(2.*Rz)  # constant applied axial field [V/m]
eps = phys.permittivity["water"]
eta = phys.eta           # dynamic viscosity
print "Diffusion constant in pore:", D*1e9, "[nm**2/ns]"
print "Constant electric field:", E0*1e-9, "[V/nm]"
print "Debye length:", phys.debye*1e9, "[nm]"
# radially symmetric PB reference profiles (functions of the radius x)
def cpPB(x):
    # cation concentration from Boltzmann statistics
    return c0*exp(-phi(x)/UT)
def cmPB(x):
    # anion concentration
    return c0*exp(phi(x)/UT)
def pPB(x):
    # osmotic pressure relative to the channel axis (x = 0)
    return -2.*c0*cFarad*UT*(math.cosh(phi(x)/UT) - math.cosh(phi(0.)/UT))
def uPB(x):
    # axial electroosmotic velocity; vanishes at the wall (x = R)
    return eps*E0/eta*(phi(x) - phi(R))
def r(x): # radius for 2D AND 3D:
    return sqrt(sum(t**2 for t in x[:-1]))
# dolfin Expressions that interpolate the 1D PB solution into 2D/3D;
# x[-1] is the axial coordinate, r(x) the radius
class vPB(Expression):
    def eval(self, value, x):
        # linear applied potential in z plus radial PB potential
        value[0] = bV*x[-1]/(2.*Rz) + phi(r(x))
class JpPB(Expression):
    def eval(self, value, x):
        # axial cation flux: electromigration + convection
        value[0] = (+D/UT*E0 + uPB(r(x)))*cpPB(r(x))
class JmPB(Expression):
    def eval(self, value, x):
        # axial anion flux
        value[0] = (-D/UT*E0 + uPB(r(x)))*cmPB(r(x))
class cpPBEx(Expression):
    def eval(self, value, x):
        value[0] = cpPB(r(x))
class cmPBEx(Expression):
    def eval(self, value, x):
        value[0] = cmPB(r(x))
class pPBEx(Expression):
    def eval(self, value, x):
        value[0] = pPB(r(x))
# compute "exact" current
r2pi = Expression("2*pi*x[0]")
u_PB = Constant(eps/eta)*(phi - Constant(phi(R)))
J_el = Constant(D/UT)*(exp(-phi/UT) + exp(phi/UT))
J_u = u_PB*(exp(-phi/UT) - exp(phi/UT))
J_PB_el = assemble(Constant(cFarad*c0*E0/lscale**2)*J_el*r2pi*dx)
J_PB_u = assemble(Constant(cFarad*c0*E0/lscale**2)*J_u*r2pi*dx)
J_PB = J_PB_el + J_PB_u
print "J (PB): %s [A]" % J_PB
print " J_el: %s [A]" % J_PB_el
print " J_u : %s [A]" % J_PB_u
# --- define physical parameters and customized BCs of 2D problem ---
# constant Dirichlet BCs for v, cp, cm on wall,
# non-zero flux BCs on top/bottom
# non-standard pressure BC
lscale = Constant(phys.lscale)  # NOTE: rebinds lscale as a dolfin Constant
phys_params.update(
    cp0 = dict(
        wall = c0*exp(-phi(R)/UT),
        bulk = cpPBEx()),
    cm0 = dict(
        wall = c0*exp(+phi(R)/UT),
        bulk = cmPBEx()),
    v0 = dict(wall = vPB()),
    #cpflux = dict(bulk = JpPB()*n3D[2]),
    #cmflux = dict(bulk = JmPB()*n3D[2]),
    pressure = dict(bulk = pPBEx()),
    surfcharge = dict(
        wall = rho,
        upperb = lscale*eps*bV/(2.*Rz),
        lowerb = -lscale*eps*bV/(2.*Rz),)
)
phys2D = Physics("pore", geo2D, **phys_params)
phys3D = Physics("pore", geo3D, **phys_params)
# --- define goal functional: current through crosssection ---
grad = phys2D.grad
def J_PNP(U, geo):
    # Goal functional for the pure PNP system (no convection): electric
    # current through the crossection, both as a facet integral (Jsurf)
    # and as a volume average over the thin crossection slab (Jvol).
    v, cp, cm = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi  # cylindrical factor in 2D
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v))
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v))
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
def J(U, geo):
    # Goal functional for the coupled PNP+Stokes system: same current as
    # J_PNP but with the convective flux contribution cp*u / cm*u.
    v, cp, cm, u, p = U
    dim = geo.physics.dim
    coeff = Constant(1.) if dim==3 else r2pi  # cylindrical factor in 2D
    Jp = Constant(D)*(-grad(cp) - Constant(1/UT)*cp*grad(v)) + cp*u
    Jm = Constant(D)*(-grad(cm) + Constant(1/UT)*cm*grad(v)) + cm*u
    Jsurf = avg(Constant(cFarad/lscale**2)*(Jp - Jm)[dim-1] * coeff) * geo.dS("cross")
    Jvol = Constant(cFarad/lscale**2/hcross)*(Jp - Jm)[dim-1] * coeff * geo.dx("cross")
    return dict(Jsurf=Jsurf, Jvol=Jvol)
"""
def saveJ(self):
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB),
N=self.solution.function_space().dim())
"""
"""
def saveJ(self):
#i = self.geo.mesh.num_vertices()
i = len(self.functionals["Jvol"].values)
self.save_estimate("(Jsing_h - J)/J", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB), N=i)
self.save_estimate("(J_h - J)/J", abs((self.functionals["Jvol"].value()-J_PB)/J_PB), N=i)
"""
def saveJ(self):
#i = self.geo.mesh.num_vertices()
i = len(self.functionals["Jvol"].values) + 1
self.save_estimate("(Jsing_h - J)/J" + Dstr, abs((self.functionals["Jsurf"].evaluate()-J_PB)/J_PB), N=i)
self.save_estimate(r"$|J_h - J|/J$" + Dstr, abs((self.functionals["Jvol"].evaluate()-J_PB)/J_PB), N=i)
print " rel. error Jv:", abs((self.functionals["Jvol"].value()-J_PB)/J_PB)
print " rel. error Js:", abs((self.functionals["Jsurf"].value()-J_PB)/J_PB)
# --- set up PNP+Stokes problem ---
problems = OrderedDict([
    ("pnp", SimplePNPProblem),
    ("stokes", SimpleStokesProblem)])
def couple_pnp(ustokes):
    # PNP sees the fluid velocity (first subfunction of the Stokes solution)
    return dict(ustokes = ustokes.sub(0))
def couple_stokes(upnp, phys):
    # Stokes is driven by the electric body force -F*(cp - cm)*grad(v)
    v, cp, cm = upnp.split()
    f = -phys.cFarad*(cp - cm)*grad(v)
    return dict(f = f)
couplers = dict(
    pnp = couple_pnp,
    stokes = couple_stokes
)
# --- solve 2D problem ---
Dstr = " (2D)"  # label appended to estimator names by saveJ
problem = CoupledProblem(problems, couplers, geo2D, phys2D, cyl=True, conservative=False, ku=1, beta=10.)
pnps2D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2)
#pnps2D.single_solve(inside_loop=saveJ)
# record the current error after every fixed-point iteration
for i in pnps2D.fixedpoint():
    saveJ(pnps2D)
# --- solve 3D problem ---
Dstr = " (3D)"
problem = CoupledProblem(problems, couplers, geo3D, phys3D, cyl=False, conservative=False, ku=1, beta=1.)
# 3D systems are too large for direct solvers by default
problem.problems["pnp"].method["iterative"] = iterative
problem.problems["stokes"].method["iterative"] = iterative
pnps3D = CoupledSolver(problem, goals=[J], damp=damp, inewton=1, ipicard=30, tolnewton=1e-2)
#pnps3D.single_solve(inside_loop=saveJ)
for i in pnps3D.fixedpoint():
    saveJ(pnps3D)
# --- visualization ---
#pnps2D.visualize()
#pnps3D.visualize()
(v0, cp0, cm0, u0, p0) = pnps2D.solutions()
(v, cp, cm, u, p) = pnps3D.solutions()
#plot1D({"phi PB":phi}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
#plot1D({"phi (2D)": v0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "potential [V]"), newfig=False)
#plot1D({"phi (3D)": v}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "potential [V]"), newfig=False)
figsize = 5*0.8, 4*0.8
plt.figure("concentrations", figsize=figsize)
params = dict(axis="x", axlabels=("r [nm]", "concentration [mol/m$^3$]"), newfig=False)
plot1D({r"$c^+$ PB":cpPB, r"$c^-$ PB":cmPB},
(0., R, 101), dim=1, style="b-", **params)
plot1D({r"$c^+$ 2D": cp0, r"$c^-$ 2D": cm0},
(0., R, 11), dim=2, style="gs", **params)
plot1D({r"$c^+$ 3D": cp, r"$c^-$ 3D": cm},
(0.05*R, 0.95*R, 10), dim=3, style="ro", **params)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
"""
plot1D({
"c+ (2D)": cp0,
"c- (2D)": cm0,
"c+ PB":lambda x: cpPB(0.),
"c- PB":lambda x: cmPB(0.)},
(-Rz, Rz, 101), "y", origin=(.0, 0.), dim=2, axlabels=("z [nm]", "concentration [mol/m**3]"))
plot1D({"c+ (2D)": cp, "c- PNP (2D)": cm},
(-Rz, Rz, 101), "z", origin=(.0, 0., 0.), dim=3, axlabels=("z [nm]", "concentration [mol/m**3]"), newfig=False)
"""
plt.figure("velocity", figsize=figsize)
params = dict(axis="x", axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
plot1D({r"$u_z$ PB":uPB},
(0., R, 101), dim=1, style="b-", **params)
plot1D({r"$u_z$ 2D":u0[1]},
(0., R, 11), dim=2, style="gs", **params)
plot1D({r"$u_z$ 3D":u[2]},
(0.05*R, 0.95*R, 10), dim=3, style="ro", **params)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plot1D({"ur PB":lambda x:0.}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#plot1D({"ur (2D)":u0[0]}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"ur (3D)":u[0]}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"p PB":pPB}, (0., R, 101), "x", dim=1, axlabels=("r [nm]", "velocity [m/s]"))
#plot1D({"p (2D)":p0}, (0., R, 101), "x", dim=2, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
#plot1D({"p (3D)":p}, (0., R, 101), "x", dim=3, axlabels=("r [nm]", "velocity [m/s]"), newfig=False)
fig = plt.figure("hybrid", figsize=figsize)
pnps2D.estimators[r"$|J_h - J|/J$" + " (2D)"].newtonplot(fig=False)
pnps3D.estimators[r"$|J_h - J|/J$" + " (3D)"].newtonplot(fig=False)
fig.axes[0].xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
from folders import FIGDIR
savefigs("anaPNPS2", FIGDIR)
#pnps.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(Jsing_h - J)/J"].newtonplot()
#pnp.estimators["(J_h - J)/J"].newtonplot(fig=False)
#saveplots("anaPNPS", meta=PARAMS)
#showplots()
|
|
'''This module defines a class of objects, called FRED objects, that make it easy to download and manipulate data from the Federal Reserve Economic Database (FRED). FRED is a rich database managed by the Federal Reserve Bank of St Louis. Learn more about FRED:
http://research.stlouisfed.org/fred2/
An instance of a fred object is initialized with the command:
fredclass.fred(series_id)
where series_id is the unique Series ID for the FRED data series that is to be retrieved (string format).
Module dependencies: matplotlib, numpy, scipy, statsmodels
Created by: Brian C Jenkins. Email comments and suggestions to bcjenkin@uci.edu. Version date: August 29, 2014.'''
import urllib, dateutil, pylab, datetime
from scipy.signal import lfilter
import numpy as np
import statsmodels.api as sm
tsa = sm.tsa
#
# Oct. 27, 2013: Added:
# 1. 4 quarter moving average method
# 2. log method
# 3. bp, hp, cf filters
# 4. monthly-to-quarter frequency converter
# 5. per capita
# 6. quickplot function
# 7. window_equalize function
# 8. quarter-to-annual frequency converter
# Need to add:
# 8. (log) linear detrend
# Oct. 8, 2013: Commented raw2.pop(), Line 45. I don't know why it's there.
# Oct. 8, 2013: Changed Line 109 to max0 = T from max0 = T-1
# Jan. 22, 2014: Added:
# 1. monthtoannual
# Changed:
# 1. quartertoannuala to quartertoannual and changed the arguments.
# Removed / Commented out:
# 1. quartertoannualb
# Apr. 17, 2014: Changed:
# 1. the output of the filters to .cycle and .trend to save the
# original data.
# Added:
# 1. A linear trend method.
# 2. A first-difference method.
# 3. A function for computing linear trends of a set of series with a
# common trend.
# 4. A function for computing first differences of a set of series with
# a common trend.
# Aug. 21, 2014: Added:
# 1. Added better documentation for everything
# 2. Added new attributes for filtering methods.
# Mar. 21, 2015: Added:
# 1. Option to pc() method compute percentage change ahead. Default is still backwards
# 2. Option to apc() method compute annual percentage change ahead. Default is still backwards
#
# Jun. 5, 2015: Updated:
# 1. Changed how dates for .pc() method are adjusted so that the length of dates corresponds to length of data
# 2. Created an option for annualizing percentage change data
# 3. Added options for setting filtering parameters for BP, HP, CF filters
# 4. Additional functions toFred() and date_numbers()
class fred:
    def __init__(self,series_id):
        '''Download the series `series_id` from FRED and parse the header
        metadata (title, source, seasonality, frequency, units, date
        range, last-updated) plus the observations.

        Sets self.t to the number of observations per year inferred from
        the frequency text. Missing observations ('.') are stored as the
        string 'NaN'. Requires network access (Python 2 urllib.urlopen).
        '''
        # download fred series from FRED and save information about the series
        series_url = "http://research.stlouisfed.org/fred2/data/"
        series_url = series_url + series_id + '.txt'
        webs = urllib.urlopen(series_url)
        raw = [line for line in webs]
        for k, val in enumerate(raw):
            if raw[k][0:5] == 'Title':
                self.title = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'Sou':
                self.source = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'Sea':
                self.season = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'Fre':
                self.freq = " ".join(x for x in raw[k].split()[1:])
                # observations per year, from the frequency description
                if self.freq[0:5] == 'Daily':
                    self.t=365
                elif self.freq[0:6] == 'Weekly':
                    self.t=52
                elif self.freq[0:7] == 'Monthly':
                    self.t=12
                elif self.freq[0:9] == 'Quarterly':
                    self.t=4
                elif self.freq[0:6] == 'Annual':
                    self.t=1
            elif raw[k][0:3] == 'Uni':
                self.units = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'Dat':
                self.daterange = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'Las':
                self.updated = " ".join(x for x in raw[k].split()[1:])
            elif raw[k][0:3] == 'DAT':
                # observations start right after the DATE/VALUE header
                raw2 = raw[k+1:]
                break
        # raw2.pop()
        # Python 2: range() returns a list, so index assignment below works
        date=range(len(raw2))
        data=range(len(raw2))
        # Create data for FRED object. Replace missing values with NaN string
        for k,n in enumerate(raw2):
            date[k] = raw2[k].split()[0]
            if raw2[k].split()[1] != '.':
                data[k] = float(raw2[k].split()[1])
            else:
                data[k] = 'NaN'
        self.id = series_id
        self.data = data
        self.dates = date
        self.datenums = [dateutil.parser.parse(s) for s in self.dates]
def pc(self,log=True,method='backward',annualize=False):
'''Transforms data into percent change'''
T = len(self.data)
t = self.t
if log==True:
pct = [100 * np.log(self.data[k+1]/ self.data[k]) for k in range(T-1)]
else:
pct = [100 * (self.data[k+1] - self.data[k]) / self.data[k] for k in range(T-1)]
if annualize==True:
pct = [t*x for x in pct]
if method=='backward':
dte = self.dates[1:]
elif method=='forward':
dte = self.dates[:-1]
self.data =pct
self.dates =dte
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.units = 'Percent'
self.title = 'Percentage Change in '+self.title
def apc(self,log=True,method='backward'):
'''Transforms data into percent change from year ago'''
T = len(self.data)
t = self.t
if log==True:
pct = [100 * np.log(self.data[k+t]/ self.data[k]) for k in range(T-t)]
else:
pct = [100 * (self.data[k+t] - self.data[k]) / self.data[k] for k in range(T-t)]
if method=='backward':
dte = self.dates[t:]
elif method=='forward':
dte = self.dates[:T-t]
self.data =pct
self.dates =dte
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.units = 'Percent'
self.title = 'Annual Percentage Change in '+self.title
def ma(self,length):
'''Transforms data into a moving average of a specified length'''
T = len(self.data)
self.data = lfilter(np.ones(length)/length, 1, self.data)[length:]
# self.dates =self.dates[length:]
self.dates =self.dates[length/2:-length/2]
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.daterange = self.dates[0]+' to '+self.dates[-1]
self.title = 'Moving average of '+self.title
def replace(self,new):
'''Replaces the data attribue of object with a new series.
Be sure that the new series has the same length as the original data. '''
if len(new) != len(self.data):
print('New series and original have different lengths!')
self.data = new
def recent(self,lag=10):
'''lag is the number of obs to include in the window'''
t = self.t
self.data =self.data[-lag * t:]
self.dates =self.dates[-lag * t:]
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.daterange = self.dates[0]+' to '+self.dates[-1]
def window(self,win):
'''Constrains the data to a specified date window.
win is an ordered pair: win = [win_min, win_max]
win_min is the date of the minimum date
win_max is the date of the maximum date
both are strings in 'yyyy-mm-dd' format'''
T = len(self.data)
win_min = win[0]
win_max = win[1]
win_min_num = pylab.date2num(dateutil.parser.parse(win_min))
win_max_num = pylab.date2num(dateutil.parser.parse(win_max))
date_num = pylab.date2num([dateutil.parser.parse(s) for s in self.dates])
dumpy = date_num.tolist()
min0 = 0
max0 = T
t = self.t
if win_min_num > min(date_num):
for k in range(T):
if win_min_num <= dumpy[k]:
min0 = k
break
if win_max_num < max(date_num):
'Or here'
for k in range(T):
if win_max_num < dumpy[k]:
max0 = k
break
self.data = self.data[min0:max0]
self.dates = self.dates[min0:max0]
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.daterange = self.dates[0]+' to '+self.dates[-1]
def log(self):
'''Tansforms data into natural log of original series'''
self.data = [np.log(s) for s in self.data]
self.units= 'log '+self.units
self.title = 'Log '+self.title
def bpfilter(self,low=6,high=32,K=12):
'''Computes the bandpass (Baxter-King) filter of the series. Adds attributes:
self.bpcycle : cyclical component of series
self.bpdates : dates of bp filtered data
self.bpdatenums : date numbers of bp filtered data
'''
if low==6 and high==32 and K==12 and self.t !=4:
print 'Warning: data frequency is not quarterly!'
elif low==3 and high==8 and K==1.5 and self.t !=1:
print 'Warning: data frequency is not annual!'
self.bpcycle = tsa.filters.bkfilter(self.data,low=low,high=high,K=K)
self.bpdates = self.dates[K:-K]
self.bpdatenums = [dateutil.parser.parse(s) for s in self.bpdates]
def hpfilter(self,lamb=1600):
'''Computes the Hodrick-Prescott filter of original series. Adds attributes:
self.hpcycle : cyclical component of series
self.hptrend : trend component of series
'''
if lamb==1600 and self.t !=4:
print 'Warning: data frequency is not quarterly!'
elif lamb==129600 and self.t !=12:
print 'Warning: data frequency is not monthly!'
elif lamb==6.25 and self.t !=1:
print 'Warning: data frequency is not annual!'
self.hpcycle, self.hptrend = tsa.filters.hpfilter(self.data,lamb=lamb)
def cffilter(self,low=6,high=32,drift=True):
'''Computes the Christiano-Fitzgerald filter of original series. Adds attributes:
self.cffcycle : cyclical component of series
self.cfftrend : trend component of series
'''
if low==6 and high==32 and self.t !=4:
print 'Warning: data frequency is not quarterly!'
elif low==1.5 and high==8 and self.t !=4:
print 'Warning: data frequency is not quarterly!'
self.cffcycle, self.cfftrend = tsa.filters.cffilter(self.data,low=low, high=high, drift=drift)
def lintrend(self):
'''Computes the linear trend of original series. Adds attributes:
self.lincycle : cyclical component of series
self.lintrend : trend component of series
'''
Y = self.data
time = np.arange(len(self.data))
ones = np.ones(len(self.data))
X = np.column_stack([ones,time])
model = sm.OLS(Y, X)
result= model.fit()
pred = model.predict(X)
self.lincycle= [y-p for y,p in zip(Y,pred)]
self.lintrend= pred
def firstdiff(self):
'''Computes the first difference of original series. Adds attributes:
self.diffcycle : cyclical component of series
self.difftrend : trend component of series
self.diffdates : shorter date sequence
self.diffdatenums : shorter date numbers
self.diffdata : shorter data series
'''
Y = self.data[1:]
YL = self.data[0:-1]
DY = [y-yl for y,yl in zip(Y,YL)]
gam = np.mean(DY)
self.diffcycle = [d - gam for d in DY]
self.diffdates = self.dates[1:]
self.diffdatenums= self.datenums[1:]
self.diffdata = self.data[1:]
self.difftrend = [yl + gam for yl in YL]
def monthtoquarter(self,method='AVG'):
'''Converts monthly data to quarterly data using one of three methods:
AVG : average of three months (default)
SUM : sum of three months
END : third month value only
'''
if self.t !=12:
print 'Warning: data frequency is not monthly!'
T = len(self.data)
temp_data = self.data[0:0]
temp_dates = self.datenums[0:0]
if method == 'AVG':
for k in range(1,T-1):
if (self.datenums[k].month == 2) or (self.datenums[k].month == 5) or (self.datenums[k].month == 8) or (self.datenums[k].month == 11):
temp_data.append((self.data[k-1]+self.data[k]+self.data[k+1])/3)
temp_dates.append(self.dates[k-1])
elif method == 'SUM':
for k in range(1,T-1):
if (self.datenums[k].month == 2) or (self.datenums[k].month == 5) or (self.datenums[k].month == 8) or (self.datenums[k].month == 11):
temp_data.append((self.data[k-1]+self.data[k]+self.data[k+1]))
temp_dates.append(self.dates[k-1])
elif method== 'END':
for k in range(1,T-1):
if (self.datenums[k].month == 2) or (self.datenums[k].month == 5) or (self.datenums[k].month == 8) or (self.datenums[k].month == 11):
temp_data.append(self.data[k+1])
temp_dates.append(self.dates[k-1])
self.data = temp_data
self.dates = temp_dates
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.t = 4
def quartertoannual(self,method='AVG'):
'''Converts quaterly data to annual using one of three methods:
AVG : average of three months (default)
SUM : sum of three months
END : third month value only
'''
if self.t !=4:
print 'Warning: data frequency is not quarterly!'
T = len(self.data)
temp_data = self.data[0:0]
temp_dates = self.datenums[0:0]
if method =='AVG':
for k in range(0,T):
'''Annual data is the average of monthly data'''
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>3):
temp_data.append((self.data[k]+self.data[k+1]+self.data[k+2]+self.data[k+3])/4)
temp_dates.append(self.dates[k])
elif method=='SUM':
for k in range(0,T):
'''Annual data is the sum of monthly data'''
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>3):
temp_data.append(self.data[k]+self.data[k+1]+self.data[k+2]+self.data[k+3])
temp_dates.append(self.dates[k])
else:
for k in range(0,T):
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>3):
'''Annual data is the end of month value'''
temp_data.append(self.data[k+3])
temp_dates.append(self.dates[k])
self.data = temp_data
self.dates = temp_dates
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.t = 1
def monthtoannual(self,method='AVG'):
'''Converts monthly data to annual data using one of three methods:
AVG : average of three months (default)
SUM : sum of three months
END : third month value only
'''
if self.t !=12:
print 'Warning: data frequency is not monthly!'
T = len(self.data)
temp_data = self.data[0:0]
temp_dates = self.datenums[0:0]
if method =='AVG':
for k in range(0,T):
'''Annual data is the average of monthly data'''
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>11):
temp_data.append((self.data[k]+self.data[k+1]+self.data[k+2]+ self.data[k+3] + self.data[k+4] + self.data[k+5]
+ self.data[k+6] + self.data[k+7] + self.data[k+8] + self.data[k+9] + self.data[k+10] + self.data[k+11])/12)
temp_dates.append(self.dates[k])
elif method =='SUM':
for k in range(0,T):
'''Annual data is the sum of monthly data'''
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>11):
temp_data.append((self.data[k]+self.data[k+1]+self.data[k+2]+ self.data[k+3] + self.data[k+4] + self.data[k+5]
+ self.data[k+6] + self.data[k+7] + self.data[k+8] + self.data[k+9] + self.data[k+10] + self.data[k+11]))
temp_dates.append(self.dates[k])
else:
for k in range(0,T):
'''Annual data is the end of year value'''
if (self.datenums[k].month == 1) and (len(self.datenums[k:])>11):
temp_data.append(self.data[k+11])
temp_dates.append(self.dates[k])
self.data = temp_data
self.dates = temp_dates
self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.t = 1
def percapita(self,pop_type = 1):
'''Converts data to per capita (US) using one of two methods:
pop_type == 1 : total population US population
pop_type != 1 : Civilian noninstitutional population is defined as persons 16 years of
age and older
'''
T = len(self.data)
temp_data = self.data[0:0]
temp_dates = self.dates[0:0]
if pop_type ==1:
populate= fred('POP')
else:
populate= fred('CNP16OV')
T2 = len(populate.data)
# Generate quarterly population data.
if self.t == 4:
for k in range(1,T2-1):
if (populate.datenums[k].month == 2) or (populate.datenums[k].month == 5) or (populate.datenums[k].month == 8) or \
(populate.datenums[k].month == 11):
temp_data.append((populate.data[k-1]+populate.data[k]+populate.data[k+1])/3)
temp_dates.append(populate.dates[k])
# Generate annual population data.
if self.t == 1:
for k in range(0,T2):
if (populate.datenums[k].month == 1) and (len(populate.datenums[k:])>11):
temp_data.append((populate.data[k]+populate.data[k+1]+populate.data[k+2]+populate.data[k+3]+populate.data[k+4]+populate.data[k+5] \
+populate.data[k+6]+populate.data[k+7]+populate.data[k+8]+populate.data[k+9]+populate.data[k+10]+populate.data[k+11])/12)
temp_dates.append(populate.dates[k])
if self.t == 12:
temp_data = populate.data
temp_dates = populate.dates
# form the population objects.
populate.data = temp_data
populate.dates = temp_dates
populate.datenums = [dateutil.parser.parse(s) for s in populate.dates]
# find the minimum of data window:
if populate.datenums[0].date() <= self.datenums[0].date():
win_min = self.datenums[0].strftime('%Y-%m-%d')
else:
win_min = populate.datenums[0].strftime('%Y-%m-%d')
# find the maximum of data window:
if populate.datenums[-1].date() <= self.datenums[-1].date():
win_max = populate.datenums[-1].strftime('%Y-%m-%d')
else:
win_max = self.datenums[-1].strftime('%Y-%m-%d')
# set data window
windo = [win_min,win_max]
populate.window(windo)
self.window(windo)
self.data = [a/b for a,b in zip(self.data,populate.data)]
# self.dates = temp_dates
# self.datenums = [dateutil.parser.parse(s) for s in self.dates]
self.title = self.title+' Per Capita'
self.unit = self.units+' Per Thousand People'
def recessions(self):
'''Method creates gray recession bars for plots. Should be used after a plot has been made but
before either (1) a new plot is created or (2) a show command is issued.'''
peaks =[
'1857-06-01',
'1860-10-01',
'1865-04-01',
'1869-06-01',
'1873-10-01',
'1882-03-01',
'1887-03-01',
'1890-07-01',
'1893-01-01',
'1895-12-01',
'1899-06-01',
'1902-09-01',
'1907-05-01',
'1910-01-01',
'1913-01-01',
'1918-08-01',
'1920-01-01',
'1923-05-01',
'1926-10-01',
'1929-08-01',
'1937-05-01',
'1945-02-01',
'1948-11-01',
'1953-07-01',
'1957-08-01',
'1960-04-01',
'1969-12-01',
'1973-11-01',
'1980-01-01',
'1981-07-01',
'1990-07-01',
'2001-03-01',
'2007-12-01']
troughs =[
'1858-12-01',
'1861-06-01',
'1867-12-01',
'1870-12-01',
'1879-03-01',
'1885-05-01',
'1888-04-01',
'1891-05-01',
'1894-06-01',
'1897-06-01',
'1900-12-01',
'1904-08-01',
'1908-06-01',
'1912-01-01',
'1914-12-01',
'1919-03-01',
'1921-07-01',
'1924-07-01',
'1927-11-01',
'1933-03-01',
'1938-06-01',
'1945-10-01',
'1949-10-01',
'1954-05-01',
'1958-04-01',
'1961-02-01',
'1970-11-01',
'1975-03-01',
'1980-07-01',
'1982-11-01',
'1991-03-01',
'2001-11-01',
'2009-06-01']
if len(troughs) < len(peaks):
today = datetime.date.today()
troughs.append(str(today))
T = len(self.data)
S = len(peaks)
date_num = pylab.date2num([dateutil.parser.parse(s) for s in self.dates])
peaks_num = pylab.date2num([dateutil.parser.parse(s) for s in peaks])
troughs_num = pylab.date2num([dateutil.parser.parse(s) for s in troughs])
datesmin = min(date_num)
datesmax = max(date_num)
peaksmin = min(peaks_num)
peaksax = max(peaks_num)
troughsmin=min(troughs_num)
troughsmax=max(troughs_num)
if datesmin <= peaksmin:
'Nothing to see here'
min0 = 0
else:
'Or here'
for k in range(S):
if datesmin <= peaks_num[k]:
min0 = k
break
if datesmax >= troughsmax:
max0 = len(troughs)-1
else:
'Or here'
for k in range(S):
if datesmax < troughs_num[k]:
max0 = k
break
if datesmax < troughsmax:
if peaks_num[max0]<datesmax and troughs_num[min0-1]>datesmin:
peaks2 = peaks[min0:max0]
peaks2.append(peaks[max0])
peaks2.insert(0,self.dates[0])
troughs2 = troughs[min0:max0]
troughs2.append(self.dates[-1])
troughs2.insert(0,troughs[min0-1])
peaks2num = pylab.date2num([dateutil.parser.parse(s) for s in peaks2])
troughs2num = pylab.date2num([dateutil.parser.parse(s) for s in troughs2])
elif peaks_num[max0]<datesmax and troughs_num[min0-1]<datesmin:
peaks2 = peaks[min0:max0]
peaks2.append(peaks[max0])
troughs2 = troughs[min0:max0]
troughs2.append(self.dates[-1])
peaks2num = pylab.date2num([dateutil.parser.parse(s) for s in peaks2])
troughs2num = pylab.date2num([dateutil.parser.parse(s) for s in troughs2])
elif peaks_num[max0]>datesmax and troughs_num[min0]>datesmin:
peaks2 = peaks[min0:max0]
peaks2.insert(0,self.dates[0])
troughs2 = troughs[min0:max0]
troughs2.insert(0,troughs[min0-1])
peaks2num = pylab.date2num([dateutil.parser.parse(s) for s in peaks2])
troughs2num = pylab.date2num([dateutil.parser.parse(s) for s in troughs2])
else:
peaks2 = peaks[min0:max0+1]
troughs2 = troughs[min0:max0+1]
peaks2num = peaks_num[min0:max0+1]
troughs2num= troughs_num[min0:max0+1]
else:
if peaks_num[max0]>datesmax and troughs_num[min0]>datesmin:
peaks2 = peaks[min0:max0]
peaks2.insert(0,self.dates[0])
troughs2 = troughs[min0:max0]
troughs2.insert(0,troughs[min0+1])
peaks2num = pylab.date2num([dateutil.parser.parse(s) for s in peaks2])
troughs2num = pylab.date2num([dateutil.parser.parse(s) for s in troughs2])
else:
peaks2 = peaks[min0:max0+1]
troughs2 = troughs[min0:max0+1]
peaks2num = peaks_num[min0:max0+1]
troughs2num= troughs_num[min0:max0+1]
self.pks = peaks2
self.trs = troughs2
self.recess_bars = pylab.plot()
self.peaks = peaks
for k in range(len(peaks2)):
pylab.axvspan(peaks2num[k], troughs2num[k], edgecolor= '0.5', facecolor='0.5', alpha=0.5)
def quickplot(x,year_mult=10,show=True,recess=False,save=False,name='file',width=2):
    '''Create a plot of a FRED data series.

    Args:
        x: FRED object with datenums, data, title and units attributes.
        year_mult (int): spacing (in years) of the x-axis tick labels.
        show (bool): display the figure when True.
        recess: overlay NBER recession bars when not False.
        save: write the figure to <name>.png when not False.
        name (str): basename for the saved image.
        width: line width for the series.
    '''
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    ax.plot_date(x.datenums,x.data,'b-',lw=width)
    ax.xaxis.set_major_locator(pylab.YearLocator(year_mult))
    ax.set_title(x.title)
    ax.set_ylabel(x.units)
    fig.autofmt_xdate()
    if recess != False:
        x.recessions()
    ax.grid(True)
    if show==True:
        pylab.show()
    if save !=False:
        fig.savefig(name+'.png',bbox_inches='tight')
def window_equalize(fred_list):
    '''Trim every FRED object in *fred_list* to their largest shared date window.

    The window runs from the latest start date to the earliest end date
    across the list; each object's window() method is called with it.
    '''
    start = max(f.datenums[0].date() for f in fred_list).strftime('%Y-%m-%d')
    end = min(f.datenums[-1].date() for f in fred_list).strftime('%Y-%m-%d')
    shared = [start, end]
    for f in fred_list:
        f.window(shared)
def date_numbers(date_strings):
    '''Converts a list of date strings in yyyy-mm-dd format to date numbers.'''
    # NOTE(review): despite the name, dateutil.parser.parse returns datetime
    # objects, not matplotlib date numbers -- callers wanting true date
    # numbers must still pass the result through pylab.date2num.
    datenums = [dateutil.parser.parse(s) for s in date_strings]
    return datenums
def toFred(data,dates,pandasDates=False,title=None,t=None,season=None,freq=None,source=None,units=None,daterange=None, idCode=None,updated=None):
    '''function for creating a FRED object from a set of data.

    Args:
        data: sequence of observations for the new object.
        dates: date labels; 'yyyy-mm-dd' strings, or pandas timestamps when
            pandasDates is True.
        pandasDates: if True, convert pandas timestamps to date strings.
        title, t, season, freq, source, units, daterange, idCode, updated:
            metadata copied verbatim onto the object (t is presumably
            observations per year -- TODO confirm against the constructor).

    Returns:
        fred object whose attributes are overwritten with the given values.
    '''
    # NOTE(review): this fetches UNRATE from FRED purely to obtain an
    # instance shell, so it requires network access on every call --
    # confirm this is acceptable to callers.
    f = fred('UNRATE')
    f.data = data
    if pandasDates==True:
        # str(...)[0:10] keeps only the 'yyyy-mm-dd' prefix of the timestamp
        f.dates = [ str(d.to_datetime())[0:10] for d in dates]
    else:
        f.dates = dates
    # NOTE(review): when the supplied dates are not strings, f.datenums
    # silently keeps the stale UNRATE values -- verify callers always pass
    # strings or pandas timestamps.
    if type(f.dates[0])==str:
        f.datenums = [dateutil.parser.parse(s) for s in f.dates]
    f.title = title
    f.t = t
    f.season = season
    f.freq = freq
    f.source = source
    f.units = units
    f.daterange = daterange
    f.idCode = idCode
    f.updated = updated
    return f
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class residual_bandwidth(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/residual-bandwidth. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines unidirectional residual bandwidth.
    """
    # NOTE: auto-generated pyangbind binding -- regenerate from the YANG
    # model rather than editing by hand.
    # "__state" is name-mangled to _residual_bandwidth__state and is only
    # reachable through the read-only `state` property defined below.
    __slots__ = ("_path_helper", "_extmethods", "__state")
    _yang_name = "residual-bandwidth"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # default child: an empty operational-state ("config false") container
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        # optional copy-construction: a single positional argument must
        # expose every pyangbind element; changed elements are copied in
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # walk up through the parent chain when attached to a tree;
        # otherwise fall back to this container's absolute schema path
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "isis-neighbor-attribute",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "residual-bandwidth",
            ]
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/residual_bandwidth/state (container)
        YANG Description: State parameters of IS Extended Reachability sub-TLV 37.
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/residual_bandwidth/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State parameters of IS Extended Reachability sub-TLV 37.
        """
        # unwrap values that arrive already boxed in a YANG dynamic type
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        # restore the default (empty) state container
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
    # read-only property: the YANG node is config false
    state = __builtin__.property(_get_state)
    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class residual_bandwidth(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/residual-bandwidth. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines unidirectional residual bandwidth.
    """
    # NOTE: auto-generated duplicate binding for openconfig-network-instance-l2;
    # it rebinds the module-level name, so only this definition is visible
    # after import. Regenerate from the YANG model rather than editing.
    # "__state" is name-mangled to _residual_bandwidth__state and is only
    # reachable through the read-only `state` property defined below.
    __slots__ = ("_path_helper", "_extmethods", "__state")
    _yang_name = "residual-bandwidth"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # default child: an empty operational-state ("config false") container
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        # optional copy-construction: a single positional argument must
        # expose every pyangbind element; changed elements are copied in
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # walk up through the parent chain when attached to a tree;
        # otherwise fall back to this container's absolute schema path
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "isis-neighbor-attribute",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "residual-bandwidth",
            ]
    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/residual_bandwidth/state (container)
        YANG Description: State parameters of IS Extended Reachability sub-TLV 37.
        """
        return self.__state
    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/residual_bandwidth/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: State parameters of IS Extended Reachability sub-TLV 37.
        """
        # unwrap values that arrive already boxed in a YANG dynamic type
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_state(self):
        # restore the default (empty) state container
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
    # read-only property: the YANG node is config false
    state = __builtin__.property(_get_state)
    _pyangbind_elements = OrderedDict([("state", state)])
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import subprocess
import mock
from oslo_serialization import jsonutils
from rally import exceptions
from rally.verification.tempest import subunit2json
from rally.verification.tempest import tempest
from tests.unit import test
TEMPEST_PATH = "rally.verification.tempest"
class BaseTestCase(test.TestCase):
    """Shared fixture: a Tempest verifier pointed at throwaway /tmp paths,
    plus (unstarted) patchers for the class-level base-repo properties."""
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # patchers for the repo properties; individual tests enter them
        # explicitly, e.g. `with self.base_repo_patcher: ...`
        self.base_repo_patcher = mock.patch.object(
            tempest.Tempest, "base_repo", "foo-baserepo")
        self.base_repo_dir_patcher = mock.patch.object(
            tempest.Tempest, "base_repo_dir", "foo-baserepodir")
        verifier = tempest.Tempest("fake_deployment_id",
                                   verification=mock.MagicMock())
        verifier._path = "/tmp"
        verifier.config_file = "/tmp/tempest.conf"
        verifier.log_file_raw = "/tmp/subunit.stream"
        self.verifier = verifier
class TempestUtilsTestCase(BaseTestCase):
    # Unit tests for the path/env/venv/testr/result helpers of the Tempest
    # verifier wrapper; all filesystem and subprocess effects are mocked.
    def test_path(self):
        # path() joins optional components onto the verifier root
        self.assertEqual("/tmp", self.verifier.path())
        self.assertEqual("/tmp/foo", self.verifier.path("foo"))
        self.assertEqual("/tmp/foo/bar", self.verifier.path("foo", "bar"))
    @mock.patch("os.path.exists")
    def test_is_installed(self, mock_exists):
        # Check that `is_installed` depends on existence of path
        # os.path.exists == True => is_installed == True
        mock_exists.return_value = True
        self.assertTrue(self.verifier.is_installed())
        # os.path.exists == False => is_installed == False
        mock_exists.return_value = False
        self.assertFalse(self.verifier.is_installed())
        # both probes must target the verifier's .venv directory
        self.assertEqual([mock.call(self.verifier.path(".venv")),
                          mock.call(self.verifier.path(".venv"))],
                         mock_exists.call_args_list)
    @mock.patch("os.environ")
    def test_env_missed(self, mock_environ):
        # env lazily builds a copy of os.environ extended with the Tempest
        # config/test-discovery variables, then caches it on _env
        expected_env = {"PATH": "/some/path"}
        mock_environ.copy.return_value = copy.deepcopy(expected_env)
        expected_env.update({
            "TEMPEST_CONFIG": "tempest.conf",
            "TEMPEST_CONFIG_DIR": self.verifier.path(),
            "OS_TEST_PATH": self.verifier.path("tempest/test_discover")})
        self.assertIsNone(self.verifier._env)
        self.assertEqual(expected_env, self.verifier.env)
        self.assertTrue(mock_environ.copy.called)
        self.assertEqual(expected_env, self.verifier._env)
    @mock.patch("os.environ")
    def test_env_loaded(self, mock_environ):
        # a cached _env short-circuits the os.environ copy
        self.verifier._env = {"foo": "bar"}
        self.verifier.env
        self.assertFalse(mock_environ.copy.called)
    @mock.patch("os.path.isdir", return_value=True)
    @mock.patch(TEMPEST_PATH + ".tempest.check_output")
    def test__venv_install_when_venv_exists(self, mock_check_output,
                                            mock_isdir):
        # an existing .venv means no install commands are run
        self.verifier._install_venv()
        mock_isdir.assert_called_once_with(self.verifier.path(".venv"))
        self.assertFalse(mock_check_output.called)
    @mock.patch("%s.tempest.sys" % TEMPEST_PATH)
    @mock.patch("%s.tempest.costilius.get_interpreter" % TEMPEST_PATH,
                return_value="python")
    @mock.patch("os.path.isdir", return_value=False)
    @mock.patch("%s.tempest.check_output" % TEMPEST_PATH,
                return_value="some_output")
    def test__venv_install_when_venv_not_exist(self, mock_check_output,
                                               mock_isdir,
                                               mock_get_interpreter, mock_sys):
        # without a .venv the verifier bootstraps one, installs the
        # requirements into it, and develops-installs tempest itself
        mock_sys.version_info = "not_py27_env"
        self.verifier._install_venv()
        mock_isdir.assert_called_once_with(self.verifier.path(".venv"))
        mock_check_output.assert_has_calls([
            mock.call("python ./tools/install_venv.py", shell=True,
                      cwd=self.verifier.path()),
            mock.call("%s pip install -r requirements.txt "
                      "-r test-requirements.txt" %
                      self.verifier.venv_wrapper, shell=True,
                      cwd=self.verifier.path()),
            mock.call("%s python setup.py develop -N" %
                      self.verifier.venv_wrapper, shell=True,
                      cwd=self.verifier.path())])
    @mock.patch("%s.tempest.sys" % TEMPEST_PATH)
    @mock.patch("%s.tempest.costilius.get_interpreter" % TEMPEST_PATH,
                return_value=None)
    @mock.patch("os.path.isdir", return_value=False)
    def test__venv_install_fails__when_py27_is_not_present(
            self, mock_isdir, mock_get_interpreter, mock_sys):
        # no python2.7 interpreter available -> installation must abort
        mock_sys.version_info = "not_py27_env"
        self.assertRaises(exceptions.IncompatiblePythonVersion,
                          self.verifier._install_venv)
        mock_isdir.assert_called_once_with(self.verifier.path(".venv"))
    @mock.patch("os.path.isdir", return_value=True)
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    def test__initialize_testr_when_testr_already_initialized(
            self, mock_subprocess, mock_isdir):
        # an existing .testrepository means `testr init` is skipped
        self.verifier._initialize_testr()
        mock_isdir.assert_called_once_with(
            self.verifier.path(".testrepository"))
        self.assertFalse(mock_subprocess.called)
    @mock.patch("os.path.isdir", return_value=False)
    @mock.patch(TEMPEST_PATH + ".tempest.check_output")
    def test__initialize_testr_when_testr_not_initialized(
            self, mock_check_output, mock_isdir):
        # missing .testrepository -> `testr init` runs inside the venv
        self.verifier._initialize_testr()
        mock_isdir.assert_called_once_with(
            self.verifier.path(".testrepository"))
        mock_check_output.assert_called_once_with(
            "%s testr init" % self.verifier.venv_wrapper, shell=True,
            cwd=self.verifier.path())
    @mock.patch.object(subunit2json, "main")
    @mock.patch("os.path.isfile", return_value=False)
    def test__save_results_without_log_file(
            self, mock_isfile, mock_main):
        # no raw subunit stream -> nothing is converted or saved
        self.verifier._save_results()
        mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
        self.assertEqual(0, mock_main.call_count)
    @mock.patch("os.path.isfile", return_value=True)
    def test__save_results_with_log_file(self, mock_isfile):
        # a raw stream is converted to JSON and fed to finish_verification
        with mock.patch.object(subunit2json, "main") as mock_main:
            data = {"total": True, "test_cases": True}
            mock_main.return_value = jsonutils.dumps(data)
            self.verifier.log_file_raw = os.path.join(
                os.path.dirname(__file__), "subunit.stream")
            self.verifier._save_results()
            mock_isfile.assert_called_once_with(self.verifier.log_file_raw)
            mock_main.assert_called_once_with(
                self.verifier.log_file_raw)
            verification = self.verifier.verification
            verification.finish_verification.assert_called_once_with(**data)
class TempestInstallAndUninstallTestCase(BaseTestCase):
    """Tests for cloning, installing, upgrading and uninstalling Tempest."""

    @mock.patch(TEMPEST_PATH + ".tempest.subprocess.check_call")
    def test__clone_successful(self, mock_check_call):
        """_clone shells out to git with the fixed upstream URL."""
        with self.base_repo_patcher:
            self.verifier._clone()
            mock_check_call.assert_called_once_with(
                ["git", "clone", "https://github.com/openstack/tempest",
                 "foo-baserepo"])

    def test__no_dir(self):
        """A path that is not a directory can never be a git repo."""
        with mock.patch("os.path.isdir", return_value=False):
            self.assertFalse(self.verifier._is_git_repo("fake_dir"))

    @mock.patch("subprocess.call", return_value=1)
    @mock.patch("os.path.isdir", return_value=True)
    def test__is_not_git_repo(self, mock_isdir, mock_call):
        """Directory exists but the git probe command exits non-zero."""
        self.assertFalse(self.verifier._is_git_repo("fake_dir"))

    @mock.patch("subprocess.call", return_value=0)
    @mock.patch("os.path.isdir", return_value=True)
    def test__is_git_repo(self, mock_isdir, mock_call):
        """Directory exists and the git probe command succeeds."""
        self.assertTrue(self.verifier._is_git_repo("fake_dir"))

    @mock.patch("%s.tempest.check_output" % TEMPEST_PATH,
                return_value="fake_url")
    def test__get_remote_origin(self, mock_check_output):
        """The remote origin URL is whatever the git command prints."""
        self.assertEqual("fake_url",
                         self.verifier._get_remote_origin("fake_dir"))

    @mock.patch("shutil.rmtree")
    @mock.patch(TEMPEST_PATH + ".tempest.os.path.exists", return_value=True)
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess.check_call")
    def test__clone_failed(self, mock_check_call, mock_exists, mock_rmtree):
        """A failed clone propagates the error and removes the partial repo."""
        with self.base_repo_patcher:
            # Check that `subprocess.CalledProcessError` is not handled
            # by `_clone`
            mock_check_call.side_effect = subprocess.CalledProcessError(
                0, None)
            self.assertRaises(subprocess.CalledProcessError,
                              self.verifier._clone)
            mock_check_call.assert_called_once_with(
                ["git", "clone", "https://github.com/openstack/tempest",
                 "foo-baserepo"])
            # The half-created base repo must be cleaned up on failure.
            mock_rmtree.assert_called_once_with(self.verifier.base_repo)

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.base_repo")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._initialize_testr")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._install_venv")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess.check_call")
    @mock.patch("shutil.copytree")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._clone")
    @mock.patch("os.path.exists", return_value=False)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._is_git_repo",
                return_value=False)
    def test_install_successful(self, mock_tempest__is_git_repo, mock_exists,
                                mock_tempest__clone,
                                mock_copytree, mock_check_call,
                                mock_tempest__install_venv,
                                mock_tempest__initialize_testr,
                                mock_tempest_base_repo):
        """Full install flow: clone, copy, git update, venv, testr init."""
        # base_repo is a property on the class; stub its getter.
        mock_tempest_base_repo.__get__ = mock.Mock(return_value="fake_dir")
        self.verifier.install()
        mock_tempest__is_git_repo.assert_called_once_with(
            self.verifier.base_repo)
        mock_exists.assert_has_calls([mock.call(self.verifier.path(".venv")),
                                      mock.call(self.verifier.path())])
        mock_tempest__clone.assert_called_once_with()
        mock_copytree.assert_called_once_with(
            self.verifier.base_repo,
            self.verifier.path())
        mock_check_call.assert_called_once_with(
            "git checkout master; git pull",
            cwd=self.verifier.path("tempest"),
            shell=True)
        mock_tempest__install_venv.assert_called_once_with()
        mock_tempest__initialize_testr.assert_called_once_with()

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.base_repo")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.uninstall")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._initialize_testr")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._install_venv")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess.check_call")
    @mock.patch("shutil.copytree")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._clone")
    @mock.patch("os.path.exists", return_value=False)
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._is_git_repo",
                return_value=False)
    def test_install_failed(self, mock_tempest__is_git_repo, mock_exists,
                            mock_tempest__clone,
                            mock_copytree, mock_check_call,
                            mock_tempest__install_venv,
                            mock_tempest__initialize_testr,
                            mock_tempest_uninstall,
                            mock_tempest_base_repo):
        """A failing git update aborts install and triggers uninstall."""
        mock_tempest_base_repo.__get__ = mock.Mock(return_value="fake_dir")
        # Simulate "git checkout master; git pull" failing.
        mock_check_call.side_effect = subprocess.CalledProcessError(0, None)
        self.assertRaises(tempest.TempestSetupFailure, self.verifier.install)
        mock_tempest__is_git_repo.assert_called_once_with(
            self.verifier.base_repo)
        mock_exists.assert_has_calls([mock.call(self.verifier.path(".venv")),
                                      mock.call(self.verifier.path())])
        mock_tempest__clone.assert_called_once_with()
        mock_copytree.assert_called_once_with(
            self.verifier.base_repo,
            self.verifier.path())
        mock_check_call.assert_called_once_with(
            "git checkout master; git pull",
            cwd=self.verifier.path("tempest"),
            shell=True)
        # Later install stages must not run after the failure.
        self.assertFalse(mock_tempest__install_venv.called)
        self.assertFalse(mock_tempest__initialize_testr.called)
        mock_tempest_uninstall.assert_called_once_with()

    @mock.patch("shutil.rmtree")
    @mock.patch("os.path.exists", return_value=True)
    def test_uninstall(self, mock_exists, mock_rmtree):
        """Uninstall removes the verifier directory when it exists."""
        self.verifier.uninstall()
        mock_exists.assert_called_once_with(self.verifier.path())
        mock_rmtree.assert_called_once_with(self.verifier.path())

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest._is_git_repo",
                return_value=True)
    @mock.patch("tempfile.mkdtemp", return_value="fake_tempest_dir")
    @mock.patch("os.listdir", return_value=["fake_dir"])
    @mock.patch("shutil.move")
    @mock.patch("os.path.exists", return_value=True)
    def test_upgrade_repo_tree(self, mock_exists, mock_move, mock_listdir,
                               mock_mkdtemp,
                               mock_tempest__is_git_repo):
        """Accessing base_repo on an old-style tree moves entries aside."""
        with self.base_repo_dir_patcher as foo_base:
            self.verifier._base_repo = "fake_base"
            # Bare property access: presumably triggers the repo-tree
            # upgrade path as a side effect — confirm against Tempest code.
            self.verifier.base_repo
            directory = mock_mkdtemp.return_value
            mock_listdir.assert_called_once_with(foo_base)
            fake_dir = mock_listdir.return_value[0]
            source = os.path.join(self.base_repo_dir_patcher.new, fake_dir)
            dest = os.path.join(directory, fake_dir)
            mock_move.assert_called_once_with(source, dest)
class TempestVerifyTestCase(BaseTestCase):
    """Tests for Tempest.verify() and log-file importing."""

    def _get_fake_call(self, testr_arg):
        """Builds the exact shell pipeline verify() is expected to run."""
        return (
            "%(venv)s testr run --parallel --subunit tempest.api.%(testr_arg)s"
            " | tee %(tempest_path)s/subunit.stream"
            " | %(venv)s subunit-2to1"
            " | %(venv)s %(tempest_path)s/tools/colorizer.py" % {
                "venv": self.verifier.venv_wrapper,
                "testr_arg": testr_arg,
                "tempest_path": self.verifier.path()})

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
                return_value=(None, None))
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestConf")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.is_configured",
                return_value=False)
    def test_verify_not_configured(
            self, mock_tempest_is_configured, mock_tempest_conf,
            mock_subprocess, mock_tempest_env, mock_tempest_parse_results):
        """If Tempest is unconfigured, verify() generates a config first."""
        set_name = "compute"
        fake_call = self._get_fake_call(set_name)
        self.verifier.verify(set_name, None)
        # Called twice — presumably once before and once after config
        # generation; confirm against Tempest.verify().
        self.assertEqual(2, mock_tempest_is_configured.call_count)
        mock_tempest_conf.assert_called_once_with(self.verifier.deployment)
        mock_tempest_conf.return_value.generate.assert_called_once_with(
            self.verifier.config_file
        )
        self.verifier.verification.start_verifying.assert_called_once_with(
            set_name)
        mock_subprocess.check_call.assert_called_once_with(
            fake_call, env=mock_tempest_env, cwd=self.verifier.path(),
            shell=True)
        mock_tempest_parse_results.assert_called_once_with(None)

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
                return_value=(None, None))
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestConf")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.is_configured",
                return_value=True)
    def test_verify_when_tempest_configured(
            self, mock_tempest_is_configured, mock_tempest_conf,
            mock_subprocess, mock_tempest_env, mock_tempest_parse_results):
        """If Tempest is already configured, no config is (re)generated."""
        set_name = "identity"
        fake_call = self._get_fake_call(set_name)
        self.verifier.verify(set_name, None)
        mock_tempest_is_configured.assert_called_once_with()
        self.assertFalse(mock_tempest_conf.called)
        self.assertFalse(mock_tempest_conf().generate.called)
        self.verifier.verification.start_verifying.assert_called_once_with(
            set_name)
        mock_subprocess.check_call.assert_called_once_with(
            fake_call, env=mock_tempest_env, cwd=self.verifier.path(),
            shell=True)
        mock_tempest_parse_results.assert_called_once_with(None)

    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.parse_results",
                return_value=(None, None))
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.env")
    @mock.patch(TEMPEST_PATH + ".tempest.subprocess")
    @mock.patch(TEMPEST_PATH + ".config.TempestConf")
    @mock.patch(TEMPEST_PATH + ".tempest.Tempest.is_configured",
                return_value=True)
    def test_verify_failed_and_tempest_is_configured(
            self, mock_tempest_is_configured, mock_tempest_conf,
            mock_subprocess, mock_tempest_env, mock_tempest_parse_results):
        """A failed run still parses results and marks verification failed."""
        set_name = "identity"
        fake_call = self._get_fake_call(set_name)
        # NOTE(review): this sets side_effect on the mocked `subprocess`
        # *module*, not on `subprocess.check_call`, so check_call itself
        # never raises here — confirm this is intentional.
        mock_subprocess.side_effect = subprocess.CalledProcessError
        self.verifier.verify(set_name, None)
        mock_tempest_is_configured.assert_called_once_with()
        self.assertFalse(mock_tempest_conf.called)
        self.assertFalse(mock_tempest_conf().generate.called)
        self.verifier.verification.start_verifying.assert_called_once_with(
            set_name)
        mock_subprocess.check_call.assert_called_once_with(
            fake_call, env=mock_tempest_env, cwd=self.verifier.path(),
            shell=True)
        self.assertTrue(mock_tempest_parse_results.called)
        self.verifier.verification.set_failed.assert_called_once_with()

    def test_import_file(self):
        """import_file starts a verification and saves the given log."""
        set_name = "identity"
        log_file = "log_file"
        self.verifier._save_results = mock.Mock()
        self.verifier.import_file(set_name, log_file)
        mock_start_verifying = self.verifier.verification.start_verifying
        mock_start_verifying.assert_called_once_with(set_name)
        self.verifier._save_results.assert_called_once_with(log_file)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
# Allow deprecation warnings to be silenced temporarily with a context manager.
# Toggled by the `silence()` context manager defined at the bottom of this
# module; all decorators below consult it before logging.
_PRINT_DEPRECATION_WARNINGS = True
# Remember which deprecation warnings have been printed already.
# Keys are functions or (function, arg_name) tuples; values are True.
_PRINTED_WARNING = {}
# Raised by deprecated_endpoints() when a symbol is decorated twice.
class DeprecatedNamesAlreadySet(Exception):
  """Raised when setting deprecated names multiple times for the same symbol."""
  pass
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
  """Adds a deprecation notice to a docstring for deprecated functions."""
  if date is None:
    when = 'in a future version'
  else:
    when = 'after %s' % date
  notice_lines = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' % when]
  if instructions:
    notice_lines.append('Instructions for updating:')
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', notice_lines)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
                                            deprecated_names):
  """Adds a deprecation notice to a docstring for deprecated arguments."""
  deprecation_string = ', '.join(sorted(deprecated_names))
  when = 'in a future version' if date is None else ('after %s' % date)
  notice = [
      'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (deprecation_string, when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
      '(deprecated arguments)', notice)
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
                                                  deprecated_name_value_dict):
  """Adds a deprecation notice to a docstring for deprecated arguments."""
  items = sorted(deprecated_name_value_dict.items())
  deprecation_string = ', '.join('%s=%r' % (key, value) for key, value in items)
  if date is None:
    when = 'in a future version'
  else:
    when = 'after %s' % date
  notice = [
      'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (deprecation_string, when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES',
      '(deprecated argument values)', notice)
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location(outer=False):
  """Returns call location given level up from current call."""
  stack = tf_stack.extract_stack_file_and_line(max_length=4)
  if not stack:  # should never happen as we're in a function
    return 'UNKNOWN'
  # Walk 3 frames up (past this helper and its caller), or 4 for `outer`.
  depth = 4 if outer else 3
  frame = stack[max(len(stack) - depth, 0)]
  return '{}:{}'.format(frame.file, frame.line)
def _wrap_decorator(wrapped_function):
  """Indicate that one function wraps another.

  This decorator wraps a function using `tf_decorator.make_decorator`
  so that doc generation scripts can pick up the original function
  signature. (`functools.wraps` would be preferable, but it does not
  update the signature to match the wrapped function in Python 2.)

  Args:
    wrapped_function: The function that the decorated function wraps.

  Returns:
    A function that accepts the wrapper function as an argument and
    returns a `TFDecorator` instance.
  """
  return lambda wrapper_func: tf_decorator.make_decorator(
      wrapped_function, wrapper_func)
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
  """Deprecate a symbol in favor of a new name with identical semantics.

  This function is meant to be used when defining a backwards-compatibility
  alias for a symbol which has been moved. For example:

  module1.py:
  ```python
  class NewNameForClass: pass
  ```

  module2.py:
  ```python
  import module1

  DeprecatedNameForClass = deprecated_alias(
      deprecated_name='module2.DeprecatedNameForClass',
      name='module1.NewNameForClass',
      func_or_class=module1.NewNameForClass)
  ```

  This function works for classes and functions.

  For classes, it creates a new class which is functionally identical (it
  inherits from the original, and overrides its constructor), but which prints
  a deprecation warning when an instance is created. It also adds a deprecation
  notice to the class' docstring.

  For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
  That function prints a warning when used, and has a deprecation notice in its
  docstring. This is more or less equivalent (the deprecation warning has
  slightly different text) to writing:

  ```python
  @deprecated
  def deprecated_alias(original_args):
    real_function(original_args)
  ```

  Args:
    deprecated_name: The name of the symbol that is being deprecated, to be used
      in the warning message. This should be its fully qualified name to avoid
      confusion.
    name: The name of the symbol that is to be used instead of the deprecated
      name. This should be a fully qualified name to avoid confusion.
    func_or_class: The (non-deprecated) class or function for which a deprecated
      alias should be created.
    warn_once: If True (the default), only print a deprecation warning the first
      time this function is used, or the class is instantiated.

  Returns:
    A wrapped version of `func_or_class` which prints a deprecation warning on
    use and has a modified docstring.
  """
  if tf_inspect.isclass(func_or_class):
    # Make a new class with __init__ wrapped in a warning.
    class _NewClass(func_or_class):  # pylint: disable=missing-docstring
      __doc__ = decorator_utils.add_notice_to_docstring(
          func_or_class.__doc__, 'Please use %s instead.' % name,
          'DEPRECATED CLASS',
          '(deprecated)', ['THIS CLASS IS DEPRECATED. '
                           'It will be removed in a future version. '])
      __name__ = func_or_class.__name__
      __module__ = _call_location(outer=True)

      @_wrap_decorator(func_or_class.__init__)
      def __init__(self, *args, **kwargs):
        # Propagate the original constructor's docstring to the wrapper
        # (attribute location differs between Python 2 and 3).
        if hasattr(_NewClass.__init__, '__func__'):
          # Python 2
          _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
        else:
          # Python 3
          _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__

        if _PRINT_DEPRECATION_WARNINGS:
          # We're making the alias as we speak. The original may have other
          # aliases, so we cannot use it to check for whether it's already been
          # warned about.
          if _NewClass.__init__ not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[_NewClass.__init__] = True
            logging.warning(
                'From %s: The name %s is deprecated. Please use %s instead.\n',
                _call_location(), deprecated_name, name)
        super(_NewClass, self).__init__(*args, **kwargs)

    return _NewClass
  else:
    decorator_utils.validate_callable(func_or_class, 'deprecated')

    # Make a wrapper for the original
    @functools.wraps(func_or_class)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      if _PRINT_DEPRECATION_WARNINGS:
        # We're making the alias as we speak. The original may have other
        # aliases, so we cannot use it to check for whether it's already been
        # warned about.
        if new_func not in _PRINTED_WARNING:
          if warn_once:
            _PRINTED_WARNING[new_func] = True
          logging.warning(
              'From %s: The name %s is deprecated. Please use %s instead.\n',
              _call_location(), deprecated_name, name)
      return func_or_class(*args, **kwargs)
    return tf_decorator.make_decorator(
        func_or_class, new_func, 'deprecated',
        _add_deprecated_function_notice_to_docstring(
            func_or_class.__doc__, None, 'Please use %s instead.' % name))
def deprecated_endpoints(*args):
  """Decorator for marking endpoints deprecated.

  This decorator does not print deprecation messages.
  TODO(annarev): eventually start printing deprecation warnings when
  @deprecation_endpoints decorator is added.

  Args:
    *args: Deprecated endpoint names.

  Returns:
    A function that takes symbol as an argument and adds
    _tf_deprecated_api_names to that symbol.
    _tf_deprecated_api_names would be set to a list of deprecated
    endpoint names for the symbol.
  """
  def mark_deprecated(symbol):
    # pylint: disable=protected-access
    if '_tf_deprecated_api_names' in symbol.__dict__:
      raise DeprecatedNamesAlreadySet(
          'Cannot set deprecated names for %s to %s. '
          'Deprecated names are already set to %s.' % (
              symbol.__name__, str(args),
              str(symbol._tf_deprecated_api_names)))
    symbol._tf_deprecated_api_names = args
    # pylint: enable=protected-access
    return symbol

  return mark_deprecated
def deprecated(date, instructions, warn_once=True):
  """Decorator for marking functions or methods deprecated.

  Whenever the decorated function is called, a deprecation warning is
  logged in the following format:

    <function> (from <module>) is deprecated and will be removed after <date>.
    Instructions for updating:
    <instructions>

  If `date` is None, 'after <date>' becomes 'in a future version'.
  <function> includes the class name when the target is a method. The
  function's docstring is also amended: ' (deprecated)' is appended to its
  first line and a deprecation notice is prepended to the remainder.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: Boolean. Set to `True` to warn only the first time the decorated
      function is called. Otherwise, every call will log a warning.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)

  def deprecated_wrapper(func):
    """Wraps `func` so calls log a deprecation warning."""
    decorator_utils.validate_callable(func, 'deprecated')

    @functools.wraps(func)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      # `warn_once` only controls whether `func` is recorded as warned;
      # while warnings are enabled and `func` is unrecorded, we always log.
      if _PRINT_DEPRECATION_WARNINGS and func not in _PRINTED_WARNING:
        if warn_once:
          _PRINTED_WARNING[func] = True
        logging.warning(
            'From %s: %s (from %s) is deprecated and will be removed %s.\n'
            'Instructions for updating:\n%s',
            _call_location(), decorator_utils.get_qualified_name(func),
            func.__module__,
            'in a future version' if date is None else ('after %s' % date),
            instructions)
      return func(*args, **kwargs)

    amended_doc = _add_deprecated_function_notice_to_docstring(
        func.__doc__, date, instructions)
    return tf_decorator.make_decorator(func, new_func, 'deprecated',
                                       amended_doc)

  return deprecated_wrapper
# Spec describing one deprecated argument:
#   position: zero-based index in the callee's positional args (-1 until
#     resolved against a concrete signature)
#   has_ok_value: whether `ok_value` suppresses the warning when matched
#   ok_value: the argument value for which no deprecation warning is emitted
DeprecatedArgSpec = collections.namedtuple(
    'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
                    **kwargs):
  """Decorator for marking specific function arguments as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument. It has the following format:

    Calling <function> (from <module>) with <arg> is deprecated and will be
    removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> includes the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    *deprecated_arg_names_or_tuples: String or 2-Tuple(String,
      [ok_vals]). The string is the deprecated argument name.
      Optionally, an ok-value may be provided. If the user provided
      argument equals this value, the warning is suppressed.
    **kwargs: If `warn_once=False` is passed, every call with a deprecated
      argument will log a warning. The default behavior is to only warn the
      first time the function is called with any given deprecated argument.
      All other kwargs raise `ValueError`.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, instructions are
      empty, the deprecated arguments are not present in the function
      signature, the second element of a deprecated_tuple is not a
      list, or if a kwarg other than `warn_once` is passed.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_arg_names_or_tuples:
    raise ValueError('Specify which argument is deprecated.')
  if kwargs and list(kwargs.keys()) != ['warn_once']:
    # Remove the legal kwarg so the error message shows only illegal ones.
    kwargs.pop('warn_once', None)
    raise ValueError('Illegal argument to deprecated_args: %s' % kwargs)
  warn_once = kwargs.get('warn_once', True)

  def _get_arg_names_to_ok_vals():
    """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
    d = {}
    for name_or_tuple in deprecated_arg_names_or_tuples:
      if isinstance(name_or_tuple, tuple):
        d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
      else:
        d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
    return d

  def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
    """Builds a dictionary from deprecated arguments to their spec.

    Returned dict is keyed by argument name.
    Each value is a DeprecatedArgSpec with the following fields:
      position: The zero-based argument position of the argument
        within the signature. None if the argument isn't found in
        the signature.
      ok_values: Values of this argument for which warning will be
        suppressed.

    Args:
      names_to_ok_vals: dict from string arg_name to a list of values,
        possibly empty, which should not elicit a warning.
      arg_spec: Output from tf_inspect.getfullargspec on the called function.

    Returns:
      Dictionary from arg_name to DeprecatedArgSpec.
    """
    arg_name_to_pos = {
        name: pos for pos, name in enumerate(arg_spec.args)}
    deprecated_positional_args = {}
    for arg_name, spec in iter(names_to_ok_vals.items()):
      if arg_name in arg_name_to_pos:
        pos = arg_name_to_pos[arg_name]
        deprecated_positional_args[arg_name] = DeprecatedArgSpec(
            pos, spec.has_ok_value, spec.ok_value)
    return deprecated_positional_args

  deprecated_arg_names = _get_arg_names_to_ok_vals()

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_args')

    arg_spec = tf_inspect.getfullargspec(func)
    deprecated_positions = _get_deprecated_positional_arguments(
        deprecated_arg_names, arg_spec)

    is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
    is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names

    # Every deprecated name must resolve to a positional arg, *args or
    # **kwargs of the decorated function; otherwise the decoration is a typo.
    if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
        != len(deprecated_arg_names_or_tuples)):
      known_args = arg_spec.args + [arg_spec.varargs, arg_spec.varkw]
      missing_args = [arg_name for arg_name in deprecated_arg_names
                      if arg_name not in known_args]
      raise ValueError('The following deprecated arguments are not present '
                       'in the function signature: %s. '
                       'Found next arguments: %s.' % (missing_args, known_args))

    def _same_value(a, b):
      """A comparison operation that works for multiple object types.

      Returns True for two empty lists, two numeric values with the
      same value, etc.

      Returns False for (pd.DataFrame, None), and other pairs which
      should not be considered equivalent.

      Args:
        a: value one of the comparison.
        b: value two of the comparison.

      Returns:
        A boolean indicating whether the two inputs are the same value
        for the purposes of deprecation.
      """
      if a is b:
        return True
      try:
        equality = a == b
        if isinstance(equality, bool):
          return equality
      except TypeError:
        return False
      return False

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      # TODO(apassos) figure out a way to have reasonable performance with
      # deprecation warnings and eager mode.
      if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
        invalid_args = []
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        for arg_name, spec in iter(deprecated_positions.items()):
          if (spec.position < len(args) and
              not (spec.has_ok_value and
                   _same_value(named_args[arg_name], spec.ok_value))):
            invalid_args.append(arg_name)
        if is_varargs_deprecated and len(args) > len(arg_spec.args):
          invalid_args.append(arg_spec.varargs)
        if is_kwargs_deprecated and kwargs:
          invalid_args.append(arg_spec.varkw)
        # NOTE(review): this loop indexes `deprecated_positions[arg_name]`
        # for any deprecated name found in kwargs; a deprecated varargs/varkw
        # name passed as a keyword would appear to raise KeyError — confirm.
        for arg_name in deprecated_arg_names:
          if (arg_name in kwargs and
              not (deprecated_positions[arg_name].has_ok_value and
                   _same_value(named_args[arg_name],
                               deprecated_positions[arg_name].ok_value))):
            invalid_args.append(arg_name)
        for arg_name in invalid_args:
          if (func, arg_name) not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[(func, arg_name)] = True
            logging.warning(
                'From %s: calling %s (from %s) with %s is deprecated and will '
                'be removed %s.\nInstructions for updating:\n%s',
                _call_location(), decorator_utils.get_qualified_name(func),
                func.__module__, arg_name,
                'in a future version' if date is None else ('after %s' % date),
                instructions)
      return func(*args, **kwargs)

    doc = _add_deprecated_arg_notice_to_docstring(
        func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)

  return deprecated_wrapper
def deprecated_arg_values(date, instructions, warn_once=True,
                          **deprecated_kwargs):
  """Decorator for marking specific function argument values as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument values. It has the following format:

    Calling <function> (from <module>) with <arg>=<value> is deprecated and
    will be removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: If `True`, warn only the first time this function is called with
      deprecated argument values. Otherwise, every call (with a deprecated
      argument value) will log a warning.
    **deprecated_kwargs: The deprecated argument values.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_kwargs:
    raise ValueError('Specify which argument values are deprecated.')

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_arg_values')

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      if _PRINT_DEPRECATION_WARNINGS:
        # Resolve positional and keyword arguments to one name->value map so
        # deprecated values are detected however they were passed.
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        for arg_name, arg_value in deprecated_kwargs.items():
          if arg_name in named_args and named_args[arg_name] == arg_value:
            if (func, arg_name) not in _PRINTED_WARNING:
              if warn_once:
                _PRINTED_WARNING[(func, arg_name)] = True
              logging.warning(
                  'From %s: calling %s (from %s) with %s=%s is deprecated and '
                  'will be removed %s.\nInstructions for updating:\n%s',
                  _call_location(), decorator_utils.get_qualified_name(func),
                  func.__module__, arg_name, arg_value, 'in a future version'
                  if date is None else ('after %s' % date), instructions)
      return func(*args, **kwargs)

    doc = _add_deprecated_arg_value_notice_to_docstring(
        func.__doc__, date, instructions, deprecated_kwargs)
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)

  return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
  """Looks up deprecated argument name and ensures both are not used.

  Args:
    new_name: new name of argument
    new_value: value of new argument (or None if not used)
    old_name: old name of argument
    old_value: value of old argument (or None if not used)

  Returns:
    The effective argument that should be used.

  Raises:
    ValueError: if new_value and old_value are both non-null
  """
  if old_value is None:
    return new_value
  if new_value is not None:
    raise ValueError("Cannot specify both '%s' and '%s'" %
                     (old_name, new_name))
  return old_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
  """Renames an argument in a docstring (both `arg` mentions and 'arg:')."""
  renamed = old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument)
  return renamed.replace('%s:' % old_argument, '%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
  """Temporarily silence deprecation warnings.

  Yields:
    None.
  """
  global _PRINT_DEPRECATION_WARNINGS
  saved_setting = _PRINT_DEPRECATION_WARNINGS
  _PRINT_DEPRECATION_WARNINGS = False
  try:
    yield
  finally:
    # Restore the previous setting even if the managed block raises, so an
    # exception inside the context cannot permanently disable warnings
    # (the original version skipped the restore on exception).
    _PRINT_DEPRECATION_WARNINGS = saved_setting
|
|
#!/usr/bin/python
# Ship simulator, updated for TW4.
from __future__ import print_function
import enum
import pprint
import random
import dice
# Enumeration of every unit type the simulator knows about. The functional
# Enum API numbers members 1..9 in order, matching the original explicit
# SPACEDOCK=1 ... FLAGSHIP=9 assignments.
ShipType = enum.Enum(
    'ShipType',
    ['SPACEDOCK', 'PDS', 'FIGHTER', 'CARRIER', 'CRUISER',
     'DESTROYER', 'DREADNOUGHT', 'WARSUN', 'FLAGSHIP'])
class CombatUnit(object):
    """Base class for all combat units, space- and ground-based.

    Attributes:
        damage: Points of damage currently absorbed (starts at 0).
        movement: Number of moves per activation (set by subclasses).
        production_cost: Resource cost to build (set by subclasses).
        edition: Normalized rules edition — 4 for TW4 (the default), 3 for
            anything else.
        verbosity: When True, combat methods print diagnostic notes.
    """

    def __init__(self, edition=None, verbosity=False):
        self.damage = 0
        self.movement = 0
        self.production_cost = 0
        # Default to fourth-edition rules; any other explicit edition value
        # falls back to third-edition stats. (Fixed: compare to None with
        # `is`, per PEP 8, instead of `== None`.)
        if edition is None or edition == 4:
            self.edition = 4
        else:
            self.edition = 3
        self.verbosity = verbosity
class GroundUnit(CombatUnit):
    """Marker base class for units that fight on the ground (no extra state)."""
    pass
class SpaceVehicle(CombatUnit):
    """Base class for ships that participate in space combat.

    Subclasses override GenerateHits() and, for hulls that can sustain
    damage, ReceiveHits()/CanHandleDamage().
    """

    def __init__(self, edition=None, verbosity=False):
        # Forward verbosity to the base class instead of re-assigning it.
        super(SpaceVehicle, self).__init__(edition, verbosity)
        # Number of fighters/ground forces/PDS units this ship can carry.
        self.capacity_limit = 0
        # BUGFIX: `cargo` was only initialized by some subclasses (e.g.
        # Carrier), so AddToCargo/RemoveFromCargo raised AttributeError on
        # every other vehicle. Initialize it here for all ships.
        self.cargo = []

    def GenerateHits(self):
        """Roll combat dice; overridden by each concrete ship type."""
        pass

    def ReceiveHits(self, amount):
        """Receive an amount of points of damage.

        Args:
            amount: Integer number.

        Returns:
            integer of number of hits left.

        This is the generic version called by subclasses, therefore we
        presume one hit will destroy the ship.
        """
        if self.verbosity:
            print("NOTE: ship destroyed (%s)" % (self.__class__.__name__))
        return 0

    def TravelTo(self):
        pass  # movement not implemented yet

    def IsDamaged(self):
        """Returns True if the ship has absorbed any damage."""
        return self.damage > 0

    def CanHandleDamage(self):
        """How much damage can this ship take?"""
        return 1

    def WhereCanITravelTo(self):
        pass  # movement not implemented yet

    def ProductionSlotsTaken(self):
        pass  # production accounting not implemented yet

    def CanCarryGroundForces(self):
        return False

    def CanCarryPDSes(self):
        return False

    def CanCarryFighters(self):
        return False

    def CanGenerateAntiFighterBarrage(self):
        return False

    def CanSupportFighters(self):
        return False

    def ProvidePlanetaryShield(self):
        return False

    def AddToCargo(self, thing):
        """Load `thing` into the cargo hold.

        Returns:
            True if loaded; False if the hold is already at capacity.
        """
        if len(self.cargo) < self.capacity_limit:
            self.cargo.append(thing)
            return True
        if self.verbosity:
            print("NOTE: cargo limit of %s exceeded" % self.capacity_limit)
        return False

    def RemoveFromCargo(self):
        """Unload and return the most recently loaded cargo item.

        Raises:
            IndexError: if the hold is empty.
        """
        # BUGFIX: the original called the undefined global `pop(self.cargo)`,
        # which raised NameError on every use.
        return self.cargo.pop()
class Carrier(SpaceVehicle):
    """Carrier: the dedicated transport ship.

    Capacity is 4 under 4th-edition rules and 6 under 3rd edition (and after
    an upgrade).
    """
    def __init__(self, edition=None):
        super(Carrier, self).__init__(edition)
        self.movement = 1
        self.max_produceable = 4
        self.production_cost = 3
        # CombatUnit.__init__ normalizes `edition` to 4 or 3, so the old
        # `self.edition == None` test was dead code.
        if self.edition == 4:
            self.capacity_limit = 4
        else:
            self.capacity_limit = 6
        self.shiptype = ShipType.CARRIER
        self.cargo = []
    def CanCarryFighters(self):
        return True
    def CanCarryPDSes(self):
        return True
    def CanCarryGroundForces(self):
        return True
    def UpgradeCapacity(self):
        """Apply the Carrier II upgrade: capacity goes to 6."""
        self.capacity_limit = 6
    def GenerateHits(self):
        """Roll one die, hitting on 9 or better."""
        return dice.Dice.RollWithTarget(9, 1)
class Cruiser(SpaceVehicle):
    """Cruiser: fast, mid-cost warship with no cargo capacity."""
    def __init__(self, edition=4):
        super(Cruiser, self).__init__(edition)
        self.shiptype = ShipType.CRUISER
        self.movement = 2
        self.production_cost = 2
        self.max_produceable = 8
    def GenerateHits(self):
        """Roll one die, hitting on 7 or better."""
        return dice.Dice.RollWithTarget(7, 1)
class Destroyer(SpaceVehicle):
    """Destroyer: cheap escort ship with anti-fighter barrage."""
    def __init__(self, edition=4):
        super(Destroyer, self).__init__(edition)
        self.movement = 2
        self.max_produceable = 8
        self.production_cost = 1
        self.shiptype = ShipType.DESTROYER
    def GenerateHits(self):
        """Roll one die, hitting on 9 or better."""
        return dice.Dice.RollWithTarget(9, 1)
    def CanGenerateAntiFighterBarrage(self):
        return True
    def GenerateAntiFighterBarrage(self):
        """Roll two barrage dice, hitting on 9 or better.

        BUG FIX: the original referenced bare ``Dice`` (NameError); the
        module is imported as ``dice``.
        """
        return dice.Dice.RollWithTarget(9, 2)
class Dreadnought(SpaceVehicle):
    """Dreadnought: heavy capital ship that can sustain one hit."""
    def __init__(self, edition=4):
        super(Dreadnought, self).__init__(edition)
        self.movement = 2
        self.production_cost = 4 if self.edition == 4 else 5
        self.capacity_limit = 1
        # BUG FIX: capacity_limit was set without a cargo hold, so
        # AddToCargo would raise AttributeError.
        self.cargo = []
        self.shiptype = ShipType.DREADNOUGHT
    def GenerateHits(self):
        """Roll one die, hitting on 5 or better.

        BUG FIX: the original referenced bare ``Dice`` (NameError); the
        module is imported as ``dice``.
        """
        return dice.Dice.RollWithTarget(5, 1)
    def ReceiveHits(self, amount):
        """Apply `amount` hits and return remaining hit capacity.

        A dreadnought absorbs two hits total: the first damages it, the
        second destroys it. Returns 0 when destroyed.

        BUG FIX: the original returned 2 for amount == 0 even when the ship
        was already damaged; we now track damage and report the true
        remaining capacity.
        """
        amount_left = 2 - self.damage - amount
        if amount_left <= 0:
            if self.verbosity: print("NOTE: ship destroyed (Dreadnought)")
            return 0
        self.damage += amount
        return amount_left
    def CanHandleDamage(self):
        return 2
class Fighter(SpaceVehicle):
    """Fighter: half-cost swarm unit with no movement of its own."""
    def __init__(self, edition=4):
        super(Fighter, self).__init__(edition)
        self.shiptype = ShipType.FIGHTER
        self.production_cost = 0.5
        self.max_produceable = 10
    def GenerateHits(self):
        """Roll one die, hitting on 9 or better."""
        return dice.Dice.RollWithTarget(9, 1)
class Flagship(SpaceVehicle):
    """Flagship: unique, expensive faction capital ship."""
    def __init__(self, edition=4):
        # BUG FIX: the original called super(Fighter, self), which raises
        # TypeError because a Flagship instance is not a Fighter.
        super(Flagship, self).__init__(edition)
        self.max_produceable = 1
        self.production_cost = 5
        self.shiptype = ShipType.FLAGSHIP
    def GenerateHits(self):
        """Roll one die, hitting on 9 or better."""
        return dice.Dice.RollWithTarget(9, 1)
class Infantry(CombatUnit):
    # Ground force marker class; inherits all behaviour from CombatUnit.
    pass
class PDS(SpaceVehicle):
    """Planetary Defense System: immobile battery that shields its planet."""
    def __init__(self, edition=4):
        super(PDS, self).__init__(edition)
        self.shiptype = ShipType.PDS
        self.production_cost = 2
        self.max_produceable = 6
    def GenerateHits(self):
        """Roll one die, hitting on 6 or better."""
        return dice.Dice.RollWithTarget(6, 1)
    def ProvidePlanetaryShield(self):
        """A PDS protects its planet from bombardment."""
        return True
class SpaceDock(SpaceVehicle):
    """Space dock: production structure; relies on default combat behaviour."""
    def __init__(self, edition=4):
        super(SpaceDock, self).__init__(edition)
        self.shiptype = ShipType.SPACEDOCK
        self.production_cost = 4
        self.max_produceable = 6
class WarSun(SpaceVehicle):
    """War Sun: the most expensive ship; rolls three dice and sustains a hit."""
    def __init__(self, edition=4):
        super(WarSun, self).__init__(edition)
        self.movement = 2
        self.max_produceable = 2
        self.production_cost = 12
        self.cargo = []
        self.shiptype = ShipType.WARSUN
    def GenerateHits(self):
        """Roll three dice, each hitting on 3 or better."""
        return dice.Dice.RollWithTarget(3, 3)
    def ReceiveHits(self, amount):
        """Apply `amount` hits and return remaining hit capacity.

        A war sun absorbs two hits total: the first damages it, the second
        destroys it. Returns 0 when destroyed.

        BUG FIX: the original returned 2 for amount == 0 even when the ship
        was already damaged; we now track damage and report the true
        remaining capacity.
        """
        amount_left = 2 - self.damage - amount
        if amount_left <= 0:
            if self.verbosity: print("NOTE: ship destroyed (WarSun)")
            return 0
        self.damage += amount
        return amount_left
    def CanHandleDamage(self):
        return 2
if __name__ == '__main__':
    # No standalone behaviour yet; this module is meant to be imported.
    pass
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
import time
import csv
import json
from bigmler.tests.world import world, res_filename
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource
from bigmler.utils import storage_file_name
from bigmler.checkpoint import file_number_of_lines
from bigmler.tests.common_steps import check_debug
def shell_execute(command, output, test=None, options=None,
                  data=None, test_split=None):
    """Execute a bigmler command in a shell and record its output context.

    Updates the global `world` with the output directory, the number of test
    lines and the output path so later steps can check the results.
    """
    command = check_debug(command)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        if check_call(command, shell=True) < 0:
            assert False
        else:
            if test is not None:
                world.test_lines = file_number_of_lines(test)
                # Unless --prediction-header was requested, the test file
                # has a header row that must not be counted.
                if options is None or options.find('--prediction-header') == -1:
                    world.test_lines -= 1
            if test_split is not None:
                data_lines = file_number_of_lines(data) - 1
                world.test_lines = int(data_lines * float(test_split))
            world.output = output
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
#@step(r'I create BigML resources uploading train "(.*?)" file to create centroids for "(.*?)" and log predictions in "([^"]*)"$')
def i_create_all_cluster_resources(step, data=None, test=None, output=None):
    """Step: build a cluster from a train file and create centroids."""
    if data is None or test is None or output is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --train %s --test %s --k 8"
               " --store --output %s" % (res_filename(data), test, output))
    shell_execute(command, output, test=test)
#@step(r'I check that the cluster has been created')
def i_check_create_cluster(step):
    """Check that the cluster logged in the `clusters` file is ready."""
    cluster_file = "%s%sclusters" % (world.directory, os.sep)
    try:
        # FIX: Py3-compatible `except ... as` syntax below, and a context
        # manager so the handle is closed even when check_resource fails.
        with open(cluster_file, "r") as handler:
            cluster = check_resource(handler.readline().strip(),
                                     world.api.get_cluster)
        world.clusters.append(cluster['resource'])
        world.cluster = cluster
        assert True
    except Exception as exc:
        assert False, str(exc)
#@step(r'I check that the centroids are ready')
def i_check_create_centroids(step):
    """Check that the centroids output has as many lines as the test file."""
    try:
        # FIX: Py3-compatible `except ... as` syntax, removed the unused
        # `previous_lines` variable, and used a context manager so the
        # file is closed even on the failing assertion path.
        predictions_lines = 0
        with open(world.output, "r") as predictions_file:
            for line in predictions_file:
                predictions_lines += 1
        if predictions_lines == world.test_lines:
            assert True
        else:
            assert False, "predictions lines: %s, test lines: %s" % (
                predictions_lines, world.test_lines)
    except Exception as exc:
        assert False, str(exc)
#@step(r'the local centroids file is like "(.*)"')
def i_check_centroids(step, check_file):
    """Check that the local centroids file matches the expected contents."""
    check_file = res_filename(check_file)
    predictions_file = world.output
    try:
        predictions_file = csv.reader(open(predictions_file, "U"), lineterminator="\n")
        check_file = csv.reader(open(check_file, "U"), lineterminator="\n")
        for row in predictions_file:
            # FIX: `check_file.next()` and the bare `print` statement were
            # Python-2-only; next() and print(...) work on 2.6+ and 3.
            check_row = next(check_file)
            if len(check_row) != len(row):
                assert False
            for index in range(len(row)):
                if check_row[index] != row[index]:
                    print("%s %s" % (row, check_row))
                    assert False
        assert True
    except Exception as exc:
        assert False, str(exc)
#@step(r'I create BigML resources using dataset to find centroids for "(.*)" and log predictions in "(.*)"')
def i_create_cluster_resources_from_dataset(step, test=None, output=None):
    """Step: build a cluster from an existing dataset and create centroids."""
    if test is None or output is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --dataset %s --test %s --k 8"
               " --store --output %s" %
               (world.dataset['resource'], test, output))
    shell_execute(command, output, test=test)
#@step(r'I create BigML resources using source to find centroids for "(.*)" and log predictions in "(.*)"')
def i_create_cluster_resources_from_source(step, test=None, output=None):
    """Step: build a cluster from an existing source and create centroids."""
    if test is None or output is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --source %s --test %s --k 8"
               " --store --output %s" %
               (world.source['resource'], test, output))
    shell_execute(command, output, test=test)
#@step(r'I create BigML resources using local cluster in "(.*)" to find centroids for "(.*)" and log predictions in "(.*)"')
def i_create_cluster_resources_from_local_cluster(step, directory=None, test=None, output=None):
    """Step: use a locally stored cluster file to create centroids."""
    if test is None or output is None or directory is None:
        assert False
    test = res_filename(test)
    with open(os.path.join(directory, "clusters")) as handler:
        cluster_id = handler.readline().strip()
    command = ("bigmler cluster --cluster-file %s --test %s"
               " --store --output %s" %
               (storage_file_name(directory, cluster_id), test, output))
    shell_execute(command, output, test=test)
#@step(r'I create BigML resources using cluster to find centroids for "(.*)" and log predictions in "(.*)"')
def i_create_cluster_resources_from_cluster(step, test=None, output=None):
    """Step: use an existing remote cluster to create centroids."""
    if test is None or output is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --cluster %s --test %s --k 8"
               " --store --output %s" %
               (world.cluster['resource'], test, output))
    shell_execute(command, output, test=test)
#@step(r'I create BigML resources using clusters in file "(.*)" to find centroids for "(.*)" and log predictions in "(.*)"')
def i_create_cluster_resources_from_clusters_file(step, clusters_file=None, test=None, output=None):
    """Step: use a clusters list file to create centroids."""
    if test is None or output is None or clusters_file is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --clusters %s --test %s"
               " --store --output %s" % (clusters_file, test, output))
    shell_execute(command, output, test=test)
#@step(r'I create BigML resources uploading train "(.*?)" file to find centroids for "(.*?)" remotely to dataset with no CSV and log resources in "([^"]*)"$')
def i_create_all_cluster_resources_to_dataset(step, data=None, test=None, output_dir=None):
    """Step: remote centroids to a dataset, skipping the CSV output."""
    if data is None or test is None or output_dir is None:
        assert False
    test = res_filename(test)
    # Note: the double space before --store reproduces the original command.
    command = ("bigmler cluster --remote --train %s --test %s --k 8"
               " --to-dataset --no-csv  --store --output-dir %s" %
               (res_filename(data), test, output_dir))
    shell_execute(command, "%s/x.csv" % output_dir, test=test)
#@step(r'I create BigML resources uploading train "(.*?)" file to find centroids for "(.*?)" remotely with mapping file "(.*)" and log predictions in "([^"]*)"$')
def i_create_all_cluster_resources_with_mapping(step, data=None, test=None, fields_map=None, output=None):
    """Step: remote centroids using an explicit fields map."""
    if data is None or test is None or output is None or fields_map is None:
        assert False
    test = res_filename(test)
    command = ("bigmler cluster --remote --train %s --test %s --k 8"
               " --fields-map %s --store --output %s" %
               (res_filename(data), test, res_filename(fields_map), output))
    shell_execute(command, output, test=test)
#@step(r'I generate datasets for "(.*?)" centroids and log predictions in "(.*?)"$')
def i_create_datasets_from_cluster(step, centroids=None, output=None):
    """Step: generate per-centroid datasets from the current cluster."""
    if centroids is None or output is None:
        assert False
    command = ("bigmler cluster --cluster %s --cluster-datasets \"%s\""
               " --store --output %s" %
               (world.cluster['resource'], centroids, output))
    shell_execute(command, output, test=None)
#@step(r'I check that the (\d+) cluster datasets are ready$')
def i_check_cluster_datasets(step, datasets_number=None):
    """Check that the expected number of cluster datasets were generated."""
    try:
        # FIX: Py3-compatible `except ... as` syntax and a context manager
        # so the file handle is closed even on the failing path.
        datasets_file = os.path.join(world.directory, "dataset_cluster")
        with open(datasets_file, "r") as handler:
            dataset_ids = handler.readlines()
        world.datasets.extend(dataset_ids)
        if int(datasets_number) == len(dataset_ids):
            assert True
        else:
            assert False, "generated datasets %s, expected %s" % (
                len(dataset_ids), datasets_number)
    except Exception as exc:
        assert False, str(exc)
#@step(r'I check that the (\d+) cluster models are ready$')
def i_check_cluster_models(step, models_number=None):
    """Check that the expected number of cluster models were generated."""
    try:
        # FIX: Py3-compatible `except ... as` syntax and a context manager
        # so the file handle is closed even on the failing path.
        models_file = os.path.join(world.directory, "models_cluster")
        with open(models_file, "r") as handler:
            model_ids = handler.readlines()
        world.models.extend(model_ids)
        if int(models_number) == len(model_ids):
            assert True
        else:
            assert False, "generated models %s, expected %s" % (
                len(model_ids), models_number)
    except Exception as exc:
        assert False, str(exc)
#@step(r'I generate models for "(.*?)" centroids and log results in "(.*?)"$')
def i_create_models_from_cluster(step, centroids=None, output=None):
    """Step: generate per-centroid models from the current dataset."""
    if centroids is None or output is None:
        assert False
    command = ("bigmler cluster --dataset %s --cluster-models \"%s\""
               " --k 4 --store --output %s" %
               (world.dataset['resource'], centroids, output))
    shell_execute(command, output, test=None)
|
|
"""
.. module: security_monkey.watcher
:platform: Unix
:synopsis: Slurps the current config from AWS and compares it to what has previously
been recorded in the database to find any changes.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from botocore.exceptions import ClientError
from common.PolicyDiff import PolicyDiff
from common.utils import sub_dict
from security_monkey import app
from security_monkey.datastore import Account, IgnoreListEntry, db
from security_monkey.datastore import Technology, WatcherConfig, store_exception
from security_monkey.common.jinja import get_jinja_env
from security_monkey.alerters.custom_alerter import report_watcher_changes
from boto.exception import BotoServerError
import time
import datastore
from copy import deepcopy
import dpath.util
from dpath.exceptions import PathNotFound
import logging
# Global registry mapping a watcher's `index` to its class; populated by the
# WatcherType metaclass as concrete subclasses are defined. Classes listed in
# `abstract_classes` are never registered.
watcher_registry = {}
abstract_classes = set(['Watcher', 'CloudAuxWatcher', 'CloudAuxBatchedWatcher'])
if not app.config.get("DONT_IGNORE_BOTO_VERBOSE_LOGGERS"):
    # Quiet very chatty boto loggers unless the operator opts out.
    logging.getLogger('botocore.vendored.requests.packages.urllib3').setLevel(logging.WARNING)
    logging.getLogger('botocore.credentials').setLevel(logging.WARNING)
class WatcherType(type):
    """Metaclass that auto-registers concrete Watcher subclasses.

    Any class using this metaclass (outside `abstract_classes`) with a
    truthy `index` is added to the module-level `watcher_registry`.
    """
    def __init__(cls, name, bases, attrs):
        super(WatcherType, cls).__init__(name, bases, attrs)
        if cls.__name__ not in abstract_classes and cls.index:
            app.logger.debug("Registering watcher {} {}.{}".format(cls.index, cls.__module__, cls.__name__))
            watcher_registry[cls.index] = cls
class Watcher(object):
    """Slurps the current config from AWS and compares it to what has previously
    been recorded in the database to find any changes."""
    # Class-level defaults; concrete watchers override index/i_am_* and may
    # tune interval/active. NOTE(review): `ignore_list = []` is a shared
    # class attribute, but prep_for_slurp always rebinds it per instance.
    index = 'abstract'
    i_am_singular = 'Abstract'
    i_am_plural = 'Abstracts'
    rate_limit_delay = 0
    ignore_list = []
    interval = 60  # in minutes
    active = True
    account_type = 'AWS'
    # Python-2-style metaclass hook: registers subclasses in watcher_registry.
    __metaclass__ = WatcherType
    def __init__(self, accounts=None, debug=False):
        """Initializes the Watcher.

        Args:
            accounts: optional list of account names to restrict the watcher
                to; defaults to all active, non-third-party accounts.
            debug: verbose flag carried on the instance.
        Raises:
            ValueError: when no matching active account exists.
        """
        self.datastore = datastore.Datastore()
        if not accounts:
            accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).all()
        else:
            accounts = Account.query.filter(Account.third_party==False).filter(Account.active==True).filter(Account.name.in_(accounts)).all()
        if not accounts:
            raise ValueError('Watcher needs a valid account')
        self.accounts = [account.name for account in accounts]
        self.account_identifiers = [account.identifier for account in accounts]
        self.debug = debug
        self.created_items = []
        self.deleted_items = []
        self.changed_items = []
        self.ephemeral_items = []
        # TODO: grab these from DB, keyed on account
        self.rate_limit_delay = 0
        self.honor_ephemerals = False
        self.ephemeral_paths = []
        # Batching attributes:
        self.batched_size = 0  # Don't batch anything by default
        self.done_slurping = True  # Don't batch anything by default
        self.total_list = []  # This will hold the full list of items to batch over
        self.batch_counter = 0  # Keeps track of the batch we are on -- can be used for retry logic
        self.current_account = None  # Tuple that holds the current account and account index we are on.
        self.technology = None
        # Region is probably not needed if we are using CloudAux's iter_account_region -- will test this in
        # the future as we add more items with batching support.
    def prep_for_slurp(self):
        """
        Should be run before slurp is run to grab the IgnoreList.
        """
        query = IgnoreListEntry.query
        query = query.join((Technology, Technology.id == IgnoreListEntry.tech_id))
        self.ignore_list = query.filter(Technology.name == self.index).all()
    def prep_for_batch_slurp(self):
        """
        Should be run before batching slurps to set the current account (and region).
        This will load the DB objects for account and technology for where we are currently at in the process.
        :return:
        """
        self.prep_for_slurp()
        # Which account are we currently on?
        if not self.current_account:
            index = 0
            # Get the Technology
            # If technology doesn't exist, then create it:
            technology = Technology.query.filter(Technology.name == self.index).first()
            if not technology:
                technology = Technology(name=self.index)
                db.session.add(technology)
                db.session.commit()
                app.logger.info("Technology: {} did not exist... created it...".format(self.index))
            self.technology = technology
        else:
            # Advance to the next account in self.accounts.
            index = self.current_account[1] + 1
        self.current_account = (Account.query.filter(Account.name == self.accounts[index]).one(), index)
        # We will not be using CloudAux's iter_account_region for multi-account -- we want
        # to have per-account level of batching
        self.total_list = []  # Reset the total list for a new account to run against.
        self.done_slurping = False
        self.batch_counter = 0
    def check_ignore_list(self, name):
        """
        See if the given item has a name flagging it to be ignored by security_monkey.
        """
        for result in self.ignore_list:
            # Empty prefix comes back as None instead of an empty string ...
            prefix = result.prefix or ""
            if name.lower().startswith(prefix.lower()):
                app.logger.info("Ignoring {}/{} because of IGNORELIST prefix {}".format(self.index, name, result.prefix))
                return True
        return False
    def wrap_aws_rate_limited_call(self, awsfunc, *args, **nargs):
        """Call `awsfunc`, retrying forever on AWS throttling errors.

        Doubles `self.rate_limit_delay` (capped at 4s-range backoff) on each
        throttle and resets it to 0 after the first successful call.
        """
        attempts = 0
        def increase_delay():
            # Exponential backoff: 0 -> 1 -> 2 -> 4 seconds, then hold.
            if self.rate_limit_delay == 0:
                self.rate_limit_delay = 1
                app.logger.warn(('Being rate-limited by AWS. Increasing delay on tech {} ' +
                                 'in account {} from 0 to 1 second. Attempt {}')
                                .format(self.index, self.accounts, attempts))
            elif self.rate_limit_delay < 4:
                self.rate_limit_delay = self.rate_limit_delay * 2
                app.logger.warn(('Still being rate-limited by AWS. Increasing delay on tech {} ' +
                                 'in account {} to {} seconds. Attempt {}')
                                .format(self.index, self.accounts, self.rate_limit_delay, attempts))
            else:
                app.logger.warn(('Still being rate-limited by AWS. Keeping delay on tech {} ' +
                                 'in account {} at {} seconds. Attempt {}')
                                .format(self.index, self.accounts, self.rate_limit_delay, attempts))
        while True:
            attempts = attempts + 1
            try:
                if self.rate_limit_delay > 0:
                    time.sleep(self.rate_limit_delay)
                retval = awsfunc(*args, **nargs)
                if self.rate_limit_delay > 0:
                    app.logger.warn("Successfully Executed Rate-Limited Function. "
                                    "Tech: {} Account: {}. Removing sleep period."
                                    .format(self.index, self.accounts))
                    self.rate_limit_delay = 0
                return retval
            except BotoServerError as e:  # Boto
                if not e.error_code == 'Throttling':
                    raise e
                increase_delay()
            except ClientError as e:  # Botocore
                if not e.response["Error"]["Code"] == "Throttling":
                    raise e
                increase_delay()
    def created(self):
        """
        Used by the Jinja templates
        :returns: True if created_items is not empty
        :returns: False otherwise.
        """
        return len(self.created_items) > 0
    def deleted(self):
        """
        Used by the Jinja templates
        :returns: True if deleted_items is not empty
        :returns: False otherwise.
        """
        return len(self.deleted_items) > 0
    def changed(self):
        """
        Used by the Jinja templates
        :returns: True if changed_items is not empty
        :returns: False otherwise.
        """
        return len(self.changed_items) > 0
    def slurp_list(self):
        """
        This will fetch all the items in question that will need to get slurped.
        This is used to know what we are going to have to batch up.
        :return:
        """
        raise NotImplementedError()
    def slurp(self):
        """
        method to slurp configuration from AWS for whatever it is that I'm
        interested in. This will be overridden for each technology.
        """
        raise NotImplementedError()
    def slurp_exception(self, location=None, exception=None, exception_map={}, source="watcher"):
        """
        Logs any exceptions that happen in slurp and adds them to the exception_map
        using their location as the key. The location is a tuple in the form:
        (technology, account, region, item_name) that describes the object where the exception occurred.
        Location can also exclude an item_name if the exception is region wide.

        NOTE(review): `exception_map={}` is a mutable default that IS mutated
        below; callers appear to always pass their own map — confirm before
        relying on the default.
        """
        if location in exception_map:
            app.logger.debug("Exception map already has location {}. This should not happen.".format(location))
        exception_map[location] = exception
        app.logger.debug("Adding {} to the exceptions list. Exception was: {}".format(location, str(exception)))
        # Store it to the database:
        store_exception(source, location, exception)
    def location_in_exception_map(self, item_location, exception_map={}):
        """
        Determines whether a given location is covered by an exception already in the
        exception map.
        Item location: (self.index, self.account, self.region, self.name)
        exception Maps: (index, account, region, name)
                        (index, account, region)
                        (index, account)
        :returns: True if location is covered by an entry in the exception map.
        :returns: False if location is not covered by an entry in the exception map.
        """
        # Exact Match
        if item_location in exception_map:
            app.logger.debug("Skipping {} due to an item-level exception {}.".format(item_location, exception_map[item_location]))
            return True
        # (index, account, region)
        if item_location[0:3] in exception_map:
            app.logger.debug("Skipping {} due to a region-level exception {}.".format(item_location, exception_map[item_location[0:3]]))
            return True
        # (index, account)
        if item_location[0:2] in exception_map:
            app.logger.debug("Skipping {} due to an account-level exception {}.".format(item_location, exception_map[item_location[0:2]]))
            return True
        # (index)
        if item_location[0:1] in exception_map:
            app.logger.debug("Skipping {} due to a technology-level exception {}.".format(item_location, exception_map[item_location[0:1]]))
            return True
        return False
    def find_deleted(self, previous=[], current=[], exception_map={}):
        """
        Find any items that have been deleted since the last run of the watcher.
        Add these items to the deleted_items list.

        NOTE(review): mutable defaults here are read-only in this method, so
        the shared-default pitfall does not bite.
        """
        prev_map = {item.location(): item for item in previous}
        curr_map = {item.location(): item for item in current}
        # Locations present before but not now, excluding excepted ones.
        item_locations = list(set(prev_map).difference(set(curr_map)))
        item_locations = [item_location for item_location in item_locations if not self.location_in_exception_map(item_location, exception_map)]
        list_deleted_items = [prev_map[item] for item in item_locations]
        for item in list_deleted_items:
            deleted_change_item = ChangeItem.from_items(old_item=item, new_item=None, source_watcher=self)
            app.logger.debug("%s: %s/%s/%s deleted" % (self.i_am_singular, item.account, item.region, item.name))
            self.deleted_items.append(deleted_change_item)
    def find_new(self, previous=[], current=[]):
        """
        Find any new objects that have been created since the last run of the watcher.
        Add these items to the created_items list.
        """
        prev_map = {item.location(): item for item in previous}
        curr_map = {item.location(): item for item in current}
        item_locations = list(set(curr_map).difference(set(prev_map)))
        list_new_items = [curr_map[item] for item in item_locations]
        for item in list_new_items:
            new_change_item = ChangeItem.from_items(old_item=None, new_item=item, source_watcher=self)
            self.created_items.append(new_change_item)
            app.logger.debug("%s: %s/%s/%s created" % (self.i_am_singular, item.account, item.region, item.name))
    def find_modified(self, previous=[], current=[], exception_map={}):
        """
        Find any objects that have been changed since the last run of the watcher.
        Add these items to the changed_items list.
        """
        prev_map = {item.location(): item for item in previous}
        curr_map = {item.location(): item for item in current}
        # Only compare locations that exist on both sides and are not excepted.
        item_locations = list(set(curr_map).intersection(set(prev_map)))
        item_locations = [item_location for item_location in item_locations if not self.location_in_exception_map(item_location, exception_map)]
        for location in item_locations:
            prev_item = prev_map[location]
            curr_item = curr_map[location]
            # ChangeItem with and without ephemeral changes
            eph_change_item = None
            dur_change_item = None
            if not sub_dict(prev_item.config) == sub_dict(curr_item.config):
                eph_change_item = ChangeItem.from_items(old_item=prev_item, new_item=curr_item, source_watcher=self)
            if self.ephemerals_skipped():
                # deepcopy configs before filtering
                dur_prev_item = deepcopy(prev_item)
                dur_curr_item = deepcopy(curr_item)
                # filter-out ephemeral paths in both old and new config dicts
                for path in self.ephemeral_paths:
                    for cfg in [dur_prev_item.config, dur_curr_item.config]:
                        try:
                            dpath.util.delete(cfg, path, separator='$')
                        except PathNotFound:
                            pass
                # now, compare only non-ephemeral paths
                if not sub_dict(dur_prev_item.config) == sub_dict(dur_curr_item.config):
                    dur_change_item = ChangeItem.from_items(old_item=dur_prev_item, new_item=dur_curr_item,
                                                            source_watcher=self)
                # store all changes, divided in specific categories
                if eph_change_item:
                    self.ephemeral_items.append(eph_change_item)
                    app.logger.debug("%s: ephemeral changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
                if dur_change_item:
                    self.changed_items.append(dur_change_item)
                    app.logger.debug("%s: durable changes in item %s/%s/%s" % (self.i_am_singular, dur_change_item.account, dur_change_item.region, dur_change_item.name))
            elif eph_change_item is not None:
                # store all changes, handle them all equally
                self.changed_items.append(eph_change_item)
                app.logger.debug("%s: changes in item %s/%s/%s" % (self.i_am_singular, eph_change_item.account, eph_change_item.region, eph_change_item.name))
    def find_changes(self, current=None, exception_map=None):
        """
        Identify changes between the configuration I have and what I had
        last time the watcher ran.
        This ignores any account/region which caused an exception during slurp.
        """
        current = current or []
        exception_map = exception_map or {}
        # Batching only logic here:
        if self.batched_size > 0:
            # Return the items that should be audited:
            return self.find_changes_batch(current, exception_map)
        else:
            prev = self.read_previous_items()
            self.find_deleted(previous=prev, current=current, exception_map=exception_map)
            self.find_new(previous=prev, current=current)
            self.find_modified(previous=prev, current=current, exception_map=exception_map)
    def find_changes_batch(self, items, exception_map):
        """Batched change detection: hash each item, persist it, and return
        the items with durable (non-ephemeral) changes for auditing."""
        # Given the list of items, find new items that don't yet exist:
        durable_items = []
        # Imported here (not at module level) -- presumably to avoid an
        # import cycle with datastore_utils; confirm before hoisting.
        from security_monkey.datastore_utils import hash_item, detect_change, persist_item
        for item in items:
            complete_hash, durable_hash = hash_item(item.config, self.ephemeral_paths)
            # Detect if a change occurred:
            is_change, change_type, db_item, created_changed = detect_change(
                item, self.current_account[0], self.technology, complete_hash, durable_hash)
            if not is_change:
                continue
            is_durable = (change_type == "durable")
            if is_durable:
                durable_items.append(item)
                if created_changed == 'created':
                    self.created_items.append(ChangeItem.from_items(old_item=None, new_item=item, source_watcher=self))
                if created_changed == 'changed':
                    db_item.audit_issues = db_item.issues
                    db_item.config = db_item.revisions.first().config
                    # At this point, a durable change was detected. If the complete hash is the same,
                    # then the durable hash is out of date, and this is not a real item change. This could happen if the
                    # ephemeral definitions change (this will be fixed in persist_item).
                    # Only add the items to the changed item list that are real item changes:
                    if db_item.latest_revision_complete_hash != complete_hash:
                        self.changed_items.append(ChangeItem.from_items(old_item=db_item, new_item=item,
                                                                        source_watcher=self))
            persist_item(item, db_item, self.technology, self.current_account[0], complete_hash,
                         durable_hash, is_durable)
        return durable_items
    def find_deleted_batch(self, exception_map):
        """Batched deletion detection: deactivate DB revisions whose ARN is no
        longer present in the freshly slurped `total_list`."""
        from datastore_utils import inactivate_old_revisions
        existing_arns = [item["Arn"] for item in self.total_list if item.get("Arn")]
        deleted_items = inactivate_old_revisions(self, existing_arns, self.current_account[0], self.technology)
        for item in deleted_items:
            # An inactive revision has already been commited to the DB.
            # So here, we need to pull the last two revisions to build out our
            # ChangeItem.
            recent_revisions = item.revisions.limit(2).all()
            old_config = recent_revisions[1].config
            new_config = recent_revisions[0].config
            change_item = ChangeItem(
                index=item.technology.name, region=item.region,
                account=item.account.name, name=item.name, arn=item.arn,
                old_config=old_config, new_config=new_config, active=False,
                audit_issues=item.issues)
            self.deleted_items.append(change_item)
    def read_previous_items(self):
        """
        Pulls the last-recorded configuration from the database.
        :return: List of all items for the given technology and the given account.
        """
        prev_list = []
        for account in self.accounts:
            prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
            # Returns a map of {Item: ItemRevision}
            for item in prev:
                item_revision = prev[item]
                new_item = ChangeItem(index=self.index,
                                      region=item.region,
                                      account=item.account.name,
                                      name=item.name,
                                      new_config=item_revision.config)
                prev_list.append(new_item)
        return prev_list
    def is_changed(self):
        """
        Note: It is intentional that self.ephemeral_items is not included here
        so that emails will not go out about those changes.
        Those changes will still be recorded in the database and visible in the UI.
        :return: boolean whether or not we've found any changes
        """
        return self.deleted_items or self.created_items or self.changed_items
    def issues_found(self):
        """
        Runs through any changed items to see if any have issues.
        :return: boolean whether any changed items have issues
        """
        has_issues = False
        has_new_issue = False
        has_unjustified_issue = False
        for item in self.created_items + self.changed_items:
            if item.audit_issues:
                has_issues = True
                if item.found_new_issue:
                    has_new_issue = True
                    has_unjustified_issue = True
                    break
                for issue in item.confirmed_existing_issues:
                    if not issue.justified:
                        has_unjustified_issue = True
                        break
        return has_issues, has_new_issue, has_unjustified_issue
    def save(self):
        """
        save new configs, if necessary
        """
        app.logger.info("{} deleted {} in {}".format(len(self.deleted_items), self.i_am_plural, self.accounts))
        app.logger.info("{} created {} in {}".format(len(self.created_items), self.i_am_plural, self.accounts))
        for item in self.created_items + self.deleted_items:
            item.save(self.datastore)
        if self.ephemerals_skipped():
            # Ephemeral items whose location also had a durable change get a
            # full revision; purely ephemeral edits are saved in-place.
            changed_locations = [item.location() for item in self.changed_items]
            new_item_revisions = [item for item in self.ephemeral_items if item.location() in changed_locations]
            app.logger.info("{} changed {} in {}".format(len(new_item_revisions), self.i_am_plural, self.accounts))
            for item in new_item_revisions:
                item.save(self.datastore)
            edit_item_revisions = [item for item in self.ephemeral_items if item.location() not in changed_locations]
            app.logger.info("{} ephemerally changed {} in {}".format(len(edit_item_revisions), self.i_am_plural, self.accounts))
            for item in edit_item_revisions:
                item.save(self.datastore, ephemeral=True)
        else:
            app.logger.info("{} changed {} in {}".format(len(self.changed_items), self.i_am_plural, self.accounts))
            for item in self.changed_items:
                item.save(self.datastore)
        report_watcher_changes(self)
    def plural_name(self):
        """
        Used for Jinja Template
        :return: i_am_plural
        """
        return self.i_am_plural
    def singular_name(self):
        """
        Used for Jinja Template
        :return: i_am_singular
        """
        return self.i_am_singular
    def get_interval(self):
        """ Returns interval time (in minutes) """
        config = WatcherConfig.query.filter(WatcherConfig.index == self.index).first()
        if config:
            return config.interval
        return self.interval
    def is_active(self):
        """ Returns active """
        config = WatcherConfig.query.filter(WatcherConfig.index == self.index).first()
        if config:
            return config.active
        return self.active
    def ephemerals_skipped(self):
        """ Returns whether ephemerals locations are ignored """
        return self.honor_ephemerals
class ChangeItem(object):
    """
    Object tracks two different revisions of a given item.

    Holds the item's identity (index/account/region/name/arn), its old and
    new configuration snapshots, and the audit issues attached to it, so the
    diff can be rendered for templates/emails and the new revision persisted
    to the datastore.
    """
    def __init__(self, index=None, region=None, account=None, name=None, arn=None, old_config=None, new_config=None,
                 active=False, audit_issues=None, source_watcher=None):
        """Record identity, configs, and audit state for one item revision pair.

        :param index: datastore index the item belongs to
        :param region: region of the item
        :param account: account that owns the item
        :param name: item name
        :param arn: Amazon Resource Name of the item, if any
        :param old_config: previous configuration dict (falsy values become {})
        :param new_config: current configuration dict (falsy values become {})
        :param active: whether the item currently exists
        :param audit_issues: issues attached to the item (None becomes [])
        :param source_watcher: the watcher instance that produced this item
        """
        self.index = index
        self.region = region
        self.account = account
        self.name = name
        self.arn = arn
        # Default both configs to empty dicts so diffing never handles None.
        self.old_config = old_config if old_config else {}
        self.new_config = new_config if new_config else {}
        self.active = active
        self.audit_issues = audit_issues or []
        # Issue bookkeeping, populated later by the auditing flow.
        self.confirmed_new_issues = []
        self.confirmed_fixed_issues = []
        self.confirmed_existing_issues = []
        self.found_new_issue = False
        self.watcher = source_watcher
    @classmethod
    def from_items(cls, old_item=None, new_item=None, source_watcher=None):
        """
        Create ChangeItem from two separate items.

        Identity fields come from the new item when present, otherwise from
        the old one. Audit issues are carried over from the old item, and the
        result is marked active only when a new revision exists. Returns None
        (implicitly) when both items are missing.
        :return: An instance of ChangeItem
        """
        if not old_item and not new_item:
            return
        valid_item = new_item if new_item else old_item
        audit_issues = old_item.audit_issues if old_item else []
        active = True if new_item else False
        old_config = old_item.config if old_item else {}
        new_config = new_item.config if new_item else {}
        return cls(index=valid_item.index,
                   region=valid_item.region,
                   account=valid_item.account,
                   name=valid_item.name,
                   arn=valid_item.arn,
                   old_config=old_config,
                   new_config=new_config,
                   active=active,
                   audit_issues=audit_issues,
                   source_watcher=source_watcher)
    @property
    def config(self):
        """The current (new) configuration dict."""
        return self.new_config
    def location(self):
        """
        Construct a location from the object.
        :return: tuple containing index, account, region, and name.
        """
        return (self.index, self.account, self.region, self.name)
    def get_pdiff_html(self):
        """Render an HTML diff of the new config against the old via PolicyDiff."""
        pdiff = PolicyDiff(self.new_config, self.old_config)
        return pdiff.produceDiffHTML()
    def _dict_for_template(self):
        """Build the context dict consumed by the change-item Jinja template."""
        return {
            'account': self.account,
            'region': self.region,
            'name': self.name,
            'confirmed_new_issues': self.confirmed_new_issues,
            'confirmed_fixed_issues': self.confirmed_fixed_issues,
            'confirmed_existing_issues': self.confirmed_existing_issues,
            'pdiff_html': self.get_pdiff_html()
        }
    def description(self):
        """
        Provide an HTML description of the object for change emails and the Jinja templates.
        :return: string of HTML describing the object.
        """
        jenv = get_jinja_env()
        template = jenv.get_template('jinja_change_item.html')
        body = template.render(self._dict_for_template())
        # app.logger.info(body)
        return body
    def save(self, datastore, ephemeral=False):
        """
        Save the item

        :param datastore: datastore used to persist the new revision
        :param ephemeral: when True, the revision is stored as ephemeral
        """
        app.logger.debug("Saving {}/{}/{}/{}\n\t{}".format(self.index, self.account, self.region, self.name, self.new_config))
        self.db_item = datastore.store(
            self.index,
            self.region,
            self.account,
            self.name,
            self.active,
            self.new_config,
            arn=self.arn,
            new_issues=self.audit_issues,
            ephemeral=ephemeral,
            source_watcher=self.watcher)
|
|
# stdlib
import os
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
from util import get_hostname
@attr(requires='haproxy')
class HaproxyTest(AgentCheckTest):
    """Integration tests for the haproxy agent check.

    Runs the check against a local haproxy (stats on port 3835 with auth,
    port 3836 open) and asserts on the emitted metrics, tags, and service
    checks. Version-gated metric lists are selected from $FLAVOR_VERSION.
    """
    CHECK_NAME = 'haproxy'
    BACKEND_SERVICES = ['anotherbackend', 'conmon']
    BACKEND_LIST = ['singleton:8080', 'singleton:8081', 'otherserver']
    FRONTEND_CHECK_GAUGES = [
        'haproxy.frontend.session.current',
        'haproxy.frontend.session.limit',
        'haproxy.frontend.session.pct',
    ]
    # Gauges only exposed by haproxy >= 1.4
    FRONTEND_CHECK_GAUGES_POST_1_4 = [
        'haproxy.frontend.requests.rate',
    ]
    BACKEND_CHECK_GAUGES = [
        'haproxy.backend.queue.current',
        'haproxy.backend.session.current',
    ]
    # Gauges only exposed by haproxy >= 1.5
    BACKEND_CHECK_GAUGES_POST_1_5 = [
        'haproxy.backend.queue.time',
        'haproxy.backend.connect.time',
        'haproxy.backend.response.time',
        'haproxy.backend.session.time',
    ]
    FRONTEND_CHECK_RATES = [
        'haproxy.frontend.bytes.in_rate',
        'haproxy.frontend.bytes.out_rate',
        'haproxy.frontend.denied.req_rate',
        'haproxy.frontend.denied.resp_rate',
        'haproxy.frontend.errors.req_rate',
        'haproxy.frontend.session.rate',
    ]
    # Rates only exposed by haproxy >= 1.4
    FRONTEND_CHECK_RATES_POST_1_4 = [
        'haproxy.frontend.response.1xx',
        'haproxy.frontend.response.2xx',
        'haproxy.frontend.response.3xx',
        'haproxy.frontend.response.4xx',
        'haproxy.frontend.response.5xx',
        'haproxy.frontend.response.other',
    ]
    BACKEND_CHECK_RATES = [
        'haproxy.backend.bytes.in_rate',
        'haproxy.backend.bytes.out_rate',
        'haproxy.backend.denied.resp_rate',
        'haproxy.backend.errors.con_rate',
        'haproxy.backend.errors.resp_rate',
        'haproxy.backend.session.rate',
        'haproxy.backend.warnings.redis_rate',
        'haproxy.backend.warnings.retr_rate',
    ]
    # Rates only exposed by haproxy >= 1.4
    BACKEND_CHECK_RATES_POST_1_4 = [
        'haproxy.backend.response.1xx',
        'haproxy.backend.response.2xx',
        'haproxy.backend.response.3xx',
        'haproxy.backend.response.4xx',
        'haproxy.backend.response.5xx',
        'haproxy.backend.response.other',
    ]

    @staticmethod
    def _flavor_version_at_least(major, minor):
        """Return True when $FLAVOR_VERSION is at least ``major.minor``.

        BUG FIX: the previous inline checks compared version components as
        *strings* (e.g. ['1', '10'] >= ['1', '4'] is False because '10' sorts
        before '4' lexicographically), so version-gated metrics were silently
        skipped for e.g. haproxy 1.10+. Comparing integer tuples is correct.
        An unset or non-numeric FLAVOR_VERSION is treated as "too old"
        (returns False), matching the old behavior for an unset variable.
        """
        parts = os.environ.get('FLAVOR_VERSION', '').split('.')[:2]
        try:
            version = tuple(int(part) for part in parts)
        except ValueError:
            return False
        return version >= (major, minor)

    def __init__(self, *args, **kwargs):
        AgentCheckTest.__init__(self, *args, **kwargs)
        # Authenticated stats endpoint with per-host service-check tagging.
        self.config = {
            "instances": [{
                'url': 'http://localhost:3835/stats',
                'username': 'conmon',
                'password': 'isdevops',
                'status_check': True,
                'collect_aggregates_only': False,
                'tag_service_check_by_host': True,
            }]
        }
        # Unauthenticated (open) stats endpoint.
        self.config_open = {
            'instances': [{
                'url': 'http://localhost:3836/stats',
                'collect_aggregates_only': False,
            }]
        }

    def _test_frontend_metrics(self, shared_tag):
        """Assert every frontend gauge/rate was emitted once for the public service."""
        frontend_tags = shared_tag + ['type:FRONTEND', 'service:public']
        for gauge in self.FRONTEND_CHECK_GAUGES:
            self.assertMetric(gauge, tags=frontend_tags, count=1)
        if self._flavor_version_at_least(1, 4):
            for gauge in self.FRONTEND_CHECK_GAUGES_POST_1_4:
                self.assertMetric(gauge, tags=frontend_tags, count=1)
        for rate in self.FRONTEND_CHECK_RATES:
            self.assertMetric(rate, tags=frontend_tags, count=1)
        if self._flavor_version_at_least(1, 4):
            for rate in self.FRONTEND_CHECK_RATES_POST_1_4:
                self.assertMetric(rate, tags=frontend_tags, count=1)

    def _test_backend_metrics(self, shared_tag, services=None):
        """Assert backend gauges/rates for every (service, backend) combination."""
        backend_tags = shared_tag + ['type:BACKEND']
        if not services:
            services = self.BACKEND_SERVICES
        for service in services:
            for backend in self.BACKEND_LIST:
                tags = backend_tags + ['service:' + service, 'backend:' + backend]
                for gauge in self.BACKEND_CHECK_GAUGES:
                    self.assertMetric(gauge, tags=tags, count=1)
                if self._flavor_version_at_least(1, 5):
                    for gauge in self.BACKEND_CHECK_GAUGES_POST_1_5:
                        self.assertMetric(gauge, tags=tags, count=1)
                for rate in self.BACKEND_CHECK_RATES:
                    self.assertMetric(rate, tags=tags, count=1)
                if self._flavor_version_at_least(1, 4):
                    for rate in self.BACKEND_CHECK_RATES_POST_1_4:
                        self.assertMetric(rate, tags=tags, count=1)

    def _test_service_checks(self, services=None):
        """Assert UNKNOWN per-host service checks and an OK aggregate per service."""
        if not services:
            services = self.BACKEND_SERVICES
        for service in services:
            for backend in self.BACKEND_LIST:
                tags = ['service:' + service, 'backend:' + backend]
                self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
                                        status=AgentCheck.UNKNOWN,
                                        count=1,
                                        tags=tags)
            tags = ['service:' + service, 'backend:BACKEND']
            self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
                                    status=AgentCheck.OK,
                                    count=1,
                                    tags=tags)

    def test_check(self):
        """End-to-end run against the authenticated endpoint."""
        self.run_check_twice(self.config)
        shared_tag = ['instance_url:http://localhost:3835/stats']
        self._test_frontend_metrics(shared_tag)
        self._test_backend_metrics(shared_tag)
        # check was run 2 times
        #       - FRONTEND is reporting OPEN that we ignore
        #       - only the BACKEND aggregate is reporting UP -> OK
        #       - The 3 individual servers are returning no check -> UNKNOWN
        self._test_service_checks()
        # Make sure the service checks aren't tagged with an empty hostname.
        self.assertEquals(self.service_checks[0]['host_name'], get_hostname())
        self.coverage_report()

    def test_check_service_filter(self):
        """Include/exclude filters should restrict metrics to the 'conmon' service."""
        config = self.config
        config['instances'][0]['services_include'] = ['conmon']
        config['instances'][0]['services_exclude'] = ['.*']
        self.run_check_twice(config)
        shared_tag = ['instance_url:http://localhost:3835/stats']
        self._test_backend_metrics(shared_tag, ['conmon'])
        self._test_service_checks(['conmon'])
        self.coverage_report()

    def test_wrong_config(self):
        """Bad credentials must raise and emit nothing."""
        config = self.config
        config['instances'][0]['username'] = 'fake_username'
        self.assertRaises(Exception, lambda: self.run_check(config))
        # Test that nothing has been emitted
        self.coverage_report()

    def test_open_config(self):
        """End-to-end run against the open (no-auth) endpoint."""
        self.run_check_twice(self.config_open)
        shared_tag = ['instance_url:http://localhost:3836/stats']
        self._test_frontend_metrics(shared_tag)
        self._test_backend_metrics(shared_tag)
        self._test_service_checks()
        # This time, make sure the hostname is empty
        self.assertEquals(self.service_checks[0]['host_name'], '')
        self.coverage_report()

    # Keeping a mocked test since it tests the internal
    # process of service checks
    def test_count_per_statuses(self):
        """Status counting from a canned CSV stats payload (no live haproxy needed)."""
        from collections import defaultdict
        self.run_check(self.config)
        data = """# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
a,FRONTEND,,,1,2,12,1,11,11,0,0,0,,,,,OPEN,,,,,,,,,1,1,0,,,,0,1,0,2,,,,0,1,0,0,0,0,,1,1,1,,,
a,BACKEND,0,0,0,0,12,0,11,11,0,0,,0,0,0,0,UP,0,0,0,,0,1221810,0,,1,1,0,,0,,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,
b,FRONTEND,,,1,2,12,11,11,0,0,0,0,,,,,OPEN,,,,,,,,,1,2,0,,,,0,0,0,1,,,,,,,,,,,0,0,0,,,
b,i-1,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,1,1,30,,1,3,1,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-2,0,0,1,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,2,,71,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-3,0,0,0,1,,1,1,0,,0,,0,0,0,0,UP,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-4,0,0,0,1,,1,1,0,,0,,0,0,0,0,DOWN,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,i-5,0,0,0,1,,1,1,0,,0,,0,0,0,0,MAINT,1,1,0,0,0,1,0,,1,3,3,,70,,2,0,,1,1,,0,,,,,,,0,,,,0,0,
b,BACKEND,0,0,1,2,0,421,1,0,0,0,,0,0,0,0,UP,6,6,0,,0,1,0,,1,3,0,,421,,1,0,,1,,,,,,,,,,,,,,0,0,
""".split('\n')
        # per service
        self.check._process_data(data, True, False, collect_status_metrics=True,
                                 collect_status_metrics_by_host=False)
        expected_hosts_statuses = defaultdict(int)
        expected_hosts_statuses[('b', 'OPEN')] = 1
        expected_hosts_statuses[('b', 'UP')] = 3
        expected_hosts_statuses[('b', 'DOWN')] = 1
        expected_hosts_statuses[('b', 'MAINT')] = 1
        expected_hosts_statuses[('a', 'OPEN')] = 1
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
        # backend hosts
        agg_statuses = self.check._process_backend_hosts_metric(expected_hosts_statuses)
        expected_agg_statuses = {
            'a': {'available': 0, 'unavailable': 0},
            'b': {'available': 3, 'unavailable': 2},
        }
        self.assertEquals(expected_agg_statuses, dict(agg_statuses))
        # with collect_aggregates_only set to True
        self.check._process_data(data, True, True, collect_status_metrics=True,
                                 collect_status_metrics_by_host=False)
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
        # per host
        self.check._process_data(data, True, False, collect_status_metrics=True,
                                 collect_status_metrics_by_host=True)
        expected_hosts_statuses = defaultdict(int)
        expected_hosts_statuses[('b', 'FRONTEND', 'OPEN')] = 1
        expected_hosts_statuses[('a', 'FRONTEND', 'OPEN')] = 1
        expected_hosts_statuses[('b', 'i-1', 'UP')] = 1
        expected_hosts_statuses[('b', 'i-2', 'UP')] = 1
        expected_hosts_statuses[('b', 'i-3', 'UP')] = 1
        expected_hosts_statuses[('b', 'i-4', 'DOWN')] = 1
        expected_hosts_statuses[('b', 'i-5', 'MAINT')] = 1
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
        self.check._process_data(data, True, True, collect_status_metrics=True,
                                 collect_status_metrics_by_host=True)
        self.assertEquals(self.check.hosts_statuses, expected_hosts_statuses)
|
|
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io
# https://mpatacchiola.github.io/blog/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import cv2
import sys
from timeit import default_timer as timer
DEBUG = False  # when True, each processing stage prints its wall-clock duration
class FasaSaliencyMapping:
    """Implementation of the FASA (Fast, Accurate, and Size-Aware Salient Object Detection) algorithm.

    Abstract:
    Fast and accurate salient-object detectors are important for various image processing and computer vision
    applications, such as adaptive compression and object segmentation. It is also desirable to have a detector that is
    aware of the position and the size of the salient objects. In this paper, we propose a salient-object detection
    method that is fast, accurate, and size-aware. For efficient computation, we quantize the image colors and estimate
    the spatial positions and sizes of the quantized colors. We then feed these values into a statistical model to
    obtain a probability of saliency. In order to estimate the final saliency, this probability is combined with a
    global color contrast measure. We test our method on two public datasets and show that our method significantly
    outperforms the fast state-of-the-art methods. In addition, it has comparable performance and is an order of
    magnitude faster than the accurate state-of-the-art methods. We exhibit the potential of our algorithm by
    processing a high-definition video in real time.
    """
    def __init__(self, image_h, image_w):
        """Init the classifier.

        @param image_h: height (rows) of the images that will be processed
        @param image_w: width (columns) of the images that will be processed
        """
        # Assigning some global variables and creating here the image to fill later (for speed purposes)
        self.image_rows = image_h
        self.image_cols = image_w
        self.salient_image = np.zeros((image_h, image_w), dtype=np.uint8)
        # mu: mean vector (statistical model parameters from the FASA paper)
        self.mean_vector = np.array([0.5555, 0.6449, 0.0002, 0.0063])
        # covariance matrix
        # self.covariance_matrix = np.array([[0.0231, -0.0010, 0.0001, -0.0002],
        #                                    [-0.0010, 0.0246, -0.0000, 0.0000],
        #                                    [0.0001, -0.0000, 0.0115, 0.0003],
        #                                    [-0.0002, 0.0000, 0.0003, 0.0080]])
        # determinant of covariance matrix
        # self.determinant_covariance = np.linalg.det(self.covariance_matrix)
        # self.determinant_covariance = 5.21232874e-08
        # Inverse of the covariance matrix (precomputed so no inversion is needed at runtime)
        self.covariance_matrix_inverse = np.array([[43.3777, 1.7633, -0.4059, 1.0997],
                                                   [1.7633, 40.7221, -0.0165, 0.0447],
                                                   [-0.4059, -0.0165, 87.0455, -3.2744],
                                                   [1.0997, 0.0447, -3.2744, 125.1503]])
    def _calculate_histogram(self, image, tot_bins=8):
        """Quantize the image colors and collect per-color position statistics.

        Builds the 3D color histogram, the per-pixel quantized image, and —
        for every unique quantized color — the sums of x, y, x^2 and y^2
        pixel coordinates (used later to estimate color centroids and
        spatial variances).
        @param image: the (already color-converted) input image
        @param tot_bins: number of histogram bins per channel
        @return: the input image, unchanged
        """
        # 1- Conversion from BGR to LAB color space
        # Here a color space conversion is done. Moreover the min/max value for each channel is found.
        # This is helpful because the 3D histogram will be defined in this sub-space.
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        minL, maxL, _, _ = cv2.minMaxLoc(image[:, :, 0])
        minA, maxA, _, _ = cv2.minMaxLoc(image[:, :, 1])
        minB, maxB, _, _ = cv2.minMaxLoc(image[:, :, 2])
        # Quantization ranges (bin edges per channel, endpoint excluded)
        self.L_range = np.linspace(minL, maxL, num=tot_bins, endpoint=False)
        self.A_range = np.linspace(minA, maxA, num=tot_bins, endpoint=False)
        self.B_range = np.linspace(minB, maxB, num=tot_bins, endpoint=False)
        # Here the image quantized using the discrete bins is created.
        self.image_quantized = np.dstack((np.digitize(image[:, :, 0], self.L_range, right=False),
                                          np.digitize(image[:, :, 1], self.A_range, right=False),
                                          np.digitize(image[:, :, 2], self.B_range, right=False)))
        self.image_quantized -= 1  # now in range [0,7]
        # it maps the 3D index of hist in a flat 1D array index
        self.map_3d_1d = np.zeros((tot_bins, tot_bins, tot_bins), dtype=np.int32)
        # Histograms in a 3D manifold of shape (tot_bin, tot_bin, tot_bin).
        # The cv2.calcHist for a 3-channels image generates a cube of size (tot_bins, tot_bins, tot_bins) which is a
        # discretization of the 3-D space defined by hist_range.
        # E.G. if range is 0-255 and it is divided in 5 bins we get -> [0-50][50-100][100-150][150-200][200-250]
        # So if you access the histogram with the indeces: histogram[3,0,2] it is possible to see how many pixels
        # fall in the range channel_1=[150-200], channel_2=[0-50], channel_3=[100-150]
        # data = np.vstack((image[:, :, 0].flat, image[:, :, 1].flat, image[:, :, 2].flat)).astype(np.uint8).T
        # OpenCV implementation is slightly faster than Numpy
        self.histogram = cv2.calcHist([image], channels=[0, 1, 2], mask=None,
                                      histSize=[tot_bins, tot_bins, tot_bins],
                                      ranges=[minL, maxL, minA, maxA, minB, maxB])
        # data = np.vstack((image[:, :, 0].flat, image[:, :, 1].flat, image[:, :, 2].flat)).T
        # self.histogram, edges = np.histogramdd(data, bins=tot_bins, range=((minL, maxL), (minA, maxA), (minB, maxB)))
        # self.histogram, edges = np.histogramdd(data, bins=tot_bins)
        # Get flatten index ID of the image pixels quantized
        image_indeces = np.vstack((self.image_quantized[:,:,0].flat,
                                   self.image_quantized[:,:,1].flat,
                                   self.image_quantized[:,:,2].flat)).astype(np.int32)
        image_linear = np.ravel_multi_index(image_indeces, (tot_bins, tot_bins, tot_bins))  # in range [0,7]
        # image_linear = np.reshape(image_linear, (self.image_rows, self.image_cols))
        # Getting the linear ID index of unique colours
        self.index_matrix = np.transpose(np.nonzero(self.histogram))
        hist_index = np.where(self.histogram > 0)  # Included in [0,7]
        unique_color_linear = np.ravel_multi_index(hist_index, (tot_bins, tot_bins, tot_bins))  # linear ID index
        self.number_of_colors = np.amax(self.index_matrix.shape)
        # Per-color coordinate sums: sum(x), sum(y), sum(x^2), sum(y^2)
        self.centx_matrix = np.zeros(self.number_of_colors)
        self.centy_matrix = np.zeros(self.number_of_colors)
        self.centx2_matrix = np.zeros(self.number_of_colors)
        self.centy2_matrix = np.zeros(self.number_of_colors)
        # Using the numpy method where() to find the location of each unique colour in the linear ID matrix
        counter = 0
        for i in unique_color_linear:
            # doing only one call to a flat image_linear is faster here
            where_y, where_x = np.unravel_index(np.where(image_linear == i), (self.image_rows, self.image_cols))
            #where_x = np.where(image_linear == i)[1]  # columns coord
            #where_y = np.where(image_linear == i)[0]  # rows coord
            self.centx_matrix[counter] = np.sum(where_x)
            self.centy_matrix[counter] = np.sum(where_y)
            self.centx2_matrix[counter] = np.sum(np.power(where_x, 2))
            self.centy2_matrix[counter] = np.sum(np.power(where_y, 2))
            counter += 1
        return image
    def _precompute_parameters(self, sigmac=16):
        """ Semi-Vectorized version of the precompute parameters function.
        This function runs at 0.003 seconds on a squared 400x400 pixel image.
        It returns the number of colors and estimates the color_distance matrix
        @param sigmac: the scalar used in the exponential (default=16)
        @return: the number of unique colors
        """
        L_centroid, A_centroid, B_centroid = np.meshgrid(self.L_range, self.A_range, self.B_range)
        self.unique_pixels = np.zeros((self.number_of_colors, 3))
        # xrange exists only on Python 2; fall back to range on Python 3
        if sys.version_info[0] == 2:
            color_range = xrange(0, self.number_of_colors)
        else:
            color_range = range(0, self.number_of_colors)
        for i in color_range:
            i_index = self.index_matrix[i, :]
            L_i = L_centroid[i_index[0], i_index[1], i_index[2]]
            A_i = A_centroid[i_index[0], i_index[1], i_index[2]]
            B_i = B_centroid[i_index[0], i_index[1], i_index[2]]
            self.unique_pixels[i] = np.array([L_i, A_i, B_i])
            self.map_3d_1d[i_index[0], i_index[1], i_index[2]] = i  # the map is assigned here for performance purposes
        # Pairwise squared color distances between all unique colors
        color_difference_matrix = np.sum(np.power(self.unique_pixels[:, np.newaxis] - self.unique_pixels, 2), axis=2)
        self.color_distance_matrix = np.sqrt(color_difference_matrix)
        self.exponential_color_distance_matrix = np.exp(- np.divide(color_difference_matrix, (2 * sigmac * sigmac)))
        return self.number_of_colors
    def _bilateral_filtering(self):
        """ Applying the bilateral filtering to the matrices.
        This function runs at 0.0006 seconds on a squared 400x400 pixel image.
        Since the trick 'matrix[ matrix > x]' is used it would be possible to set a threshold
        which is an energy value, considering only the histograms which have enough colours.
        @return: mx, my, Vx, Vy
        """
        # Obtaining the values through vectorized operations (very efficient)
        self.contrast = np.dot(self.color_distance_matrix, self.histogram[self.histogram > 0])
        normalization_array = np.dot(self.exponential_color_distance_matrix, self.histogram[self.histogram > 0])
        self.mx = np.dot(self.exponential_color_distance_matrix, self.centx_matrix)
        self.my = np.dot(self.exponential_color_distance_matrix, self.centy_matrix)
        mx2 = np.dot(self.exponential_color_distance_matrix, self.centx2_matrix)
        my2 = np.dot(self.exponential_color_distance_matrix, self.centy2_matrix)
        # Normalizing the vectors
        self.mx = np.divide(self.mx, normalization_array)
        self.my = np.divide(self.my, normalization_array)
        mx2 = np.divide(mx2, normalization_array)
        my2 = np.divide(my2, normalization_array)
        # Spatial variances per color: E[x^2] - E[x]^2 (abs guards tiny negatives)
        self.Vx = np.absolute(np.subtract(mx2, np.power(self.mx, 2)))  # TODO: understand why some negative values appear
        self.Vy = np.absolute(np.subtract(my2, np.power(self.my, 2)))
        return self.mx, self.my, self.Vx, self.Vy
    def _calculate_probability(self):
        """ Vectorized version of the probability estimation.
        This function runs at 0.0001 seconds on a squared 400x400 pixel image.
        @return: a vector shape_probability of shape (number_of_colors)
        """
        # g rows: normalized width, height, x-offset, y-offset of each color's spatial footprint
        g = np.array([np.sqrt(12 * self.Vx) / self.image_cols,
                      np.sqrt(12 * self.Vy) / self.image_rows,
                      (self.mx - (self.image_cols / 2.0)) / float(self.image_cols),
                      (self.my - (self.image_rows / 2.0)) / float(self.image_rows)])
        X = (g.T - self.mean_vector)
        Y = X
        A = self.covariance_matrix_inverse
        # Row-wise quadratic form x^T A x computed without an explicit loop
        result = (np.dot(X, A) * Y).sum(1)  # This line does the trick
        self.shape_probability = np.exp(- result / 2)
        return self.shape_probability
    def _compute_saliency_map(self):
        """ Fast vectorized version of the saliency map estimation.
        This function runs at 7.7e-05 seconds on a squared 400x400 pixel image.
        @return: the saliency vector
        """
        # Vectorized operations for saliency vector estimation
        self.saliency = np.multiply(self.contrast, self.shape_probability)
        a1 = np.dot(self.exponential_color_distance_matrix, self.saliency)
        a2 = np.sum(self.exponential_color_distance_matrix, axis=1)
        self.saliency = np.divide(a1, a2)
        # The saliency vector is renormalised in range [0-255]
        minVal, maxVal, _, _ = cv2.minMaxLoc(self.saliency)
        self.saliency = self.saliency - minVal
        self.saliency = 255 * self.saliency / (maxVal - minVal) + 1e-3
        return self.saliency
    def returnMask(self, image, tot_bins=8, format='BGR2LAB'):
        """ Return the saliency mask of the input image.
        @param: image the image to process
        @param: tot_bins the number of bins used in the histogram
        @param: format conversion, it can be one of the following:
            BGR2LAB, BGR2RGB, RGB2LAB, RGB, BGR, LAB
        @return: the saliency mask
        """
        if format == 'BGR2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        elif format == 'BGR2RGB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif format == 'RGB2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        elif format == 'RGB' or format == 'BGR' or format == 'LAB':
            pass
        else:
            raise ValueError('[DEEPGAZE][SALIENCY-MAP][ERROR] the input format of the image is not supported.')
        # Pipeline: histogram -> parameters -> filtering -> probability -> saliency
        if DEBUG: start = timer()
        self._calculate_histogram(image, tot_bins=tot_bins)
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_histogram seconds ---" % (end - start))
        if DEBUG: start = timer()
        number_of_colors = self._precompute_parameters()
        if DEBUG: end = timer()
        if DEBUG: print("--- number of colors: " + str(number_of_colors) + " ---")
        if DEBUG: print("--- %s precompute_paramters seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._bilateral_filtering()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s bilateral_filtering seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._calculate_probability()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_probability seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._compute_saliency_map()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s compute_saliency_map seconds ---" % (end - start))
        if DEBUG: start = timer()
        # Back-project the per-color saliency values into the output image
        it = np.nditer(self.salient_image, flags=['multi_index'], op_flags=['writeonly'])
        while not it.finished:
            # This part takes 0.1 seconds
            y = it.multi_index[0]
            x = it.multi_index[1]
            #L_id = self.L_id_matrix[y, x]
            #A_id = self.A_id_matrix[y, x]
            #B_id = self.B_id_matrix[y, x]
            index = self.image_quantized[y, x]
            # These operations take 0.1 seconds
            index = self.map_3d_1d[index[0], index[1], index[2]]
            it[0] = self.saliency[index]
            it.iternext()
        if DEBUG: end = timer()
        # ret, self.salient_image = cv2.threshold(self.salient_image, 150, 255, cv2.THRESH_BINARY)
        if DEBUG: print("--- %s returnMask 'iteration part' seconds ---" % (end - start))
        return self.salient_image
|
|
#!/usr/bin/python
# Global Forest Change calculator for provided species' distributions maps
import csv
import logging
import time
import ee
import collections
from datetime import datetime
assets_filename = 'assets_eoo.csv' # File with IDs of species' range maps (uploaded through Google Maps Engine)
altitude_filename = 'altitude_range_all.csv' # File with species' altitude limits
scale = 2000 # Scale for running the calculations (2000 is recommended by IUCN)
maxPixels = 1e13 # Maximal number of pixels in image before rescaling is done
bestEffort = True # If number of pixels exceeds 'maxPixels' then rescale image
# Parameters for exponential backoff - a method for repeating a function call after
# an exception has happened. A workaround to a time-out issue in the Earth Engine
tries = 5
delay = 10
backoff = 2
# The logging module: log to file, plus a console handler mirroring INFO+ messages
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='raster.log',
                    filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
ee.Initialize() # Initialise the EE
startTime = datetime.now() # Start measuring the time
gfcImage = ee.Image('UMD/hansen/global_forest_change_2013') # The GFC map
gtopo30 = ee.Image('USGS/GTOPO30') # GTOPO30 Digital Elevation Map
# Concession areas (logging/fibre/palm-oil) to be excluded from forest statistics
woodlogging = ee.Image('GME/images/12170611901221780154-14850889436297602760');
woodfibre = ee.Image('GME/images/12170611901221780154-03384629936081429678');
oilpalm = ee.Image('GME/images/12170611901221780154-00658915758003517223');
concessions = ee.ImageCollection([woodlogging, woodfibre, oilpalm]).mosaic();
# Read altitude limits
Altitude = collections.namedtuple('Altitude', 'min max')
# NOTE(review): 'rb' mode and reader.next() are Python 2 csv idioms; on
# Python 3 use open(..., newline='') and next(reader).
with open(altitude_filename, 'rb') as f:
    reader = csv.reader(f)
    reader.next()  # skip the header row
    alt_info = {species: Altitude(min=float(min_alt), max=float(max_alt)) for species, min_alt, max_alt in reader}
# Select specific bands from GFC map
forest2000 = gfcImage.select(['treecover2000']).divide(100)
lossImage = gfcImage.select(['loss'])
lossYear = gfcImage.select(['lossyear'])
gainImage = gfcImage.select(['gain'])
# Remove concession areas from GFC bands (zero them out where concessions exist)
forest2000 = forest2000.where(concessions, 0);
lossImage = lossImage.where(concessions, 0);
lossYear = lossYear.where(concessions, 0);
gainImage = gainImage.where(concessions, 0);
# Find pixels in which only gain occurred
gainOnly = gainImage.And(lossImage.Not())
#tree30 = forest2000.gte(0.3);
#forest2000 = forest2000.mask(tree30)
# Multiply all bands by pixel area to get metres^2
hansen = []
hansen += [forest2000.multiply(ee.Image.pixelArea()).set('forest', 'Tree_area_2000', 'gee_returns', 'treecover2000'),
           forest2000.mask(lossImage).multiply(ee.Image.pixelArea()).set('forest', 'Forest_loss_2001_2012', 'gee_returns', 'treecover2000'),
           lossImage.multiply(ee.Image.pixelArea()).set('forest', 'Forest_loss_2001_2012_worstcase', 'gee_returns', 'loss'),
           gainOnly.multiply(ee.Image.pixelArea()).set('forest', 'Gain_forest', 'gee_returns', 'gain')]
# One per-year loss image for 2001..2012
hansen += [forest2000.mask(lossYear.eq(year)).multiply(ee.Image.pixelArea()).set('forest', 'Loss_20{0:0=2d}'.format(year), 'gee_returns', 'treecover2000') for year in range(1,13)]
ic = ee.ImageCollection.fromImages(hansen); # Form a collection from all Hansen images
class Species(object):
    """Wraps one species' range map and altitude limits as a callable that can be
    mapped over the Hansen image collection on the Earth Engine servers."""
    def __init__(self, asset_id, image_filename):
        """
        Parameters
        --------------
        asset_id : string
            An asset id provided by the Google Maps Engine. User uploads images (e.g. range maps) through the
            Maps Engine and in return gets an ID of the uploaded image. That is the "asset id"
        image_filename : string
            Name of the image. The asset id is associated with the image name (e.g. species id). We are using
            this relation to report information to the user and to retrieve the altitude info.
        Returns
        --------------
        None
        """
        self._asset_id = asset_id
        # Altitude limits looked up from the module-level alt_info dict.
        self._min_alt = alt_info[image_filename].min
        self._max_alt = alt_info[image_filename].max
    def __call__(self, image):
        """
        This is the algorithm that will do the forest area calculations. It takes species' range map,
        Global Forest Change map and elevation map to calculate forest area for the given species
        Parameters
        --------------
        image : EE image
            A tree cover image. For instance, it can be "forest loss" or "forest gain".
        Returns
        --------------
        area : a number
            Forest cover area
        """
        # Retrieve the species' range map image from GME
        species = ee.Image('GME/images/' + self._asset_id)
        # Clip elevation map with species-specific altitude limits
        alt_range = gtopo30.gte(self._min_alt).And(gtopo30.lte(self._max_alt))
        # Clip species' range map with altitude information. The operation removes all pixels that
        # do not fall within the altitude limits
        area_of_occupancy = alt_range.And(species)
        # Multiply Hansen image by the species' area of occupancy. Every pixel will contain forest cover
        # area for a given species
        forest_area_of_occupancy = image.multiply(area_of_occupancy)
        # Sum all pixels in an image to get total forest area for a given species
        total_area = forest_area_of_occupancy.reduceRegion(
            reducer = ee.Reducer.sum(),
            geometry = area_of_occupancy.geometry(),
            scale = scale,
            maxPixels = maxPixels,
            bestEffort = bestEffort)
        # Copy over the properties of the original image.
        area = forest_area_of_occupancy.copyProperties(image);
        # Store the calcuated area as a property of the image.
        # 'gee_returns' names the band under which reduceRegion reported the sum.
        gee_returns = image.get('gee_returns');
        area = area.set(ee.String('area'), total_area.get(gee_returns))
        return area
# Names of fields that will be returned on an output
fields = ['species', 'Tree_area_2000', 'Gain_forest', 'Forest_loss_2001_2012', 'Forest_loss_2001_2012_worstcase',
          'Loss_2001', 'Loss_2002', 'Loss_2003', 'Loss_2004', 'Loss_2005', 'Loss_2006', 'Loss_2007',
          'Loss_2008', 'Loss_2009', 'Loss_2010', 'Loss_2011', 'Loss_2012']
# Read assets IDs (image_filename -> asset_id), preserving file order
with open(assets_filename, 'rb') as f:
    reader = csv.reader(f)
    assets = collections.OrderedDict(asset for asset in reader)
# Open result file for writing; buffering=0 (unbuffered) so partial results
# survive a crash of a long-running job
fout = open('results_' + assets_filename, 'wb', buffering=0)
dw = csv.DictWriter(fout, fieldnames=fields)
dw.writeheader()
# Log file that stores names of all species for which we were unable to calculate forest cover.
# The reason can be for instance a problem with a range map
ferr = open(assets_filename + '_failed.txt', 'ab+', buffering=0)
# The function will be run on server-side to calculate the forest cover for a given species
def run(asset_id, image_filename):
    """Map the Species algorithm over the Hansen collection and fetch results.

    getInfo() is retried with exponential backoff because Earth Engine
    requests can time out while the server-side computation is still running.
    Returns the list of result features; raises after 'tries' failures or
    immediately on an internal (HTTP 500) error.
    NOTE: 'except ee.EEException, e' is Python 2 syntax.
    """
    species = Species(asset_id, image_filename)
    pixel_areas_masked = ic.map(species)
    # Use exponential backoff to handle timeout errors
    mtries, mdelay = tries, delay
    # If time-out occurs it means the calculations did not finish in the prescribed time. However,
    # the calculations are still running. That's why we query for results again after certain time, e.g. 10s.
    # If situation repeats, this time we try after longer period, e.g. 20s. We do a number of retries (e.g. 5),
    # after which we give up. It's called exponential backoff.
    while True:
        try:
            return pixel_areas_masked.getInfo()['features']
        except ee.EEException, e:
            if '500' in str(e): # internal error ocurred
                raise # no point in re-trying
            mtries -= 1
            if mtries == 0:
                raise Exception('Number of retries exceeded. I give up!')
            msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
            logging.warning(msg)
            time.sleep(mdelay)
            mdelay *= backoff
# Iterate through all species and calculate for them forest cover. By forest cover we understand all
# GFC-derived quantities, e.g. forest cover in 2000, forest loss, forest gain.
for image_filename, asset_id in assets.items():
    print 'Calculating: ', image_filename
    try:
        # Run computations
        results = run(asset_id, image_filename)
        # Get results: map each feature's 'forest' property to its computed 'area'
        d = {result['properties']['forest']: result['properties']['area'] for result in results }
        d['species'] = image_filename
        # Write results
        dw.writerow(d)
    except Exception, e:
        # Write errors: log the failure and record the species in the failure file
        logging.error('Failed: %s %s with message: %s', image_filename, asset_id, str(e))
        logging.exception(e)
        ferr.write(image_filename + ',' + asset_id + '\n')
fout.close()
ferr.close()
# Total wall-clock run time (startTime is presumably set near the top of the
# script — not visible here).
print datetime.now()-startTime
|
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# DNS resource record types known to this module.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
    """A batch of changes to Route53 resource record sets.

    Collects (action, Record) pairs and renders them as one
    ChangeResourceRecordSetsRequest document; when listing existing record
    sets it also behaves as a transparently-paged result set.
    """
    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2011-05-05/">
            <ChangeBatch>
                <Comment>%(comment)s</Comment>
                <Changes>%(changes)s</Changes>
            </ChangeBatch>
        </ChangeResourceRecordSetsRequest>"""
    ChangeXML = """<Change>
            <Action>%(action)s</Action>
            %(record)s
        </Change>"""
    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
        self.connection = connection
        self.hosted_zone_id = hosted_zone_id
        self.comment = comment
        self.changes = []
        # Paging markers filled in by endElement() when a listing is truncated.
        self.next_record_name = None
        self.next_record_type = None
        ResultSet.__init__(self, [('ResourceRecordSet', Record)])
    def __repr__(self):
        return '<ResourceRecordSets: %s>' % self.hosted_zone_id
    def add_change(self, action, name, type, ttl=600,
                   alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
                   weight=None):
        """Add a change request"""
        record = Record(name, type, ttl,
                        alias_hosted_zone_id=alias_hosted_zone_id,
                        alias_dns_name=alias_dns_name, identifier=identifier,
                        weight=weight)
        self.changes.append([action, record])
        return record
    def to_xml(self):
        """Render the queued changes as a ChangeResourceRecordSetsRequest
        document suitable for ChangeResourceRecordSetsRequest."""
        rendered_changes = "".join(
            self.ChangeXML % {"action": action, "record": record.to_xml()}
            for action, record in self.changes)
        return self.ChangeResourceRecordSetsBody % {
            "comment": self.comment, "changes": rendered_changes}
    def commit(self):
        """Commit this change"""
        if not self.connection:
            import boto
            self.connection = boto.connect_route53()
        return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
    def endElement(self, name, value, connection):
        """Capture the paging markers (NextRecordName/NextRecordType);
        everything else is handled by the base ResultSet."""
        if name == 'NextRecordName':
            self.next_record_name = value
        elif name == 'NextRecordType':
            self.next_record_type = value
        else:
            return ResultSet.endElement(self, name, value, connection)
    def __iter__(self):
        """Yield every record set, fetching further pages for as long as the
        service reports the listing as truncated."""
        page = ResultSet.__iter__(self)
        while page:
            for record_set in page:
                yield record_set
            if self.is_truncated:
                self.is_truncated = False
                page = self.connection.get_all_rrsets(
                    self.hosted_zone_id, name=self.next_record_name,
                    type=self.next_record_type)
            else:
                page = None
class Record(object):
    """One Route53 resource record set: either a list of plain values with a
    TTL, or an alias target; optionally weighted (WRR) via identifier/weight.
    """
    XMLBody = """<ResourceRecordSet>
        <Name>%(name)s</Name>
        <Type>%(type)s</Type>
        %(weight)s
        %(body)s
    </ResourceRecordSet>"""
    WRRBody = """
        <SetIdentifier>%(identifier)s</SetIdentifier>
        <Weight>%(weight)s</Weight>
    """
    ResourceRecordsBody = """
        <TTL>%(ttl)s</TTL>
        <ResourceRecords>
            %(records)s
        </ResourceRecords>"""
    ResourceRecordBody = """<ResourceRecord>
        <Value>%s</Value>
    </ResourceRecord>"""
    AliasBody = """<AliasTarget>
        <HostedZoneId>%s</HostedZoneId>
        <DNSName>%s</DNSName>
    </AliasTarget>"""
    def __init__(self, name=None, type=None, ttl=600, resource_records=None,
                 alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
                 weight=None):
        self.name = name
        self.type = type
        self.ttl = ttl
        # Keep the caller's list object (no copy), defaulting to a fresh one.
        if resource_records is None:
            resource_records = []
        self.resource_records = resource_records
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
        self.identifier = identifier
        self.weight = weight
    def add_value(self, value):
        """Append one resource record value (e.g. an IP address)."""
        self.resource_records.append(value)
    def set_alias(self, alias_hosted_zone_id, alias_dns_name):
        """Turn this record set into an alias record set."""
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
    def to_xml(self):
        """Render this record set as a <ResourceRecordSet> XML fragment."""
        is_alias = (self.alias_hosted_zone_id is not None and
                    self.alias_dns_name is not None)
        if is_alias:
            body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
        else:
            rendered = "".join(self.ResourceRecordBody % value
                               for value in self.resource_records)
            body = self.ResourceRecordsBody % {
                "ttl": self.ttl,
                "records": rendered,
            }
        weight = ""
        if self.identifier is not None and self.weight is not None:
            weight = self.WRRBody % {"identifier": self.identifier,
                                     "weight": self.weight}
        return self.XMLBody % {
            "name": self.name,
            "type": self.type,
            "weight": weight,
            "body": body,
        }
    def to_print(self):
        """Return a short human-readable summary of this record set."""
        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
            summary = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
        else:
            summary = ",".join(self.resource_records)
        if self.identifier is not None and self.weight is not None:
            summary += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
        return summary
    def endElement(self, name, value, connection):
        # Map simple XML tags straight onto attributes; <Value> tags
        # accumulate into the resource_records list.
        attr = {'Name': 'name',
                'Type': 'type',
                'TTL': 'ttl',
                'HostedZoneId': 'alias_hosted_zone_id',
                'DNSName': 'alias_dns_name',
                'SetIdentifier': 'identifier',
                'Weight': 'weight'}.get(name)
        if attr is not None:
            setattr(self, attr, value)
        elif name == 'Value':
            self.resource_records.append(value)
    def startElement(self, name, attrs, connection):
        return None
|
|
import cPickle
import os
import re
from pythoscope.astbuilder import regenerate
from pythoscope.code_trees_manager import FilesystemCodeTreesManager
from pythoscope.compat import any, set
from pythoscope.event import Event
from pythoscope.localizable import Localizable
from pythoscope.logger import log
from pythoscope.serializer import SerializedObject
from pythoscope.util import all_of_type, assert_argument_type, class_name,\
directories_under, extract_subpath, findfirst, load_pickle_from,\
starts_with_path, write_content_to_file, DirectoryException
########################################################################
## Project class and helpers.
##
class ModuleNeedsAnalysis(Exception):
    """Raised when a destination test module must be (re)analyzed before use."""
    def __init__(self, path, out_of_sync=False):
        message = "Destination test module %r needs analysis." % path
        Exception.__init__(self, message)
        self.path = path
        self.out_of_sync = out_of_sync
class ModuleNotFound(Exception):
    """Raised when a module lookup by subpath or locator fails."""
    def __init__(self, module):
        message = "Couldn't find module %r." % module
        Exception.__init__(self, message)
        self.module = module
class ModuleSaveError(Exception):
    """Raised when writing a module's contents to disk fails."""
    def __init__(self, module, reason):
        message = "Couldn't save module %r: %s." % (module, reason)
        Exception.__init__(self, message)
        self.module = module
        self.reason = reason
# All Pythoscope state lives inside a hidden directory under the project root.
PYTHOSCOPE_SUBPATH = ".pythoscope"
PICKLE_SUBPATH = os.path.join(PYTHOSCOPE_SUBPATH, "project.pickle")
POINTS_OF_ENTRY_SUBPATH = os.path.join(PYTHOSCOPE_SUBPATH, "points-of-entry")
def get_pythoscope_path(project_path):
    """Return the project's .pythoscope directory."""
    return os.path.join(project_path, PYTHOSCOPE_SUBPATH)
def get_pickle_path(project_path):
    """Return the location of the pickled Project state."""
    return os.path.join(project_path, PICKLE_SUBPATH)
def get_points_of_entry_path(project_path):
    """Return the directory holding the project's points of entry."""
    return os.path.join(project_path, POINTS_OF_ENTRY_SUBPATH)
def get_code_trees_path(project_path):
    """Return the directory where pickled CodeTrees are stored."""
    return os.path.join(get_pythoscope_path(project_path), "code-trees")
class Project(object):
    """Object representing the whole project under Pythoscope wings.
    No modifications are final until you call save().
    """
    def from_directory(cls, project_path):
        """Read the project information from the .pythoscope/ directory of
        the given project.
        The pickle file may not exist for project that is analyzed the
        first time and that's OK.
        """
        project_path = os.path.realpath(project_path)
        try:
            project = load_pickle_from(get_pickle_path(project_path))
            # Update project's path, as the directory could've been moved.
            project.path = project_path
        except IOError:
            project = Project(project_path)
        return project
    from_directory = classmethod(from_directory)
    def __init__(self, path, code_trees_manager_class=FilesystemCodeTreesManager):
        """Initialize a Project instance using the given path as the project's
        top directory.
        During normal operation code_trees_manager_class is the class that
        the Project delegates to all CodeTree management tasks, but during
        testing this can be replaced with something else, possibly a class
        that doesn't touch the file system.
        """
        self.path = path
        self.new_tests_directory = "tests"
        self.points_of_entry = {}
        self.snippet_executions = []
        # Maps module subpath -> Module instance.
        self._modules = {}
        self.code_trees_manager = code_trees_manager_class(get_code_trees_path(path))
        self._find_new_tests_directory()
    def _get_pickle_path(self):
        # Full path of this project's pickle file.
        return get_pickle_path(self.path)
    def get_points_of_entry_path(self):
        # Full path of this project's points-of-entry directory.
        return get_points_of_entry_path(self.path)
    def path_for_point_of_entry(self, name):
        # Absolute path of the named point of entry.
        return os.path.join(self.path, self.subpath_for_point_of_entry(name))
    def subpath_for_point_of_entry(self, name):
        # Project-relative path of the named point of entry.
        return os.path.join(POINTS_OF_ENTRY_SUBPATH, name)
    def _find_new_tests_directory(self):
        # Scan the project for an existing test directory; if several match,
        # the last one found wins (overrides the "tests" default).
        for path in directories_under(self.path):
            if re.search(r'[_-]?tests?([_-]|$)', path):
                self.new_tests_directory = path
    def save(self):
        """Persist all modified modules, then pickle the Project itself."""
        # To avoid inconsistencies try to save all project's modules first. If
        # any of those saves fail, the pickle file won't get updated.
        for module in self.get_modules():
            log.debug("Calling save() on module %r" % module.subpath)
            module.save()
        # We don't want to have a single AST in a Project instance.
        self.code_trees_manager.clear_cache()
        # Pickling the project after saving all of its modules, so any changes
        # made by Module instances during save() will be preserved as well.
        pickled_project = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
        log.debug("Writing project pickle to disk...")
        write_content_to_file(pickled_project, self._get_pickle_path(), binary=True)
    def find_module_by_full_path(self, path):
        # Translate an absolute path to a subpath, then look the module up.
        subpath = self._extract_subpath(path)
        return self[subpath]
    def contains_point_of_entry(self, name):
        return name in self.points_of_entry
    def get_point_of_entry(self, name):
        return self.points_of_entry[name]
    def add_point_of_entry(self, poe):
        self.points_of_entry[poe.name] = poe
    def remove_point_of_entry(self, name):
        # Drop the point of entry and discard any data from its previous run.
        poe = self.points_of_entry.pop(name)
        poe.clear_previous_run()
    def create_module(self, path, **kwds):
        """Create a module for this project located under given path.
        If there already was a module with given subpath, it will get replaced
        with a new instance using the _replace_references_to_module method.
        Returns the new Module object.
        """
        module = Module(subpath=self._extract_subpath(path), project=self, **kwds)
        if module.subpath in self._modules.keys():
            self._replace_references_to_module(module)
            # Don't need to forget the old CallTree, as the creation of
            # the Module instance above overwrites it anyway.
        self._modules[module.subpath] = module
        return module
    def create_test_module_from_name(self, test_name, **kwds):
        """Create a module with given name in project tests directory.
        If the test module already exists, ModuleNeedsAnalysis exception will
        be raised.
        """
        test_path = self._path_for_test(test_name)
        if os.path.exists(test_path):
            raise ModuleNeedsAnalysis(test_path)
        return self.create_module(test_path, **kwds)
    def remove_module(self, subpath):
        """Remove a module from this Project along with all references to it
        from other modules and its CodeTree.
        """
        module = self[subpath]
        for test_case in self.iter_test_cases():
            try:
                test_case.associated_modules.remove(module)
            except ValueError:
                # This test case didn't reference the removed module.
                pass
        del self._modules[subpath]
        self.code_trees_manager.forget_code_tree(module.subpath)
    # :: (CodeTree, Module) -> None
    def remember_code_tree(self, code_tree, module):
        self.code_trees_manager.remember_code_tree(code_tree, module.subpath)
    # :: Module -> CodeTree
    def recall_code_tree(self, module):
        return self.code_trees_manager.recall_code_tree(module.subpath)
    def remember_execution_from_snippet(self, execution):
        self.snippet_executions.append(execution)
    def _replace_references_to_module(self, module):
        """Remove a module with the same subpath as given module from this
        Project and replace all references to it with the new instance.
        """
        old_module = self[module.subpath]
        for test_case in self.iter_test_cases():
            try:
                test_case.associated_modules.remove(old_module)
                test_case.associated_modules.append(module)
            except ValueError:
                # Test case wasn't associated with the old module; skip it.
                pass
    def _extract_point_of_entry_subpath(self, path):
        """Takes the file path and returns subpath relative to the
        points of entry path.
        Assumes the given path is under points of entry path.
        """
        return extract_subpath(path, self.get_points_of_entry_path())
    def _extract_subpath(self, path):
        """Takes the file path and returns subpath relative to the
        project.
        Assumes the given path is under Project.path.
        """
        return extract_subpath(path, self.path)
    def contains_path(self, path):
        """Returns True if given path is under this project's path and False
        otherwise.
        """
        return starts_with_path(path, self.path)
    def iter_test_cases(self):
        # Yield every test case of every module in the project.
        for module in self.iter_modules():
            for test_case in module.test_cases:
                yield test_case
    def _path_for_test(self, test_module_name):
        """Return a full path to test module with given name.
        """
        return os.path.join(self.path, self.new_tests_directory, test_module_name)
    def __getitem__(self, module):
        # Accepts either a module subpath or its locator.
        for mod in self.iter_modules():
            if module in [mod.subpath, mod.locator]:
                return mod
        raise ModuleNotFound(module)
    def get_modules(self):
        return self._modules.values()
    def iter_modules(self):
        return self._modules.values()
    def iter_classes(self):
        for module in self.iter_modules():
            for klass in module.classes:
                yield klass
    def iter_functions(self):
        for module in self.iter_modules():
            for function in module.functions:
                yield function
    def find_object(self, type, name, modulename):
        # Returns None when either the module or the object is missing.
        try:
            return self[modulename].find_object(type, name)
        except ModuleNotFound:
            pass
########################################################################
## CodeTree class and helpers.
##
class CodeTree(object):
    """Container of a module's AST (from lib2to3.pytree).
    Each Module object has one corresponding CodeTree instance, which holds
    its whole AST in the `code` attribute. Moreover, references to subtrees
    inside this AST correspond to definitions inside a Module, and are stored
    inside a `code_references` attribute.
    Modules are identified by their subpath, which (within the scope of
    a project) is known to be unique. CodeTree instance doesn't need to know
    this subpath. It is used only by the Project class, for identification
    of modules and ultimately - storage and retrieval of CodeTree instances
    (see Project#remember_code_tree and Project#recall_code_tree methods).
    CodeTree instances are not saved to disk unless the save() method is
    called on them. They also will *not* be accesible via `CodeTree.of()`
    interface unless you remember them in a Project instance first (see
    Project#remember_code_tree).
    """
    @classmethod
    def of(cls, obj):
        """Return a CodeTree instance that handles code of the given object."""
        owner = module_of(obj)
        return owner.project.recall_code_tree(owner)
    def __init__(self, code):
        self.code = code
        # Maps module_level_id(obj) -> AST subtree for that object.
        self.code_references = {}
    def add_object(self, obj, code):
        self.code_references[module_level_id(obj)] = code
    def add_object_with_code(self, obj):
        """Take an object holding an AST in its `code` attribute and store it
        as a part of this CodeTree.
        As a side effect, `code` attribute is removed, so the object no longer
        references the AST and can be pickled separately.
        """
        self.add_object(obj, obj.code)
        del obj.code
    def remove_object(self, obj):
        del self.code_references[module_level_id(obj)]
    def get_code_of(self, obj):
        """Return the AST for the given object (the whole tree for a Module)."""
        if isinstance(obj, Module):
            return self.code
        return self.code_references[module_level_id(obj)]
    def save(self, path):
        """Pickle and save this CodeTree under given path."""
        payload = cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)
        write_content_to_file(payload, path, binary=True)
def module_of(obj):
    """Return the Module given object is contained within."""
    if isinstance(obj, Module):
        return obj
    if isinstance(obj, (Function, Class)):
        return obj.module
    if isinstance(obj, Method):
        # Methods live in their class; recurse through it.
        return module_of(obj.klass)
    if isinstance(obj, TestCase):
        # Test cases may be nested; recurse through the parent chain.
        return module_of(obj.parent)
    raise TypeError("Don't know how to find the module of %r" % obj)
# :: ObjectInModule | str -> hashable
def module_level_id(obj):
    """Take an object and return something that unambiguously identifies
    it in the scope of its module.
    """
    if isinstance(obj, Class):
        return ('Class', obj.name)
    if isinstance(obj, Function):
        return ('Function', obj.name)
    if isinstance(obj, Method):
        # Method names are only unique within their class.
        return ('Method', (obj.klass.name, obj.name))
    if isinstance(obj, TestClass):
        return ('TestClass', obj.name)
    if isinstance(obj, TestMethod):
        return ('TestMethod', (obj.parent.name, obj.name))
    if isinstance(obj, str):
        # Plain strings identify themselves.
        return obj
    raise TypeError("Don't know how to generate a module-level id for %r" % obj)
def code_of(obj, reference=None):
    """Return an AST for the given object.
    It is "code_of(obj)" instead of "obj.code" mostly for explicitness. Objects
    have code attribute when they are created, but lose it once they are added
    to a Module (see docstring for ObjectInModule). Existence of code_of
    decouples a storage method (including caching) from an interface.
    """
    if reference is None:
        reference = obj
    else:
        # An explicit reference only makes sense when looking inside a Module.
        assert isinstance(obj, Module)
    return CodeTree.of(obj).get_code_of(reference)
########################################################################
## Classes of objects which are part of a Module.
##
class ObjectInModule(object):
    """A named entity locatable in a module through the AST.
    The `code` attribute is removed once the object becomes part of a Module
    (its AST is then kept in the module's CodeTree instead).
    """
    def __init__(self, name, code):
        self.name, self.code = name, code
class Definition(ObjectInModule):
    """Static description of a callable object (a function or a method,
    basically): its name, argument names and generator-ness.
    """
    def __init__(self, name, args=None, code=None, is_generator=False):
        ObjectInModule.__init__(self, name, code)
        self.args = [] if args is None else args
        self.is_generator = is_generator
class Class(ObjectInModule):
    """Static representation of a class definition: its methods, base-class
    names, owning module and the UserObject instances captured for it.
    """
    def __init__(self, name, methods=None, bases=None, code=None, module=None):
        # BUGFIX: the previous mutable defaults (methods=[], bases=[]) were
        # shared between every instance created with the defaults; `bases`
        # in particular was stored directly, so mutating one instance's
        # `bases` list silently leaked into all later instances.
        ObjectInModule.__init__(self, name, code)
        if methods is None:
            methods = []
        if bases is None:
            bases = []
        self.methods = []
        self.bases = bases
        self.module = module
        self.user_objects = []
        self.add_methods(methods)
    def _set_class_for_method(self, method):
        # A method may belong to exactly one class.
        if method.klass is not None:
            raise TypeError("Trying to add %r to class %r, while the "
                            "method is already inside %r." % \
                            (method, self.name, method.klass.name))
        method.klass = self
    def add_methods(self, methods):
        """Attach the given methods to this class."""
        for method in methods:
            self._set_class_for_method(method)
            self.methods.append(method)
    def add_user_object(self, user_object):
        self.user_objects.append(user_object)
    def get_traced_method_names(self):
        """Return names of methods observed being called at runtime."""
        traced_method_names = set()
        for user_object in self.user_objects:
            for call in user_object.calls:
                traced_method_names.add(call.definition.name)
        return traced_method_names
    def get_untraced_methods(self):
        """Return Method objects that were never observed being called."""
        traced_method_names = self.get_traced_method_names()
        def is_untraced(method):
            return method.name not in traced_method_names
        return filter(is_untraced, self.methods)
    def find_method_by_name(self, name):
        """Return the method with the given name, or None."""
        for method in self.methods:
            if method.name == name:
                return method
    def get_creational_method(self):
        """Return either __new__ or __init__ method of this class, with __new__
        taking precedence.
        """
        method = self.find_method_by_name('__new__')
        if method:
            return method
        return self.find_method_by_name('__init__')
    def __repr__(self):
        return "Class(name=%s)" % self.name
# Method names that construct an instance; see Method.is_creational().
CREATIONAL_METHODS = ['__init__', '__new__']
# Methods are not Callable, because they cannot be called by itself - they
# need a bound object. We represent this object by UserObject class, which
# gathers all MethodCalls for given instance.
class Method(Definition):
    """Static description of a method; knows the Class it belongs to."""
    def __init__(self, name, args=None, code=None, is_generator=False, klass=None):
        Definition.__init__(self, name, args=args, code=code, is_generator=is_generator)
        self.klass = klass
    def get_call_args(self):
        """Return names of arguments explicitly passed during call to this
        method.
        In other words, it removes "self" from the list of arguments, as "self"
        is passed implicitly.
        """
        takes_varargs = bool(self.args) and self.args[0].startswith('*')
        return self.args if takes_varargs else self.args[1:]
    def is_creational(self):
        """Return True if this method constructs instances (__init__/__new__)."""
        return self.name in CREATIONAL_METHODS
    def is_private(self):
        """Private methods (by convention) start with a single or double
        underscore.
        Note: Special methods are *not* considered private.
        """
        return self.name.startswith('_') and not self.is_special()
    def is_special(self):
        """Special methods, as defined in
        <http://docs.python.org/reference/datamodel.html#specialnames>
        have names starting and ending with a double underscore.
        """
        return self.name.startswith('__') and self.name.endswith('__')
    def __repr__(self):
        return "Method(name=%s, args=%r)" % (self.name, self.args)
class TestCase(object):
    """A single test object; when contained in a test suite, the suite is
    available through the `parent` attribute.
    """
    def __init__(self, parent=None):
        self.parent = parent
class TestMethod(ObjectInModule, TestCase):
    """A single named test method defined inside a test class."""
    def __init__(self, name, code=None, parent=None):
        TestCase.__init__(self, parent)
        ObjectInModule.__init__(self, name, code)
class TestSuite(TestCase):
    """A container of test objects: both test cases and nested suites live
    in the `test_cases` attribute.
    """
    # Subclasses restrict what kind of test cases they accept.
    allowed_test_case_classes = []
    def __init__(self, parent=None, imports=None):
        TestCase.__init__(self, parent)
        self.imports = [] if imports is None else imports
        self.changed = False
        self.test_cases = []
    def add_test_cases_without_append(self, test_cases):
        for case in test_cases:
            self.add_test_case_without_append(case)
    def add_test_case_without_append(self, test_case):
        # "without_append" refers to the AST: the case is adopted here but
        # its code is not appended to the suite's code.
        self._check_test_case_type(test_case)
        test_case.parent = self
        self.test_cases.append(test_case)
    def remove_test_case(self, test_case):
        """Try to remove given test case from this test suite.
        Raises ValueError if the case isn't part of this suite.
        Note: this method doesn't modify the AST of the test suite.
        """
        try:
            self.test_cases.remove(test_case)
        except ValueError:
            raise ValueError("Given test case is not a part of this test suite.")
    def mark_as_changed(self):
        # Propagate the changed flag up the parent chain.
        self.changed = True
        if self.parent:
            self.parent.mark_as_changed()
    def ensure_imports(self, imports):
        "Make sure that all required imports are present."
        for imp in imports:
            self._ensure_import(imp)
        if self.parent:
            self.parent.ensure_imports(imports)
    def _ensure_import(self, import_desc):
        if not self.contains_import(import_desc):
            self.imports.append(import_desc)
    def contains_import(self, import_desc):
        return import_desc in self.imports
    def _check_test_case_type(self, test_case):
        if not isinstance(test_case, tuple(self.allowed_test_case_classes)):
            raise TypeError("Given test case isn't allowed to be added to this test suite.")
class Call(Event):
    """Stores information about a single function or method call.
    Includes reference to the caller, all call arguments, references to
    other calls made inside this one, references to side effects that occured
    during this call and finally an output value.
    __eq__ and __hash__ definitions provided for Function.get_unique_calls()
    and UserObject.get_external_calls().
    """
    def __init__(self, definition, args, output=None, exception=None):
        if [value for value in args.values() if not isinstance(value, SerializedObject)]:
            raise ValueError("Values of all arguments should be instances of SerializedObject class.")
        if output and exception:
            raise ValueError("Call should have a single point of return.")
        if not isinstance(definition, Definition):
            raise ValueError("Call definition object should be an instance of Definition.")
        super(Call, self).__init__()
        self.definition = definition
        self.input = args
        self.output = output
        self.exception = exception
        self.caller = None
        self.subcalls = []
        self.side_effects = []
    def add_subcall(self, call):
        """Attach a nested call; each call may have only one caller."""
        if call.caller is not None:
            raise TypeError("This %s of %s already has a caller." % \
                (class_name(call), call.definition.name))
        call.caller = self
        self.subcalls.append(call)
    def raised_exception(self):
        return self.exception is not None
    def set_output(self, output):
        self.output = output
    def set_exception(self, exception):
        self.exception = exception
    def clear_exception(self):
        self.exception = None
    def add_side_effect(self, side_effect):
        self.side_effects.append(side_effect)
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.definition == other.definition and \
               self.input == other.input and \
               self.output == other.output and \
               self.exception == other.exception and \
               self.side_effects == other.side_effects
    def __ne__(self, other):
        # BUGFIX: Python 2 doesn't derive __ne__ from __eq__, so without this
        # "!=" fell back to identity and two equal Calls compared unequal.
        return not self.__eq__(other)
    def __hash__(self):
        # BUGFIX: sort the input items before hashing.  Dict iteration order
        # depends on insertion/deletion history, so two equal inputs could
        # previously hash differently, violating the equal-objects-have-equal-
        # hashes invariant required by set()/dict (e.g. get_unique_calls()).
        return hash((self.definition.name,
                     tuple(sorted(self.input.iteritems())),
                     self.output,
                     self.exception,
                     tuple(self.side_effects)))
    def __repr__(self):
        return "%s(definition=%s, input=%r, output=%r, exception=%r, side_effects=[..(%d)..])" % \
            (class_name(self), self.definition.name, self.input, self.output,
             self.exception, len(self.side_effects))
class CFunction(Definition):
    """Definition of a callable implemented in C (no Python-level source)."""
    pass
class CallToC(Call):
    """A call into C code, carrying at most a single observed side effect."""
    def __init__(self, name, side_effect=None):
        super(CallToC, self).__init__(CFunction(name), {})
        self.side_effect = side_effect
    def clear_side_effect(self):
        self.side_effect = None
class UnknownCall(Call):
    """Placeholder call whose definition is a Function named '<unknown>'."""
    def __init__(self):
        super(UnknownCall, self).__init__(Function('<unknown>'), {})
class FunctionCall(Call):
    # A Call recorded for a plain function (see Function.calls_type).
    pass
class MethodCall(Call):
    # A Call recorded for a method via its UserObject (see UserObject.calls_type).
    pass
class Callable(object):
    """Dynamic aspect of a callable object: accumulates the calls made to it.
    Each subclass declares, via `calls_type`, which kind of Call it accepts.
    """
    calls_type = None
    def __init__(self, calls=None):
        self.calls = [] if calls is None else calls
    def add_call(self, call):
        assert_argument_type(call, self.calls_type)
        self.calls.append(call)
class Function(Definition, Callable):
    """A module-level callable: static Definition plus its recorded calls.
    Generator functions collect GeneratorObjects; plain ones, FunctionCalls.
    """
    def __init__(self, name, args=None, code=None, calls=None, is_generator=False, module=None):
        Definition.__init__(self, name, args=args, code=code, is_generator=is_generator)
        Callable.__init__(self, calls)
        self.module = module
        self.calls_type = GeneratorObject if is_generator else FunctionCall
    def get_unique_calls(self):
        """Return the recorded calls with duplicates collapsed."""
        return set(self.calls)
    def __repr__(self):
        return "Function(name=%s, args=%r, calls=%r)" % (self.name, self.args, self.calls)
class GeneratorObjectInvocation(Call):
    """Representation of a single generator invocation.
    Each time a generator is resumed a new GeneratorObjectInvocation is
    created; a GeneratorObject collects them (see its calls_type).
    """
class GeneratorObject(Callable, SerializedObject):
    """Representation of a generator object - a callable with an input and many
    outputs (here called "yields").
    """
    calls_type = GeneratorObjectInvocation
    def __init__(self, obj, generator=None, args=None, callable=None):
        Callable.__init__(self)
        SerializedObject.__init__(self, obj)
        # A GeneratorObject may be created before its definition and arguments
        # are known; in that case activate() must be called later.
        if generator is not None and args is not None and callable is not None:
            self.activate(generator, args, callable)
    def activate(self, generator, args, callable):
        """Bind this object to its generator definition, call arguments and
        owning callable context.  May be called at most once.
        """
        assert_argument_type(generator, (Function, Method))
        assert_argument_type(callable, (Function, UserObject))
        if self.is_activated():
            raise ValueError("This generator has already been activated.")
        if not generator.is_generator:
            raise TypeError("Tried to activate GeneratorObject with %r as a generator definition." % generator)
        self.definition = generator
        self.args = args
        # Finally register this GeneratorObject with its callable context
        # (which will be a Function or an UserObject). This has to be
        # done only once for each GeneratorObject.
        callable.add_call(self)
    def is_activated(self):
        # The `args` attribute is only set inside activate(), so its presence
        # marks an activated generator.
        return hasattr(self, 'args')
    def raised_exception(self):
        # True if any single invocation of this generator raised.
        return any([c.raised_exception() for c in self.calls])
    def _get_exception(self):
        # Exception raised by the first failing invocation, or None.
        for invocation in self.calls:
            if invocation.raised_exception():
                return invocation.exception
    exception = property(_get_exception)
    def __repr__(self):
        if self.is_activated():
            return "GeneratorObject(generator=%r, args=%r)" % \
                   (self.definition.name, self.args)
        else:
            return "GeneratorObject()"
class UserObject(Callable, SerializedObject):
    """Serialized instance of a user-defined type.
    UserObjects are also callables that aggregate MethodCall instances,
    capturing the whole life of an object, from initialization to destruction.
    """
    calls_type = (MethodCall, GeneratorObject)
    def __init__(self, obj, klass):
        Callable.__init__(self)
        SerializedObject.__init__(self, obj)
        self.klass = klass
        self.type_name = self.klass.name
    # Defined lazily to ease testing - classes may be assigned to modules after
    # creation of UserObject, or never at all.
    module_name = property(lambda self: self.klass.module.locator, lambda s,v: None)
    def get_init_call(self):
        """Return a call to __init__ or None if it wasn't called."""
        return findfirst(lambda call: call.definition.name == '__init__', self.calls)
    def is_external_call(self, call):
        # GeneratorObjects always count as external; other calls are external
        # when their caller is unknown or not one of this object's own calls.
        if isinstance(call, GeneratorObject):
            return True
        return (not call.caller) or (call.caller not in self.calls)
    def get_init_and_external_calls(self):
        """Return external calls, keeping __init__ in the result."""
        return filter(self.is_external_call, self.calls)
    def get_external_calls(self):
        """Return all calls to this object made from the outside.
        Note: __init__ is considered an internal call.
        """
        external = filter(self.is_external_call, self.calls)
        return filter(lambda call: call.definition.name != '__init__', external)
    def __repr__(self):
        return "UserObject(id=%s, klass=%r)" % (id(self), self.klass.name)
class TestClass(ObjectInModule, TestSuite):
    """Testing class, either generated by Pythoscope or hand-writen by the user.

    Each test class contains a set of requirements its surrounding must meet,
    like the list of imports it needs or specific setup and teardown
    instructions.

    associated_modules is a list of Modules which this test class exercises.
    """
    allowed_test_case_classes = [TestMethod]

    def __init__(self, name, code=None, parent=None, test_cases=None,
                 imports=None, associated_modules=None):
        # BUGFIX: `test_cases` used to default to a mutable [] shared between
        # every call of __init__; use the None-sentinel idiom instead.
        # Passing no argument behaves exactly as before.
        ObjectInModule.__init__(self, name, code)
        TestSuite.__init__(self, parent, imports)
        if associated_modules is None:
            associated_modules = []
        if test_cases is None:
            test_cases = []
        self.associated_modules = associated_modules
        # Code of test cases passed to the constructor is already contained
        # within the class code, so we don't need to append it.
        self.add_test_cases_without_append(test_cases)

    def _get_methods(self):
        return self.test_cases
    # Alias: a TestClass's test cases are its methods.
    methods = property(_get_methods)

    def add_test_case_without_append(self, test_case):
        """Register *test_case* without appending its code to the class AST."""
        TestSuite.add_test_case_without_append(self, test_case)
        if self.parent is not None:
            CodeTree.of(self).add_object_with_code(test_case)
        else:
            # This TestClass is not attached to a Module yet. We will leave
            # the just-added test case as it is and let Module instance handle
            # the rest when the time comes (see `Module#add_object`).
            pass

    def find_method_by_name(self, name):
        """Return the test method with the given name, or None when absent."""
        for method in self.test_cases:
            if method.name == name:
                return method
########################################################################
## The Module class.
##
class Module(Localizable, TestSuite):
    """A source module of the analyzed project.

    A Module aggregates the Classes, Functions and TestClasses found in one
    file and owns their AST references through the project's CodeTree
    registry.  Note: this file uses Python 2 syntax (see `save`).
    """
    allowed_test_case_classes = [TestClass]

    def __init__(self, project, subpath, code=None, objects=None, imports=None,
                 main_snippet=None, last_import=None, errors=None):
        Localizable.__init__(self, project, subpath)
        TestSuite.__init__(self, imports=imports)
        if code:
            # Persistence of CodeTree instances is managed by the Project instance.
            code_tree = CodeTree(code)
            project.remember_code_tree(code_tree, self)
            self.store_reference('main_snippet', main_snippet)
            self.store_reference('last_import', last_import)
        elif objects:
            # Objects carry code references, which are meaningless without
            # a CodeTree to resolve them against.
            raise ValueError("Tried to create module with objects, but without code.")
        if objects is None:
            objects = []
        if errors is None:
            errors = []
        self.errors = errors
        self.objects = []
        self.add_objects(objects)

    def _set_module_for_object(self, obj):
        # Classes and Functions may belong to exactly one module; guard
        # against accidental re-attachment.
        if isinstance(obj, (Class, Function)):
            if obj.module is not None:
                raise TypeError("Trying to add %r to module %r, while the "
                                "object is already inside %r." % \
                                (obj, self.locator, obj.module.locator))
            obj.module = self

    def _get_classes(self):
        return all_of_type(self.objects, Class)
    classes = property(_get_classes)

    def _get_functions(self):
        return all_of_type(self.objects, Function)
    functions = property(_get_functions)

    def _get_test_classes(self):
        return all_of_type(self.objects, TestClass)
    test_classes = property(_get_test_classes)

    def has_errors(self):
        # True when static analysis of this module reported problems.
        return self.errors != []

    def store_reference(self, name, code):
        # Keep a named AST reference (e.g. 'main_snippet') in the CodeTree.
        CodeTree.of(self).add_object(name, code)

    def add_objects(self, objects):
        """Add objects to this module.

        Note: AST of those objects will *not* be appended to the module's AST.
        """
        for obj in objects:
            # Adding a test case requires some extra effort than adding
            # a regular object, but they all land in `self.objects` list anyway.
            if isinstance(obj, TestCase):
                # By using the `add_objects()` interface user states that
                # the code of objects passed is already contained within
                # the module code, so we don't need to append it.
                self.add_test_case_without_append(obj)
            else:
                self.add_object(obj)

    def add_object(self, obj):
        """Attach *obj* to this module and register its code in the CodeTree."""
        self._set_module_for_object(obj)
        self.objects.append(obj)
        CodeTree.of(self).add_object_with_code(obj)
        # When attaching a class to a module we not only have to store its own
        # code reference, but also code references of its methods.
        if isinstance(obj, (Class, TestClass)):
            for method in obj.methods:
                CodeTree.of(self).add_object_with_code(method)

    def remove_object(self, obj):
        # Detach *obj* from both the object list and the CodeTree.
        self.objects.remove(obj)
        CodeTree.of(self).remove_object(obj)

    def add_test_case_without_append(self, test_case):
        # A test case is also a regular module object, plus its imports
        # must be satisfied by this module.
        TestSuite.add_test_case_without_append(self, test_case)
        self.add_object(test_case)
        self.ensure_imports(test_case.imports)

    def remove_test_case(self, test_case):
        TestSuite.remove_test_case(self, test_case)
        self.remove_object(test_case)

    def get_content(self):
        # Regenerate source text from the module's AST.
        return regenerate(code_of(self))

    def get_test_cases_for_module(self, module):
        """Return all test cases that are associated with given module.
        """
        return [tc for tc in self.test_cases if module in tc.associated_modules]

    def find_object(self, type, name):
        # Return the first object of *type* named *name*, or None implicitly.
        for obj in all_of_type(self.objects, type):
            if obj.name == name:
                return obj

    def save(self):
        """Write the module to disk, unless it is unchanged or out of sync."""
        # Don't save the test file unless it has been changed.
        if self.changed:
            if self.is_out_of_sync():
                raise ModuleNeedsAnalysis(self.subpath, out_of_sync=True)
            try:
                self.write(self.get_content())
            except DirectoryException, err:
                raise ModuleSaveError(self.subpath, err.args[0])
            self.changed = False
|
|
"""The tests for the MQTT lock platform."""
import json
from unittest.mock import ANY
from homeassistant.components import lock, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
STATE_LOCKED,
STATE_UNAVAILABLE,
STATE_UNLOCKED,
)
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_registry,
)
from tests.components.lock import common
async def test_controlling_state_via_topic(hass, mqtt_mock):
    """Test the controlling state via topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "LOCKED",
            "state_unlocked": "UNLOCKED",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # With a state topic configured, the state is not assumed and starts
    # out unlocked.
    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED
    assert not state.attributes.get(ATTR_ASSUMED_STATE)

    for payload, expected in (("LOCKED", STATE_LOCKED), ("UNLOCKED", STATE_UNLOCKED)):
        async_fire_mqtt_message(hass, "state-topic", payload)
        state = hass.states.get("lock.test")
        assert state.state is expected
async def test_controlling_non_default_state_via_topic(hass, mqtt_mock):
    """Test the controlling state via topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "closed",
            "state_unlocked": "open",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED
    assert not state.attributes.get(ATTR_ASSUMED_STATE)

    # Non-default state payloads must be honored.
    for payload, expected in (("closed", STATE_LOCKED), ("open", STATE_UNLOCKED)):
        async_fire_mqtt_message(hass, "state-topic", payload)
        state = hass.states.get("lock.test")
        assert state.state is expected
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock):
    """Test the controlling state via topic and JSON message."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "LOCKED",
            "state_unlocked": "UNLOCKED",
            "value_template": "{{ value_json.val }}",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED

    # The value_template extracts `val` from the JSON payload.
    for payload, expected in (
        ('{"val":"LOCKED"}', STATE_LOCKED),
        ('{"val":"UNLOCKED"}', STATE_UNLOCKED),
    ):
        async_fire_mqtt_message(hass, "state-topic", payload)
        state = hass.states.get("lock.test")
        assert state.state is expected
async def test_controlling_non_default_state_via_topic_and_json_message(
    hass, mqtt_mock
):
    """Test the controlling state via topic and JSON message."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "closed",
            "state_unlocked": "open",
            "value_template": "{{ value_json.val }}",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED

    # Non-default state payloads combined with a value template.
    for payload, expected in (
        ('{"val":"closed"}', STATE_LOCKED),
        ('{"val":"open"}', STATE_UNLOCKED),
    ):
        async_fire_mqtt_message(hass, "state-topic", payload)
        state = hass.states.get("lock.test")
        assert state.state is expected
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
    """Test optimistic mode without state topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "LOCKED",
            "state_unlocked": "UNLOCKED",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # No state topic: the lock is optimistic and its state is assumed.
    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED
    assert state.attributes.get(ATTR_ASSUMED_STATE)

    # Each command publishes its payload and immediately flips the state.
    for action, payload, expected in (
        (common.async_lock, "LOCK", STATE_LOCKED),
        (common.async_unlock, "UNLOCK", STATE_UNLOCKED),
    ):
        await action(hass, "lock.test")
        mqtt_mock.async_publish.assert_called_once_with(
            "command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        state = hass.states.get("lock.test")
        assert state.state is expected
        assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_sending_mqtt_commands_and_explicit_optimistic(hass, mqtt_mock):
    """Test optimistic mode without state topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "LOCKED",
            "state_unlocked": "UNLOCKED",
            "optimistic": True,
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # Even with a state topic, `optimistic: True` forces assumed state.
    state = hass.states.get("lock.test")
    assert state.state is STATE_UNLOCKED
    assert state.attributes.get(ATTR_ASSUMED_STATE)

    for action, payload, expected in (
        (common.async_lock, "LOCK", STATE_LOCKED),
        (common.async_unlock, "UNLOCK", STATE_UNLOCKED),
    ):
        await action(hass, "lock.test")
        mqtt_mock.async_publish.assert_called_once_with(
            "command-topic", payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        state = hass.states.get("lock.test")
        assert state.state is expected
        assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "availability_topic": "availability-topic",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # Unavailable until the default "online" payload arrives.
    assert hass.states.get("lock.test").state is STATE_UNAVAILABLE

    async_fire_mqtt_message(hass, "availability-topic", "online")
    assert hass.states.get("lock.test").state is not STATE_UNAVAILABLE

    async_fire_mqtt_message(hass, "availability-topic", "offline")
    assert hass.states.get("lock.test").state is STATE_UNAVAILABLE
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_lock": "LOCK",
            "payload_unlock": "UNLOCK",
            "state_locked": "LOCKED",
            "state_unlocked": "UNLOCKED",
            "availability_topic": "availability-topic",
            "payload_available": "good",
            "payload_not_available": "nogood",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # Custom payloads replace the default online/offline strings.
    assert hass.states.get("lock.test").state is STATE_UNAVAILABLE

    async_fire_mqtt_message(hass, "availability-topic", "good")
    assert hass.states.get("lock.test").state is not STATE_UNAVAILABLE

    async_fire_mqtt_message(hass, "availability-topic", "nogood")
    assert hass.states.get("lock.test").state is STATE_UNAVAILABLE
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    config = {
        lock.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "test-topic",
            "json_attributes_topic": "attr-topic",
        }
    }
    assert await async_setup_component(hass, lock.DOMAIN, config)

    # A JSON object on the attributes topic is merged into the entity's
    # state attributes.
    async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
    assert hass.states.get("lock.test").attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    assert await async_setup_component(
        hass,
        lock.DOMAIN,
        {
            lock.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "test-topic",
                "json_attributes_topic": "attr-topic",
            }
        },
    )
    # A JSON list is valid JSON but not a mapping; the integration must
    # ignore it and log a warning instead of setting attributes.
    async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
    state = hass.states.get("lock.test")
    assert state.attributes.get("val") is None
    assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    assert await async_setup_component(
        hass,
        lock.DOMAIN,
        {
            lock.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "test-topic",
                "json_attributes_topic": "attr-topic",
            }
        },
    )
    # An unparseable payload must be ignored and reported in the log.
    async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
    state = hass.states.get("lock.test")
    assert state.attributes.get("val") is None
    assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes.

    A second discovery message with a different json_attributes_topic must
    re-subscribe the entity to the new topic and drop the old one.
    """
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(hass, "homeassistant", {}, entry)
    data1 = (
        '{ "name": "Beer",'
        ' "command_topic": "test_topic",'
        ' "json_attributes_topic": "attr-topic1" }'
    )
    data2 = (
        '{ "name": "Beer",'
        ' "command_topic": "test_topic",'
        ' "json_attributes_topic": "attr-topic2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
    state = hass.states.get("lock.beer")
    assert state.attributes.get("val") == "100"

    # Change json_attributes_topic
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
    await hass.async_block_till_done()

    # Verify we are no longer subscribing to the old topic
    async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
    state = hass.states.get("lock.beer")
    assert state.attributes.get("val") == "100"

    # Verify we are subscribing to the new topic
    async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
    state = hass.states.get("lock.beer")
    assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
    """Test unique id option only creates one light per unique_id."""
    await async_mock_mqtt_component(hass)
    assert await async_setup_component(
        hass,
        lock.DOMAIN,
        {
            lock.DOMAIN: [
                {
                    "platform": "mqtt",
                    "name": "Test 1",
                    "state_topic": "test-topic",
                    "command_topic": "test_topic",
                    "unique_id": "TOTALLY_UNIQUE",
                },
                {
                    "platform": "mqtt",
                    "name": "Test 2",
                    "state_topic": "test-topic",
                    "command_topic": "test_topic",
                    "unique_id": "TOTALLY_UNIQUE",
                },
            ]
        },
    )
    # Both configs share the same unique_id, so only one entity may be
    # created from them.
    async_fire_mqtt_message(hass, "test-topic", "payload")
    assert len(hass.states.async_entity_ids(lock.DOMAIN)) == 1
async def test_discovery_removal_lock(hass, mqtt_mock, caplog):
    """Test removal of discovered lock."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(hass, "homeassistant", {}, entry)
    data = '{ "name": "Beer",' ' "command_topic": "test_topic" }'
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is not None
    assert state.name == "Beer"
    # An empty retained config payload removes the discovered entity.
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", "")
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(hass, "homeassistant", {}, entry)
    # data1 lacks the required command_topic, so no entity may be created.
    data1 = '{ "name": "Beer" }'
    data2 = '{ "name": "Milk",' ' "command_topic": "test_topic" }'
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is None
    # A subsequent valid config on the same discovery topic must succeed.
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
    await hass.async_block_till_done()
    state = hass.states.get("lock.milk")
    assert state is not None
    assert state.name == "Milk"
    state = hass.states.get("lock.beer")
    assert state is None
async def test_discovery_update_lock(hass, mqtt_mock, caplog):
    """Test update of discovered lock.

    A new config on the same discovery topic updates the existing entity
    in place (entity id stays lock.beer) instead of creating lock.milk.
    """
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(hass, "homeassistant", {}, entry)
    data1 = (
        '{ "name": "Beer",'
        ' "state_topic": "test_topic",'
        ' "command_topic": "command_topic",'
        ' "availability_topic": "availability_topic1" }'
    )
    data2 = (
        '{ "name": "Milk",'
        ' "state_topic": "test_topic2",'
        ' "command_topic": "command_topic",'
        ' "availability_topic": "availability_topic2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data1)
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is not None
    assert state.name == "Beer"
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data2)
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is not None
    assert state.name == "Milk"
    state = hass.states.get("lock.milk")
    assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT lock device registry integration."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    entry.add_to_hass(hass)
    await async_start(hass, "homeassistant", {}, entry)
    registry = await hass.helpers.device_registry.async_get_registry()
    data = json.dumps(
        {
            "platform": "mqtt",
            "name": "Test 1",
            "state_topic": "test-topic",
            "command_topic": "test-topic",
            "device": {
                "identifiers": ["helloworld"],
                "connections": [["mac", "02:5b:26:a8:dc:12"]],
                "manufacturer": "Whatever",
                "name": "Beer",
                "model": "Glass",
                "sw_version": "0.1-beta",
            },
            "unique_id": "veryunique",
        }
    )
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
    await hass.async_block_till_done()
    # The discovered device block must land in the device registry with
    # all metadata intact.
    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.identifiers == {("mqtt", "helloworld")}
    assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
    assert device.manufacturer == "Whatever"
    assert device.name == "Beer"
    assert device.model == "Glass"
    assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    entry.add_to_hass(hass)
    await async_start(hass, "homeassistant", {}, entry)
    registry = await hass.helpers.device_registry.async_get_registry()
    config = {
        "platform": "mqtt",
        "name": "Test 1",
        "state_topic": "test-topic",
        "command_topic": "test-command-topic",
        "device": {
            "identifiers": ["helloworld"],
            "connections": [["mac", "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
        "unique_id": "veryunique",
    }
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.name == "Beer"
    # Re-publishing the config with a changed device name must update the
    # existing registry entry rather than create a new device.
    config["device"]["name"] = "Milk"
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/lock/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    registry = mock_registry(hass, {})
    mock_mqtt = await async_mock_mqtt_component(hass)
    assert await async_setup_component(
        hass,
        lock.DOMAIN,
        {
            lock.DOMAIN: [
                {
                    "platform": "mqtt",
                    "name": "beer",
                    "state_topic": "test-topic",
                    "command_topic": "test-topic",
                    "availability_topic": "avty-topic",
                    "unique_id": "TOTALLY_UNIQUE",
                }
            ]
        },
    )
    state = hass.states.get("lock.beer")
    assert state is not None
    # One subscription for state, one for availability.
    assert mock_mqtt.async_subscribe.call_count == 2
    mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
    mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
    mock_mqtt.async_subscribe.reset_mock()
    # Renaming the entity must tear down and re-create both subscriptions.
    registry.async_update_entity("lock.beer", new_entity_id="lock.milk")
    await hass.async_block_till_done()
    state = hass.states.get("lock.beer")
    assert state is None
    state = hass.states.get("lock.milk")
    assert state is not None
    assert mock_mqtt.async_subscribe.call_count == 2
    mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
    mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
|
|
from pyravendb.data.document_convention import DocumentConvention, Failover
from pyravendb.tools.indexqueue import IndexQueue
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Util.number import bytes_to_long
from pyravendb.custom_exceptions import exceptions
from pyravendb.tools.pkcs7 import PKCS7Encoder
import tempfile
import requests
import sys
import json
import hashlib
import base64
import os
from pyravendb.tools.utils import Utils
from threading import Timer, Lock
class HttpRequestsFactory(object):
    """Creates and executes HTTP requests against a RavenDB 3.x server.

    Responsibilities:
      * building urls for the primary server or a replica, depending on the
        configured Failover behavior;
      * performing the OAuth api-key handshake on 401/412 responses;
      * failing over to replication destinations on 502/503 and polling
        downed servers until they come back (see `is_alive`).
    """

    def __init__(self, url, database, convention=None, api_key=None, force_get_topology=False):
        self.url = url
        self._primary_url = url
        self.database = database
        self._primary_database = database
        # True once we verified we are talking to the primary server.
        self.primary = False
        self.api_key = api_key
        self.version_info = sys.version_info.major
        self.convention = convention
        if self.convention is None:
            self.convention = DocumentConvention()
        self.headers = {"Accept": "application/json", "Has-Api-key": 'true' if self.api_key is not None else 'false',
                        "Raven-Client-Version": "3.5.0.0"}
        # Queue of replication destinations to fail over to.
        self.replication_topology = IndexQueue()
        self.topology_change_counter = 0
        self.lock = Lock()
        self.topology = None
        # Round-robin counter used by Failover.read_from_all_servers.
        self.request_count = 0
        self.force_get_topology = force_get_topology
        self._token = None
        self._current_api_key = None
        self._current_database = None

    def http_request_handler(self, path, method, data=None, headers=None, admin=False, force_read_from_master=False,
                             uri="databases", stream=False):
        """Entry point for all requests; resolves topology lazily first."""
        if self.force_get_topology:
            self.force_get_topology = False
            self.get_replication_topology()
        if self.url == self._primary_url and self.database == self._primary_database and not self.primary:
            self.primary = True
        if self.database.lower() == self.convention.system_database:
            # System-database requests always go to the master.
            force_read_from_master = True
            uri = self.convention.system_database
        return self._execute_with_replication(path, method, headers=headers, data=data, admin=admin,
                                              force_read_from_master=force_read_from_master, uri=uri, stream=stream)

    def _execute_with_replication(self, path, method, headers, data=None, admin=False,
                                  force_read_from_master=False, uri="databases", stream=False):
        """Build the destination url and execute the request, retrying on
        auth challenges (401/412) and failing over on 502/503."""
        second_api_key = None
        # BUGFIX: serialize the payload exactly once.  The json.dumps call
        # used to sit inside the retry loop, so a retry after an auth
        # challenge or a failover re-encoded the already-serialized JSON
        # string, producing a double-encoded request body.
        body = json.dumps(data, default=self.convention.json_default_method)
        while True:
            index = None
            url = None
            if not force_read_from_master:
                if admin:
                    url = "{0}/admin/{1}".format(self._primary_url, path)
                    second_api_key = self.api_key
                else:
                    if method == "GET":
                        if (path == "replication/topology" or "Hilo" in path) and not self.primary:
                            raise exceptions.InvalidOperationException(
                                "Cant get access to {0} when {1}(primary) is Down".format(path, self._primary_database))
                        elif self.convention.failover_behavior == Failover.read_from_all_servers:
                            with self.lock:
                                self.request_count += 1
                                index = self.request_count % (len(self.replication_topology) + 1)
                            if index != 0 or not self.primary:  # if 0 use the primary
                                index -= 1
                                destination = self.replication_topology.peek(index if index > 0 else 0)
                                url = "{0}/{1}/{2}/{3}".format(destination["url"], uri, destination["database"], path)
                                second_api_key = destination["credentials"].get("api_key", None)
                        elif not self.primary:
                            url = "{0}/{1}/{2}/{3}".format(self.url, uri, self.database, path)
                    else:
                        if not self.primary:
                            if self.convention.failover_behavior == \
                                    Failover.allow_reads_from_secondaries_and_writes_to_secondaries:
                                url = "{0}/{1}/{2}/{3}".format(self.url, uri, self.database, path)
                            else:
                                raise exceptions.InvalidOperationException(
                                    "Cant write to server when the primary is down when failover = {0}".format(
                                        self.convention.failover_behavior.name))
            if url is None:
                if not self.primary:
                    raise exceptions.InvalidOperationException(
                        "Cant read or write to the master because {0} is down".format(self._primary_database))
                url = "{0}/{1}/{2}/{3}".format(self._primary_url, uri, self._primary_database, path)
                if uri != "databases":
                    url = "{0}/{1}".format(self._primary_url, path)
                second_api_key = self.api_key
            with requests.session() as session:
                if headers is None:
                    headers = {}
                # self.headers may gain an Authorization token between
                # retries (see do_auth_request), so merge on every attempt.
                headers.update(self.headers)
                response = session.request(method, url=url, data=body, headers=headers, stream=stream)
                if response.status_code == 412 or response.status_code == 401:
                    try:
                        oauth_source = response.headers.__getitem__("OAuth-Source")
                    except KeyError:
                        raise exceptions.InvalidOperationException(
                            "Something is not right please check your server settings (do you use the right api_key)")
                    self.do_auth_request(self.api_key, oauth_source, second_api_key)
                    continue
                if (response.status_code == 503 or response.status_code == 502) and \
                        not self.replication_topology.empty() and not (
                        path == "replication/topology" or "Hilo" in path):
                    if self.primary:
                        if self.convention.failover_behavior == Failover.fail_immediately or force_read_from_master:
                            raise exceptions.ErrorResponseException("Failed to get response from server")
                        # Mark the primary as down and start polling it.
                        self.primary = False
                        self.is_alive({"url": self._primary_url, "database": self._primary_database}, primary=True)
                    else:
                        with self.lock:
                            if not index:
                                index = 0
                            peek_item = self.replication_topology.peek(index)
                            if self.url == peek_item["url"] and self.database == peek_item["database"]:
                                self.is_alive(self.replication_topology.get(index))
                    if self.replication_topology.empty():
                        raise exceptions.ErrorResponseException("Please check your databases")
                    # Switch to the next available destination and retry.
                    destination = self.replication_topology.peek()
                    self.database = destination["database"]
                    self.url = destination["url"]
                    second_api_key = destination["credentials"].get("api_key", None)
                    continue
                return response

    def is_alive(self, destination, primary=False):
        """Check whether *destination* is reachable again.

        On success the primary flag is restored (or the destination is put
        back into the topology queue); on failure a daemon Timer re-runs
        this check in 5 seconds.
        """
        with requests.session() as session:
            while True:
                response = session.request("GET",
                                           "{0}/databases/{1}/replication/topology?check-server-reachable".format(
                                               destination["url"], destination["database"]), headers=self.headers)
                if response.status_code == 412 or response.status_code == 401:
                    try:
                        try:
                            oauth_source = response.headers.__getitem__("OAuth-Source")
                        except KeyError:
                            raise exceptions.InvalidOperationException(
                                "Something is not right please check your server settings")
                        self.do_auth_request(self.api_key, oauth_source)
                    except exceptions.ErrorResponseException:
                        break
                if response.status_code == 200:
                    if primary:
                        self.primary = True
                    else:
                        self.replication_topology.put(destination)
                    return
                else:
                    break
        is_alive_timer = Timer(5, lambda: self.is_alive(destination, primary))
        is_alive_timer.daemon = True
        is_alive_timer.start()

    def check_database_exists(self, path):
        """Probe for a database document on the master."""
        return self.http_request_handler(path, "GET", force_read_from_master=True, uri="docs")

    def call_hilo(self, type_tag_name, max_id, etag):
        """Reserve a HiLo id range by updating the Raven/Hilo document."""
        headers = {"if-None-Match": "\"" + etag + "\""}
        put_url = "docs/Raven%2FHilo%2F{0}".format(type_tag_name)
        response = self.http_request_handler(put_url, "PUT", data={"Max": max_id},
                                             headers=headers)
        if response.status_code == 409:
            # BUGFIX: Response.json is a method; the original accessed it as
            # response.json["Error"], which raises TypeError (the rest of
            # this class calls response.json()).
            raise exceptions.FetchConcurrencyException(response.json()["Error"])
        if response.status_code != 201:
            raise exceptions.ErrorResponseException("Something is wrong with the request")

    def update_replication(self, topology_file):
        """Persist the current topology to disk and rebuild the queue."""
        with open(topology_file, 'w+') as f:
            f.write(json.dumps(self.topology))
        self.replication_topology.queue.clear()
        self._load_topology()

    def check_replication_change(self, topology_file):
        """Fetch the server topology and refresh local state if it changed.

        Re-schedules itself every 5 minutes via a daemon Timer (except for
        the system database).
        """
        try:
            response = self.http_request_handler("replication/topology", "GET")
            if response.status_code == 200:
                topology = response.json()
                with self.lock:
                    if self.topology != topology:
                        self.topology = topology
                        self.update_replication(topology_file)
            elif response.status_code != 400 and response.status_code != 404 and not self.topology:
                raise exceptions.ErrorResponseException(
                    "Could not connect to the database {0} please check the problem".format(self._primary_database))
        except exceptions.InvalidOperationException:
            # Primary is down; keep the cached topology and retry later.
            pass
        if not self.database.lower() == self.convention.system_database:
            timer = Timer(60 * 5, lambda: self.check_replication_change(topology_file))
            timer.daemon = True
            timer.start()

    def get_replication_topology(self):
        """Load the cached topology file (if any) and start the refresh loop."""
        with self.lock:
            hash_name = hashlib.md5(
                "{0}/{1}".format(self._primary_url, self._primary_database).encode('utf-8')).hexdigest()
            topology_file = "{0}{1}RavenDB_Replication_Information_For - {2}".format(tempfile.gettempdir(), os.path.sep,
                                                                                    hash_name)
            try:
                with open(topology_file, 'r') as f:
                    self.topology = json.loads(f.read())
                    self._load_topology()
            except IOError:
                # No cached topology yet; the refresh below will create it.
                pass
        self.check_replication_change(topology_file)

    def _load_topology(self):
        # Fill the failover queue with every enabled destination.
        for destination in self.topology["Destinations"]:
            if not destination["Disabled"] and not destination["IgnoredClient"]:
                self.replication_topology.put({"url": destination["Url"], "database": destination["Database"],
                                               "credentials": {"api_key": destination["ApiKey"],
                                                               "domain": destination["Domain"]}})

    def do_auth_request(self, api_key, oauth_source, second_api_key=None):
        """Perform the OAuth api-key handshake against *oauth_source*.

        On success stores the bearer token into self.headers; raises
        ErrorResponseException when all allowed attempts are exhausted.
        """
        api_name, secret = api_key.split('/', 1)
        tries = 1
        headers = {"grant_type": "client_credentials"}
        data = None
        with requests.session() as session:
            while True:
                oath = session.request(method="POST", url=oauth_source,
                                       headers=headers, data=data)
                if oath.reason == "Precondition Failed":
                    if tries > 1:
                        if not (second_api_key and self.api_key != second_api_key and tries < 3):
                            raise exceptions.ErrorResponseException("Unauthorized")
                        api_name, secret = second_api_key.split('/', 1)
                        tries += 1
                    authenticate = oath.headers.__getitem__("www-authenticate")[len("Raven "):]
                    challenge_dict = dict(item.split("=", 1) for item in authenticate.split(','))
                    exponent_str = challenge_dict.get("exponent", None)
                    modulus_str = challenge_dict.get("modulus", None)
                    challenge = challenge_dict.get("challenge", None)
                    # BUGFIX: validate the challenge *before* using it.  The
                    # None-check used to run after the values were already
                    # base64-decoded, so a malformed header raised a
                    # TypeError instead of the intended exception below.
                    if exponent_str is None or modulus_str is None or challenge is None:
                        raise exceptions.InvalidOperationException(
                            "Invalid response from server, could not parse raven authentication information:{0} ".format(
                                authenticate))
                    exponent = bytes_to_long(base64.standard_b64decode(exponent_str))
                    modulus = bytes_to_long(base64.standard_b64decode(modulus_str))
                    rsa = RSA.construct((modulus, exponent))
                    cipher = PKCS1_OAEP.new(rsa)
                    iv = get_random_bytes(16)
                    key = get_random_bytes(32)
                    encoder = PKCS7Encoder()
                    # RSA-encrypt the session key+iv, then AES-encrypt the
                    # actual challenge response with that key.
                    cipher_text = cipher.encrypt(key + iv)
                    results = []
                    results.extend(cipher_text)
                    aes = AES.new(key, AES.MODE_CBC, iv)
                    sub_data = Utils.dict_to_string({"api key name": api_name, "challenge": challenge,
                                                     "response": base64.b64encode(hashlib.sha1(
                                                         '{0};{1}'.format(challenge, secret).encode(
                                                             'utf-8')).digest())})
                    results.extend(aes.encrypt(encoder.encode(sub_data)))
                    data = Utils.dict_to_string({"exponent": exponent_str, "modulus": modulus_str,
                                                 "data": base64.standard_b64encode(bytearray(results))})
                    tries += 1
                elif oath.status_code == 200:
                    oath_json = oath.json()
                    body = oath_json["Body"]
                    signature = oath_json["Signature"]
                    if not sys.version_info.major > 2:
                        # Python 2: normalize unicode to byte strings.
                        body = body.encode('utf-8')
                        signature = signature.encode('utf-8')
                    with self.lock:
                        self._token = "Bearer {0}".format(
                            {"Body": body, "Signature": signature})
                        self.headers.update({"Authorization": self._token})
                    break
                else:
                    raise exceptions.ErrorResponseException(oath.reason)
|
|
from collections import namedtuple
import logging
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, get_connection
)
from django.template.loader import render_to_string, select_template
from django.template import TemplateDoesNotExist
from django.conf import settings
try:
from froide.bounce.utils import (
make_bounce_address, make_unsubscribe_header
)
except ImportError:
make_bounce_address = None
make_unsubscribe_header = None
# Whether bounce handling is enabled for outgoing mail (project configuration)
HANDLE_BOUNCES = settings.FROIDE_CONFIG['bounce_enabled']

logger = logging.getLogger(__name__)

# Rendered email parts; subject/text are rendered templates, html may be None
EmailContent = namedtuple('EmailContent', (
    'subject', 'text', 'html'
))
class MailIntentRegistry:
    """Keeps a mapping from intent identifiers to their MailIntent objects."""

    def __init__(self):
        # Maps intent identifier -> MailIntent instance
        self.intents = {}

    def register(self, mail_intent, context_vars=None):
        """Create a MailIntent for *mail_intent*, remember and return it."""
        created = MailIntent(mail_intent, context_vars)
        self.intents[mail_intent] = created
        return created

    def get_intent(self, mail_intent):
        """Return the registered MailIntent, or None if unknown."""
        return self.intents.get(mail_intent)


mail_registry = MailIntentRegistry()
class MailMiddlwareRegistry:
    """Chain of middleware hooks that can veto or customise outgoing mail.

    NOTE: the class name misspells "Middleware" but is kept as-is because it
    is part of the public interface.
    """

    def __init__(self):
        self.middlewares = []

    def register(self, middleware):
        """Append *middleware* to the chain; usable as a decorator."""
        self.middlewares.append(middleware)
        return middleware

    def maybe_call_middleware(self, middleware, method, **kwargs):
        """Invoke *method* on *middleware* if it defines it, else None."""
        if not hasattr(middleware, method):
            return None
        return getattr(middleware, method)(**kwargs)

    def should_mail(self, mail_intent, context, email_kwargs):
        """Return False as soon as any middleware vetoes sending."""
        verdicts = (
            self.maybe_call_middleware(
                mw, 'should_mail',
                mail_intent=mail_intent,
                context=context,
                email_kwargs=email_kwargs,
            )
            for mw in self.middlewares
        )
        return not any(verdict is False for verdict in verdicts)

    def get_email_address(self, mail_intent, context):
        """Return the first middleware-provided address, or None."""
        for mw in self.middlewares:
            address = self.maybe_call_middleware(
                mw, 'get_email_address',
                mail_intent=mail_intent,
                context=context,
            )
            if address is not None:
                return address
        return None

    def get_context(self, mail_intent, context):
        """Merge every middleware's extra context into *context* in place."""
        for mw in self.middlewares:
            extra = self.maybe_call_middleware(
                mw, 'get_context',
                mail_intent=mail_intent,
                context=context,
            )
            if extra is not None:
                context.update(extra)
        return context

    def get_email_content(self, mail_intent, context, template_base,
                          email_kwargs):
        """Return the first middleware-provided EmailContent, or None."""
        for mw in self.middlewares:
            content = self.maybe_call_middleware(
                mw, 'get_email_content',
                mail_intent=mail_intent,
                context=context,
                template_base=template_base,
                email_kwargs=email_kwargs,
            )
            if content is not None:
                return content
        return None

    def enhance_email_kwargs(self, mail_intent, context, email_kwargs):
        """Merge every middleware's extra kwargs into *email_kwargs*."""
        for mw in self.middlewares:
            extra = self.maybe_call_middleware(
                mw, 'enhance_email_kwargs',
                mail_intent=mail_intent,
                context=context,
                email_kwargs=email_kwargs,
            )
            if extra is not None:
                email_kwargs.update(extra)
        return email_kwargs


mail_middleware_registry = MailMiddlwareRegistry()
class MailIntent:
    """A named kind of email rendered from templates.

    The intent identifier doubles as the template base path. Middleware
    registered with ``mail_middleware_registry`` can veto sending, supply the
    recipient, extend the context or override the rendered content.
    """

    def __init__(self, mail_intent, context_vars):
        self.mail_intent = mail_intent
        # Context keys that callers are expected to provide
        self.context_vars = set(context_vars or [])

    def get_email_address(self, context):
        """Return the recipient address from middleware or context.

        :raises ValueError: if neither middleware nor context provide one
        """
        email_address = mail_middleware_registry.get_email_address(
            self.mail_intent, context)
        if email_address is not None:
            return email_address
        if context.get('email'):
            return context['email']
        if context.get('user'):
            return context['user'].email
        raise ValueError('No email provided for mail intent')

    def get_context(self, context, preview=False):
        """Add site defaults and middleware-provided values to *context*."""
        if not preview and self.context_vars - set(context.keys()):
            # Fix: Logger.warn is a deprecated alias; use warning()
            logger.warning(
                'Mail intent %s with incomplete default context %s',
                self.mail_intent, context)
        context.update({
            'site_name': settings.SITE_NAME,
            'site_url': settings.SITE_URL
        })
        context = mail_middleware_registry.get_context(
            self.mail_intent, context
        )
        return context

    def get_template(self, template_names, required=True):
        """Select the first existing template; None when optional and absent."""
        try:
            return select_template(template_names)
        except TemplateDoesNotExist:
            if required:
                raise
            return None

    def get_templates(self, template_base=None, needs_subject=True):
        """Resolve subject/text/html templates, preferring *template_base*."""
        template_bases = []
        if template_base is not None:
            template_bases.append(template_base)
        template_bases.append(self.mail_intent)
        subject_template_names = [t + '_subject.txt' for t in template_bases]
        text_template_names = [t + '.txt' for t in template_bases]
        html_template_names = [t + '.html' for t in template_bases]
        return EmailContent(
            subject=self.get_template(subject_template_names, required=needs_subject),
            text=self.get_template(text_template_names),
            html=self.get_template(html_template_names, required=False),
        )

    def get_email_content(self, context, template_base=None, email_kwargs=None):
        """Render subject/text/html, unless middleware supplies the content."""
        email_content = mail_middleware_registry.get_email_content(
            self.mail_intent, context, template_base, email_kwargs
        )
        if email_content is not None:
            return email_content
        if email_kwargs is None:
            email_kwargs = {}
        reference = email_kwargs.get('reference')
        if reference and template_base is None:
            # Derive a reference-specific template base, e.g.
            # intent "app/mail" + reference "foo:1" -> "app/foo/mail"
            ref = reference.split(':', 1)[0]
            parts = self.mail_intent.rsplit('/', 1)
            template_base = '/'.join((parts[0], ref, parts[1]))
        subject = email_kwargs.pop('subject', None)
        email_content_templates = self.get_templates(
            template_base=template_base, needs_subject=not bool(subject)
        )
        if email_content_templates.subject:
            subject = email_content_templates.subject.render(context)
        text = email_content_templates.text.render(context)
        html = None
        if email_content_templates.html is not None:
            html = email_content_templates.html.render(context)
        return EmailContent(
            subject, text, html
        )

    def enhance_email_kwargs(self, context, email_kwargs):
        """Let middleware add keyword arguments for send_mail()."""
        email_kwargs = mail_middleware_registry.enhance_email_kwargs(
            self.mail_intent, context, email_kwargs
        )
        return email_kwargs

    def send(self, email=None, user=None, context=None,
             template_base=None, **kwargs):
        """Render this intent and send it.

        Returns None without sending when a middleware vetoes the mail;
        otherwise returns send_mail()'s result.
        """
        if context is None:
            context = {}
        if user is not None:
            context['user'] = user
        if email is not None:
            context['email'] = email
        # Pre-Check
        if not mail_middleware_registry.should_mail(
                self.mail_intent, context, kwargs):
            return
        email_address = self.get_email_address(context)
        # Context
        context = self.get_context(context)
        # Kwargs enhancement
        email_kwargs = self.enhance_email_kwargs(context, kwargs)
        # Rendering
        # NOTE(review): passes the original kwargs dict here (usually the
        # same object enhance_email_kwargs mutated and returned) — confirm
        # whether email_kwargs was intended
        email_content = self.get_email_content(
            context, template_base=template_base, email_kwargs=kwargs
        )
        # Make sure no extra subject kwarg is present
        email_kwargs.pop('subject', None)
        return send_mail(
            email_content.subject,
            email_content.text,
            email_address,
            user_email=email_address,
            html=email_content.html,
            **email_kwargs
        )
def get_mail_connection(**kwargs):
    """Open a mail connection using the project's configured email backend."""
    backend = settings.EMAIL_BACKEND
    return get_connection(backend=backend, **kwargs)
def send_template_email(
        email=None, user=None,
        subject=None, subject_template=None,
        template=None, html_template=None,
        context=None, **kwargs):
    """Render the given templates and mail the result to *user* or *email*.

    A user recipient takes precedence over a plain email address; returns
    True when neither is given.
    """
    if subject_template is not None:
        subject = render_to_string(subject_template, context)
    body = render_to_string(template, context)
    if html_template is not None:
        kwargs['html'] = render_to_string(html_template, context)
    if user is not None:
        return user.send_mail(subject, body, **kwargs)
    if email is not None:
        return send_mail(subject, body, email, **kwargs)
    return True
def send_mail(subject, body, email_address,
              html=None,
              from_email=None,
              attachments=None, fail_silently=False,
              bounce_check=True, headers=None,
              priority=True,
              queue=None, auto_bounce=True,
              unsubscribe_reference=None,
              **kwargs):
    """Send one email, optionally with an HTML alternative and attachments.

    :param subject: Email subject line
    :param body: Plain-text body
    :param email_address: Recipient address; nothing is sent if falsy
    :param html: Optional HTML body, attached as a text/html alternative
    :param from_email: Sender; defaults to settings.DEFAULT_FROM_EMAIL
    :param attachments: Iterable of (name, data, mime_type) triples
    :param fail_silently: Passed through to the message's send()
    :param bounce_check: Placeholder flag; the actual check is not implemented
    :param headers: Extra headers; NOTE: a caller-provided dict is mutated
    :param priority: When False (and no explicit queue), the bulk queue is used
    :param unsubscribe_reference: When given (and the helper is available),
        adds a List-Unsubscribe header
    :param kwargs: Extra keyword arguments; ignored by this function
    :returns: Whatever the message's send() returns, or None if no recipient
    """
    if not email_address:
        return
    if bounce_check:
        # TODO: Check if this email should be sent
        pass
    if from_email is None:
        from_email = settings.DEFAULT_FROM_EMAIL
    backend_kwargs = {}
    # Route bounces to a dedicated return path when bounce handling is enabled
    if HANDLE_BOUNCES and auto_bounce and make_bounce_address:
        backend_kwargs['return_path'] = make_bounce_address(email_address)
    # Non-priority mail goes to the bulk queue unless a queue was given
    if not priority and queue is None:
        queue = settings.EMAIL_BULK_QUEUE
    if queue is not None:
        backend_kwargs['queue'] = queue
    connection = get_mail_connection(**backend_kwargs)
    if headers is None:
        headers = {}
    # Ask well-behaved mail clients not to send auto-replies
    headers.update({
        'X-Auto-Response-Suppress': 'All',
    })
    if make_unsubscribe_header and unsubscribe_reference is not None:
        headers['List-Unsubscribe'] = make_unsubscribe_header(
            email_address, unsubscribe_reference
        )
    # Only the multi-alternative class supports attach_alternative for HTML
    if html is None:
        email_klass = EmailMessage
    else:
        email_klass = EmailMultiAlternatives
    email = email_klass(subject, body, from_email, [email_address],
                        connection=connection, headers=headers)
    if html is not None:
        email.attach_alternative(
            html,
            "text/html"
        )
    if attachments is not None:
        for name, data, mime_type in attachments:
            email.attach(name, data, mime_type)
    return email.send(fail_silently=fail_silently)
|
|
"""Defines utility methods for testing jobs and job types"""
from __future__ import unicode_literals
from django.db import transaction
import django.utils.timezone as timezone
import job.test.utils as job_test_utils
import trigger.test.utils as trigger_test_utils
from batch.models import Batch
from data.data.exceptions import InvalidData
from job.configuration.json.job_config_v6 import JobConfigurationV6
from job.messages.create_jobs import RecipeJob
from job.models import Job, JobTypeRevision
from queue.messages.queued_jobs import QueuedJob
from queue.models import Queue
from recipe.definition.json.definition_v6 import RecipeDefinitionV6
from recipe.definition.node import ConditionNodeDefinition, JobNodeDefinition, RecipeNodeDefinition
from recipe.messages.create_conditions import Condition
from recipe.messages.create_recipes import SubRecipe
from recipe.models import Recipe, RecipeCondition, RecipeInputFile, RecipeNode, RecipeType, RecipeTypeRevision
from recipe.models import RecipeTypeSubLink, RecipeTypeJobLink
import storage.test.utils as storage_test_utils
# Module-level counters used to generate unique names, versions, titles and
# descriptions for recipe types created by these test utilities
NAME_COUNTER = 1
VERSION_COUNTER = 1
TITLE_COUNTER = 1
DESCRIPTION_COUNTER = 1

# Minimal v7 recipe definition containing a single job node; used as the
# definition for sub-recipe types in tests
SUB_RECIPE_DEFINITION = {'version': '7',
                         'input': {'files': [],
                                   'json': []},
                         'nodes': {'node_a': {'dependencies': [],
                                              'input': {},
                                              'node_type': {'node_type': 'job', 'job_type_name': 'my-job-type',
                                                            'job_type_version': '1.0.0',
                                                            'job_type_revision': 1}}}}
# Full v7 recipe definition exercising every node type: two chained job nodes
# (node_a -> node_b), a condition node with a media-type data filter (node_c)
# and a sub-recipe node (node_d)
RECIPE_DEFINITION = {'version': '7',
                     'input': {'files': [{'name': 'INPUT_IMAGE', 'media_types': ['image/png'], 'required': True,
                                          'multiple': False}],
                               'json': [{'name': 'bar', 'type': 'string', 'required': False}]},
                     'nodes': {'node_a': {'dependencies': [],
                                          'input': {'INPUT_IMAGE': {'type': 'recipe', 'input': 'INPUT_IMAGE'}},
                                          'node_type': {'node_type': 'job', 'job_type_name': 'my-job-type',
                                                        'job_type_version': '1.0.0',
                                                        'job_type_revision': 1}},
                               'node_b': {'dependencies': [{'name': 'node_a', 'acceptance': True}],
                                          'input': {'INPUT_IMAGE': {'type': 'dependency', 'node': 'node_a',
                                                                    'output': 'OUTPUT_IMAGE'}},
                                          'node_type': {'node_type': 'job', 'job_type_name': 'my-job-type',
                                                        'job_type_version': '1.0.0',
                                                        'job_type_revision': 1}},
                               'node_c': {'dependencies': [{'name': 'node_b', 'acceptance': True}],
                                          'input': {'INPUT_IMAGE': {'type': 'dependency', 'node': 'node_b',
                                                                    'output': 'OUTPUT_IMAGE'}},
                                          'node_type': {
                                              'node_type': 'condition',
                                              'interface': {
                                                  'version': '7',
                                                  'files': [ {
                                                      'name': 'INPUT_IMAGE',
                                                      'media_types': ['image/png'],
                                                      'required': False,
                                                      'multiple': True}],
                                                  'json': []},
                                              'data_filter': {
                                                  'version': '7',
                                                  'all': True,
                                                  'filters': [ {'name': 'INPUT_IMAGE',
                                                                'type': 'media-type',
                                                                'condition': '==',
                                                                'values': ['image/png']}]} }},
                               'node_d': {'dependencies': [{'name': 'node_c', 'acceptance': True}],
                                          'input': {'input_a': {'type': 'recipe', 'input': 'bar'},
                                                    'input_b': {'type': 'dependency', 'node': 'node_c',
                                                                'output': 'INPUT_IMAGE'}},
                                          'node_type': {'node_type': 'recipe', 'recipe_type_name': 'sub-recipe',
                                                        'recipe_type_revision': 1}}}}
def create_recipe_type_v6(name=None, version=None, title=None, description=None, definition=None, is_active=None,
                          is_system=None):
    """Creates a recipe type for unit testing

    Any field left as None is filled with a unique generated value (or, for
    definition, a minimal empty v7 definition).

    :returns: The RecipeType model
    :rtype: :class:`recipe.models.RecipeType`
    """
    global NAME_COUNTER, VERSION_COUNTER, TITLE_COUNTER, DESCRIPTION_COUNTER
    if not name:
        name = 'test-recipe-type-%i' % NAME_COUNTER
        NAME_COUNTER += 1
    if not version:
        version = '%i.0.0' % VERSION_COUNTER
        VERSION_COUNTER += 1
    if not title:
        title = 'Test Recipe Type %i' % TITLE_COUNTER
        TITLE_COUNTER += 1
    if not description:
        description = 'Test Description %i' % DESCRIPTION_COUNTER
        DESCRIPTION_COUNTER += 1
    if not definition:
        definition = {'version': '7', 'input': {}, 'nodes': {}}

    recipe_type = RecipeType()
    recipe_type.name = name
    recipe_type.version = version
    recipe_type.title = title
    recipe_type.description = description
    recipe_type.definition = definition
    if is_active is not None:
        recipe_type.is_active = is_active
    if is_system is not None:
        recipe_type.is_system = is_system
    recipe_type.save()

    # Create the initial revision and the job/sub-recipe link rows
    RecipeTypeRevision.objects.create_recipe_type_revision(recipe_type)
    RecipeTypeJobLink.objects.create_recipe_type_job_links_from_definition(recipe_type)
    RecipeTypeSubLink.objects.create_recipe_type_sub_links_from_definition(recipe_type)
    return recipe_type
def edit_recipe_type_v6(recipe_type, title=None, description=None, definition=None, auto_update=True, is_active=True):
    """Updates the definition of a recipe type, including creating a new revision for unit testing

    :param recipe_type: The recipe type model to edit
    :type recipe_type: :class:`recipe.models.RecipeType`
    :param title: Optional new title
    :param description: Optional new description
    :param definition: New definition dict; converted through RecipeDefinitionV6
    :param auto_update: Whether dependent recipe types are auto-updated
    :param is_active: Whether the recipe type stays active
    """
    with transaction.atomic():
        RecipeType.objects.edit_recipe_type_v6(recipe_type.id, title=title, description=description,
                                               definition=RecipeDefinitionV6(definition).get_definition(),
                                               auto_update=auto_update, is_active=is_active)
def create_recipe(recipe_type=None, input=None, event=None, is_superseded=False, superseded=None,
                  superseded_recipe=None, config=None, batch=None, save=True):
    """Creates a recipe for unit testing

    Defaults are generated for recipe_type, input and event when not given;
    a superseded time defaults to now when is_superseded is set.

    :returns: The recipe model
    :rtype: :class:`recipe.models.Recipe`
    """
    recipe_type = recipe_type or create_recipe_type_v6()
    input = input or {}
    event = event or trigger_test_utils.create_trigger_event()
    if is_superseded and not superseded:
        superseded = timezone.now()

    recipe = Recipe()
    recipe.recipe_type = recipe_type
    recipe.recipe_type_rev = RecipeTypeRevision.objects.get_revision(recipe_type.name, recipe_type.revision_num)
    recipe.event = event
    recipe.input = input
    recipe.is_superseded = is_superseded
    recipe.superseded = superseded
    recipe.batch = batch
    recipe.configuration = config
    if superseded_recipe:
        # Chain back to the root of the superseded lineage
        lineage_root_id = superseded_recipe.root_superseded_recipe_id
        if lineage_root_id is None:
            lineage_root_id = superseded_recipe.id
        recipe.root_superseded_recipe_id = lineage_root_id
        recipe.superseded_recipe = superseded_recipe
    if save:
        recipe.save()
    return recipe
def process_recipe_inputs(recipe_ids):
    """Mimics effect of process_recipe_input messages for unit testing

    For each recipe: generate its input data from its parent recipe if it has
    none yet, process the input under a lock, and then update the recipe
    hierarchy starting at the root recipe.

    :param recipe_ids: IDs of the recipes to process
    """
    for recipe_id in recipe_ids:
        recipe = Recipe.objects.get_recipe_with_interfaces(recipe_id)
        if not recipe.has_input():
            if not recipe.recipe:
                # Recipe has no input and is not a sub-recipe; the real
                # message would not re-run, so skip it here as well
                continue
            generate_input_data_from_recipe(recipe)
        # Lock recipe model and process recipe's input data
        with transaction.atomic():
            recipe = Recipe.objects.get_locked_recipe(recipe_id)
            root_recipe_id = recipe.root_superseded_recipe_id if recipe.root_superseded_recipe_id else recipe.id
            Recipe.objects.process_recipe_input(recipe)
        if root_recipe_id:
            update_recipe(root_recipe_id)
def generate_input_data_from_recipe(sub_recipe):
    """Generates the sub-recipe's input data from its recipe dependencies and validates and sets the input data on
    the sub-recipe

    :param sub_recipe: The sub-recipe with related recipe_type_rev and recipe__recipe_type_rev models
    :type sub_recipe: :class:`recipe.models.Recipe`

    :raises :class:`data.data.exceptions.InvalidData`: If the data is invalid
    """
    # TODO: this is a hack to work with old legacy recipe data with workspaces, remove when legacy job types go
    old_recipe_input_dict = dict(sub_recipe.recipe.input)

    # Get sub-recipe input from dependencies in the recipe
    recipe_input_data = sub_recipe.recipe.get_input_data()
    node_outputs = RecipeNode.objects.get_recipe_node_outputs(sub_recipe.recipe_id)
    # Find this sub-recipe's node name within the parent recipe
    # NOTE(review): node_name would be unbound if no node matches — assumes
    # the sub-recipe is always a node of its parent recipe; confirm
    for node_output in node_outputs.values():
        if node_output.node_type == 'recipe' and node_output.id == sub_recipe.id:
            node_name = node_output.node_name
            break

    # TODO: this is a hack to work with old legacy recipe data with workspaces, remove when legacy job types go
    sub_recipe.recipe.input = old_recipe_input_dict

    definition = sub_recipe.recipe.recipe_type_rev.get_definition()
    input_data = definition.generate_node_input_data(node_name, recipe_input_data, node_outputs)
    Recipe.objects.set_recipe_input_data_v6(sub_recipe, input_data)
def update_recipe(root_recipe_id):
    """Mimics effect of update recipe messages for unit testing

    Evaluates the recipe graph rooted at the given recipe: completes the
    recipe if all nodes are done, updates job statuses, creates any new nodes
    (conditions, jobs, sub-recipes) and processes node input.

    :param root_recipe_id: ID of the root recipe to update
    """
    recipe = Recipe.objects.get_recipe_instance_from_root(root_recipe_id)
    recipe_model = recipe.recipe_model
    when = timezone.now()

    jobs_to_update = recipe.get_jobs_to_update()
    blocked_job_ids = jobs_to_update['BLOCKED']
    pending_job_ids = jobs_to_update['PENDING']

    nodes_to_create = recipe.get_nodes_to_create()
    nodes_to_process_input = recipe.get_nodes_to_process_input()

    if not recipe_model.is_completed and recipe.has_completed():
        Recipe.objects.complete_recipes([recipe_model.id], when)

    # Create new messages for changing job statuses
    if len(blocked_job_ids):
        update_jobs_status(blocked_job_ids, when, status='BLOCKED')
    if len(pending_job_ids):
        update_jobs_status(pending_job_ids, when, status='PENDING')

    # Create recipe nodes, grouped by node type
    conditions = []
    recipe_jobs = []
    subrecipes = []
    for node_name, node_def in nodes_to_create.items():
        # A node created now also gets its input processed as part of
        # creation; remove it so it is not handled twice below
        process_input = False
        if node_name in nodes_to_process_input:
            process_input = True
            del nodes_to_process_input[node_name]
        if node_def.node_type == ConditionNodeDefinition.NODE_TYPE:
            condition = Condition(node_name, process_input)
            conditions.append(condition)
        elif node_def.node_type == JobNodeDefinition.NODE_TYPE:
            job = RecipeJob(node_def.job_type_name, node_def.job_type_version, node_def.revision_num, node_name,
                            process_input)
            recipe_jobs.append(job)
        elif node_def.node_type == RecipeNodeDefinition.NODE_TYPE:
            subrecipe = SubRecipe(node_def.recipe_type_name, node_def.revision_num, node_name, process_input)
            subrecipes.append(subrecipe)
    if len(conditions):
        create_conditions(recipe_model, conditions)
    if len(recipe_jobs):
        create_jobs_for_recipe(recipe_model, recipe_jobs)
    if len(subrecipes):
        create_subrecipes(recipe_model, subrecipes)

    # Create new messages for processing recipe node input of nodes that
    # already exist
    process_condition_ids = []
    process_job_ids = []
    process_recipe_ids = []
    for node_name, node in nodes_to_process_input.items():
        if node.node_type == ConditionNodeDefinition.NODE_TYPE:
            process_condition_ids.append(node.condition.id)
        elif node.node_type == JobNodeDefinition.NODE_TYPE:
            process_job_ids.append(node.job.id)
        elif node.node_type == RecipeNodeDefinition.NODE_TYPE:
            process_recipe_ids.append(node.recipe.id)
    if len(process_condition_ids):
        process_conditions(process_condition_ids)
    if len(process_job_ids):
        process_job_inputs(process_job_ids)
    if len(process_recipe_ids):
        process_recipe_inputs(process_recipe_ids)
def update_jobs_status(job_ids, when=None, status='BLOCKED'):
    """Mimics effect of create_blocked_jobs_messages and create_pending_jobs_messages for unit testing

    :param job_ids: IDs of the jobs whose status should change
    :param when: Status change time; defaults to now. Fix: the previous
        default of ``timezone.now()`` in the signature was evaluated once at
        import time, so later calls compared against a stale timestamp.
    :param status: Either 'BLOCKED' or 'PENDING'
    """
    if when is None:
        when = timezone.now()
    with transaction.atomic():
        jobs = []
        # Retrieve locked job models; only update jobs whose last status
        # change is older than the requested change time
        for job_model in Job.objects.get_locked_jobs(job_ids):
            if not job_model.last_status_change or job_model.last_status_change < when:
                # Status update is not old, so perform the update
                jobs.append(job_model)
        # Update jobs that need status set to BLOCKED/PENDING
        if jobs:
            if status == 'BLOCKED':
                job_ids = Job.objects.update_jobs_to_blocked(jobs, when)
            if status == 'PENDING':
                job_ids = Job.objects.update_jobs_to_pending(jobs, when)
            update_recipe_metrics(job_ids=job_ids)
def create_conditions(recipe_model, conditions):
    """Mimics effect of create_conditions_messages for unit testing

    :param recipe_model: The recipe that contains the condition nodes
    :param conditions: List of Condition messages (node_name, process_input)
    """
    # Create a new (unsaved) condition model for each condition node.
    # Fix: the original rebound the loop variable `condition` to the model,
    # shadowing the message object; it also built a `process_input_by_node`
    # dict that was never read — both cleaned up here.
    condition_models = {}  # {Node name: condition model}
    for condition in conditions:
        condition_model = RecipeCondition.objects.create_condition(
            recipe_model.id, root_recipe_id=recipe_model.root_recipe_id,
            batch_id=recipe_model.batch_id)
        condition_models[condition.node_name] = condition_model
    RecipeCondition.objects.bulk_create(condition_models.values())

    # Create recipe nodes linking the recipe to the new conditions
    recipe_nodes = RecipeNode.objects.create_recipe_condition_nodes(recipe_model.id, condition_models)
    RecipeNode.objects.bulk_create(recipe_nodes)

    # Process input for the conditions that are flagged for it
    process_input_condition_ids = []
    for condition in conditions:
        if condition.process_input:
            process_input_condition_ids.append(condition_models[condition.node_name].id)
    process_conditions(process_input_condition_ids)
def process_conditions(process_input_condition_ids):
    """Mimics effect of create_process_condition_messages for unit testing

    For each unprocessed condition: generate its data from the recipe's
    dependency outputs, evaluate the node's data filter, mark the condition
    processed/accepted, and update the owning recipe.

    :param process_input_condition_ids: IDs of the conditions to process
    """
    for condition_id in process_input_condition_ids:
        condition = RecipeCondition.objects.get_condition_with_interfaces(condition_id)
        if not condition.is_processed:
            definition = condition.recipe.recipe_type_rev.get_definition()
            # Get condition data from dependencies in the recipe
            recipe_input_data = condition.recipe.get_input_data()
            node_outputs = RecipeNode.objects.get_recipe_node_outputs(condition.recipe_id)
            # Find this condition's node name within the recipe
            for node_output in node_outputs.values():
                if node_output.node_type == 'condition' and node_output.id == condition.id:
                    node_name = node_output.node_name
                    break
            # Set data on the condition model
            try:
                data = definition.generate_node_input_data(node_name, recipe_input_data, node_outputs)
                RecipeCondition.objects.set_condition_data_v6(condition, data, node_name)
            except InvalidData as ex:
                # Fix: use print() calls (valid in both Python 2 and 3)
                # instead of Python-2-only print statements
                print('Recipe created invalid input data for condition %d' % condition_id)
                print(ex)
                continue
            # Process filter and set whether condition was accepted
            data_filter = definition.graph[node_name].data_filter
            is_accepted = data_filter.is_data_accepted(data)
            RecipeCondition.objects.set_processed(condition.id, is_accepted)
            # Update the condition's recipe
            root_recipe_id = condition.recipe.root_recipe_id if condition.recipe.root_recipe_id else condition.recipe_id
            update_recipe(root_recipe_id)
def create_jobs_for_recipe(recipe_model, recipe_jobs):
    """Mimics effect of create_jobs_messages_for_recipe for unit testing

    :param recipe_model: The recipe that contains the job nodes
    :type recipe_model: :class:`recipe.models.Recipe`
    :param recipe_jobs: List of RecipeJob messages describing the jobs to create
    """
    recipe_jobs_map = {}  # {Node name: job model}
    superseded_jobs = {}

    # Get superseded jobs from superseded recipe
    if recipe_model.superseded_recipe_id:
        superseded_jobs = RecipeNode.objects.get_recipe_jobs(recipe_model.superseded_recipe_id)

    # Get job type revisions, indexed by (name, version, revision) tuple
    revision_tuples = [(j.job_type_name, j.job_type_version, j.job_type_rev_num) for j in recipe_jobs]
    revs_by_id = JobTypeRevision.objects.get_revisions(revision_tuples)
    revs_by_tuple = {(j.job_type.name, j.job_type.version, j.revision_num): j for j in revs_by_id.values()}

    # Create new job models
    # NOTE(review): process_input_by_node is populated but never read below
    process_input_by_node = {}
    for recipe_job in recipe_jobs:
        node_name = recipe_job.node_name
        process_input_by_node[node_name] = recipe_job.process_input
        tup = (recipe_job.job_type_name, recipe_job.job_type_version, recipe_job.job_type_rev_num)
        revision = revs_by_tuple[tup]
        superseded_job = superseded_jobs[node_name] if node_name in superseded_jobs else None
        # Pass the recipe's job configuration, if any, down to the new job
        job_config = JobConfigurationV6(recipe_model.configuration).get_configuration() if recipe_model.configuration else None
        job = Job.objects.create_job_v6(revision, event_id=recipe_model.event_id, root_recipe_id=recipe_model.root_recipe_id,
                                        recipe_id=recipe_model.id, batch_id=recipe_model.batch_id,
                                        superseded_job=superseded_job, job_config=job_config)
        recipe_jobs_map[node_name] = job
    Job.objects.bulk_create(recipe_jobs_map.values())

    # Create recipe nodes linking the recipe to the new jobs
    recipe_nodes = RecipeNode.objects.create_recipe_job_nodes(recipe_model.id, recipe_jobs_map)
    RecipeNode.objects.bulk_create(recipe_nodes)

    # Determine which of the new jobs should have their input processed now
    process_input_job_ids = []
    for recipe_job in recipe_jobs:
        job = recipe_jobs_map[recipe_job.node_name]
        # Process input if the job already has input or the message asked for it
        process_input = recipe_model.id and recipe_job.process_input
        if job.has_input() or process_input:
            process_input_job_ids.append(job.id)
    process_job_inputs(process_input_job_ids)
    update_recipe_metrics([recipe_model.id])
def process_job_inputs(process_input_job_ids):
    """Mimics effect of create_process_job_input_messages for unit testing

    For each job: generate its input data from its recipe if it has none yet,
    process the input under a lock, and queue jobs that have never executed.

    :param process_input_job_ids: IDs of the jobs to process
    """
    queued_jobs = []
    for job_id in process_input_job_ids:
        job = Job.objects.get_job_with_interfaces(job_id)
        if not job.has_input():
            if not job.recipe:
                # Job has no input and is not in a recipe; the real message
                # would not re-run, so skip it here as well
                continue
            try:
                generate_job_input_data_from_recipe(job)
            except InvalidData as ex:
                # Fix: use print() calls (valid in both Python 2 and 3)
                # instead of Python-2-only print statements
                print('Recipe created invalid input data for job %d. Message will not re-run.' % job_id)
                print(ex)
                continue
        # Lock job model and process job's input data
        with transaction.atomic():
            job = Job.objects.get_locked_job(job_id)
            Job.objects.process_job_input(job)
        # Queue the job if it has never been executed
        if job.num_exes == 0:
            queued_jobs.append(QueuedJob(job.id, 0))
    queue_jobs(queued_jobs)
def generate_job_input_data_from_recipe(job):
    """Generates the job's input data from its recipe dependencies and validates and sets the input data on the job

    :param job: The job with related job_type_rev and recipe__recipe_type_rev models
    :type job: :class:`job.models.Job`

    :raises :class:`data.data.exceptions.InvalidData`: If the data is invalid
    """
    from recipe.models import RecipeNode

    # TODO: this is a hack to work with old legacy recipe data with workspaces, remove when legacy job types go
    old_recipe_input_dict = dict(job.recipe.input)

    # Get job input from dependencies in the recipe
    recipe_input_data = job.recipe.get_input_data()
    node_outputs = RecipeNode.objects.get_recipe_node_outputs(job.recipe_id)
    # Find this job's node name within the recipe
    # NOTE(review): node_name would be unbound if no node matches — assumes
    # the job is always a node of its recipe; confirm
    for node_output in node_outputs.values():
        if node_output.node_type == 'job' and node_output.id == job.id:
            node_name = node_output.node_name
            break

    # TODO: this is a hack to work with old legacy recipe data with workspaces, remove when legacy job types go
    job.recipe.input = old_recipe_input_dict

    definition = job.recipe.recipe_type_rev.get_definition()
    input_data = definition.generate_node_input_data(node_name, recipe_input_data, node_outputs)
    Job.objects.set_job_input_data_v6(job, input_data)
def queue_jobs(queued_jobs, requeue=False, priority=None):
    """Mimics effect of create_queued_jobs_messages for unit testing

    :param queued_jobs: QueuedJob messages (job_id, exe_num) to place on the queue
    :param requeue: Whether this is a re-queue (vs first-time queueing)
    :param priority: Optional priority override for the queued jobs
    """
    # Idiom: build the ID list and the locked-model map with comprehensions
    job_ids = [queued_job.job_id for queued_job in queued_jobs]
    with transaction.atomic():
        # Retrieve locked job models, keyed by ID
        job_models = {job.id: job for job in Job.objects.get_locked_jobs(job_ids)}
        jobs_to_queue = []
        for queued_job in queued_jobs:
            job_model = job_models[queued_job.job_id]
            # If execution number does not match, then this update is obsolete
            if job_model.num_exes != queued_job.exe_num:
                # Ignore this job
                continue
            jobs_to_queue.append(job_model)
        # Queue jobs
        if jobs_to_queue:
            _ = Queue.objects.queue_jobs(jobs_to_queue, requeue=requeue, priority=priority)
def update_recipe_metrics(recipe_ids=None, job_ids=None):
    """Mimics effects of create_update_recipe_metrics_messages methods for unit testing

    :param recipe_ids: IDs of recipes whose metrics should be updated
    :param job_ids: IDs of jobs whose owning recipes should also be updated

    Fix: the previous signature used a mutable default (``recipe_ids=[]``)
    that this function mutated via extend(), leaking IDs between calls.
    """
    # Copy into a fresh list so neither a shared default nor the caller's
    # list is ever mutated
    recipe_ids = list(recipe_ids) if recipe_ids else []
    if job_ids:
        recipe_ids.extend(Recipe.objects.get_recipe_ids_for_jobs(job_ids))
    if recipe_ids:
        # Recurse into sub-recipes first so their metrics are current
        sub_recipe_ids = Recipe.objects.get_recipe_ids_for_sub_recipes(recipe_ids)
        update_recipe_metrics(recipe_ids=sub_recipe_ids)
    Recipe.objects.update_recipe_metrics(recipe_ids)

    # If any of these recipes are sub-recipes, grab root recipe IDs and update those recipes
    root_recipe_ids = set()
    for recipe in Recipe.objects.filter(id__in=recipe_ids):
        if recipe.root_recipe_id:
            root_recipe_ids.add(recipe.root_recipe_id)
    if root_recipe_ids:
        for root_recipe_id in root_recipe_ids:
            update_recipe(root_recipe_id)

    # For any top-level recipes (not a sub-recipe) update any batches that these recipes belong to
    from batch.messages.update_batch_metrics import create_update_batch_metrics_messages
    batch_ids = set()
    qry = Recipe.objects.filter(id__in=recipe_ids, recipe__isnull=True, batch__isnull=False).only('batch_id')
    for recipe in qry:
        batch_ids.add(recipe.batch_id)
    if batch_ids:
        Batch.objects.update_batch_metrics(batch_ids)
def create_subrecipes(recipe_model, subrecipes):
    """Mimics effect of create_subrecipes_messages for unit testing

    :param recipe_model: The recipe that contains the sub-recipe nodes
    :type recipe_model: :class:`recipe.models.Recipe`
    :param subrecipes: List of SubRecipe messages describing the sub-recipes to create
    """
    sub_recipes_map = {}  # {Node name: recipe model}
    superseded_sub_recipes = {}
    revision_ids = []

    # Get superseded sub-recipes from superseded recipe
    if recipe_model.superseded_recipe_id:
        superseded_sub_recipes = RecipeNode.objects.get_subrecipes(recipe_model.superseded_recipe_id)
        revision_ids = [r.recipe_type_rev_id for r in superseded_sub_recipes.values()]

    # Get recipe type revisions, indexed by (name, revision) tuple
    revision_tuples = [(sub.recipe_type_name, sub.recipe_type_rev_num) for sub in subrecipes]
    revs_by_id = RecipeTypeRevision.objects.get_revision_map(revision_ids, revision_tuples)
    revs_by_tuple = {(rev.recipe_type.name, rev.revision_num): rev for rev in revs_by_id.values()}

    # Create new recipe models
    # NOTE(review): process_input_by_node is populated but never read below
    process_input_by_node = {}
    for sub_recipe in subrecipes:
        node_name = sub_recipe.node_name
        process_input_by_node[node_name] = sub_recipe.process_input
        revision = revs_by_tuple[(sub_recipe.recipe_type_name, sub_recipe.recipe_type_rev_num)]
        superseded_recipe = superseded_sub_recipes[node_name] if node_name in superseded_sub_recipes else None
        recipe = Recipe.objects.create_recipe_v6(revision, recipe_model.event_id, root_recipe_id=recipe_model.root_recipe_id,
                                                 recipe_id=recipe_model.id, batch_id=recipe_model.batch_id,
                                                 superseded_recipe=superseded_recipe)
        sub_recipes_map[node_name] = recipe
    Recipe.objects.bulk_create(sub_recipes_map.values())

    # Create recipe nodes linking the parent recipe to the sub-recipes
    recipe_nodes = RecipeNode.objects.create_subrecipe_nodes(recipe_model.id, sub_recipes_map)
    RecipeNode.objects.bulk_create(recipe_nodes)

    # Determine which sub-recipes should have their input processed now
    process_input_sub_ids = []
    for sub_recipe in subrecipes:
        recipe = sub_recipes_map[sub_recipe.node_name]
        if sub_recipe.process_input:
            process_input_sub_ids.append(recipe.id)

    # Set up recipe diffs
    # Uncomment and implement if needed to test superseding recipes
    """
    if self.superseded_recipe_id:
        for node_name, recipe in sub_recipes_map.items():
            pair = _RecipePair(recipe.superseded_recipe, recipe)
            rev_id = recipe.superseded_recipe.recipe_type_rev_id
            old_revision = revs_by_id[rev_id]
            new_revision = revs_by_tuple[(recipe.recipe_type.name, recipe.recipe_type_rev.revision_num)]
            diff = RecipeDiff(old_revision.get_definition(), new_revision.get_definition())
            if self.forced_nodes:
                sub_forced_nodes = self.forced_nodes.get_forced_nodes_for_subrecipe(node_name)
                if sub_forced_nodes:
                    diff.set_force_reprocess(sub_forced_nodes)
                self._recipe_diffs.append(_RecipeDiff(diff, [pair]))"""
    process_recipe_inputs(process_input_sub_ids)
    update_recipe_metrics([recipe_model.id])
def create_recipe_condition(root_recipe=None, recipe=None, batch=None, is_processed=None, is_accepted=None, save=False):
    """Creates a recipe condition model for unit testing

    :param root_recipe: The root recipe containing the condition; defaults to *recipe*
    :type root_recipe: :class:'recipe.models.Recipe'
    :param recipe: The recipe containing the condition; created if not given
    :type recipe: :class:'recipe.models.Recipe'
    :param batch: The batch
    :type batch: :class:'batch.models.Batch'
    :param is_processed: Whether the condition has been processed
    :type is_processed: bool
    :param is_accepted: Whether the condition has been accepted
    :type is_accepted: bool
    :param save: Whether to save the model to the database
    :type save: bool

    :returns: The condition model
    :rtype: :class:`recipe.models.RecipeCondition`
    """
    if not recipe:
        recipe = create_recipe()

    condition_model = RecipeCondition()
    condition_model.root_recipe = root_recipe or recipe
    condition_model.recipe = recipe
    condition_model.batch = batch
    if is_processed is not None:
        condition_model.is_processed = is_processed
    if is_accepted is not None:
        condition_model.is_accepted = is_accepted
    # A processed condition gets a processed timestamp
    if condition_model.is_processed:
        condition_model.processed = timezone.now()
    if save:
        condition_model.save()
    return condition_model
# TODO: this is deprecated and should be replaced with create_recipe_node()
def create_recipe_job(recipe=None, job_name=None, job=None):
    """Creates a recipe-to-job link model for unit testing

    :param recipe: The associated recipe; created if not given
    :type recipe: :class:'recipe.models.Recipe'
    :param job_name: The node name for the recipe job
    :type job_name: string
    :param job: The associated job; created if not given
    :type job: :class:'job.models.Job'

    :returns: The recipe job model
    :rtype: :class:`recipe.models.RecipeNode`
    """
    recipe = recipe or create_recipe()
    job_name = job_name or 'Test Job Name'
    job = job or job_test_utils.create_job()

    node = RecipeNode()
    node.node_name = job_name
    node.job = job
    node.recipe = recipe
    node.save()
    return node
def create_recipe_node(recipe=None, node_name=None, condition=None, job=None, sub_recipe=None, save=False,
                       is_original=True):
    """Creates a RecipeNode model for unit testing.

    :param recipe: The recipe containing the node
    :type recipe: :class:'recipe.models.Recipe'
    :param node_name: The node name
    :type node_name: string
    :param condition: The condition in the node
    :type condition: :class:'recipe.models.RecipeCondition'
    :param job: The job in the node
    :type job: :class:'job.models.Job'
    :param sub_recipe: The recipe in the node
    :type sub_recipe: :class:'recipe.models.Recipe'
    :param save: Whether to persist the model to the database
    :type save: bool
    :param is_original: Whether the recipe node is original
    :type is_original: bool
    :returns: The recipe_node model
    :rtype: :class:`recipe.models.RecipeNode`
    """
    recipe = recipe or create_recipe()
    node_name = node_name or 'Test Node Name'
    # A node must point at something; default to a new job when nothing given
    if not (job or sub_recipe):
        job = job_test_utils.create_job()

    node = RecipeNode()
    node.recipe = recipe
    node.node_name = node_name
    node.is_original = is_original
    # Attach exactly one payload, preferring condition, then job, then recipe
    for field, value in (('condition', condition), ('job', job), ('sub_recipe', sub_recipe)):
        if value:
            setattr(node, field, value)
            break

    if save:
        node.save()
    return node
def create_input_file(recipe=None, input_file=None, recipe_input=None, file_name='my_test_file.txt', media_type='text/plain',
                      file_size=100, file_path=None, workspace=None, countries=None, is_deleted=False, data_type_tags=None,
                      last_modified=None, source_started=None, source_ended=None):
    """Creates a Scale file and recipe input file model for unit testing

    :param recipe: The recipe receiving the input file (a new one is created if omitted)
    :type recipe: :class:'recipe.models.Recipe'
    :param input_file: The Scale file to link (a new one is created if omitted)
    :type input_file: :class:`storage.models.ScaleFile`
    :param recipe_input: The recipe input name (defaults to 'test_input')
    :type recipe_input: string
    :param data_type_tags: Data type tags for the created file (defaults to an empty list)
    :type data_type_tags: list
    :returns: The file model
    :rtype: :class:`storage.models.ScaleFile`
    """
    # Fix: the default for data_type_tags was a mutable list literal, which
    # Python shares across all calls — any mutation would leak between tests.
    if data_type_tags is None:
        data_type_tags = []
    if not recipe:
        recipe = create_recipe()
    if not recipe_input:
        recipe_input = 'test_input'
    if not input_file:
        input_file = storage_test_utils.create_file(file_name=file_name, media_type=media_type, file_size=file_size,
                                                    file_path=file_path, workspace=workspace, countries=countries,
                                                    is_deleted=is_deleted, data_type_tags=data_type_tags,
                                                    last_modified=last_modified, source_started=source_started,
                                                    source_ended=source_ended)

    RecipeInputFile.objects.create(recipe=recipe, input_file=input_file, recipe_input=recipe_input)

    return input_file
|
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python Class: This PoCXCOCompiler compiles xco IPCores to netlists
#
# Description:
# ------------------------------------
# TODO:
# -
# -
#
# License:
# ==============================================================================
# Copyright 2007-2015 Technische Universitaet Dresden - Germany
# Chair for VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# entry point
if __name__ == "__main__":
	# This module is a library component and must not be executed directly.
	from lib.Functions import Exit
	Exit.printThisIsNoExecutableFile("The PoC-Library - Python Module Compiler.XSTCompiler")
else:
	# place library initialization code here
	pass
# load dependencies
from pathlib import Path
from Base.Exceptions import *
from Compiler.Base import PoCCompiler
from Compiler.Exceptions import *
class Compiler(PoCCompiler):
	"""Synthesizes PoC IP cores to netlists by driving Xilinx XST (ISE)."""

	# Maps tool name -> platform-specific executable file name; filled in __init__.
	__executables = {}

	def __init__(self, host, showLogs, showReport):
		"""Select the XST executable for the current host platform.

		:param host:       PoC host object providing platform, directories and netListConfig.
		:param showLogs:   forwarded to PoCCompiler; controls log printing.
		:param showReport: forwarded to PoCCompiler; controls report printing.
		"""
		super(self.__class__, self).__init__(host, showLogs, showReport)

		if (host.platform == "Windows"):
			self.__executables['XST'] = "xst.exe"
		elif (host.platform == "Linux"):
			self.__executables['XST'] = "xst"
		else:
			raise PlatformNotSupportedException(self.platform)

	def run(self, pocEntity, device):
		"""Synthesize *pocEntity* for *device*: generate an XST option file and
		project file from the netlist configuration, then invoke xst.

		:param pocEntity: entity whose section in netListConfig is read.
		:param device:    target FPGA device; str(device) is used in paths/options.
		"""
		import os
		import re
		import shutil
		import subprocess
		import textwrap

		self.printNonQuiet(str(pocEntity))
		self.printNonQuiet(" preparing compiler environment...")

		# TODO: improve / resolve board to device
		deviceString = str(device).upper()
		# NOTE(review): deviceSection is assigned but never used in this method.
		deviceSection = "Device." + deviceString

		# create temporary directory for XST if not existent
		tempXstPath = self.host.directories["XSTTemp"]
		if not (tempXstPath).exists():
			self.printVerbose("Creating temporary directory for XST files.")
			self.printDebug("Temporary directors: %s" % str(tempXstPath))
			tempXstPath.mkdir(parents=True)

		# create output directory for CoreGen if not existent
		xstOutputPath = self.host.directories["PoCNetList"] / deviceString
		if not (xstOutputPath).exists():
			# NOTE(review): both messages below are copied from the temp-dir branch
			# above ("directors" typo, "temporary" wording); they are runtime
			# output strings and therefore left unchanged here.
			self.printVerbose("Creating temporary directory for XST files.")
			self.printDebug("Temporary directors: %s" % str(xstOutputPath))
			xstOutputPath.mkdir(parents=True)

		# add the key Device to section SPECIAL at runtime to change interpolation results
		self.host.netListConfig['SPECIAL'] = {}
		self.host.netListConfig['SPECIAL']['Device'] = deviceString
		self.host.netListConfig['SPECIAL']['DeviceSeries'] = device.series()
		self.host.netListConfig['SPECIAL']['OutputDir'] = tempXstPath.as_posix()

		# read copy tasks
		# copyFileList = self.host.netListConfig[str(pocEntity)]['Copy']
		# self.printDebug("CopyTasks: \n " + ("\n ".join(copyFileList.split("\n"))))
		# copyTasks = []
		# for item in copyFileList.split("\n"):
		# list1 = re.split("\s+->\s+", item)
		# if (len(list1) != 2): raise CompilerException("Expected 2 arguments for every copy task!")
		#
		# copyTasks.append((Path(list1[0]), Path(list1[1])))
		# NOTE(review): the copy-task parsing above is commented out, but
		# 'copyTasks' is still iterated at the end of this method — that loop
		# raises NameError at runtime. TODO: restore the parsing or initialize
		# copyTasks = [].

		# setup all needed paths to execute coreGen
		xstExecutablePath = self.host.directories["ISEBinary"] / self.__executables['XST']

		# # read netlist settings from configuration file
		# ipCoreName = self.host.netListConfig[str(pocEntity)]['IPCoreName']
		# xcoInputFilePath = self.host.directories["PoCRoot"] / self.host.netListConfig[str(pocEntity)]['XstFile']
		# cgcTemplateFilePath = self.host.directories["PoCNetList"] / "template.cgc"
		# cgpFilePath = xstGenPath / "coregen.cgp"
		# cgcFilePath = xstGenPath / "coregen.cgc"
		# xcoFilePath = xstGenPath / xcoInputFilePath.name

		# fail early with a chained exception if the entity has no config section
		if not self.host.netListConfig.has_section(str(pocEntity)):
			from configparser import NoSectionError
			raise CompilerException("IP-Core '" + str(pocEntity) + "' not found.") from NoSectionError(str(pocEntity))

		# read netlist settings from configuration file
		if (self.host.netListConfig[str(pocEntity)]['Type'] != "XilinxSynthesis"):
			raise CompilerException("This entity is not configured for XST compilation.")

		topModuleName = self.host.netListConfig[str(pocEntity)]['TopModule']
		fileListFilePath = self.host.directories["PoCRoot"] / self.host.netListConfig[str(pocEntity)]['FileListFile']
		xcfFilePath = self.host.directories["PoCRoot"] / self.host.netListConfig[str(pocEntity)]['XSTConstraintsFile']
		filterFilePath = self.host.directories["PoCRoot"] / self.host.netListConfig[str(pocEntity)]['XSTFilterFile']
		#xstOptionsFilePath = self.host.directories["XSTFiles"] / self.host.netListConfig[str(pocEntity)]['XSTOptionsFile']
		xstTemplateFilePath = self.host.directories["XSTFiles"] / self.host.netListConfig[str(pocEntity)]['XSTOptionsFile']
		xstFilePath = tempXstPath / (topModuleName + ".xst")
		prjFilePath = tempXstPath / (topModuleName + ".prj")
		reportFilePath = tempXstPath / (topModuleName + ".log")

		#if (not xstOptionsFilePath.exists()):
		# read/write XST options file
		self.printDebug("Reading Xilinx Compiler Tool option file from '%s'" % str(xstTemplateFilePath))
		with xstTemplateFilePath.open('r') as xstFileHandle:
			xstFileContent = xstFileHandle.read()

		# Placeholder values substituted into the .xst template via str.format().
		xstTemplateDictionary = {
			'prjFile' : str(prjFilePath),
			'UseNewParser' : self.host.netListConfig[str(pocEntity)]['XSTOption.UseNewParser'],
			'InputFormat' : self.host.netListConfig[str(pocEntity)]['XSTOption.InputFormat'],
			'OutputFormat' : self.host.netListConfig[str(pocEntity)]['XSTOption.OutputFormat'],
			'OutputName' : topModuleName,
			'Part' : str(device),
			'TopModuleName' : topModuleName,
			'OptimizationMode' : self.host.netListConfig[str(pocEntity)]['XSTOption.OptimizationMode'],
			'OptimizationLevel' : self.host.netListConfig[str(pocEntity)]['XSTOption.OptimizationLevel'],
			'PowerReduction' : self.host.netListConfig[str(pocEntity)]['XSTOption.PowerReduction'],
			'IgnoreSynthesisConstraintsFile' : self.host.netListConfig[str(pocEntity)]['XSTOption.IgnoreSynthesisConstraintsFile'],
			'SynthesisConstraintsFile' : str(xcfFilePath),
			'KeepHierarchy' : self.host.netListConfig[str(pocEntity)]['XSTOption.KeepHierarchy'],
			'NetListHierarchy' : self.host.netListConfig[str(pocEntity)]['XSTOption.NetListHierarchy'],
			'GenerateRTLView' : self.host.netListConfig[str(pocEntity)]['XSTOption.GenerateRTLView'],
			'GlobalOptimization' : self.host.netListConfig[str(pocEntity)]['XSTOption.Globaloptimization'],
			'ReadCores' : self.host.netListConfig[str(pocEntity)]['XSTOption.ReadCores'],
			'SearchDirectories' : '"%s"' % str(xstOutputPath),
			'WriteTimingConstraints' : self.host.netListConfig[str(pocEntity)]['XSTOption.WriteTimingConstraints'],
			'CrossClockAnalysis' : self.host.netListConfig[str(pocEntity)]['XSTOption.CrossClockAnalysis'],
			'HierarchySeparator' : self.host.netListConfig[str(pocEntity)]['XSTOption.HierarchySeparator'],
			'BusDelimiter' : self.host.netListConfig[str(pocEntity)]['XSTOption.BusDelimiter'],
			'Case' : self.host.netListConfig[str(pocEntity)]['XSTOption.Case'],
			'SliceUtilizationRatio' : self.host.netListConfig[str(pocEntity)]['XSTOption.SliceUtilizationRatio'],
			'BRAMUtilizationRatio' : self.host.netListConfig[str(pocEntity)]['XSTOption.BRAMUtilizationRatio'],
			'DSPUtilizationRatio' : self.host.netListConfig[str(pocEntity)]['XSTOption.DSPUtilizationRatio'],
			'LUTCombining' : self.host.netListConfig[str(pocEntity)]['XSTOption.LUTCombining'],
			'ReduceControlSets' : self.host.netListConfig[str(pocEntity)]['XSTOption.ReduceControlSets'],
			'Verilog2001' : self.host.netListConfig[str(pocEntity)]['XSTOption.Verilog2001'],
			'FSMExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.FSMExtract'],
			'FSMEncoding' : self.host.netListConfig[str(pocEntity)]['XSTOption.FSMEncoding'],
			'FSMSafeImplementation' : self.host.netListConfig[str(pocEntity)]['XSTOption.FSMSafeImplementation'],
			'FSMStyle' : self.host.netListConfig[str(pocEntity)]['XSTOption.FSMStyle'],
			'RAMExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.RAMExtract'],
			'RAMStyle' : self.host.netListConfig[str(pocEntity)]['XSTOption.RAMStyle'],
			'ROMExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.ROMExtract'],
			'ROMStyle' : self.host.netListConfig[str(pocEntity)]['XSTOption.ROMStyle'],
			'MUXExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.MUXExtract'],
			'MUXStyle' : self.host.netListConfig[str(pocEntity)]['XSTOption.MUXStyle'],
			'DecoderExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.DecoderExtract'],
			'PriorityExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.PriorityExtract'],
			'ShRegExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.ShRegExtract'],
			'ShiftExtract' : self.host.netListConfig[str(pocEntity)]['XSTOption.ShiftExtract'],
			'XorCollapse' : self.host.netListConfig[str(pocEntity)]['XSTOption.XorCollapse'],
			'AutoBRAMPacking' : self.host.netListConfig[str(pocEntity)]['XSTOption.AutoBRAMPacking'],
			'ResourceSharing' : self.host.netListConfig[str(pocEntity)]['XSTOption.ResourceSharing'],
			'ASyncToSync' : self.host.netListConfig[str(pocEntity)]['XSTOption.ASyncToSync'],
			'UseDSP48' : self.host.netListConfig[str(pocEntity)]['XSTOption.UseDSP48'],
			'IOBuf' : self.host.netListConfig[str(pocEntity)]['XSTOption.IOBuf'],
			'MaxFanOut' : self.host.netListConfig[str(pocEntity)]['XSTOption.MaxFanOut'],
			'BufG' : self.host.netListConfig[str(pocEntity)]['XSTOption.BufG'],
			'RegisterDuplication' : self.host.netListConfig[str(pocEntity)]['XSTOption.RegisterDuplication'],
			'RegisterBalancing' : self.host.netListConfig[str(pocEntity)]['XSTOption.RegisterBalancing'],
			'SlicePacking' : self.host.netListConfig[str(pocEntity)]['XSTOption.SlicePacking'],
			'OptimizePrimitives' : self.host.netListConfig[str(pocEntity)]['XSTOption.OptimizePrimitives'],
			'UseClockEnable' : self.host.netListConfig[str(pocEntity)]['XSTOption.UseClockEnable'],
			'UseSyncSet' : self.host.netListConfig[str(pocEntity)]['XSTOption.UseSyncSet'],
			'UseSyncReset' : self.host.netListConfig[str(pocEntity)]['XSTOption.UseSyncReset'],
			'PackIORegistersIntoIOBs' : self.host.netListConfig[str(pocEntity)]['XSTOption.PackIORegistersIntoIOBs'],
			'EquivalentRegisterRemoval' : self.host.netListConfig[str(pocEntity)]['XSTOption.EquivalentRegisterRemoval'],
			'SliceUtilizationRatioMaxMargin' : self.host.netListConfig[str(pocEntity)]['XSTOption.SliceUtilizationRatioMaxMargin']
		}

		xstFileContent = xstFileContent.format(**xstTemplateDictionary)

		# optional generics are appended verbatim to the option file
		if (self.host.netListConfig.has_option(str(pocEntity), 'XSTOption.Generics')):
			xstFileContent += "-generics { %s }" % self.host.netListConfig[str(pocEntity)]['XSTOption.Generics']

		self.printDebug("Writing Xilinx Compiler Tool option file to '%s'" % str(xstFilePath))
		with xstFilePath.open('w') as xstFileHandle:
			xstFileHandle.write(xstFileContent)

		# else: # xstFilePath exists
		# self.printDebug("Copy XST options file from '%s' to '%s'" % (str(xstOptionsFilePath), str(xstFilePath)))
		# shutil.copy(str(xstOptionsFilePath), str(xstFilePath))

		# parse project filelist
		filesLineRegExpStr = r"\s*(?P<Keyword>(vhdl(\-(87|93|02|08))?|xilinx))" # Keywords: vhdl[-nn], xilinx
		filesLineRegExpStr += r"\s+(?P<VHDLLibrary>[_a-zA-Z0-9]+)" # VHDL library name
		filesLineRegExpStr += r"\s+\"(?P<VHDLFile>.*?)\"" # VHDL filename without "-signs
		filesLineRegExp = re.compile(filesLineRegExpStr)

		self.printDebug("Reading filelist '%s'" % str(fileListFilePath))
		xstProjectFileContent = ""
		with fileListFilePath.open('r') as prjFileHandle:
			for line in prjFileHandle:
				filesLineRegExpMatch = filesLineRegExp.match(line)
				if (filesLineRegExpMatch is not None):
					if (filesLineRegExpMatch.group('Keyword') == "vhdl"):
						vhdlFileName = filesLineRegExpMatch.group('VHDLFile')
						vhdlFilePath = self.host.directories["PoCRoot"] / vhdlFileName
					elif (filesLineRegExpMatch.group('Keyword')[0:5] == "vhdl-"):
						# NOTE(review): self.__vhdlStandard is never assigned in this
						# class, so hitting this branch raises AttributeError — TODO
						# confirm the intended default (e.g. "93") and set it in __init__.
						if (filesLineRegExpMatch.group('Keyword')[-2:] == self.__vhdlStandard):
							vhdlFileName = filesLineRegExpMatch.group('VHDLFile')
							vhdlFilePath = self.host.directories["PoCRoot"] / vhdlFileName
					elif (filesLineRegExpMatch.group('Keyword') == "xilinx"):
						vhdlFileName = filesLineRegExpMatch.group('VHDLFile')
						vhdlFilePath = self.host.directories["XilinxPrimitiveSource"] / vhdlFileName
					vhdlLibraryName = filesLineRegExpMatch.group('VHDLLibrary')
					xstProjectFileContent += "vhdl %s \"%s\"\n" % (vhdlLibraryName, str(vhdlFilePath))

					if (not vhdlFilePath.exists()):
						raise CompilerException("Can not add '" + vhdlFileName + "' to project file.") from FileNotFoundError(str(vhdlFilePath))

		# write iSim project file
		self.printDebug("Writing XST project file to '%s'" % str(prjFilePath))
		with prjFilePath.open('w') as prjFileHandle:
			prjFileHandle.write(xstProjectFileContent)

		# change working directory to temporary XST path
		self.printVerbose(' cd "%s"' % str(tempXstPath))
		os.chdir(str(tempXstPath))

		# running XST
		# ==========================================================================
		self.printNonQuiet(" running XST...")
		# assemble XST command as list of parameters
		parameterList = [
			str(xstExecutablePath),
			'-intstyle', 'xflow',
			'-filter', str(filterFilePath),
			'-ifn', str(xstFilePath),
			'-ofn', str(reportFilePath)
		]
		self.printDebug("call xst: %s" % str(parameterList))
		self.printVerbose(' %s -intstyle xflow -filter "%s" -ifn "%s" -ofn "%s"' % (str(xstExecutablePath), str(fileListFilePath), str(xstFilePath), str(reportFilePath)))
		if (self.dryRun == False):
			try:
				xstLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, universal_newlines=True)
			except subprocess.CalledProcessError as ex:
				print("ERROR while executing XST")
				print("Return Code: %i" % ex.returncode)
				print("--------------------------------------------------------------------------------")
				print(ex.output)

			# NOTE(review): if check_output raised above, xstLog was never bound,
			# so printing it below raises NameError — presumably ex.output was
			# intended in the failure case; verify against the other compilers.
			if self.showLogs:
				print("XST log file:")
				print("--------------------------------------------------------------------------------")
				print(xstLog)
				print()

			print("return...")
			return

		# copy resulting files into PoC's netlist directory
		# NOTE(review): copyTasks is undefined here (its parsing is commented out
		# near the top of this method), so this loop raises NameError when reached.
		self.printNonQuiet(' copy result files into output directory...')
		for task in copyTasks:
			(fromPath, toPath) = task
			if not fromPath.exists(): raise CompilerException("Can not copy '" + str(fromPath) + "' to destination.") from FileNotFoundError(str(fromPath))

			#if not toPath.exists(): raise PoCCompiler.PoCCompilerException("File '%s' does not exist!" % str(toPath))
			self.printVerbose(" copying '%s'" % str(fromPath))
			shutil.copy(str(fromPath), str(toPath))
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
from os_brick import exception
from os_brick.i18n import _
from os_brick import initiator
from os_brick.initiator.connectors import base
from os_brick.initiator import linuxfc
from os_brick import utils
synchronized = lockutils.synchronized_with_prefix('os-brick-')
LOG = logging.getLogger(__name__)
class FibreChannelConnector(base.BaseLinuxConnector):
    """Connector class to attach/detach Fibre Channel volumes."""

    def __init__(self, root_helper, driver=None,
                 execute=None, use_multipath=False,
                 device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        """Initialize the connector.

        :param root_helper: root permission helper command prefix
        :param use_multipath: when True, prefer the multipath device node over
                              a single path in connect_volume
        :param device_scan_attempts: max rescan retries while waiting for the
                                     device node to appear
        """
        # Helper that wraps the Linux FC tooling (HBA discovery, host rescans)
        self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute)
        super(FibreChannelConnector, self).__init__(
            root_helper, driver=driver,
            execute=execute,
            device_scan_attempts=device_scan_attempts,
            *args, **kwargs)
        self.use_multipath = use_multipath

    def set_execute(self, execute):
        # Propagate the execute helper to the SCSI and FC utility objects too,
        # so all three layers shell out through the same callable.
        super(FibreChannelConnector, self).set_execute(execute)
        self._linuxscsi.set_execute(execute)
        self._linuxfc.set_execute(execute)

    @staticmethod
    def get_connector_properties(root_helper, *args, **kwargs):
        """The Fibre Channel connector properties."""
        props = {}
        fc = linuxfc.LinuxFibreChannel(root_helper,
                                       execute=kwargs.get('execute'))

        # Only report wwpns/wwnns keys when the host actually has FC HBAs
        wwpns = fc.get_fc_wwpns()
        if wwpns:
            props['wwpns'] = wwpns
        wwnns = fc.get_fc_wwnns()
        if wwnns:
            props['wwnns'] = wwnns

        return props

    def get_search_path(self):
        """Where do we look for FC based volumes."""
        return '/dev/disk/by-path'

    def _add_targets_to_connection_properties(self, connection_properties):
        """Normalize targets and add a 'targets' list of (wwn, lun) tuples.

        Lower-cases target_wwn/target_wwns, pairs each wwn with its lun, and
        when an initiator_target_map is present also derives an
        initiator_target_lun_map.  Returns the mutated properties dict.
        """
        LOG.debug('Adding targets to connection properties receives: %s',
                  connection_properties)
        target_wwn = connection_properties.get('target_wwn')
        target_wwns = connection_properties.get('target_wwns')
        # Plural key wins; the singular key may be a list, a string, or absent
        if target_wwns:
            wwns = target_wwns
        elif isinstance(target_wwn, list):
            wwns = target_wwn
        elif isinstance(target_wwn, str):
            wwns = [target_wwn]
        else:
            wwns = []

        # Convert wwns to lower case
        wwns = [wwn.lower() for wwn in wwns]
        # Write the normalized values back under whichever key was provided
        if target_wwns:
            connection_properties['target_wwns'] = wwns
        elif target_wwn:
            connection_properties['target_wwn'] = wwns

        target_lun = connection_properties.get('target_lun', 0)
        target_luns = connection_properties.get('target_luns')
        if target_luns:
            luns = target_luns
        elif isinstance(target_lun, int):
            luns = [target_lun]
        else:
            luns = []

        if len(luns) == len(wwns):
            # Handles single wwn + lun or multiple, potentially
            # different wwns or luns
            targets = list(zip(wwns, luns))
        elif len(luns) == 1 and len(wwns) > 1:
            # For the case of multiple wwns, but a single lun (old path)
            targets = [(wwn, luns[0]) for wwn in wwns]
        else:
            # Something is wrong, this shouldn't happen.
            msg = _("Unable to find potential volume paths for FC device "
                    "with luns: %(luns)s and wwns: %(wwns)s.") % {
                "luns": luns, "wwns": wwns}
            LOG.error(msg)
            raise exception.VolumePathsNotFound(msg)

        connection_properties['targets'] = targets
        # wwpn -> lun lookup used when rebuilding the initiator map below
        wwpn_lun_map = {wwpn: lun for wwpn, lun in targets}

        # If there is an initiator_target_map we can update it too and generate
        # the initiator_target_lun_map from it
        if connection_properties.get('initiator_target_map') is not None:
            # Convert it to lower case
            itmap = connection_properties['initiator_target_map']
            itmap = {k.lower(): [port.lower() for port in v]
                     for k, v in itmap.items()}
            connection_properties['initiator_target_map'] = itmap

            itmaplun = dict()
            for init_wwpn, target_wwpns in itmap.items():
                itmaplun[init_wwpn] = [(target_wwpn, wwpn_lun_map[target_wwpn])
                                       for target_wwpn in target_wwpns
                                       if target_wwpn in wwpn_lun_map]

                # We added the if in the previous list comprehension in case
                # drivers return targets in the map that are not reported in
                # target_wwn or target_wwns, but we warn about it.
                if len(itmaplun[init_wwpn]) != len(itmap[init_wwpn]):
                    unknown = set(itmap[init_wwpn])
                    unknown.difference_update(itmaplun[init_wwpn])
                    LOG.warning('Driver returned an unknown targets in the '
                                'initiator mapping %s', ', '.join(unknown))
            connection_properties['initiator_target_lun_map'] = itmaplun

        LOG.debug('Adding targets to connection properties returns: %s',
                  connection_properties)
        return connection_properties

    def _get_possible_volume_paths(self, connection_properties, hbas):
        """Return candidate /dev/disk/by-path names for the given HBAs.

        Expects connection_properties to already contain 'targets' (set by
        _add_targets_to_connection_properties).
        """
        targets = connection_properties['targets']
        possible_devs = self._get_possible_devices(hbas, targets)
        host_paths = self._get_host_devices(possible_devs)
        return host_paths

    def get_volume_paths(self, connection_properties):
        """Return the candidate paths that actually exist on this host."""
        volume_paths = []

        # first fetch all of the potential paths that might exist
        # how the FC fabric is zoned may alter the actual list
        # that shows up on the system. So, we verify each path.
        hbas = self._linuxfc.get_fc_hbas_info()
        device_paths = self._get_possible_volume_paths(
            connection_properties, hbas)
        for path in device_paths:
            if os.path.exists(path):
                volume_paths.append(path)

        return volume_paths

    @utils.trace
    @synchronized('extend_volume', external=True)
    def extend_volume(self, connection_properties):
        """Update the local kernel's size information.

        Try and update the local kernel's size information
        for an FC volume.
        """
        connection_properties = self._add_targets_to_connection_properties(
            connection_properties)

        volume_paths = self.get_volume_paths(connection_properties)
        if volume_paths:
            return self._linuxscsi.extend_volume(
                volume_paths, use_multipath=self.use_multipath)
        else:
            # No path found: nothing to resize on this host
            LOG.warning("Couldn't find any volume paths on the host to "
                        "extend volume for %(props)s",
                        {'props': connection_properties})
            raise exception.VolumePathsNotFound()

    @utils.trace
    @synchronized('connect_volume', external=True)
    def connect_volume(self, connection_properties):
        """Attach the volume to instance_name.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :returns: dict

        connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name
        target_lun - LUN id of the volume
        """
        device_info = {'type': 'block'}

        connection_properties = self._add_targets_to_connection_properties(
            connection_properties)

        hbas = self._linuxfc.get_fc_hbas_info()
        if not hbas:
            LOG.warning("We are unable to locate any Fibre Channel devices.")
            raise exception.NoFibreChannelHostsFound()

        host_devices = self._get_possible_volume_paths(
            connection_properties, hbas)

        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device.  Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices):
            for device in host_devices:
                LOG.debug("Looking for Fibre Channel dev %(device)s",
                          {'device': device})
                if os.path.exists(device) and self.check_valid_device(device):
                    self.host_device = device
                    # get the /dev/sdX device.  This variable is maintained to
                    # keep the same log output.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()

            if self.tries >= self.device_scan_attempts:
                LOG.error("Fibre Channel volume device not found.")
                raise exception.NoFibreChannelVolumeDeviceFound()

            LOG.info("Fibre Channel volume device not yet found. "
                     "Will rescan & retry.  Try number: %(tries)s.",
                     {'tries': self.tries})

            # Kick the kernel to rediscover LUNs before the next poll
            self._linuxfc.rescan_hosts(hbas, connection_properties)
            self.tries = self.tries + 1

        # State shared with the polling closure above
        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices)
        timer.start(interval=2).wait()

        LOG.debug("Found Fibre Channel volume %(name)s "
                  "(after %(tries)s rescans.)",
                  {'name': self.device_name, 'tries': self.tries})

        # find out the WWN of the device
        device_wwn = self._linuxscsi.get_scsi_wwn(self.host_device)
        LOG.debug("Device WWN = '%(wwn)s'", {'wwn': device_wwn})
        device_info['scsi_wwn'] = device_wwn

        # see if the new drive is part of a multipath
        # device.  If so, we'll use the multipath device.
        if self.use_multipath:
            # Pass a symlink, not a real path, otherwise we'll get a real path
            # back if we don't find a multipath and we'll return that to the
            # caller, breaking Nova's encryption which requires a symlink.
            (device_path, multipath_id) = self._discover_mpath_device(
                device_wwn, connection_properties, self.host_device)
            if multipath_id:
                # only set the multipath_id if we found one
                device_info['multipath_id'] = multipath_id
        else:
            device_path = self.host_device

        device_info['path'] = device_path
        return device_info

    def _get_host_devices(self, possible_devs):
        """Compute the device paths on the system with an id, wwn, and lun

        :param possible_devs: list of (platform, pci_id, wwn, lun) tuples
        :return: list of device paths on the system based on the possible_devs
        """
        host_devices = []
        for platform, pci_num, target_wwn, lun in possible_devs:
            host_device = "/dev/disk/by-path/%spci-%s-fc-%s-lun-%s" % (
                platform + '-' if platform else '',
                pci_num,
                target_wwn,
                self._linuxscsi.process_lun_id(lun))
            host_devices.append(host_device)
        return host_devices

    def _get_possible_devices(self, hbas, targets):
        """Compute the possible fibre channel device options.

        :param hbas: available hba devices.
        :param targets: tuple of possible wwn addresses and lun combinations.

        :returns: list of (platform, pci_id, wwn, lun) tuples

        Given one or more wwn (mac addresses for fibre channel) ports
        do the matrix math to figure out a set of pci device, wwn
        tuples that are potentially valid (they won't all be).  This
        provides a search space for the device connection.
        """
        raw_devices = []
        for hba in hbas:
            platform, pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn, lun in targets:
                    target_wwn = "0x%s" % wwn.lower()
                    raw_devices.append((platform, pci_num, target_wwn, lun))
        return raw_devices

    @utils.trace
    @synchronized('connect_volume', external=True)
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Detach the volume from instance_name.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict

        connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name
        target_lun - LUN id of the volume
        """
        devices = []
        wwn = None

        connection_properties = self._add_targets_to_connection_properties(
            connection_properties)

        volume_paths = self.get_volume_paths(connection_properties)
        mpath_path = None
        for path in volume_paths:
            real_path = self._linuxscsi.get_name_from_path(path)
            # Flush the multipath device once, from the first valid path found
            if (self.use_multipath and not mpath_path
                    and self.check_valid_device(path)):
                wwn = self._linuxscsi.get_scsi_wwn(path)
                mpath_path = self._linuxscsi.find_multipath_device_path(wwn)
                if mpath_path:
                    self._linuxscsi.flush_multipath_device(mpath_path)
            dev_info = self._linuxscsi.get_device_info(real_path)
            devices.append(dev_info)

        LOG.debug("devices to remove = %s", devices)
        self._remove_devices(connection_properties, devices, device_info)

    def _remove_devices(self, connection_properties, devices, device_info):
        """Remove all kernel devices that were mounted for this volume."""
        # There may have been more than 1 device mounted
        # by the kernel for this volume.  We have to remove
        # all of them
        path_used = self._linuxscsi.get_dev_path(connection_properties,
                                                 device_info)
        # NOTE: Due to bug #1897787 device_info may have a real path for some
        # single paths instead of a symlink as it should have, so it'll only
        # be a multipath if it was a symlink (not real path) and it wasn't a
        # single path symlink (those have filenames starting with pci-)
        # We don't use os.path.islink in case the file is no longer there.
        was_symlink = path_used.count(os.sep) > 2
        # We check for /pci because that's the value we return for single
        # paths, whereas for multipaths we have multiple link formats.
        was_multipath = '/pci-' not in path_used and was_symlink
        for device in devices:
            device_path = device['device']
            flush = self._linuxscsi.requires_flush(device_path,
                                                   path_used,
                                                   was_multipath)
            self._linuxscsi.remove_scsi_device(device_path, flush=flush)

    def _get_pci_num(self, hba):
        """Extract (platform, pci_id) from an HBA's sysfs device path."""
        # NOTE(walter-boring)
        # device path is in format of (FC and FCoE) :
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # /sys/devices/pci0000:20/0000:20:03.0/0000:21:00.2/net/ens2f2/ctlr_2
        # /host3/fc_host/host3
        # we always want the value prior to the host or net value
        # on non x86_64 device, pci devices may be appended on platform device,
        # /sys/devices/platform/smb/smb:motherboard/80040000000.peu0-c0/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2  # noqa
        # so also return a platform id if it exists
        platform = None
        if hba is not None:
            if "device_path" in hba:
                device_path = hba['device_path'].split('/')
                has_platform = (len(device_path) > 3
                                and device_path[3] == 'platform')
                for index, value in enumerate(device_path):
                    if has_platform and value.startswith('pci'):
                        platform = "platform-%s" % device_path[index - 1]
                    if value.startswith('net') or value.startswith('host'):
                        return platform, device_path[index - 1]
        return None, None
|
|
import typing # noqa
# Low-color themes should ONLY use the standard foreground and background
# colours listed here:
#
# http://urwid.org/manual/displayattributes.html
#
class Palette:
    """Base palette: subclasses supply ``low`` (and optionally ``high``)
    mappings from field name to urwid display-attribute tuples."""

    # Every palette entry a theme must define.
    _fields = [
        'background',
        'title',

        # Status bar & heading
        'heading', 'heading_key', 'heading_inactive',

        # Help
        'key', 'head', 'text',

        # Options
        'option_selected', 'option_active', 'option_active_selected',
        'option_selected_key',

        # List and Connections
        'method', 'focus',
        'code_200', 'code_300', 'code_400', 'code_500', 'code_other',
        'error', "warn", "alert",
        'header', 'highlight', 'intercept', 'replay', 'mark',

        # Hex view
        'offset',

        # Grid Editor
        'focusfield', 'focusfield_error', 'field_error', 'editfield',

        # Commander
        'commander_command', 'commander_invalid', 'commander_hint'
    ]
    high: typing.Mapping[str, typing.Sequence[str]] = None

    def palette(self, transparent):
        """Build the urwid palette list, substituting the theme's background
        colour for 'default' backgrounds unless *transparent* is requested."""
        entries = []
        high_bg = low_bg = None
        # Background substitution only applies for opaque themes that define
        # a high-color background.
        if not transparent and self.high and self.high.get("background"):
            high_bg = self.high["background"][1]
            low_bg = self.low["background"][1]

        for name in self._fields:
            if transparent and name == "background":
                entries.append(["background", "default", "default"])
                continue
            spec = [name]
            low_attrs = list(self.low[name])
            if low_bg and low_attrs[1] == "default":
                low_attrs[1] = low_bg
            spec.extend(low_attrs)
            if self.high and name in self.high:
                spec.append(None)
                high_attrs = list(self.high[name])
                if high_bg and high_attrs[1] == "default":
                    high_attrs[1] = high_bg
                spec.extend(high_attrs)
            elif high_bg and self.low[name][1] == "default":
                # No explicit high-color entry: reuse the low foreground on
                # the high-color background.
                spec.extend([None, low_attrs[0], high_bg])
            entries.append(tuple(spec))
        return entries
class LowDark(Palette):
    """
    Low-color dark background

    Uses only the standard 16 terminal colours so it works on any
    terminal; `high` is left unset, so these entries are used everywhere.
    """
    low = dict(
        background = ('white', 'black'),
        title = ('white,bold', 'default'),

        # Status bar & heading
        heading = ('white', 'dark blue'),
        heading_key = ('light cyan', 'dark blue'),
        heading_inactive = ('dark gray', 'light gray'),

        # Help
        key = ('light cyan', 'default'),
        head = ('white,bold', 'default'),
        text = ('light gray', 'default'),

        # Options
        option_selected = ('black', 'light gray'),
        option_selected_key = ('light cyan', 'light gray'),
        option_active = ('light red', 'default'),
        option_active_selected = ('light red', 'light gray'),

        # List and Connections
        method = ('dark cyan', 'default'),
        focus = ('yellow', 'default'),

        code_200 = ('dark green', 'default'),
        code_300 = ('light blue', 'default'),
        code_400 = ('light red', 'default'),
        code_500 = ('light red', 'default'),
        code_other = ('dark red', 'default'),

        alert = ('light magenta', 'default'),
        warn = ('brown', 'default'),
        error = ('light red', 'default'),

        header = ('dark cyan', 'default'),
        highlight = ('white,bold', 'default'),
        intercept = ('brown', 'default'),
        replay = ('light green', 'default'),
        mark = ('light red', 'default'),

        # Hex view
        offset = ('dark cyan', 'default'),

        # Grid Editor
        focusfield = ('black', 'light gray'),
        focusfield_error = ('dark red', 'light gray'),
        field_error = ('dark red', 'default'),
        editfield = ('white', 'default'),

        commander_command = ('white,bold', 'default'),
        commander_invalid = ('light red', 'default'),
        commander_hint = ('dark gray', 'default'),
    )
class Dark(LowDark):
    """High-colour dark theme: a handful of greyscale/colour refinements
    layered over the LowDark 16-colour base."""
    high = {
        'heading_inactive': ('g58', 'g11'),
        'intercept': ('#f60', 'default'),

        'option_selected': ('g85', 'g45'),
        'option_selected_key': ('light cyan', 'g50'),
        'option_active_selected': ('light red', 'g50'),
    }
class LowLight(Palette):
    """
    Low-color light background

    16-colour counterpart of LowDark for light terminals; `high` is left
    unset, so these entries are used everywhere.
    """
    low = dict(
        background = ('black', 'white'),
        title = ('dark magenta', 'default'),

        # Status bar & heading
        heading = ('white', 'black'),
        heading_key = ('dark blue', 'black'),
        heading_inactive = ('black', 'light gray'),

        # Help
        key = ('dark blue', 'default'),
        head = ('black', 'default'),
        text = ('dark gray', 'default'),

        # Options
        option_selected = ('black', 'light gray'),
        option_selected_key = ('dark blue', 'light gray'),
        option_active = ('light red', 'default'),
        option_active_selected = ('light red', 'light gray'),

        # List and Connections
        method = ('dark cyan', 'default'),
        focus = ('black', 'default'),

        code_200 = ('dark green', 'default'),
        code_300 = ('light blue', 'default'),
        code_400 = ('dark red', 'default'),
        code_500 = ('dark red', 'default'),
        code_other = ('light red', 'default'),

        error = ('light red', 'default'),
        warn = ('brown', 'default'),
        alert = ('light magenta', 'default'),

        header = ('dark blue', 'default'),
        highlight = ('black,bold', 'default'),
        intercept = ('brown', 'default'),
        replay = ('dark green', 'default'),
        mark = ('dark red', 'default'),

        # Hex view
        offset = ('dark blue', 'default'),

        # Grid Editor
        focusfield = ('black', 'light gray'),
        focusfield_error = ('dark red', 'light gray'),
        field_error = ('dark red', 'black'),
        editfield = ('black', 'default'),

        commander_command = ('dark magenta', 'default'),
        commander_invalid = ('light red', 'default'),
        commander_hint = ('light gray', 'default'),
    )
class Light(LowLight):
    """High-colour light theme: greyscale/colour refinements layered over
    the LowLight 16-colour base."""
    high = {
        'background': ('black', 'g100'),
        'heading': ('g99', '#08f'),
        'heading_key': ('#0ff,bold', '#08f'),
        'heading_inactive': ('g35', 'g85'),
        'replay': ('#0a0,bold', 'default'),

        'option_selected': ('black', 'g85'),
        'option_selected_key': ('dark blue', 'g85'),
        'option_active_selected': ('light red', 'g85'),
    }
# Solarized palette in Urwid-style terminal high-colour offsets
# See: http://ethanschoonover.com/solarized
# Content tones, darkest (base03) to lightest (base3).
sol_base03 = "h234"
sol_base02 = "h235"
sol_base01 = "h240"
sol_base00 = "h241"
sol_base0 = "h244"
sol_base1 = "h245"
sol_base2 = "h254"
sol_base3 = "h230"
# Accent colours.
sol_yellow = "h136"
sol_orange = "h166"
sol_red = "h160"
sol_magenta = "h125"
sol_violet = "h61"
sol_blue = "h33"
sol_cyan = "h37"
sol_green = "h64"
class SolarizedLight(LowLight):
    """Solarized light theme: high-colour overrides on the LowLight base."""
    high = dict(
        background = (sol_base00, sol_base3),
        title = (sol_cyan, 'default'),
        text = (sol_base00, 'default'),

        # Status bar & heading
        heading = (sol_base2, sol_base02),
        heading_key = (sol_blue, sol_base03),
        heading_inactive = (sol_base03, sol_base1),

        # Help
        key = (sol_blue, 'default',),
        head = (sol_base00, 'default'),

        # Options
        option_selected = (sol_base03, sol_base2),
        option_selected_key = (sol_blue, sol_base2),
        option_active = (sol_orange, 'default'),
        option_active_selected = (sol_orange, sol_base2),

        # List and Connections
        method = (sol_cyan, 'default'),
        focus = (sol_base01, 'default'),

        code_200 = (sol_green, 'default'),
        code_300 = (sol_blue, 'default'),
        code_400 = (sol_orange, 'default',),
        code_500 = (sol_red, 'default'),
        code_other = (sol_magenta, 'default'),

        error = (sol_red, 'default'),
        warn = (sol_orange, 'default'),
        alert = (sol_magenta, 'default'),

        header = (sol_blue, 'default'),
        highlight = (sol_base01, 'default'),
        intercept = (sol_red, 'default',),
        replay = (sol_green, 'default',),

        # Hex view
        offset = (sol_cyan, 'default'),

        # Grid Editor
        focusfield = (sol_base00, sol_base2),
        focusfield_error = (sol_red, sol_base2),
        field_error = (sol_red, 'default'),
        editfield = (sol_base01, 'default'),

        commander_command = (sol_cyan, 'default'),
        commander_invalid = (sol_orange, 'default'),
        commander_hint = (sol_base1, 'default'),
    )
class SolarizedDark(LowDark):
    """Solarized dark theme: high-colour overrides on the LowDark base."""
    high = dict(
        background = (sol_base2, sol_base03),
        title = (sol_blue, 'default'),
        text = (sol_base1, 'default'),

        # Status bar & heading
        heading = (sol_base2, sol_base01),
        heading_key = (sol_blue + ",bold", sol_base01),
        heading_inactive = (sol_base1, sol_base02),

        # Help
        key = (sol_blue, 'default',),
        head = (sol_base2, 'default'),

        # Options
        option_selected = (sol_base03, sol_base00),
        option_selected_key = (sol_blue, sol_base00),
        option_active = (sol_orange, 'default'),
        option_active_selected = (sol_orange, sol_base00),

        # List and Connections
        method = (sol_cyan, 'default'),
        focus = (sol_base1, 'default'),

        code_200 = (sol_green, 'default'),
        code_300 = (sol_blue, 'default'),
        code_400 = (sol_orange, 'default',),
        code_500 = (sol_red, 'default'),
        code_other = (sol_magenta, 'default'),

        error = (sol_red, 'default'),
        warn = (sol_orange, 'default'),
        alert = (sol_magenta, 'default'),

        header = (sol_blue, 'default'),
        highlight = (sol_base01, 'default'),
        intercept = (sol_red, 'default',),
        replay = (sol_green, 'default',),

        # Hex view
        offset = (sol_cyan, 'default'),

        # Grid Editor
        focusfield = (sol_base0, sol_base02),
        focusfield_error = (sol_red, sol_base02),
        field_error = (sol_red, 'default'),
        editfield = (sol_base1, 'default'),

        commander_command = (sol_blue, 'default'),
        commander_invalid = (sol_orange, 'default'),
        commander_hint = (sol_base00, 'default'),
    )
# Palette selected when the user has not chosen one explicitly.
DEFAULT = "dark"

# Registry of all selectable palettes, keyed by user-facing name.
palettes = {
    "lowlight": LowLight(),
    "lowdark": LowDark(),
    "light": Light(),
    "dark": Dark(),
    "solarized_light": SolarizedLight(),
    "solarized_dark": SolarizedDark(),
}
|
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from util.io.xmloutput import XMLOutput
from util.io.filesystem import ensureDirectoryExists
from util.graphalgorithim import dominator
from . escape import objectescape
from language.python import simplecodegen, ast
import cStringIO
class Dumper(object):
	"""Writes a static HTML report of analysis contexts: one page per
	context (see dumpContext) plus a dominator-tree index page.

	NOTE(review): Python 2 code (cStringIO, dict.itervalues/iteritems);
	`o` arguments are XMLOutput sinks that overload `<<` for text output.
	"""
	def __init__(self, directory):
		# Output directory for all generated pages; created if absent.
		self.directory = directory
		# Cache: context -> URL of its generated page.
		self.urls = {}
		# Monotonic counter used to mint unique page file names.
		self.uid = 0

		ensureDirectoryExists(directory)

	def contextURL(self, context):
		"""Return (minting on first use) the report URL for `context`."""
		url = self.urls.get(context)
		if url is None:
			url = os.path.join(self.directory, "f%6.6d.html" % self.uid)
			# Normalise Windows separators so links work in a browser.
			url = url.replace('\\', '/')
			self.uid += 1
			self.urls[context] = url
		return url

	def relLink(self, target, current):
		"""Return a browser-usable link to `target` relative to the page `current`."""
		directory, _file = os.path.split(current)
		link = os.path.relpath(target, directory)
		link = link.replace('\\', '/')
		return link

	def groupInvokes(self, invokes, callback):
		"""Group invocation records into a dict keyed by callback(invoke)."""
		grouped = {}
		for invoke in invokes.itervalues():
			key = callback(invoke)
			if key not in grouped:
				grouped[key] = [invoke]
			else:
				grouped[key].append(invoke)
		return grouped

	def _displayContext(self, context, o):
		"""Emit a context signature: code, self param, params, vparams."""
		o << context.signature.code
		o.tag('br')
		with o.scope('b'):
			o << "self:"
		o << " "
		o << context.signature.selfparam
		o.tag('br')

		for i, param in enumerate(context.signature.params):
			with o.scope('b'):
				o << "param %d:" % i
			o << " "
			o << param
			o.tag('br')

		for i, param in enumerate(context.signature.vparams):
			with o.scope('b'):
				o << "vparam %d:" % i
			o << " "
			o << param
			o.tag('br')

	def displayContext(self, context, o, link=True):
		"""Emit a context signature, hyperlinked to its page when `link`."""
		if link:
			with o.scope('a', href=self.relLink(self.contextURL(context), o.url)):
				self._displayContext(context, o)
		else:
			self._displayContext(context, o)

	def header(self, text, o):
		"""Emit a section heading."""
		with o.scope('h3'):
			o << text
		o.endl()

	def objRef(self, obj, o):
		# Hook for rendering an object reference; currently plain text.
		o << obj

	def fold(self, context, o):
		"""Emit the object this context folds to, if any."""
		if context.foldObj:
			self.header("Fold", o)
			self.objRef(context.foldObj, o)
			o.endl()

	def code(self, context, o):
		"""Emit the context's source, regenerated from its code object."""
		code = context.signature.code
		if code and code.isStandardCode():
			self.header("Code", o)
			sio = cStringIO.StringIO()
			simplecodegen.SimpleCodeGen(sio).process(code)
			with o.scope('pre'):
				o << sio.getvalue()

	def criticalOps(self, context, o):
		"""Emit the list of critical store operations for this context."""
		self.header("Critical Stores", o)
		with o.scope('ul'):
			for op in context.criticalStores:
				with o.scope('li'):
					o << op
				o.endl()
		o.endl()

	def invokesIn(self, context, o):
		"""Emit inbound invocations, grouped by calling context."""
		self.header("Invoke In", o)
		grouped = self.groupInvokes(context.invokeIn, lambda invoke: invoke.src)
		for src, invokes in grouped.iteritems():
			with o.scope('p'):
				self.displayContext(src, o)
				for invoke in invokes:
					with o.scope('p'):
						o << invoke.op
						self.constraints(invoke.constraints, o)
			o.endl()

	def invokesOut(self, context, o):
		"""Emit outbound invocations, grouped by call-site operation."""
		self.header("Invoke Out", o)
		grouped = self.groupInvokes(context.invokeOut, lambda invoke: invoke.op)
		for op, invokes in grouped.iteritems():
			with o.scope('p'):
				o << op
				o.tag('br')
				for invoke in invokes:
					self.displayContext(invoke.dst, o)
					o.tag('br')
			o.endl()

	def slot(self, context, slot, o):
		"""Emit one slot: its name, critical values, and current values."""
		o << slot
		if slot.null: o << " (null)"
		o.tag('br')
		o.endl()

		if slot.critical.values:
			with o.scope('b'):
				o << "Critical"
			o.endl()
			with o.scope('ul'):
				for value in slot.critical.values:
					with o.scope('li'):
						o << value
					o.endl()
			o.endl()

		if slot.values:
			with o.scope('b'):
				o << "Values"
			o.endl()
			with o.scope('ul'):
				for value in slot.values:
					with o.scope('li'):
						self.objRef(value, o)
					o.endl()
			o.endl()

	def locals(self, context, o):
		"""Emit every local-variable slot of the context."""
		self.header("Locals", o)
		for slot in context.locals.itervalues():
			with o.scope('p'):
				self.slot(context, slot, o)

	def objects(self, context, o):
		"""Emit every heap object in the context's region with its fields."""
		self.header("Objects", o)
		region = context.region
		for obj in region.objects.itervalues():
			with o.scope('p'):
				o << obj.name
				o << " (%s)" % objectescape.repr(obj.flags)
				o.endl()
				with o.scope('ul'):
					for slot in obj.fields.itervalues():
						with o.scope('li'):
							self.slot(context, slot, o)
						o.endl()
				o.endl()

	def dumpTree(self, node, tree, o):
		"""Recursively emit the dominator tree rooted at `node` as nested lists."""
		if node not in tree: return
		with o.scope('ul'):
			for child in tree[node]:
				with o.scope('li'):
					self.displayContext(child, o, link=True)
					self.dumpTree(child, tree, o)

	def index(self, contexts, root):
		"""Write index.html: the call-graph dominator tree of all contexts."""
		def forward(context):
			return set([invoke.dst for invoke in context.invokeOut.itervalues()])
		idoms = dominator.findIDoms([root], forward)
		tree = dominator.treeFromIDoms(idoms)

		url = os.path.join(self.directory, 'index.html')
		o = XMLOutput(open(url, 'w'))
		o.url = url
		self.dumpTree(None, tree, o)

	def constraints(self, constraints, o):
		"""Emit a count and listing of dataflow constraints."""
		with o.scope('p'):
			with o.scope('b'):
				o << "%d constraints" % len(constraints)
			o.tag('br')
			for c in constraints:
				o << c
				o.tag('br')

	def dumpContext(self, context):
		"""Write the full HTML page for one context."""
		url = self.contextURL(context)
		o = XMLOutput(open(url, 'w'))
		o.url = url

		with o.scope('html'):
			with o.scope('head'):
				with o.scope('title'):
					o << context.signature.code
					o << ' - '
					o << id(context)
			with o.scope('body'):
				with o.scope('p'):
					o << '['
					with o.scope('a', href='index.html'):
						o << "Index"
					o << ']'
				o.endl()

				with o.scope('p'):
					self.displayContext(context, o, link=False)

				self.fold(context, o)
				self.code(context, o)
				self.criticalOps(context, o)
				self.invokesIn(context, o)
				self.invokesOut(context, o)
				self.locals(context, o)
				self.objects(context, o)
				self.constraints(context.constraints, o)
|
|
#!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
import atexit
import contextlib
import io
import os
import sys
import tempfile
import platform
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Return psutil's version string parsed from psutil/__init__.py.

    The version must have the ``x.y.z`` form (checked via asserts).
    Raises ValueError if no ``__version__`` assignment is found.
    """
    import ast  # local import: keeps setup.py's top-level imports unchanged
    INIT = os.path.join(HERE, 'psutil/__init__.py')
    with open(INIT, 'r') as f:
        for line in f:
            if line.startswith('__version__'):
                # The right-hand side is a plain string literal, so
                # literal_eval is a safe drop-in for eval() (which would
                # execute arbitrary code from the file).
                ret = ast.literal_eval(line.strip().split(' = ')[1])
                assert ret.count('.') == 2, ret
                for num in ret.split('.'):
                    assert num.isdigit(), ret
                return ret
        else:
            # for-else: the loop finished without returning a version.
            raise ValueError("couldn't find version string")
def get_description():
    """Return the package long description read from README.rst."""
    readme_path = os.path.join(HERE, 'README.rst')
    with open(readme_path, 'r') as readme:
        return readme.read()
@contextlib.contextmanager
def silenced_output(stream_name):
    """Temporarily replace ``sys.<stream_name>`` with a write-discarding
    stream, restoring the original on exit (even on error)."""
    class DummyFile(io.BytesIO):
        # see: https://github.com/giampaolo/psutil/issues/678
        errors = "ignore"

        def write(self, s):
            # Swallow everything written while silenced.
            pass

    saved = getattr(sys, stream_name)
    try:
        setattr(sys, stream_name, DummyFile())
        yield
    finally:
        setattr(sys, stream_name, saved)
VERSION = get_version()
# e.g. "4.1.2" -> 412; embedded into every C extension.
VERSION_MACRO = ('PSUTIL_VERSION', int(VERSION.replace('.', '')))

# Platform dispatch below: builds `extensions`, the list of C Extension
# objects handed to setup().  Note the win32 branch is a plain `if`, so on
# Windows both the posix check (false) and the win32 check run.

# POSIX
if os.name == 'posix':
    # Extension shared by every POSIX platform.
    posix_extension = Extension(
        'psutil._psutil_posix',
        sources=['psutil/_psutil_posix.c'])
    if sys.platform.startswith("sunos"):
        posix_extension.libraries.append('socket')
        if platform.release() == '5.10':
            # Solaris 10 lacks ifaddrs(3); ship a local implementation.
            posix_extension.sources.append('psutil/arch/solaris/v10/ifaddrs.c')
            posix_extension.define_macros.append(('PSUTIL_SUNOS10', 1))
# Windows
if sys.platform.startswith("win32"):

    def get_winver():
        # Encode the running Windows version as e.g. '0x0601' for
        # the _WIN32_WINNT macro.
        maj, min = sys.getwindowsversion()[0:2]
        return '0x0%s' % ((maj * 100) + min)

    ext = Extension(
        'psutil._psutil_windows',
        sources=[
            'psutil/_psutil_windows.c',
            'psutil/_psutil_common.c',
            'psutil/arch/windows/process_info.c',
            'psutil/arch/windows/process_handles.c',
            'psutil/arch/windows/security.c',
            'psutil/arch/windows/inet_ntop.c',
        ],
        define_macros=[
            VERSION_MACRO,
            # be nice to mingw, see:
            # http://www.mingw.org/wiki/Use_more_recent_defined_functions
            ('_WIN32_WINNT', get_winver()),
            ('_AVAIL_WINVER_', get_winver()),
            ('_CRT_SECURE_NO_WARNINGS', None),
            # see: https://github.com/giampaolo/psutil/issues/348
            ('PSAPI_VERSION', 1),
        ],
        libraries=[
            "psapi", "kernel32", "advapi32", "shell32", "netapi32",
            "iphlpapi", "wtsapi32", "ws2_32",
        ],
        # extra_compile_args=["/Z7"],
        # extra_link_args=["/DEBUG"]
    )
    extensions = [ext]
# OS X
elif sys.platform.startswith("darwin"):
    ext = Extension(
        'psutil._psutil_osx',
        sources=[
            'psutil/_psutil_osx.c',
            'psutil/_psutil_common.c',
            'psutil/arch/osx/process_info.c'
        ],
        define_macros=[VERSION_MACRO],
        extra_link_args=[
            '-framework', 'CoreFoundation', '-framework', 'IOKit'
        ])
    extensions = [ext, posix_extension]
# FreeBSD
elif sys.platform.startswith("freebsd"):
    ext = Extension(
        'psutil._psutil_bsd',
        sources=[
            'psutil/_psutil_bsd.c',
            'psutil/_psutil_common.c',
            'psutil/arch/bsd/freebsd.c',
            'psutil/arch/bsd/freebsd_socks.c',
        ],
        define_macros=[VERSION_MACRO],
        libraries=["devstat"])
    extensions = [ext, posix_extension]
# OpenBSD
elif sys.platform.startswith("openbsd"):
    ext = Extension(
        'psutil._psutil_bsd',
        sources=[
            'psutil/_psutil_bsd.c',
            'psutil/_psutil_common.c',
            'psutil/arch/bsd/openbsd.c',
        ],
        define_macros=[VERSION_MACRO],
        libraries=["kvm"])
    extensions = [ext, posix_extension]
# Linux
elif sys.platform.startswith("linux"):

    def get_ethtool_macro():
        # Probe whether <linux/ethtool.h> compiles on this system by
        # building a one-line C file; define a fallback macro if not.
        # see: https://github.com/giampaolo/psutil/issues/659
        from distutils.unixccompiler import UnixCCompiler
        from distutils.errors import CompileError

        with tempfile.NamedTemporaryFile(
                suffix='.c', delete=False, mode="wt") as f:
            f.write("#include <linux/ethtool.h>")

        @atexit.register
        def on_exit():
            try:
                os.remove(f.name)
            except OSError:
                pass

        compiler = UnixCCompiler()
        try:
            with silenced_output('stderr'):
                with silenced_output('stdout'):
                    compiler.compile([f.name])
        except CompileError:
            return ("PSUTIL_ETHTOOL_MISSING_TYPES", 1)
        else:
            return None

    ETHTOOL_MACRO = get_ethtool_macro()
    macros = [VERSION_MACRO]
    if ETHTOOL_MACRO is not None:
        macros.append(ETHTOOL_MACRO)
    ext = Extension(
        'psutil._psutil_linux',
        sources=['psutil/_psutil_linux.c'],
        define_macros=macros)
    extensions = [ext, posix_extension]
# Solaris
elif sys.platform.lower().startswith('sunos'):
    ext = Extension(
        'psutil._psutil_sunos',
        sources=['psutil/_psutil_sunos.c'],
        define_macros=[VERSION_MACRO],
        libraries=['kstat', 'nsl', 'socket'])
    extensions = [ext, posix_extension]
else:
    sys.exit('platform %s is not supported' % sys.platform)
def main():
    """Assemble the setup() keyword arguments and run the build/install."""
    setup_args = dict(
        name='psutil',
        version=VERSION,
        description=__doc__.replace('\n', '').strip(),
        long_description=get_description(),
        keywords=[
            'ps', 'top', 'kill', 'free', 'lsof', 'netstat', 'nice', 'tty',
            'ionice', 'uptime', 'taskmgr', 'process', 'df', 'iotop', 'iostat',
            'ifconfig', 'taskset', 'who', 'pidof', 'pmap', 'smem', 'pstree',
            'monitoring', 'ulimit', 'prlimit',
        ],
        author='Giampaolo Rodola',
        author_email='g.rodola <at> gmail <dot> com',
        url='https://github.com/giampaolo/psutil',
        platforms='Platform Independent',
        license='BSD',
        packages=['psutil'],
        # see: python setup.py register --list-classifiers
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Win32 (MS Windows)',
            'Intended Audience :: Developers',
            'Intended Audience :: Information Technology',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: BSD License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows :: Windows NT/2000',
            'Operating System :: Microsoft',
            'Operating System :: OS Independent',
            'Operating System :: POSIX :: BSD :: FreeBSD',
            'Operating System :: POSIX :: BSD :: OpenBSD',
            'Operating System :: POSIX :: Linux',
            'Operating System :: POSIX :: SunOS/Solaris',
            'Operating System :: POSIX',
            'Programming Language :: C',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.0',
            'Programming Language :: Python :: 3.1',
            'Programming Language :: Python :: 3.2',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Programming Language :: Python',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Software Development :: Libraries',
            'Topic :: System :: Benchmark',
            'Topic :: System :: Hardware',
            'Topic :: System :: Monitoring',
            'Topic :: System :: Networking :: Monitoring',
            'Topic :: System :: Networking',
            'Topic :: System :: Systems Administration',
            'Topic :: Utilities',
        ],
    )
    # `extensions` is set by the platform dispatch above.
    if extensions is not None:
        setup_args["ext_modules"] = extensions

    setup(**setup_args)

if __name__ == '__main__':
    main()
|
|
from dmd import *
import sdl2.ext
from sdl2.ext import *
class LayerTransitionBase(object):
	"""Base class for DMD layer transitions.

	Maintains a progress value in [0.0, 1.0] that advances each rendered
	frame; subclasses override :meth:`transition_frame` to blend the two
	frames at the current progress.
	"""

	# Progress from 0.0 (100% `from` frame) to 1.0 (100% `to` frame);
	# updated by next_frame().
	progress = 0.0

	# Amount progress advances per frame.  Defaults to 1/30 (30 fps).
	progress_per_frame = 1.0/30.0

	# Direction multiplier: 0 stopped, 1 for A-to-B, -1 for B-to-A.
	# Not documented publicly, as play/pause manipulate it.
	progress_mult = 0

	# Callable invoked once, when the transition finishes.
	completed_handler = None

	# 'in': moving from `from` to `to`; 'out': moving from `to` to `from`.
	in_out = 'in'

	def __init__(self):
		super(LayerTransitionBase, self).__init__()

	def start(self):
		"""Rewind to the beginning and begin advancing."""
		self.reset()
		self.progress_mult = 1.0

	def pause(self):
		"""Freeze the transition at its current position."""
		self.progress_mult = 0.0

	def reset(self):
		"""Return to the initial, stopped state."""
		self.progress_mult = 0.0
		self.progress = 0.0

	def next_frame(self, from_frame, to_frame):
		"""Advance one step and return the frame to display.

		At the endpoints the raw `from`/`to` frame is returned directly
		(respecting `in_out`); in between, transition_frame() blends them.
		"""
		step = self.progress_mult * self.progress_per_frame
		self.progress = max(0.0, min(1.0, self.progress + step))
		if self.progress <= 0.0:
			return from_frame if self.in_out == 'in' else to_frame
		if self.progress >= 1.0:
			if self.completed_handler is not None:
				self.completed_handler()
			return to_frame if self.in_out == 'in' else from_frame
		return self.transition_frame(from_frame=from_frame, to_frame=to_frame)

	def transition_frame(self, from_frame, to_frame):
		"""Blend hook applied at the current progress.

		Subclasses override this for interesting effects; the base
		implementation simply returns `from_frame`.
		"""
		return from_frame
class ExpandTransition(LayerTransitionBase):
	"""Reveals the `to` frame by growing a band outward from the frame's
	center, either vertically or horizontally."""
	def __init__(self, direction='vertical'):
		super(ExpandTransition, self).__init__()
		self.direction = direction
		self.progress_per_frame = 1.0/11.0

	def transition_frame(self, from_frame, to_frame):
		"""Copy a centered, progress-sized region of `to_frame` over a
		copy of `from_frame`."""
		frame = from_frame.copy()
		dst_x, dst_y = 0, 0
		prog = self.progress
		# Running 'out' plays the effect in reverse.
		if self.in_out == 'out':
			prog = 1.0 - prog
		dst_x, dst_y = {
			'vertical': (0, frame.height/2-prog*(frame.height/2)),
			'horizontal': (frame.width/2-prog*(frame.width/2), 0),
		}[self.direction]
		if (self.direction == 'vertical'):
			width = frame.width
			height = prog*frame.height
		else:
			width = prog*frame.width
			height = frame.height
		Frame.copy_rect(dst=frame, dst_x=dst_x, dst_y=dst_y, src=to_frame, src_x=dst_x, src_y=dst_y, width=width, height=height, op='copy')
		return frame
class SlideOverTransition(LayerTransitionBase):
	"""Slides the `to` frame in over the top of the stationary `from`
	frame, entering from the given compass direction."""
	def __init__(self, direction='north'):
		super(SlideOverTransition, self).__init__()
		self.direction = direction
		self.progress_per_frame = 1.0/15.0

	def transition_frame(self, from_frame, to_frame):
		"""Composite `to_frame` at an offset that shrinks to zero as the
		transition completes."""
		frame = from_frame.copy()
		dst_x, dst_y = 0, 0
		prog = self.progress
		# 'in' starts fully offset and converges on (0, 0).
		if self.in_out == 'in':
			prog = 1.0 - prog
		dst_x, dst_y = {
			'north': (0, prog*frame.height),
			'south': (0, -prog*frame.height),
			'east': (-prog*frame.width, 0),
			'west': ( prog*frame.width, 0),
		}[self.direction]
		Frame.copy_rect(dst=frame, dst_x=dst_x, dst_y=dst_y, src=to_frame, src_x=0, src_y=0, width=from_frame.width, height=from_frame.height, op='copy')
		return frame
class PushTransition(LayerTransitionBase):
	"""Pushes the `from` frame out of view while the `to` frame slides in
	behind it, moving together as one strip."""
	def __init__(self, direction='north'):
		super(PushTransition, self).__init__()
		self.direction = direction
		self.progress_per_frame = 1.0/15.0

	def transition_frame(self, from_frame, to_frame):
		"""Draw both frames at complementary offsets into a fresh frame."""
		frame = Frame(width=from_frame.width, height=from_frame.height)
		dst_x, dst_y = 0, 0
		prog = self.progress
		prog1 = self.progress
		if self.in_out == 'in':
			prog = 1.0 - prog
		else:
			prog1 = 1.0 - prog1
		# (dst_x, dst_y) positions the incoming frame;
		# (dst_x1, dst_y1) positions the outgoing frame opposite it.
		dst_x, dst_y, dst_x1, dst_y1 = {
			'north': (0, prog*frame.height, 0, -prog1*frame.height),
			'south': (0, -prog*frame.height, 0, prog1*frame.height),
			'east': (-prog*frame.width, 0, prog1*frame.width, 0),
			'west': ( prog*frame.width, 0, -prog1*frame.width, 0),
		}[self.direction]
		Frame.copy_rect(dst=frame, dst_x=dst_x, dst_y=dst_y, src=to_frame, src_x=0, src_y=0, width=from_frame.width, height=from_frame.height, op='copy')
		Frame.copy_rect(dst=frame, dst_x=dst_x1, dst_y=dst_y1, src=from_frame, src_x=0, src_y=0, width=from_frame.width, height=from_frame.height, op='copy')
		return frame
class WipeTransition(LayerTransitionBase):
	"""Reveals the `to` frame behind a straight edge that sweeps across
	the display in the given compass direction."""
	def __init__(self, direction='north'):
		super(WipeTransition, self).__init__()
		self.direction = direction
		self.progress_per_frame = 1.0/30.0

	def transition_frame(self, from_frame, to_frame):
		"""Split the output at the wipe edge: one side from each frame."""
		frame = Frame(width=from_frame.width, height=from_frame.height)
		prog0 = self.progress
		prog1 = self.progress
		if self.in_out == 'out':
			prog0 = 1.0 - prog0
		else:
			prog1 = 1.0 - prog1
		# Position of the wipe edge for this step.
		src_x, src_y = {
			'north': (0, prog1*frame.height),
			'south': (0, prog0*frame.height),
			'east': (prog0*frame.width, 0),
			'west': (prog1*frame.width, 0),
		}[self.direction]
		# Size of the region still showing the (possibly swapped) source.
		width, height = {
			'north': (frame.width, prog1*frame.height+1),
			'south': (frame.width, prog0*frame.height+1),
			'east': (prog0*frame.width+1, frame.height),
			'west': (prog1*frame.width+1, frame.height),
		}[self.direction]
		if self.direction in ['east', 'south']:
			from_frame, to_frame = to_frame, from_frame
			#print "reverse to and from seeing going east or south" + str(self.direction)
		src_x = int(round(src_x))
		src_y = int(round(src_y))
		Frame.copy_rect(dst=frame, dst_x=0, dst_y=0, src=from_frame, src_x=0, src_y=0, width=width, height=height, op='copy')
		#print src_x, src_y, to_frame.height, to_frame.width, from_frame.height, from_frame.width, prog0, prog1, self.progress
		Frame.copy_rect(dst=frame, dst_x=src_x, dst_y=src_y, src=to_frame, src_x=src_x, src_y=src_y, width=from_frame.width-src_x, height=from_frame.height-src_y, op='copy')
		return frame
class AccordianTransition(LayerTransitionBase):
	"""Wipe variant: the full `from` frame is drawn first, then the `to`
	frame is revealed from the moving edge.  (The 'Accordian' spelling is
	historical and kept for compatibility.)"""
	def __init__(self, direction='north'):
		super(AccordianTransition, self).__init__()
		self.direction = direction
		self.progress_per_frame = 1.0/15.0

	def transition_frame(self, from_frame, to_frame):
		"""Overlay the progress-revealed portion of `to_frame` onto a full
		copy of `from_frame`."""
		frame = Frame(width=from_frame.width, height=from_frame.height)
		prog0 = self.progress
		prog1 = self.progress
		if self.in_out == 'out':
			prog0 = 1.0 - prog0
		else:
			prog1 = 1.0 - prog1
		# Leading edge of the revealed region.
		src_x, src_y = {
			'north': (0, prog1*frame.height),
			'south': (0, prog0*frame.height),
			'east': (prog0*frame.width, 0),
			'west': (prog1*frame.width, 0),
		}[self.direction]
		if self.direction in ['east', 'south']:
			from_frame, to_frame = to_frame, from_frame
		src_x = int(round(src_x))
		src_y = int(round(src_y))
		Frame.copy_rect(dst=frame, dst_x=0, dst_y=0, src=from_frame, src_x=0, src_y=0, width=from_frame.width, height=from_frame.height, op='copy')
		Frame.copy_rect(dst=frame, dst_x=src_x, dst_y=src_y, src=to_frame, src_x=src_x, src_y=src_y, width=from_frame.width-src_x, height=from_frame.height-src_y, op='copy')
		return frame
class ObscuredWipeTransition(LayerTransitionBase):
	"""Wipe in which a separate obscuring frame (e.g. an iris graphic)
	travels across the display, hiding the seam between the two frames."""
	def __init__(self, obscuring_frame, composite_op, direction='north'):
		super(ObscuredWipeTransition, self).__init__()
		# Composite operation used when drawing the obscuring frame.
		self.composite_op = composite_op
		self.direction = direction
		self.progress_per_frame = 1.0/15.0
		self.obs_frame = obscuring_frame

	def transition_frame(self, from_frame, to_frame):
		"""Draw the wipe, then draw the obscuring frame over the edge."""
		frame = Frame(width=from_frame.width, height=from_frame.height)
		prog0 = self.progress
		prog1 = self.progress
		if self.in_out == 'out':
			prog0 = 1.0 - prog0
		else:
			prog1 = 1.0 - prog1
		# TODO: Improve the src_x/y so that it moves at the same speed as ovr_x/y, with the midpoint.
		# (src_x, src_y) is the wipe edge; (ovr_x, ovr_y) positions the
		# obscuring frame, which travels past both display edges.
		src_x, src_y, ovr_x, ovr_y = {
			'north': (0, prog1*frame.height, 0, frame.height-prog0*(self.obs_frame.height+2*frame.height)),
			'south': (0, prog0*frame.height, 0, frame.height-prog1*(self.obs_frame.height+2*frame.height)),
			'east': (prog0*frame.width, 0, frame.width-prog1*(self.obs_frame.width+2*frame.width), 0),
			'west': (prog1*frame.width, 0, frame.width-prog0*(self.obs_frame.width+2*frame.width), 0),
		}[self.direction]
		if self.direction in ['east', 'south']:
			from_frame, to_frame = to_frame, from_frame
		src_x = int(round(src_x))
		src_y = int(round(src_y))
		Frame.copy_rect(dst=frame, dst_x=0, dst_y=0, src=from_frame, src_x=0, src_y=0, width=from_frame.width, height=from_frame.height, op='copy')
		Frame.copy_rect(dst=frame, dst_x=src_x, dst_y=src_y, src=to_frame, src_x=src_x, src_y=src_y, width=from_frame.width-src_x, height=from_frame.height-src_y, op='copy')
		Frame.copy_rect(dst=frame, dst_x=ovr_x, dst_y=ovr_y, src=self.obs_frame, src_x=0, src_y=0, width=self.obs_frame.width, height=self.obs_frame.height, op=self.composite_op)
		return frame
class CrossFadeTransition(LayerTransitionBase):
	"""Performs a cross-fade between two layers. As one fades out the other one fades in."""
	def __init__(self, width=128, height=32, frame_count=45):
		LayerTransitionBase.__init__(self)
		# Size of the region that is composited each frame.
		self.width, self.height = width, height
		self.progress_per_frame = 1.0/frame_count

	def transition_frame(self, from_frame, to_frame):
		"""Fade `from_frame`'s texture alpha down with progress and
		composite it over a copy of `to_frame`."""
		alpha_value = (self.progress * 255)
		from_frame = from_frame.copy()
		to_frame = to_frame.copy()
		sdl2.SDL_SetTextureAlphaMod(from_frame.pySurface.texture, int(255-alpha_value))
		#sdl2.SDL_SetTextureAlphaMod(to_frame.pySurface.texture, int(alpha_value))
		Frame.copy_rect(dst=to_frame, dst_x=0, dst_y=0, src=from_frame, src_x=0, src_y=0, width=self.width, height=self.height) #, op='add')
		return to_frame
class FadeTransition(LayerTransitionBase):
	"""Performs a fade in or out."""
	def __init__(self, frame_count=45, direction='in'):
		# 'in' fades the `to` frame up; anything else fades `from` down.
		self.direction = direction
		LayerTransitionBase.__init__(self)
		#self.width, self.height = width, height
		self.progress_per_frame = 1.0/frame_count

	def transition_frame(self, from_frame, to_frame=None):
		"""Return a copy of the active frame with its texture alpha set
		from the current progress."""
		# Calculate the frame index:
		if self.direction == 'in':
			alpha_value = (self.progress * 255)
			frame = to_frame.copy()
		else:
			alpha_value = 255-(self.progress * 255)
			frame = from_frame.copy()
		sdl2.SDL_SetTextureAlphaMod(frame.pySurface.texture, int(alpha_value))
		return frame
|
|
import pkgutil
import numpy
import copyreg
import dedupe.variables
import dedupe.variables.base as base
from dedupe.variables.base import MissingDataType
from dedupe.variables.interaction import InteractionType
# Import every module in the dedupe.variables package so each FieldType
# subclass is defined (and thus discoverable) before we enumerate them.
for _, module, _ in pkgutil.iter_modules(dedupe.variables.__path__,
                                         'dedupe.variables.') :
    __import__(module)

# Map field-type names (e.g. 'String') to their variable classes.
FIELD_CLASSES = {k : v for k, v in base.allSubclasses(base.FieldType) if k}
class DataModel(object) :
    """Compiles field definitions into comparison variables and computes
    per-pair distance vectors (primary distances, then interaction
    products, then missing-data indicators)."""
    def __init__(self, fields):
        primary_fields, variables = typifyFields(fields)
        self.primary_fields = primary_fields
        # Columns before this index hold primary distances; columns at or
        # after it hold derived values (interactions, missing indicators).
        self._derived_start = len(variables)

        variables += interactions(fields, primary_fields)
        variables += missing(variables)

        self._missing_field_indices = missing_field_indices(variables)
        self._interaction_indices = interaction_indices(variables)

        self._variables = variables

    def __len__(self) :
        # Total number of distance columns, including derived ones.
        return len(self._variables)

    # Changing this from a property to just a normal attribute causes
    # pickling problems, because we are removing static methods from
    # their class context. This could be fixed by defining comparators
    # outside of classes in fieldclasses
    @property
    def _field_comparators(self) :
        # Yields (field name, comparator, start col, stop col) per
        # primary field; a field may span several columns.
        start = 0
        stop = 0
        comparators = []
        for field in self.primary_fields :
            stop = start + len(field)
            comparators.append((field.field, field.comparator, start, stop))
            start = stop

        return comparators

    def predicates(self, index_predicates=True, canopies=True) :
        """Collect blocking predicates, optionally filtering out
        index-based and canopy predicates."""
        predicates = set()
        for definition in self.primary_fields :
            for predicate in definition.predicates :
                if hasattr(predicate, 'index') :
                    if index_predicates :
                        if hasattr(predicate, 'canopy') :
                            if canopies :
                                predicates.add(predicate)
                        else :
                            if not canopies :
                                predicates.add(predicate)
                else :
                    predicates.add(predicate)

        return predicates

    def distances(self, record_pairs):
        """Return an (n_pairs, len(self)) float32 array of distances."""
        num_records = len(record_pairs)

        distances = numpy.empty((num_records, len(self)), 'f4')

        field_comparators = self._field_comparators

        for i, (record_1, record_2) in enumerate(record_pairs) :
            for field, compare, start, stop in field_comparators :
                if record_1[field] is not None and record_2[field] is not None :
                    distances[i,start:stop] = compare(record_1[field],
                                                      record_2[field])
                elif hasattr(compare, 'missing') :
                    # Comparator knows how to handle missing values itself.
                    distances[i,start:stop] = compare(record_1[field],
                                                      record_2[field])
                else :
                    distances[i,start:stop] = numpy.nan

        distances = self._derivedDistances(distances)

        return distances

    def _derivedDistances(self, primary_distances) :
        """Fill interaction-product columns, zero out NaNs, and set the
        missing-data indicator columns (in place)."""
        distances = primary_distances

        current_column = self._derived_start
        for interaction in self._interaction_indices :
            distances[:,current_column] =\
                numpy.prod(distances[:,interaction], axis=1)

            current_column += 1

        missing_data = numpy.isnan(distances[:,:current_column])

        distances[:,:current_column][missing_data] = 0

        if self._missing_field_indices :
            # Indicator is 1 where the field was present, 0 where missing.
            distances[:,current_column:] =\
                1 - missing_data[:,self._missing_field_indices]

        return distances

    def check(self, record) :
        """Raise ValueError if `record` lacks a field the model compares."""
        for field_comparator in self._field_comparators :
            field = field_comparator[0]

            if field not in record :
                raise ValueError("Records do not line up with data model. "
                                 "The field '%s' is in data_model but not "
                                 "in a record" % field)
def typifyFields(fields) :
    """Instantiate field-type objects from a list of field definitions.

    Args:
        fields: iterable of dicts, each with at least a 'type' key, e.g.
            {'field': 'Phone', 'type': 'String'}.

    Returns:
        A (primary_fields, data_model) tuple: the instantiated field-type
        objects, and the flat list of variables they contribute (a field
        with `higher_vars` contributes those instead of itself).

    Raises:
        TypeError: if a definition is not a mapping.
        KeyError: if a definition lacks 'type' or names an unknown type.
    """
    primary_fields = []
    data_model = []
    for definition in fields :
        try :
            field_type = definition['type']
        except TypeError :
            # Fixed example in message: 'type' must be quoted to be a
            # valid dict literal.
            raise TypeError("Incorrect field specification: field "
                            "specifications are dictionaries that must "
                            "include a type definition, ex. "
                            "{'field': 'Phone', 'type': 'String'}")
        except KeyError :
            raise KeyError("Missing field type: fields "
                           "specifications are dictionaries that must "
                           "include a type definition, ex. "
                           "{'field': 'Phone', 'type': 'String'}")
        # Interaction definitions are expanded separately in interactions()
        if field_type == 'Interaction' :
            continue
        if field_type == 'FuzzyCategorical' and 'other fields' not in definition :
            # FuzzyCategorical compares against every other field by default
            definition['other fields'] = [d['field'] for d in fields
                                          if 'field' in d
                                          and d['field'] != definition['field']]
        try :
            field_class = FIELD_CLASSES[field_type]
        except KeyError :
            raise KeyError("Field type %s not valid. Valid types include %s"
                           % (definition['type'], ', '.join(FIELD_CLASSES)))
        field_object = field_class(definition)
        primary_fields.append(field_object)
        if hasattr(field_object, 'higher_vars') :
            data_model.extend(field_object.higher_vars)
        else :
            data_model.append(field_object)
    return primary_fields, data_model
def missing(data_model) :
    """Return a MissingDataType indicator variable for every variable
    in `data_model` that allows missing values."""
    return [MissingDataType(variable.name)
            for variable in data_model
            if variable.has_missing]
def interactions(definitions, primary_fields) :
    """Build InteractionType variables for every 'Interaction' definition,
    expanded against the primary fields by name."""
    by_name = {field.name : field for field in primary_fields}
    expanded = []
    for spec in definitions :
        if spec['type'] != 'Interaction' :
            continue
        interaction = InteractionType(spec)
        interaction.expandInteractions(by_name)
        expanded.extend(interaction.higher_vars)
    return expanded
def missing_field_indices(variables) :
    """Return the positions of variables that allow missing values."""
    return [index
            for index, variable in enumerate(variables)
            if variable.has_missing]
def interaction_indices(variables) :
    """For each interaction variable, return the positions (within
    `variables`) of the fields it interacts."""
    names = [variable.name for variable in variables]
    indices = []
    for variable in variables :
        if not hasattr(variable, 'interaction_fields') :
            continue
        indices.append([names.index(component)
                        for component in variable.interaction_fields])
    return indices
def reduce_method(m):
    """Pickle support: reduce a bound method to (getattr, (instance, name))
    so it can be reconstructed by attribute lookup on unpickling."""
    bound_to = m.__self__
    method_name = m.__func__.__name__
    return (getattr, (bound_to, method_name))
import types

# Register reduce_method with copyreg so bound methods (not picklable by
# default) pickle as (getattr, (instance, method_name)).
# NOTE(review): copyreg is assumed to be imported earlier in this file.
copyreg.pickle(types.MethodType, reduce_method)
|
|
"""Provides the worker thread needed for processing streams."""
from __future__ import annotations
from collections import defaultdict, deque
from collections.abc import Callable, Generator, Iterator, Mapping
import contextlib
import datetime
from io import BytesIO
import logging
from threading import Event
from typing import Any, cast
import av
from homeassistant.core import HomeAssistant
from . import redact_credentials
from .const import (
ATTR_SETTINGS,
AUDIO_CODECS,
DOMAIN,
HLS_PROVIDER,
MAX_MISSING_DTS,
MAX_TIMESTAMP_GAP,
PACKETS_TO_WAIT_FOR_AUDIO,
SEGMENT_CONTAINER_FORMAT,
SOURCE_TIMEOUT,
)
from .core import Part, Segment, StreamOutput, StreamSettings
from .hls import HlsStreamOutput
_LOGGER = logging.getLogger(__name__)
class StreamWorkerError(Exception):
    """An exception thrown while processing a stream.

    Base error type raised by this module (stream open, demux, and
    timestamp-validation failures).
    """
class StreamEndedError(StreamWorkerError):
    """Raised when the stream is complete, exposed for facilitating testing.

    Subclass of StreamWorkerError so generic error handling still applies.
    """
class StreamState:
    """Responsible for tracking output and playback state for a stream.

    Holds state used for playback to interpret a decoded stream. A source stream
    may be reset (e.g. reconnecting to an rtsp stream) and this object tracks
    the state to inform the player.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        outputs_callback: Callable[[], Mapping[str, StreamOutput]],
    ) -> None:
        """Initialize StreamState."""
        # Incremented on each discontinuity (stream restart)
        self._stream_id: int = 0
        self.hass = hass
        # Callback so the latest set of outputs is fetched lazily
        self._outputs_callback: Callable[
            [], Mapping[str, StreamOutput]
        ] = outputs_callback
        # sequence gets incremented before the first segment so the first segment
        # has a sequence number of 0.
        self._sequence = -1

    @property
    def sequence(self) -> int:
        """Return the current sequence for the latest segment."""
        return self._sequence

    def next_sequence(self) -> int:
        """Increment the sequence number."""
        self._sequence += 1
        return self._sequence

    @property
    def stream_id(self) -> int:
        """Return the readonly stream_id attribute."""
        return self._stream_id

    def discontinuity(self) -> None:
        """Mark the stream as having been restarted."""
        # Preserving sequence and stream_id here keep the HLS playlist logic
        # simple to check for discontinuity at output time, and to determine
        # the discontinuity sequence number.
        self._stream_id += 1
        # Call discontinuity to remove incomplete segment from the HLS output
        if hls_output := self._outputs_callback().get(HLS_PROVIDER):
            cast(HlsStreamOutput, hls_output).discontinuity()

    @property
    def outputs(self) -> list[StreamOutput]:
        """Return the active stream outputs."""
        return list(self._outputs_callback().values())
class StreamMuxer:
    """StreamMuxer re-packages video/audio packets for output."""

    def __init__(
        self,
        hass: HomeAssistant,
        video_stream: av.video.VideoStream,
        audio_stream: av.audio.stream.AudioStream | None,
        stream_state: StreamState,
    ) -> None:
        """Initialize StreamMuxer."""
        self._hass = hass
        # cast(int, None) / cast(BytesIO, None) are typed sentinels; they
        # are replaced with real values in reset() before first use.
        self._segment_start_dts: int = cast(int, None)
        self._memory_file: BytesIO = cast(BytesIO, None)
        self._av_output: av.container.OutputContainer = None
        self._input_video_stream: av.video.VideoStream = video_stream
        self._input_audio_stream: av.audio.stream.AudioStream | None = audio_stream
        self._output_video_stream: av.video.VideoStream = None
        self._output_audio_stream: av.audio.stream.AudioStream | None = None
        self._segment: Segment | None = None
        # the following 3 member variables are used for Part formation
        self._memory_file_pos: int = cast(int, None)
        self._part_start_dts: int = cast(int, None)
        self._part_has_keyframe = False
        self._stream_settings: StreamSettings = hass.data[DOMAIN][ATTR_SETTINGS]
        self._stream_state = stream_state
        # Wall-clock start time of the current segment
        self._start_time = datetime.datetime.utcnow()

    def make_new_av(
        self,
        memory_file: BytesIO,
        sequence: int,
        input_vstream: av.video.VideoStream,
        input_astream: av.audio.stream.AudioStream | None,
    ) -> tuple[
        av.container.OutputContainer,
        av.video.VideoStream,
        av.audio.stream.AudioStream | None,
    ]:
        """Make a new av OutputContainer and add output streams."""
        container = av.open(
            memory_file,
            mode="w",
            format=SEGMENT_CONTAINER_FORMAT,
            container_options={
                **{
                    # Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
                    # "cmaf" flag replaces several of the movflags used, but too recent to use for now
                    "movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Sometimes the first segment begins with negative timestamps, and this setting just
                    # adjusts the timestamps in the output from that segment to start from 0. Helps from
                    # having to make some adjustments in test_durations
                    "avoid_negative_ts": "make_non_negative",
                    "fragment_index": str(sequence + 1),
                    "video_track_timescale": str(int(1 / input_vstream.time_base)),
                },
                # Only do extra fragmenting if we are using ll_hls
                # Let ffmpeg do the work using frag_duration
                # Fragment durations may exceed the 15% allowed variance but it seems ok
                **(
                    {
                        "movflags": "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                        # Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
                        # a "Part" that can be combined with the data from all the other "Part"s, plus an init
                        # section, to reconstitute the data in a "Segment".
                        # The LL-HLS spec allows for a fragment's duration to be within the range [0.85x,1.0x]
                        # of the part target duration. We use the frag_duration option to tell ffmpeg to try to
                        # cut the fragments when they reach frag_duration. However, the resulting fragments can
                        # have variability in their durations and can end up being too short or too long. With a
                        # video track with no audio, the discrete nature of frames means that the frame at the
                        # end of a fragment will sometimes extend slightly beyond the desired frag_duration.
                        # If there are two tracks, as in the case of a video feed with audio, there is an added
                        # wrinkle as the fragment cut seems to be done on the first track that crosses the desired
                        # threshold, and cutting on the audio track may also result in a shorter video fragment
                        # than desired.
                        # Given this, our approach is to give ffmpeg a frag_duration somewhere in the middle
                        # of the range, hoping that the parts stay pretty well bounded, and we adjust the part
                        # durations a bit in the hls metadata so that everything "looks" ok.
                        "frag_duration": str(
                            self._stream_settings.part_target_duration * 9e5
                        ),
                    }
                    if self._stream_settings.ll_hls
                    else {}
                ),
            },
        )
        output_vstream = container.add_stream(template=input_vstream)
        # Check if audio is requested
        output_astream = None
        if input_astream:
            output_astream = container.add_stream(template=input_astream)
        return container, output_vstream, output_astream

    def reset(self, video_dts: int) -> None:
        """Initialize a new stream segment."""
        # Keep track of the number of segments we've processed
        self._part_start_dts = self._segment_start_dts = video_dts
        self._segment = None
        self._memory_file = BytesIO()
        self._memory_file_pos = 0
        (
            self._av_output,
            self._output_video_stream,
            self._output_audio_stream,
        ) = self.make_new_av(
            memory_file=self._memory_file,
            sequence=self._stream_state.next_sequence(),
            input_vstream=self._input_video_stream,
            input_astream=self._input_audio_stream,
        )
        # hvc1 tag is required for HEVC playback in some players (e.g. Safari)
        if self._output_video_stream.name == "hevc":
            self._output_video_stream.codec_tag = "hvc1"

    def mux_packet(self, packet: av.Packet) -> None:
        """Mux a packet to the appropriate output stream."""
        # Check for end of segment
        if packet.stream == self._input_video_stream:
            if (
                packet.is_keyframe
                and (packet.dts - self._segment_start_dts) * packet.time_base
                >= self._stream_settings.min_segment_duration
            ):
                # Flush segment (also flushes the stub part segment)
                self.flush(packet, last_part=True)
            # Mux the packet
            packet.stream = self._output_video_stream
            self._av_output.mux(packet)
            self.check_flush_part(packet)
            self._part_has_keyframe |= packet.is_keyframe
        elif packet.stream == self._input_audio_stream:
            packet.stream = self._output_audio_stream
            self._av_output.mux(packet)

    def check_flush_part(self, packet: av.Packet) -> None:
        """Check for and mark a part segment boundary and record its duration."""
        # No new bytes were written since the last check: nothing to do
        if self._memory_file_pos == self._memory_file.tell():
            return
        if self._segment is None:
            # We have our first non-zero byte position. This means the init has just
            # been written. Create a Segment and put it to the queue of each output.
            self._segment = Segment(
                sequence=self._stream_state.sequence,
                stream_id=self._stream_state.stream_id,
                init=self._memory_file.getvalue(),
                # Fetch the latest StreamOutputs, which may have changed since the
                # worker started.
                stream_outputs=self._stream_state.outputs,
                start_time=self._start_time,
            )
            self._memory_file_pos = self._memory_file.tell()
        else:  # These are the ends of the part segments
            self.flush(packet, last_part=False)

    def flush(self, packet: av.Packet, last_part: bool) -> None:
        """Output a part from the most recent bytes in the memory_file.

        If last_part is True, also close the segment, give it a duration,
        and clean up the av_output and memory_file.
        There are two different ways to enter this function, and when
        last_part is True, packet has not yet been muxed, while when
        last_part is False, the packet has already been muxed. However,
        in both cases, packet is the next packet and is not included in
        the Part.
        This function writes the duration metadata for the Part and
        for the Segment. However, as the fragmentation done by ffmpeg
        may result in fragment durations which fall outside the
        [0.85x,1.0x] tolerance band allowed by LL-HLS, we need to fudge
        some durations a bit by reporting them as being within that
        range.
        Note that repeated adjustments may cause drift between the part
        durations in the metadata and those in the media and result in
        playback issues in some clients.
        """
        # Part durations should not exceed the part target duration
        adjusted_dts = min(
            packet.dts,
            self._part_start_dts
            + self._stream_settings.part_target_duration / packet.time_base,
        )
        if last_part:
            # Closing the av_output will write the remaining buffered data to the
            # memory_file as a new moof/mdat.
            self._av_output.close()
        elif not self._part_has_keyframe:
            # Parts which are not the last part or an independent part should
            # not have durations below 0.85 of the part target duration.
            adjusted_dts = max(
                adjusted_dts,
                self._part_start_dts
                + 0.85 * self._stream_settings.part_target_duration / packet.time_base,
            )
        assert self._segment
        self._memory_file.seek(self._memory_file_pos)
        # Hand the finished Part to the event loop; the walrus below computes
        # the segment duration only on the last part (0 otherwise).
        self._hass.loop.call_soon_threadsafe(
            self._segment.async_add_part,
            Part(
                duration=float(
                    (adjusted_dts - self._part_start_dts) * packet.time_base
                ),
                has_keyframe=self._part_has_keyframe,
                data=self._memory_file.read(),
            ),
            (
                segment_duration := float(
                    (adjusted_dts - self._segment_start_dts) * packet.time_base
                )
            )
            if last_part
            else 0,
        )
        if last_part:
            # If we've written the last part, we can close the memory_file.
            self._memory_file.close()  # We don't need the BytesIO object anymore
            self._start_time += datetime.timedelta(seconds=segment_duration)
            # Reinitialize
            self.reset(packet.dts)
        else:
            # For the last part, these will get set again elsewhere so we can skip
            # setting them here.
            self._memory_file_pos = self._memory_file.tell()
            self._part_start_dts = adjusted_dts
            self._part_has_keyframe = False

    def close(self) -> None:
        """Close stream buffer."""
        self._av_output.close()
        self._memory_file.close()
class PeekIterator(Iterator):
    """An Iterator that may allow multiple passes.

    Consumed like a normal Iterator, but additionally offers peek(),
    which looks ahead without consuming: peeked items are kept in an
    internal buffer and handed out again by subsequent __next__ calls.
    """

    def __init__(self, iterator: Iterator[av.Packet]) -> None:
        """Initialize PeekIterator."""
        self._iterator = iterator
        self._buffer: deque[av.Packet] = deque()
        # Dispatch target: the raw iterator while the buffer is empty,
        # the buffer drainer after a peek().
        self._next = self._iterator.__next__

    def __iter__(self) -> Iterator:
        """Return an iterator."""
        return self

    def __next__(self) -> av.Packet:
        """Return and consume the next item available."""
        return self._next()

    def replace_underlying_iterator(self, new_iterator: Iterator) -> None:
        """Replace the underlying iterator while preserving the buffer."""
        self._iterator = new_iterator
        # If items are still buffered, keep draining them first.
        if not self._buffer:
            self._next = self._iterator.__next__

    def _pop_buffer(self) -> av.Packet:
        """Consume items from the buffer until exhausted."""
        try:
            return self._buffer.popleft()
        except IndexError:
            # Buffer drained: switch back to consuming the iterator.
            self._next = self._iterator.__next__
            return self._next()

    def peek(self) -> Generator[av.Packet, None, None]:
        """Return items without consuming from the iterator."""
        # Anything yielded here is buffered for future __next__/peek calls.
        # First replay the buffer left over from previous peeks.
        self._next = self._pop_buffer
        yield from self._buffer
        for item in self._iterator:
            self._buffer.append(item)
            yield item
class TimestampValidator:
    """Validate ordering of timestamps for packets in a stream."""

    def __init__(self) -> None:
        """Initialize the TimestampValidator."""
        # Most recent decompression timestamp seen, per stream
        self._last_dts: dict[av.stream.Stream, int | float] = defaultdict(
            lambda: float("-inf")
        )
        # Count of consecutive packets that arrived without a dts
        self._missing_dts = 0

    def is_valid(self, packet: av.Packet) -> bool:
        """Validate the packet timestamp based on ordering within the stream."""
        if packet.dts is None:
            # Tolerate a bounded run of packets with no dts, then terminate.
            if self._missing_dts >= MAX_MISSING_DTS:
                raise StreamWorkerError(
                    f"No dts in {MAX_MISSING_DTS+1} consecutive packets"
                )
            self._missing_dts += 1
            return False
        self._missing_dts = 0
        prev_dts = self._last_dts[packet.stream]
        if packet.dts > prev_dts:
            self._last_dts[packet.stream] = packet.dts
            return True
        # Non-monotonic dts: drop the packet, or terminate if the gap is
        # wide enough to indicate a timestamp overflow.
        gap = packet.time_base * (prev_dts - packet.dts)
        if gap > MAX_TIMESTAMP_GAP:
            raise StreamWorkerError(
                f"Timestamp overflow detected: last dts = {prev_dts}, dts = {packet.dts}"
            )
        return False
def is_keyframe(packet: av.Packet) -> Any:
    """Return true if the packet is a keyframe."""
    keyframe_flag = packet.is_keyframe
    return keyframe_flag
def unsupported_audio(packets: Iterator[av.Packet], audio_stream: Any) -> bool:
    """Detect ADTS AAC, which is not supported by pyav."""
    if not audio_stream:
        return False
    for count, packet in enumerate(packets):
        if count >= PACKETS_TO_WAIT_FOR_AUDIO:
            # Some streams declare an audio stream and never send any packets
            _LOGGER.warning("Audio stream not found")
            break
        if packet.stream != audio_stream:
            continue
        # detect ADTS AAC and disable audio
        if audio_stream.codec.name == "aac" and packet.size > 2:
            with memoryview(packet) as packet_view:
                # ADTS frames begin with a 12-bit syncword of all ones
                if packet_view[0] == 0xFF and packet_view[1] & 0xF0 == 0xF0:
                    _LOGGER.warning("ADTS AAC detected - disabling audio stream")
                    return True
        break
    return False
def stream_worker(
    source: str,
    options: dict[str, str],
    stream_state: StreamState,
    quit_event: Event,
) -> None:
    """Handle consuming streams.

    Opens `source` with PyAV, validates the leading packets (finding the
    first keyframe and repairing its dts), then demuxes and remuxes
    packets into segments until the source ends or `quit_event` is set.

    Raises:
        StreamWorkerError: if the stream cannot be opened, has no video,
            or demuxing fails.
        StreamEndedError: when the source runs out of packets.
    """
    try:
        container = av.open(source, options=options, timeout=SOURCE_TIMEOUT)
    except av.AVError as err:
        raise StreamWorkerError(
            "Error opening stream %s" % redact_credentials(str(source))
        ) from err
    try:
        video_stream = container.streams.video[0]
    except (KeyError, IndexError) as ex:
        raise StreamWorkerError("Stream has no video") from ex
    try:
        audio_stream = container.streams.audio[0]
    except (KeyError, IndexError):
        audio_stream = None
    # Disable audio when the codec is not one we can pass through
    if audio_stream and audio_stream.name not in AUDIO_CODECS:
        audio_stream = None
    # These formats need aac_adtstoasc bitstream filter, but auto_bsf not
    # compatible with empty_moov and manual bitstream filters not in PyAV
    if container.format.name in {"hls", "mpegts"}:
        audio_stream = None
    # Some audio streams do not have a profile and throw errors when remuxing
    if audio_stream and audio_stream.profile is None:
        audio_stream = None
    dts_validator = TimestampValidator()
    container_packets = PeekIterator(
        filter(dts_validator.is_valid, container.demux((video_stream, audio_stream)))
    )

    def is_video(packet: av.Packet) -> Any:
        """Return true if the packet is for the video stream."""
        return packet.stream == video_stream

    # Have to work around two problems with RTSP feeds in ffmpeg
    # 1 - first frame has bad pts/dts https://trac.ffmpeg.org/ticket/5018
    # 2 - seeking can be problematic https://trac.ffmpeg.org/ticket/7815
    #
    # Use a peeking iterator to peek into the start of the stream, ensuring
    # everything looks good, then go back to the start when muxing below.
    try:
        if audio_stream and unsupported_audio(container_packets.peek(), audio_stream):
            audio_stream = None
            container_packets.replace_underlying_iterator(
                filter(dts_validator.is_valid, container.demux(video_stream))
            )
        # Advance to the first keyframe for muxing, then rewind so the muxing
        # loop below can consume.
        first_keyframe = next(
            filter(lambda pkt: is_keyframe(pkt) and is_video(pkt), container_packets)
        )
        # Deal with problem #1 above (bad first packet pts/dts) by recalculating
        # using pts/dts from second packet. Use the peek iterator to advance
        # without consuming from container_packets. Skip over the first keyframe
        # then use the duration from the second video packet to adjust dts.
        next_video_packet = next(filter(is_video, container_packets.peek()))
        # Since the is_valid filter has already been applied before the following
        # adjustment, it does not filter out the case where the duration below is
        # 0 and both the first_keyframe and next_video_packet end up with the same
        # dts. Use "or 1" to deal with this.
        start_dts = next_video_packet.dts - (next_video_packet.duration or 1)
        first_keyframe.dts = first_keyframe.pts = start_dts
    except StreamWorkerError as ex:
        container.close()
        raise ex
    except StopIteration as ex:
        container.close()
        raise StreamEndedError("Stream ended; no additional packets") from ex
    except av.AVError as ex:
        container.close()
        raise StreamWorkerError(
            "Error demuxing stream while finding first packet: %s" % str(ex)
        ) from ex
    muxer = StreamMuxer(stream_state.hass, video_stream, audio_stream, stream_state)
    muxer.reset(start_dts)
    # Mux the first keyframe, then proceed through the rest of the packets
    muxer.mux_packet(first_keyframe)
    # contextlib.closing guarantees the container and muxer are released on exit
    with contextlib.closing(container), contextlib.closing(muxer):
        while not quit_event.is_set():
            try:
                packet = next(container_packets)
            except StreamWorkerError as ex:
                raise ex
            except StopIteration as ex:
                raise StreamEndedError("Stream ended; no additional packets") from ex
            except av.AVError as ex:
                raise StreamWorkerError("Error demuxing stream: %s" % str(ex)) from ex
            muxer.mux_packet(packet)
|
|
# -*- coding: utf-8 -*-
#
from six.moves import builtins
import testify as T
from mock import ANY
from mock import mock_open
from mock import patch
from threat_intel.opendns import InvestigateApi
from threat_intel.opendns import ResponseError
from threat_intel.util.api_cache import ApiCache
from threat_intel.util.http import MultiRequest
class InvestigateApiTest(T.TestCase):

    """Tests requesting reports from OpenDNS."""

    @T.setup
    def setup_opendns(self):
        """Create an InvestigateApi with a dummy key before each test."""
        self.opendns = InvestigateApi('test_key')

    def _patch_and_assert_categorization(self, all_responses, expected_responses, domains, expected_url, expected_data):
        """Patch MultiRequest.multi_post and verify categorization() output and call args."""
        with patch.object(MultiRequest, 'multi_post', autospec=True, return_value=all_responses) as patched_multi_post:
            actual_responses = self.opendns.categorization(domains)
            patched_multi_post.assert_called_with(ANY, expected_url, data=expected_data)
            assert expected_responses == actual_responses

    def test_categorization(self):
        """A single POST's responses are returned as-is."""
        domains = ['yellowstone.org', 'zion.org', 'sequoia.org', 'greatsanddunes.org']
        all_responses = [
            {
                u'yellowstone.org': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
                u'zion.org': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
                u'sequoia.org': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
                u'greatsanddunes.org': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                }
            }
        ]
        expected_url = u'https://investigate.api.umbrella.com/domains/categorization/?showLabels'
        expected_data = ['["yellowstone.org", "zion.org", "sequoia.org", "greatsanddunes.org"]']
        expected_responses = all_responses[0]
        self._patch_and_assert_categorization(all_responses, expected_responses, domains, expected_url, expected_data)

    def test_categorization_domains_limit(self):
        """Domains are chunked by MAX_DOMAINS_IN_POST and responses merged."""
        self.opendns.MAX_DOMAINS_IN_POST = 2
        domains = [
            'northyorkmoors.org.uk', 'peakdistrict.org.uk',
            'cairngorms.org.uk', 'pembrokeshirecoast.org.uk',
            'northumberland.org.uk']
        all_responses = [
            {
                u'northyorkmoors.org.uk': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
                u'peakdistrict.org.uk': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
            },
            {
                u'cairngorms.org.uk': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
                u'pembrokeshirecoast.org.uk': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                },
            },
            {
                u'northumberland.org.uk': {
                    u'content_categories': [u'National Parks'],
                    u'security_categories': [],
                    u'status': 1
                }
            }
        ]
        expected_data = [
            '["northyorkmoors.org.uk", "peakdistrict.org.uk"]',
            '["cairngorms.org.uk", "pembrokeshirecoast.org.uk"]',
            '["northumberland.org.uk"]']
        expected_responses = {
            u'northyorkmoors.org.uk': {
                u'content_categories': [u'National Parks'],
                u'security_categories': [],
                u'status': 1
            },
            u'peakdistrict.org.uk': {
                u'content_categories': [u'National Parks'],
                u'security_categories': [],
                u'status': 1
            },
            u'cairngorms.org.uk': {
                u'content_categories': [u'National Parks'],
                u'security_categories': [],
                u'status': 1
            },
            u'pembrokeshirecoast.org.uk': {
                u'content_categories': [u'National Parks'],
                u'security_categories': [],
                u'status': 1
            },
            u'northumberland.org.uk': {
                u'content_categories': [u'National Parks'],
                u'security_categories': [],
                u'status': 1
            }
        }
        self._patch_and_assert_categorization(all_responses, expected_responses, domains, ANY, expected_data)

    def test_categorization_response_error(self):
        """Tests whether the ResponseError is raised when the response
        returned from the actual API call is empty.
        """
        domains = ['yosemite.gov', 'joushuatree.gov', 'deathvalley.gov']
        # empty responses should raise an error
        all_responses = [{}]
        # mock cache file
        mock_read = mock_open(read_data="{}")
        with patch.object(
            builtins, 'open', mock_read, create=True
        ), patch.object(
            ApiCache, 'bulk_lookup', autospec=True, return_value={}
        ), patch.object(
            MultiRequest, 'multi_post', autospec=True, return_value=all_responses
        ):
            i = InvestigateApi('hocus pocus', 'cache.json')
            with T.assert_raises(ResponseError):
                i.categorization(domains)

    def _test_api_call_get(self, call, endpoint, request, expected_url_params,
                           api_response, expected_result, expected_query_params=None):
        """
        Tests a OpenDNS call by mocking out the HTTP GET request.
        Args:
            call: function in OpenDNSApi to call.
            endpoint: endpoint of OpenDNS API that is hit (appended to base url)
            request: call arguments
            expected_url_params: URL parameters that should be passed to API
            api_response: the expected response by the API
            expected_result: what call should return (given the api response provided)
            expected_query_params: query parameters that should be passed to API
        """
        with patch.object(self.opendns, '_requests') as request_mock:
            request_mock.multi_get.return_value = api_response
            result = call(request)
            url = self.opendns._to_url(endpoint.format(expected_url_params))
            request_mock.multi_get.assert_called_with([url], expected_query_params)
            T.assert_equal(result, expected_result)

    def test_security(self):
        """security() hits the security/name endpoint."""
        self._test_api_call_get(call=self.opendns.security,
                                endpoint=u'security/name/{0}.json',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})

    def test_whois_emails(self):
        """whois_emails() hits the whois/emails endpoint."""
        self._test_api_call_get(call=self.opendns.whois_emails,
                                endpoint=u'whois/emails/{0}',
                                request=['admin@dns.com'],
                                expected_url_params='admin@dns.com',
                                api_response={},
                                expected_result={})

    def test_whois_nameservers(self):
        """whois_nameservers() hits the whois/nameservers endpoint."""
        self._test_api_call_get(call=self.opendns.whois_nameservers,
                                endpoint=u'whois/nameservers/{0}',
                                request=['ns.dns.com'],
                                expected_url_params='ns.dns.com',
                                api_response={},
                                expected_result={})

    def test_whois_domains(self):
        """whois_domains() hits the whois endpoint."""
        self._test_api_call_get(call=self.opendns.whois_domains,
                                endpoint=u'whois/{0}',
                                request=['google.com'],
                                expected_url_params='google.com',
                                api_response={},
                                expected_result={})

    def test_whois_domains_history(self):
        """whois_domains_history() hits the whois history endpoint."""
        self._test_api_call_get(call=self.opendns.whois_domains_history,
                                endpoint=u'whois/{0}/history',
                                request=['5esb.biz'],
                                expected_url_params='5esb.biz',
                                api_response={},
                                expected_result={})

    def test_cooccurrences(self):
        """cooccurrences() hits the recommendations endpoint.

        Renamed from test_coocurrences (typo); still discovered via the
        test_ prefix.
        """
        self._test_api_call_get(call=self.opendns.cooccurrences,
                                endpoint=u'recommendations/name/{0}.json',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})

    def test_rr_history(self):
        """rr_history() hits the dnsdb/ip endpoint."""
        self._test_api_call_get(call=self.opendns.rr_history,
                                endpoint=u'dnsdb/ip/a/{0}.json',
                                request=['8.8.8.8'],
                                expected_url_params='8.8.8.8',
                                api_response={},
                                expected_result={})

    def test_latest_malicious(self):
        """latest_malicious() hits the ips/latest_domains endpoint."""
        self._test_api_call_get(call=self.opendns.latest_malicious,
                                endpoint=u'ips/{0}/latest_domains',
                                request=['8.8.8.8'],
                                expected_url_params='8.8.8.8',
                                api_response={},
                                expected_result={})

    def test_domain_tag(self):
        """domain_tag() hits the domains/latest_tags endpoint."""
        self._test_api_call_get(call=self.opendns.domain_tag,
                                endpoint=u'domains/{0}/latest_tags',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})

    def test_dns_rr(self):
        """dns_rr() hits the dnsdb/name endpoint."""
        self._test_api_call_get(call=self.opendns.dns_rr,
                                endpoint=u'dnsdb/name/a/{0}.json',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})

    def test_related_domains(self):
        """related_domains() hits the links/name endpoint."""
        self._test_api_call_get(call=self.opendns.related_domains,
                                endpoint=u'links/name/{0}.json',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})

    def test_sample(self):
        """sample() hits the sample endpoint."""
        self._test_api_call_get(call=self.opendns.sample,
                                endpoint=u'sample/{0}',
                                request=['0492d93195451e41f568f68e7704eb0812bc2b19'],
                                expected_url_params='0492d93195451e41f568f68e7704eb0812bc2b19',
                                api_response={},
                                expected_result={})

    def test_search(self):
        """search() hits the search endpoint with default query params."""
        self._test_api_call_get(call=self.opendns.search,
                                endpoint=u'search/{0}',
                                request=['pattern'],
                                expected_url_params='pattern',
                                api_response={},
                                expected_result={},
                                expected_query_params={'start': '-30days',
                                                       'includecategory': 'false',
                                                       'limit': 1000})

    def test_risk_score(self):
        """risk_score() hits the domains/risk-score endpoint."""
        self._test_api_call_get(call=self.opendns.risk_score,
                                endpoint=u'domains/risk-score/{0}',
                                request=['domain'],
                                expected_url_params='domain',
                                api_response={},
                                expected_result={})
|
|
# -*- coding: utf-8 -*-
"""
Wordpress OAuth1.0a Class
"""
__title__ = "wordpress-oauth"
from time import time
from random import randint
from hmac import new as HMAC
from hashlib import sha1, sha256
from base64 import b64encode
import binascii
import webbrowser
import requests
from bs4 import BeautifulSoup
try:
from urllib.parse import urlencode, quote, unquote, parse_qs, parse_qsl, urlparse, urlunparse
from urllib.parse import ParseResult as URLParseResult
except ImportError:
from urllib import urlencode, quote, unquote
from urlparse import parse_qs, parse_qsl, urlparse, urlunparse
from urlparse import ParseResult as URLParseResult
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from wordpress.helpers import UrlUtils
class OAuth(object):
oauth_version = '1.0'
force_nonce = None
force_timestamp = None
""" API Class """
def __init__(self, requester, consumer_key, consumer_secret, **kwargs):
self.requester = requester
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.signature_method = kwargs.get('signature_method', 'HMAC-SHA1')
self.force_timestamp = kwargs.get('force_timestamp')
self.force_nonce = kwargs.get('force_nonce')
@property
def api_version(self):
return self.requester.api_version
@property
def api_namespace(self):
return self.requester.api
def get_sign_key(self, consumer_secret, token_secret=None):
"gets consumer_secret and turns it into a string suitable for signing"
if not consumer_secret:
raise UserWarning("no consumer_secret provided")
token_secret = str(token_secret) if token_secret else ''
if self.api_namespace == 'wc-api' \
and self.api_version in ["v1", "v2"]:
# special conditions for wc-api v1-2
key = consumer_secret
else:
key = "%s&%s" % (consumer_secret, token_secret)
return key
    def add_params_sign(self, method, url, params, sign_key=None):
        """ Adds the params to a given url, signs the url with sign_key if provided,
        otherwise generates sign_key automatically and returns a signed url """
        if isinstance(params, dict):
            params = params.items()
        urlparse_result = urlparse(url)
        if urlparse_result.query:
            # NOTE(review): `+=` mutates a caller-supplied list in place --
            # confirm callers do not rely on their params list being unchanged
            params += parse_qsl(urlparse_result.query)
        # for key, value in parse_qsl(urlparse_result.query):
        #     params += [(key, value)]
        params = self.sorted_params(params)
        # Any pre-existing oauth_signature must be excluded from the
        # signature base string before re-signing
        params_without_signature = []
        for key, value in params:
            if key != "oauth_signature":
                params_without_signature.append((key, value))
        signature = self.generate_oauth_signature(method, params_without_signature, url, sign_key)
        params = params_without_signature + [("oauth_signature", signature)]
        query_string = self.flatten_params(params)
        return UrlUtils.substitute_query(url, query_string)
def get_params(self):
return [
("oauth_consumer_key", self.consumer_key),
("oauth_nonce", self.generate_nonce()),
("oauth_signature_method", self.signature_method),
("oauth_timestamp", self.generate_timestamp()),
]
def get_oauth_url(self, endpoint_url, method):
""" Returns the URL with OAuth params """
params = self.get_params()
return self.add_params_sign(method, endpoint_url, params)
@classmethod
def get_signature_base_string(cls, method, params, url):
base_request_uri = quote(UrlUtils.substitute_query(url), "")
query_string = quote( cls.flatten_params(params), '~')
return "&".join([method, base_request_uri, query_string])
def generate_oauth_signature(self, method, params, url, key=None):
""" Generate OAuth Signature """
string_to_sign = self.get_signature_base_string(method, params, url)
if key is None:
key = self.get_sign_key(self.consumer_secret)
if self.signature_method == 'HMAC-SHA1':
hmac_mod = sha1
elif self.signature_method == 'HMAC-SHA256':
hmac_mod = sha256
else:
raise UserWarning("Unknown signature_method")
# print "\nstring_to_sign: %s" % repr(string_to_sign)
# print "\nkey: %s" % repr(key)
sig = HMAC(key, string_to_sign, hmac_mod)
sig_b64 = binascii.b2a_base64(sig.digest())[:-1]
# print "\nsig_b64: %s" % sig_b64
return sig_b64
@classmethod
def sorted_params(cls, params):
""" Sort parameters. works with RFC 5849 logic. params is a list of key, value pairs """
if isinstance(params, dict):
params = params.items()
# return sorted(params)
ordered = []
base_keys = sorted(set(k.split('[')[0] for k, v in params))
keys_seen = []
for base in base_keys:
for key, value in params:
if key == base or key.startswith(base + '['):
if key not in keys_seen:
ordered.append((key, value))
keys_seen.append(key)
return ordered
@classmethod
def normalize_str(cls, string):
return quote(string, '')
@classmethod
def normalize_params(cls, params):
""" Normalize parameters. works with RFC 5849 logic. params is a list of key, value pairs """
if isinstance(params, dict):
params = params.items()
params = \
[(cls.normalize_str(key), cls.normalize_str(UrlUtils.get_value_like_as_php(value))) \
for key, value in params]
# print "NORMALIZED: %s\n" % str(params.keys())
# resposne = urlencode(params)
response = params
# print "RESPONSE: %s\n" % str(resposne.split('&'))
return response
@classmethod
def flatten_params(cls, params):
if isinstance(params, dict):
params = params.items()
params = cls.normalize_params(params)
params = cls.sorted_params(params)
return "&".join(["%s=%s"%(key, value) for key, value in params])
@classmethod
def generate_timestamp(cls):
""" Generate timestamp """
if cls.force_timestamp is not None:
return cls.force_timestamp
return int(time())
@classmethod
def generate_nonce(cls):
""" Generate nonce number """
if cls.force_nonce is not None:
return cls.force_nonce
nonce = ''.join([str(randint(0, 9)) for i in range(8)])
return HMAC(
nonce.encode(),
"secret".encode(),
sha1
).hexdigest()
class OAuth_3Leg(OAuth):
    """ Provides 3 legged OAuth1a, mostly based off this: http://www.lexev.org/en/2015/oauth-step-step/

    Flow: discover the auth endpoints, fetch a request token, drive the WP
    login/authorize HTML forms like a browser to obtain a verifier, then
    exchange verifier + request token for an access token used to sign
    requests to protected resources.
    """
    def __init__(self, requester, consumer_key, consumer_secret, callback, **kwargs):
        """
        :param callback: oauth_callback URL the provider redirects to after
            the user authorizes the request token.
        :param wp_user: (kwarg) WordPress username used to fill the login form.
        :param wp_pass: (kwarg) WordPress password used to fill the login form.
        """
        super(OAuth_3Leg, self).__init__(requester, consumer_key, consumer_secret, **kwargs)
        self.callback = callback
        self.wp_user = kwargs.get('wp_user')
        self.wp_pass = kwargs.get('wp_pass')
        # lazily-populated OAuth state; see the properties below
        self._authentication = None
        self._request_token = None
        self.request_token_secret = None
        self._oauth_verifier = None
        self._access_token = None
        self.access_token_secret = None
    @property
    def authentication(self):
        """ This is an object holding the authentication links discovered from the API
        automatically generated if accessed before generated """
        if not self._authentication:
            self._authentication = self.discover_auth()
        return self._authentication
    @property
    def oauth_verifier(self):
        """ This is the verifier string used in authentication
        automatically generated if accessed before generated """
        if not self._oauth_verifier:
            self._oauth_verifier = self.get_verifier()
        return self._oauth_verifier
    @property
    def request_token(self):
        """ This is the oauth_token used in requesting an access_token
        automatically generated if accessed before generated """
        if not self._request_token:
            self.get_request_token()
        return self._request_token
    @property
    def access_token(self):
        """ This is the oauth_token used to sign requests to protected resources
        automatically generated if accessed before generated """
        if not self._access_token:
            self.get_access_token()
        return self._access_token
    def get_oauth_url(self, endpoint_url, method):
        """ Returns the URL with OAuth params, signed with the access token. """
        assert self.access_token, "need a valid access token for this step"
        params = self.get_params()
        params += [
            ('oauth_callback', self.callback),
            ('oauth_token', self.access_token)
        ]
        sign_key = self.get_sign_key(self.consumer_secret, self.access_token_secret)
        return self.add_params_sign(method, endpoint_url, params, sign_key)
    def discover_auth(self):
        """ Discovers the location of authentication resources from the API """
        discovery_url = self.requester.api_url
        response = self.requester.request('GET', discovery_url)
        response_json = response.json()
        # (fixed "resopnse" typos in the assertion message)
        assert \
            response_json['authentication'], \
            "response should include location of authentication resources, response: %s" \
            % UrlUtils.beautify_response(response)
        self._authentication = response_json['authentication']
        return self._authentication
    def get_request_token(self):
        """ Uses the request authentication link to get an oauth_token for requesting an access token """
        assert self.consumer_key, "need a valid consumer_key for this step"
        params = self.get_params()
        params += [
            ('oauth_callback', self.callback)
        ]
        request_token_url = self.authentication['oauth1']['request']
        request_token_url = self.add_params_sign("GET", request_token_url, params)
        response = self.requester.get(request_token_url)
        resp_content = parse_qs(response.text)
        try:
            self._request_token = resp_content['oauth_token'][0]
            self.request_token_secret = resp_content['oauth_token_secret'][0]
        except (KeyError, IndexError):
            # narrowed from a bare except: only a missing/empty field is expected
            raise UserWarning("Could not parse request_token or request_token_secret in response from %s : %s" \
                % (repr(response.request.url), UrlUtils.beautify_response(response)))
        return self._request_token, self.request_token_secret
    def get_form_info(self, response, form_id):
        """ parses a form specified by a given form_id in the response,
        extracts form data and form action """
        # BUGFIX: was `response.status_code is 200`; identity comparison on
        # an int only works by accident of CPython's small-int caching.
        assert response.status_code == 200
        response_soup = BeautifulSoup(response.text, "lxml")
        form_soup = response_soup.select_one('form#%s' % form_id)
        assert \
            form_soup, "unable to find form with id=%s in %s " \
            % (form_id, (response_soup.prettify()).encode('ascii', errors='backslashreplace'))
        action = form_soup.get('action')
        assert \
            action, "action should be provided by form: %s" \
            % (form_soup.prettify()).encode('ascii', errors='backslashreplace')
        # collect every input/button value keyed by name; values are lists
        # because a form may repeat a field name
        form_data = OrderedDict()
        for input_soup in form_soup.select('input') + form_soup.select('button'):
            name = input_soup.get('name')
            if not name:
                continue
            value = input_soup.get('value')
            if name not in form_data:
                form_data[name] = []
            form_data[name].append(value)
        return action, form_data
    def get_verifier(self, request_token=None, wp_user=None, wp_pass=None):
        """ pretends to be a browser, uses the authorize auth link, submits user creds to WP login form to get
        verifier string from access token """
        if request_token is None:
            request_token = self.request_token
        assert request_token, "need a valid request_token for this step"
        if wp_user is None and self.wp_user:
            wp_user = self.wp_user
        if wp_pass is None and self.wp_pass:
            wp_pass = self.wp_pass
        authorize_url = self.authentication['oauth1']['authorize']
        authorize_url = UrlUtils.add_query(authorize_url, 'oauth_token', request_token)
        # use a fresh browser-like session instead of the API requester
        # (the API session's headers are not compatible with the login form)
        authorize_session = requests.Session()
        login_form_response = authorize_session.get(authorize_url)
        try:
            login_form_action, login_form_data = self.get_form_info(login_form_response, 'loginform')
        except AssertionError as e:
            # ("as e" syntax is py2.6+/py3 compatible, was "except AssertionError, e")
            # try to report a more precise error than "form not found"
            login_form_soup = BeautifulSoup(login_form_response.text, 'lxml')
            error = login_form_soup.select_one('div#login_error')
            if error and "invalid token" in error.string.lower():
                raise UserWarning("Invalid token: %s" % repr(request_token))
            else:
                raise UserWarning(
                    "could not parse login form. Site is misbehaving. Original error: %s " \
                    % str(e)
                )
        # fill in the login form: user/pass fields get our credentials,
        # every other field keeps its first default value
        for name, values in login_form_data.items():
            if name == 'log':
                login_form_data[name] = wp_user
            elif name == 'pwd':
                login_form_data[name] = wp_pass
            else:
                login_form_data[name] = values[0]
        assert 'log' in login_form_data, 'input for user login did not appear on form'
        assert 'pwd' in login_form_data, 'input for user password did not appear on form'
        confirmation_response = authorize_session.post(login_form_action, data=login_form_data, allow_redirects=True)
        try:
            authorize_form_action, authorize_form_data = self.get_form_info(confirmation_response, 'oauth1_authorize_form')
        except AssertionError as e:
            if confirmation_response.status_code != 200:
                # BUGFIX: the original passed only one value to a
                # two-placeholder format string (TypeError at raise time)
                raise UserWarning(
                    "Response was not a 200, it was a %s. original error: %s"
                    % (confirmation_response.status_code, str(e))
                )
            confirmation_soup = BeautifulSoup(confirmation_response.text, 'lxml')
            error = confirmation_soup.select_one('div#login_error')
            if error and "invalid token" in error.string.lower():
                raise UserWarning("Invalid token: %s" % repr(request_token))
            else:
                raise UserWarning(
                    "could not parse login form. Site is misbehaving. Original error: %s " \
                    % str(e)
                )
        # answer the authorization form, "pressing" the authorize button
        for name, values in authorize_form_data.items():
            if name == 'wp-submit':
                assert \
                    'authorize' in values, \
                    "apparently no authorize button, only %s" % str(values)
                authorize_form_data[name] = 'authorize'
            else:
                authorize_form_data[name] = values[0]
        # BUGFIX: the original asserted against login_form_data here
        assert 'wp-submit' in authorize_form_data, 'authorize button did not appear on form'
        final_response = authorize_session.post(authorize_form_action, data=authorize_form_data, allow_redirects=False)
        assert \
            final_response.status_code == 302, \
            "was not redirected by authorize screen, was %d instead. something went wrong" \
            % final_response.status_code
        assert 'location' in final_response.headers, "redirect did not provide redirect location in header"
        final_location = final_response.headers['location']
        # parse the verifier straight out of the redirect url rather than
        # following the redirect
        final_location_queries = parse_qs(urlparse(final_location).query)
        assert \
            'oauth_verifier' in final_location_queries, \
            "oauth verifier not provided in final redirect: %s" % final_location
        self._oauth_verifier = final_location_queries['oauth_verifier'][0]
        return self._oauth_verifier
    def get_access_token(self, oauth_verifier=None):
        """ Uses the access authentication link to get an access token """
        if oauth_verifier is None:
            oauth_verifier = self.oauth_verifier
        assert oauth_verifier, "Need an oauth verifier to perform this step"
        assert self.request_token, "Need a valid request_token to perform this step"
        params = self.get_params()
        params += [
            ('oauth_token', self.request_token),
            ('oauth_verifier', self.oauth_verifier)
        ]
        # the access request is signed with the *request* token secret
        sign_key = self.get_sign_key(self.consumer_secret, self.request_token_secret)
        access_token_url = self.authentication['oauth1']['access']
        access_token_url = self.add_params_sign("POST", access_token_url, params, sign_key)
        access_response = self.requester.post(access_token_url)
        assert \
            access_response.status_code == 200, \
            "Access request did not return 200, returned %s. HTML: %s" % (
                access_response.status_code,
                UrlUtils.beautify_response(access_response)
            )
        access_response_queries = parse_qs(access_response.text)
        try:
            self._access_token = access_response_queries['oauth_token'][0]
            self.access_token_secret = access_response_queries['oauth_token_secret'][0]
        except (KeyError, IndexError):
            # narrowed from a bare except: only a missing/empty field is expected
            raise UserWarning("Could not parse access_token or access_token_secret in response from %s : %s" \
                % (repr(access_response.request.url), UrlUtils.beautify_response(access_response)))
        return self._access_token, self.access_token_secret
|
|
from functools import partial
from itertools import chain, izip
from pycparser import c_ast
from pyc_fmtstr_parser.printf_parse import printf_parse, Arg_type as p_Arg_type
from pyc_fmtstr_parser.scanf_parse import scanf_parse, Arg_type as s_Arg_type
from decomp import ida, utils
from decomp.c import decl as cdecl, types as ep_ct
from decomp.cpu import ida as cpu_ida, regs
# flatten one level of nesting (alias for chain.from_iterable)
flatten = chain.from_iterable
# MIPS N32
# $v0,$v1, $a0..$a7 for return/args
# $f0,$f2 and $f12..f19 for fp return/args
# Raised when an argument would have to be spilled to the stack (unsupported).
class RegSpillError(Exception): pass
# Raised when a struct is passed by value (unsupported).
class StructByValueError(Exception): pass
# WARNING: this must be in the same order as c.types.ep_ctypes!
# (pointer is intentionally missing)
# Pairs each primitive C type with the typename of the register slot it
# occupies under N32 (e.g. plain `int` maps to the i32 slot).
c_type_to_slot = zip(
    iter(ep_ct.ep_ctypes),
    [ep_ct.slot_to_typename[x] for x in
        [ep_ct.slot_types[y] for y in
            ['i8',   # signed char
             'u8',   # unsigned char
             'i16',  # short
             'u16',  # unsigned short
             'i32',  # int
             'u32',  # unsigned int
             'i32',  # long
             'u32',  # unsigned long
             'i64',  # long long
             'u64',  # unsigned long long
             's',    # float
             'd',    # double
             'i8'    # char
             ]]])
def make_stdio_sw(fmt_type, types, pointerize):
    '''enum -> [str] -> bool -> dict'''
    # The printf/scanf parsers can recognise nearly every format-string
    # type, but only the subset named in `types` is handled here. Each
    # member of `fmt_type` is paired positionally with c_type_to_slot.
    table = {}
    for type_name, (_, slot_ty) in zip(types, c_type_to_slot):
        entry = ep_ct.ptr(slot_ty) if pointerize is True else slot_ty
        table[fmt_type[type_name]] = entry
    return table
# Arg_type member names understood by the printf parser, positionally
# matching c_type_to_slot. Note the float slot uses TYPE_DOUBLE (C varargs
# promote float to double).
printf_types = [
    'TYPE_SCHAR', 'TYPE_UCHAR', 'TYPE_SHORT', 'TYPE_USHORT',
    'TYPE_INT', 'TYPE_UINT', 'TYPE_LONGINT', 'TYPE_ULONGINT',
    'TYPE_LONGLONGINT', 'TYPE_ULONGLONGINT', 'TYPE_DOUBLE',
    'TYPE_DOUBLE', 'TYPE_CHAR'
]
# Ditto for the scanf parser, which keeps TYPE_FLOAT distinct.
scanf_types = [
    'TYPE_SCHAR', 'TYPE_UCHAR', 'TYPE_SHORT', 'TYPE_USHORT',
    'TYPE_INT', 'TYPE_UINT', 'TYPE_LONGINT', 'TYPE_ULONGINT',
    'TYPE_LONGLONGINT', 'TYPE_ULONGLONGINT', 'TYPE_FLOAT',
    'TYPE_DOUBLE', 'TYPE_CHAR'
]
# Dispatch tables mapping parser Arg_type -> expected C argument type.
# scanf arguments are pointers to their targets, hence pointerize=True.
printf_sw = make_stdio_sw(p_Arg_type, printf_types, pointerize=False)
# %p and %s take pointer arguments even for printf
printf_sw[p_Arg_type.TYPE_POINTER] = ep_ct.ptr(ep_ct.simple_typename(['void']))
printf_sw[p_Arg_type.TYPE_STRING] = ep_ct.ptr(ep_ct.simple_typename(['char']))
scanf_sw = make_stdio_sw(s_Arg_type, scanf_types, pointerize=True)
scanf_sw[s_Arg_type.TYPE_POINTER] = ep_ct.ptr(ep_ct.ptr(ep_ct.simple_typename(['void'])))
scanf_sw[s_Arg_type.TYPE_STRING] = ep_ct.ptr(ep_ct.simple_typename(['char']))
# %[...] scan sets behave like %s for argument purposes
scanf_sw[s_Arg_type.TYPE_CHARSEQ] = scanf_sw[s_Arg_type.TYPE_STRING]
def n32ify_regs(regs):
    '''[str] -> [str]'''
    # Under N32 the O32 temporaries $t0..$t3 become the extra argument
    # registers $a4..$a7; every other register name passes through as-is.
    renames = {'$t0': '$a4', '$t1': '$a5', '$t2': '$a6', '$t3': '$a7'}
    return [renames.get(name, name) for name in regs]
# fix up IDA's register list for N32
reg_list = n32ify_regs(cpu_ida.ida_reg_list())
# offset of beginning of FPR regs in IDA's list
fpr_off = reg_list.index('$f0')
# callee-saved registers (indices into reg_list)
saveregs = frozenset(flatten([
    xrange(16, 24),  # $s0..$s7
    xrange(28, 31),  # $gp, $sp, $fp
    xrange(fpr_off + 20, fpr_off + 32, 2)  # $f20..$f31, evens
]))
# gpr and fpr argument and return registers (indices into reg_list)
arg_regs = list(xrange(4, 12))  # $a0..$a7
fp_arg_regs = list(xrange(fpr_off + 12, fpr_off + 20))  # $f12..$f19
ret_regs = list([2, 3])  # $v0..$v1
fp_ret_regs = list([fpr_off, fpr_off + 2])  # $f0,$f2
# registers that we pass via the ARGS struct
regs_by_reference = frozenset(arg_regs + fp_arg_regs + ret_regs + fp_ret_regs)
def type_to_reg_and_slot(node, chooser, i):
    '''c_ast -> fn -> int -> (reg_type, slot_type) | None'''
    # Resolve where the i'th value (argument, or return when i == 0 with a
    # return chooser) of the C type described by `node` lives: which
    # register (picked via `chooser`) and which slot type it occupies.
    def yield_void():
        # return an empty list for (void) arglists
        # NOTE(review): raises StopIteration out of a plain function; a
        # caller (see utils.items_or_default usage below) is presumably
        # expected to catch it -- confirm, as this pattern would not
        # survive PEP 479 generator semantics.
        raise StopIteration
    def maybe_fail(node):
        # default branch of the dispatch table: reject structs passed by
        # value, otherwise recurse into the wrapped type
        if type(node) is c_ast.Struct:
            raise StructByValueError('structs by value not yet supported')
        return type_to_reg_and_slot(node.type, chooser, i)
    def get(names):
        '''[str] -> (gpr|fpr, slot_ty)'''
        if 'void' in names:
            return yield_void()
        # let IDA parse the declaration to build type info for `chooser`
        ti = ida.parse_decl(' '.join(names))
        (ty, base, slot) = chooser(ti)
        return (ty(base + i), slot)
    # dispatch on AST node class; Decl/TypeDecl/Typename simply unwrap
    sw = {
        c_ast.Decl: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.TypeDecl: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.Typename: lambda x: type_to_reg_and_slot(x.type, chooser, i),
        c_ast.IdentifierType: lambda x: get(x.names)
    }
    if i > 7:
        # only eight argument registers exist on N32
        raise RegSpillError('spilling registers to stack not yet supported')
    # in order to use chooser we need a tinfo_t--make one suitable for an
    # an int (which is also suitable for a pointer on N32)
    dummy_ti = ida.parse_decl('int')
    (_, base, _) = chooser(dummy_ti)
    node_ty = type(node)
    if node_ty in [c_ast.ArrayDecl, c_ast.PtrDecl]:
        # arrays decay to pointers; both occupy a 64-bit GPR slot
        return (regs.gpr(base + i), ep_ct.slot_types.u64)
    elif node_ty is c_ast.Enum:
        return (regs.gpr(base + i), ep_ct.slot_types.i64)
    else:
        return utils.dictswitch(node_ty, sw, node, maybe_fail, node)
def get_info_for_types(nodes, caster, chooser, pos=0, handle_va=False):
    '''[c_ast] -> fn -> fn -> int -> bool ->
    (reg_type, slot_type) | c_ast.EllipsisParam | None'''
    # nodes: a list of Decls from a FuncDecl
    # caster: use this function to produce casts to arg types (see 'castify' in
    # c.decl)
    # chooser: a function that will determine the register number for a return
    # value or argument
    # pos: number of a positional argument (0-indexed)
    # handle_va: pass False if making the initial list of function signatures
    # from; pass True if we want variable arguments to be processed
    # Each result entry is either the raw EllipsisParam marker (left
    # unexpanded on the first pass) or a (register, slot_type, cast) triple.
    return [c_ast.EllipsisParam
            if (handle_va is False and
                type(node) is c_ast.EllipsisParam)
            else
            type_to_reg_and_slot(node, chooser, i) + (caster(node),)
            for (i, node) in enumerate(nodes, pos)]
def va_chooser(gpr_base, _, ti):
    '''int -> int -> tinfo_t -> (type, int, slot_ty)'''
    # Varargs always travel in GPRs on N32, so the register class is fixed;
    # only the slot type reflects the value's C type.
    if ti.is_float():
        slot_ty = ep_ct.slot_types.s
    else:
        slot_ty = ep_ct.slot_types.d if ti.is_double() else ep_ct.slot_types.u64
    return (regs.gpr, gpr_base, slot_ty)
def pos_chooser(gpr_base, fpr_base, ti):
    '''int -> int -> tinfo_t -> (type, int, slot_ty)'''
    # Positional float/double arguments go to FPRs starting at fpr_base;
    # everything else (ints, pointers) takes a 64-bit GPR slot.
    if ti.is_float():
        return (regs.fpr, fpr_base, ep_ct.slot_types.s)
    if ti.is_double():
        return (regs.fpr, fpr_base, ep_ct.slot_types.d)
    return (regs.gpr, gpr_base, ep_ct.slot_types.u64)
# positional arguments: GPRs start at $4 ($a0), FPRs at $f12
pos_wrap = partial(pos_chooser, 4, 12)
# varargs of *any* type are passed in $a0..$a7 on N32 (the second argument to
# the partially-applied va_chooser is unused)
va_wrap = partial(va_chooser, 4, 12)
def get_abi_fn_arg_map(node):
    '''c_ast -> fn_sig'''
    # Build an fn_sig (return slot + argument slots) for a FuncDecl AST
    # node according to the N32 calling convention.
    # non-fp args are returned in $2..$3, fp-args in $f0,$f2
    # XXX multi-reg returns are not supported, but this is rare
    ret_chooser = partial(pos_chooser, 2, 0)
    caster = lambda x: cdecl.castify(x.type)
    rtype = utils.items_or_default(
        lambda: get_info_for_types([node], caster, ret_chooser)[0],
        None)  # if void return type, return None
    args = [x for (_, x) in node.args.children()]
    arg_types = utils.items_or_default(
        lambda: list(get_info_for_types(args, caster, pos_wrap)),
        [])  # if function takes no args, return empty list
    return ep_ct.fn_sig(rtype, arg_types)
def get_args_for_va_function(callee, pos_arg):
    '''str -> str -> [(reg_type, slot_type) | None]'''
    # Parse the format string `pos_arg` of a printf/scanf-family call and
    # return register/slot info for the variadic arguments it implies.
    def get_convs(acc, va_arg):
        # NOTE: `sw` is bound lazily -- the closure sees the dispatch table
        # unpacked from pos_sw[callee] below, before reduce() runs.
        return acc + [sw[va_arg.type]]
    # callee -> (index of the format-string argument, Arg_type table,
    #            format-string parser, register chooser)
    pos_sw = {'printf': (1, printf_sw, printf_parse, va_wrap),
              'scanf': (1, scanf_sw, scanf_parse, pos_wrap),
              'sscanf': (2, scanf_sw, scanf_parse, pos_wrap)}
    try:
        (pos, sw, fn, chooser) = pos_sw[callee]
    except KeyError:
        raise utils.BugError('unrecognized callee %s' % callee)
    (_, args) = fn(pos_arg)
    convs = reduce(get_convs, args.arg, [])
    return list(get_info_for_types(
        convs, lambda x: x, chooser, pos=pos, handle_va=True))
|
|
import logging
import sys
class NoConsoleAvailableError(Exception):
    """Raised when no usable terminal backend could be imported."""
# module-level logger shared by every function in this facade
log = logging.getLogger('pytality.term')
#-----------------------------------------------------------------------------
# Terminal setup/teardown
def _find_impl(choices):
    """
    Find a suitable terminal implementation.

    Tries each backend named in `choices`, in order, and returns the first
    module that imports successfully; the silverlight and winconsole
    backends are additionally gated on sys.platform.

    Raises NoConsoleAvailableError when no backend can be used.
    (Modernized `except ImportError, e` to the py2.6+/py3-compatible
    `except ImportError as e` form.)
    """
    success = False
    for choice in choices:
        if choice == 'pygame':
            try:
                import term_pygame as _impl
                log.debug("Imported term_pygame successfully")
                success = True
                break
            except ImportError as e:
                log.debug("Could not import term_pygame: %r", e)
                continue
        if choice == 'silverlight' and sys.platform == 'silverlight':
            import term_silverlight as _impl
            log.debug("Imported term_silverlight successfully")
            success = True
            break
        if choice == 'winconsole' and sys.platform == 'win32':
            try:
                import term_winconsole as _impl
                log.debug("Imported term_winconsole successfully")
                success = True
                break
            except ImportError as e:
                log.debug("Could not import term_winconsole: %r", e)
                continue
        if choice == 'curses':
            try:
                import term_curses as _impl
                log.debug("Imported term_curses successfully")
                success = True
                break
            except ImportError as e:
                log.debug("Could not import term_curses: %r", e)
                continue
    if not success:
        raise NoConsoleAvailableError("Could not find any suitable console library. You may need a working curses implementation if you are on linux.")
    return _impl
# Our global terminal implementation module; None until init() selects one.
impl = None
class colors:
    """
    Constants for the sixteen ANSI colors.
    Note that background colors on some backends (curses) cannot be set as "bright".
    """
    # dark palette (0-7)
    BLACK = 0
    BLUE = 1
    GREEN = 2
    CYAN = 3
    RED = 4
    MAGENTA = 5
    BROWN = 6
    LIGHTGRAY = 7
    # bright palette (8-15)
    DARKGRAY = 8
    LIGHTBLUE = 9
    LIGHTGREEN = 10
    LIGHTCYAN = 11
    LIGHTRED = 12
    LIGHTMAGENTA = 13
    YELLOW = 14
    WHITE = 15
    # alternate spellings of the two greys
    LIGHTGREY = LIGHTGRAY
    DARKGREY = DARKGRAY
def init(**kwargs):
    """
    Initialize the terminal and check that it's of an appropriate size.
    Also disables the cursor image to avoid flickery drawings. Use set_cursor_state
    to re-enable it when prompting for input.
    config:
        a dictionary of configuration options for Pytality.
        Supports the following keys:
        backends:
            A list of backends to try, in order.
            Defaults to ['silverlight', 'winconsole', 'pygame', 'curses']
        width:
        height:
            Minimum dimensions of the screen.
            Note that when using curses, a 1-row/column margin is added on the edge to prevent spurious failures.
            Defaults to 80 wide by 24 tall.
    """
    # only `impl` is rebound here; the original also declared `colors`
    # global, a no-op since that name is never assigned
    global impl
    log.debug("init(): initializing terminal")
    default_config = dict(
        backends=['silverlight', 'winconsole', 'pygame', 'curses'],
        width=80,
        height=24,
    )
    # caller-supplied options override the defaults
    if kwargs:
        default_config.update(kwargs)
    config = default_config
    impl = _find_impl(config['backends'])
    impl.init()
    resize(config['width'], config['height'])
def reset():
    """
    Reset the terminal to a usable state.
    Enables the cursor image,
    clears the current line,
    and resets the current color.
    May also perform other cleanup, depending on implementation.
    You should call reset() unconditionally on all application exits.
    """
    log.debug("reset(): resetting terminal")
    if not impl:
        # init() never ran (or failed), so there is nothing to clean up.
        # (log.warn is a deprecated alias; use log.warning)
        log.warning("reset(): no backend to reset!")
        return
    impl.reset()
def resize(width, height):
    """
    Resize the terminal.
    On Windows, this actually resizes the terminal.
    On Linux/Mac, this can only verify the terminal's size is sufficient
    and raises TerminalTooSmallError if it isn't.
    """
    log.debug("resize(): target width=%r, height=%r", width, height)
    impl.resize(width, height)
#-----------------------------------------------------------------------------
# Drawing functions
# The functions below are thin pass-throughs to the selected backend
# module (`impl`); init() must have been called first.
def clear():
    """
    Clear the screen to a blank state and home the cursor.
    Unlike buffer drawing, this renders immediately, and does not require flip().
    """
    impl.clear()
def draw_buffer(buf, x, y):
    """
    Draw a buffer to the backing buffer (if double-buffered) on the screen
    at the coordinates specified.
    After you have drawn all buffers for this "frame", call flip() to render
    the changes.
    """
    impl.draw_buffer(buf, x, y)
def flip():
    """
    Refresh the terminal, flushing all changes to the screen.
    Must be called to ensure changes (from drawing buffers)
    are actually rendered.
    """
    impl.flip()
def get_at(x, y):
    """
    Get a character from the screen at the specified coordinate.
    Generally, you shouldn't need this function, but uses come up from time to time,
    most notably testing.
    Returns [fg, bg, character]
    """
    return impl.get_at(x, y)
def move_cursor(x, y):
    """
    Position the text cursor at a coordinate on the screen.
    """
    return impl.move_cursor(x, y)
def set_cursor_type(cursor_type):
    """
    Change the terminal cursor graphic.
    There are three types of cursor available:
    "blank": Draw no flashing cursor. Set by default on init()
    "normal": Draw the standard cursor, usually a flashing "_".
    "block": Draw a blocky cursor, usually a full-character block
    Because most libraries refer to these three cursor types as 0, 1, and 2,
    that notation is allowed here as well.
    """
    # accept either the symbolic name or the raw numeric code
    name_to_code = {'blank': 0, 'normal': 1, 'block': 2}
    impl.set_cursor_type(name_to_code.get(cursor_type, cursor_type))
def set_title(title):
    """
    Change the title of the terminal window.
    """
    impl.set_title(title)
#-----------------------------------------------------------------------------
# Keyboard functions
def raw_getkey():
    """
    Get a key of keyboard input.
    Returns None, 1 character, or the name of the special key.
    ^C is not converted into an exception.
    """
    return impl.raw_getkey()
def getkey():
    """
    Get a key of keyboard input, as per raw_getkey(), but doesn't return None,
    and perform some translations.
    ^C raises KeyboardInterrupt
    CTRL+<letter> key combinations return 'ctrl-<letter>'
    """
    while True:
        key = impl.raw_getkey()
        if key is None:
            # the backend had nothing for us; poll again
            continue
        if key == '\x03':
            # ctrl-c becomes an exception rather than a key
            raise KeyboardInterrupt()
        if key == '\t':
            # tab is technically ctrl-i, but passes through untranslated
            return key
        if len(key) == 1 and 1 <= ord(key) <= 26:
            # remaining control characters are reported as 'ctrl-<letter>'
            return "ctrl-%s" % chr(ord(key) + 96)
        return key
|
|
"""
Module to test plotly.utils with optional dependencies.
"""
from __future__ import absolute_import
import datetime
import json
import math
from datetime import datetime as dt
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
import pytz
from plotly import utils
from plotly.graph_objs import Scatter, Scatter3d, Figure, Data
from plotly.grid_objs import Column
from plotly.matplotlylib import Exporter, PlotlyRenderer
class TestJSONEncoder(TestCase):
def test_encode_as_plotly(self):
# should *fail* when object doesn't have `to_plotly_json` attribute
objs_without_attr = [
1, 'one', set(['a', 'set']), {'a': 'dict'}, ['a', 'list']
]
for obj in objs_without_attr:
self.assertRaises(utils.NotEncodable,
utils.PlotlyJSONEncoder.encode_as_plotly, obj)
# should return without exception when obj has `to_plotly_josn` attr
expected_res = 'wedidit'
class ObjWithAttr(object):
def to_plotly_json(self):
return expected_res
res = utils.PlotlyJSONEncoder.encode_as_plotly(ObjWithAttr())
self.assertEqual(res, expected_res)
def test_encode_as_list(self):
# should *fail* when object doesn't have `tolist` method
objs_without_attr = [
1, 'one', set(['a', 'set']), {'a': 'dict'}, ['a', 'list']
]
for obj in objs_without_attr:
self.assertRaises(utils.NotEncodable,
utils.PlotlyJSONEncoder.encode_as_list, obj)
# should return without exception when obj has `tolist` attr
expected_res = ['some', 'list']
class ObjWithAttr(object):
def tolist(self):
return expected_res
res = utils.PlotlyJSONEncoder.encode_as_list(ObjWithAttr())
self.assertEqual(res, expected_res)
def test_encode_as_pandas(self):
# should *fail* on things that are not specific pandas objects
not_pandas = ['giraffe', 6, float('nan'), ['a', 'list']]
for obj in not_pandas:
self.assertRaises(utils.NotEncodable,
utils.PlotlyJSONEncoder.encode_as_pandas, obj)
# should succeed when we've got specific pandas thingies
res = utils.PlotlyJSONEncoder.encode_as_pandas(pd.NaT)
self.assertTrue(res is None)
def test_encode_as_numpy(self):
# should *fail* on non-numpy-y things
not_numpy = ['hippo', 8, float('nan'), {'a': 'dict'}]
for obj in not_numpy:
self.assertRaises(utils.NotEncodable,
utils.PlotlyJSONEncoder.encode_as_numpy, obj)
# should succeed with numpy-y-thingies
res = utils.PlotlyJSONEncoder.encode_as_numpy(np.ma.core.masked)
self.assertTrue(math.isnan(res))
def test_encode_as_datetime(self):
# should *fail* without 'utcoffset' and 'isoformat' and '__sub__' attrs
non_datetimes = [datetime.date(2013, 10, 1), 'noon', 56, '00:00:00']
for obj in non_datetimes:
self.assertRaises(utils.NotEncodable,
utils.PlotlyJSONEncoder.encode_as_datetime, obj)
# should succeed with 'utcoffset', 'isoformat' and '__sub__' attrs
res = utils.PlotlyJSONEncoder.encode_as_datetime(
datetime.datetime(2013, 10, 1)
)
self.assertEqual(res, '2013-10-01')
# should not include extraneous microsecond info if DNE
res = utils.PlotlyJSONEncoder.encode_as_datetime(
datetime.datetime(2013, 10, 1, microsecond=0)
)
self.assertEqual(res, '2013-10-01')
# should include microsecond info if present
res = utils.PlotlyJSONEncoder.encode_as_datetime(
datetime.datetime(2013, 10, 1, microsecond=10)
)
self.assertEqual(res, '2013-10-01 00:00:00.000010')
# should convert tzinfo to utc. Note that in october, we're in EDT!
# therefore the 4 hour difference is correct.
naive_datetime = datetime.datetime(2013, 10, 1)
aware_datetime = pytz.timezone('US/Eastern').localize(naive_datetime)
res = utils.PlotlyJSONEncoder.encode_as_datetime(aware_datetime)
self.assertEqual(res, '2013-10-01 04:00:00')
def test_encode_as_date(self):
    """encode_as_date ISO-formats dates, and tolerates plain datetimes too."""
    # should *fail* without 'utcoffset' and 'isoformat' and '__sub__' attrs
    non_datetimes = ['noon', 56, '00:00:00']
    for obj in non_datetimes:
        self.assertRaises(utils.NotEncodable,
                          utils.PlotlyJSONEncoder.encode_as_date, obj)
    # should work with a date
    a_date = datetime.date(2013, 10, 1)
    res = utils.PlotlyJSONEncoder.encode_as_date(a_date)
    self.assertEqual(res, '2013-10-01')
    # should also work with a date time without a utc offset!
    # TODO: is this OK? We could raise errors after checking isinstance...
    res = utils.PlotlyJSONEncoder.encode_as_date(
        datetime.datetime(2013, 10, 1, microsecond=10)
    )
    self.assertEqual(res, '2013-10-01 00:00:00.000010')
## JSON encoding
# Shared fixtures for the module-level JSON-encoding tests below.
numeric_list = [1, 2, 3]
# numpy array mixing ints, NaN/Inf and a datetime (becomes object dtype).
np_list = np.array([1, 2, 3, np.NaN, np.NAN, np.Inf, dt(2014, 1, 5)])
# heterogeneous python list: ints, strings, datetimes with/without time parts
mixed_list = [1, 'A', dt(2014, 1, 5), dt(2014, 1, 5, 1, 1, 1),
              dt(2014, 1, 5, 1, 1, 1, 1)]
dt_list = [dt(2014, 1, 5), dt(2014, 1, 5, 1, 1, 1),
           dt(2014, 1, 5, 1, 1, 1, 1)]
# single-column DataFrame with NaT/NaN/Inf mixed in
df = pd.DataFrame(columns=['col 1'],
                  data=[1, 2, 3, dt(2014, 1, 5), pd.NaT, np.NaN, np.Inf])
# hourly DatetimeIndex and a float Series indexed by it
rng = pd.date_range('1/1/2011', periods=2, freq='H')
ts = pd.Series([1.5, 2.5], index=rng)
def test_column_json_encoding():
    """Columns of numeric, mixed and numpy data serialize to the expected
    JSON, with NaN/Inf as null and datetimes as ISO strings."""
    columns = [
        Column(numeric_list, 'col 1'),
        Column(mixed_list, 'col 2'),
        Column(np_list, 'col 3')
    ]
    json_columns = json.dumps(
        columns, cls=utils.PlotlyJSONEncoder, sort_keys=True
    )
    assert('[{"data": [1, 2, 3], "name": "col 1"}, '
           '{"data": [1, "A", "2014-01-05", '
           '"2014-01-05 01:01:01", '
           '"2014-01-05 01:01:01.000001"], '
           '"name": "col 2"}, '
           '{"data": [1, 2, 3, null, null, null, '
           '"2014-01-05"], "name": "col 3"}]' == json_columns)
def test_figure_json_encoding():
    """Graph objects (Scatter/Scatter3d/Data/Figure) serialize through
    PlotlyJSONEncoder without mutating the underlying data."""
    df = pd.DataFrame(columns=['col 1'], data=[1, 2, 3])
    s1 = Scatter3d(x=numeric_list, y=np_list, z=mixed_list)
    s2 = Scatter(x=df['col 1'])
    data = Data([s1, s2])
    figure = Figure(data=data)
    js1 = json.dumps(s1, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    js2 = json.dumps(s2, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    assert(js1 == '{"type": "scatter3d", "x": [1, 2, 3], '
                  '"y": [1, 2, 3, null, null, null, "2014-01-05"], '
                  '"z": [1, "A", "2014-01-05", '
                  '"2014-01-05 01:01:01", "2014-01-05 01:01:01.000001"]}')
    assert(js2 == '{"type": "scatter", "x": [1, 2, 3]}')
    # Test JSON encoding works
    json.dumps(data, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    json.dumps(figure, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    # Test data wasn't mutated
    assert(bool(np.asarray(np_list ==
                np.array([1, 2, 3, np.NaN,
                          np.NAN, np.Inf, dt(2014, 1, 5)])).all()))
    assert(set(data[0]['z']) ==
           set([1, 'A', dt(2014, 1, 5), dt(2014, 1, 5, 1, 1, 1),
                dt(2014, 1, 5, 1, 1, 1, 1)]))
def test_datetime_json_encoding():
    """datetime lists serialize as ISO strings, bare or nested in a dict."""
    j1 = json.dumps(dt_list, cls=utils.PlotlyJSONEncoder)
    assert(j1 == '["2014-01-05", '
                 '"2014-01-05 01:01:01", '
                 '"2014-01-05 01:01:01.000001"]')
    j2 = json.dumps({"x": dt_list}, cls=utils.PlotlyJSONEncoder)
    assert(j2 == '{"x": ["2014-01-05", '
                 '"2014-01-05 01:01:01", '
                 '"2014-01-05 01:01:01.000001"]}')
def test_pandas_json_encoding():
    """pandas Series/Index/DatetimeIndex serialize to plain JSON lists
    (NaT/NaN/Inf -> null) without mutating the source objects."""
    j1 = json.dumps(df['col 1'], cls=utils.PlotlyJSONEncoder)
    assert(j1 == '[1, 2, 3, "2014-01-05", null, null, null]')
    # Test that data wasn't mutated
    assert_series_equal(df['col 1'],
                        pd.Series([1, 2, 3, dt(2014, 1, 5),
                                   pd.NaT, np.NaN, np.Inf], name='col 1'))
    j2 = json.dumps(df.index, cls=utils.PlotlyJSONEncoder)
    assert(j2 == '[0, 1, 2, 3, 4, 5, 6]')
    nat = [pd.NaT]
    j3 = json.dumps(nat, cls=utils.PlotlyJSONEncoder)
    assert(j3 == '[null]')
    # encoding must not replace the NaT sentinel in the caller's list
    assert(nat[0] is pd.NaT)
    j4 = json.dumps(rng, cls=utils.PlotlyJSONEncoder)
    assert(j4 == '["2011-01-01", "2011-01-01 01:00:00"]')
    j5 = json.dumps(ts, cls=utils.PlotlyJSONEncoder)
    assert(j5 == '[1.5, 2.5]')
    assert_series_equal(ts, pd.Series([1.5, 2.5], index=rng))
    j6 = json.dumps(ts.index, cls=utils.PlotlyJSONEncoder)
    assert(j6 == '["2011-01-01", "2011-01-01 01:00:00"]')
def test_numpy_masked_json_encoding():
    """A numpy masked constant inside a plain list serializes to JSON null."""
    encoded = json.dumps([1, 2, np.ma.core.masked],
                         cls=utils.PlotlyJSONEncoder)
    print(encoded)
    assert(encoded == '[1, 2, null]')
def test_masked_constants_example():
    """Masked values surviving a matplotlib->plotly conversion encode as null."""
    # example from: https://gist.github.com/tschaume/d123d56bf586276adb98
    data = {
        'esN': [0, 1, 2, 3],
        'ewe_is0': [-398.11901997, -398.11902774,
                    -398.11897111, -398.11882215],
        'ewe_is1': [-398.11793027, -398.11792966, -398.11786308, None],
        'ewe_is2': [-398.11397008, -398.11396421, None, None]
    }
    df = pd.DataFrame.from_dict(data)
    plotopts = {'x': 'esN', 'marker': 'o'}
    fig, ax = plt.subplots(1, 1)
    df.plot(ax=ax, **plotopts)
    # convert the matplotlib figure into a plotly figure
    renderer = PlotlyRenderer()
    Exporter(renderer).run(fig)
    # whole-figure encoding must not raise
    json.dumps(renderer.plotly_fig, cls=utils.PlotlyJSONEncoder)
    jy = json.dumps(renderer.plotly_fig['data'][1]['y'],
                    cls=utils.PlotlyJSONEncoder)
    print(jy)
    array = json.loads(jy)
    assert(array == [-398.11793027, -398.11792966, -398.11786308, None])
def test_numpy_dates():
    """A numpy datetime64 range serializes as a list of ISO date strings."""
    week = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18'))
    encoded = json.dumps(week, cls=utils.PlotlyJSONEncoder)
    expected = ('["2011-07-11", "2011-07-12", "2011-07-13", '
                '"2011-07-14", "2011-07-15", "2011-07-16", '
                '"2011-07-17"]')
    assert(encoded == expected)
def test_datetime_dot_date():
    """datetime.date values serialize as ISO date strings."""
    days = [datetime.date(2014, 1, 1), datetime.date(2014, 1, 2)]
    assert(json.dumps(days, cls=utils.PlotlyJSONEncoder) ==
           '["2014-01-01", "2014-01-02"]')
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# called from wnf.py
# lib/wnf.py --install [rootpassword] [dbname] [source]
from __future__ import unicode_literals
import os, json
import frappe
import frappe.database
import getpass
import importlib
from frappe.model.db_schema import DbManager
from frappe.model.sync import sync_for
from frappe.utils.fixtures import sync_fixtures
from frappe.website import render, statics
def install_db(root_login="root", root_password=None, db_name=None, source_sql=None,
               admin_password=None, verbose=True, force=0, site_config=None, reinstall=False):
    """Create the site database and load its starting schema.

    With reinstall, the existing site config is reused and the database
    simply recreated; otherwise a MySQL root connection is opened and the
    database plus its dedicated user are (re)created. Afterwards the SQL
    dump in source_sql (or the bundled default) is imported, missing apps
    are pruned and the __Auth table is ensured.
    """
    frappe.flags.in_install_db = True
    # (re)write site_config.json and re-init frappe so it takes effect
    make_conf(db_name, site_config=site_config)
    if reinstall:
        frappe.connect(db_name=db_name)
        dbman = DbManager(frappe.local.db)
        dbman.create_database(db_name)
    else:
        frappe.local.db = make_connection(root_login, root_password)
        frappe.local.session = frappe._dict({'user':'Administrator'})
        create_database_and_user(force, verbose)
    frappe.conf.admin_password = frappe.conf.admin_password or admin_password
    frappe.connect(db_name=db_name)
    import_db_from_sql(source_sql, verbose)
    # drop installed apps whose modules can no longer be imported
    remove_missing_apps()
    create_auth_table()
    frappe.flags.in_install_db = False
def get_current_host():
    """Return the host part of MySQL's ``user()`` result (e.g. 'localhost')."""
    user_at_host = frappe.db.sql("select user()")[0][0]
    return user_at_host.split('@')[1]
def create_database_and_user(force, verbose):
    """Drop and recreate the site's MySQL user and database, then grant
    the user full privileges on it.

    Proceeds when force is truthy or the database does not yet exist;
    otherwise raises Exception. Closes the root connection when done.

    Fix: the original used Python-2-only ``print`` statements, a syntax
    error under Python 3; the single-argument parenthesized form prints
    identically on both interpreters.
    """
    db_name = frappe.local.conf.db_name
    dbman = DbManager(frappe.local.db)
    if force or (db_name not in dbman.get_database_list()):
        # clean out any leftover user/database from a previous attempt
        dbman.delete_user(db_name, get_current_host())
        dbman.drop_database(db_name)
    else:
        raise Exception("Database %s already exists" % (db_name,))

    dbman.create_user(db_name, frappe.conf.db_password, get_current_host())
    if verbose: print("Created user %s" % db_name)

    dbman.create_database(db_name)
    if verbose: print("Created database %s" % db_name)

    dbman.grant_all_privileges(db_name, db_name, get_current_host())
    dbman.flush_privileges()
    if verbose: print("Granted privileges to user %s and database %s" % (db_name, db_name))

    # close root connection
    frappe.db.close()
def create_auth_table():
    """Create the __Auth table (user -> password) if it does not already exist."""
    frappe.db.sql_ddl("""create table if not exists __Auth (
        `user` VARCHAR(180) NOT NULL PRIMARY KEY,
        `password` VARCHAR(180) NOT NULL
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8""")
def import_db_from_sql(source_sql, verbose):
    """Restore the site's database from a SQL dump.

    Falls back to the Framework.sql bundled with frappe when source_sql
    is not given. Fix: Python-2-only ``print`` statements replaced with
    the parenthesized form, valid and identical on Python 2 and 3.
    """
    if verbose: print("Starting database import...")
    db_name = frappe.conf.db_name
    if not source_sql:
        # default to the framework's bundled bootstrap dump
        source_sql = os.path.join(os.path.dirname(frappe.__file__), 'data', 'Framework.sql')
    DbManager(frappe.local.db).restore_database(db_name, source_sql, db_name, frappe.conf.db_password)
    if verbose: print("Imported from database %s" % source_sql)
def make_connection(root_login, root_password):
    """Open a database connection as the MySQL root user.

    When no password was supplied, fall back to the site config's
    root_password, then to an interactive prompt.
    """
    if root_login and not root_password:
        root_password = frappe.conf.get("root_password") or None
        if not root_password:
            root_password = getpass.getpass("MySQL root password: ")
    return frappe.database.Database(user=root_login, password=root_password)
def install_app(name, verbose=False, set_as_patched=True):
    """Install app `name` into the current site.

    Validates the app against apps.txt, runs before_install hooks, syncs
    its doctypes and fixtures, registers it in installed_apps and runs
    after_install hooks. No-ops (with a message) if already installed.

    Fix: Python-2-only ``print`` statements replaced with the
    parenthesized form, valid and identical on Python 2 and 3.
    """
    frappe.flags.in_install = name
    frappe.clear_cache()
    app_hooks = frappe.get_hooks(app_name=name)
    installed_apps = frappe.get_installed_apps()

    if name not in frappe.get_all_apps(with_frappe=True):
        raise Exception("App not in apps.txt")

    if name in installed_apps:
        print("App Already Installed")
        frappe.msgprint("App {0} already installed".format(name))
        return

    # installing anything beyond frappe itself requires System Manager
    if name != "frappe":
        frappe.only_for("System Manager")

    for before_install in app_hooks.before_install or []:
        frappe.get_attr(before_install)()

    if name != "frappe":
        add_module_defs(name)

    sync_for(name, force=True, sync_everything=True, verbose=verbose)

    add_to_installed_apps(name)

    if set_as_patched:
        set_all_patches_as_completed(name)

    for after_install in app_hooks.after_install or []:
        frappe.get_attr(after_install)()

    print("Installing Fixtures...")
    sync_fixtures(name)

    frappe.flags.in_install = False
def add_to_installed_apps(app_name, rebuild_website=True):
    """Append app_name to the persisted installed-apps list and run post-install.

    No-op when the app is already registered. Fix: replaced the
    unidiomatic ``not app_name in installed_apps`` with ``not in``.
    """
    installed_apps = frappe.get_installed_apps()
    if app_name not in installed_apps:
        installed_apps.append(app_name)
        frappe.db.set_global("installed_apps", json.dumps(installed_apps))
        frappe.db.commit()
        post_install(rebuild_website)
def remove_from_installed_apps(app_name):
    """Remove app_name from the persisted installed-apps list and run post-install."""
    installed_apps = frappe.get_installed_apps()
    if app_name not in installed_apps:
        return
    installed_apps.remove(app_name)
    frappe.db.set_global("installed_apps", json.dumps(installed_apps))
    frappe.db.commit()
    post_install()
def post_install(rebuild_website=False):
    """Commit and clear caches after (un)install; optionally rebuild website caches."""
    if rebuild_website:
        render.clear_cache()
        statics.sync().start()
    frappe.db.commit()
    frappe.clear_cache()
def set_all_patches_as_completed(app):
    """Record every patch listed in the app's patches.txt as already run.

    A fresh install starts from the current schema, so historical patches
    must be marked done (as Patch Log documents) to avoid re-running them.
    """
    patch_path = os.path.join(frappe.get_pymodule_path(app), "patches.txt")
    if os.path.exists(patch_path):
        for patch in frappe.get_file_items(patch_path):
            frappe.get_doc({
                "doctype": "Patch Log",
                "patch": patch
            }).insert()
        frappe.db.commit()
def make_conf(db_name=None, db_password=None, site_config=None):
    """Write site_config.json (if missing) and re-init frappe so it is loaded."""
    site = frappe.local.site
    make_site_config(db_name, db_password, site_config)
    sites_path = frappe.local.sites_path
    # tear down and re-init local state so the fresh config takes effect
    frappe.destroy()
    frappe.init(site, sites_path=sites_path)
def make_site_config(db_name=None, db_password=None, site_config=None):
    """Create site_config.json with db credentials; never overwrites an existing file."""
    frappe.create_folder(os.path.join(frappe.local.site_path))
    site_file = os.path.join(frappe.local.site_path, "site_config.json")
    if not os.path.exists(site_file):
        # fall back to prompting/generating credentials when no dict given
        if not (site_config and isinstance(site_config, dict)):
            site_config = get_conf_params(db_name, db_password)
        with open(site_file, "w") as f:
            f.write(json.dumps(site_config, indent=1, sort_keys=True))
def get_conf_params(db_name=None, db_password=None):
    """Return {'db_name', 'db_password'}, prompting for a name and
    generating a random 16-char password when not supplied.

    NOTE(review): uses ``raw_input``, so this module is Python 2 only.
    """
    if not db_name:
        db_name = raw_input("Database Name: ")
        if not db_name:
            raise Exception("Database Name Required")
    if not db_password:
        from frappe.utils import random_string
        db_password = random_string(16)
    return {"db_name": db_name, "db_password": db_password}
def make_site_dirs():
    """Create the site's private/backups, public/files and locks directories."""
    site_public_path = os.path.join(frappe.local.site_path, 'public')
    site_private_path = os.path.join(frappe.local.site_path, 'private')
    needed = (
        os.path.join(site_private_path, 'backups'),
        os.path.join(site_public_path, 'files'),
        frappe.get_site_path('locks'),
    )
    for dir_path in needed:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
def add_module_defs(app):
    """Create a Module Def document for every module declared by `app`."""
    for module_name in frappe.get_module_list(app):
        module_def = frappe.new_doc("Module Def")
        module_def.app_name = app
        module_def.module_name = module_name
        module_def.save()
def remove_missing_apps():
    """Drop known-optional apps from installed_apps when their module is gone."""
    candidates = ('frappe_subscription',)
    installed_apps = frappe.get_installed_apps()
    for app in candidates:
        if app not in installed_apps:
            continue
        try:
            importlib.import_module(app)
        except ImportError:
            # app code was removed from disk; unregister it
            installed_apps.remove(app)
            frappe.db.set_global("installed_apps", json.dumps(installed_apps))
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resampler ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib import resampler
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ResamplerOpsTest(xla_test.XLATestCase):
    """XLA tests for the contrib resampler op: forward bilinear sampling and
    its data/warp gradients, covering single- and multi-channel images,
    batches, and out-of-bounds warp points."""

    def _assertForwardOpMatchesExpected(self, image_np, warp_np, expected):
        # Run the forward resampler on placeholders and compare with
        # dtype-dependent tolerances.
        with self.test_session() as sess, self.test_scope():
            input_image = array_ops.placeholder(image_np.dtype)
            warp = array_ops.placeholder(warp_np.dtype)
            resampled = resampler.resampler(input_image, warp, name='resampler')
            out = sess.run(resampled, {input_image: image_np, warp: warp_np})

            self.assertAllCloseAccordingToType(
                expected, out, rtol=5e-3, half_rtol=1e-2, bfloat16_rtol=3e-2)

    def _assertBackwardOpMatchesExpected(self, input_np, warp_np, grad_output_np,
                                         expected_grad_data, expected_grad_warp):
        # Evaluate resampler_grad and compare both the data gradient and the
        # warp gradient against the expected values.
        with self.cached_session() as sess, self.test_scope():
            input_image = array_ops.placeholder(input_np.dtype)
            warp = array_ops.placeholder(warp_np.dtype)
            grad_output = array_ops.placeholder(grad_output_np.dtype)

            grad_data, grad_warp = gen_resampler_ops.resampler_grad(
                input_image, warp, grad_output)

            grad_data_tf, grad_warp_tf = sess.run([grad_data, grad_warp], {
                input_image: input_np,
                warp: warp_np,
                grad_output: grad_output_np
            })

            self.assertAllCloseAccordingToType(
                expected_grad_warp, grad_warp_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)
            self.assertAllCloseAccordingToType(
                expected_grad_data, grad_data_tf, half_rtol=1e-2, bfloat16_rtol=3e-2)

    def testSimple(self):
        # 2x2 single-channel image, one fractional sample point per batch.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 1]
            input_data = [0, 5, 13, 54]
            input_np = np.array(input_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2]
            warp_data = [0.7, 0.6]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[26.42]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)

            grad_output = np.ones([1, 1], dtype=dtype)

            expected_grad_data = [[[[0.12], [0.27999997]], [[0.18000001],
                                                            [0.42000002]]]]

            expected_grad_warp = [[26.60000038, 38.20000076]]

            self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                                  expected_grad_data,
                                                  expected_grad_warp)

    def testMultiChannel(self):
        # Same geometry as testSimple but with a 3-channel (RGB) image.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 3]
            input_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
            input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2]
            warp_data = [0.7, 0.6]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[59.58000183, 146.94000244, 107.37999725]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)

            grad_output = np.ones([1, 3], dtype=dtype)

            expected_grad_data = [[[[0.12, 0.12, 0.12],
                                    [0.27999997, 0.27999997, 0.27999997]],
                                   [[0.18000001, 0.18000001, 0.18000001],
                                    [0.42000002, 0.42000002, 0.42000002]]]]

            expected_grad_warp = [[199, 30]]

            self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                                  expected_grad_data,
                                                  expected_grad_warp)

    def testBatch2Height3byWidth3RGB(self):
        # Batched case: two 3x3 RGB images, two sample points per image.
        for dtype in self.float_types:
            input_shape = [2, 3, 3, 3]
            input_rgb_data = [
                0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1, 30, 105, 2, 40, 115,
                3, 50, 125, 4, 60, 135, 5, 70, 145, 6, 0, 5, 13, 54, 135, 226, 37, 8,
                234, 90, 255, 1, 30, 105, 2, 40, 115, 3, 50, 125, 4, 60, 135, 5, 70,
                145, 6
            ]
            input_np = np.array(input_rgb_data, dtype=dtype).reshape(input_shape)

            # 2 batches and 2 samples for each batch.
            warp_shape = [2, 2, 2]
            warp_data = [0.7, 0.6, 1, 0.7, 0.9, 1.2, 1.3, 1.6]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)

            expected_forward = [[[43.92, 128.4, 65.86], [37.2, 114., 69.2]],
                                [[40.6, 122.8, 2.5], [51., 126, 4.1]]]

            self._assertForwardOpMatchesExpected(input_np, warp_np, expected_forward)

            expected_grad_data = [[[[0.12, 0.12, 0.12],
                                    [0.57999998, 0.57999998, 0.57999998],
                                    [0., 0., 0.]],
                                   [[0.18000001, 0.18000001, 0.18000001],
                                    [1.12, 1.12, 1.12], [0., 0., 0.]],
                                   [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
                                  [[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                   [[0.08000001, 0.08000001, 0.08000001],
                                    [0.99999988, 0.99999988, 0.99999988],
                                    [0.11999997, 0.11999997, 0.11999997]],
                                   [[0.02000001, 0.02000001, 0.02000001],
                                    [0.60000008, 0.60000008, 0.60000008],
                                    [0.17999998, 0.17999998, 0.17999998]]]]

            expected_grad_warp = [[[33.39999008, -96.20000458], [-26.10000229,
                                                                 -278.]],
                                  [[-162.99998474, 39.99999619], [21., 63.]]]

            grad_output = np.ones([2, 2, 3], dtype=dtype)
            self._assertBackwardOpMatchesExpected(input_np, warp_np, grad_output,
                                                  expected_grad_data,
                                                  expected_grad_warp)

    def testOutOfBoundWarps(self):
        # Warp points outside the image must resample to 0.
        # (x, y) are both less than 0.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 1]
            input_data = [10, 5, 13, 54]
            input_np = np.array(input_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2, 2]
            warp_data = [-1, -1, 0.7, 0.6]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[[0.0], [27.62]]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)

        # One of (x, y) is less than 0.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 1]
            input_data = [10, 5, 13, 54]
            input_np = np.array(input_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2, 2]
            warp_data = [-1, 0.1, 0.7, 0.6]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[[0.0], [27.62]]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)

        # Both of (x, y) are greater than image size.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 1]
            input_data = [10, 5, 13, 54]
            input_np = np.array(input_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2, 2]
            warp_data = [-0.1, 0.1, 1.2, 2.1]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[[0.0], [0.0]]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)

        # One of (x, y) is greater than image size.
        for dtype in self.float_types:
            input_shape = [1, 2, 2, 1]
            input_data = [10, 5, 13, 54]
            input_np = np.array(input_data, dtype=dtype).reshape(input_shape)

            warp_shape = [1, 2, 2]
            warp_data = [0.1, -0.1, 1.2, 0.1]
            warp_np = np.array(warp_data, dtype=dtype).reshape(warp_shape)
            expected = [[[0.0], [0.0]]]
            self._assertForwardOpMatchesExpected(input_np, warp_np, expected)
# standard TensorFlow test entry point
if __name__ == '__main__':
    test.main()
|
|
import dilap.core.base as db
import dilap.core.tools as dpr
import dilap.core.vector as dpv
import dilap.core.quaternion as dpq
import dilap.core.bbox as dbb
import dilap.core.ray as dr
import numpy,pdb
###############################################################################
### model is the basic unit of geometry for dilap
###
### it corresponds to a model space object
### it contains vertex data to build a model
### it can inherit transforms from a scenegraph
###
### it can write itself as a blender model
### it can write itself as a obj model
### it can write itself as a fbx model
###############################################################################
# reduce a list of models to a single model
def combine(models):
    """Fold every model in `models` into the first one via _consume; return it."""
    merged, rest = models[0], models[1:]
    for other in rest:
        merged._consume(other)
    return merged
unused_model_id = 0
class model(db.base):
def _dpid(self):
    """Assign this model the next unique id from the module-level counter."""
    global unused_model_id
    self.dpid = unused_model_id
    unused_model_id += 1
def __init__(self,*args,**kwargs):
    """Initialize per-instance buffers; any field may be seeded via kwargs (_def)."""
    self._dpid()
    # geometric data
    self._def('pcoords',[],**kwargs)    # vertex positions
    self._def('ncoords',[],**kwargs)    # vertex normals
    self._def('ucoords',[],**kwargs)    # vertex uv coordinates
    self._def('faces',[],**kwargs)      # faces as lists of vertex indices
    self._def('face_mats',[],**kwargs)  # per-face index into self.mats
    self._def('mats',['generic'],**kwargs)
    # non geometric data
    self._def('reps',{},**kwargs)       # cached representations
    self._def('filename','model.mesh',**kwargs)
# create an independent copy of this model
def copy(self):
    """Return a deep, independent copy of this model.

    Fixes three defects in the original implementation:
    * ``p.copy`` was stored without calling it, so the copy's coordinate
      lists were filled with bound methods instead of copied vectors,
    * the normals were passed under a misspelled keyword (``ncoordsd``),
      silently dropping them from the copy,
    * a leftover ``pdb.set_trace()`` dropped every caller into the
      debugger (the old "POSSIBLY CAUSES CRASHES?" note).
    """
    cps = [p.copy() for p in self.pcoords]
    cns = [n.copy() for n in self.ncoords]
    cus = [u.copy() for u in self.ucoords]
    cfs = [f[:] for f in self.faces]
    cfms = self.face_mats[:]
    cms = self.mats[:]
    cfn = self.filename.replace('.mesh','.copy.mesh')
    return model(pcoords = cps,ncoords = cns,ucoords = cus,
        faces = cfs,face_mats = cfms,mats = cms,filename = cfn)
# return 3d bounding box for this model
def _aaabbb(self):
    """Return an axis-aligned bounding box (dbb.bbox) of this model's vertices."""
    # project all vertex positions onto each world axis for min/max extents
    xproj = dpv.project_coords(self.pcoords,dpv.xhat)
    yproj = dpv.project_coords(self.pcoords,dpv.yhat)
    zproj = dpv.project_coords(self.pcoords,dpv.zhat)
    bb = dbb.bbox(xproj,yproj,zproj)
    return bb
# consume another model in place, adding all its data
# but leaving others data unmodified as opposed to _consume
# IT DOES NOT PREVENT SHARING OF DATA WITH OTHER!!
def _consume_preserving(self,other):
    """Merge other's geometry into self without mutating other; return self.

    Face index lists and the per-face material list are copied before
    remapping, but vertex objects are still shared with other.
    """
    # remap other's per-face material indices onto self.mats
    ofmats = other.face_mats[:]
    ofacnt = len(ofmats)
    for dx in range(len(other.mats)):
        omat = other.mats[dx]
        if not omat in self.mats:
            # new material: register it and point other's faces at it
            self.mats.append(omat)
            mdx = len(self.mats) - 1
            for fdx in range(ofacnt):
                if ofmats[fdx] == dx:
                    ofmats[fdx] = mdx
        else:
            # existing material: only remap when the index differs
            mdx = self.mats.index(omat)
            if not mdx == dx:
                for fdx in range(ofacnt):
                    if ofmats[fdx] == dx:
                        ofmats[fdx] = mdx
    # NOTE(review): remapping is in place; if a remapped index collides
    # with a later dx it could be remapped again — confirm this cannot
    # occur for the material sets used in practice.
    # shift copied face indices past self's vertices, then append everything
    other_offset = len(self.pcoords)
    otherfaces = [f[:] for f in other.faces]
    dpr.offset_faces(otherfaces,other_offset)
    self.pcoords.extend(other.pcoords)
    self.ncoords.extend(other.ncoords)
    self.ucoords.extend(other.ucoords)
    self.faces.extend(otherfaces)
    self.face_mats.extend(ofmats)
    self.reps = {}
    return self
# consume another model in place, adding all its data
def _consume(self,other):
    """Merge other's geometry into self in place; return self.

    NOTE(review): unlike _consume_preserving, this mutates other — its
    face index lists are offset in place and its face_mats entries are
    remapped — so other should be discarded afterwards.
    """
    # remap other's per-face material indices onto self.mats
    ofmats = other.face_mats
    ofacnt = len(ofmats)
    for dx in range(len(other.mats)):
        omat = other.mats[dx]
        if not omat in self.mats:
            self.mats.append(omat)
            mdx = len(self.mats) - 1
            for fdx in range(ofacnt):
                if ofmats[fdx] == dx:
                    ofmats[fdx] = mdx
        else:
            mdx = self.mats.index(omat)
            if not mdx == dx:
                for fdx in range(ofacnt):
                    if ofmats[fdx] == dx:
                        ofmats[fdx] = mdx
    # shift other's face indices past self's vertices, then append everything
    other_offset = len(self.pcoords)
    dpr.offset_faces(other.faces,other_offset)
    self.pcoords.extend(other.pcoords)
    self.ncoords.extend(other.ncoords)
    self.ucoords.extend(other.ucoords)
    self.faces.extend(other.faces)
    self.face_mats.extend(other.face_mats)
    self.reps = {}   # cached representations are stale after a merge
    return self
# return faces looked up in pcoords space
# faces is either None or faces indices
def _face_positions(self,faces = None):
    """Return, per requested face, the list of its position vectors.

    `faces` is None (all faces) or a list of face indices. Fix: the
    original looked up ``range(len(faces))`` instead of the given
    indices, so any selection other than the prefix [0..len-1] returned
    the wrong faces.
    """
    if faces is None:faces = self.faces
    else:faces = [self.faces[fdx] for fdx in faces]
    fa = []
    for f in faces:
        fa.append([self.pcoords[x] for x in f])
    return fa
# return faces looked up in ncoords space
# faces is either None or faces indices
def _face_normals(self,faces = None):
    """Return, per requested face, the list of its normal vectors.

    `faces` is None (all faces) or a list of face indices. Fix: same
    defect as _face_positions — the original iterated
    ``range(len(faces))`` instead of the supplied indices.
    """
    if faces is None:faces = self.faces
    else:faces = [self.faces[fdx] for fdx in faces]
    fa = []
    for f in faces:
        fa.append([self.ncoords[x] for x in f])
    return fa
# return geometry data organized as dict of materials
def _face_dict(self):
    """Return {material name: [faces using it]}, omitting unused materials."""
    fcnt = len(self.face_mats)
    fa = {}
    for mdx,ma in enumerate(self.mats):
        assigned = [self.faces[fmdx] for fmdx in range(fcnt)
            if self.face_mats[fmdx] == mdx]
        if assigned:fa[ma] = assigned
    return fa
#######################################################
#methods for modifying the models material data
#######################################################
# provide the index of a material
def _lookup_mat(self,m):
    """Return the index of material m in self.mats, registering it when new.

    None maps to index 0 (the default material).
    """
    if m is None:
        return 0
    if m not in self.mats:
        self.mats.append(m)
    return self.mats.index(m)
# assign material m to range of faces rng
def _assign_material(self,m,rng = None):
    """Assign material m (name or None) to the face indices in rng (default: all)."""
    m = self._lookup_mat(m)
    if rng is None:rng = range(len(self.faces))
    for dx in rng:self.face_mats[dx] = m
#######################################################
#######################################################
#methods for modifying uv coordinate data
#######################################################
# for range of faces rng, project uvs xy
def _project_uv_xy(self,rng = None):
    """Set each vertex uv in faces rng (default all) to its position's xy projection."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            p = self.pcoords[fdx]
            nu = p.copy().xy2d()
            self.ucoords[fdx] = nu

# for range of faces rng, project uvs yz
def _project_uv_yz(self,rng = None):
    """Set each vertex uv in faces rng (default all) to its position's yz projection."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            p = self.pcoords[fdx]
            nu = p.copy().yz2d()
            self.ucoords[fdx] = nu

# for range of faces rng, project uvs xz
def _project_uv_xz(self,rng = None):
    """Set each vertex uv in faces rng (default all) to its position's xz projection."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            p = self.pcoords[fdx]
            nu = p.copy().xz2d()
            self.ucoords[fdx] = nu

# for range of faces rng, project uvs flat
def _project_uv_flat(self,rng = None):
    """Project uvs per vertex onto the axis plane matching its normal.

    Vertices whose normal is not (near) one of the six axis directions
    are left untouched.
    """
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            p = self.pcoords[fdx]
            n = self.ncoords[fdx]
            # pick the projection plane perpendicular to the normal
            if dpv.near(n,dpv.nx()) or dpv.near(n,dpv.x()):
                nu = p.copy().yz2d()
            elif dpv.near(n,dpv.ny()) or dpv.near(n,dpv.y()):
                nu = p.copy().xz2d()
            elif dpv.near(n,dpv.nz()) or dpv.near(n,dpv.z()):
                nu = p.copy().xy2d()
            else:continue
            self.ucoords[fdx] = nu
# for range of faces rng,
# translate uvs along u coordinate by dx
def _translate_uv_u(self,rng,dx):
    """Shift the u component of every vertex uv in faces rng (None = all) by dx."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            u = self.ucoords[fdx]
            u.translate_x(dx)

# for range of faces rng,
# translate uvs along v coordinate by dy
def _translate_uv_v(self,rng,dy):
    """Shift the v component of every vertex uv in faces rng (None = all) by dy."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            u = self.ucoords[fdx]
            u.translate_y(dy)

# for range of faces rng,
# scale uvs along u coordinate by du
def _scale_uv_u(self,rng,du):
    """Scale the u component of every vertex uv in faces rng (None = all) by du."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            u = self.ucoords[fdx]
            u.scale_x(du)

# for range of faces rng,
# scale uvs along v coordinate by dv
def _scale_uv_v(self,rng,dv):
    """Scale the v component of every vertex uv in faces rng (None = all) by dv."""
    if rng is None:rng = range(len(self.faces))
    for nf in rng:
        face = self.faces[nf]
        for fdx in face:
            u = self.ucoords[fdx]
            u.scale_y(dv)
# given a tform for world space, scale uvs to local space
def _uvs_to_local(self,uv_ttf):
    """Multiply all uvs by the transform's xy scale (world -> local uv space)."""
    sx = uv_ttf.scl.x
    sy = uv_ttf.scl.y
    for uvc in self.ucoords:
        uvc.x *= sx
        uvc.y *= sy

# given a tform for world space, scale uvs to world space
def _uvs_to_world(self,uv_ttf):
    """Divide all uvs by the transform's xy scale (local -> world uv space)."""
    sx = uv_ttf.scl.x
    sy = uv_ttf.scl.y
    for uvc in self.ucoords:
        uvc.x /= sx
        uvc.y /= sy
#######################################################
#######################################################
#methods for modifying the models geometry data
#######################################################
# what if this respected existing points,
# instead of allowing duplicates, what if it maintained topological data
#
# should generate topo info as data comes in, because its easiest there...
#
# could allow data addition which does or does not default to duplicates
# duplicates are necessary for sharp edges via normals...
#
# add vertex data given coords,normals,uvs
def _add_vdata(self,ps,ns,us):
    """Append parallel lists of positions, normals and uvs to the vertex buffers."""
    self.pcoords.extend(ps)
    self.ncoords.extend(ns)
    self.ucoords.extend(us)

# add face data given face indices,materials
def _add_fdata(self,fs,fms):
    """Append faces (vertex-index lists) and their per-face material indices."""
    self.faces.extend(fs)
    self.face_mats.extend(fms)
# given 3 verts(vs) and the passed in normals(ns)
# return a list of certainly acceptable normals
def _def_normals(self,vs,ns):
    """Return per-vertex normals for vs: ns when given, else the face normal repeated.

    Raises ValueError unless vs has 3 or 4 vertices.
    """
    if ns is None:
        if len(vs) == 3:
            n = dpr.normal(*vs)
            nns = [n,n,n]
        elif len(vs) == 4:
            # the first three vertices define the quad's plane normal
            n = dpr.normal(*vs[:-1])
            nns = [n,n,n,n]
        else:
            print('_def_normals requires 3 or 4 vertices only')
            raise ValueError
    else:nns = ns
    return nns
# given 3 verts(vs) and the passed in uvs(us)
# return a list of certainly acceptable uvs
def _def_uvs(self,vs,us):
    """Return per-vertex uvs for vs: us when given, else unit-square defaults.

    Raises ValueError unless vs has 3 or 4 vertices.
    """
    if us is None:
        if len(vs) == 3:
            nus = [dpv.vector2d(0,1),dpv.vector2d(0,0),dpv.vector2d(1,0)]
        elif len(vs) == 4:
            nus = [dpv.vector2d(0,1),dpv.vector2d(0,0),
                dpv.vector2d(1,0),dpv.vector2d(1,1)]
        else:
            print('_def_uvs requires 3 or 4 vertices only')
            raise ValueError
    else:nus = us
    return nus
# given four points, add two new triangle faces
def _quad(self,v1,v2,v3,v4,ns = None,us = None,m = None):
    """Add quad v1..v4 as triangles (v1,v2,v3) and (v1,v3,v4); return new face range."""
    nfstart = len(self.faces)
    vs = [v1,v2,v3,v4]
    nns = self._def_normals(vs,ns)
    nus = self._def_uvs(vs,us)
    # split the quad's four normals/uvs across the two triangles
    us1 = [nus[0],nus[1],nus[2]]
    us2 = [nus[0],nus[2],nus[3]]
    ns1 = [nns[0],nns[1],nns[2]]
    ns2 = [nns[0],nns[2],nns[3]]
    self._triangle(v1,v2,v3,ns = ns1,us = us1,m = m)
    self._triangle(v1,v3,v4,ns = ns2,us = us2,m = m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# given three points, add new triangle face
def _triangle(self,v1,v2,v3,ns = None,us = None,m = None):
    """Add one triangle from copies of v1,v2,v3; return the new face index range."""
    nfstart = len(self.faces)
    nps = [v1.copy(),v2.copy(),v3.copy()]
    nns = self._def_normals(nps,ns)
    nus = self._def_uvs(nps,us)
    self._add_vdata(nps,nns,nus)
    # the new vertices were appended at the end of pcoords
    foffset = len(self.pcoords) - len(nps)
    nfs = [[foffset,foffset+1,foffset+2]]
    m = self._lookup_mat(m)
    nfms = [m]
    self._add_fdata(nfs,nfms)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# given a point apex and a list of points blade, add fan of tris
def _trifan(self,apex,blade,ns = None,us = None,m = None):
    """Fan triangles from apex across consecutive blade points; return new face range."""
    first_new = len(self.faces)
    for bdx in range(1,len(blade)):
        self._triangle(apex.copy(),blade[bdx-1].copy(),blade[bdx].copy(),ns,us,m)
    return range(first_new,len(self.faces))
# given one loop, fill with a fan of triangles
def _tripie(self,loop,ns = None,us = None,m = None):
    """Fill a closed loop with triangles fanned from its center of mass; return new face range."""
    nfstart = len(self.faces)
    lcom = dpv.center_of_mass(loop)
    tcnt = len(loop)
    for trdx in range(tcnt):
        c2dx = trdx
        c3dx = trdx+1
        if c3dx == tcnt: c3dx = 0   # wrap the last triangle back to the loop start
        c1 = lcom.copy()
        c2 = loop[c2dx].copy()
        c3 = loop[c3dx].copy()
        self._triangle(c1,c2,c3,ns,us,m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# raise ValueError if l1 and l2 differ in length
def _check_loop_equality(self,l1,l2):
    """Raise ValueError (after printing a message) when the two loops differ in length."""
    if len(l1) == len(l2):
        return
    print('_bridge loops must have equal length')
    raise ValueError
# given two loops of equal length, bridge with quads
def _bridge(self,loop1,loop2,ns = None,us = None,m = None):
    """Connect two equal-length loops with quads; return the new face range.

    Raises ValueError on length mismatch. NOTE(review): iteration starts
    at index 1, so the loops are treated as open strips — no quad joins
    the last point back to the first.
    """
    self._check_loop_equality(loop1,loop2)
    nfstart = len(self.faces)
    lcnt = len(loop1)
    for ldx in range(1,lcnt):
        v1 = loop1[ldx-1]
        v2 = loop2[ldx-1]
        v3 = loop2[ldx]
        v4 = loop1[ldx]
        self._quad(v1,v2,v3,v4,ns,us,m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# given two loops of equal length bridge with a spline extrusion
def _bridge_spline(self,loop1,loop2,n = 3,
        n1 = None,n2 = None,ns = None,us = None,m = None):
    """Bridge two equal-length loops with quads along per-point spline curves.

    n is the spline sample count; n1/n2 are the end tangent offsets,
    defaulting to the loops' face normals. Returns the new face range.

    Fix: the original called the bare name ``normal``, which is not
    defined in this module and raised NameError whenever n1 or n2 was
    omitted; the helper lives in dilap.core.tools (dpr), as used by
    _def_normals.
    """
    self._check_loop_equality(loop1,loop2)
    nfstart = len(self.faces)
    if n1 is None:n1 = dpr.normal(*loop1[:3])
    if n2 is None:n2 = dpr.normal(*loop2[:3]).flip()
    curves = []
    lcnt = len(loop1)
    for x in range(lcnt):
        # spline control points: loop points plus tangent-offset ends
        v2 = loop1[x].copy()
        v3 = loop2[x].copy()
        v1 = v2.copy().translate(n1)
        v4 = v3.copy().translate(n2)
        curve = dpv.spline(v1,v2,v3,v4,n)
        curves.append(curve)
    ccnt = len(curves)
    for y in range(1,ccnt):
        lp2 = curves[y-1]
        lp1 = curves[y]
        self._bridge(lp1,lp2,ns,us,m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# given a loop, incrementally fill with loops
# and then seal with tripie
def _bridge_patch(self,loop,n = 10,m = None):
    """Fill loop by bridging n shrinking rings toward its center, sealed by a tri fan.

    NOTE(review): mutates the caller's loop list (appends loop[0] to close it).
    """
    nfstart = len(self.faces)
    def move(oloop):
        # step every point one increment along its ray toward the center
        iloop = [l.copy() for l in oloop]
        [l.translate(ray) for l,ray in zip(iloop,rays)]
        self._bridge(iloop,oloop,m = m)
        return iloop
    com = dpv.center_of_mass(loop)
    loop.append(loop[0])
    # per-point rays covering 1/n of the distance to the center
    rays = [dpv.v1_v2(l,com).scale_u(1.0/n) for l in loop]
    for x in range(n):loop = move(loop)
    self._tripie(loop,m = m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# given a line of points make n faces between angles a1 and a2
def _revolve_z(self,loop,a1,a2,n,ns = None,us = None,m = None):
    """Revolve profile loop about the z axis from angle a1 to a2 in n bridged steps."""
    nfstart = len(self.faces)
    rotstep = (a2-a1)/float(n)
    for step in range(n):
        ta1 = a1+step*rotstep
        ta2 = a1+(step+1)*rotstep
        loop1 = [p.copy().rotate_z(ta1) for p in loop]
        loop2 = [p.copy().rotate_z(ta2) for p in loop]
        self._bridge(loop1,loop2,ns = ns,us = us,m = m)
    nfend = len(self.faces)
    return range(nfstart,nfend)
# for each point in the curve, produce a plane equation and
# properly project loop onto that plane
# then iterate over loops and bridge
#
# given a curve of points make faces to extrude loop along the curve
def _extrude(self,loop,curve,control,ctrl = None,ns = None,us = None,m = None):
    """Sweep profile loop along curve, bridging consecutive cross sections.

    control steers the loop orientation via dpr.orient_loop.
    NOTE(review): the ctrl parameter is accepted but never used —
    confirm with callers before removing it.
    """
    nfstart = len(self.faces)
    tangents = dpv.edge_tangents(curve)
    tangents.append(tangents[-1].copy())   # reuse the last tangent for the final point
    # build the first cross section at the start of the curve
    tangloop = [l.copy() for l in loop]
    tangloop = dpr.orient_loop(tangloop,tangents[0],control)
    tangloop = dpv.translate_coords(tangloop,curve[0])
    tailloop = dpr.project_coords_plane_along(
        tangloop,curve[0],tangents[0],tangents[0])
    n = len(curve)
    for step in range(1,n):
        c0,c1 = curve[step-1],curve[step]
        t0,t1 = tangents[step-1],tangents[step]
        # section plane normal = average of the adjacent edge tangents
        halft = dpv.midpoint(t0,t1).normalize()
        n = halft   # (rebinds n, shadowing the curve length — no longer needed)
        tangloop = [l.copy() for l in loop]
        tangloop = dpr.orient_loop(tangloop,t0,control)
        tangloop = dpv.translate_coords(tangloop,c1)
        tiploop = dpr.project_coords_plane_along(tangloop,c1,n,t0)
        self._bridge(tiploop,tailloop,ns = ns,us = us,m = m)
        tailloop = [p.copy() for p in tiploop]
    nfend = len(self.faces)
    return range(nfstart,nfend)
# for range of faces rng, flip each face and its normals
def _flip_faces(self,rng):
for nf in rng:
face = self.faces[nf]
face.reverse()
for fdx in face:
self.ncoords[fdx].flip()
#######################################################
#######################################################
#methods for transforming the model in world space
#######################################################
    def center(self):
        """Translate the model so its center of mass is at the origin;
        returns self for chaining."""
        com = dpv.center_of_mass(self.pcoords)
        self.translate(com.flip())
        return self
#######################################################
    def translate_x(self,dx):
        """Translate all position coords by dx along x; returns self."""
        dpv.translate_coords_x(self.pcoords,dx)
        return self
    def translate_y(self,dy):
        """Translate all position coords by dy along y; returns self."""
        dpv.translate_coords_y(self.pcoords,dy)
        return self
    def translate_z(self,dz):
        """Translate all position coords by dz along z; returns self."""
        dpv.translate_coords_z(self.pcoords,dz)
        return self
def translate_u(self,u):
trn = dpv.vector(u,u,u)
dpv.translate_coords(self.pcoords,trn)
return self
    def translate(self,v):
        """Translate all position coords by vector v; returns self."""
        dpv.translate_coords(self.pcoords,v)
        return self
def translate_faces(self,frange,v):
coords = []
for f in frange:
fpoints = [self.pcoords[fx] for fx in self.faces[f]]
coords.extend(fpoints)
dpv.translate_coords(coords,v)
return self
#######################################################
    def scale_x(self,sx):
        """Scale all position coords by sx along x; returns self."""
        dpv.scale_coords_x(self.pcoords,sx)
        return self
    def scale_y(self,sy):
        """Scale all position coords by sy along y; returns self."""
        dpv.scale_coords_y(self.pcoords,sy)
        return self
    def scale_z(self,sz):
        """Scale all position coords by sz along z; returns self."""
        dpv.scale_coords_z(self.pcoords,sz)
        return self
def scale_u(self,u):
scl = dpv.vector(u,u,u)
dpv.scale_coords(self.pcoords,scl)
return self
    def scale(self,v):
        """Scale all position coords componentwise by vector v;
        returns self."""
        dpv.scale_coords(self.pcoords,v)
        return self
#######################################################
    def rotate(self,q):
        """Rotate both position and normal coords by q (presumably a
        quaternion -- confirm against dpv.rotate_coords); returns self."""
        dpv.rotate_coords(self.pcoords,q)
        dpv.rotate_coords(self.ncoords,q)
        return self
    def rotate_x(self,rx):
        """Rotate position coords by rx about the x-axis; returns self.

        NOTE(review): unlike rotate(), the normals (ncoords) are not
        rotated here -- confirm whether that is intended.
        """
        dpv.rotate_x_coords(self.pcoords,rx)
        return self
    def rotate_y(self,ry):
        """Rotate position coords by ry about the y-axis; returns self.
        Normals are not rotated (see rotate_x note)."""
        dpv.rotate_y_coords(self.pcoords,ry)
        return self
    def rotate_z(self,rz):
        """Rotate position coords by rz about the z-axis; returns self.
        Normals are not rotated (see rotate_x note)."""
        dpv.rotate_z_coords(self.pcoords,rz)
        return self
#######################################################
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Spanner Hook.
"""
from typing import Callable, List, Optional
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.cloud.spanner_v1.client import Client
from google.cloud.spanner_v1.database import Database
from google.cloud.spanner_v1.instance import Instance
from google.cloud.spanner_v1.transaction import Transaction
from google.longrunning.operations_grpc_pb2 import Operation # noqa: F401
from airflow import AirflowException
from airflow.gcp.hooks.base import CloudBaseHook
class SpannerHook(CloudBaseHook):
    """
    Hook for Google Cloud Spanner APIs.
    All the methods in the hook where project_id is used must be called with
    keyword arguments rather than positional.
    """
    def __init__(self, gcp_conn_id: str = 'google_cloud_default', delegate_to: Optional[str] = None) -> None:
        super().__init__(gcp_conn_id, delegate_to)
        # Lazily-created, cached Spanner client (see _get_client).
        self._client: Optional[Client] = None
    def _get_client(self, project_id: str) -> Client:
        """
        Provides a client for interacting with the Cloud Spanner API.
        The client is created on first use and cached for later calls.
        :param project_id: The ID of the GCP project.
        :type project_id: str
        :return: Client
        :rtype: google.cloud.spanner_v1.client.Client
        """
        if not self._client:
            self._client = Client(
                project=project_id,
                credentials=self._get_credentials(),
                client_info=self.client_info
            )
        return self._client
    @CloudBaseHook.fallback_to_default_project_id
    def get_instance(
        self,
        instance_id: str,
        project_id: Optional[str] = None
    ) -> Optional[Instance]:
        """
        Gets information about a particular instance.
        :param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :return: Spanner instance, or None if the instance does not exist
        :rtype: google.cloud.spanner_v1.instance.Instance or None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
        if not instance.exists():
            return None
        return instance
    def _apply_to_instance(
        self, project_id: str,
        instance_id: str,
        configuration_name: str,
        node_count: int,
        display_name: str,
        func: Callable[[Instance], Operation]
    ) -> None:
        """
        Invokes a method on a given instance by applying a specified Callable.
        :param project_id: The ID of the GCP project that owns the Cloud Spanner
            database.
        :type project_id: str
        :param instance_id: The ID of the instance.
        :type instance_id: str
        :param configuration_name: Name of the instance configuration defining how the
            instance will be created. Required for instances which do not yet exist.
        :type configuration_name: str
        :param node_count: (Optional) Number of nodes allocated to the instance.
        :type node_count: int
        :param display_name: (Optional) The display name for the instance in the Cloud
            Console UI. (Must be between 4 and 30 characters.) If this value is not set
            in the constructor, will fall back to the instance ID.
        :type display_name: str
        :param func: Method of the instance to be called.
        :type func: Callable[google.cloud.spanner_v1.instance.Instance]
        """
        # noinspection PyUnresolvedReferences
        instance = self._get_client(project_id=project_id).instance(
            instance_id=instance_id, configuration_name=configuration_name,
            node_count=node_count, display_name=display_name)
        try:
            operation = func(instance)  # type: Operation
        except GoogleAPICallError as e:
            self.log.error('An error occurred: %s. Exiting.', e.message)
            raise e
        # Block until the long-running operation finishes before returning.
        if operation:
            result = operation.result()
            self.log.info(result)
    @CloudBaseHook.fallback_to_default_project_id
    def create_instance(
        self,
        instance_id: str,
        configuration_name: str,
        node_count: int,
        display_name: str,
        project_id: Optional[str] = None
    ) -> None:
        """
        Creates a new Cloud Spanner instance.
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param configuration_name: The name of the instance configuration defining how the
            instance will be created. Possible configuration values can be retrieved via
            https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
        :type configuration_name: str
        :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
            instance.
        :type node_count: int
        :param display_name: (Optional) The display name for the instance in the GCP
            Console. Must be between 4 and 30 characters. If this value is not set in
            the constructor, the name falls back to the instance ID.
        :type display_name: str
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :return: None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        self._apply_to_instance(project_id, instance_id, configuration_name,
                                node_count, display_name, lambda x: x.create())
    @CloudBaseHook.fallback_to_default_project_id
    def update_instance(
        self,
        instance_id: str,
        configuration_name: str,
        node_count: int,
        display_name: str,
        project_id: Optional[str] = None
    ) -> None:
        """
        Updates an existing Cloud Spanner instance.
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param configuration_name: The name of the instance configuration defining how the
            instance will be created. Possible configuration values can be retrieved via
            https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
        :type configuration_name: str
        :param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
            instance.
        :type node_count: int
        :param display_name: (Optional) The display name for the instance in the GCP
            Console. Must be between 4 and 30 characters. If this value is not set in
            the constructor, the name falls back to the instance ID.
        :type display_name: str
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :return: None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        self._apply_to_instance(project_id, instance_id, configuration_name,
                                node_count, display_name, lambda x: x.update())
    @CloudBaseHook.fallback_to_default_project_id
    def delete_instance(self, instance_id: str, project_id: Optional[str] = None) -> None:
        """
        Deletes an existing Cloud Spanner instance.
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :return: None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        # NOTE(review): instance_id is passed positionally here but by
        # keyword everywhere else in this hook.
        instance = self._get_client(project_id=project_id).instance(instance_id)
        try:
            instance.delete()
            return
        except GoogleAPICallError as e:
            self.log.error('An error occurred: %s. Exiting.', e.message)
            raise e
    @CloudBaseHook.fallback_to_default_project_id
    def get_database(
        self,
        instance_id: str,
        database_id: str,
        project_id: Optional[str] = None
    ) -> Optional[Database]:
        """
        Retrieves a database in Cloud Spanner. If the database does not exist
        in the specified instance, it returns None.
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param database_id: The ID of the database in Cloud Spanner.
        :type database_id: str
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        :return: Database object or None if database does not exist
        :rtype: google.cloud.spanner_v1.database.Database or None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        instance = self._get_client(project_id=project_id).instance(
            instance_id=instance_id)
        if not instance.exists():
            raise AirflowException("The instance {} does not exist in project {} !".
                                   format(instance_id, project_id))
        database = instance.database(database_id=database_id)
        if not database.exists():
            return None
        return database
    @CloudBaseHook.fallback_to_default_project_id
    def create_database(
        self,
        instance_id: str,
        database_id: str,
        ddl_statements: List[str],
        project_id: Optional[str] = None
    ) -> None:
        """
        Creates a new database in Cloud Spanner.
        :type project_id: str
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param database_id: The ID of the database to create in Cloud Spanner.
        :type database_id: str
        :param ddl_statements: The string list containing DDL for the new database.
        :type ddl_statements: list[str]
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :return: None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        instance = self._get_client(project_id=project_id).instance(
            instance_id=instance_id)
        if not instance.exists():
            raise AirflowException("The instance {} does not exist in project {} !".
                                   format(instance_id, project_id))
        database = instance.database(database_id=database_id,
                                     ddl_statements=ddl_statements)
        try:
            operation = database.create()  # type: Operation
        except GoogleAPICallError as e:
            self.log.error('An error occurred: %s. Exiting.', e.message)
            raise e
        # Block until the long-running create operation finishes.
        if operation:
            result = operation.result()
            self.log.info(result)
    @CloudBaseHook.fallback_to_default_project_id
    def update_database(
        self,
        instance_id: str,
        database_id: str,
        ddl_statements: List[str],
        project_id: Optional[str] = None,
        operation_id: Optional[str] = None
    ) -> None:
        """
        Updates DDL of a database in Cloud Spanner.
        :type project_id: str
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param database_id: The ID of the database in Cloud Spanner.
        :type database_id: str
        :param ddl_statements: The string list containing DDL for the new database.
        :type ddl_statements: list[str]
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :param operation_id: (Optional) The unique per database operation ID that can be
            specified to implement idempotency check.
        :type operation_id: str
        :return: None
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        instance = self._get_client(project_id=project_id).instance(
            instance_id=instance_id)
        if not instance.exists():
            raise AirflowException("The instance {} does not exist in project {} !".
                                   format(instance_id, project_id))
        database = instance.database(database_id=database_id)
        try:
            operation = database.update_ddl(
                ddl_statements=ddl_statements, operation_id=operation_id)
            if operation:
                result = operation.result()
                self.log.info(result)
            return
        except AlreadyExists as e:
            # A replayed request with the same operation_id means the DDL
            # was already applied; treat it as success (idempotency).
            if e.code == 409 and operation_id in e.message:
                self.log.info("Replayed update_ddl message - the operation id %s "
                              "was already done before.", operation_id)
                return
        except GoogleAPICallError as e:
            self.log.error('An error occurred: %s. Exiting.', e.message)
            raise e
    @CloudBaseHook.fallback_to_default_project_id
    def delete_database(self, instance_id: str, database_id, project_id: Optional[str] = None) -> bool:
        """
        Drops a database in Cloud Spanner.
        :type project_id: str
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param database_id: The ID of the database in Cloud Spanner.
        :type database_id: str
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :return: True if everything succeeded
        :rtype: bool
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        instance = self._get_client(project_id=project_id).\
            instance(instance_id=instance_id)
        if not instance.exists():
            raise AirflowException("The instance {} does not exist in project {} !".
                                   format(instance_id, project_id))
        database = instance.database(database_id=database_id)
        if not database.exists():
            # Already gone: report non-action rather than raising.
            self.log.info(
                "The database %s is already deleted from instance %s. Exiting.",
                database_id, instance_id
            )
            return False
        try:
            database.drop()  # pylint: disable=E1111
        except GoogleAPICallError as e:
            self.log.error('An error occurred: %s. Exiting.', e.message)
            raise e
        return True
    @CloudBaseHook.fallback_to_default_project_id
    def execute_dml(
        self,
        instance_id: str,
        database_id: str,
        queries: List[str],
        project_id: Optional[str] = None,
    ) -> None:
        """
        Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
        :param instance_id: The ID of the Cloud Spanner instance.
        :type instance_id: str
        :param database_id: The ID of the database in Cloud Spanner.
        :type database_id: str
        :param queries: The queries to execute.
        :type queries: List[str]
        :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
            database. If set to None or missing, the default project_id from the GCP connection is used.
        :type project_id: str
        """
        if not project_id:
            raise ValueError("The project_id should be set")
        self._get_client(project_id=project_id).instance(instance_id=instance_id).\
            database(database_id=database_id).run_in_transaction(
                lambda transaction: self._execute_sql_in_transaction(transaction, queries))
    @staticmethod
    def _execute_sql_in_transaction(transaction: Transaction, queries: List[str]):
        # Run each DML statement within the single supplied transaction.
        for sql in queries:
            transaction.execute_update(sql)
|
|
"""
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
                     discriminant_analysis, random_projection)
# Use only digit classes 0-5; X holds flattened 8x8 images, shape (n_samples, 64).
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30  # neighborhood size shared by the neighbors-based embeddings
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
    """Plot a 2D embedding X in a new figure, drawing each sample as its
    digit label (colored by class) and overlaying image thumbnails where
    there is enough empty space."""
    lo, hi = np.min(X, 0), np.max(X, 0)
    X = (X - lo) / (hi - lo)
    plt.figure()
    ax = plt.subplot(111)
    for idx in range(X.shape[0]):
        plt.text(X[idx, 0], X[idx, 1], str(digits.target[idx]),
                 color=plt.cm.Set1(y[idx] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for idx in range(digits.data.shape[0]):
            dist = np.sum((X[idx] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[idx]]]
            thumbnail = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[idx], cmap=plt.cm.gray_r),
                X[idx])
            ax.add_artist(thumbnail)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
# 10x10 pixel cells: each 8x8 digit image gets a 1-pixel border all around
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
    ix = 10 * i + 1
    for j in range(n_img_per_row):
        iy = 10 * j + 1
        img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
               "Principal Components projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
# add a tiny diagonal ridge so the within-class covariance is invertible
X2.flat[::X.shape[1] + 1] += 0.01  # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
               "Linear Discriminant projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
               "Isomap projection of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
               "Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
               "Modified Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
               "Hessian Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
               "Local Tangent Space Alignment of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
               "MDS embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
                                       max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
# the tree embedding is high-dimensional; reduce it to 2D with TruncatedSVD
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
               "Random forest embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Laplacian Eigenmap")
# sklearn.manifold implements Laplacian Eigenmaps as SpectralEmbedding;
# there is no manifold.LaplacianEigenmap class, so the previous call
# raised AttributeError at runtime.
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
                                      eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
               "Spectral embedding of the digits (time %.2fs)" %
               (time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
# init='pca' (not the default) makes the embedding reproducible/stable
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
               "t-SNE embedding of the digits (time %.2fs)" %
               (time() - t0))
plt.show()
|
|
"""
[2020-02-03] Modified version of the original qcodes.plots.colors
Mofied by Victor Negirneac for Measurement Control
It modules makes available all the colors maps from the qcodes, context menu of
the color bar from pyqtgraph, the circular colormap created by me (Victo),
and the reversed version of all of them.
Feel free to add new colors
See "make_qcodes_anglemap" and "make_anglemap45_colorlist" below to get you
started.
"""
from pycqed.analysis.tools.plotting import make_anglemap45_colorlist
# default colors and colorscales, taken from plotly
# Qualitative palette cycled through for line/scatter traces.
color_cycle = [
    "#1f77b4",  # muted blue
    "#ff7f0e",  # safety orange
    "#2ca02c",  # cooked asparagus green
    "#d62728",  # brick red
    "#9467bd",  # muted purple
    "#8c564b",  # chestnut brown
    "#e377c2",  # raspberry yogurt pink
    "#7f7f7f",  # middle gray
    "#bcbd22",  # curry yellow-green
    "#17becf",  # blue-teal
]
# Plotly-style colorscales: name -> list of [position, color] stops, where
# position is in [0, 1] and color is "rgb(r,g,b)" or "#rrggbb".
colorscales_raw = {
    "Greys": [[0, "rgb(0,0,0)"], [1, "rgb(255,255,255)"]],
    "YlGnBu": [
        [0, "rgb(8, 29, 88)"],
        [0.125, "rgb(37, 52, 148)"],
        [0.25, "rgb(34, 94, 168)"],
        [0.375, "rgb(29, 145, 192)"],
        [0.5, "rgb(65, 182, 196)"],
        [0.625, "rgb(127, 205, 187)"],
        [0.75, "rgb(199, 233, 180)"],
        [0.875, "rgb(237, 248, 217)"],
        [1, "rgb(255, 255, 217)"],
    ],
    "Greens": [
        [0, "rgb(0, 68, 27)"],
        [0.125, "rgb(0, 109, 44)"],
        [0.25, "rgb(35, 139, 69)"],
        [0.375, "rgb(65, 171, 93)"],
        [0.5, "rgb(116, 196, 118)"],
        [0.625, "rgb(161, 217, 155)"],
        [0.75, "rgb(199, 233, 192)"],
        [0.875, "rgb(229, 245, 224)"],
        [1, "rgb(247, 252, 245)"],
    ],
    "YlOrRd": [
        [0, "rgb(128, 0, 38)"],
        [0.125, "rgb(189, 0, 38)"],
        [0.25, "rgb(227, 26, 28)"],
        [0.375, "rgb(252, 78, 42)"],
        [0.5, "rgb(253, 141, 60)"],
        [0.625, "rgb(254, 178, 76)"],
        [0.75, "rgb(254, 217, 118)"],
        [0.875, "rgb(255, 237, 160)"],
        [1, "rgb(255, 255, 204)"],
    ],
    "bluered": [[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
    # modified RdBu based on
    # www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
    "RdBu": [
        [0, "rgb(5, 10, 172)"],
        [0.35, "rgb(106, 137, 247)"],
        [0.5, "rgb(190,190,190)"],
        [0.6, "rgb(220, 170, 132)"],
        [0.7, "rgb(230, 145, 90)"],
        [1, "rgb(178, 10, 28)"],
    ],
    # Scale for non-negative numeric values
    "Reds": [
        [0, "rgb(220, 220, 220)"],
        [0.2, "rgb(245, 195, 157)"],
        [0.4, "rgb(245, 160, 105)"],
        [1, "rgb(178, 10, 28)"],
    ],
    # Scale for non-positive numeric values
    "Blues": [
        [0, "rgb(5, 10, 172)"],
        [0.35, "rgb(40, 60, 190)"],
        [0.5, "rgb(70, 100, 245)"],
        [0.6, "rgb(90, 120, 245)"],
        [0.7, "rgb(106, 137, 247)"],
        [1, "rgb(220, 220, 220)"],
    ],
    "picnic": [
        [0, "rgb(0,0,255)"],
        [0.1, "rgb(51,153,255)"],
        [0.2, "rgb(102,204,255)"],
        [0.3, "rgb(153,204,255)"],
        [0.4, "rgb(204,204,255)"],
        [0.5, "rgb(255,255,255)"],
        [0.6, "rgb(255,204,255)"],
        [0.7, "rgb(255,153,255)"],
        [0.8, "rgb(255,102,204)"],
        [0.9, "rgb(255,102,102)"],
        [1, "rgb(255,0,0)"],
    ],
    "rainbow": [
        [0, "rgb(150,0,90)"],
        [0.125, "rgb(0, 0, 200)"],
        [0.25, "rgb(0, 25, 255)"],
        [0.375, "rgb(0, 152, 255)"],
        [0.5, "rgb(44, 255, 150)"],
        [0.625, "rgb(151, 255, 0)"],
        [0.75, "rgb(255, 234, 0)"],
        [0.875, "rgb(255, 111, 0)"],
        [1, "rgb(255, 0, 0)"],
    ],
    "portland": [
        [0, "rgb(12,51,131)"],
        [0.25, "rgb(10,136,186)"],
        [0.5, "rgb(242,211,56)"],
        [0.75, "rgb(242,143,56)"],
        [1, "rgb(217,30,30)"],
    ],
    "jet": [
        [0, "rgb(0,0,131)"],
        [0.125, "rgb(0,60,170)"],
        [0.375, "rgb(5,255,255)"],
        [0.625, "rgb(255,255,0)"],
        [0.875, "rgb(250,0,0)"],
        [1, "rgb(128,0,0)"],
    ],
    "hot": [
        [0, "rgb(0,0,0)"],
        [0.3, "rgb(230,0,0)"],
        [0.6, "rgb(255,210,0)"],
        [1, "rgb(255,255,255)"],
    ],
    "blackbody": [
        [0, "rgb(0,0,0)"],
        [0.2, "rgb(230,0,0)"],
        [0.4, "rgb(230,210,0)"],
        [0.7, "rgb(255,255,255)"],
        [1, "rgb(160,200,255)"],
    ],
    "earth": [
        [0, "rgb(0,0,130)"],
        [0.1, "rgb(0,180,180)"],
        [0.2, "rgb(40,210,40)"],
        [0.4, "rgb(230,230,50)"],
        [0.6, "rgb(120,70,20)"],
        [1, "rgb(255,255,255)"],
    ],
    "electric": [
        [0, "rgb(0,0,0)"],
        [0.15, "rgb(30,0,100)"],
        [0.4, "rgb(120,0,100)"],
        [0.6, "rgb(160,90,0)"],
        [0.8, "rgb(230,200,0)"],
        [1, "rgb(255,250,220)"],
    ],
    "viridis": [
        [0, "#440154"],
        [0.06274509803921569, "#48186a"],
        [0.12549019607843137, "#472d7b"],
        [0.18823529411764706, "#424086"],
        [0.25098039215686274, "#3b528b"],
        [0.3137254901960784, "#33638d"],
        [0.3764705882352941, "#2c728e"],
        [0.4392156862745098, "#26828e"],
        [0.5019607843137255, "#21918c"],
        [0.5647058823529412, "#1fa088"],
        [0.6274509803921569, "#28ae80"],
        [0.6901960784313725, "#3fbc73"],
        [0.7529411764705882, "#5ec962"],
        [0.8156862745098039, "#84d44b"],
        [0.8784313725490196, "#addc30"],
        [0.9411764705882353, "#d8e219"],
        [1, "#fde725"],
    ],
}
# Extracted https://github.com/pyqtgraph/pyqtgraph/blob/develop/pyqtgraph/graphicsItems/GradientEditorItem.py
# pyqtgraph-style gradients: name -> list of (position, (r, g, b, a)) stops.
# NOTE(review): unlike colorscales_raw, some stop lists here are NOT sorted
# by position (e.g. "thermal", "flame") -- see the post-processing loops
# below, which index stops by position order.
Gradients = {
    "thermal": [
        (0.3333, (185, 0, 0, 255)),
        (0.6666, (255, 220, 0, 255)),
        (1, (255, 255, 255, 255)),
        (0, (0, 0, 0, 255)),
    ],
    "flame": [
        (0.2, (7, 0, 220, 255)),
        (0.5, (236, 0, 134, 255)),
        (0.8, (246, 246, 0, 255)),
        (1.0, (255, 255, 255, 255)),
        (0.0, (0, 0, 0, 255)),
    ],
    "yellowy": [
        (0.0, (0, 0, 0, 255)),
        (0.2328863796753704, (32, 0, 129, 255)),
        (0.8362738179251941, (255, 255, 0, 255)),
        (0.5257586450247, (115, 15, 255, 255)),
        (1.0, (255, 255, 255, 255)),
    ],
    "bipolar": [
        (0.0, (0, 255, 255, 255)),
        (1.0, (255, 255, 0, 255)),
        (0.5, (0, 0, 0, 255)),
        (0.25, (0, 0, 255, 255)),
        (0.75, (255, 0, 0, 255)),
    ],
    "spectrum": [
        (1.0, (255, 0, 255, 255)),
        (0.0, (255, 0, 0, 255)),
    ],  # this is a hsv, didn't patch qcodes to allow the specification of that part...
    "cyclic": [
        (0.0, (255, 0, 4, 255)),
        (1.0, (255, 0, 0, 255)),
    ],  # this is a hsv, didn't patch qcodes to allow the specification of that part...
    # "greyclip": [
    #     (0.0, (0, 0, 0, 255)),
    #     (0.99, (255, 255, 255, 255)),
    #     (1.0, (255, 0, 0, 255)),
    # ],
    "grey": [(0.0, (0, 0, 0, 255)), (1.0, (255, 255, 255, 255))],
    # Perceptually uniform sequential colormaps from Matplotlib 2.0
    "viridis": [
        (0.0, (68, 1, 84, 255)),
        (0.25, (58, 82, 139, 255)),
        (0.5, (32, 144, 140, 255)),
        (0.75, (94, 201, 97, 255)),
        (1.0, (253, 231, 36, 255)),
    ],
    "inferno": [
        (0.0, (0, 0, 3, 255)),
        (0.25, (87, 15, 109, 255)),
        (0.5, (187, 55, 84, 255)),
        (0.75, (249, 142, 8, 255)),
        (1.0, (252, 254, 164, 255)),
    ],
    "plasma": [
        (0.0, (12, 7, 134, 255)),
        (0.25, (126, 3, 167, 255)),
        (0.5, (203, 71, 119, 255)),
        (0.75, (248, 149, 64, 255)),
        (1.0, (239, 248, 33, 255)),
    ],
    "magma": [
        (0.0, (0, 0, 3, 255)),
        (0.25, (80, 18, 123, 255)),
        (0.5, (182, 54, 121, 255)),
        (0.75, (251, 136, 97, 255)),
        (1.0, (251, 252, 191, 255)),
    ],
}
def make_qcodes_anglemap45():
    """Build a plotly-style colorscale (list of [position, "rgb(...)"]
    stops evenly spaced on [0, 1]) from the circular anglemap colormap."""
    cols = make_anglemap45_colorlist(N=9, use_hpl=False)
    denom = len(cols) - 1
    scale = []
    for idx, col in enumerate(cols):
        rgb = tuple(int(channel * 255) for channel in col)
        scale.append([idx / denom, "rgb" + repr(rgb)])
    return scale
# Register the circular anglemap as one of the raw plotly-style colorscales.
qcodes_anglemap45 = make_qcodes_anglemap45()
colorscales_raw["anglemap45"] = qcodes_anglemap45
def make_rgba(colorscale):
    """Convert [(pos, colorstring), ...] stops into pyqtgraph-style
    [(pos, (r, g, b, a)), ...] stops."""
    converted = []
    for pos, colorstring in colorscale:
        converted.append((pos, one_rgba(colorstring)))
    return converted
def one_rgba(c):
    """
    Convert a single color value to an ``(r, g, b, a)`` tuple.

    Input can be an rgb string ``'rgb(r,g,b)'`` or ``'#rrggbb'``; anything
    else raises ``ValueError``.
    If we decide we want more we can make more, but for now this is just
    to convert plotly colorscales to pyqtgraph tuples.
    """
    if c[0] == "#" and len(c) == 7:
        # '#rrggbb' hex form: two hex digits per channel, fully opaque.
        red, green, blue = (int(c[pos:pos + 2], 16) for pos in (1, 3, 5))
        return (red, green, blue, 255)
    if c[:4] == "rgb(":
        # 'rgb(r,g,b)' form: strip the wrapper, parse the three channels.
        channels = [int(part) for part in c[4:-1].split(",")]
        return tuple(channels) + (255,)
    raise ValueError("one_rgba only supports rgb(r,g,b) and #rrggbb colors")
# Materialize every hand-written colorscale into (position, rgba-tuple) stops.
colorscales = {}
for scale_name, scale in colorscales_raw.items():
    colorscales[scale_name] = make_rgba(scale)
# Merge in the pyqtgraph gradient presets under their own names.
# NOTE(review): assumes Gradients values already use the same stop format as
# make_rgba output -- confirm against the pyqtgraph version in use.
for scale_name, scale in Gradients.items():
    colorscales[scale_name] = scale
# Register a "<name>_reversed" twin for every scale: positions are mirrored
# (stop i takes the position of stop last-i) while colors keep their order,
# which flips the gradient direction.
for name, scale in list(colorscales.items()):
    last_idx = len(scale) - 1
    reversed_scale = [
        (scale[last_idx - i][0], color[1]) for i, color in enumerate(scale)
    ]
    colorscales[name + "_reversed"] = reversed_scale
# Also generate "<name>_clip_low"/"<name>_clip_high" variants of every scale
# (including the reversed ones): the outermost clip_percent of the range is
# painted solid green so clipped/out-of-range data is easy to spot.
for name, scale in list(colorscales.items()):
    clip_percent = 0.03
    clip_color = (0, 255, 0, 255)
    # Low clip: duplicate the first stop, force position 0.0 to the clip
    # color, and move the duplicate to clip_percent (or halfway to the next
    # stop when that stop is already closer than clip_percent).
    scale_low = list(scale)
    scale_low.insert(1, scale[0])
    scale_low[0] = (0.0, clip_color)
    if scale[1][0] < clip_percent:
        scale_low[1] = ((scale[1][0] + scale[0][0]) / 2, scale_low[1][1])
    else:
        scale_low[1] = (clip_percent, scale_low[1][1])
    colorscales[name + "_clip_low"] = scale_low
    # High clip: mirror of the low-clip construction at position 1.0.
    scale_high = list(scale)
    scale_high.insert(-1, scale[-1])
    scale_high[-1] = (1.0, clip_color)
    if scale[-2][0] > 1 - clip_percent:
        scale_high[-2] = ((scale[-1][0] + scale[-2][0]) / 2, scale_high[-2][1])
    else:
        scale_high[-2] = (1 - clip_percent, scale_high[-2][1])
    colorscales[name + "_clip_high"] = scale_high
|
|
import hashlib
import math
import os
import xml.dom.minidom

import cross3d
from cross3d.constants import ControllerType, TangentType, ExtrapolationType
from framerange import FrameRange
from valuerange import ValueRange
class Key(object):
    """A single animation keyframe with bezier tangent information.

    Tangent angles are stored in radians; tangent lengths are expressed in
    the curve's (time, value) units.
    """

    def __init__(self, **kwargs):
        # (attribute, cast, default) triples, applied in declaration order.
        for attribute, cast, default in (
            ('value', float, 0.0),
            ('time', float, 0.0),
            # Tangent angles are stored in radians.
            ('inTangentAngle', float, 0.0),
            ('outTangentAngle', float, 0.0),
            ('inTangentType', int, TangentType.Automatic),
            ('outTangentType', int, TangentType.Automatic),
            ('outTangentLength', float, 0.0),
            ('inTangentLength', float, 0.0),
            # Normalized tangents scale based on the distance to the key they are pointing to.
            ('normalizedTangents', bool, True),
            # Broken key allows to manipulate each tangent individually.
            ('brokenTangents', bool, False),
        ):
            setattr(self, attribute, cast(kwargs.get(attribute, default)))

    @property
    def inTangentPoint(self):
        """(time, value) position of the incoming tangent handle."""
        dx = self.inTangentLength * math.cos(self.inTangentAngle)
        dy = self.inTangentLength * math.sin(self.inTangentAngle)
        return self.time - dx, self.value + dy

    @property
    def outTangentPoint(self):
        """(time, value) position of the outgoing tangent handle."""
        dx = self.outTangentLength * math.cos(self.outTangentAngle)
        dy = self.outTangentLength * math.sin(self.outTangentAngle)
        return self.time + dx, self.value + dy
class FCurve(object):
    """A named animation function curve made of bezier :class:`Key` objects.

    Values between keys are evaluated with cubic bezier interpolation and
    values outside the keyed range follow the in/out extrapolation types.
    """

    def __init__(self, **kwargs):
        self._name = unicode(kwargs.get('name', ''))
        # NOTE: the keyword is deliberately spelled 'tpe' so call sites do not
        # shadow the 'type' builtin.
        self._type = int(kwargs.get('tpe', ControllerType.BezierFloat))
        self._keys = []
        self._inExtrapolation = int(kwargs.get('inExtrapolation', ExtrapolationType.Constant))
        self._outExtrapolation = int(kwargs.get('outExtrapolation', ExtrapolationType.Constant))

    def valueAtTime(self, time):
        """Returns the value of the fcurve at the specified time

        Args:
            time (float): time at which to evaluate the fcurve.

        Returns:
            float: value of the fcurve at the specified time.
        """
        # we need to ensure keys is sorted properly for this to work.
        sortedKeys = sorted(self._keys, key=lambda k: k.time)
        # If the time specified is out of the range of keyframes, we'll need to
        # extrapolate to find the value. This will be split into its own fn since
        # it gets a bit messy.
        if time < sortedKeys[0].time or time > sortedKeys[-1].time:
            return self.extrapolateValue(time)
        i = 0
        t = sortedKeys[i].time
        maxI = len(sortedKeys) - 1
        while t < time and i < maxI:
            i += 1
            t = sortedKeys[i].time
        if t == time:
            # time is at a key -- we can just return that key's value.
            return sortedKeys[i].value
        else:
            # we should have two keys that our time falls between
            k0 = sortedKeys[i - 1]
            k1 = sortedKeys[i]
            return self.bezierEvaluation(k0, k1, time)

    def plot(self, startValue=None, endValue=None, resolution=1.0, plotHandles=True):
        """Uses matplotlib to generate a plot of the curve, primarily useful for debugging purposes.

        Args:
            startValue (float): Starting value for portion of the curve to sample.
            endValue (float): Ending value for portion of the curve to sample.
            resolution (float): Frequency with which to sample the curve.
            plotHandles (bool): Whether to also draw the key tangent handles.
        """
        fullRange = self.range()
        startValue = fullRange[0] if startValue is None else startValue
        endValue = fullRange[1] if endValue is None else endValue
        import numpy as np
        import matplotlib.pyplot as plt
        # plot handles, if asked
        if plotHandles:
            for key in self._keys:
                points = zip(key.inTangentPoint, (key.time, key.value), key.outTangentPoint)
                plt.plot(*points, marker='o', color='blue')
                plt.plot(*points, color='black')
        # plot line
        x = np.arange(startValue, endValue, resolution)
        f = np.vectorize(self.valueAtTime)
        plt.plot(x, f(x))
        plt.show()

    def plotted(self, rng, step=1):
        """Returns a new curve with this curve baked to one key per sample.

        Args:
            rng (tuple): (start, end) time range to sample; end is exclusive.
            step (int): sampling interval in time units.

        Returns:
            FCurve: the baked curve.
        """
        plotted = FCurve()
        for value in xrange(rng[0], rng[1], step):
            # Bug fix: keys used to be added to self (mutating this curve)
            # while the empty 'plotted' curve was returned.
            plotted.addKey(time=value, value=self.valueAtTime(value))
        return plotted

    def offset(self, value, attr='time', rnd=False):
        """Offsets every key's `attr` ('time' or 'value') by `value`, optionally rounding."""
        for key in self._keys:
            v = getattr(key, attr) + float(value)
            v = round(v) if rnd else v
            setattr(key, attr, v)

    def keys(self):
        """Returns the internal list of keys (not a copy)."""
        return self._keys

    def scale(self, value, attr='time', pivot=0.0, rnd=False):
        """Scales every key's `attr` around `pivot`, rescaling tangents to match.

        Args:
            value (float): scale factor.
            attr (str): 'time' or 'value'.
            pivot (float): center of the scale.
            rnd (bool): round the scaled attribute to the nearest integer.
        """
        for key in self._keys:
            # Scaling the attribute.
            v = (getattr(key, attr) - pivot) * value + pivot
            v = round(v) if rnd else v
            setattr(key, attr, v)
            # Getting the tangents time and value.
            inTangentTime = math.cos(key.inTangentAngle) * key.inTangentLength
            inTangentValue = math.sin(key.inTangentAngle) * key.inTangentLength
            outTangentTime = math.cos(key.outTangentAngle) * key.outTangentLength
            outTangentValue = math.sin(key.outTangentAngle) * key.outTangentLength
            # Scaling the right tangent components.
            if attr == 'time':
                inTangentTime *= value
                outTangentTime *= value
            elif attr == 'value':
                inTangentValue *= value
                outTangentValue *= value
            # Setting the tangent data on the keys.
            key.inTangentAngle = math.atan2(inTangentValue, inTangentTime)
            key.inTangentLength = math.sqrt(inTangentValue**2 + inTangentTime**2)
            key.outTangentAngle = math.atan2(outTangentValue, outTangentTime)
            key.outTangentLength = math.sqrt(outTangentValue**2 + outTangentTime**2)

    def remap(self, rng, attr='time', rnd=False):
        """Scales and offsets the keys so `attr` spans exactly `rng` (first to last key)."""
        start = getattr(self._keys[0], attr)
        end = getattr(self._keys[-1], attr)
        # Difference is not the same as duration.
        difference = float(end - start)
        ratio = (rng[1] - rng[0]) / difference
        self.scale(ratio, attr=attr, rnd=rnd, pivot=start)
        self.offset(rng[0] - start, attr=attr, rnd=rnd)

    def round(self, attr='time'):
        """Rounds every key's `attr` to the nearest integer."""
        for key in self._keys:
            v = getattr(key, attr)
            setattr(key, attr, round(v))

    def invert(self, conversionRatio=1.0):
        """ Inverse time and values of each key.

        Args:
            conversionRatio(float): The conversion ratio to go from Y to X.
            For example you might want to inverse a curve where frames on X are expressed in seconds on Y.
            The X values will need to be divided by a frame rate to become meaningful Y values.
            On the other hand Y values will have to be multiplied by that same ratio to become meaningful X values.
        """
        # Before we flip we rationalize the Y axis based on provided conversion ratio.
        if conversionRatio and conversionRatio != 1.0:
            self.scale(conversionRatio, attr='value')
        for key in self._keys:
            time = key.time
            value = key.value
            # Swapping time and value.
            key.time = value
            key.value = time
            # Flipping tangents based on a 45 degrees line.
            key.inTangentAngle = math.pi / 2.0 - key.inTangentAngle
            key.outTangentAngle = math.pi / 2.0 - key.outTangentAngle
        # We revert the scale of the Y axis.
        if conversionRatio and conversionRatio != 1.0:
            self.scale(1 / conversionRatio, attr='value')

    def range(self, attr='time'):
        """Returns the (first key, last key) span of `attr` as a ValueRange."""
        # TODO: This will only work for curves whose start is their minimum and end is their maximum.
        keys = self._keys
        start = getattr(keys[0], attr) if len(keys) > 1 else 0
        end = getattr(keys[-1], attr) if len(keys) > 1 else 0
        return ValueRange(start, end)

    def setExtrapolation(self, extrapolation=(None, None)):
        """Sets (in, out) extrapolation types; None entries leave the current value."""
        self._inExtrapolation = extrapolation[0] or self._inExtrapolation
        self._outExtrapolation = extrapolation[1] or self._outExtrapolation

    def extrapolation(self):
        """Returns the (in, out) extrapolation types."""
        return (self._inExtrapolation, self._outExtrapolation)

    def name(self):
        return self._name

    def type(self):
        return self._type

    def setType(self, tpe):
        self._type = tpe

    def setName(self, name):
        self._name = name

    def addKey(self, **kwargs):
        """Appends a new Key built from `kwargs` and returns the full key list."""
        key = Key(**kwargs)
        self._keys.append(key)
        return self._keys

    def __len__(self):
        return len(self.keys())

    def __nonzero__(self):
        # Python 2 truthiness: a curve is truthy when it has keys.
        return bool(self.__len__())

    def __eq__(self, other):
        """ Allows to compare to fCurve objects.
        """
        if isinstance(other, FCurve):
            # In mid-level debug mode, dump both curves to disk for manual diffing.
            if cross3d.debugLevel >= cross3d.constants.DebugLevels.Mid:
                with open(r'C:\temp\fCurve.xml', 'w') as fle:
                    fle.write(self.toXML())
                with open(r'C:\temp\otherFCurve.xml', 'w') as fle:
                    fle.write(other.toXML())
            return self.__hash__() == other.__hash__()
        return False

    def __hash__(self):
        # Hash the serialized curve so equality follows curve content.
        # Bug fix: this used to return the hexdigest *string*, which made
        # hash(fcurve) raise TypeError; __eq__ semantics are unchanged since
        # equal digests still compare equal as ints.
        return int(hashlib.sha224(self.toXML()).hexdigest(), 16)

    def __ne__(self, other):
        return not self.__eq__(other)

    def fromXML(self, xml):
        """ Loads curve data from an XML document.

        Args:
            xml(string): The xml we want to load on the curve.
        """
        # If the document is a path we try to load the XML from that file.
        from cross3d.migrate.XML import XMLDocument
        document = XMLDocument()
        document.parse(xml)
        # Getting the curve element.
        fCurveElement = document.root()
        self._name = fCurveElement.attribute('name')
        self._type = ControllerType.valueByLabel(fCurveElement.attribute('type'))
        self._inExtrapolation = ExtrapolationType.valueByLabel(fCurveElement.attribute('inExtrapolation'))
        self._outExtrapolation = ExtrapolationType.valueByLabel(fCurveElement.attribute('outExtrapolation'))
        self._keys = []
        for element in fCurveElement.children():
            # This guarantees that the XML is somehow valid.
            if element.findChild('inTangentAngle'):
                # Getting tangent types.
                inTangentType = element.findChild('inTangentType').value()
                outTangentType = element.findChild('outTangentType').value()
                # TODO: Remove in a few month. That's for backward compatibility.
                tbc = {'custom': 'Bezier', 'linear': 'Linear', 'auto': 'Automatic', 'step': 'Stepped'}
                if inTangentType in tbc:
                    inTangentType = tbc[inTangentType]
                if outTangentType in tbc:
                    outTangentType = tbc[outTangentType]
                kwargs = {'time': element.attribute('time'),
                          'value': element.attribute('value'),
                          'inTangentAngle': element.findChild('inTangentAngle').value(),
                          'outTangentAngle': element.findChild('outTangentAngle').value(),
                          'inTangentType': TangentType.valueByLabel(inTangentType),
                          'outTangentType': TangentType.valueByLabel(outTangentType),
                          'inTangentLength': element.findChild('inTangentLength').value(),
                          'outTangentLength': element.findChild('outTangentLength').value(),
                          'normalizedTangents': element.findChild('normalizedTangents').value() == 'True',
                          'brokenTangents': element.findChild('brokenTangents').value() == 'True'}
                self._keys.append(Key(**kwargs))

    def toXML(self):
        """ Translate the curve data into a XML.
        TODO: I hate the API for XML so I shove most of it here.

        Returns:
            str: The XML data for that curve.
        """
        from cross3d.migrate.XML import XMLDocument
        document = XMLDocument()
        fCurveElement = document.addNode('fCurve')
        fCurveElement.setAttribute('name', self._name)
        fCurveElement.setAttribute('type', ControllerType.labelByValue(self._type))
        fCurveElement.setAttribute('inExtrapolation', ExtrapolationType.labelByValue(self._inExtrapolation))
        fCurveElement.setAttribute('outExtrapolation', ExtrapolationType.labelByValue(self._outExtrapolation))
        for key in self._keys:
            keyElement = fCurveElement.addNode('key')
            keyElement.setAttribute('value', key.value)
            keyElement.setAttribute('time', key.time)
            properties = {'inTangentAngle': key.inTangentAngle,
                          'outTangentAngle': key.outTangentAngle,
                          'inTangentType': TangentType.labelByValue(key.inTangentType),
                          'outTangentType': TangentType.labelByValue(key.outTangentType),
                          'inTangentLength': key.inTangentLength,
                          'outTangentLength': key.outTangentLength,
                          'normalizedTangents': key.normalizedTangents,
                          'brokenTangents': key.brokenTangents}
            # Sorted so the serialized form is deterministic (used by __hash__).
            for prop in sorted(properties.keys()):
                propertyElement = keyElement.addNode(prop)
                propertyElement.setValue(properties[prop])
        return document.toxml()

    def write(self, path):
        """Writes the curve's XML serialization to `path`, creating directories as needed."""
        if path and isinstance(path, basestring):
            dirname = os.path.dirname(path)
            # Bug fix: guard the empty dirname of a bare filename, which used
            # to make os.makedirs('') raise OSError.
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(path, 'w') as fle:
                fle.write(self.toXML())

    def read(self, path):
        """Loads the curve from the XML file at `path`."""
        with open(path) as fle:
            self.fromXML(fle.read())
        return True

    def extrapolateValue(self, time):
        """Returns the value at a given time outside the range of keyframes for the curve, using the
        curve's extrapolation mode for values outside the keyframe range in that direction.

        Args:
            time (float): time at which to calculate the curve's value

        Returns:
            float: Extrapolated value for the curve at the specified time.

        Raises:
            ValueError: if `time` falls inside the keyed range, or the
            extrapolation type is unknown.
        """
        sortedKeys = sorted(self._keys, key=lambda k: k.time)
        if time >= sortedKeys[0].time and time <= sortedKeys[-1].time:
            raise ValueError('Unable to extrapolate value for time within keyframed curve.')
        t0, t1 = sortedKeys[0].time, sortedKeys[-1].time
        dt = t1 - t0
        dtx = 0
        if time < sortedKeys[0].time:
            # time is before start
            mode = self._inExtrapolation
            if mode == ExtrapolationType.Constant:
                return sortedKeys[0].value
            before = True
            dtx = t0 - time
        else:
            # time is after end
            mode = self._outExtrapolation
            if mode == ExtrapolationType.Constant:
                return sortedKeys[-1].value
            before = False
            dtx = time - t1
        if mode == ExtrapolationType.Linear:
            v = sortedKeys[0].value if before else sortedKeys[-1].value
            tangentLength = sortedKeys[0].outTangentLength if before else sortedKeys[-1].inTangentLength
            if tangentLength:
                # get the angle of the opposite tangent (max doesn't store tangents)
                # for the outer side in this case.
                theta = sortedKeys[0].outTangentAngle if before else sortedKeys[-1].inTangentAngle
                # Now get the inverse angle, since we want to move on the opposite vector
                theta = math.pi - theta
                # delta from the range to our unknown is our triangle's base,
                # theta is the angle, and our y value is the side.
                # Solve for y, and then offset by the value of the last keyframe.
                return dtx * math.tan(theta) + v
            else:
                # No tangent to follow: fall back to the slope between the two
                # outermost keys on this side.
                if len(sortedKeys) == 1:
                    return sortedKeys[0].value
                if before:
                    x = sortedKeys[1].time - sortedKeys[0].time
                    y = sortedKeys[0].value - sortedKeys[1].value
                    offset = sortedKeys[0].value
                else:
                    x = sortedKeys[-1].time - sortedKeys[-2].time
                    y = sortedKeys[-1].value - sortedKeys[-2].value
                    offset = sortedKeys[-1].value
                return (y / x) * dtx + offset
        elif mode == ExtrapolationType.Cycled:
            # We're just looping through the existing timeline now, so we can modulus the delta of
            # sample position with the delta of the start/end keyframe times
            tp = dtx % dt
            # If we fell off the beginning, we need to play through backwards
            if before:
                tp = dt - tp
            # Now we can just get the value for the time
            return self.valueAtTime(tp + t0)
        elif mode == ExtrapolationType.CycledWithOffset:
            # This is going to work the same as cycled, except we'll need to add an offset.
            # our position will be the same, but we'll also need a repetition count to multiply by
            # our offset.
            tp = dtx % dt
            tc = math.floor(dtx / dt) + 1
            offset = tc * (sortedKeys[-1].value - sortedKeys[0].value)
            offset *= (-1 if before else 1)
            # If we fell off the beginning, we need to play through backwards.
            if before:
                tp = dt - tp
            # Now we can just get the value for the time and add our offset
            return self.valueAtTime(tp + t0) + offset
        elif mode == ExtrapolationType.PingPong:
            # Again this will be similar to Cycled, however now we'll need to reverse the looping
            # direction with each cycle.
            tp = dtx % dt
            oddRep = not bool(math.floor(dtx / dt) % 2)
            # If it's an odd numbered repetition, we need to reverse it.
            if (not oddRep and before) or (oddRep and not before):
                tp = dt - tp
            # Now we can just get the value for the time
            return self.valueAtTime(tp + t0)
        else:
            raise ValueError('Unable to extrapolate values: invalid ExtrapolationType found.')

    @staticmethod
    def bezierEvaluation(key0, key1, frame):
        """Finds the point on a cubic bezier spline at time frame between two keys.

        Args:
            key0 (Key): Starting key for the spline
            key1 (Key): Ending key for the spline
            frame (float): Time (as a frame) to solve for

        Returns:
            float: the y (value) coordinate of the curve at `frame`.
        """
        # Implementation by Tyler Fox, modified by Will Cavanagh.
        # Based on method described at
        # http://edmund.birotanker.com/monotonic-bezier-curves-for-animation.html
        p0x, p0y = key0.time, key0.value
        p1x, p1y = key0.outTangentPoint
        p2x, p2y = key1.inTangentPoint
        p3x, p3y = key1.time, key1.value
        totalXRecip = 1.0 / (p3x - p0x)
        f = (p1x - p0x) * totalXRecip
        g = (p3x - p2x) * totalXRecip
        xVal = (frame - p0x) * totalXRecip
        d = 3*f + 3*g - 2
        n = 2*f + g - 1
        r = (n*n - f*d) / (d*d)
        q = ((3*f*d*n - 2*n*n*n) / (d*d*d)) - xVal/d
        discriminant = q*q - 4*r*r*r
        if discriminant >= 0:
            pm = (discriminant**0.5)/2  # plus/minus portion of equation
            # We're able to only use the + portion of the +/- and get an accurate
            # outcome. Saves steps / logic.
            # NOTE(review): if (-q/2 + pm) is negative, the fractional power
            # raises ValueError in Python 2 -- confirm inputs keep it positive.
            w = (-q/2 + pm)**(1/3.0)
            u = w + r/w
        else:
            theta = math.acos(-q / ( 2*r**(3/2.0)) )
            phi = theta/3 + 4*math.pi/3
            u = 2 * r**(0.5) * math.cos(phi)
        t = u + n/d
        t1 = 1-t
        return (t1**3*p0y + 3*t1**2*t*p1y + 3*t1*t**2*p2y + t**3*p3y)
|
|
# stdlib
import collections
import logging
import pprint
import socket
import sys
import time
# project
from checks import AGENT_METRICS_CHECK_NAME, AgentCheck, create_service_check
from checks.check_status import (
CheckStatus,
CollectorStatus,
EmitterStatus,
STATUS_ERROR,
STATUS_OK,
)
from checks.datadog import DdForwarder, Dogstreams
from checks.ganglia import Ganglia
from config import get_system_stats, get_version
from resources.processes import Processes as ResProcesses
import checks.system.unix as u
import checks.system.win32 as w32
import modules
from util import (
EC2,
GCE,
get_os,
get_uuid,
Timer,
)
from utils.debug import log_exceptions
from utils.jmx import JMXFiles
from utils.platform import Platform
from utils.subprocess_output import get_subprocess_output
log = logging.getLogger(__name__)
# Log collection-run summaries at INFO for the first FLUSH_LOGGING_INITIAL
# runs, then only every FLUSH_LOGGING_PERIOD-th run (DEBUG otherwise), to
# keep steady-state log volume down.
FLUSH_LOGGING_PERIOD = 10
FLUSH_LOGGING_INITIAL = 5
# Format template for tagging items with the check that produced them,
# e.g. 'dd_check:postgres' -- presumably filled in elsewhere in the agent.
DD_CHECK_TAG = 'dd_check:{0}'
class AgentPayload(collections.MutableMapping):
    """
    AgentPayload offers a single payload interface but manages two payloads:
    * A metadata payload
    * A data payload that contains metrics, events, service_checks and more

    Each of these payloads is automatically submitted to its specific endpoint.
    """
    # Keys routed to the metadata payload; everything else goes to data.
    METADATA_KEYS = frozenset(['meta', 'tags', 'host-tags', 'systemStats',
                               'agent_checks', 'gohai', 'external_host_tags'])
    # Keys mirrored into both payloads.
    DUPLICATE_KEYS = frozenset(['apiKey', 'agentVersion'])

    COMMON_ENDPOINT = ''
    DATA_ENDPOINT = 'metrics'
    METADATA_ENDPOINT = 'metadata'

    def __init__(self):
        self.data_payload = dict()
        self.meta_payload = dict()

    def _payloads_for(self, key):
        """Return the payload dict(s) a key belongs to, primary first."""
        if key in self.DUPLICATE_KEYS:
            return [self.data_payload, self.meta_payload]
        if key in self.METADATA_KEYS:
            return [self.meta_payload]
        return [self.data_payload]

    @property
    def payload(self):
        """
        Single payload with the content of data and metadata payloads.
        """
        merged = dict(self.data_payload)
        merged.update(self.meta_payload)
        return merged

    def __getitem__(self, key):
        return self._payloads_for(key)[0][key]

    def __setitem__(self, key, value):
        for target in self._payloads_for(key):
            target[key] = value

    def __delitem__(self, key):
        for target in self._payloads_for(key):
            del target[key]

    def __iter__(self):
        for source in (self.data_payload, self.meta_payload):
            for item in source:
                yield item

    def __len__(self):
        return len(self.data_payload) + len(self.meta_payload)

    def emit(self, log, config, emitters, continue_running, merge_payloads=True):
        """
        Send payloads via the emitters.

        :param merge_payloads: merge data and metadata payloads in a single payload and submit it
        to the common endpoint
        :type merge_payloads: boolean
        """
        def _emit_payload(payload, endpoint):
            """ Send the payload via the emitters. """
            emitter_statuses = []
            for emitter in emitters:
                # Don't try to send to an emitter if we're stopping/
                if not continue_running:
                    return emitter_statuses
                status = EmitterStatus(emitter.__name__)
                try:
                    emitter(payload, log, config, endpoint)
                except Exception as e:
                    log.exception("Error running emitter: %s"
                                  % emitter.__name__)
                    status = EmitterStatus(emitter.__name__, e)
                emitter_statuses.append(status)
            return emitter_statuses

        statuses = []
        if merge_payloads:
            statuses.extend(_emit_payload(self.payload, self.COMMON_ENDPOINT))
        else:
            statuses.extend(_emit_payload(self.data_payload, self.DATA_ENDPOINT))
            statuses.extend(_emit_payload(self.meta_payload, self.METADATA_ENDPOINT))
        return statuses
class Collector(object):
"""
The collector is responsible for collecting data from each check and
passing it along to the emitters, who send it to their final destination.
"""
def __init__(self, agentConfig, emitters, systemStats, hostname):
self.emit_duration = None
self.agentConfig = agentConfig
self.hostname = hostname
# system stats is generated by config.get_system_stats
self.agentConfig['system_stats'] = systemStats
# agent config is used during checks, system_stats can be accessed through the config
self.os = get_os()
self.plugins = None
self.emitters = emitters
self.check_timings = agentConfig.get('check_timings')
self.push_times = {
'host_metadata': {
'start': time.time(),
'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
},
'external_host_tags': {
'start': time.time() - 3 * 60, # Wait for the checks to init
'interval': int(agentConfig.get('external_host_tags', 5 * 60))
},
'agent_checks': {
'start': time.time(),
'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
},
}
socket.setdefaulttimeout(15)
self.run_count = 0
self.continue_running = True
self.hostname_metadata_cache = None
self.initialized_checks_d = []
self.init_failed_checks_d = {}
# Unix System Checks
self._unix_system_checks = {
'io': u.IO(log),
'load': u.Load(log),
'memory': u.Memory(log),
'processes': u.Processes(log),
'cpu': u.Cpu(log),
'system': u.System(log)
}
# Win32 System `Checks
self._win32_system_checks = {
'io': w32.IO(log),
'proc': w32.Processes(log),
'memory': w32.Memory(log),
'network': w32.Network(log),
'cpu': w32.Cpu(log)
}
# Old-style metric checks
self._ganglia = Ganglia(log)
self._dogstream = Dogstreams.init(log, self.agentConfig)
self._ddforwarder = DdForwarder(log, self.agentConfig)
# Agent performance metrics check
self._agent_metrics = None
self._metrics_checks = []
# Custom metric checks
for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
if len(module_spec) == 0:
continue
try:
self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
log.info("Registered custom check %s" % module_spec)
log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
except Exception, e:
log.exception('Unable to load custom check module %s' % module_spec)
# Resource Checks
self._resources_checks = [
ResProcesses(log, self.agentConfig)
]
def stop(self):
"""
Tell the collector to stop at the next logical point.
"""
# This is called when the process is being killed, so
# try to stop the collector as soon as possible.
# Most importantly, don't try to submit to the emitters
# because the forwarder is quite possibly already killed
# in which case we'll get a misleading error in the logs.
# Best to not even try.
self.continue_running = False
for check in self.initialized_checks_d:
check.stop()
@staticmethod
def _stats_for_display(raw_stats):
return pprint.pformat(raw_stats, indent=4)
@log_exceptions(log)
def run(self, checksd=None, start_event=True, configs_reloaded=False):
"""
Collect data from each check and submit their data.
"""
log.debug("Found {num_checks} checks".format(num_checks=len(checksd['initialized_checks'])))
timer = Timer()
if not Platform.is_windows():
cpu_clock = time.clock()
self.run_count += 1
log.debug("Starting collection run #%s" % self.run_count)
if checksd:
self.initialized_checks_d = checksd['initialized_checks'] # is a list of AgentCheck instances
self.init_failed_checks_d = checksd['init_failed_checks'] # is of type {check_name: {error, traceback}}
payload = AgentPayload()
# Find the AgentMetrics check and pop it out
# This check must run at the end of the loop to collect info on agent performance
if not self._agent_metrics or configs_reloaded:
for check in self.initialized_checks_d:
if check.name == AGENT_METRICS_CHECK_NAME:
self._agent_metrics = check
self.initialized_checks_d.remove(check)
break
# Initialize payload
self._build_payload(payload)
metrics = payload['metrics']
events = payload['events']
service_checks = payload['service_checks']
# Run the system checks. Checks will depend on the OS
if Platform.is_windows():
# Win32 system checks
try:
metrics.extend(self._win32_system_checks['memory'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['cpu'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['network'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['io'].check(self.agentConfig))
metrics.extend(self._win32_system_checks['proc'].check(self.agentConfig))
except Exception:
log.exception('Unable to fetch Windows system metrics.')
else:
# Unix system checks
sys_checks = self._unix_system_checks
load = sys_checks['load'].check(self.agentConfig)
payload.update(load)
system = sys_checks['system'].check(self.agentConfig)
payload.update(system)
memory = sys_checks['memory'].check(self.agentConfig)
if memory:
memstats = {
'memPhysUsed': memory.get('physUsed'),
'memPhysPctUsable': memory.get('physPctUsable'),
'memPhysFree': memory.get('physFree'),
'memPhysTotal': memory.get('physTotal'),
'memPhysUsable': memory.get('physUsable'),
'memSwapUsed': memory.get('swapUsed'),
'memSwapFree': memory.get('swapFree'),
'memSwapPctFree': memory.get('swapPctFree'),
'memSwapTotal': memory.get('swapTotal'),
'memCached': memory.get('physCached'),
'memBuffers': memory.get('physBuffers'),
'memShared': memory.get('physShared')
}
payload.update(memstats)
ioStats = sys_checks['io'].check(self.agentConfig)
if ioStats:
payload['ioStats'] = ioStats
processes = sys_checks['processes'].check(self.agentConfig)
payload.update({'processes': processes})
cpuStats = sys_checks['cpu'].check(self.agentConfig)
if cpuStats:
payload.update(cpuStats)
# Run old-style checks
gangliaData = self._ganglia.check(self.agentConfig)
dogstreamData = self._dogstream.check(self.agentConfig)
ddforwarderData = self._ddforwarder.check(self.agentConfig)
if gangliaData is not False and gangliaData is not None:
payload['ganglia'] = gangliaData
# dogstream
if dogstreamData:
dogstreamEvents = dogstreamData.get('dogstreamEvents', None)
if dogstreamEvents:
if 'dogstream' in payload['events']:
events['dogstream'].extend(dogstreamEvents)
else:
events['dogstream'] = dogstreamEvents
del dogstreamData['dogstreamEvents']
payload.update(dogstreamData)
# metrics about the forwarder
if ddforwarderData:
payload['datadog'] = ddforwarderData
# Resources checks
if not Platform.is_windows():
has_resource = False
for resources_check in self._resources_checks:
try:
resources_check.check()
snaps = resources_check.pop_snapshots()
if snaps:
has_resource = True
res_value = {
'snaps': snaps,
'format_version': resources_check.get_format_version()
}
res_format = resources_check.describe_format_if_needed()
if res_format is not None:
res_value['format_description'] = res_format
payload['resources'][resources_check.RESOURCE_KEY] = res_value
except Exception:
log.exception("Error running resource check %s" % resources_check.RESOURCE_KEY)
if has_resource:
payload['resources']['meta'] = {
'api_key': self.agentConfig['api_key'],
'host': payload['internalHostname'],
}
# newer-style checks (not checks.d style)
for metrics_check in self._metrics_checks:
res = metrics_check.check(self.agentConfig)
if res:
metrics.extend(res)
# checks.d checks
check_statuses = []
for check in self.initialized_checks_d:
if not self.continue_running:
return
log.info("Running check %s" % check.name)
instance_statuses = []
metric_count = 0
event_count = 0
service_check_count = 0
check_start_time = time.time()
check_stats = None
try:
# Run the check.
instance_statuses = check.run()
# Collect the metrics and events.
current_check_metrics = check.get_metrics()
current_check_events = check.get_events()
check_stats = check._get_internal_profiling_stats()
# Collect metadata
current_check_metadata = check.get_service_metadata()
# Save metrics & events for the payload.
metrics.extend(current_check_metrics)
if current_check_events:
if check.name not in events:
events[check.name] = current_check_events
else:
events[check.name] += current_check_events
# Save the status of the check.
metric_count = len(current_check_metrics)
event_count = len(current_check_events)
except Exception:
log.exception("Error running check %s" % check.name)
check_status = CheckStatus(
check.name, instance_statuses, metric_count,
event_count, service_check_count, service_metadata=current_check_metadata,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats
)
# Service check for Agent checks failures
service_check_tags = ["check:%s" % check.name]
if check_status.status == STATUS_OK:
status = AgentCheck.OK
elif check_status.status == STATUS_ERROR:
status = AgentCheck.CRITICAL
check.service_check('datadog.agent.check_status', status, tags=service_check_tags)
# Collect the service checks and save them in the payload
current_check_service_checks = check.get_service_checks()
if current_check_service_checks:
service_checks.extend(current_check_service_checks)
service_check_count = len(current_check_service_checks)
# Update the check status with the correct service_check_count
check_status.service_check_count = service_check_count
check_statuses.append(check_status)
check_run_time = time.time() - check_start_time
log.debug("Check %s ran in %.2f s" % (check.name, check_run_time))
# Intrument check run timings if enabled.
if self.check_timings:
metric = 'datadog.agent.check_run_time'
meta = {'tags': ["check:%s" % check.name]}
metrics.append((metric, time.time(), check_run_time, meta))
for check_name, info in self.init_failed_checks_d.iteritems():
if not self.continue_running:
return
check_status = CheckStatus(check_name, None, None, None, None,
init_failed_error=info['error'],
init_failed_traceback=info['traceback'])
check_statuses.append(check_status)
# Add a service check for the agent
service_checks.append(create_service_check('datadog.agent.up', AgentCheck.OK,
hostname=self.hostname))
# Store the metrics and events in the payload.
payload['metrics'] = metrics
payload['events'] = events
payload['service_checks'] = service_checks
# Populate metadata
self._populate_payload_metadata(payload, check_statuses, start_event)
collect_duration = timer.step()
if self._agent_metrics:
metric_context = {
'collection_time': collect_duration,
'emit_time': self.emit_duration,
}
if not Platform.is_windows():
metric_context['cpu_time'] = time.clock() - cpu_clock
self._agent_metrics.set_metric_context(payload, metric_context)
self._agent_metrics.run()
agent_stats = self._agent_metrics.get_metrics()
payload['metrics'].extend(agent_stats)
if self.agentConfig.get('developer_mode'):
log.debug("\n Agent developer mode stats: \n {0}".format(
Collector._stats_for_display(agent_stats))
)
# Let's send our payload
emitter_statuses = payload.emit(log, self.agentConfig, self.emitters,
self.continue_running)
self.emit_duration = timer.step()
# Persist the status of the collection run.
try:
CollectorStatus(check_statuses, emitter_statuses,
self.hostname_metadata_cache).persist()
except Exception:
log.exception("Error persisting collector status")
if self.run_count <= FLUSH_LOGGING_INITIAL or self.run_count % FLUSH_LOGGING_PERIOD == 0:
log.info("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
if self.run_count == FLUSH_LOGGING_INITIAL:
log.info("First flushes done, next flushes will be logged every %s flushes." %
FLUSH_LOGGING_PERIOD)
else:
log.debug("Finished run #%s. Collection time: %ss. Emit time: %ss" %
(self.run_count, round(collect_duration, 2), round(self.emit_duration, 2)))
return payload
@staticmethod
def run_single_check(check, verbose=True):
log.info("Running check %s" % check.name)
instance_statuses = []
metric_count = 0
event_count = 0
service_check_count = 0
check_start_time = time.time()
check_stats = None
try:
# Run the check.
instance_statuses = check.run()
# Collect the metrics and events.
current_check_metrics = check.get_metrics()
current_check_events = check.get_events()
current_service_checks = check.get_service_checks()
current_service_metadata = check.get_service_metadata()
check_stats = check._get_internal_profiling_stats()
# Save the status of the check.
metric_count = len(current_check_metrics)
event_count = len(current_check_events)
service_check_count = len(current_service_checks)
print "Metrics: \n{0}".format(pprint.pformat(current_check_metrics))
print "Events: \n{0}".format(pprint.pformat(current_check_events))
print "Service Checks: \n{0}".format(pprint.pformat(current_service_checks))
print "Service Metadata: \n{0}".format(pprint.pformat(current_service_metadata))
except Exception:
log.exception("Error running check %s" % check.name)
check_status = CheckStatus(
check.name, instance_statuses, metric_count,
event_count, service_check_count,
library_versions=check.get_library_info(),
source_type_name=check.SOURCE_TYPE_NAME or check.name,
check_stats=check_stats
)
return check_status
def _emit(self, payload):
""" Send the payload via the emitters. """
statuses = []
for emitter in self.emitters:
# Don't try to send to an emitter if we're stopping/
if not self.continue_running:
return statuses
name = emitter.__name__
emitter_status = EmitterStatus(name)
try:
emitter(payload, log, self.agentConfig)
except Exception, e:
log.exception("Error running emitter: %s" % emitter.__name__)
emitter_status = EmitterStatus(name, e)
statuses.append(emitter_status)
return statuses
def _is_first_run(self):
return self.run_count <= 1
def _build_payload(self, payload):
"""
Build the payload skeleton, so it contains all of the generic payload data.
"""
now = time.time()
payload['collection_timestamp'] = now
payload['os'] = self.os
payload['python'] = sys.version
payload['agentVersion'] = self.agentConfig['version']
payload['apiKey'] = self.agentConfig['api_key']
payload['events'] = {}
payload['metrics'] = []
payload['service_checks'] = []
payload['resources'] = {}
payload['internalHostname'] = self.hostname
payload['uuid'] = get_uuid()
payload['host-tags'] = {}
payload['external_host_tags'] = {}
    def _populate_payload_metadata(self, payload, check_statuses, start_event=True):
        """
        Periodically populate the payload with metadata related to the system, host, and/or checks.

        :param payload: the payload dict being built for this run; mutated in place.
        :param check_statuses: list of CheckStatus objects from this run, used for
            the periodic 'agent_checks' section.
        :param start_event: when True and this is the first run, also emit an
            'Agent Startup' event and include system stats.
        """
        now = time.time()
        # Include system stats on first postback
        if start_event and self._is_first_run():
            payload['systemStats'] = self.agentConfig.get('system_stats', {})
            # Also post an event in the newsfeed
            payload['events']['System'] = [{
                'api_key': self.agentConfig['api_key'],
                'host': payload['internalHostname'],
                'timestamp': now,
                'event_type':'Agent Startup',
                'msg_text': 'Version %s' % get_version()
            }]
        # Periodically send the host metadata.
        if self._should_send_additional_data('host_metadata'):
            # gather metadata with gohai
            try:
                if not Platform.is_windows():
                    command = "gohai"
                else:
                    # NOTE: '\g' is not a recognized escape, so this is the
                    # literal Windows relative path 'gohai\gohai.exe'.
                    command = "gohai\gohai.exe"
                gohai_metadata, gohai_err, _ = get_subprocess_output([command], log)
                payload['gohai'] = gohai_metadata
                if gohai_err:
                    log.warning("GOHAI LOG | {0}".format(gohai_err))
            except OSError as e:
                if e.errno == 2:  # file not found, expected when install from source
                    log.info("gohai file not found")
                else:
                    raise e
            except Exception as e:
                log.warning("gohai command failed with error %s" % str(e))
            payload['systemStats'] = get_system_stats()
            payload['meta'] = self._get_hostname_metadata()
            # Cache the metadata so later runs can reuse it without re-probing.
            self.hostname_metadata_cache = payload['meta']
            # Add static tags from the configuration file
            host_tags = []
            if self.agentConfig['tags'] is not None:
                host_tags.extend([unicode(tag.strip())
                                 for tag in self.agentConfig['tags'].split(",")])
            if self.agentConfig['collect_ec2_tags']:
                host_tags.extend(EC2.get_tags(self.agentConfig))
            if host_tags:
                payload['host-tags']['system'] = host_tags
            # If required by the user, let's create the dd_check:xxx host tags
            if self.agentConfig['create_dd_check_tags']:
                app_tags_list = [DD_CHECK_TAG.format(c.name) for c in self.initialized_checks_d]
                app_tags_list.extend([DD_CHECK_TAG.format(cname) for cname
                                      in JMXFiles.get_jmx_appnames()])
                if 'system' not in payload['host-tags']:
                    payload['host-tags']['system'] = []
                payload['host-tags']['system'].extend(app_tags_list)
            GCE_tags = GCE.get_tags(self.agentConfig)
            if GCE_tags is not None:
                payload['host-tags'][GCE.SOURCE_TYPE_NAME] = GCE_tags
            # Log the metadata on the first run
            if self._is_first_run():
                log.info("Hostnames: %s, tags: %s" %
                         (repr(self.hostname_metadata_cache), payload['host-tags']))
        # Periodically send extra hosts metadata (vsphere)
        # Metadata of hosts that are not the host where the agent runs, not all the checks use
        # that
        external_host_tags = []
        if self._should_send_additional_data('external_host_tags'):
            for check in self.initialized_checks_d:
                try:
                    # Checks that don't implement get_external_host_tags are skipped.
                    getter = getattr(check, 'get_external_host_tags')
                    check_tags = getter()
                    external_host_tags.extend(check_tags)
                except AttributeError:
                    pass
        if external_host_tags:
            payload['external_host_tags'] = external_host_tags
        # Periodically send agent_checks metadata
        if self._should_send_additional_data('agent_checks'):
            # Add agent checks statuses and error/warning messages
            agent_checks = []
            for check in check_statuses:
                if check.instance_statuses is not None:
                    for i, instance_status in enumerate(check.instance_statuses):
                        agent_checks.append(
                            (
                                check.name, check.source_type_name,
                                instance_status.instance_id,
                                instance_status.status,
                                # put error message or list of warning messages in the same field
                                # it will be handled by the UI
                                instance_status.error or instance_status.warnings or "",
                                check.service_metadata[i]
                            )
                        )
                else:
                    # The check never produced instance statuses: report its
                    # initialization failure instead.
                    agent_checks.append(
                        (
                            check.name, check.source_type_name,
                            "initialization",
                            check.status, repr(check.init_failed_error)
                        )
                    )
            payload['agent_checks'] = agent_checks
            payload['meta'] = self.hostname_metadata_cache  # add hostname metadata
def _get_hostname_metadata(self):
"""
Returns a dictionnary that contains hostname metadata.
"""
metadata = EC2.get_metadata(self.agentConfig)
if metadata.get('hostname'):
metadata['ec2-hostname'] = metadata.get('hostname')
del metadata['hostname']
if self.agentConfig.get('hostname'):
metadata['agent-hostname'] = self.agentConfig.get('hostname')
else:
try:
metadata["socket-hostname"] = socket.gethostname()
except Exception:
pass
try:
metadata["socket-fqdn"] = socket.getfqdn()
except Exception:
pass
metadata["hostname"] = self.hostname
metadata["timezones"] = time.tzname
# Add cloud provider aliases
host_aliases = GCE.get_host_aliases(self.agentConfig)
if host_aliases:
metadata['host_aliases'] = host_aliases
return metadata
def _should_send_additional_data(self, data_name):
if self._is_first_run():
return True
# If the interval has passed, send the metadata again
now = time.time()
if now - self.push_times[data_name]['start'] >= self.push_times[data_name]['interval']:
log.debug('%s interval has passed. Sending it.' % data_name)
self.push_times[data_name]['start'] = now
return True
return False
|
|
# Copyright (c) 2016 Synology Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the Synology iSCSI volume driver."""
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume import configuration as conf
from cinder.volume.drivers.synology import synology_common as common
from cinder.volume.drivers.synology import synology_iscsi
# --- Shared fixtures for the Synology iSCSI driver tests ---
VOLUME_ID = fake.VOLUME_ID
TARGET_NAME_PREFIX = 'Cinder-Target-'
IP = '10.10.10.10'
IQN = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + VOLUME_ID
TRG_ID = 1
# A fake existing volume already exported at (IP, TRG_ID, IQN).
VOLUME = {
    'name': fake.VOLUME_NAME,
    'id': VOLUME_ID,
    'display_name': 'fake_volume',
    'size': 10,
    'provider_location': '%s:3260,%d %s 1' % (IP, TRG_ID, IQN),
}
NEW_VOLUME_ID = fake.VOLUME2_ID
IQN2 = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + NEW_VOLUME_ID
NEW_TRG_ID = 2
# Second fake volume, used as clone/migration target.
NEW_VOLUME = {
    'name': fake.VOLUME2_NAME,
    'id': NEW_VOLUME_ID,
    'display_name': 'new_fake_volume',
    'size': 10,
    'provider_location': '%s:3260,%d %s 1' % (IP, NEW_TRG_ID, IQN2),
}
SNAPSHOT_ID = fake.SNAPSHOT_ID
# Fake snapshot of VOLUME.
SNAPSHOT = {
    'name': fake.SNAPSHOT_NAME,
    'id': SNAPSHOT_ID,
    'volume_id': VOLUME_ID,
    'volume_name': VOLUME['name'],
    'volume_size': 10,
    'display_name': 'fake_snapshot',
}
DS_SNAPSHOT_UUID = 'ca86a56a-40d8-4210-974c-ef15dbf01cba'
# Metadata the driver is expected to return from create_snapshot().
SNAPSHOT_METADATA = {
    'metadata': {
        'ds_snapshot_UUID': DS_SNAPSHOT_UUID
    }
}
INITIATOR_IQN = 'iqn.1993-08.org.debian:01:604af6a341'
CONNECTOR = {
    'initiator': INITIATOR_IQN,
}
CONTEXT = {
}
LOCAL_PATH = '/dev/isda'
IMAGE_SERVICE = 'image_service'
IMAGE_ID = 1
IMAGE_META = {
    'id': IMAGE_ID
}
NODE_UUID = '72003c93-2db2-4f00-a169-67c5eae86bb1'
HOST = {
}
class SynoISCSIDriverTestCase(test.TestCase):
    """Unit tests for synology_iscsi.SynoISCSIDriver.

    The driver delegates nearly all work to common.SynoCommon; each
    test replaces the relevant SynoCommon method with a mock and
    verifies the driver forwards arguments and results correctly.
    """

    @mock.patch.object(common.SynoCommon,
                       '_get_node_uuid',
                       return_value=NODE_UUID)
    @mock.patch.object(common, 'APIRequest')
    def setUp(self, _request, _get_node_uuid):
        # APIRequest and _get_node_uuid are patched so SynoCommon can
        # be constructed without talking to a real NAS.
        super(SynoISCSIDriverTestCase, self).setUp()
        self.conf = self.setup_configuration()
        self.driver = synology_iscsi.SynoISCSIDriver(configuration=self.conf)
        self.driver.common = common.SynoCommon(self.conf, 'iscsi')

    def setup_configuration(self):
        """Build a mock cinder Configuration with Synology defaults."""
        config = mock.Mock(spec=conf.Configuration)
        config.use_chap_auth = False
        config.target_protocol = 'iscsi'
        config.target_ip_address = IP
        config.synology_admin_port = 5000
        config.synology_username = 'admin'
        config.synology_password = 'admin'
        config.synology_ssl_verify = True
        config.synology_one_time_pass = '123456'
        config.volume_dd_blocksize = 1
        return config

    def test_check_for_setup_error(self):
        self.driver.common.check_for_setup_error = mock.Mock()
        result = self.driver.check_for_setup_error()
        self.driver.common.check_for_setup_error.assert_called_with()
        self.assertIsNone(result)

    def test_create_volume(self):
        self.driver.common.create_volume = mock.Mock()
        result = self.driver.create_volume(VOLUME)
        self.driver.common.create_volume.assert_called_with(VOLUME)
        self.assertIsNone(result)

    def test_delete_volume(self):
        self.driver.common.delete_volume = mock.Mock()
        result = self.driver.delete_volume(VOLUME)
        self.driver.common.delete_volume.assert_called_with(VOLUME)
        self.assertIsNone(result)

    def test_create_cloned_volume(self):
        self.driver.common.create_cloned_volume = mock.Mock()
        result = self.driver.create_cloned_volume(VOLUME, NEW_VOLUME)
        self.driver.common.create_cloned_volume.assert_called_with(
            VOLUME, NEW_VOLUME)
        self.assertIsNone(result)

    def test_extend_volume(self):
        new_size = 20
        self.driver.common.extend_volume = mock.Mock()
        result = self.driver.extend_volume(VOLUME, new_size)
        self.driver.common.extend_volume.assert_called_with(
            VOLUME, new_size)
        self.assertIsNone(result)

    def test_extend_volume_wrong_size(self):
        # Shrinking (new size <= current) must not reach SynoCommon.
        wrong_new_size = 1
        self.driver.common.extend_volume = mock.Mock()
        result = self.driver.extend_volume(VOLUME, wrong_new_size)
        self.driver.common.extend_volume.assert_not_called()
        self.assertIsNone(result)

    def test_create_volume_from_snapshot(self):
        self.driver.common.create_volume_from_snapshot = mock.Mock()
        result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
        (self.driver.common.
            create_volume_from_snapshot.assert_called_with(VOLUME, SNAPSHOT))
        self.assertIsNone(result)

    def test_update_migrated_volume(self):
        fake_ret = {'_name_id': VOLUME['id']}
        status = ''
        self.driver.common.update_migrated_volume = (
            mock.Mock(return_value=fake_ret))
        result = self.driver.update_migrated_volume(CONTEXT,
                                                    VOLUME,
                                                    NEW_VOLUME,
                                                    status)
        # Note: the driver drops the context and status arguments.
        (self.driver.common.update_migrated_volume.
            assert_called_with(VOLUME, NEW_VOLUME))
        self.assertEqual(fake_ret, result)

    def test_create_snapshot(self):
        self.driver.common.create_snapshot = (
            mock.Mock(return_value=SNAPSHOT_METADATA))
        result = self.driver.create_snapshot(SNAPSHOT)
        self.driver.common.create_snapshot.assert_called_with(SNAPSHOT)
        self.assertDictEqual(SNAPSHOT_METADATA, result)

    def test_delete_snapshot(self):
        self.driver.common.delete_snapshot = mock.Mock()
        result = self.driver.delete_snapshot(SNAPSHOT)
        self.driver.common.delete_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)

    def test_get_volume_stats(self):
        # Stats come from self.driver.stats regardless of the refresh flag.
        self.driver.common.update_volume_stats = mock.MagicMock()
        result = self.driver.get_volume_stats(True)
        self.driver.common.update_volume_stats.assert_called_with()
        self.assertEqual(self.driver.stats, result)
        result = self.driver.get_volume_stats(False)
        self.driver.common.update_volume_stats.assert_called_with()
        self.assertEqual(self.driver.stats, result)

    def test_get_volume_stats_error(self):
        self.driver.common.update_volume_stats = (
            mock.MagicMock(side_effect=exception.VolumeDriverException(
                message='dont care')))
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.get_volume_stats,
                          True)

    def test_create_export(self):
        provider_auth = 'CHAP username password'
        provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN)
        self.driver.common.is_lun_mapped = mock.Mock(return_value=False)
        self.driver.common.create_iscsi_export = (
            mock.Mock(return_value=(IQN, TRG_ID, provider_auth)))
        self.driver.common.get_provider_location = (
            mock.Mock(return_value=provider_location))
        result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR)
        self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name'])
        (self.driver.common.create_iscsi_export.
            assert_called_with(VOLUME['name'], VOLUME['id']))
        self.driver.common.get_provider_location.assert_called_with(IQN,
                                                                    TRG_ID)
        self.assertEqual(provider_location, result['provider_location'])
        self.assertEqual(provider_auth, result['provider_auth'])

    def test_create_export_is_mapped(self):
        # Already-mapped LUN: export creation is skipped entirely.
        self.driver.common.is_lun_mapped = mock.Mock(return_value=True)
        self.driver.common.create_iscsi_export = mock.Mock()
        self.driver.common.get_provider_location = mock.Mock()
        result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR)
        self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name'])
        self.driver.common.create_iscsi_export.assert_not_called()
        self.driver.common.get_provider_location.assert_not_called()
        self.assertEqual({}, result)

    def test_create_export_error(self):
        # A failure inside create_iscsi_export surfaces as ExportFailure.
        provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN)
        self.driver.common.is_lun_mapped = mock.Mock(return_value=False)
        self.driver.common.create_iscsi_export = (
            mock.Mock(side_effect=exception.InvalidInput(reason='dont care')))
        self.driver.common.get_provider_location = (
            mock.Mock(return_value=provider_location))
        self.assertRaises(exception.ExportFailure,
                          self.driver.create_export,
                          CONTEXT,
                          VOLUME,
                          CONNECTOR)
        self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name'])
        self.driver.common.get_provider_location.assert_not_called()

    def test_remove_export(self):
        self.driver.common.is_lun_mapped = mock.Mock(return_value=True)
        self.driver.common.remove_iscsi_export = mock.Mock()
        self.driver.common.get_iqn_and_trgid = (
            mock.Mock(return_value=('', TRG_ID)))
        _, trg_id = (self.driver.common.
                     get_iqn_and_trgid(VOLUME['provider_location']))
        result = self.driver.remove_export(CONTEXT, VOLUME)
        self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name'])
        (self.driver.common.get_iqn_and_trgid.
            assert_called_with(VOLUME['provider_location']))
        (self.driver.common.remove_iscsi_export.
            assert_called_with(VOLUME['name'], trg_id))
        self.assertIsNone(result)

    def test_remove_export_not_mapped(self):
        # Unmapped LUN: nothing to remove, no backend calls made.
        self.driver.common.is_lun_mapped = mock.Mock(return_value=False)
        self.driver.common.remove_iscsi_export = mock.Mock()
        self.driver.common.get_iqn_and_trgid = mock.Mock()
        result = self.driver.remove_export(CONTEXT, VOLUME)
        self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name'])
        self.driver.common.get_iqn_and_trgid.assert_not_called()
        self.driver.common.remove_iscsi_export.assert_not_called()
        self.assertIsNone(result)

    def test_remove_export_error(self):
        self.driver.common.is_lun_mapped = mock.Mock(return_value=True)
        self.driver.common.remove_iscsi_export = (
            mock.Mock(side_effect= exception.RemoveExportException(
                volume=VOLUME, reason='dont care')))
        self.assertRaises(exception.RemoveExportException,
                          self.driver.remove_export,
                          CONTEXT,
                          VOLUME)

    def test_remove_export_error_get_lun_mapped(self):
        # A missing LUN during the mapped-check is swallowed by the driver.
        self.driver.common.remove_iscsi_export = mock.Mock()
        self.driver.common.get_iqn_and_trgid = mock.Mock()
        self.driver.common.is_lun_mapped = (
            mock.Mock(side_effect=exception.SynoLUNNotExist(
                message='dont care')))
        result = self.driver.remove_export(CONTEXT, VOLUME)
        self.assertIsNone(result)
        self.driver.common.get_iqn_and_trgid.assert_not_called()
        self.driver.common.remove_iscsi_export.assert_not_called()

    def test_initialize_connection(self):
        iscsi_properties = {
            'target_discovered': False,
            'target_iqn': IQN,
            'target_portal': '%s:3260' % self.conf.target_ip_address,
            'volume_id': VOLUME['id'],
            'access_mode': 'rw',
            'discard': False
        }
        self.driver.common.get_iscsi_properties = (
            mock.Mock(return_value=iscsi_properties))
        self.conf.safe_get = mock.Mock(return_value='iscsi')
        result = self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.common.get_iscsi_properties.assert_called_with(VOLUME)
        self.conf.safe_get.assert_called_with('target_protocol')
        self.assertEqual('iscsi', result['driver_volume_type'])
        self.assertDictEqual(iscsi_properties, result['data'])

    def test_initialize_connection_error(self):
        self.driver.common.get_iscsi_properties = (
            mock.Mock(side_effect=exception.InvalidInput(reason='dont care')))
        self.assertRaises(exception.InvalidInput,
                          self.driver.initialize_connection,
                          VOLUME,
                          CONNECTOR)
|
|
#!/usr/bin/env python3
import os
import sys
import math
import fcntl
import base64
import socket
import select
import multiprocessing
import pysodium
import mltpipe
# ./mltpiped.py -d -s '[127.0.0.1]:0' -t '[127.0.0.1]:8080'
# For the majority of use-cases the directory will be a DNS
# configuration which is manually updated by the administrator, so
# they will want to trigger generation and destruction of ephemeral
# keys manually.
#
# A straightforward impl would be to read commands via stdin, such as:
#
# > add port # could optionally specify port, but recommendation is don't
# Added port: 44717
# > add key
# Added key: l7lMWoIKQO3dCfXzZG87eKJSh7s9_nhlx2JC_NAliSE
#
# In another terminal:
# $ mltcertify -k /etc/mlt-long-term.key [ip] [port] [public_key]
# ...outputs the signed certificate, which we upload to DNS server...
#
# [some time passes, everyone now using the new config]
# > rem_port [the_previous_port]
# > rem_key [the_previous_public_key]
# Note that this leaves the option open for administrators to change
# the key without changing the port. I fully recommend administrators
# change the port as often as possible (ie. with each rekey) and make
# it as random as possible (ie. bind to 0), but this might be useful
# if it's hard to add new openings on a firewall.
# Is it possible for a toaster to generate the same C' as another one?
# If so, is it sensible then to rely on the tunid to not be the same?
# We could rely on (ip, port) being different, but don't want to.
# Need to define a policy on undecryptable packets. With unseen tunid,
# this may just mean the initiating tunnel packet was delayed, or it
# could be junk.
# Like in CurveCP, the client should send all zeroes for the solution
# part on first packet to be sure it will be processed. Normally it
# won't matter, but during DOS the server must drop any smaller
# packets to prevent the quiz from providing amplification.
# Initial window must be short enough to prevent amplification.
# Server must not keep hold of stale ephemeral keys for the sake of a
# strange client, if the client doesn't pick up the new server keys it
# should just be dropped.
# The IP and port mobility means there's room for a man on the side to
# steer the server's packets away from the legitimate source IP, by
# sending the legitimate packet with a spoofed IP and port.
#
# Having a client RPC command like 0_myIpPortIs(IP, PORT) would solve
# this problem, the encryption ensures it's truth. Whenever the client
# thinks it has changed IP, it can resend that RPC proactively, or the
# server could request it.
# Need to talk about packet sizes - same as CurveCP?
# What makes up the remaining 16 bytes of the 24 byte nonce? CurveCP's
# client and server-specific prefixes seem like a good idea.
# Would be quite neat to reduce fingerprinting further by generating
# the nonces randomly with a 32 bit generator using something like the
# following. It's worth 0 risk of repetition and is a non-priority
# however. After this, only the TID would be non-random, and that
# would be non-changing (aside from IP and port of course).
#
# http://static.usenix.org/event/usenix99/full_papers/deraadt/deraadt_html/node17.html
# http://www.cs.ucdavis.edu/~rogaway/papers/subset.pdf
# The server probably shouldn't randomise the port it sends from
# because a NAT may only allow incoming packets from the port that was
# addressed previously. In any case the administrator would likely
# firewall to only send from a specific port.
# TODO: need to force rekey sometime before (2**32-1) packets sent in
# either direction, which is just 4TB with 1KB packets.
"""
"On the wire this is encoded as c, f, a0, a1, ..."
This could be more helpful...
"In contrast to byte-oriented protocols"
Suggests basic text.
so, eg.
data = "0, create, 1, NORMAL"
But Table 2 of the earlier paper shows the first RPC's Connection ID
as taking 4 bytes, which suggests a byte-oriented integer (plain text
needs just 1 byte for char '0', and maybe one for a separator).
For now I'll put the 4 byte thing down to error, and adopt '\n' as RPC
separator, '\t' as RPC field separator, and an ability for a
connection RPC to use the body remainder verbatim.
Alternative approaches to sending verbatim data would be to base64
encode it (yuck), or to prefix it with a length param (not so bad), or
to make it a string literal with backslash escapes (may be simpler for
small strings). Or append whitespace to literal newlines, like multi
line headers, but that's not as helpful
"""
# Not MLT related, but a neat feature for a proxy in general would be
# the ability to add X-Forwarded-For headers to HTTP streams and
# Received headers to SMTP messages.
# The client probably wouldn't want to send each packet from a new
# port because it would create tons of NAT port mappings. But it could
# do so at a rate that creates a sensible number of port
# mappings. This should be calculated by how many users behind the
# same NAT might want to contact the same MLT service in a 2 minutes
# (standard port timeout?) stretch. 10,000 users would give 6ish ports
# each. With 2mins/6 = 20s per port. That could instead be 10-30s with
# some random distribution. This maths completely breaks down if the
# timeout is longer or number of users larger.
#
# I shouldn't take this *too* seriously because of course the client
# is still talking to the same server host,port and the client's
# sending port is largely irrelevant.
#
# However, bearing in mind that the client is normally behind a NAT
# with other users, randomly using between 1 and 5 ports will make it
# hard to determine exactly how many users are active at any time
# (provided nextTid is used sufficiently to keep the tunnels
# distinct between ports).
# QUIC has a neat idea to make it so that one connection losing a
# packet won't inhibit any other connection from proceeding. Losing a
# packet on connection 0 would still need to block all other
# connections being processed. I'm not sure exactly how it would work,
# but it seems each connection would need their own seq and ack
# numbers.
#
# The MLT paper suggests it's a feature that connection data is
# shared, but IMO this is misguided - it's a feature that the
# windowSize is shared between connections, but this doesn't mean
# individual connections couldn't have their own seq & ack.
class BackendTunnelClass(mltpipe.MltPacketSend, mltpipe.RpcParse,
                         mltpipe.MltBytes, mltpipe.GetBytes, mltpipe.Log):
    """Server-side state for a single client tunnel.

    Tracks crypto parameters (server secret, client ephemeral public
    key), sequencing (tunid, seq/ack numbers) and the TCP sockets
    opened to the backend service, keyed by connection id.
    NOTE(review): exact packet/nonce semantics come from the mltpipe
    mixins, which are not visible here.
    """
    def __init__(self, our_secret, eph_s, tunid_n, seq_n, client_sock):
        super().__init__()
        # our_secret: server's long-term secret key.
        # eph_s: client's ephemeral public key identifying this tunnel.
        self.our_secret = our_secret
        self.their_public = eph_s
        self.tunid_n = tunid_n
        # The next sequence number we expect from the client.
        self.ack_num = seq_n
        self.sock = client_sock
        self.nonce_n = self.nonce_64('server')
        self.seq_n = self.seq_32()
        # conn_id -> backend TCP socket for this tunnel.
        self.__opened_connections = {}
    def get_opened_connections(self):
        return self.__opened_connections
    def has_opened_connection(self, n):
        return n in self.get_opened_connections()
    def remove_opened_connection(self, n):
        # Half-close towards the backend before fully closing.
        self.get_opened_connections()[n].shutdown(socket.SHUT_WR)
        self.get_opened_connections()[n].close()
        del self.get_opened_connections()[n]
    def refuse_connection(self, n):
        # Tell the client the backend refused connection n.
        self.send_rpc(self.rpc(0, 'refuse', n))
    def add_opened_connection(self, n, backend_sock):
        self.get_opened_connections()[n] = backend_sock
        # Acknowledge the successful open on control connection 0.
        self.send_rpc(self.rpc(0, 'ack', n))
    def seen_seq(self, seq_n):
        # Record that seq_n was processed; expect seq_n + 1 next.
        self.ack_num = seq_n + 1
    def get_expected_seq(self):
        return self.ack_num
    def set_expected_tunid(self, n):
        self.tunid_n = n
    def get_expected_tunid(self):
        return self.tunid_n
    def set_client_addr(self, client_addr):
        # Follow the client if its (ip, port) moved (IP mobility).
        self.dest_ip, self.dest_port = client_addr
    def backend_handle(self, conn_id, dest_data):
        """Relay backend data for conn_id to the client, or close on EOF."""
        if not dest_data:
            # Can we actually remove it so soon? Probably want to wait
            # for all acks to our previous packets before we consider
            # hands washed.
            self.send_rpc(self.rpc(0, 'close', conn_id))
            self.remove_opened_connection(conn_id)
            return
        # Chunk into 512-byte serviceResponse RPCs.
        for i in range(math.ceil(len(dest_data) / 512)):
            chunk = dest_data[i*512:(i+1)*512]
            self.send_rpc(self.rpc_literal(conn_id, 'serviceResponse', chunk))
    def backend_receive(self, backend_sock):
        """Read from a ready backend socket and forward to the client."""
        # Reverse-map the socket to its connection id.
        for conn_id in self.get_opened_connections():
            if self.get_opened_connections()[conn_id] == backend_sock:
                break
        else:
            self.log_info('ERROR backend sock not associated with a connection!')
            return
        dest_data = backend_sock.recv(2**16)
        self.backend_handle(conn_id, dest_data)
class ServerCommands:
    """Line-oriented administrative command interface read from stdin."""

    def handle_command(self):
        """Drain stdin (non-blocking) and process each complete line.

        Returns:
            False when the daemon should shut down (EOF on stdin, or an
            "exit"/"quit" command); True otherwise. After processing,
            a fresh "> " prompt is written to stderr.
        """
        cs = sys.stdin.read()
        if not cs:
            # EOF on stdin is treated as a request to exit.
            print("exit", file=sys.stderr)
            return False
        for line in cs.split('\n'):
            if not line:
                continue
            if line in ("exit", "quit"):
                return False
            elif "help" == line:
                print("No commands implemented yet", file=sys.stderr)
            else:
                # Fixed typo in the user-facing message ("Comand").
                print("Command not recognised", file=sys.stderr)
        print("> ", end='', file=sys.stderr)
        sys.stderr.flush()
        return True
class MainClass(mltpipe.RpcParse, mltpipe.MltPacketParse, mltpipe.GetBytes, mltpipe.Log, ServerCommands):
    """Server half of the MLT tunnel daemon.

    Owns the UDP listening socket, demultiplexes incoming packets into
    per-client BackendTunnelClass instances (keyed by the client's
    ephemeral public key) and relays decrypted connection data to the
    backend TCP service at (dest_ip, dest_port).
    """

    def __init__(self):
        super().__init__()
        # client ephemeral public key -> BackendTunnelClass
        self.__tunnels = {}

    def get_tunnels(self):
        """Return the mapping of client ephemeral keys to tunnels."""
        return self.__tunnels

    def add_tunnel(self, eph_s, tunnel):
        """Register *tunnel* under the client ephemeral key *eph_s*."""
        self.get_tunnels()[eph_s] = tunnel

    def remove_tunnel(self, tunnel):
        """Forget every key that maps to *tunnel*.

        Iterates over a snapshot of the keys: the original code deleted
        entries while iterating the live dict, which raises
        RuntimeError in Python 3.
        """
        for eph_s in list(self.get_tunnels()):
            if self.get_tunnels()[eph_s] == tunnel:
                del self.get_tunnels()[eph_s]

    def has_tunnel(self, eph_s):
        """Return True when a tunnel exists for ephemeral key *eph_s*."""
        return eph_s in self.get_tunnels()

    def get_tunnel(self, eph_s):
        """Return the tunnel for *eph_s*, or None when unknown."""
        return self.get_tunnels()[eph_s] if self.has_tunnel(eph_s) else None

    def receive_packet(self, client_sock):
        """Read one datagram, validate it, and dispatch its RPCs.

        Always returns None; a None return does not distinguish a
        processed packet from a dropped one.
        """
        data, client_addr = client_sock.recvfrom(2**11)
        if len(data) > 2**10:
            self.log_info('dropping, packet is rudely large')
            return None
        # TODO: quiz if needed first, but drop instead if that would
        # result in an amplification.
        parsed = self.parse_packet(data, 'server')
        if not parsed:
            return None
        tunid_n, nonce_s, eph_s, sol_s, using_eph_s, seq_n, ack_n, rpc_data_s = parsed
        rpcs_l = self.parse_rpcs(rpc_data_s)
        if rpcs_l is None:
            # Fixed: was a bare log_info(...) call, i.e. a NameError.
            self.log_info('dropped, looks like RPCs failed to parse')
            return None
        if not self.has_tunnel(using_eph_s):
            self.add_tunnel(using_eph_s, BackendTunnelClass(self.our_secret, using_eph_s, tunid_n, seq_n, client_sock))
        tunnel = self.get_tunnel(using_eph_s)
        if tunnel.get_expected_seq() != seq_n:
            # TODO: implement some reordering so we don't need to drop
            self.log_info('dropping, not expecting that seq')
            return None
        if tunnel.get_expected_tunid() != tunid_n:
            self.log_info('dropping, this is not the current tunid')
            return None
        tunnel.seen_seq(seq_n)
        tunnel.set_client_addr(client_addr)
        # I'm taking any failure of RPC calls to mean ignore rest of
        # the packet. This is questionable since the next packet would
        # still be processed.
        # TODO: Would be more sensible to close everything on a
        # failure.
        for rpc in rpcs_l:
            # TODO: fail well if it doesn't parse as int
            if int(rpc[0]) == 0:
                if b'create' == rpc[1]:
                    conn_id = int(rpc[2])  # TODO: check failure
                    if 0 == conn_id:
                        self.log_info('tried to create connection 0!')
                        break
                    if tunnel.has_opened_connection(conn_id):
                        self.log_info('tried to create an already open connection')
                        break
                    backend_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    try:
                        # FIXME: we should be connecting in another
                        # thread so we can still process other
                        # tunnels. But need to also not process this
                        # tunnel further until that completes/fails.
                        backend_sock.connect((self.dest_ip, self.dest_port))
                    except socket.error as e:
                        if 111 == e.errno:  # ECONNREFUSED
                            tunnel.refuse_connection(conn_id)
                        break
                    tunnel.add_opened_connection(conn_id, backend_sock)
                elif b'close' == rpc[1]:
                    # Fixed: the connection id lives in rpc[2] (as in
                    # 'create'); rpc[0] is always 0 on this branch, so
                    # the old int(rpc[0]) tore the whole tunnel down on
                    # every close RPC.
                    conn_id = int(rpc[2])
                    if 0 == conn_id:
                        # Closing connection 0 tears down the tunnel.
                        # Snapshot the keys: remove_opened_connection
                        # mutates the dict while we iterate.
                        for open_conn in list(tunnel.get_opened_connections()):
                            tunnel.remove_opened_connection(open_conn)
                        self.remove_tunnel(tunnel)
                        break
                    elif tunnel.has_opened_connection(conn_id):
                        tunnel.remove_opened_connection(conn_id)
                    else:
                        self.log_info('RPC tried to close a non-open connection...')
                        break
            else:
                conn_id = int(rpc[0])
                if not tunnel.has_opened_connection(conn_id):
                    self.log_info("dropped RPC, connection doesn't seem to be open")
                    break
                if b'serviceRequest' == rpc[1]:
                    tunnel.get_opened_connections()[conn_id].sendall(rpc[2])

    def bind(self, ip, port):
        """Create and return a UDP socket bound to (*ip*, *port*)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((ip, port))
        return sock

    def main(self):
        """Parse "-d -s '[ip]:port' -t '[ip]:port'" args and run the loop."""
        if sys.argv[1] != '-d':
            sys.exit(1)
        if sys.argv[2] != '-s':
            sys.exit(1)
        # Addresses arrive as '[ip]:port'; [1:-1] strips the brackets.
        self.source_ip = sys.argv[3].split(':')[0][1:-1]
        self.source_port = int(sys.argv[3].split(':')[1])
        if sys.argv[4] != '-t':
            sys.exit(1)
        self.dest_ip = sys.argv[5].split(':')[0][1:-1]
        self.dest_port = int(sys.argv[5].split(':')[1])
        (self.server_public, self.our_secret) = pysodium.crypto_box_keypair()
        client_sock = self.bind(self.source_ip, self.source_port)
        # In future the upload-to-E service will read the certificates
        # from S's stdout one-per-line, uploading when a new one is
        # spotted. S will continue to understand the old configuration
        # during a grace period, for those clients who could not
        # possibly have known of the new configuration.
        # This would be some minutes for the cert to be uploaded to E,
        # plus the cache TTL of E, which might range from a couple of
        # minutes to a day or more. The rekey period should be chosen
        # with the TTL in mind.
        print("Added key: " + self.bytes_b64safe(self.server_public).decode('ascii')[:-1])
        print("Added port: " + str(client_sock.getsockname()[1]))
        sys.stdout.flush()
        print("Type commands to administer, or \"help\" for more information.", file=sys.stderr)
        print("> ", end='', file=sys.stderr)
        sys.stderr.flush()
        # Non-blocking stdin lets handle_command() drain whatever is
        # available without stalling the select loop.
        fd = sys.stdin.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        try:
            while True:
                try:
                    backend_socks = []
                    for tunnel in self.get_tunnels().values():
                        backend_socks += tunnel.get_opened_connections().values()
                    (rready, wready, xready) = select.select([sys.stdin] + [client_sock] + backend_socks, [], [])
                    if sys.stdin in rready:
                        if not self.handle_command():
                            break
                    elif client_sock in rready:
                        self.receive_packet(client_sock)
                    else:
                        # A backend socket is readable: find its tunnel.
                        for r in rready:
                            for tunnel in self.get_tunnels().values():
                                if r in tunnel.get_opened_connections().values():
                                    tunnel.backend_receive(r)
                except KeyboardInterrupt:
                    break
        finally:
            client_sock.close()
# Script entry point: construct the daemon and run its event loop.
if __name__ == '__main__':
    MainClass().main()
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.common import constants as q_const
from neutron.common import ipv6_utils as ipv6
from neutron.common import utils
from neutron.db import allowedaddresspairs_db as addr_pair
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
# Key under which a rule's IP-prefix filter is stored in the rule dicts
# sent to agents, keyed by rule direction.
DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
                       'egress': 'dest_ip_prefix'}
# IP version -> (source port, destination port, ethertype), unpacked by
# _add_ingress_dhcp_rule() to build ingress DHCP/DHCPv6 rules.
DHCP_RULE_PORT = {4: (67, 68, q_const.IPv4), 6: (547, 546, q_const.IPv6)}
class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
    """Mixin class to add agent-based security group implementation."""
    def get_port_from_device(self, context, device):
        """Get port dict from device name on an agent.
        Subclass must provide this method or get_ports_from_devices.
        :param device: device name which identifies a port on the agent side.
        What is specified in "device" depends on a plugin agent implementation.
        For example, it is a port ID in OVS agent and netdev name in Linux
        Bridge agent.
        :return: port dict returned by DB plugin get_port(). In addition,
        it must contain the following fields in the port dict returned.
        - device
        - security_groups
        - security_group_rules,
        - security_group_source_groups
        - fixed_ips
        """
        raise NotImplementedError(_("%s must implement get_port_from_device "
                                    "or get_ports_from_devices.")
                                  % self.__class__.__name__)
    def get_ports_from_devices(self, context, devices):
        """Bulk method of get_port_from_device.
        Subclasses may override this to provide better performance for DB
        queries, backend calls, etc.
        """
        return [self.get_port_from_device(context, device)
                for device in devices]
    def create_security_group_rule(self, context, security_group_rule):
        """Create a rule and notify agents that its group was updated."""
        rule = super(SecurityGroupServerRpcMixin,
                     self).create_security_group_rule(context,
                                                      security_group_rule)
        sgids = [rule['security_group_id']]
        self.notifier.security_groups_rule_updated(context, sgids)
        return rule
    def create_security_group_rule_bulk(self, context, security_group_rules):
        """Create several rules and notify each affected group once."""
        rules = super(SecurityGroupServerRpcMixin,
                      self).create_security_group_rule_bulk_native(
                          context, security_group_rules)
        # De-duplicate group ids so each group is notified only once.
        sgids = set([r['security_group_id'] for r in rules])
        self.notifier.security_groups_rule_updated(context, list(sgids))
        return rules
    def delete_security_group_rule(self, context, sgrid):
        """Delete a rule and notify agents that its group was updated."""
        # Fetch the rule first; its group id is needed after deletion.
        rule = self.get_security_group_rule(context, sgrid)
        super(SecurityGroupServerRpcMixin,
              self).delete_security_group_rule(context, sgrid)
        self.notifier.security_groups_rule_updated(context,
                                                   [rule['security_group_id']])
    def update_security_group_on_port(self, context, id, port,
                                      original_port, updated_port):
        """Update security groups on port.
        This method returns a flag which indicates request notification
        is required and does not perform notification itself.
        It is because another changes for the port may require notification.
        """
        need_notify = False
        port_updates = port['port']
        if (ext_sg.SECURITYGROUPS in port_updates and
            not utils.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                port_updates[ext_sg.SECURITYGROUPS])):
            # delete the port binding and read it with the new rules
            port_updates[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
            self._delete_port_security_group_bindings(context, id)
            self._process_port_create_security_group(
                context,
                updated_port,
                port_updates[ext_sg.SECURITYGROUPS])
            need_notify = True
        else:
            # No change requested: carry the existing groups forward.
            updated_port[ext_sg.SECURITYGROUPS] = (
                original_port[ext_sg.SECURITYGROUPS])
        return need_notify
    def check_and_notify_security_group_member_changed(
            self, context, original_port, updated_port):
        """Notify agents when SG membership or fixed IPs of a port change."""
        sg_change = not utils.compare_elements(
            original_port.get(ext_sg.SECURITYGROUPS),
            updated_port.get(ext_sg.SECURITYGROUPS))
        if sg_change:
            self.notify_security_groups_member_updated_bulk(
                context, [original_port, updated_port])
        elif original_port['fixed_ips'] != updated_port['fixed_ips']:
            self.notify_security_groups_member_updated(context, updated_port)
    def is_security_group_member_updated(self, context,
                                         original_port, updated_port):
        """Check security group member updated or not.
        This method returns a flag which indicates request notification
        is required and does not perform notification itself.
        It is because another changes for the port may require notification.
        """
        need_notify = False
        if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
            original_port['mac_address'] != updated_port['mac_address'] or
            not utils.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                updated_port.get(ext_sg.SECURITYGROUPS))):
            need_notify = True
        return need_notify
    def notify_security_groups_member_updated_bulk(self, context, ports):
        """Notify update event of security group members for ports.
        The agent setups the iptables rule to allow
        ingress packet from the dhcp server (as a part of provider rules),
        so we need to notify an update of dhcp server ip
        address to the plugin agent.
        security_groups_provider_updated() just notifies that an event
        occurs and the plugin agent fetches the update provider
        rule in the other RPC call (security_group_rules_for_devices).
        """
        sg_provider_updated_networks = set()
        sec_groups = set()
        for port in ports:
            if port['device_owner'] == q_const.DEVICE_OWNER_DHCP:
                sg_provider_updated_networks.add(
                    port['network_id'])
            # For IPv6, provider rule need to be updated in case router
            # interface is created or updated after VM port is created.
            elif port['device_owner'] == q_const.DEVICE_OWNER_ROUTER_INTF:
                if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6
                       for fixed_ip in port['fixed_ips']):
                    sg_provider_updated_networks.add(
                        port['network_id'])
            else:
                sec_groups |= set(port.get(ext_sg.SECURITYGROUPS))
        if sg_provider_updated_networks:
            # Provider-rule change: notify every port on the affected networks.
            ports_query = context.session.query(models_v2.Port.id).filter(
                models_v2.Port.network_id.in_(
                    sg_provider_updated_networks)).all()
            ports_to_update = [p.id for p in ports_query]
            self.notifier.security_groups_provider_updated(
                context, ports_to_update)
        if sec_groups:
            self.notifier.security_groups_member_updated(
                context, list(sec_groups))
    def notify_security_groups_member_updated(self, context, port):
        """Single-port convenience wrapper for the bulk notification."""
        self.notify_security_groups_member_updated_bulk(context, [port])
    def security_group_info_for_ports(self, context, ports):
        """Return SG rules (grouped by group id) and member IPs for ports."""
        sg_info = {'devices': ports,
                   'security_groups': {},
                   'sg_member_ips': {}}
        rules_in_db = self._select_rules_for_ports(context, ports)
        remote_security_group_info = {}
        for (port_id, rule_in_db) in rules_in_db:
            remote_gid = rule_in_db.get('remote_group_id')
            security_group_id = rule_in_db.get('security_group_id')
            ethertype = rule_in_db['ethertype']
            if ('security_group_source_groups'
                not in sg_info['devices'][port_id]):
                sg_info['devices'][port_id][
                    'security_group_source_groups'] = []
            if remote_gid:
                # Record the remote group on the port and prepare the
                # per-ethertype member-IP bucket for it.
                if (remote_gid
                    not in sg_info['devices'][port_id][
                        'security_group_source_groups']):
                    sg_info['devices'][port_id][
                        'security_group_source_groups'].append(remote_gid)
                if remote_gid not in remote_security_group_info:
                    remote_security_group_info[remote_gid] = {}
                if ethertype not in remote_security_group_info[remote_gid]:
                    # this set will be serialized into a list by rpc code
                    remote_security_group_info[remote_gid][ethertype] = set()
            direction = rule_in_db['direction']
            rule_dict = {
                'direction': direction,
                'ethertype': ethertype}
            for key in ('protocol', 'port_range_min', 'port_range_max',
                        'remote_ip_prefix', 'remote_group_id'):
                if rule_in_db.get(key):
                    if key == 'remote_ip_prefix':
                        # Store the prefix under the direction-specific key.
                        direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
                        rule_dict[direction_ip_prefix] = rule_in_db[key]
                        continue
                    rule_dict[key] = rule_in_db[key]
            if security_group_id not in sg_info['security_groups']:
                sg_info['security_groups'][security_group_id] = []
            if rule_dict not in sg_info['security_groups'][security_group_id]:
                sg_info['security_groups'][security_group_id].append(
                    rule_dict)
        sg_info['sg_member_ips'] = remote_security_group_info
        # the provider rules do not belong to any security group, so these
        # rules still reside in sg_info['devices'] [port_id]
        self._apply_provider_rule(context, sg_info['devices'])
        return self._get_security_group_member_ips(context, sg_info)
    def _get_security_group_member_ips(self, context, sg_info):
        """Fill sg_member_ips buckets with member IPs, keyed by ethertype."""
        ips = self._select_ips_for_remote_group(
            context, sg_info['sg_member_ips'].keys())
        for sg_id, member_ips in ips.items():
            for ip in member_ips:
                ethertype = 'IPv%d' % netaddr.IPNetwork(ip).version
                if ethertype in sg_info['sg_member_ips'][sg_id]:
                    sg_info['sg_member_ips'][sg_id][ethertype].add(ip)
        return sg_info
    def _select_rules_for_ports(self, context, ports):
        """Return (port_id, SecurityGroupRule) rows for the given ports."""
        if not ports:
            return []
        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
        sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
        sgr_sgid = sg_db.SecurityGroupRule.security_group_id
        query = context.session.query(sg_binding_port,
                                      sg_db.SecurityGroupRule)
        query = query.join(sg_db.SecurityGroupRule,
                           sgr_sgid == sg_binding_sgid)
        query = query.filter(sg_binding_port.in_(ports.keys()))
        return query.all()
    def _select_ips_for_remote_group(self, context, remote_group_ids):
        """Return {group_id: set of member IPs incl. allowed address pairs}."""
        ips_by_group = {}
        if not remote_group_ids:
            return ips_by_group
        for remote_group_id in remote_group_ids:
            ips_by_group[remote_group_id] = set()
        ip_port = models_v2.IPAllocation.port_id
        sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
        sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
        # Join the security group binding table directly to the IP allocation
        # table instead of via the Port table to skip an unnecessary
        # intermediary.
        query = context.session.query(sg_binding_sgid,
                                      models_v2.IPAllocation.ip_address,
                                      addr_pair.AllowedAddressPair.ip_address)
        query = query.join(models_v2.IPAllocation,
                           ip_port == sg_binding_port)
        # Outerjoin because address pairs may be null and we still want the
        # IP for the port.
        query = query.outerjoin(
            addr_pair.AllowedAddressPair,
            sg_binding_port == addr_pair.AllowedAddressPair.port_id)
        query = query.filter(sg_binding_sgid.in_(remote_group_ids))
        # Each allowed address pair IP record for a port beyond the 1st
        # will have a duplicate regular IP in the query response since
        # the relationship is 1-to-many. Dedup with a set
        for security_group_id, ip_address, allowed_addr_ip in query:
            ips_by_group[security_group_id].add(ip_address)
            if allowed_addr_ip:
                ips_by_group[security_group_id].add(allowed_addr_ip)
        return ips_by_group
    def _select_remote_group_ids(self, ports):
        """Collect remote_group_id values referenced by the ports' rules."""
        remote_group_ids = []
        for port in ports.values():
            for rule in port.get('security_group_rules'):
                remote_group_id = rule.get('remote_group_id')
                if remote_group_id:
                    remote_group_ids.append(remote_group_id)
        return remote_group_ids
    def _select_network_ids(self, ports):
        """Return the set of network ids the given ports belong to."""
        return set((port['network_id'] for port in ports.values()))
    def _select_dhcp_ips_for_network_ids(self, context, network_ids):
        """Return {network_id: [DHCP server IPs]} for the given networks."""
        if not network_ids:
            return {}
        query = context.session.query(models_v2.Port.mac_address,
                                      models_v2.Port.network_id,
                                      models_v2.IPAllocation.ip_address)
        query = query.join(models_v2.IPAllocation)
        query = query.filter(models_v2.Port.network_id.in_(network_ids))
        owner = q_const.DEVICE_OWNER_DHCP
        query = query.filter(models_v2.Port.device_owner == owner)
        ips = {}
        for network_id in network_ids:
            ips[network_id] = []
        for mac_address, network_id, ip in query:
            # Non-link-local IPv6 addresses are replaced by the EUI-64
            # link-local address derived from the DHCP port's MAC.
            if (netaddr.IPAddress(ip).version == 6
                and not netaddr.IPAddress(ip).is_link_local()):
                ip = str(ipv6.get_ipv6_addr_by_EUI64(q_const.IPV6_LLA_PREFIX,
                    mac_address))
            if ip not in ips[network_id]:
                ips[network_id].append(ip)
        return ips
    def _select_ra_ips_for_network_ids(self, context, network_ids):
        """Select IP addresses to allow sending router advertisement from.
        If the OpenStack managed radvd process sends an RA, get link local
        address of gateway and allow RA from this Link Local address.
        The gateway port link local address will only be obtained
        when router is created before VM instance is booted and
        subnet is attached to router.
        If OpenStack doesn't send RA, allow RA from gateway IP.
        Currently, the gateway IP needs to be link local to be able
        to send RA to VM.
        """
        if not network_ids:
            return {}
        ips = {}
        for network_id in network_ids:
            ips[network_id] = set([])
        query = context.session.query(models_v2.Subnet)
        subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids))
        for subnet in subnets:
            gateway_ip = subnet['gateway_ip']
            # Only IPv6 subnets with a gateway are relevant for RA.
            if subnet['ip_version'] != 6 or not gateway_ip:
                continue
            if not netaddr.IPAddress(gateway_ip).is_link_local():
                if subnet['ipv6_ra_mode']:
                    gateway_ip = self._get_lla_gateway_ip_for_subnet(context,
                                                                     subnet)
                else:
                    # TODO(xuhanp):Figure out how to allow gateway IP from
                    # existing device to be global address and figure out the
                    # link local address by other method.
                    continue
            if gateway_ip:
                ips[subnet['network_id']].add(gateway_ip)
        return ips
    def _get_lla_gateway_ip_for_subnet(self, context, subnet):
        """Return the EUI-64 link-local IP of the subnet's gateway port.

        Looks up the router-interface port holding the subnet's gateway IP
        and derives the link-local address from its MAC; returns None if no
        single matching port exists.
        """
        query = context.session.query(models_v2.Port.mac_address)
        query = query.join(models_v2.IPAllocation)
        query = query.filter(
            models_v2.IPAllocation.subnet_id == subnet['id'])
        query = query.filter(
            models_v2.IPAllocation.ip_address == subnet['gateway_ip'])
        query = query.filter(
            models_v2.Port.device_owner.in_(q_const.ROUTER_INTERFACE_OWNERS))
        try:
            mac_address = query.one()[0]
        except (exc.NoResultFound, exc.MultipleResultsFound):
            LOG.warn(_LW('No valid gateway port on subnet %s is '
                         'found for IPv6 RA'), subnet['id'])
            return
        lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
            q_const.IPV6_LLA_PREFIX,
            mac_address))
        return lla_ip
    def _convert_remote_group_id_to_ip_prefix(self, context, ports):
        """Expand remote_group_id rules into per-member-IP prefix rules."""
        remote_group_ids = self._select_remote_group_ids(ports)
        ips = self._select_ips_for_remote_group(context, remote_group_ids)
        for port in ports.values():
            updated_rule = []
            for rule in port.get('security_group_rules'):
                remote_group_id = rule.get('remote_group_id')
                direction = rule.get('direction')
                direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
                if not remote_group_id:
                    # Plain rule: keep as-is.
                    updated_rule.append(rule)
                    continue
                port['security_group_source_groups'].append(remote_group_id)
                base_rule = rule
                for ip in ips[remote_group_id]:
                    # Skip the port's own IPs and IPs of another ethertype.
                    if ip in port.get('fixed_ips', []):
                        continue
                    ip_rule = base_rule.copy()
                    version = netaddr.IPNetwork(ip).version
                    ethertype = 'IPv%s' % version
                    if base_rule['ethertype'] != ethertype:
                        continue
                    ip_rule[direction_ip_prefix] = str(
                        netaddr.IPNetwork(ip).cidr)
                    updated_rule.append(ip_rule)
            port['security_group_rules'] = updated_rule
        return ports
    def _add_ingress_dhcp_rule(self, port, ips):
        """Append ingress UDP rules allowing the network's DHCP servers."""
        dhcp_ips = ips.get(port['network_id'])
        for dhcp_ip in dhcp_ips:
            source_port, dest_port, ethertype = DHCP_RULE_PORT[
                netaddr.IPAddress(dhcp_ip).version]
            dhcp_rule = {'direction': 'ingress',
                         'ethertype': ethertype,
                         'protocol': 'udp',
                         'port_range_min': dest_port,
                         'port_range_max': dest_port,
                         'source_port_range_min': source_port,
                         'source_port_range_max': source_port,
                         'source_ip_prefix': dhcp_ip}
            port['security_group_rules'].append(dhcp_rule)
    def _add_ingress_ra_rule(self, port, ips):
        """Append ingress ICMPv6 RA rules for the given source IPs."""
        ra_ips = ips.get(port['network_id'])
        for ra_ip in ra_ips:
            ra_rule = {'direction': 'ingress',
                       'ethertype': q_const.IPv6,
                       'protocol': q_const.PROTO_NAME_ICMP_V6,
                       'source_ip_prefix': ra_ip,
                       'source_port_range_min': q_const.ICMPV6_TYPE_RA}
            port['security_group_rules'].append(ra_rule)
    def _apply_provider_rule(self, context, ports):
        """Add provider (RA and DHCP) rules to every port dict."""
        network_ids = self._select_network_ids(ports)
        ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids)
        ips_ra = self._select_ra_ips_for_network_ids(context, network_ids)
        for port in ports.values():
            self._add_ingress_ra_rule(port, ips_ra)
            self._add_ingress_dhcp_rule(port, ips_dhcp)
    def security_group_rules_for_ports(self, context, ports):
        """Attach each port's SG rules (remote groups expanded) to the dicts."""
        rules_in_db = self._select_rules_for_ports(context, ports)
        for (port_id, rule_in_db) in rules_in_db:
            port = ports[port_id]
            direction = rule_in_db['direction']
            rule_dict = {
                'security_group_id': rule_in_db['security_group_id'],
                'direction': direction,
                'ethertype': rule_in_db['ethertype'],
            }
            for key in ('protocol', 'port_range_min', 'port_range_max',
                        'remote_ip_prefix', 'remote_group_id'):
                if rule_in_db.get(key):
                    if key == 'remote_ip_prefix':
                        # Store the prefix under the direction-specific key.
                        direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
                        rule_dict[direction_ip_prefix] = rule_in_db[key]
                        continue
                    rule_dict[key] = rule_in_db[key]
            port['security_group_rules'].append(rule_dict)
        self._apply_provider_rule(context, ports)
        return self._convert_remote_group_id_to_ip_prefix(context, ports)
|
|
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
unicode_literals,
)
import hypothesis
import hypothesis.strategies
import mock
import os.path
import pytest
from datetime import timedelta
from hypothesis_regex import regex
from runwith import (
main,
__main__,
timespan,
SIGKILL,
)
try:
from shlex import quote
except ImportError:
from pipes import quote
def unused(*args):
    """Accept and discard any positional arguments.

    Used to "reference" imported names so linters and coverage count them.
    """
    del args
# Reference the __main__ submodule so its import is tracked by coverage.
unused(__main__)
# Durations expressed in seconds.
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
def seconds_to_timespan(x):
    """Render a duration in seconds as a compact timespan string.

    Whole weeks/days/hours/minutes/seconds are emitted largest-unit
    first, and any sub-second remainder is emitted as milliseconds,
    e.g. ``90`` -> ``'1m30s'``, ``0.5`` -> ``'500ms'``, ``0`` -> ``''``.
    """
    pieces = []
    for suffix, span in (('w', WEEK), ('d', DAY), ('h', HOUR),
                         ('m', MINUTE), ('s', SECOND)):
        count, x = divmod(x, span)
        if count:
            pieces.append('%d%s' % (count, suffix))
    if x > 0:
        # Sub-second remainder.
        pieces.append('%dms' % (1000.0 * x))
    return ''.join(pieces)
@pytest.mark.parametrize('value,expected', [
    ('1w', timedelta(weeks=1)),
    ('7d', timedelta(days=7)),
    ('2h', timedelta(hours=2)),
    ('.1m', timedelta(minutes=.1)),
    ('.7s', timedelta(seconds=.7)),
    ('5m30s', timedelta(minutes=5, seconds=30)),
])
def test_timespan(value, expected):
    """timespan() parses each supported unit suffix into a timedelta."""
    assert timespan(value) == expected
@pytest.mark.parametrize('value', [
    '1',
    '123abc',
])
def test_timespan_invalid(value):
    """timespan() rejects malformed input with a descriptive ValueError."""
    with pytest.raises(ValueError) as exc:
        print(timespan(value))
    assert str(exc.value) == ('Invalid time span "%s".' % value)
def test_run_without_args():
    """main([]) exits with status 2 and never spawns a child process."""
    with mock.patch('subprocess.Popen') as popen:
        with pytest.raises(SystemExit) as exc:
            print(main([]))
        assert exc.value.code == 2
    popen.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_implicit_argv(status, command):
    """main() with no argument list falls back to the patched sys.argv."""
    with mock.patch('sys.argv', ['runwith', '--'] + command):
        # Fake child process whose exit code is `status`.
        process = mock.MagicMock()
        process.returncode = status
        process.wait.return_value = process.returncode
        with mock.patch('subprocess.Popen') as popen:
            popen.side_effect = [process]
            assert main() == status
        popen.assert_called_once_with(command)
@hypothesis.given(
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_spawn_failure(command):
    """An OSError from Popen makes main() exit with status 2."""
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = OSError('unknown program')
        with pytest.raises(SystemExit) as exc:
            print(main(['--'] + command))
        assert exc.value.code == 2
    popen.assert_called_once_with(command)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_forward_status(status, command):
    """main() returns the child process's exit status unchanged."""
    # Fake child process whose exit code is `status`.
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['--'] + command) == status
    popen.assert_called_once_with(command)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stdin(tempcwd, status, command):
    """The -i option attaches the named file to the child's stdin."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    # Create the input file that -i will open.
    with open('foo.txt', 'wb') as stream:
        stream.write(b'FOO')
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-i', 'foo.txt', '--'] + command) == status
    popen.assert_called_once_with(command, stdin=mock.ANY)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stdout(tempcwd, status, command):
    """The -o option creates the named file for the child's stdout."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-o', 'foo.txt', '--'] + command) == status
    popen.assert_called_once_with(command, stdout=mock.ANY)
    assert os.path.exists('foo.txt')
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
)
def test_redirect_stderr(tempcwd, status, command):
    """The -e option creates the named file for the child's stderr."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-e', 'foo.txt', '--'] + command) == status
    popen.assert_called_once_with(command, stderr=mock.ANY)
    assert os.path.exists('foo.txt')
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    workdir=regex(r'\w+').map(quote),
)
def test_change_working_directory(tempcwd, status, command, workdir):
    """The -w option is forwarded to Popen as the child's cwd."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-w', workdir, '--'] + command) == status
    popen.assert_called_once_with(command, cwd=workdir)
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_respect_timebox(status, command, timebox):
    """A child that finishes within -t is neither signalled nor terminated."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.side_effect = [process.returncode]
    with mock.patch('subprocess.Popen') as popen:
        popen.side_effect = [process]
        assert main(['-t', timebox, '--'] + command) == status
    popen.assert_called_once_with(command)
    process.wait.assert_called_once_with()
    process.send_signal.assert_not_called()
    process.terminate.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox(status, command, timebox):
    """A child still alive after -t (with -g grace) gets SIGKILL."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    # Watcher thread: alive after the timebox, gone after the kill.
    thread = mock.MagicMock()
    thread.is_alive.side_effect = [True, False]
    thread.join.side_effect = [None, None]
    with mock.patch('threading.Thread') as T:
        T.side_effect = [thread]
        with mock.patch('subprocess.Popen') as P:
            P.side_effect = [process]
            assert main(['-t', timebox, '-g', '2s', '--'] + command) == status
        P.assert_called_once_with(command)
    T.assert_called_once()
    process.send_signal.assert_called_once_with(SIGKILL)
    process.terminate.assert_not_called()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox_no_grace_time(status, command, timebox):
    """Without -g, a child exceeding -t is terminated rather than killed."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    # Watcher thread keeps reporting alive (no grace period to escalate).
    thread = mock.MagicMock()
    thread.is_alive.side_effect = [True, True]
    thread.join.side_effect = [None, None, None]
    with mock.patch('threading.Thread') as T:
        T.side_effect = [thread]
        with mock.patch('subprocess.Popen') as P:
            P.side_effect = [process]
            assert main(['-t', timebox, '--'] + command) == status
        P.assert_called_once_with(command)
    T.assert_called_once()
    process.send_signal.assert_not_called()
    process.terminate.assert_called_once()
@hypothesis.given(
    status=hypothesis.strategies.integers(min_value=-128, max_value=127),
    command=hypothesis.strategies.lists(
        elements=hypothesis.strategies.text(min_size=1),
        min_size=1,
    ),
    timebox=hypothesis.strategies.floats(
        min_value=0.001,  # 1ms
        max_value=31 * DAY,
    ).map(seconds_to_timespan),
)
def test_exceed_timebox_and_grace_time(status, command, timebox):
    """A child outliving both -t and the -g grace gets terminate AND SIGKILL."""
    process = mock.MagicMock()
    process.returncode = status
    process.wait.return_value = process.returncode
    # Watcher thread stays alive through both checks.
    thread = mock.MagicMock()
    thread.is_alive.side_effect = [True, True]
    thread.join.side_effect = [None, None, None]
    with mock.patch('threading.Thread') as T:
        T.side_effect = [thread]
        with mock.patch('subprocess.Popen') as P:
            P.side_effect = [process]
            assert main(['-t', timebox, '-g', '2s', '--'] + command) == status
        P.assert_called_once_with(command)
    T.assert_called_once()
    process.send_signal.assert_called_once_with(SIGKILL)
    process.terminate.assert_called_once()
|
|
# -*- coding: utf-8 -*-
"""
Default Controllers
"""
# Name of this controller; matched against cms_post_module.module in index().
module = "default"
# -----------------------------------------------------------------------------
def call():
    "Call an XMLRPC, JSONRPC or RSS service"
    # If webservices don't use sessions, avoid cluttering up the storage
    #session.forget()
    # Delegate dispatch to web2py's service handler.
    return service()
# -----------------------------------------------------------------------------
def download():
    """Download a file stored in an upload field.

    The first request argument names the stored file (presumably
    "<tablename>.<field>...." — TODO confirm against callers); when it
    is missing, redirect to the index page with an error message.
    """
    try:
        filename = request.args[0]
    except IndexError:
        # No file specified in the URL (was a bare except:, which also
        # hid unrelated errors).
        session.error("Need to specify the file to download!")
        redirect(URL(f="index"))
    tablename = filename.split(".", 1)[0]
    if "_" in tablename:
        # Called for its side effect only: load the model so that
        # response.download() can resolve the owning table/field.
        table = s3db.table(tablename)
    return response.download(request, db)
# =============================================================================
def register_validation(form):
    """Validate the fields in the registration form.

    Checks mobile/home phone numbers against the configured pattern,
    enforces a mandatory mobile number when required by deployment
    settings, and applies the default organisation, if any.
    Errors are recorded on form.errors; nothing is returned.
    """
    # Import and compile once instead of duplicating the import and
    # re.compile() in both phone-number branches (original defect).
    import re
    regex = re.compile(single_phone_number_pattern)
    form_vars = form.vars
    # Mobile Phone
    mobile = form_vars.get("mobile")
    if mobile:
        if not regex.match(mobile):
            form.errors.mobile = T("Invalid phone number")
    elif settings.get_auth_registration_mobile_phone_mandatory():
        form.errors.mobile = T("Phone number is required")
    # Home Phone
    home = form_vars.get("home")
    if home:
        if not regex.match(home):
            form.errors.home = T("Invalid phone number")
    org = settings.get_auth_registration_organisation_id_default()
    if org:
        # Add to default organisation
        form_vars.organisation_id = org
    return
# =============================================================================
def index():
""" Main Home Page """
auth.settings.register_onvalidation = register_validation
auth.configure_user_fields()
page = request.args(0)
if page:
# Go to a custom page
# Arg 1 = function in /private/templates/<template>/controllers.py
# other Args & Vars passed through
controller = "applications.%s.private.templates.%s.controllers" % \
(appname, settings.get_template())
try:
exec("import %s as custom" % controller)
except ImportError:
# No Custom Page available, continue with the default
page = "private/templates/%s/controllers.py" % \
settings.get_template()
current.log.warning("File not loadable",
"%s, %s" % (page, sys.exc_info()[1]))
else:
if "." in page:
# Remove extension
page = page.split(".", 1)[0]
if page in custom.__dict__:
exec ("output = custom.%s()()" % page)
return output
elif page != "login":
raise(HTTP(404, "Function not found: %s()" % page))
else:
output = custom.index()()
return output
elif settings.get_template() != "default":
# Try a Custom Homepage
controller = "applications.%s.private.templates.%s.controllers" % \
(appname, settings.get_template())
try:
exec("import %s as custom" % controller)
except ImportError:
# No Custom Page available, continue with the default
# @ToDo: cache this result in session
current.log.warning("Custom homepage cannot be loaded",
sys.exc_info()[1])
else:
if "index" in custom.__dict__:
output = custom.index()()
return output
# Default Homepage
title = settings.get_system_name()
response.title = title
item = ""
has_module = settings.has_module
if has_module("cms"):
table = s3db.cms_post
ltable = s3db.cms_post_module
query = (ltable.module == module) & \
((ltable.resource == None) | (ltable.resource == "index")) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = db(query).select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
if has_module("cr"):
table = s3db.cr_shelter
SHELTERS = s3.crud_strings["cr_shelter"].title_list
else:
SHELTERS = ""
# Menu Boxes
menu_btns = [#div, label, app, function
["facility", T("Facilities"), "org", "facility"],
["facility", T("Hospitals"), "hms", "hospital"],
["facility", T("Offices"), "org", "office"],
["facility", SHELTERS, "cr", "shelter"],
["facility", T("Warehouses"), "inv", "warehouse"],
["sit", T("Staff"), "hrm", "staff"],
["sit", T("Volunteers"), "vol", "volunteer"],
["sit", T("Incidents"), "irs", "ireport"],
["sit", T("Assessments"), "survey", "series"],
["sit", T("Assets"), "asset", "asset"],
["sit", T("Inventory Items"), "inv", "inv_item"],
#["dec", T("Gap Map"), "project", "gap_map"],
#["dec", T("Gap Report"), "project", "gap_report"],
["dec", T("Requests"), "req", "req"],
["res", T("Projects"), "project", "project"],
["res", T("Commitments"), "req", "commit"],
["res", T("Sent Shipments"), "inv", "send"],
["res", T("Received Shipments"), "inv", "recv"],
]
# Change to (Mitigation)/Preparedness/Response/Recovery?
menu_divs = {"facility": DIV(H3(T("Facilities")),
_id = "facility_box",
_class = "menu_box",
),
"sit": DIV(H3(T("Situation")),
_id = "menu_div_sit",
_class = "menu_div",
),
"dec": DIV(H3(T("Decision")),
_id = "menu_div_dec",
_class = "menu_div",
),
"res": DIV(H3(T("Response")),
_id = "menu_div_res",
_class = "menu_div",
),
}
for div, label, app, function in menu_btns:
if has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A(DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app, function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
appname),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append(A(IMG(_src = "/%s/static/img/map_icon_128.png" % \
appname),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
# Check logged in AND permissions
roles = session.s3.roles
table = s3db.org_organisation
has_permission = auth.s3_has_permission
if AUTHENTICATED in roles and \
has_permission("read", table):
org_items = organisation()
datatable_ajax_source = "/%s/default/organisation.aadata" % appname
s3.actions = None
response.view = "default/index.html"
permission = auth.permission
permission.controller = "org"
permission.function = "site"
permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
if permitted_facilities:
facilities = s3db.org_SiteRepresent().bulk(permitted_facilities,
include_blank=False)
facility_list = [(fac, facilities[fac]) for fac in facilities]
facility_list = sorted(facility_list, key=lambda fac: fac[1])
facility_opts = [OPTION(fac[1], _value=fac[0])
for fac in facility_list]
manage_facility_box = DIV(H3(T("Manage Your Facilities")),
SELECT(_id = "manage_facility_select",
_style = "max-width:360px",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
#_disabled = "disabled",
_id = "manage_facility_btn",
_class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft"
)
s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))})
$('#manage_facility_btn').click(function(){
if ( ($('#manage_facility_btn').attr('href').toString())===S3.Ap.concat('/default/site/None') )
{$("#manage_facility_box").append("<div class='alert alert-error'>%s</div>")
return false}})''' % (T("Please Select a Facility")))
else:
manage_facility_box = ""
if has_permission("create", table):
create = A(T("Create Organization"),
_href = URL(c="org", f="organisation",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right: 10px;")
else:
create = ""
org_box = DIV(H3(T("Organizations")),
create,
org_items,
_id = "org_box",
_class = "menu_box fleft"
)
else:
datatable_ajax_source = ""
manage_facility_box = ""
org_box = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
# Provide a login box on front page
auth.messages.submit_button = T("Login")
login_form = auth.login(inline=True)
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
if self_registration:
# Provide a Registration box on front page
register_form = auth.register()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
if login_form.errors:
hide, show = "#register_form", "#login_form"
else:
hide, show = "#login_form", "#register_form"
post_script = \
'''$('%s').addClass('hide')
$('%s').removeClass('hide')''' % (hide, show)
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
output = dict(title = title,
item = item,
sit_dec_res_box = sit_dec_res_box,
facility_box = facility_box,
manage_facility_box = manage_facility_box,
org_box = org_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
if get_vars.tour:
output = s3db.tour_builder(output)
return output
# -----------------------------------------------------------------------------
def organisation():
    """
        Function to handle pagination for the org list on the homepage

        Serves both the initial HTML build of the dataTable and the
        subsequent Ajax ("aadata") pagination requests.
    """
    # "html" for the initial page build, "aadata" for Ajax pagination
    representation = request.extension
    resource = s3db.resource("org_organisation")
    totalrows = resource.count()
    # dataTables sends the pagination window in the GET vars
    display_start = int(get_vars.displayStart) if get_vars.displayStart else 0
    display_length = int(get_vars.pageLength) if get_vars.pageLength else 10
    # Read ahead of the current page to reduce the number of Ajax calls
    limit = 4 * display_length
    list_fields = ["id", "name"]
    default_orderby = orderby = "org_organisation.name asc"
    if representation == "aadata":
        # Apply the dataTables search/sort criteria from the request
        query, orderby, left = resource.datatable_filter(list_fields, get_vars)
        if orderby is None:
            orderby = default_orderby
        if query:
            resource.add_filter(query)
    data = resource.select(list_fields,
                           start=display_start,
                           limit=limit,
                           orderby=orderby,
                           count=True,
                           represent=True)
    filteredrows = data["numrows"]
    rfields = data["rfields"]
    data = data["rows"]
    dt = S3DataTable(rfields, data)
    dt.defaultActionButtons(resource)
    # No export format icons on the homepage table
    s3.no_formats = True
    if representation == "html":
        items = dt.html(totalrows,
                        totalrows,
                        "org_dt",
                        dt_ajax_url=URL(c="default",
                                        f="organisation",
                                        extension="aadata",
                                        vars={"id": "org_dt"},
                                        ),
                        dt_pageLength=display_length,
                        dt_pagination="true",
                        )
    elif representation == "aadata":
        # Echo back the dataTables draw counter, if provided
        draw = request.get_vars.get("draw")
        if draw:
            draw = int(draw)
        items = dt.json(totalrows,
                        filteredrows,
                        "org_dt",
                        draw)
    else:
        # Other representations are not supported for this table
        from gluon.http import HTTP
        raise HTTP(501, ERROR.BAD_FORMAT)
    return items
# -----------------------------------------------------------------------------
def site():
    """
        Lookup the instance record for a Site & redirect to the
        controller of its instance type

        @ToDo: Avoid redirect
    """
    try:
        site_id = request.args[0]
    except:
        # No site specified
        raise HTTP(404)
    table = s3db.org_site
    record = db(table.site_id == site_id).select(table.instance_type,
                                                 limitby=(0, 1)).first()
    if not record:
        # No such site
        raise HTTP(404)
    tablename = record.instance_type
    table = s3db.table(tablename)
    if table:
        query = (table.site_id == site_id)
        row = db(query).select(table.id,
                               limitby = (0, 1)).first()
        if not row:
            # Orphaned super-entity: instance record is missing
            raise HTTP(404)
        # "org_facility" => c="org", f="facility"
        cf = tablename.split("_", 1)
        redirect(URL(c = cf[0],
                     f = cf[1],
                     args = [row.id]))
# -----------------------------------------------------------------------------
def message():
    """ Show a confirmation screen after self-registration """
    #if "verify_email_sent" in request.args:
    title = T("Account Registered - Please Check Your Email")
    # NOTE: "check you junk email" typo fixed; translation files keyed on
    # the old string need updating accordingly
    message = T( "%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email please check your junk email or spam filters." )\
        % {"system_name": settings.get_system_name(),
           "email": request.vars.email}
    image = "email_icon.png"
    return dict(title = title,
                message = message,
                image_src = "/%s/static/img/%s" % (appname, image)
                )
# -----------------------------------------------------------------------------
def rapid():
    """
        Set/remove rapid data entry flag

        Any value of the "val" GET var other than "0" (including a
        missing var) enables rapid data entry; "0" disables it.
        The flag is stored in the session.
    """
    session.s3.rapid_data_entry = get_vars.get("val") != "0"
    response.view = "xml.html"
    return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user():
    """
        Auth functions based on arg. See gluon/tools.py

        Dispatches on request.args(0): login, register, change_password,
        retrieve_password, profile, options.s3json, logout, verify_email.
    """
    auth_settings = auth.settings
    utable = auth_settings.table_user
    arg = request.args(0)
    if arg == "verify_email":
        # Ensure we use the user's language
        key = request.args[-1]
        query = (utable.registration_key == key)
        user = db(query).select(utable.language,
                                limitby=(0, 1)).first()
        if not user:
            # Unknown/expired key => follow the configured next-URL
            redirect(auth_settings.verify_email_next)
        session.s3.language = user.language
    auth_settings.on_failed_authorization = URL(f="error")
    auth.configure_user_fields()
    auth_settings.profile_onaccept = auth.s3_user_profile_onaccept
    auth_settings.register_onvalidation = register_validation
    self_registration = settings.get_security_self_registration()
    login_form = register_form = None
    # Check for template-specific customisations
    customise = settings.customise_auth_user_controller
    if customise:
        customise(arg=arg)
    if arg == "login":
        title = response.title = T("Login")
        # @ToDo: move this code to /modules/s3/s3aaa.py:def login()?
        auth.messages.submit_button = T("Login")
        form = auth()
        #form = auth.login()
        login_form = form
    elif arg == "register":
        title = response.title = T("Register")
        # @ToDo: move this code to /modules/s3/s3aaa.py:def register()?
        if not self_registration:
            session.error = T("Registration not permitted")
            redirect(URL(f="index"))
        form = register_form = auth.register()
    elif arg == "change_password":
        title = response.title = T("Change Password")
        form = auth()
        # Add client-side validation
        if s3.debug:
            s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.js" % appname)
        else:
            s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.min.js" % appname)
        # Password-strength meter on the new-password field
        s3.jquery_ready.append("$('.password:eq(1)').pstrength()")
    elif arg == "retrieve_password":
        title = response.title = T("Retrieve Password")
        form = auth()
    elif arg == "profile":
        title = response.title = T("User Profile")
        form = auth.profile()
    elif arg == "options.s3json":
        # Used when adding organisations from registration form
        return s3_rest_controller(prefix="auth", resourcename="user")
    else:
        # logout or verify_email
        title = ""
        form = auth()
    if form:
        if s3.crud.submit_style:
            # Apply the configured CRUD submit-button style to the form
            form[0][-1][1][0]["_class"] = s3.crud.submit_style
    if settings.get_template() != "default":
        # Try a Custom View
        view = os.path.join(request.folder, "private", "templates",
                            settings.get_template(), "views", "user.html")
        if os.path.exists(view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % view)
    return dict(title=title,
                form=form,
                login_form=login_form,
                register_form=register_form,
                self_registration=self_registration)
# -----------------------------------------------------------------------------
def person():
    """
        Profile to show:
         - User Details
         - Person Details
         - Staff/Volunteer Record
         - Map Config
    """
    # Set to current user
    user_person_id = str(s3_logged_in_person())
    # When request.args = [], set it as user_person_id.
    # When it is not an ajax request and the first argument is not user_person_id, set it.
    # If it is an json request, leave the arguments unmodified.
    if not request.args or (request.args[0] != user_person_id and \
                            request.args[-1] != "options.s3json" and \
                            request.args[-1] != "validate.json"
                            ):
        request.args = [user_person_id]
    set_method = s3db.set_method
    # Custom Method for User
    def auth_profile_method(r, **attr):
        # Custom View
        response.view = "update.html"
        current.menu.breadcrumbs = None
        # RHeader for consistency
        rheader = attr.get("rheader", None)
        if callable(rheader):
            rheader = rheader(r)
        table = auth.settings.table_user
        tablename = table._tablename
        next = URL(c = "default",
                   f = "person",
                   args = [user_person_id, "user"])
        # NOTE(review): the trailing comma makes onaccept a 1-tuple
        # containing the lambda; web2py's callback mechanism accepts
        # lists/tuples of callbacks, but confirm this is intentional
        onaccept = lambda form: auth.s3_approve_user(form.vars),
        form = auth.profile(next = next,
                            onaccept = onaccept)
        return dict(title = T("User Profile"),
                    rheader = rheader,
                    form = form,
                    )
    set_method("pr", "person",
               method="user",
               action=auth_profile_method)
    # Custom Method for Contacts
    set_method("pr", "person",
               method="contacts",
               action=s3db.pr_contacts)
    #if settings.has_module("asset"):
    #    # Assets as component of people
    #    s3db.add_components("pr_person", asset_asset="assigned_to_id")
    # CRUD pre-process
    def prep(r):
        if r.method in ("options", "validate"):
            return True
        if r.interactive and r.method != "import":
            # Load default model to override CRUD Strings
            tablename = "pr_person"
            table = s3db[tablename]
            # Users can not delete their own person record
            r.resource.configure(deletable=False)
            s3.crud_strings[tablename].update(
                title_display = T("Personal Profile"),
                title_update = T("Personal Profile"))
            # Organisation-dependent Fields
            set_org_dependent_field = settings.set_org_dependent_field
            set_org_dependent_field("pr_person_details", "father_name")
            set_org_dependent_field("pr_person_details", "mother_name")
            set_org_dependent_field("pr_person_details", "affiliations")
            set_org_dependent_field("pr_person_details", "company")
            if r.component:
                if r.component_name == "physical_description":
                    # Hide all but those details that we want
                    # Lock all the fields
                    table = r.component.table
                    for field in table.fields:
                        table[field].writable = False
                        table[field].readable = False
                    # Now enable those that we want
                    table.ethnicity.writable = True
                    table.ethnicity.readable = True
                    table.blood_type.writable = True
                    table.blood_type.readable = True
                    table.medical_conditions.writable = True
                    table.medical_conditions.readable = True
                    table.other_details.writable = True
                    table.other_details.readable = True
                elif r.component_name == "config":
                    ctable = s3db.gis_config
                    s3db.gis_config_form_setup()
                    # Create forms use this
                    # (update forms are in gis/config())
                    fields = ["name",
                              "pe_default",
                              "default_location_id",
                              "zoom",
                              "lat",
                              "lon",
                              #"projection_id",
                              #"symbology_id",
                              #"wmsbrowser_url",
                              #"wmsbrowser_name",
                              ]
                    osm_table = s3db.gis_layer_openstreetmap
                    openstreetmap = db(osm_table.deleted == False).select(osm_table.id,
                                                                          limitby=(0, 1))
                    if openstreetmap:
                        # OpenStreetMap config
                        s3db.add_components("gis_config",
                                            auth_user_options={"joinby": "pe_id",
                                                               "pkey": "pe_id",
                                                               "multiple": False,
                                                               },
                                            )
                        fields += ["user_options.osm_oauth_consumer_key",
                                   "user_options.osm_oauth_consumer_secret",
                                   ]
                    crud_form = s3base.S3SQLCustomForm(*fields)
                    list_fields = ["name",
                                   "pe_default",
                                   ]
                    s3db.configure("gis_config",
                                   crud_form=crud_form,
                                   insertable=False,
                                   list_fields = list_fields,
                                   )
            else:
                # Main record: hide fields irrelevant to self-service profile
                table.pe_label.readable = False
                table.pe_label.writable = False
                table.missing.readable = False
                table.missing.writable = False
                table.age_group.readable = False
                table.age_group.writable = False
                # Assume volunteers only between 12-81
                table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
            return True
        else:
            # Disable non-interactive & import
            return False
    s3.prep = prep
    # CRUD post-process
    def postp(r, output):
        if r.interactive and r.component:
            if r.component_name == "human_resource":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
            if r.component_name == "identity":
                # Set the minimum valid_until to the same as the valid_from
                s3.jquery_ready.append(
'''S3.start_end_date('pr_identity_valid_from','pr_identity_valid_until')''')
            if r.component_name == "experience":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
            elif r.component_name == "config":
                update_url = URL(c="gis", f="config",
                                 args="[id]")
                s3_action_buttons(r, update_url=update_url)
                s3.actions.append(
                    dict(url=URL(c="gis", f="index",
                                 vars={"config":"[id]"}),
                         label=str(T("Show")),
                         _class="action-btn")
                    )
            elif r.component_name == "asset":
                # Provide a link to assign a new Asset
                # @ToDo: Proper Widget to do this inline
                output["add_btn"] = A(T("Assign Asset"),
                                      _href=URL(c="asset", f="asset"),
                                      _id="add-btn",
                                      _class="action-btn")
        return output
    s3.postp = postp
    # Build the rheader tabs according to the deployment settings
    if settings.get_hrm_staff_experience() == "experience":
        experience_tab = (T("Experience"), "experience")
    else:
        experience_tab = None
    if settings.get_hrm_use_certificates():
        certificates_tab = (T("Certificates"), "certificate")
    else:
        certificates_tab = None
    if settings.get_hrm_use_credentials():
        credentials_tab = (T("Credentials"), "credential")
    else:
        credentials_tab = None
    if settings.get_hrm_use_description():
        description_tab = (T("Description"), "physical_description")
    else:
        description_tab = None
    if settings.get_hrm_use_education():
        education_tab = (T("Education"), "education")
    else:
        education_tab = None
    if settings.get_hrm_use_id():
        id_tab = (T("ID"), "identity")
    else:
        id_tab = None
    if settings.get_hrm_use_skills():
        skills_tab = (T("Skills"), "competency")
    else:
        skills_tab = None
    teams = settings.get_hrm_teams()
    if teams:
        teams_tab = (T(teams), "group_membership")
    else:
        teams_tab = None
    if settings.get_hrm_use_trainings():
        trainings_tab = (T("Trainings"), "training")
    else:
        trainings_tab = None
    # None entries are dropped by the rheader builder
    tabs = [(T("Person Details"), None),
            (T("User Account"), "user"),
            (T("Staff/Volunteer Record"), "human_resource"),
            id_tab,
            description_tab,
            (T("Address"), "address"),
            (T("Contacts"), "contacts"),
            education_tab,
            trainings_tab,
            certificates_tab,
            skills_tab,
            credentials_tab,
            experience_tab,
            teams_tab,
            #(T("Assets"), "asset"),
            #(T("My Subscriptions"), "subscription"),
            (T("My Maps"), "config"),
            ]
    output = s3_rest_controller("pr", "person",
                                rheader = lambda r: \
                                    s3db.pr_rheader(r, tabs=tabs))
    return output
# -----------------------------------------------------------------------------
def group():
    """
        RESTful CRUD controller
        - needed when the group add-form is embedded in default/person
        - only the create method is allowed, when opened in an inline form
    """
    # Only reachable as a popup (inline form)
    if auth.permission.format != "popup":
        return ""
    # Restrict the REST interface to the create method
    s3.prep = lambda r: r.method == "create"
    return s3_rest_controller("pr", "group")
# -----------------------------------------------------------------------------
def skill():
    """
        RESTful CRUD controller
        - needed when the skill add-form is embedded in default/person
        - only the create method is allowed, when opened in an inline form
    """
    # Only reachable as a popup (inline form)
    if auth.permission.format != "popup":
        return ""
    # Restrict the REST interface to the create method
    s3.prep = lambda r: r.method == "create"
    return s3_rest_controller("hrm", "skill")
# -----------------------------------------------------------------------------
def facebook():
    """ Login using Facebook """
    channel = s3db.msg_facebook_login()
    if not channel:
        # Facebook auth not configured => fall back to the standard login
        redirect(URL(f="user", args=request.args, vars=get_vars))
    from s3oauth import FaceBookAccount
    auth.settings.login_form = FaceBookAccount(channel)
    return dict(form=auth())
# -----------------------------------------------------------------------------
def google():
    """ Login using Google """
    channel = settings.get_auth_google()
    if not channel:
        # Google auth not configured => fall back to the standard login
        redirect(URL(f="user", args=request.args, vars=get_vars))
    from s3oauth import GooglePlusAccount
    auth.settings.login_form = GooglePlusAccount(channel)
    return dict(form=auth())
# -----------------------------------------------------------------------------
# About Sahana
def apath(path=""):
    """ Application path: resolve *path* relative to the web2py folder """
    from gluon.fileutils import up
    opath = up(request.folder)
    # @ToDo: This path manipulation is very OS specific.
    while path.startswith("../"):
        opath = up(opath)
        path = path[3:]
    return os.path.join(opath, path).replace("\\", "/")
def about():
    """
        The About page provides details on the software dependencies and
        versions available to this instance of Sahana Eden.

        Returns a dict of version strings for the view; database & library
        versions are None or an explanatory message when the component is
        not relevant/installed.
    """
    response.title = T("About")
    if settings.get_template() != "default":
        # Try a Custom View
        view = os.path.join(request.folder, "private", "templates",
                            settings.get_template(), "views", "about.html")
        if os.path.exists(view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % view)
    import sys
    python_version = sys.version
    # Close the VERSION files after reading
    # (web2py's VERSION starts with "Version ", hence the [8:])
    with open(apath("../VERSION"), "r") as f:
        web2py_version = f.read()[8:]
    with open(os.path.join(request.folder, "VERSION"), "r") as f:
        sahana_version = f.read()
    # Database
    sqlite_version = None
    mysql_version = None
    mysqldb_version = None
    pgsql_version = None
    psycopg_version = None
    if db_string.find("sqlite") != -1:
        try:
            import sqlite3
            sqlite_version = sqlite3.version
        except Exception:
            sqlite_version = T("Unknown")
    elif db_string.find("mysql") != -1:
        try:
            import MySQLdb
            mysqldb_version = MySQLdb.__revision__
        except Exception:
            mysqldb_version = T("Not installed or incorrectly configured.")
            mysql_version = T("Unknown")
        else:
            #mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
            con = MySQLdb.connect(host=settings.database.get("host", "localhost"),
                                  port=settings.database.get("port", None) or 3306,
                                  db=settings.database.get("database", "sahana"),
                                  user=settings.database.get("username", "sahana"),
                                  passwd=settings.database.get("password", "password")
                                  )
            try:
                cur = con.cursor()
                cur.execute("SELECT VERSION()")
                mysql_version = cur.fetchone()
            finally:
                # Do not leak the ad-hoc connection
                con.close()
    else:
        # Postgres
        try:
            import psycopg2
            psycopg_version = psycopg2.__version__
        except Exception:
            psycopg_version = T("Not installed or incorrectly configured.")
            pgsql_version = T("Unknown")
        else:
            #pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
            #pgsql_version = string.split(pgsql_reply)[2]
            con = psycopg2.connect(host=settings.database.get("host", "localhost"),
                                   port=settings.database.get("port", None) or 5432,
                                   database=settings.database.get("database", "sahana"),
                                   user=settings.database.get("username", "sahana"),
                                   password=settings.database.get("password", "password")
                                   )
            try:
                cur = con.cursor()
                cur.execute("SELECT version()")
                pgsql_version = cur.fetchone()
            finally:
                # Do not leak the ad-hoc connection
                con.close()
    # Libraries
    try:
        import reportlab
        reportlab_version = reportlab.Version
    except Exception:
        reportlab_version = T("Not installed or incorrectly configured.")
    try:
        import xlwt
        xlwt_version = xlwt.__VERSION__
    except Exception:
        xlwt_version = T("Not installed or incorrectly configured.")
    return dict(
        python_version=python_version,
        sahana_version=sahana_version,
        web2py_version=web2py_version,
        sqlite_version=sqlite_version,
        mysql_version=mysql_version,
        mysqldb_version=mysqldb_version,
        pgsql_version=pgsql_version,
        psycopg_version=psycopg_version,
        reportlab_version=reportlab_version,
        xlwt_version=xlwt_version
    )
# -----------------------------------------------------------------------------
def help():
    """ Custom View """
    template = settings.get_template()
    if template != "default":
        # A template may override this page with its own view
        custom_view = os.path.join(request.folder, "private", "templates",
                                   template, "views", "help.html")
        if os.path.exists(custom_view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(custom_view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
    response.title = T("Help")
    return dict()
# -----------------------------------------------------------------------------
def privacy():
    """ Custom View """
    template = settings.get_template()
    if template != "default":
        # A template may override this page with its own view
        custom_view = os.path.join(request.folder, "private", "templates",
                                   template, "views", "privacy.html")
        if os.path.exists(custom_view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(custom_view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
    response.title = T("Privacy")
    return dict()
# -----------------------------------------------------------------------------
def tos():
    """ Custom View """
    template = settings.get_template()
    if template != "default":
        # A template may override this page with its own view
        custom_view = os.path.join(request.folder, "private", "templates",
                                   template, "views", "tos.html")
        if os.path.exists(custom_view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(custom_view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
    response.title = T("Terms of Service")
    return dict()
# -----------------------------------------------------------------------------
def video():
    """ Custom View """
    template = settings.get_template()
    if template != "default":
        # A template may override this page with its own view
        custom_view = os.path.join(request.folder, "private", "templates",
                                   template, "views", "video.html")
        if os.path.exists(custom_view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(custom_view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
    response.title = T("Video Tutorials")
    return dict()
# -----------------------------------------------------------------------------
def contact():
    """
        Give the user options to contact the site admins.
        Either:
            An internal Support Requests database
        or:
            Custom View
    """
    if auth.is_logged_in() and settings.has_module("support"):
        # Provide an internal Support Requests ticketing system.
        prefix = "support"
        resourcename = "req"
        tablename = "%s_%s" % (prefix, resourcename)
        table = s3db[tablename]
        # Pre-processor
        def prep(r):
            if r.interactive:
                # Only Admins should be able to update ticket status
                status = table.status
                actions = table.actions
                if not auth.s3_has_role(ADMIN):
                    status.writable = False
                    actions.writable = False
                if r.method != "update":
                    # Status/actions only shown on the update form
                    status.readable = False
                    status.writable = False
                    actions.readable = False
                    actions.writable = False
            return True
        s3.prep = prep
        output = s3_rest_controller(prefix, resourcename)
        return output
    template = settings.get_template()
    if template != "default":
        # Try a Custom Page (a contact() defined in the template's controllers.py)
        controller = "applications.%s.private.templates.%s.controllers" % \
                     (appname, template)
        try:
            exec("import %s as custom" % controller) in globals(), locals()
        except ImportError, e:
            # No Custom Page available, try a custom view
            pass
        else:
            if "contact" in custom.__dict__:
                # Custom pages are classes: instantiate, then call
                output = custom.contact()()
                return output
        # Try a Custom View
        view = os.path.join(request.folder, "private", "templates",
                            template, "views", "contact.html")
        if os.path.exists(view):
            try:
                # Pass view as file not str to work in compiled mode
                response.view = open(view, "rb")
            except IOError:
                from gluon.http import HTTP
                raise HTTP("404", "Unable to open Custom View: %s" % view)
            response.title = T("Contact us")
            return dict()
    if settings.has_module("cms"):
        # Use CMS
        return s3db.cms_index("default", "contact", page_name=T("Contact Us"))
    # Just use default HTML View
    return dict()
# -----------------------------------------------------------------------------
def load_all_models():
    """
        Controller to load all models in web browser
        - to make it easy to debug in Eclipse

        The response body is just a marker string.
    """
    s3db.load_all_models()
    return "ok"
# -----------------------------------------------------------------------------
def audit():
    """
        RESTful CRUD Controller for Audit Logs
        - used e.g. for Site Activity
    """
    # Delegate entirely to the generic REST controller
    return s3_rest_controller("s3", "audit")
# -----------------------------------------------------------------------------
def get_settings():
    """
        Function to respond to get requests. Requires admin permissions

        Returns requested deployment settings as JSON, e.g.
        /get_settings/deployment_settings/template
    """
    # Check if the request has a valid authorization header with admin cred.
    if not auth.s3_has_role("ADMIN"):
        auth.permission.format = None
        auth.permission.fail()
    elif not settings.get_base_allow_testing():
        # Only available when base.allow_testing is enabled
        raise(HTTP("405", "Testing not allowed"))
    else:
        arg = request.args(0)
        # Ex. request /get_settings/deployment_settings/template
        if arg == "deployment_settings":
            asked = request.args[1:]
            return_settings = {}
            for setting in asked:
                func_name = "get_%s" % setting
                # NOTE(review): getattr without a default raises
                # AttributeError for unknown setting names - confirm
                # whether that (=ticket) is the intended behaviour
                function = getattr(settings, func_name)
                # Ex. value of function - settings.get_template()
                try:
                    value = function()
                except TypeError:
                    # Getter requires arguments => skip this setting
                    continue
                return_settings[setting] = value
            return response.json(return_settings)
    raise(HTTP("400", "Invalid/Missing argument"))
# END =========================================================================
|
|
# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HPE 3PAR Storage array.
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
sudo pip install --upgrade "hpe3parclient>=4.0"
Set the following in the cinder.conf file to enable the
3PAR iSCSI Driver along with the required flags:
volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver
"""
import re
import sys
try:
from hpe3parclient import exceptions as hpeexceptions
except ImportError:
hpeexceptions = None
from oslo_log import log as logging
from oslo_utils.excutils import save_and_reraise_exception
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
# EXISTENT_PATH error code returned from hpe3parclient
EXISTENT_PATH = 73
DEFAULT_ISCSI_PORT = 3260
CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"
@interface.volumedriver
class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase):
"""OpenStack iSCSI driver to enable 3PAR storage array.
Version history:
.. code-block:: none
1.0 - Initial driver
1.1 - QoS, extend volume, multiple iscsi ports, remove domain,
session changes, faster clone, requires 3.1.2 MU2 firmware.
1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored
the drivers to use the new APIs.
1.2.1 - Synchronized extend_volume method.
1.2.2 - Added try/finally around client login/logout.
1.2.3 - log exceptions before raising
1.2.4 - Fixed iSCSI active path bug #1224594
1.2.5 - Added metadata during attach/detach bug #1258033
1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515
This update now requires 3.1.2 MU3 firmware
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Added support for managing/unmanaging of volumes
2.0.4 - Added support for volume retype
2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware
and hp3parclient 3.1.0.
2.0.6 - Fixing missing login/logout around attach/detach bug #1367429
2.0.7 - Add support for pools with model update
2.0.8 - Migrate without losing type settings bug #1356608
2.0.9 - Removing locks bug #1381190
2.0.10 - Add call to queryHost instead SSH based findHost #1398206
2.0.11 - Added missing host name during attach fix #1398206
2.0.12 - Removed usage of host name cache #1398914
2.0.13 - Update LOG usage to fix translations. bug #1384312
2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be
used during live-migration. bug #1423958
2.0.15 - Added support for updated detach_volume attachment.
2.0.16 - Added encrypted property to initialize_connection #1439917
2.0.17 - Python 3 fixes
2.0.18 - Improved VLUN creation and deletion logic. #1469816
2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064
2.0.20 - Adding changes to support 3PAR iSCSI multipath.
2.0.21 - Adds consistency group support
2.0.22 - Update driver to use ABC metaclasses
2.0.23 - Added update_migrated_volume. bug # 1492023
3.0.0 - Rebranded HP to HPE.
3.0.1 - Python 3 support
3.0.2 - Remove db access for consistency groups
3.0.3 - Fix multipath dictionary key error. bug #1522062
3.0.4 - Adds v2 managed replication support
3.0.5 - Adds v2 unmanaged replication support
3.0.6 - Adding manage/unmanage snapshot support
3.0.7 - Optimize array ID retrieval
3.0.8 - Update replication to version 2.1
3.0.9 - Use same LUN ID for each VLUN path #1551994
3.0.10 - Remove metadata that tracks the instance ID. bug #1572665
3.0.11 - _create_3par_iscsi_host() now accepts iscsi_iqn as list only.
Bug #1590180
3.0.12 - Added entry point tracing
3.0.13 - Handling HTTP conflict 409, host WWN/iSCSI name already used
by another host, while creating 3PAR iSCSI Host. bug #1642945
3.0.14 - Handle manage and unmanage hosts present. bug #1648067
3.0.15 - Adds consistency group capability in generic volume groups.
3.0.16 - Get host from os-brick connector. bug #1690244
4.0.0 - Adds base class.
4.0.1 - Update CHAP on host record when volume is migrated
to new compute host. bug # 1737181
4.0.2 - Handle force detach case. bug #1686745
"""
VERSION = "4.0.2"
# The name of the CI wiki page.
CI_WIKI_NAME = "HPE_Storage_CI"
    def __init__(self, *args, **kwargs):
        """Initialize the iSCSI flavor of the 3PAR driver.

        Delegates to the shared base driver and records the transport
        protocol this subclass implements.
        """
        super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs)
        self.protocol = 'iSCSI'
    def _do_setup(self, common):
        """iSCSI-specific setup: discover the array's iSCSI target ports.

        Logs in to the backend, populates self.iscsi_ips for the current
        API URL, and always logs out again — even when discovery raises.

        :param common: HPE3PARCommon instance (not yet logged in)
        """
        self.iscsi_ips = {}
        common.client_login()
        try:
            self.initialize_iscsi_ports(common)
        finally:
            self._logout(common)
def initialize_iscsi_ports(self, common):
# map iscsi_ip-> ip_port
# -> iqn
# -> nsp
iscsi_ip_list = {}
temp_iscsi_ip = {}
# use the 3PAR ip_addr list for iSCSI configuration
if len(common._client_conf['hpe3par_iscsi_ips']) > 0:
# add port values to ip_addr, if necessary
for ip_addr in common._client_conf['hpe3par_iscsi_ips']:
ip = ip_addr.split(':')
if len(ip) == 1:
temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
elif len(ip) == 2:
temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
else:
LOG.warning("Invalid IP address format '%s'", ip_addr)
# add the single value iscsi_ip_address option to the IP dictionary.
# This way we can see if it's a valid iSCSI IP. If it's not valid,
# we won't use it and won't bother to report it, see below
if (common._client_conf['iscsi_ip_address'] not in temp_iscsi_ip):
ip = common._client_conf['iscsi_ip_address']
ip_port = common._client_conf['iscsi_port']
temp_iscsi_ip[ip] = {'ip_port': ip_port}
# get all the valid iSCSI ports from 3PAR
# when found, add the valid iSCSI ip, ip port, iqn and nsp
# to the iSCSI IP dictionary
iscsi_ports = common.get_active_iscsi_target_ports()
for port in iscsi_ports:
ip = port['IPAddr']
if ip in temp_iscsi_ip:
ip_port = temp_iscsi_ip[ip]['ip_port']
iscsi_ip_list[ip] = {'ip_port': ip_port,
'nsp': port['nsp'],
'iqn': port['iSCSIName']}
del temp_iscsi_ip[ip]
# if the single value iscsi_ip_address option is still in the
# temp dictionary it's because it defaults to $my_ip which doesn't
# make sense in this context. So, if present, remove it and move on.
if common._client_conf['iscsi_ip_address'] in temp_iscsi_ip:
del temp_iscsi_ip[common._client_conf['iscsi_ip_address']]
# lets see if there are invalid iSCSI IPs left in the temp dict
if len(temp_iscsi_ip) > 0:
LOG.warning("Found invalid iSCSI IP address(s) in "
"configuration option(s) hpe3par_iscsi_ips or "
"target_ip_address '%s.'",
(", ".join(temp_iscsi_ip)))
if not len(iscsi_ip_list) > 0:
msg = _('At least one valid iSCSI IP address must be set.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
self.iscsi_ips[common._client_conf['hpe3par_api_url']] = iscsi_ip_list
    @utils.trace
    def initialize_connection(self, volume, connector):
        """Assigns the volume to a server.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        This driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value:

        .. code-block:: default

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'encrypted': False,
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        Steps to export a volume on 3PAR
          * Get the 3PAR iSCSI iqn
          * Create a host on the 3par
          * create vlun on the 3par

        :param volume: volume object to attach
        :param connector: connector dict from os-brick; 'multipath' selects
            between the multi-portal and single-portal return formats
        :returns: connection info dict as shown above (multipath adds the
            plural 'target_portals'/'target_iqns'/'target_luns' keys)
        """
        common = self._login()
        try:
            # If the volume has been failed over, we need to reinitialize
            # iSCSI ports so they represent the new array.
            if volume.get('replication_status') == 'failed-over' and (
               common._client_conf['hpe3par_api_url'] not in self.iscsi_ips):
                self.initialize_iscsi_ports(common)

            # Grab the correct iSCSI ports
            iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]

            # we have to make sure we have a host
            host, username, password = self._create_host(
                common,
                volume,
                connector)

            if connector.get('multipath'):
                ready_ports = common.client.getiSCSIPorts(
                    state=common.client.PORT_STATE_READY)

                target_portals = []
                target_iqns = []
                target_luns = []

                # Target portal ips are defined in cinder.conf.
                target_portal_ips = iscsi_ips.keys()

                # Collect all existing VLUNs for this volume/host combination.
                existing_vluns = common.find_existing_vluns(volume, host)

                # Cycle through each ready iSCSI port and determine if a new
                # VLUN should be created or an existing one used.
                # lun_id is shared so every portal exports the same LUN ID.
                lun_id = None
                for port in ready_ports:
                    iscsi_ip = port['IPAddr']
                    if iscsi_ip in target_portal_ips:
                        vlun = None
                        # check for an already existing VLUN matching the
                        # nsp for this iSCSI IP. If one is found, use it
                        # instead of creating a new VLUN.
                        # NOTE: for/else — the else runs only when no
                        # existing VLUN matched (no break taken).
                        for v in existing_vluns:
                            portPos = common.build_portPos(
                                iscsi_ips[iscsi_ip]['nsp'])
                            if v['portPos'] == portPos:
                                vlun = v
                                break
                        else:
                            vlun = common.create_vlun(
                                volume, host, iscsi_ips[iscsi_ip]['nsp'],
                                lun_id=lun_id)

                        # We want to use the same LUN ID for every port
                        if lun_id is None:
                            lun_id = vlun['lun']
                        iscsi_ip_port = "%s:%s" % (
                            iscsi_ip, iscsi_ips[iscsi_ip]['ip_port'])
                        target_portals.append(iscsi_ip_port)
                        target_iqns.append(port['iSCSIName'])
                        target_luns.append(vlun['lun'])
                    else:
                        LOG.warning("iSCSI IP: '%s' was not found in "
                                    "hpe3par_iscsi_ips list defined in "
                                    "cinder.conf.", iscsi_ip)

                info = {'driver_volume_type': 'iscsi',
                        'data': {'target_portals': target_portals,
                                 'target_iqns': target_iqns,
                                 'target_luns': target_luns,
                                 'target_discovered': True
                                 }
                        }
            else:
                least_used_nsp = None

                # check if a VLUN already exists for this host
                existing_vlun = common.find_existing_vlun(volume, host)

                if existing_vlun:
                    # We override the nsp here on purpose to force the
                    # volume to be exported out the same IP as it already is.
                    # This happens during nova live-migration, we want to
                    # disable the picking of a different IP that we export
                    # the volume to, or nova complains.
                    least_used_nsp = common.build_nsp(existing_vlun['portPos'])

                if not least_used_nsp:
                    least_used_nsp = self._get_least_used_nsp_for_host(
                        common,
                        host['name'])

                vlun = None
                if existing_vlun is None:
                    # now that we have a host, create the VLUN
                    vlun = common.create_vlun(volume, host, least_used_nsp)
                else:
                    vlun = existing_vlun

                if least_used_nsp is None:
                    LOG.warning("Least busy iSCSI port not found, "
                                "using first iSCSI port in list.")
                    iscsi_ip = list(iscsi_ips)[0]
                else:
                    iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common)

                iscsi_ip_port = iscsi_ips[iscsi_ip]['ip_port']
                iscsi_target_iqn = iscsi_ips[iscsi_ip]['iqn']
                info = {'driver_volume_type': 'iscsi',
                        'data': {'target_portal': "%s:%s" %
                                 (iscsi_ip, iscsi_ip_port),
                                 'target_iqn': iscsi_target_iqn,
                                 'target_lun': vlun['lun'],
                                 'target_discovered': True
                                 }
                        }

            if common._client_conf['hpe3par_iscsi_chap_enabled']:
                info['data']['auth_method'] = 'CHAP'
                info['data']['auth_username'] = username
                info['data']['auth_password'] = password

            encryption_key_id = volume.get('encryption_key_id', None)
            info['data']['encrypted'] = encryption_key_id is not None

            return info
        finally:
            self._logout(common)
@utils.trace
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
common = self._login()
try:
is_force_detach = connector is None
if is_force_detach:
common.terminate_connection(volume, None, None)
else:
hostname = common._safe_hostname(connector['host'])
common.terminate_connection(
volume,
hostname,
iqn=connector['initiator'])
self._clear_chap_3par(common, volume)
finally:
self._logout(common)
def _clear_chap_3par(self, common, volume):
"""Clears CHAP credentials on a 3par volume.
Ignore exceptions caused by the keys not being present on a volume.
"""
vol_name = common._get_3par_vol_name(volume['id'])
try:
common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY)
except hpeexceptions.HTTPNotFound:
pass
except Exception:
raise
try:
common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY)
except hpeexceptions.HTTPNotFound:
pass
except Exception:
raise
def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain,
persona_id):
"""Create a 3PAR host.
Create a 3PAR host, if there is already a host on the 3par using
the same iqn but with a different hostname, return the hostname
used by 3PAR.
"""
# first search for an existing host
host_found = None
hosts = common.client.queryHost(iqns=iscsi_iqn)
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
host_found = hosts['members'][0]['name']
if host_found is not None:
return host_found
else:
persona_id = int(persona_id)
try:
common.client.createHost(hostname, iscsiNames=iscsi_iqn,
optional={'domain': domain,
'persona': persona_id})
except hpeexceptions.HTTPConflict as path_conflict:
msg = "Create iSCSI host caught HTTP conflict code: %s"
with save_and_reraise_exception(reraise=False) as ctxt:
if path_conflict.get_code() is EXISTENT_PATH:
# Handle exception : EXISTENT_PATH - host WWN/iSCSI
# name already used by another host
hosts = common.client.queryHost(iqns=iscsi_iqn)
if hosts and hosts['members'] and (
'name' in hosts['members'][0]):
hostname = hosts['members'][0]['name']
else:
# re-raise last caught exception
ctxt.reraise = True
LOG.exception(msg, path_conflict.get_code())
else:
# re-raise last caught exception
# for other HTTP conflict
ctxt.reraise = True
LOG.exception(msg, path_conflict.get_code())
return hostname
def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn):
mod_request = {'pathOperation': common.client.HOST_EDIT_ADD,
'iSCSINames': [iscsi_iqn]}
common.client.modifyHost(hostname, mod_request)
def _set_3par_chaps(self, common, hostname, volume, username, password):
"""Sets a 3PAR host's CHAP credentials."""
if not common._client_conf['hpe3par_iscsi_chap_enabled']:
return
mod_request = {'chapOperation': common.client.HOST_EDIT_ADD,
'chapOperationMode': common.client.CHAP_INITIATOR,
'chapName': username,
'chapSecret': password}
common.client.modifyHost(hostname, mod_request)
    def _create_host(self, common, volume, connector):
        """Creates or modifies existing 3PAR host.

        :param common: logged-in HPE3PARCommon instance
        :param volume: volume being attached; supplies CPG/domain/persona
            and (when CHAP is enabled) the stored CHAP credentials
        :param connector: connector dict; 'host' and 'initiator' are used
        :returns: tuple (host dict, CHAP username, CHAP password) —
            username/password are None when CHAP is disabled
        """
        # make sure we don't have the host already
        host = None
        username = None
        password = None
        hostname = common._safe_hostname(connector['host'])
        cpg = common.get_cpg(volume, allowSnap=True)
        domain = common.get_domain(cpg)

        # Get the CHAP secret if CHAP is enabled
        if common._client_conf['hpe3par_iscsi_chap_enabled']:
            vol_name = common._get_3par_vol_name(volume['id'])
            username = common.client.getVolumeMetaData(
                vol_name, CHAP_USER_KEY)['value']
            password = common.client.getVolumeMetaData(
                vol_name, CHAP_PASS_KEY)['value']

        try:
            host = common._get_3par_host(hostname)
            # Check whether host with iqn of initiator present on 3par
            hosts = common.client.queryHost(iqns=[connector['initiator']])
            host, hostname = common._get_prioritized_host_on_3par(host,
                                                                  hosts,
                                                                  hostname)
        except hpeexceptions.HTTPNotFound:
            # get persona from the volume type extra specs
            persona_id = common.get_persona_type(volume)
            # host doesn't exist, we have to create it
            hostname = self._create_3par_iscsi_host(common,
                                                    hostname,
                                                    [connector['initiator']],
                                                    domain,
                                                    persona_id)
        else:
            # Host exists: ensure it has an iSCSI path for this initiator,
            # and warn when its CHAP state lags the driver configuration.
            if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1:
                self._modify_3par_iscsi_host(
                    common, hostname,
                    connector['initiator'])
            elif (not host['initiatorChapEnabled'] and
                    common._client_conf['hpe3par_iscsi_chap_enabled']):
                LOG.warning("Host exists without CHAP credentials set and "
                            "has iSCSI attachments but CHAP is enabled. "
                            "Updating host with new CHAP credentials.")

        # set/update the chap details for the host
        self._set_3par_chaps(common, hostname, volume, username, password)
        host = common._get_3par_host(hostname)
        return host, username, password
    def _do_export(self, common, volume, connector):
        """Gets the associated account, generates CHAP info and updates.

        :param common: logged-in HPE3PARCommon instance
        :param volume: volume being exported
        :param connector: connector dict; 'host' becomes the CHAP username
        :returns: model update dict whose 'provider_auth' is either
            'CHAP <user> <password>' or None when CHAP is disabled
        """
        model_update = {}

        if not common._client_conf['hpe3par_iscsi_chap_enabled']:
            model_update['provider_auth'] = None
            return model_update

        # CHAP username will be the hostname
        chap_username = connector['host']

        chap_password = None
        try:
            # Get all active VLUNs for the host
            vluns = common.client.getHostVLUNs(chap_username)

            # Host has active VLUNs... is CHAP enabled on host?
            host_info = common.client.getHost(chap_username)

            if not host_info['initiatorChapEnabled']:
                LOG.warning("Host has no CHAP key, but CHAP is enabled.")

        except hpeexceptions.HTTPNotFound:
            chap_password = volume_utils.generate_password(16)
            LOG.warning("No host or VLUNs exist. Generating new "
                        "CHAP key.")
        else:
            # Get a list of all iSCSI VLUNs and see if there is already a CHAP
            # key assigned to one of them.  Use that CHAP key if present,
            # otherwise create a new one.  Skip any VLUNs that are missing
            # CHAP credentials in metadata.
            chap_exists = False
            active_vluns = 0

            for vlun in vluns:
                if not vlun['active']:
                    continue

                active_vluns += 1

                # iSCSI connections start with 'iqn'.
                if ('remoteName' in vlun and
                        re.match('iqn.*', vlun['remoteName'])):
                    try:
                        chap_password = common.client.getVolumeMetaData(
                            vlun['volumeName'], CHAP_PASS_KEY)['value']
                        chap_exists = True
                        break
                    except hpeexceptions.HTTPNotFound:
                        LOG.debug("The VLUN %s is missing CHAP credentials "
                                  "but CHAP is enabled. Skipping.",
                                  vlun['remoteName'])
                else:
                    LOG.warning("Non-iSCSI VLUN detected.")

            if not chap_exists:
                chap_password = volume_utils.generate_password(16)
                LOG.warning("No VLUN contained CHAP credentials. "
                            "Generating new CHAP key.")

        # Add CHAP credentials to the volume metadata
        vol_name = common._get_3par_vol_name(volume['id'])
        common.client.setVolumeMetaData(
            vol_name, CHAP_USER_KEY, chap_username)
        common.client.setVolumeMetaData(
            vol_name, CHAP_PASS_KEY, chap_password)

        model_update['provider_auth'] = ('CHAP %s %s' %
                                         (chap_username, chap_password))

        return model_update
@utils.trace
def create_export(self, context, volume, connector):
common = self._login()
try:
return self._do_export(common, volume, connector)
finally:
self._logout(common)
@utils.trace
def ensure_export(self, context, volume):
"""Ensure the volume still exists on the 3PAR.
Also retrieves CHAP credentials, if present on the volume
"""
common = self._login()
try:
vol_name = common._get_3par_vol_name(volume['id'])
common.client.getVolume(vol_name)
except hpeexceptions.HTTPNotFound:
LOG.error("Volume %s doesn't exist on array.", vol_name)
else:
metadata = common.client.getAllVolumeMetaData(vol_name)
username = None
password = None
model_update = {}
model_update['provider_auth'] = None
for member in metadata['members']:
if member['key'] == CHAP_USER_KEY:
username = member['value']
elif member['key'] == CHAP_PASS_KEY:
password = member['value']
if username and password:
model_update['provider_auth'] = ('CHAP %s %s' %
(username, password))
return model_update
finally:
self._logout(common)
def _get_least_used_nsp_for_host(self, common, hostname):
"""Get the least used NSP for the current host.
Steps to determine which NSP to use.
* If only one iSCSI NSP, return it
* If there is already an active vlun to this host, return its NSP
* Return NSP with fewest active vluns
"""
iscsi_nsps = self._get_iscsi_nsps(common)
# If there's only one path, use it
if len(iscsi_nsps) == 1:
return iscsi_nsps[0]
# Try to reuse an existing iscsi path to the host
vluns = common.client.getVLUNs()
for vlun in vluns['members']:
if vlun['active']:
if vlun['hostname'] == hostname:
temp_nsp = common.build_nsp(vlun['portPos'])
if temp_nsp in iscsi_nsps:
# this host already has an iscsi path, so use it
return temp_nsp
# Calculate the least used iscsi nsp
least_used_nsp = self._get_least_used_nsp(common,
vluns['members'],
self._get_iscsi_nsps(common))
return least_used_nsp
def _get_iscsi_nsps(self, common):
"""Return the list of candidate nsps."""
nsps = []
iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]
for value in iscsi_ips.values():
nsps.append(value['nsp'])
return nsps
def _get_ip_using_nsp(self, nsp, common):
"""Return IP associated with given nsp."""
iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']]
for (key, value) in iscsi_ips.items():
if value['nsp'] == nsp:
return key
def _get_least_used_nsp(self, common, vluns, nspss):
"""Return the nsp that has the fewest active vluns."""
# return only the nsp (node:server:port)
# count the number of nsps
nsp_counts = {}
for nsp in nspss:
# initialize counts to zero
nsp_counts[nsp] = 0
current_least_used_nsp = None
for vlun in vluns:
if vlun['active']:
nsp = common.build_nsp(vlun['portPos'])
if nsp in nsp_counts:
nsp_counts[nsp] = nsp_counts[nsp] + 1
# identify key (nsp) of least used nsp
current_smallest_count = sys.maxsize
for (nsp, count) in nsp_counts.items():
if count < current_smallest_count:
current_least_used_nsp = nsp
current_smallest_count = count
return current_least_used_nsp
|
|
#!/usr/bin/env python
"""Metadata module
Author: Alex Ip (alex.ip@ga.gov.au)
"""
import pickle
import logging
import os
logger = logging.getLogger('root.' + __name__)
class MetadataException(Exception):
    """Exception type raised for metadata handling errors."""
    pass
class Metadata(object):
    """Superclass of all metadata types

    Manages master dict containing all metadata trees
    """
    # Class variable holding metadata type string (e.g. 'XML', 'MTL',
    # 'REPORT', 'TIF', 'FST')
    _metadata_type_id = None  # Not set for the master metadata class
    # Default RegEx for finding metadata file. Raw string so '\.' is a
    # regex escape rather than an (invalid) string escape.
    _filename_pattern = r'.*\.dat'

    def __init__(self, source=None):
        """Instantiates Metadata object

        Argument:
            source: either a dict containing existing metadata or a string
                representing an input file to read
        """
        self._metadata_dict = {}
        self._filename = None
        if source:
            if isinstance(source, dict):
                self._metadata_dict = source
            elif isinstance(source, str):
                self.read_file(source)

    def get_metadata(self, key_path_list=None, subtree=None):
        """Return the sub-dict or value in the metadata nested dict
        from a list of keys drilling down through the tree structure. Key path
        can also contain ellipsis ('...') to skip to the first found instance
        of the next key

        Arguments:
            key_path_list: list of keys (or comma-delimited string) defining
                the path to the node. Defaults to the tree root.
            subtree: nested dict to search. Defaults to the full internal
                metadata dict.
        Returns:
            subtree dict, metadata value or None
        """
        def find_first_key(search_key, search_dict):
            """Recursive helper function to find the first value or sub-dict
            for the specified search key when an ellipsis is used in a key
            path.
            """
            logger.debug('  find_first_key(%s, %s) called',
                         repr(search_key), repr(search_dict))
            if not isinstance(search_dict, dict):
                return None
            for key in search_dict.keys():
                if key == search_key:
                    return search_dict[key]
                else:
                    found_item = find_first_key(search_key, search_dict[key])
                    if found_item:
                        return found_item
            return None  # Explicit: key not found anywhere under search_dict

        logger.debug('get_metadata(%s, %s) called',
                     repr(key_path_list), repr(subtree))
        subtree = subtree or self._metadata_dict

        # Convert comma-delimited string to list if necessary
        if isinstance(key_path_list, str):
            key_path_list = key_path_list.split(',')

        # Work on a copy so the caller's list is never mutated (the loop
        # below pops keys); None default replaces a mutable default arg.
        key_path_list = list(key_path_list or [])

        while subtree and key_path_list:
            key = key_path_list.pop(0)
            if key == '...':  # Ellipsis means skip to next key
                while key_path_list and key == '...':  # Skip to next non-ellipsis key
                    key = key_path_list.pop(0)  # Skip to next key
                if key == '...':  # Bad input - ends in ellipsis
                    return None
                subtree = self.get_metadata(
                    key_path_list, find_first_key(key, subtree))
            elif key:
                try:
                    subtree = subtree.get(key)
                    logger.debug('key = %s, value = %s', key, subtree)
                except AttributeError:
                    # subtree is a leaf value with no .get(); leave it
                    # unchanged (previously hidden by a bare except)
                    pass

        return subtree

    def delete_metadata(self, key_path_list, subtree=None):
        """Remove the node at key_path_list from the metadata tree.

        Raises AssertionError when the path does not exist.
        """
        logger.debug('delete_metadata(%s, %s) called',
                     repr(key_path_list), repr(subtree))
        assert key_path_list, "Key path list must be non-empty"
        _key_path_list = list(key_path_list)  # Copy list to avoid side effects
        key = _key_path_list.pop()
        subtree = self.get_metadata(_key_path_list, subtree)
        assert subtree and key in subtree.keys(), repr(key_path_list) + " not found"
        del subtree[key]
        logger.debug('%s deleted', repr(key_path_list))

    def tree_to_tuples(self, subtree=None, node_name=''):
        """Recursive function to return all leaf node (key, value) pairs as a flat (un-sorted) list of tuples

        Arguments:
            subtree: nested dict to contain nodes. Defaults to full internal metadata dict
            node_name: comma-separated node path to pre-pend to child node names
        Returns:
            flat list of (<node path>, <value>) tuples
        """
        logger.debug('tree_to_tuples(%s, %s) called',
                     repr(subtree), repr(node_name))
        subtree = subtree or self._metadata_dict

        result_list = []
        while subtree:
            subtree = subtree.copy()  # Do not modify original top-level dict
            key, value = subtree.popitem()
            if node_name:
                key = node_name + ',' + key
            if isinstance(value, dict):  # not a leaf node
                result_list += self.tree_to_tuples(value, key)
            else:  # Leaf node - add key=value string to list
                result_list.append((str(key), str(value)))

        return result_list

    def tree_to_list(self, subtree=None, node_name=''):
        """Recursive function to return all leaf node (key, value) pairs as a flat (un-sorted) list of strings

        Arguments:
            subtree: nested dict to contain nodes. Defaults to full internal metadata dict
            node_name: comma-separated node path to pre-pend to child node names
        Returns:
            flat list of <node path>=<value> strings
        """
        logger.debug('tree_to_list(%s, %s) called',
                     repr(subtree), repr(node_name))
        subtree = subtree or self._metadata_dict

        # Forward node_name to tree_to_tuples; it was previously accepted
        # but silently ignored.
        return [name + '=' + value for name,
                value in self.tree_to_tuples(subtree, node_name)]

    def read_file(self, filename=None):
        """Parse a metadata file and store the results in self._metadata_dict

        Argument:
            filename: Name of metadata file to be parsed and stored. Defaults to instance value
        Returns:
            Nested dict containing metadata
        Side effect:
            Changes value of self._filename to match specified name if load succeeds
        """
        filename = filename or self._filename
        assert filename, 'Filename must be specified'

        # SECURITY NOTE: pickle.load executes arbitrary code if the file is
        # untrusted - only read files written by this application.
        with open(filename, 'rb') as infile:
            self._metadata_dict = pickle.load(infile)

        self._filename = filename
        return self._metadata_dict

    def write_file(self, filename=None):
        """Write the metadata contained in self._metadata_dict to a
        file in the appropriate format.

        Argument:
            filename: Metadata file to be written. Defaults to instance value
        """
        filename = filename or self._filename
        assert filename, 'Filename must be specified'

        with open(filename, 'wb') as outfile:
            pickle.dump(self._metadata_dict, outfile)

    def set_root_metadata_from_object(self, metadata_object):
        """Function to add the metadata belonging to another metadata object to the internal metadata dict

        Argument: Metadata object
        """
        # ToDo: Implement type checking to ensure that metadata_object is a
        # Metadata instance
        self._metadata_dict[
            metadata_object.metadata_type_id] = metadata_object.metadata_dict
        return self._metadata_dict

    def merge_root_metadata_from_object(self, metadata_object, overwrite=True):
        """Function to merge the metadata belonging to another metadata object to the internal metadata dict

        Argument: Metadata object
        """
        # ToDo: Implement type checking to ensure that metadata_object is a
        # Metadata instance
        self.merge_root_metadata(
            metadata_object.metadata_type_id, metadata_object.metadata_dict, overwrite)
        return self._metadata_dict

    def set_root_metadata(self, metadata, root_key=None):
        """Function to add or replace a nested dict under the specified root key in the internal metadata dict

        Arguments:
            root_key: metadata type string (e.g. 'XML', 'MTL', 'REPORT', 'TIF', 'FST')
            metadata: nested dict containing metadata tree to be added. Could also be a scalar value
        """
        if root_key:
            self._metadata_dict[root_key] = metadata
        else:
            self._metadata_dict = metadata
        return self._metadata_dict

    def merge_root_metadata(self, root_key, metadata, overwrite=True):
        """Function to merge a nested dict under the specified root key in the internal metadata dict.

        Arguments:
            root_key: metadata type string (e.g. 'XML', 'MTL', 'REPORT', 'TIF', 'FST')
            metadata: nested dict containing metadata tree to be added.
            overwrite: Boolean flag to enable overwriting of existing values
        """
        destination_tree = self._metadata_dict.get(root_key)
        if not destination_tree:
            destination_tree = {}
            self._metadata_dict[root_key] = destination_tree

        self.merge_metadata_dicts(metadata, destination_tree, overwrite)
        return self._metadata_dict

    def merge_metadata_node(self, key_path_list, metadata, overwrite=True):
        """Function to merge a nested dict at a node specified by a list of
        keys drilling down through the tree structure

        Arguments:
            key_path_list: List of keys defining the path to the node.
            metadata: Value or nested dict to graft into _metadata_dict
            overwrite: Boolean flag to enable overwriting of existing values
        """
        logger.debug('merge_metadata_node(%s, %s, %s) called', repr(
            key_path_list), repr(metadata), repr(overwrite))

        # Convert comma-delimited string to list if necessary
        if isinstance(key_path_list, str):
            key_path_list = key_path_list.split(',')

        destination_tree = self.get_metadata(key_path_list)
        assert destination_tree, 'Destination subtree dict not found'
        assert isinstance(destination_tree, dict), 'Destination is not a dict'
        assert '...' not in key_path_list, 'Key path must be specified explicitly (no ellipses allowed)'

        # Honour the caller's flag; it was previously hard-coded to True.
        self.merge_metadata_dicts(metadata, destination_tree, overwrite)
        return self._metadata_dict

    def set_metadata_node(self, key_path_list, metadata, overwrite=True):
        """Function to set a value or sub-dict in the metadata nested dict at a node specified by a list of
        keys drilling down through the tree structure.

        Arguments:
            key_path_list: List of keys defining the path to the node.
            metadata: Value or nested dict to graft into _metadata_dict
            overwrite: Boolean flag to enable overwriting of existing values

        N.B: Key path may NOT contain ellipses ('...')
        """
        logger.debug('set_metadata_node(%s, %s, %s) called', repr(
            key_path_list), repr(metadata), repr(overwrite))

        # Convert comma-delimited string to list if necessary
        # (a duplicate of this conversion block has been removed)
        if isinstance(key_path_list, str):
            key_path_list = key_path_list.split(',')

        assert key_path_list, 'Key path must be specified with a non-empty list'
        assert '...' not in key_path_list, 'Key path must be specified explicitly (no ellipses allowed)'

        subtree = self._metadata_dict
        key_path_list = list(key_path_list)  # Do not modify original list
        while isinstance(subtree, dict) and key_path_list:
            key = key_path_list.pop(0)
            logger.debug('key = %s', key)
            if key:
                if not key_path_list:  # No more levels to descend
                    # Metadata for key already exists in subtree
                    if key in subtree.keys() and subtree[
                            key] and not overwrite:
                        raise Exception('Unable to overwrite key ' + key)
                    else:
                        subtree[key] = metadata  # Overwrite previous node
                else:  # still more levels to descend
                    if key in subtree.keys():  # Existing node found (dict or value)
                        if not isinstance(subtree[key], dict) and subtree[
                                key] and not overwrite:
                            raise Exception(
                                'Unable to overwrite subtree ' + key)
                        else:
                            subtree = subtree.get(key)  # Descend to next level
                    else:  # Key doesn't exist in subtree
                        logger.debug('  Setting subtree[key] = {}')
                        subtree[key] = {}  # Create new subtree
                        subtree = subtree[key]

    def merge_metadata_dicts(self, source_tree, destination_tree, overwrite=True,
                             add_new_nodes=True, keep_existing_data=False):
        """Recursive function to copy a nested dict into another nested dict

        Arguments:
            source_tree: nested dict representing tree to copy into destination tree
            destination_tree: nested dict representing destination tree
            overwrite: Boolean flag to enable overwriting of existing values. Will raise exception on conflict
            add_new_nodes: Boolean flag to enable creation of new values
            keep_existing_data: Boolean flag to keep any non-empty data in the destination tree
        """
        if source_tree is None:
            return

        assert isinstance(source_tree, dict), 'Source tree must be a dict'

        for key in source_tree.keys():
            source_metadata = source_tree[key]
            dest_metadata = destination_tree.get(key)
            if isinstance(source_metadata,
                          dict):  # Source metadata is not a leaf node
                if dest_metadata is None:  # Key doesn't exist in destination - create sub-dict
                    if not add_new_nodes:
                        logger.debug('Unable to create new node %s', key)
                        continue
                    dest_metadata = {}
                    destination_tree[key] = dest_metadata
                # Destination metadata is a leaf node
                elif not isinstance(dest_metadata, dict):
                    # Overwrite leaf node with new sub-dict if possible
                    if not overwrite:
                        logger.debug(
                            'Unable to overwrite existing leaf node %s', key)
                        continue
                    dest_metadata = {}
                    destination_tree[key] = dest_metadata

                # Recursive call to copy source subtree to destination subtree
                self.merge_metadata_dicts(source_metadata, dest_metadata, overwrite,
                                          add_new_nodes, keep_existing_data)
            else:  # source metadata is a leaf node
                if dest_metadata and not overwrite:
                    logger.debug(
                        'Unable to overwrite existing destination metadata for %s', key)
                    continue
                elif dest_metadata is None and not add_new_nodes:
                    logger.debug('Unable to create new node %s', key)
                    continue

                if not dest_metadata or not keep_existing_data:
                    destination_tree[key] = source_metadata
                else:
                    logger.debug('Kept existing value of node %s', key)

    def list_from_string(self, comma_separated_string):
        '''
        Helper function to return a list of strings from a string containing a comma separated list
        '''
        if comma_separated_string:
            return [value_string.strip() for value_string in comma_separated_string.split(',') if value_string.strip()]
        else:
            return []

    @property
    def filename(self):
        """Returns filename
        """
        if self._filename:
            return os.path.abspath(self._filename)
        else:
            return None

    @property
    def metadata_type_id(self):
        """Returns metadata type ID string
        """
        return self.__class__._metadata_type_id

    @property
    def metadata_dict(self):
        """Returns metadata dict containing the full metadata tree
        """
        return self._metadata_dict
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-related utilities for supporting libvirt connection code."""
import os
import jinja2
import netaddr
import nova.conf
from nova.network import model
CONF = nova.conf.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_net_and_mask(cidr):
    """Return (network address, netmask) as strings for the given CIDR."""
    network = netaddr.IPNetwork(cidr)
    return str(network.ip), str(network.netmask)
def get_net_and_prefixlen(cidr):
    """Return (network address, prefix length) as strings for the given CIDR.

    Uses netaddr's public ``prefixlen`` property instead of the private
    ``_prefixlen`` attribute, which is an implementation detail.
    """
    net = netaddr.IPNetwork(cidr)
    return str(net.ip), str(net.prefixlen)
def get_ip_version(cidr):
    """Return the IP version (4 or 6) of the given CIDR as an int."""
    return int(netaddr.IPNetwork(cidr).version)
def _get_first_network(network, version):
# Using a generator expression with a next() call for the first element
# of a list since we don't want to evaluate the whole list as we can
# have a lot of subnets
try:
return next(i for i in network['subnets']
if i['version'] == version)
except StopIteration:
pass
def get_injected_network_template(network_info, use_ipv6=None, template=None,
                                  libvirt_virt_type=None):
    """Returns a rendered network template for the given network_info.
    :param network_info:
        :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
    :param use_ipv6: If False, do not return IPv6 template information
        even if an IPv6 subnet is present in network_info. Defaults to
        CONF.use_ipv6.
    :param template: Path to the interfaces template file. Defaults to
        CONF.injected_network_template.
    :param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for
        other hypervisors..
    :returns: the rendered template string, or None when there is nothing
        to inject (missing input/template or no injectable interfaces).
    """
    if use_ipv6 is None:
        use_ipv6 = CONF.use_ipv6
    if not template:
        template = CONF.injected_network_template
    if not (network_info and template):
        return
    nets = []
    ifc_num = -1
    ipv6_is_available = False
    for vif in network_info:
        if not vif['network'] or not vif['network']['subnets']:
            continue
        network = vif['network']
        # NOTE(bnemec): The template only supports a single subnet per
        # interface and I'm not sure how/if that can be fixed, so this
        # code only takes the first subnet of the appropriate type.
        subnet_v4 = _get_first_network(network, 4)
        subnet_v6 = _get_first_network(network, 6)
        # Interface numbering counts every VIF with subnets, even ones
        # skipped below, so eth names stay aligned with the VIF order.
        ifc_num += 1
        if not network.get_meta('injected'):
            continue
        hwaddress = vif.get('address')
        address = None
        netmask = None
        gateway = ''
        broadcast = None
        dns = None
        routes = []
        if subnet_v4:
            # DHCP-managed subnets get no static injection at all; skip
            # the whole interface.
            if subnet_v4.get_meta('dhcp_server') is not None:
                continue
            if subnet_v4['ips']:
                # Only the first fixed IP is injected.
                ip = subnet_v4['ips'][0]
                address = ip['address']
                netmask = model.get_netmask(ip, subnet_v4)
                if subnet_v4['gateway']:
                    gateway = subnet_v4['gateway']['address']
                broadcast = str(subnet_v4.as_netaddr().broadcast)
                dns = ' '.join([i['address'] for i in subnet_v4['dns']])
                for route_ref in subnet_v4['routes']:
                    (net, mask) = get_net_and_mask(route_ref['cidr'])
                    route = {'gateway': str(route_ref['gateway']['address']),
                             'cidr': str(route_ref['cidr']),
                             'network': net,
                             'netmask': mask}
                    routes.append(route)
        address_v6 = None
        gateway_v6 = ''
        netmask_v6 = None
        dns_v6 = None
        have_ipv6 = (use_ipv6 and subnet_v6)
        if have_ipv6:
            # Same DHCP rule for the IPv6 subnet.
            if subnet_v6.get_meta('dhcp_server') is not None:
                continue
            if subnet_v6['ips']:
                # The template only emits IPv6 config when at least one
                # interface actually has an IPv6 address.
                ipv6_is_available = True
                ip_v6 = subnet_v6['ips'][0]
                address_v6 = ip_v6['address']
                netmask_v6 = model.get_netmask(ip_v6, subnet_v6)
                if subnet_v6['gateway']:
                    gateway_v6 = subnet_v6['gateway']['address']
                dns_v6 = ' '.join([i['address'] for i in subnet_v6['dns']])
        net_info = {'name': 'eth%d' % ifc_num,
                    'hwaddress': hwaddress,
                    'address': address,
                    'netmask': netmask,
                    'gateway': gateway,
                    'broadcast': broadcast,
                    'dns': dns,
                    'routes': routes,
                    'address_v6': address_v6,
                    'gateway_v6': gateway_v6,
                    'netmask_v6': netmask_v6,
                    'dns_v6': dns_v6,
                    }
        nets.append(net_info)
    if not nets:
        return
    # Load the template from its containing directory and render it.
    tmpl_path, tmpl_file = os.path.split(template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path),
                             trim_blocks=True)
    template = env.get_template(tmpl_file)
    return template.render({'interfaces': nets,
                            'use_ipv6': ipv6_is_available,
                            'libvirt_virt_type': libvirt_virt_type})
def get_network_metadata(network_info, use_ipv6=None):
    """Gets a more complete representation of the instance network information.
    This data is exposed as network_data.json in the metadata service and
    the config drive.
    :param network_info: `nova.network.models.NetworkInfo` object describing
        the network metadata.
    :param use_ipv6: If False, do not return IPv6 template information
        even if an IPv6 subnet is present in network_info. Defaults to
        nova.netconf.use_ipv6.
    :returns: dict with 'links', 'networks' and 'services' keys, or None
        when network_info is empty.
    """
    if not network_info:
        return
    if use_ipv6 is None:
        use_ipv6 = CONF.use_ipv6
    # IPv4 or IPv6 networks
    nets = []
    # VIFs, physical NICs, or VLANs. Physical NICs will have type 'phy'.
    links = []
    # Non-network bound services, such as DNS
    services = []
    ifc_num = -1
    net_num = -1
    for vif in network_info:
        if not vif.get('network') or not vif['network'].get('subnets'):
            continue
        network = vif['network']
        # NOTE(JoshNang) currently, only supports the first IPv4 and first
        # IPv6 subnet on network, a limitation that also exists in the
        # network template.
        subnet_v4 = _get_first_network(network, 4)
        subnet_v6 = _get_first_network(network, 6)
        ifc_num += 1
        link = None
        # Get the VIF or physical NIC data
        if subnet_v4 or subnet_v6:
            link = _get_eth_link(vif, ifc_num)
            links.append(link)
        # Add IPv4 and IPv6 networks if they exist
        if subnet_v4 and subnet_v4.get('ips'):
            net_num += 1
            nets.append(_get_nets(vif, subnet_v4, 4, net_num, link['id']))
            # Deduplicate DNS entries across subnets.
            services += [dns for dns in _get_dns_services(subnet_v4)
                         if dns not in services]
        if (use_ipv6 and subnet_v6) and subnet_v6.get('ips'):
            net_num += 1
            nets.append(_get_nets(vif, subnet_v6, 6, net_num, link['id']))
            services += [dns for dns in _get_dns_services(subnet_v6)
                         if dns not in services]
    return {
        "links": links,
        "networks": nets,
        "services": services
    }
def _get_eth_link(vif, ifc_num):
"""Get a VIF or physical NIC representation.
:param vif: Neutron VIF
:param ifc_num: Interface index for generating name if the VIF's
'devname' isn't defined.
:return: A dict with 'id', 'vif_id', 'type', 'mtu' and
'ethernet_mac_address' as keys
"""
link_id = vif.get('devname')
if not link_id:
link_id = 'interface%d' % ifc_num
# Use 'phy' for physical links. Ethernet can be confusing
if vif.get('type') == 'ethernet':
nic_type = 'phy'
else:
nic_type = vif.get('type')
link = {
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
'mtu': vif['network']['meta'].get('mtu'),
'ethernet_mac_address': vif.get('address'),
}
return link
def _get_nets(vif, subnet, version, net_num, link_id):
    """Get networks for the given VIF and subnet
    :param vif: Neutron VIF
    :param subnet: Neutron subnet
    :param version: IP version as an int, either '4' or '6'
    :param net_num: Network index for generating name of each network
    :param link_id: Arbitrary identifier for the link the networks are
        attached to
    :returns: a dict describing either a DHCP network (when the subnet has
        a DHCP server) or a statically addressed network with routes.
    """
    # DHCP-managed subnets carry no static addressing; the guest DHCPs
    # on the link instead.
    if subnet.get_meta('dhcp_server') is not None:
        net_info = {
            'id': 'network%d' % net_num,
            'type': 'ipv%d_dhcp' % version,
            'link': link_id,
            'network_id': vif['network']['id']
        }
        return net_info
    # Static configuration: only the first fixed IP is used. Callers
    # (get_network_metadata) only call this when subnet['ips'] is non-empty.
    ip = subnet['ips'][0]
    address = ip['address']
    if version == 4:
        netmask = model.get_netmask(ip, subnet)
    elif version == 6:
        netmask = str(subnet.as_netaddr().netmask)
    net_info = {
        'id': 'network%d' % net_num,
        'type': 'ipv%d' % version,
        'link': link_id,
        'ip_address': address,
        'netmask': netmask,
        'routes': _get_default_route(version, subnet),
        'network_id': vif['network']['id']
    }
    # Add any additional routes beyond the default route
    for route in subnet['routes']:
        route_addr = netaddr.IPNetwork(route['cidr'])
        new_route = {
            'network': str(route_addr.network),
            'netmask': str(route_addr.netmask),
            'gateway': route['gateway']['address']
        }
        net_info['routes'].append(new_route)
    return net_info
def _get_default_route(version, subnet):
"""Get a default route for a network
:param version: IP version as an int, either '4' or '6'
:param subnet: Neutron subnet
"""
if subnet.get('gateway') and subnet['gateway'].get('address'):
gateway = subnet['gateway']['address']
else:
return []
if version == 4:
return [{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': gateway
}]
elif version == 6:
return [{
'network': '::',
'netmask': '::',
'gateway': gateway
}]
def _get_dns_services(subnet):
"""Get the DNS servers for the subnet."""
services = []
if not subnet.get('dns'):
return services
return [{'type': 'dns', 'address': ip.get('address')}
for ip in subnet['dns']]
|
|
import socket
import sys
import unittest
from common import linesep, platform_skip, TestCase
import pyuv
# Fixed local ports used by the tests: server binds TEST_PORT, client
# binds TEST_PORT2.
TEST_PORT = 12345
TEST_PORT2 = 12346
# Administratively-scoped IPv4 multicast group used by the multicast tests.
MULTICAST_ADDRESS = "239.255.0.1"
class UDPTest(TestCase):
    """UDP PING/PONG round trip between two sockets on localhost."""
    def setUp(self):
        super(UDPTest, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        # Count closed handles so the test can assert full teardown.
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        # Server: expect PING, reply PONG to the sender.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PING")
        self.server.send((ip, port), b"PONG"+linesep)
    def on_client_recv(self, handle, ip_port, flags, data, error):
        # Client: expect PONG, then tear both handles down.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PONG")
        self.client.close(self.on_close)
        self.server.close(self.on_close)
    def timer_cb(self, timer):
        # Kick off the exchange shortly after the loop starts.
        self.client.send(("127.0.0.1", TEST_PORT), b"PING"+linesep)
        timer.close(self.on_close)
    def test_udp_pingpong(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.set_broadcast(True) # for coverage
        try:
            self.server.set_ttl(10) # for coverage
        except pyuv.error.UDPError:
            # This function is not implemented on Windows
            pass
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        self.client.start_recv(self.on_client_recv)
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
class UDPEmptyDatagramTest(TestCase):
    """A zero-length datagram must be delivered as b'' with no error."""
    def setUp(self):
        super(UDPEmptyDatagramTest, self).setUp()
        self.server = None
        self.client = None
        self.on_close_called = 0
    def on_close(self, handle):
        self.on_close_called += 1
    def on_client_recv(self, handle, ip_port, flags, data, error):
        # Empty payload, no error: then close both handles.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        self.assertEqual(error, None)
        self.assertEqual(data, b"")
        self.client.close(self.on_close)
        self.server.close(self.on_close)
    def test_udp_empty_datagram(self):
        self.server = pyuv.UDP(self.loop)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        self.client.start_recv(self.on_client_recv)
        self.server.send(("127.0.0.1", TEST_PORT2), b"")
        self.loop.run()
        # server + client = 2 closed handles.
        self.assertEqual(self.on_close_called, 2)
class UDPTestNull(TestCase):
    """PING/PONG with an embedded NUL byte to verify binary-safe sends."""
    def setUp(self):
        super(UDPTestNull, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        # The NUL byte must survive the round trip intact.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PIN\x00G")
        self.server.send((ip, port), b"PONG"+linesep)
    def on_client_recv(self, handle, ip_port, flags, data, error):
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PONG")
        self.client.close(self.on_close)
        self.server.close(self.on_close)
    def timer_cb(self, timer):
        self.client.send(("127.0.0.1", TEST_PORT), b"PIN\x00G"+linesep)
        timer.close(self.on_close)
    def test_udp_pingpong_null(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        self.client.start_recv(self.on_client_recv)
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
class UDPTestList(TestCase):
    """PING/PONG where payloads are passed as a list of buffers."""
    def setUp(self):
        super(UDPTestList, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        # Buffers sent as a list arrive concatenated into one datagram.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PING")
        self.server.send((ip, port), [b"PONG", linesep])
    def on_client_recv(self, handle, ip_port, flags, data, error):
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PONG")
        self.client.close(self.on_close)
        self.server.close(self.on_close)
    def timer_cb(self, timer):
        self.client.send(("127.0.0.1", TEST_PORT), [b"PING", linesep])
        timer.close(self.on_close)
    def test_udp_pingpong_list(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        self.client.start_recv(self.on_client_recv)
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
class UDPTestListNull(TestCase):
    """List-of-buffers send combined with an embedded NUL byte."""
    def setUp(self):
        super(UDPTestListNull, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PIN\x00G")
        self.server.send((ip, port), [b"PONG", linesep])
    def on_client_recv(self, handle, ip_port, flags, data, error):
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PONG")
        self.client.close(self.on_close)
        self.server.close(self.on_close)
    def timer_cb(self, timer):
        self.client.send(("127.0.0.1", TEST_PORT), [b"PIN\x00G", linesep])
        timer.close(self.on_close)
    def test_udp_pingpong_list_null(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        self.client.start_recv(self.on_client_recv)
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
class UDPTestInvalidData(TestCase):
    """Sending non-buffer objects must raise TypeError and send nothing."""
    def setUp(self):
        super(UDPTestInvalidData, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        # Nothing should ever reach the server in this test.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        self.client.close(self.on_close)
        self.server.close(self.on_close)
        self.fail("Expected send to fail.")
    def timer_cb(self, timer):
        # NOTE(review): the two assertion pairs are repeated verbatim,
        # presumably to exercise repeated failing sends — confirm intent.
        self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), object())
        self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), 1)
        self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), object())
        self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), 1)
        self.client.close(self.on_close)
        self.server.close(self.on_close)
        timer.close(self.on_close)
    def test_udp_invalid_data(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
if sys.platform == 'win32':
    def interface_addresses():
        """Yield the IPv4 address of every local interface (Windows)."""
        for info in socket.getaddrinfo('', None):
            family, sockaddr = info[0], info[4]
            if family == socket.AF_INET:
                yield sockaddr[0]
else:
    def interface_addresses():
        """Yield only the multicast group address (non-Windows)."""
        yield MULTICAST_ADDRESS
class UDPTestMulticast(TestCase):
    """Multicast group join/leave and delivery to the group members."""
    def setUp(self):
        super(UDPTestMulticast, self).setUp()
        self.server = None
        self.clients = None
        self.received_data = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_client_recv(self, handle, ip_port, flags, data, error):
        # First delivery wins; leave the group and close every client.
        self.assertEqual(flags, 0)
        ip, port = ip_port
        self.received_data = data.strip()
        for client in self.clients:
            client.set_membership(MULTICAST_ADDRESS, pyuv.UV_LEAVE_GROUP)
            client.close(self.on_close)
    def on_server_send(self, handle, error):
        handle.close(self.on_close)
    def _create_clients(self, loop=False):
        # One client per interface address, all joined to the group.
        self.clients = list()
        for addr in interface_addresses():
            client = pyuv.UDP(self.loop)
            client.bind((addr, TEST_PORT))
            client.set_membership(MULTICAST_ADDRESS, pyuv.UV_JOIN_GROUP)
            if loop:
                client.set_multicast_loop(True)
            else:
                client.set_multicast_ttl(10)
            client.start_recv(self.on_client_recv)
            self.clients.append(client)
    def test_udp_multicast(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self._create_clients()
        self.server.send((MULTICAST_ADDRESS, TEST_PORT), b"PING", self.on_server_send)
        self.loop.run()
        self.assertEqual(self.on_close_called, 1 + len(self.clients))
        self.assertEqual(self.received_data, b"PING")
    @platform_skip(["darwin"])
    def test_udp_multicast_loop(self):
        # Clients send to themselves via multicast loopback (no server).
        self.on_close_called = 0
        self._create_clients(True)
        for client in self.clients:
            client.send((MULTICAST_ADDRESS, TEST_PORT), b"PING")
        self.loop.run()
        self.assertEqual(self.on_close_called, len(self.clients))
        self.assertEqual(self.received_data, b"PING")
class UDPTestBigDatagram(TestCase):
    """A 64 KiB payload exceeds the UDP datagram limit -> UV_EMSGSIZE."""
    def send_cb(self, handle, error):
        self.handle.close()
        self.errorno = error
    def test_udp_big_datagram(self):
        self.errorno = None
        self.handle = pyuv.UDP(self.loop)
        # 65536 bytes is one over the maximum UDP payload size.
        data = b"X"*65536
        self.handle.send(("127.0.0.1", TEST_PORT), data, self.send_cb)
        self.loop.run()
        self.assertEqual(self.errorno, pyuv.errno.UV_EMSGSIZE)
class UDPTestOpen(TestCase):
    """Wrap an existing OS socket fd with UDP.open(); bad bind must fail."""
    def test_udp_open(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        handle = pyuv.UDP(self.loop)
        handle.open(sock.fileno())
        try:
            # 1.2.3.4 is not a local address, so bind should fail.
            handle.bind(("1.2.3.4", TEST_PORT))
        except pyuv.error.UDPError as e:
            self.assertEqual(e.args[0], pyuv.errno.UV_EADDRNOTAVAIL)
        self.loop.run()
        sock.close()
class UDPTestBind(TestCase):
    """bind() semantics: plain bind, SO_REUSEADDR sharing, and conflicts."""
    def test_udp_bind(self):
        handle = pyuv.UDP(self.loop)
        handle.bind(("", TEST_PORT))
        handle.close()
        self.loop.run()
    def test_udp_bind_reuse(self):
        # Two handles may share a port when both pass UV_UDP_REUSEADDR.
        handle = pyuv.UDP(self.loop)
        handle.bind(("", TEST_PORT), pyuv.UV_UDP_REUSEADDR)
        handle2 = pyuv.UDP(self.loop)
        handle2.bind(("", TEST_PORT), pyuv.UV_UDP_REUSEADDR)
        handle.close()
        handle2.close()
        self.loop.run()
    def test_udp_bind_noreuse(self):
        # Without the flag the second bind on the same port must fail.
        handle = pyuv.UDP(self.loop)
        handle.bind(("", TEST_PORT))
        handle2 = pyuv.UDP(self.loop)
        self.assertRaises(pyuv.error.UDPError, handle2.bind, ("", TEST_PORT))
        handle.close()
        handle2.close()
        self.loop.run()
class UDPTestFileno(TestCase):
    """fileno() of a bound UDP handle is a valid non-negative int fd."""
    def check_fileno(self, handle):
        fd = handle.fileno()
        self.assertIsInstance(fd, int)
        self.assertGreaterEqual(fd, 0)
    def test_udp_fileno(self):
        server = pyuv.UDP(self.loop)
        server.bind(("0.0.0.0", TEST_PORT))
        self.check_fileno(server)
        server.close()
class UDPTestMulticastInterface(TestCase):
    """set_multicast_interface() on a bound handle must not raise."""
    def test_udp_multicast_interface(self):
        handle = pyuv.UDP(self.loop)
        handle.bind(("", TEST_PORT))
        handle.set_multicast_interface("0.0.0.0")
        handle.close()
        self.loop.run()
@platform_skip(["win32"])
class UDPTryTest(TestCase):
    """try_send(): synchronous send that may return UV_EAGAIN; retry until
    the 4-byte PING is written and received by the server."""
    def setUp(self):
        super(UDPTryTest, self).setUp()
        self.server = None
        self.client = None
    def on_close(self, handle):
        self.on_close_called += 1
    def on_server_recv(self, handle, ip_port, flags, data, error):
        self.assertEqual(flags, 0)
        ip, port = ip_port
        data = data.strip()
        self.assertEqual(data, b"PING")
        self.server.close(self.on_close)
        self.client.close(self.on_close)
    def timer_cb(self, timer):
        timer.close(self.on_close)
        while True:
            try:
                r = self.client.try_send(("127.0.0.1", TEST_PORT), b"PING")
            except pyuv.error.UDPError as e:
                # Kernel buffer momentarily full: spin and retry.
                self.assertEqual(e.args[0], pyuv.errno.UV_EAGAIN)
            else:
                # try_send returns the number of bytes written.
                self.assertEqual(r, 4)
                break
    def test_udp_try_send(self):
        self.on_close_called = 0
        self.server = pyuv.UDP(self.loop)
        self.server.bind(("0.0.0.0", TEST_PORT))
        self.server.start_recv(self.on_server_recv)
        self.client = pyuv.UDP(self.loop)
        self.client.bind(("0.0.0.0", TEST_PORT2))
        timer = pyuv.Timer(self.loop)
        timer.start(self.timer_cb, 0.1, 0)
        self.loop.run()
        # server + client + timer = 3 closed handles.
        self.assertEqual(self.on_close_called, 3)
# Allow running this module directly: discover and run all tests verbosely.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
|
# Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from django.db import models
from django.conf import settings
import eveapi
from core.models import Type, Location
from API.models import CorpAPIKey
from core.models import Corporation, Alliance
from Map.models import System, MapSystem, Map
from API import cache_handler as handler
User = settings.AUTH_USER_MODEL
class POS(models.Model):
    """Represents a POS somewhere in space."""
    system = models.ForeignKey(System, related_name="poses")
    planet = models.IntegerField()
    moon = models.IntegerField()
    towertype = models.ForeignKey(Type, related_name="inspace")
    corporation = models.ForeignKey(Corporation, related_name="poses")
    posname = models.CharField(max_length=100, blank=True, null=True)
    fitting = models.TextField(blank=True, null=True)
    # Using CCP's status codes here for sanity with API checks
    status = models.IntegerField(choices=((0, 'Unanchored'),
                                          (1, 'Anchored'),
                                          (2, 'Onlining'),
                                          (3, 'Reinforced'),
                                          (4, 'Online')))
    # This should be the time the tower exits RF
    # TODO: add a validator to make sure this is only set
    # if status = 3 (Reinforced)
    rftime = models.DateTimeField(null=True, blank=True)
    updated = models.DateTimeField()
    # These values will be set by the TSV parser from d-scan data if available
    guns = models.IntegerField(null=True, blank=True)
    ewar = models.IntegerField(null=True, blank=True)
    sma = models.IntegerField(null=True, blank=True)
    hardener = models.IntegerField(null=True, blank=True)
    # This is a short comment that is displayed as a warning
    warpin_notice = models.CharField(blank=True, null=True, max_length=64)
    class Meta:
        ordering = ['system__name', 'planet', 'moon']
    @classmethod
    def update_from_import_list(cls, system, import_list):
        """
        Imports starbases from YAML importer.

        Each entry is a dict in the shape produced by as_dict(). An
        existing POS at the same system/planet/moon for the same owning
        corporation is updated in place; otherwise a new one is created.
        """
        for pos in import_list:
            planet = pos['planet']
            moon = pos['moon']
            warpin = pos['warpin']
            status = pos['status']
            rftime = pos['rftime']
            name = pos['name']
            tower = Type.objects.get(name=pos['tower'])
            try:
                owner = Corporation.objects.get(name=pos['owner'])
            except Corporation.DoesNotExist:
                # Unknown corporation: look up its ID via the EVE API and
                # import it before attaching the POS.
                from core import tasks
                api = eveapi.EVEAPIConnection(cacheHandler=handler)
                corp_id = api.eve.CharacterID(
                    names=pos['owner']).characters[0].characterID
                owner = tasks.update_corporation(corp_id, True)
            if POS.objects.filter(system=system, planet=planet,
                                  moon=moon, corporation=owner).exists():
                # Update first existing record
                starbase = POS.objects.filter(system=system, planet=planet,
                                              moon=moon,
                                              corporation=owner).all()[0]
                starbase.status = status
                # BUG FIX: the model field is 'posname'; the old code set
                # 'starbase.name', a plain attribute Django never persists.
                starbase.posname = name
                starbase.towertype = tower
                if status == 3:
                    starbase.rftime = rftime
                starbase.warpin_notice = warpin
                # BUG FIX: the updated record was never saved, so imports
                # silently dropped all changes to existing starbases.
                starbase.save()
            else:
                new_pos = POS(system=system, planet=planet, moon=moon,
                              corporation=owner, towertype=tower,
                              warpin_notice=warpin, status=status)
                if status == 3:
                    new_pos.rftime = rftime
                new_pos.save()
    def as_dict(self):
        """Return a serializable dict of this POS (YAML exporter shape)."""
        data = {
            'planet': self.planet, 'moon': self.moon,
            'tower': self.towertype.name, 'owner': self.corporation.name,
            'status': self.status, 'name': self.posname,
            'rftime': self.rftime, 'warpin': self.warpin_notice,
        }
        return data
    def clean(self):
        """Validate that rftime is only set while reinforced (status 3)."""
        from django.core.exceptions import ValidationError
        if self.rftime and self.status != 3:
            raise ValidationError("A POS cannot have an rftime unless "
                                  "it is reinforced")
    def __unicode__(self):
        return self.posname
    # override save to implement posname defaulting to towertype.name
    def save(self, *args, **kwargs):
        if not self.posname:
            self.posname = self.towertype.name
        # Mark tower as having been updated
        from datetime import datetime
        import pytz
        self.updated = datetime.now(pytz.utc)
        super(POS, self).save(*args, **kwargs)
    def log(self, user, action, map_system):
        """
        Records a log entry for POS updates and additions.
        """
        map_system.map.add_log(
            user,
            "%s POS (Planet %s Moon %s, owner %s) in %s (%s), %s jumps out from root system."
            %(action, self.planet, self.moon, self.corporation, map_system.system.name,
              map_system.friendlyname, map_system.distance_from_root()))
    def size(self):
        """
        Returns the size of the tower, Small Medium or Large.
        """
        if u'Small' in self.towertype.name:
            return u'Small'
        if u'Medium' in self.towertype.name:
            return u'Medium'
        return u'Large'
    def fit_from_dscan(self, dscan):
        """
        Fills in a POS's fitting from a copy / paste of d-scan results.
        """
        return self.fit_from_iterable(csv.reader(dscan.splitlines(),
                                                 delimiter="\t"))
    def fit_from_iterable(self, fit):
        """
        Fills in a POS's fitting from an iterable (normally parsed d-scan)

        Counts guns / ewar / SMAs / hardeners by market group, detects the
        tower itself, and records other starbase structures in the fitting
        text. Raises AttributeError when no tower is found (and none is
        already set) or when more than one tower appears in the scan.
        """
        from core.models import Type
        item_dict = dict()
        # marketGroupIDs to consider guns, ewar, hardeners, and smas
        guns_groups = [480, 479, 594, 595, 596]
        ewar_groups = [481, 1009]
        sma_groups = [484]
        hardener_groups = [485]
        towers = 0
        self.sma = 0
        self.hardener = 0
        self.guns = 0
        self.ewar = 0
        for row in fit:
            try:
                item_type = Type.objects.get(name=row[1])
            # odd bug where invalid items get into dscan
            except Type.DoesNotExist:
                continue
            if item_type.marketgroup:
                # Walk up the market-group tree so ancestor groups can be
                # checked below (1285 = starbase structures, 478 = towers).
                group_tree = []
                parent = item_type.marketgroup
                while parent:
                    group_tree.append(parent.id)
                    parent = parent.parentgroup
                if item_type.marketgroup.id in guns_groups:
                    self.guns += 1
                if item_type.marketgroup.id in ewar_groups:
                    self.ewar += 1
                if item_type.marketgroup.id in sma_groups:
                    self.sma += 1
                if item_type.marketgroup.id in hardener_groups:
                    self.hardener += 1
                if item_type.marketgroup.id == 478:
                    towers += 1
                    towertype = item_type
                    posname = row[0]
                if item_type.name in item_dict:
                    item_dict[item_type.name] += 1
                elif 1285 in group_tree and 478 not in group_tree:
                    item_dict.update({item_type.name: 1})
        self.fitting = "Imported from D-Scan:\n"
        for itemtype in item_dict:
            self.fitting += "\n%s : %s" % (itemtype, item_dict[itemtype])
        # Only adopt the scanned tower when neither a tower type nor a
        # name has been set on this record yet.
        if towers == 1 and self.towertype_id is None and self.posname is None:
            self.towertype = towertype
            self.posname = posname
        if towers == 0 and self.towertype_id is None:
            raise AttributeError('No POS in the D-Scan!')
        elif towers <= 1:
            self.save()
        else:
            raise AttributeError('Too many towers detected in the D-Scan!')
class CorpPOS(POS):
    """A corp-controlled POS with manager and password data."""
    # Member responsible for fueling/maintaining this tower (optional).
    manager = models.ForeignKey(User, null=True, blank=True,
                                related_name='poses')
    password = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    # Let's store the CCP Item ID for the tower here to make API lookup easier
    # If it is null, then we are not tracking this POS via API
    apiitemid = models.BigIntegerField(null=True, blank=True)
    apikey = models.ForeignKey(CorpAPIKey, null=True, blank=True,
                               related_name='poses')
    class Meta:
        permissions = (('can_see_pos_pw', 'Can see corp POS passwords.'),
                       ('can_see_all_pos', 'Sees all corp POSes '
                                           'regardless of manager.'),)
class POSApplication(models.Model):
    """Represents an application for a personal POS."""
    applicant = models.ForeignKey(User, null=True, blank=True,
                                  related_name='posapps')
    towertype = models.ForeignKey(Type, null=True, blank=True,
                                  related_name='posapps')
    # Other members who will live at the tower if approved.
    residents = models.ManyToManyField(User)
    # Free-text fittings for day-to-day and siege configurations.
    normalfit = models.TextField()
    siegefit = models.TextField()
    # Once it is approved, we will fill in these two to tie the records together
    approved = models.DateTimeField(blank=True, null=True)
    posrecord = models.ForeignKey(CorpPOS, blank=True, null=True,
                                  related_name='application')
    class Meta:
        permissions = (('can_close_pos_app',
                        'Can dispose of corp POS applications.'),)
    def __unicode__(self):
        return 'Applicant: %s Tower: %s' % (self.applicant.username,
                                            self.towertype.name)
class POSVote(models.Model):
    """Represents a vote on a personal POS application."""
    application = models.ForeignKey(POSApplication, related_name='votes')
    voter = models.ForeignKey(User, related_name='posvotes')
    # Vote values mirror the choices tuple: 0=Deny, 1=Approve, 2=Abstain.
    vote = models.IntegerField(choices=((0, 'Deny'),
                                        (1, 'Approve'),
                                        (2, 'Abstain')))
|
|
"""Tests for WebSocket API commands."""
import datetime
from unittest.mock import ANY, patch
from async_timeout import timeout
import pytest
import voluptuous as vol
from homeassistant.bootstrap import SIGNAL_BOOTSTRAP_INTEGRATONS
from homeassistant.components.websocket_api import const
from homeassistant.components.websocket_api.auth import (
TYPE_AUTH,
TYPE_AUTH_OK,
TYPE_AUTH_REQUIRED,
)
from homeassistant.components.websocket_api.const import URL
from homeassistant.core import Context, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import async_get_integration
from homeassistant.setup import DATA_SETUP_TIME, async_setup_component
from tests.common import MockEntity, MockEntityPlatform, async_mock_service
async def test_call_service(hass, websocket_client):
    """Test call service command."""
    calls = async_mock_service(hass, "domain_test", "test_service")
    payload = {
        "id": 5,
        "type": "call_service",
        "domain": "domain_test",
        "service": "test_service",
        "service_data": {"hello": "world"},
    }
    await websocket_client.send_json(payload)
    response = await websocket_client.receive_json()
    # The command must succeed and echo our message id.
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]
    # Exactly one service call was made, with the data we sent.
    assert len(calls) == 1
    call = calls[0]
    assert call.domain == "domain_test"
    assert call.service == "test_service"
    assert call.data == {"hello": "world"}
    # The call context is returned to the websocket caller.
    assert call.context.as_dict() == response["result"]["context"]
@pytest.mark.parametrize("command", ("call_service", "call_service_action"))
async def test_call_service_blocking(hass, websocket_client, command):
    """Test call service commands block, except for homeassistant restart / stop."""
    # Ordinary domain service: forwarded with blocking=True.
    with patch(
        "homeassistant.core.ServiceRegistry.async_call", autospec=True
    ) as mock_call:
        await websocket_client.send_json(
            {
                "id": 5,
                "type": "call_service",
                "domain": "domain_test",
                "service": "test_service",
                "service_data": {"hello": "world"},
            },
        )
        msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    mock_call.assert_called_once_with(
        ANY,
        "domain_test",
        "test_service",
        {"hello": "world"},
        blocking=True,
        context=ANY,
        target=ANY,
    )
    # Non-restart homeassistant-domain service: still blocking.
    with patch(
        "homeassistant.core.ServiceRegistry.async_call", autospec=True
    ) as mock_call:
        await websocket_client.send_json(
            {
                "id": 6,
                "type": "call_service",
                "domain": "homeassistant",
                "service": "test_service",
            },
        )
        msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    mock_call.assert_called_once_with(
        ANY,
        "homeassistant",
        "test_service",
        ANY,
        blocking=True,
        context=ANY,
        target=ANY,
    )
    # homeassistant.restart must NOT block, or the websocket reply would
    # be cut off by the restart itself.
    with patch(
        "homeassistant.core.ServiceRegistry.async_call", autospec=True
    ) as mock_call:
        await websocket_client.send_json(
            {
                "id": 7,
                "type": "call_service",
                "domain": "homeassistant",
                "service": "restart",
            },
        )
        msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    mock_call.assert_called_once_with(
        ANY, "homeassistant", "restart", ANY, blocking=False, context=ANY, target=ANY
    )
async def test_call_service_target(hass, websocket_client):
    """Test call service command with target."""
    calls = async_mock_service(hass, "domain_test", "test_service")
    payload = {
        "id": 5,
        "type": "call_service",
        "domain": "domain_test",
        "service": "test_service",
        "service_data": {"hello": "world"},
        "target": {
            "entity_id": ["entity.one", "entity.two"],
            "device_id": "deviceid",
        },
    }
    await websocket_client.send_json(payload)
    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]
    assert len(calls) == 1
    call = calls[0]
    assert call.domain == "domain_test"
    assert call.service == "test_service"
    # Target entries are merged into the service data; the scalar
    # device_id is normalized to a list.
    assert call.data == {
        "hello": "world",
        "entity_id": ["entity.one", "entity.two"],
        "device_id": ["deviceid"],
    }
    assert call.context.as_dict() == response["result"]["context"]
async def test_call_service_target_template(hass, websocket_client):
    """Test call service command with target does not allow template."""
    payload = {
        "id": 5,
        "type": "call_service",
        "domain": "domain_test",
        "service": "test_service",
        "service_data": {"hello": "world"},
        # Templates in target must be rejected outright.
        "target": {"entity_id": "{{ 1 }}"},
    }
    await websocket_client.send_json(payload)
    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_INVALID_FORMAT
async def test_call_service_not_found(hass, websocket_client):
    """Test call service command for an unregistered service."""
    # No service is registered, so the call must fail with NOT_FOUND.
    payload = {
        "id": 5,
        "type": "call_service",
        "domain": "domain_test",
        "service": "test_service",
        "service_data": {"hello": "world"},
    }
    await websocket_client.send_json(payload)
    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_NOT_FOUND
async def test_call_service_child_not_found(hass, websocket_client):
    """Test not reporting not found errors if it's not the called service."""
    async def serv_handler(call):
        # The handler itself calls a nonexistent service; that inner
        # failure must surface as a generic error, not NOT_FOUND.
        await hass.services.async_call("non", "existing")
    hass.services.async_register("domain_test", "test_service", serv_handler)
    payload = {
        "id": 5,
        "type": "call_service",
        "domain": "domain_test",
        "service": "test_service",
        "service_data": {"hello": "world"},
    }
    await websocket_client.send_json(payload)
    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_HOME_ASSISTANT_ERROR
async def test_call_service_schema_validation_error(
    hass: HomeAssistantType, websocket_client
):
    """Test call service command with invalid service data."""
    calls = []
    # The service accepts exactly one required string key: "message".
    service_schema = vol.Schema(
        {
            vol.Required("message"): str,
        }
    )

    @callback
    def service_call(call):
        calls.append(call)

    hass.services.async_register(
        "domain_test",
        "test_service",
        service_call,
        schema=service_schema,
    )

    # Case 1: required key missing entirely.
    await websocket_client.send_json(
        {
            "id": 5,
            "type": "call_service",
            "domain": "domain_test",
            "service": "test_service",
            "service_data": {},
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_INVALID_FORMAT

    # Case 2: an extra key the schema does not allow.
    await websocket_client.send_json(
        {
            "id": 6,
            "type": "call_service",
            "domain": "domain_test",
            "service": "test_service",
            "service_data": {"extra_key": "not allowed"},
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_INVALID_FORMAT

    # Case 3: "message" present but with the wrong type (list, not str).
    await websocket_client.send_json(
        {
            "id": 7,
            "type": "call_service",
            "domain": "domain_test",
            "service": "test_service",
            "service_data": {"message": []},
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_INVALID_FORMAT

    # None of the invalid calls may have reached the handler.
    assert len(calls) == 0
async def test_call_service_error(hass, websocket_client):
    """Test call service command with error."""

    # A HomeAssistantError raised by the handler maps to the
    # "home_assistant_error" code with the exception message passed through.
    @callback
    def ha_error_call(_):
        raise HomeAssistantError("error_message")

    hass.services.async_register("domain_test", "ha_error", ha_error_call)

    # Any other exception type maps to the generic "unknown_error" code.
    async def unknown_error_call(_):
        raise ValueError("value_error")

    hass.services.async_register("domain_test", "unknown_error", unknown_error_call)

    await websocket_client.send_json(
        {
            "id": 5,
            "type": "call_service",
            "domain": "domain_test",
            "service": "ha_error",
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"] is False
    assert msg["error"]["code"] == "home_assistant_error"
    assert msg["error"]["message"] == "error_message"

    await websocket_client.send_json(
        {
            "id": 6,
            "type": "call_service",
            "domain": "domain_test",
            "service": "unknown_error",
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"] is False
    assert msg["error"]["code"] == "unknown_error"
    assert msg["error"]["message"] == "value_error"
async def test_subscribe_unsubscribe_events(hass, websocket_client):
    """Test subscribe/unsubscribe events command."""
    # Baseline bus listener count, so add/remove can be verified precisely.
    init_count = sum(hass.bus.async_listeners().values())

    await websocket_client.send_json(
        {"id": 5, "type": "subscribe_events", "event_type": "test_event"}
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Verify we have a new listener
    assert sum(hass.bus.async_listeners().values()) == init_count + 1

    # Only "test_event" was subscribed; the surrounding events must not be
    # forwarded to the client.
    hass.bus.async_fire("ignore_event")
    hass.bus.async_fire("test_event", {"hello": "world"})
    hass.bus.async_fire("ignore_event")

    with timeout(3):
        msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event["event_type"] == "test_event"
    assert event["data"] == {"hello": "world"}
    assert event["origin"] == "LOCAL"

    await websocket_client.send_json(
        {"id": 6, "type": "unsubscribe_events", "subscription": 5}
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Check our listener got unsubscribed
    assert sum(hass.bus.async_listeners().values()) == init_count
async def test_get_states(hass, websocket_client):
    """Test get_states command."""
    hass.states.async_set("greeting.hello", "world")
    hass.states.async_set("greeting.bye", "universe")

    await websocket_client.send_json({"id": 5, "type": "get_states"})

    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]

    # The result must mirror every state currently in the state machine.
    expected = [state.as_dict() for state in hass.states.async_all()]
    assert response["result"] == expected
async def test_get_services(hass, websocket_client):
    """Test get_services command."""
    await websocket_client.send_json({"id": 5, "type": "get_services"})

    # The command simply mirrors the live service registry to the client.
    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]
    assert response["result"] == hass.services.async_services()
async def test_get_config(hass, websocket_client):
    """Test get_config command."""
    await websocket_client.send_json({"id": 5, "type": "get_config"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # JSON serialization turns sets into lists; convert the list-valued keys
    # back to sets so the comparison with hass.config.as_dict() is
    # order-insensitive. Each key is guarded because its presence depends on
    # the config version under test.
    if "components" in msg["result"]:
        msg["result"]["components"] = set(msg["result"]["components"])
    if "whitelist_external_dirs" in msg["result"]:
        msg["result"]["whitelist_external_dirs"] = set(
            msg["result"]["whitelist_external_dirs"]
        )
    if "allowlist_external_dirs" in msg["result"]:
        msg["result"]["allowlist_external_dirs"] = set(
            msg["result"]["allowlist_external_dirs"]
        )
    if "allowlist_external_urls" in msg["result"]:
        msg["result"]["allowlist_external_urls"] = set(
            msg["result"]["allowlist_external_urls"]
        )

    assert msg["result"] == hass.config.as_dict()
async def test_ping(websocket_client):
    """Test the ping command returns a pong."""
    await websocket_client.send_json({"id": 5, "type": "ping"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "pong"
async def test_call_service_context_with_user(hass, aiohttp_client, hass_access_token):
    """Test that the user is set in the service call context."""
    assert await async_setup_component(hass, "websocket_api", {})

    calls = async_mock_service(hass, "domain_test", "test_service")

    # Perform the auth handshake manually (instead of using the pre-authed
    # websocket_client fixture) so the connection is tied to a real token.
    client = await aiohttp_client(hass.http.app)

    async with client.ws_connect(URL) as ws:
        auth_msg = await ws.receive_json()
        assert auth_msg["type"] == TYPE_AUTH_REQUIRED

        await ws.send_json({"type": TYPE_AUTH, "access_token": hass_access_token})

        auth_msg = await ws.receive_json()
        assert auth_msg["type"] == TYPE_AUTH_OK

        await ws.send_json(
            {
                "id": 5,
                "type": "call_service",
                "domain": "domain_test",
                "service": "test_service",
                "service_data": {"hello": "world"},
            }
        )

        msg = await ws.receive_json()
        assert msg["success"]

        # The call context must carry the id of the token's owner.
        refresh_token = await hass.auth.async_validate_access_token(hass_access_token)

        assert len(calls) == 1
        call = calls[0]
        assert call.domain == "domain_test"
        assert call.service == "test_service"
        assert call.data == {"hello": "world"}
        assert call.context.user_id == refresh_token.user.id
async def test_subscribe_requires_admin(websocket_client, hass_admin_user):
    """Test subscribing events without being admin."""
    # Strip the user of all groups so they are no longer an admin.
    hass_admin_user.groups = []

    await websocket_client.send_json(
        {"id": 5, "type": "subscribe_events", "event_type": "test_event"}
    )

    response = await websocket_client.receive_json()
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_UNAUTHORIZED
async def test_states_filters_visible(hass, hass_admin_user, websocket_client):
    """Test we only get entities that we're allowed to see."""
    # Policy grants visibility of exactly one entity.
    hass_admin_user.mock_policy({"entities": {"entity_ids": {"test.entity": True}}})
    hass.states.async_set("test.entity", "hello")
    hass.states.async_set("test.not_visible_entity", "invisible")

    await websocket_client.send_json({"id": 5, "type": "get_states"})

    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]

    # Only the permitted entity may be returned.
    assert len(response["result"]) == 1
    assert response["result"][0]["entity_id"] == "test.entity"
async def test_get_states_not_allows_nan(hass, websocket_client):
    """Test get_states command not allows NaN floats."""
    # NaN is not representable in JSON, so serializing this state must fail.
    hass.states.async_set("greeting.hello", "world", {"hello": float("NaN")})

    await websocket_client.send_json({"id": 5, "type": "get_states"})

    response = await websocket_client.receive_json()
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_UNKNOWN_ERROR
async def test_subscribe_unsubscribe_events_whitelist(
    hass, websocket_client, hass_admin_user
):
    """Test subscribe/unsubscribe events on whitelist."""
    # Non-admin users may only subscribe to whitelisted event types.
    hass_admin_user.groups = []

    await websocket_client.send_json(
        {"id": 5, "type": "subscribe_events", "event_type": "not-in-whitelist"}
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == "unauthorized"

    # "themes_updated" is whitelisted, so this subscription succeeds even
    # for a non-admin user.
    await websocket_client.send_json(
        {"id": 6, "type": "subscribe_events", "event_type": "themes_updated"}
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    hass.bus.async_fire("themes_updated")

    with timeout(3):
        msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == "event"
    event = msg["event"]
    assert event["event_type"] == "themes_updated"
    assert event["origin"] == "LOCAL"
async def test_subscribe_unsubscribe_events_state_changed(
    hass, websocket_client, hass_admin_user
):
    """Test subscribe/unsubscribe state_changed events."""
    # Restrict the user so only light.permitted is visible to them.
    hass_admin_user.groups = []
    hass_admin_user.mock_policy({"entities": {"entity_ids": {"light.permitted": True}}})

    await websocket_client.send_json(
        {"id": 7, "type": "subscribe_events", "event_type": "state_changed"}
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Both states change, but only the permitted entity's state_changed
    # event may be forwarded to this connection.
    hass.states.async_set("light.not_permitted", "on")
    hass.states.async_set("light.permitted", "on")

    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == "event"
    assert msg["event"]["event_type"] == "state_changed"
    assert msg["event"]["data"]["entity_id"] == "light.permitted"
async def test_render_template_renders_template(hass, websocket_client):
    """Test simple template is rendered and updated."""
    hass.states.async_set("light.test", "on")

    await websocket_client.send_json(
        {
            "id": 5,
            "type": "render_template",
            "template": "State is: {{ states('light.test') }}",
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # First event: the initial render, with the listener set the template
    # tracker derived from the template (only light.test).
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event == {
        "result": "State is: on",
        "listeners": {
            "all": False,
            "domains": [],
            "entities": ["light.test"],
            "time": False,
        },
    }

    # Changing the tracked entity must trigger a re-render event.
    hass.states.async_set("light.test", "off")
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event == {
        "result": "State is: off",
        "listeners": {
            "all": False,
            "domains": [],
            "entities": ["light.test"],
            "time": False,
        },
    }
async def test_render_template_manual_entity_ids_no_longer_needed(
    hass, websocket_client
):
    """Test that updates to specified entity ids cause a template rerender."""
    hass.states.async_set("light.test", "on")

    # Note: no explicit "entity_ids" field is sent — the tracker must derive
    # the watched entities from the template itself.
    await websocket_client.send_json(
        {
            "id": 5,
            "type": "render_template",
            "template": "State is: {{ states('light.test') }}",
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Initial render event.
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event == {
        "result": "State is: on",
        "listeners": {
            "all": False,
            "domains": [],
            "entities": ["light.test"],
            "time": False,
        },
    }

    # Updating the auto-detected entity triggers a re-render.
    hass.states.async_set("light.test", "off")
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event == {
        "result": "State is: off",
        "listeners": {
            "all": False,
            "domains": [],
            "entities": ["light.test"],
            "time": False,
        },
    }
@pytest.mark.parametrize(
    "template",
    [
        "{{ my_unknown_func() + 1 }}",
        "{{ my_unknown_var }}",
        "{{ my_unknown_var + 1 }}",
        "{{ now() | unknown_filter }}",
    ],
)
async def test_render_template_with_error(hass, websocket_client, caplog, template):
    """Test a template with an error."""
    # strict=True makes undefined variables/filters raise instead of
    # silently rendering as empty strings.
    await websocket_client.send_json(
        {"id": 5, "type": "render_template", "template": template, "strict": True}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_TEMPLATE_ERROR

    # The error must be reported to the client only, not logged.
    assert "Template variable error" not in caplog.text
    assert "TemplateError" not in caplog.text
@pytest.mark.parametrize(
    "template",
    [
        "{{ my_unknown_func() + 1 }}",
        "{{ my_unknown_var }}",
        "{{ my_unknown_var + 1 }}",
        "{{ now() | unknown_filter }}",
    ],
)
async def test_render_template_with_timeout_and_error(
    hass, websocket_client, caplog, template
):
    """Test a template with an error with a timeout."""
    # Same invalid templates as test_render_template_with_error, but with a
    # timeout supplied — the error path must behave identically.
    await websocket_client.send_json(
        {
            "id": 5,
            "type": "render_template",
            "template": template,
            "timeout": 5,
            "strict": True,
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_TEMPLATE_ERROR

    # The error must be reported to the client only, not logged.
    assert "Template variable error" not in caplog.text
    assert "TemplateError" not in caplog.text
async def test_render_template_error_in_template_code(hass, websocket_client, caplog):
    """Test a template that will throw in template.py."""
    # Piping a datetime through "random" raises inside the template engine.
    request = {"id": 5, "type": "render_template", "template": "{{ now() | random }}"}
    await websocket_client.send_json(request)

    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert not response["success"]
    assert response["error"]["code"] == const.ERR_TEMPLATE_ERROR

    # The failure goes to the client; nothing should be logged.
    assert "TemplateError" not in caplog.text
async def test_render_template_with_delayed_error(hass, websocket_client, caplog):
    """Test a template with an error that only happens after a state change."""
    hass.states.async_set("sensor.test", "on")
    await hass.async_block_till_done()

    # Renders fine while sensor.test exists; once the entity is removed the
    # else-branch references an undefined name and raises.
    template_str = """
{% if states.sensor.test.state %}
on
{% else %}
{{ explode + 1 }}
{% endif %}
"""

    await websocket_client.send_json(
        {"id": 5, "type": "render_template", "template": template_str}
    )
    await hass.async_block_till_done()

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Trigger the error path by removing the tracked entity.
    hass.states.async_remove("sensor.test")
    await hass.async_block_till_done()

    # First event: the successful initial render.
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    event = msg["event"]
    assert event == {
        "result": "on",
        "listeners": {
            "all": False,
            "domains": [],
            "entities": ["sensor.test"],
            "time": False,
        },
    }

    # Second message: the delayed template error, reported without logging.
    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_TEMPLATE_ERROR
    assert "TemplateError" not in caplog.text
async def test_render_template_with_timeout(hass, websocket_client, caplog):
    """Test a template that will timeout."""
    # A deliberately expensive nested loop (10^6 iterations) that cannot
    # finish within the tiny timeout below.
    slow_template_str = """
{% for var in range(1000) -%}
  {% for var in range(1000) -%}
    {{ var }}
  {%- endfor %}
{%- endfor %}
"""

    await websocket_client.send_json(
        {
            "id": 5,
            "type": "render_template",
            "timeout": 0.000001,
            "template": slow_template_str,
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_TEMPLATE_ERROR

    # The timeout is reported to the client, not logged as an error.
    assert "TemplateError" not in caplog.text
async def test_render_template_returns_with_match_all(hass, websocket_client):
    """Test that a template that would match with all entities still return success."""
    # A constant template has no entity references, which historically made
    # the tracker match all entities; the command must still succeed.
    request = {"id": 5, "type": "render_template", "template": "State is: {{ 42 }}"}
    await websocket_client.send_json(request)

    response = await websocket_client.receive_json()
    assert response["id"] == 5
    assert response["type"] == const.TYPE_RESULT
    assert response["success"]
async def test_manifest_list(hass, websocket_client):
    """Test loading manifests."""
    http = await async_get_integration(hass, "http")
    websocket_api = await async_get_integration(hass, "websocket_api")

    await websocket_client.send_json({"id": 5, "type": "manifest/list"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    # Only http and websocket_api are set up in this fixture; sort by domain
    # so the comparison does not depend on load order.
    assert sorted(msg["result"], key=lambda manifest: manifest["domain"]) == [
        http.manifest,
        websocket_api.manifest,
    ]
async def test_manifest_get(hass, websocket_client):
    """Test getting a manifest."""
    hue = await async_get_integration(hass, "hue")

    await websocket_client.send_json(
        {"id": 6, "type": "manifest/get", "integration": "hue"}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == hue.manifest

    # Non existing
    await websocket_client.send_json(
        {"id": 7, "type": "manifest/get", "integration": "non_existing"}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == "not_found"
async def test_entity_source_admin(hass, websocket_client, hass_admin_user):
    """Check that we fetch sources correctly."""
    platform = MockEntityPlatform(hass)

    await platform.async_add_entities(
        [MockEntity(name="Entity 1"), MockEntity(name="Entity 2")]
    )

    # Fetch all
    await websocket_client.send_json({"id": 6, "type": "entity/source"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {
        "test_domain.entity_1": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
        "test_domain.entity_2": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
    }

    # Fetch one
    await websocket_client.send_json(
        {"id": 7, "type": "entity/source", "entity_id": ["test_domain.entity_2"]}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {
        "test_domain.entity_2": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
    }

    # Fetch two
    await websocket_client.send_json(
        {
            "id": 8,
            "type": "entity/source",
            "entity_id": ["test_domain.entity_2", "test_domain.entity_1"],
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 8
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {
        "test_domain.entity_1": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
        "test_domain.entity_2": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
    }

    # Fetch non existing — an explicit entity_id that cannot be resolved
    # is an error rather than being silently dropped.
    await websocket_client.send_json(
        {
            "id": 9,
            "type": "entity/source",
            "entity_id": ["test_domain.entity_2", "test_domain.non_existing"],
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 9
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_NOT_FOUND

    # Mock policy — downgrade the user so only entity_2 is visible.
    hass_admin_user.groups = []
    hass_admin_user.mock_policy(
        {"entities": {"entity_ids": {"test_domain.entity_2": True}}}
    )

    # Fetch all — unauthorized entities are filtered out, not an error.
    await websocket_client.send_json({"id": 10, "type": "entity/source"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 10
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == {
        "test_domain.entity_2": {
            "source": entity.SOURCE_PLATFORM_CONFIG,
            "domain": "test_platform",
        },
    }

    # Fetch unauthorized — explicitly requesting a hidden entity IS an error.
    await websocket_client.send_json(
        {"id": 11, "type": "entity/source", "entity_id": ["test_domain.entity_1"]}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 11
    assert msg["type"] == const.TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == const.ERR_UNAUTHORIZED
async def test_subscribe_trigger(hass, websocket_client):
    """Test subscribing to a trigger."""
    init_count = sum(hass.bus.async_listeners().values())

    await websocket_client.send_json(
        {
            "id": 5,
            "type": "subscribe_trigger",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "variables": {"hello": "world"},
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Verify we have a new listener
    assert sum(hass.bus.async_listeners().values()) == init_count + 1

    # Fire the matching event with a known context so we can verify the
    # context id round-trips through the trigger payload.
    context = Context()
    hass.bus.async_fire("ignore_event")
    hass.bus.async_fire("test_event", {"hello": "world"}, context=context)
    hass.bus.async_fire("ignore_event")

    with timeout(3):
        msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == "event"
    assert msg["event"]["context"]["id"] == context.id
    assert msg["event"]["variables"]["trigger"]["platform"] == "event"

    event = msg["event"]["variables"]["trigger"]["event"]
    assert event["event_type"] == "test_event"
    assert event["data"] == {"hello": "world"}
    assert event["origin"] == "LOCAL"

    await websocket_client.send_json(
        {"id": 6, "type": "unsubscribe_events", "subscription": 5}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 6
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Check our listener got unsubscribed
    assert sum(hass.bus.async_listeners().values()) == init_count
async def test_test_condition(hass, websocket_client):
    """Test testing a condition."""
    hass.states.async_set("hello.world", "paulus")

    # A state condition that matches the state set above must evaluate True.
    await websocket_client.send_json(
        {
            "id": 5,
            "type": "test_condition",
            "condition": {
                "condition": "state",
                "entity_id": "hello.world",
                "state": "paulus",
            },
            "variables": {"hello": "world"},
        }
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 5
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"]["result"] is True
async def test_execute_script(hass, websocket_client):
    """Test executing a script sequence, with and without variables."""
    calls = async_mock_service(hass, "domain_test", "test_service")

    # First run: literal data, no variables.
    await websocket_client.send_json(
        {
            "id": 5,
            "type": "execute_script",
            "sequence": [
                {
                    "service": "domain_test.test_service",
                    "data": {"hello": "world"},
                }
            ],
        }
    )

    msg_no_var = await websocket_client.receive_json()
    assert msg_no_var["id"] == 5
    assert msg_no_var["type"] == const.TYPE_RESULT
    assert msg_no_var["success"]

    # Second run: templated data rendered from the supplied variables.
    await websocket_client.send_json(
        {
            "id": 6,
            "type": "execute_script",
            "sequence": {
                "service": "domain_test.test_service",
                "data": {"hello": "{{ name }}"},
            },
            "variables": {"name": "From variable"},
        }
    )

    msg_var = await websocket_client.receive_json()
    assert msg_var["id"] == 6
    assert msg_var["type"] == const.TYPE_RESULT
    assert msg_var["success"]

    await hass.async_block_till_done()
    await hass.async_block_till_done()

    assert len(calls) == 2

    call = calls[0]
    assert call.domain == "domain_test"
    assert call.service == "test_service"
    assert call.data == {"hello": "world"}
    assert call.context.as_dict() == msg_no_var["result"]["context"]

    call = calls[1]
    assert call.domain == "domain_test"
    assert call.service == "test_service"
    assert call.data == {"hello": "From variable"}
    assert call.context.as_dict() == msg_var["result"]["context"]
async def test_subscribe_unsubscribe_bootstrap_integrations(
    hass, websocket_client, hass_admin_user
):
    """Test subscribe/unsubscribe bootstrap_integrations."""
    await websocket_client.send_json(
        {"id": 7, "type": "subscribe_bootstrap_integrations"}
    )

    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]

    # Simulate bootstrap progress via the dispatcher signal; the payload
    # (domain -> elapsed seconds) must be forwarded verbatim.
    message = {"august": 12.5, "isy994": 12.8}

    async_dispatcher_send(hass, SIGNAL_BOOTSTRAP_INTEGRATONS, message)
    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == "event"
    assert msg["event"] == message
async def test_integration_setup_info(hass, websocket_client, hass_admin_user):
    """Test integration/setup_info command."""
    # Seed recorded setup durations; the command converts the timedeltas to
    # plain float seconds.
    hass.data[DATA_SETUP_TIME] = {
        "august": datetime.timedelta(seconds=12.5),
        "isy994": datetime.timedelta(seconds=12.8),
    }
    await websocket_client.send_json({"id": 7, "type": "integration/setup_info"})

    msg = await websocket_client.receive_json()
    assert msg["id"] == 7
    assert msg["type"] == const.TYPE_RESULT
    assert msg["success"]
    assert msg["result"] == [
        {"domain": "august", "seconds": 12.5},
        {"domain": "isy994", "seconds": 12.8},
    ]
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import variables
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
class ResourceVariable(variables.Variable):
"""Variable based on resource handles.
See the ${variables} documentation for more details.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with `ResourceVariable()` can be
used as inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
Unlike tf.Variable, a tf.ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed
to see all modifications to the value of the variable which happen in any
operation on which the read_value depends on (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable on which the read_value operation does not depend on.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.ResourceVariable(1.0)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
tf.Print(b, [b]).run() # Will print 2.0 because the value was read before
# other_assign ran.
```
To enforce these consistency properties tf.ResourceVariable might make more
copies than an equivalent tf.Variable under the hood, so tf.Variable is still
not deprecated.
"""
def __init__(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
variable_def=None,
import_scope=None,
constraint=None):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: Ignored. Provided for compatibility with tf.Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
variable_def: `VariableDef` protocol buffer. If not None, recreates the
`ResourceVariable` object with its contents. `variable_def` and other
arguments (except for import_scope) are mutually exclusive.
import_scope: Optional `string`. Name scope to add to the
ResourceVariable. Only used when `variable_def` is provided.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if variable_def:
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
if not context.in_graph_mode():
raise ValueError("Creating ResourceVariable from variable_def"
" only supported in GRAPH mode.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint)
# pylint: disable=unused-argument
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
constraint=None):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: Ignored. Provided for compatibility with tf.Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
self._trainable = trainable
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
self._save_slice_info = None
in_graph_mode = context.in_graph_mode()
with ops.control_dependencies(None):
with ops.name_scope(name, "Variable", []
if init_from_fn else [initial_value]) as name:
# pylint: disable=protected-access
handle_name = ops._name_from_scope_name(name)
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
if in_graph_mode:
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % handle_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
self._handle = gen_resource_variable_ops.var_handle_op(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name)
self._handle_device = (self._handle.device if in_graph_mode else
context.get_default_context().device_name)
else:
initial_value = initial_value()
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
self._handle = gen_resource_variable_ops.var_handle_op(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
container="")
self._handle_device = (self._handle.device if in_graph_mode else
context.get_default_context().device_name)
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
# pylint: disable=protected-access
if (in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
# pylint: enable=protected-access
self._handle = gen_resource_variable_ops.var_handle_op(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
container="")
self._handle_device = (self._handle.device if in_graph_mode else
context.get_default_context().device_name)
self._initial_value = initial_value if in_graph_mode else None
self._handle_name = handle_name + ":0"
self._dtype = initial_value.dtype.base_dtype
self._constraint = constraint
if in_graph_mode:
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = (
gen_resource_variable_ops.assign_variable_op(
self._handle,
self._build_initializer_expr(initial_value),
name=n))
with ops.name_scope("Read"), ops.colocate_with(self._handle):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(self._handle_device):
value = self._read_variable_op()
self._graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
self._cached_value = array_ops.identity(value)
else:
self._cached_value = None
else:
gen_resource_variable_ops.assign_variable_op(self._handle,
initial_value)
self._is_initialized_op = None
self._initializer_op = None
self._graph_element = None
if caching_device:
with ops.device(caching_device):
self._cached_value = self._read_variable_op()
else:
self._cached_value = None
ops.add_to_collections(collections, self)
def _init_from_proto(self, variable_def, import_scope=None):
    """Initializes from `VariableDef` proto.

    Instead of creating new ops, this recovers the variable's existing ops
    (handle, initializer, optional cached snapshot) by name from the default
    graph.

    Args:
      variable_def: A `VariableDef` protocol buffer describing the variable.
      import_scope: Optional `string`. Name scope prepended to the proto's
        op names before looking them up in the graph.

    Raises:
      ValueError: If the proto describes a ref-based `Variable`
        (`is_resource` is false) rather than a `ResourceVariable`.
    """
    # Note that init_from_proto is currently not supported in Eager mode.
    assert context.in_graph_mode()
    assert isinstance(variable_def, variable_pb2.VariableDef)
    if not variable_def.is_resource:
        raise ValueError("Trying to restore Variable as ResourceVariable.")

    # Create from variable_def.
    g = ops.get_default_graph()
    # The handle op is looked up by (scoped) name rather than re-created.
    self._handle = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.variable_name, import_scope=import_scope))
    self._handle_device = self._handle.device
    self._handle_name = self._handle.name
    self._initializer_op = g.as_graph_element(
        ops.prepend_name_scope(
            variable_def.initializer_name, import_scope=import_scope))
    # snapshot_name is only present when the variable was built with a
    # caching device; otherwise there is no cached value.
    if variable_def.snapshot_name:
        self._cached_value = g.as_graph_element(
            ops.prepend_name_scope(
                variable_def.snapshot_name, import_scope=import_scope))
    else:
        self._cached_value = None
    if variable_def.HasField("save_slice_info_def"):
        self._save_slice_info = variables.Variable.SaveSliceInfo(
            save_slice_info_def=variable_def.save_slice_info_def)
    else:
        self._save_slice_info = None
    self._caching_device = None
    # The dtype is recovered from the handle op's static attribute.
    self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
    self._graph_element = self.value()
    self._constraint = None
@property
def dtype(self):
    """The `DType` of values stored in this variable."""
    return self._dtype
@property
def device(self):
    """The device this variable is on.

    In graph mode this is the handle op's device; in eager mode it is the
    default context's device name captured at construction time.
    """
    return self._handle_device
@property
def graph(self):
    """The `Graph` containing this variable's handle op."""
    return self._handle.graph
@property
def name(self):
    """The name of the handle for this variable (always the ":0" output)."""
    return self._handle_name
@property
def shape(self):
    """The static `TensorShape` of this variable.

    In graph mode the shape comes from the handle op's "shape" attribute;
    in eager mode it is obtained by actually running the `variable_shape`
    op and converting the resulting numpy value.
    """
    if context.in_graph_mode():
        return tensor_shape.TensorShape(self._handle.op.get_attr("shape"))
    return tensor_shape.TensorShape(
        gen_resource_variable_ops.variable_shape(self._handle).numpy())
@property
def create(self):
    """The op responsible for initializing this variable.

    Only meaningful in graph mode; eager variables are initialized on
    construction, so there is no initializer op to return.
    """
    if context.in_graph_mode():
        return self._initializer_op
    raise RuntimeError("Calling create in EAGER mode not supported.")
@property
def handle(self):
    """The resource handle `Tensor` by which this variable is accessed."""
    return self._handle
def value(self):
    """A cached operation which reads the value of this variable.

    Returns the caching-device snapshot when one was created; otherwise a
    fresh read placed on the handle's device (with the colocation stack
    cleared so the read is not dragged into the caller's colocation scope).
    """
    if self._cached_value is None:
        with ops.colocate_with(None, ignore_existing=True):
            with ops.device(self._handle_device):
                return self._read_variable_op()
    return self._cached_value
def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element().

    Returns the cached read op so the variable can be fetched directly by
    `Session.run`; this is `None` for eager-mode variables.
    """
    return self._graph_element
@property
def initializer(self):
    """The op responsible for initializing this variable.

    `None` for eager-mode variables, which are initialized at construction.
    """
    return self._initializer_op
@property
def initial_value(self):
    """Returns the Tensor used as the initial value for the variable.

    Raises:
      RuntimeError: In eager mode, where the initial value is not retained.
    """
    if context.in_eager_mode():
        raise RuntimeError("initial_value not supported in EAGER mode.")
    return self._initial_value
@property
def constraint(self):
    """Returns the constraint function associated with this variable.

    Returns:
      The projection function that was passed to the variable constructor
      (applied to updated values, per the constructor's contract).
      Can be `None` if no constraint was passed.
    """
    return self._constraint
@property
def op(self):
    """The handle op for this variable."""
    return self._handle.op
def eval(self, session=None):
    """Evaluates and returns the value of this variable.

    Args:
      session: Optional `Session` to run in; defaults to the default session.

    Raises:
      RuntimeError: If called in eager mode, where values are available
        without a session.
    """
    if context.in_eager_mode():
        raise RuntimeError("Trying to eval in EAGER mode")
    return self._graph_element.eval(session=session)
def _set_save_slice_info(self, save_slice_info):
    """Sets the slice info for this `ResourceVariable`.

    Once set, the info is serialized into the `VariableDef` by `to_proto`.

    Args:
      save_slice_info: A `Variable.SaveSliceInfo` object.
    """
    self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
    """Returns the `Variable.SaveSliceInfo` set on this variable, if any."""
    return self._save_slice_info
def _read_variable_op(self):
    """Builds/executes a read of the variable's current value.

    In eager mode, reads of trainable variables are recorded on the tape so
    gradients can flow back to the variable.
    """
    if context.in_eager_mode() and self._trainable:
        tape.watch(self._handle)
    return read_variable_op(self._handle, dtype=self._dtype)
def read_value(self):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Returns:
      the read operation.
    """
    with ops.name_scope("Read"):
        # In graph mode, ensure we read the variable in the same device as the
        # handle. In eager mode, however, this sometimes tries to read a GPU
        # variable in the CPU because the handle is host memory. For now, then, we
        # need to skip the device block in eager. TODO(apassos) eager should have
        # separate notions of device and memory, so handle.device can be GPU while
        # handle.memory_space is always CPU.
        if context.in_graph_mode():
            with ops.device(self._handle_device):
                value = self._read_variable_op()
        else:
            value = self._read_variable_op()
    # Return an identity so it can get placed on whatever device the context
    # specifies instead of the device where the variable is.
    return array_ops.identity(value)
def sparse_read(self, indices, name=None):
    """Reads the value of this variable sparsely, using `gather`.

    Args:
      indices: Integer `Tensor` of indices to gather.
      name: Optional name scope / op name for the gather.

    Returns:
      The gathered values, wrapped in an identity so the result can be
      placed on the calling context's device.
    """
    with ops.name_scope("Gather" if name is None else name) as name:
        value = resource_gather(
            self._handle, indices, dtype=self._dtype, name=name)
    return array_ops.identity(value)
def to_proto(self, export_scope=None):
    """Converts a `ResourceVariable` to a `VariableDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Raises:
      RuntimeError: If run in EAGER mode.

    Returns:
      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
      in the specified name scope.
    """
    if context.in_eager_mode():
        raise RuntimeError("to_proto not supported in EAGER mode.")
    if export_scope is None or self.handle.name.startswith(export_scope):
        var_def = variable_pb2.VariableDef()
        # Strip the export scope so names are relative when re-imported.
        var_def.variable_name = ops.strip_name_scope(self.handle.name,
                                                     export_scope)
        var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                        export_scope)
        # A snapshot only exists when a caching device was configured.
        if self._cached_value is not None:
            var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
                                                         export_scope)
        var_def.is_resource = True
        if self._save_slice_info:
            var_def.save_slice_info_def.MergeFrom(
                self._save_slice_info.to_proto(export_scope=export_scope))
        return var_def
    else:
        return None
@staticmethod
def from_proto(variable_def, import_scope=None):
    """Returns a `ResourceVariable` reconstructed from a `VariableDef` proto.

    Args:
      variable_def: A `VariableDef` protocol buffer.
      import_scope: Optional `string`. Name scope to prepend.

    Raises:
      RuntimeError: If run in EAGER mode (proto round-trips are graph-only).
    """
    if context.in_eager_mode():
        raise RuntimeError("from_proto not supported in EAGER mode.")
    return ResourceVariable(
        variable_def=variable_def, import_scope=import_scope)
@staticmethod
def _OverloadAllOperators():  # pylint: disable=invalid-name
    """Register overloads for all operators.

    Installs a forwarding wrapper (see `_OverloadOperator`) for every
    operator that `ops.Tensor` declares overloadable.
    """
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
        ResourceVariable._OverloadOperator(operator)
    # For slicing, bind getitem differently than a tensor (use SliceHelperVar
    # instead)
    # pylint: disable=protected-access
    setattr(ResourceVariable, "__getitem__", array_ops._SliceHelperVar)
def _AsTensor(self):
    """Returns the variable's value as a `Tensor` (used by operator overloads)."""
    return self.value()
def _ref(self):
    """Unsupported.

    Resource variables expose a handle rather than a mutable ref tensor.
    """
    raise NotImplementedError("ResourceVariable does not implement _ref()")
@staticmethod
def _OverloadOperator(operator):  # pylint: disable=invalid-name
    """Defer an operator overload to `ops.Tensor`.

    We pull the operator out of ops.Tensor dynamically to avoid ordering issues.

    Args:
      operator: string. The operator name.
    """

    def _run_op(a, *args):
        # pylint: disable=protected-access
        # Read the variable's value, then dispatch to Tensor's operator.
        return getattr(ops.Tensor, operator)(a._AsTensor(), *args)

    # Propagate __doc__ to wrapper
    try:
        _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
    except AttributeError:
        pass

    setattr(ResourceVariable, operator, _run_op)

# Give our overloads precedence over numpy's when mixed with ndarrays
# (numpy defers binary ops to objects with higher __array_priority__).
__array_priority__ = 100
def assign_sub(self, delta, use_locking=None, name=None):
    """Subtracts `delta` from this variable and returns the updated value.

    Args:
      delta: A value convertible to a `Tensor` of this variable's dtype.
      use_locking: Unused; accepted for interface compatibility.
      name: Optional name for the subtraction op.

    Returns:
      The freshly read value, ordered after the subtraction.
    """
    # TODO(apassos): this here and below is not atomic. Consider making it
    # atomic if there's a way to do so without a performance cost for those who
    # don't need it.
    decrement = ops.convert_to_tensor(delta, dtype=self.dtype)
    sub_op = gen_resource_variable_ops.assign_sub_variable_op(
        self.handle, decrement, name=name)
    with ops.control_dependencies([sub_op]):
        return self.read_value()
def assign_add(self, delta, use_locking=None, name=None):
    """Adds `delta` to this variable and returns the updated value.

    Args:
      delta: A value convertible to a `Tensor` of this variable's dtype.
      use_locking: Unused; accepted for interface compatibility.
      name: Optional name for the addition op.

    Returns:
      The freshly read value, ordered after the addition.
    """
    increment = ops.convert_to_tensor(delta, dtype=self.dtype)
    add_op = gen_resource_variable_ops.assign_add_variable_op(
        self.handle, increment, name=name)
    with ops.control_dependencies([add_op]):
        return self.read_value()
def assign(self, value, use_locking=None, name=None):
    """Assigns `value` to this variable and returns the updated value.

    Args:
      value: A value convertible to a `Tensor` of this variable's dtype.
      use_locking: Unused; accepted for interface compatibility.
      name: Optional name for the assignment op.

    Returns:
      The freshly read value, ordered after the assignment.
    """
    new_value = ops.convert_to_tensor(value, dtype=self.dtype)
    assign_op = gen_resource_variable_ops.assign_variable_op(
        self.handle, new_value, name=name)
    with ops.control_dependencies([assign_op]):
        return self.read_value()
def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                          end_mask, ellipsis_mask, new_axis_mask,
                          shrink_axis_mask):
    """Writes `value` into a strided slice of this variable.

    Mask arguments follow the strided-slice op's conventions. Returns the
    variable's value, ordered after the in-place slice write.
    """
    slice_assign = gen_array_ops.resource_strided_slice_assign(
        ref=self.handle,
        begin=begin,
        end=end,
        strides=strides,
        value=value,
        name=name,
        begin_mask=begin_mask,
        end_mask=end_mask,
        ellipsis_mask=ellipsis_mask,
        new_axis_mask=new_axis_mask,
        shrink_axis_mask=shrink_axis_mask)
    with ops.control_dependencies([slice_assign]):
        return self.value()
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Tensor-conversion hook used via `ops.register_tensor_conversion_function`.

    Args:
      dtype: If set and different from the variable's dtype, conversion is
        declined (`NotImplemented`) so another converter may be tried.
      name: Unused.
      as_ref: If True, returns the read op's input (the handle side) instead
        of the read value.
    """
    del name
    if dtype is not None and dtype != self.value().dtype:
        # NOTE(review): debug print to stdout in library code — presumably
        # leftover diagnostics; consider removing or using logging (confirm
        # no caller relies on it before changing).
        print("trying to switch the dtype to ", dtype, " from ",
              self.value().dtype)
        return NotImplemented
    if as_ref:
        return self.read_value().op.inputs[0]
    else:
        return self.value()
@custom_gradient.custom_gradient
def read_variable_op(handle, dtype):
    """Reads the value of a variable.

    The tensor returned by this operation is immutable.

    The value returned by this operation is guaranteed to be influenced by all the
    writes on which this operation depends directly or indirectly, and to not be
    influenced by any of the writes which depend directly or indirectly on this
    operation.

    Args:
      handle: A `Tensor` of type `resource`.
        handle to the resource in which to store the variable.
      dtype: A `tf.DType`. the dtype of the value.

    Returns:
      A `Tensor` of type `dtype`.
    """
    result = gen_resource_variable_ops.read_variable_op(handle, dtype)

    def grad(dresult):
        # A read is an identity w.r.t. the stored value, so the incoming
        # gradient passes through unchanged.
        return dresult

    return result, grad
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    """Module-level adapter delegating tensor conversion to the variable."""
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.

# Note: registering for Variable after ResourceVariable because inheritance will
# otherwise lead to the wrong behavior.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access

# Install the Tensor operator overloads and advertise ResourceVariable as a
# dense tensor-like type (import-time side effects).
# pylint: disable=protected-access
ResourceVariable._OverloadAllOperators()
ops.register_dense_tensor_like_type(ResourceVariable)
@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
    """Gradient for read op: a read is an identity, so pass the gradient through."""
    return grad
# TODO(apassos) do not use custom_gradient here by making other entry points
# than custom_gradient also aware of how to deal with variables implicitly
# watched in the tape (i.e. the call to _watch_value in custom_gradient)
@custom_gradient.custom_gradient
def resource_gather(resource, indices, dtype, validate_indices=True, name=None):
    """Gather slices from the variable pointed to by `resource`.

    `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
    Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

    ```python
    # Scalar indices
    output[:, ..., :] = params[indices, :, ... :]
    # Vector indices
    output[i, :, ..., :] = params[indices[i], :, ... :]
    # Higher rank indices
    output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
    ```

    Args:
      resource: A `Tensor` of type `resource`.
        handle to the resource in which to store the variable.
      indices: a integer `Tensor` containing the indices to be gathered.
      dtype: A `tf.DType`. the dtype of the value.
      validate_indices: optional `bool`. If false will not validate that the
        indices fit in the variable.
      name: The optional name for the operation to be added.

    Returns:
      A `Tensor` of type `dtype`.
    """
    result = gen_resource_variable_ops.resource_gather(
        resource, indices, dtype, validate_indices=validate_indices, name=name)

    def grad(dresult):
        # The gradient of a gather is sparse: only the gathered rows receive
        # gradient, so return IndexedSlices with the variable's dense shape.
        return ops.IndexedSlices(
            dresult,
            indices,
            dense_shape=gen_resource_variable_ops.variable_shape(resource))

    return result, grad
@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
    """Gradient for gather op."""
    # Build appropriately shaped IndexedSlices
    # Walk graph back until the original handle is found.
    # TODO(apassos): more robust way of getting the shape.
    # TODO(apassos): implement this for EAGER mode.
    if context.in_eager_mode():
        raise NotImplementedError("_GatherGrad not implemented for EAGER mode")
    handle = op.inputs[0]
    # Follow the first input back to the originating VarHandleOp so its
    # static "shape" attribute can be read.
    while handle.op.type != "VarHandleOp":
        handle = handle.op.inputs[0]
    params_shape = ops.convert_to_tensor(
        tensor_shape.TensorShape(handle.op.get_attr("shape")))
    indices = op.inputs[1]
    # Flatten indices and reshape the incoming gradient to
    # [num_indices] + params_shape[1:] rows.
    size = array_ops.expand_dims(array_ops.size(indices), 0)
    values_shape = array_ops.concat([size, params_shape[1:]], 0)
    values = array_ops.reshape(grad, values_shape)
    indices = array_ops.reshape(indices, size)
    # No gradient flows to the indices input (second entry is None).
    return [ops.IndexedSlices(values, indices, params_shape), None]
def _to_proto_fn(v, export_scope=None):
    """Converts Variable and ResourceVariable to VariableDef for collections."""
    # Both classes share the to_proto interface, so a single dispatcher works.
    return v.to_proto(export_scope=export_scope)
def _from_proto_fn(v, import_scope=None):
    """Creates Variable or ResourceVariable from VariableDef as needed."""
    # Dispatch on the proto's is_resource flag to pick the matching class.
    restore_cls = ResourceVariable if v.is_resource else variables.Variable
    return restore_cls.from_proto(v, import_scope=import_scope)
# Register (de)serialization for variables in every standard collection so
# graphs containing them round-trip through MetaGraphDef. The five calls were
# identical except for the collection key, so they are folded into one loop.
for _collection_key in (ops.GraphKeys.GLOBAL_VARIABLES,
                        ops.GraphKeys.TRAINABLE_VARIABLES,
                        ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
                        ops.GraphKeys.LOCAL_VARIABLES,
                        ops.GraphKeys.MODEL_VARIABLES):
    ops.register_proto_function(
        _collection_key,
        proto_type=variable_pb2.VariableDef,
        to_proto=_to_proto_fn,
        from_proto=_from_proto_fn)
|
|
from typing import List, Union
import numpy as np
import tensorflow as tf
import nn_utils.math_utils as math_utils
from dataflow.illumination_integration.helper import (
getBilinearFromUv,
)
from nn_utils.math_utils import shape_to_uv, uv_to_direction
@tf.function
def map_levels_to_samples(
    num_roughness_0: int,
    num_random_roughness: int,
    data_levels: List[tf.Tensor],
):
    """Draws random (direction, roughness) training samples from a mip stack.

    Args:
      num_roughness_0: Number of samples fetched at roughness 0.
      num_random_roughness: Number of samples at random roughness in (0, 1].
      data_levels: Per-roughness-level [H, W, 3] maps; level 0 defines the
        uv grid that candidate directions are drawn from.

    Returns:
      (data_levels, r0_directions, r0_samples, random_directions,
       random_roughness, random_samples).
    """
    # Setup uvs
    env_shape = data_levels[0].shape
    uvs = shape_to_uv(*env_shape[:-1])
    uvs_flat = tf.reshape(uvs, [-1, 2])
    total_directions_required = num_roughness_0 + num_random_roughness
    # If the map has fewer texels than requested samples, tile the uv set so
    # the shuffle below can still supply enough candidates.
    if uvs_flat.shape[0] < total_directions_required:
        repeats_required = tf.cast(
            tf.math.ceil(total_directions_required / uvs_flat.shape[0]), tf.int32
        )
        uvs_flat = math_utils.repeat(uvs_flat, repeats_required, 0)
    uvs_shuffle = tf.random.shuffle(uvs_flat)
    uvs_random = uvs_shuffle[:total_directions_required]
    # Gaussian jitter decorrelates samples from the texel grid.
    # NOTE(review): jittered uvs are not clamped/wrapped to [0, 1] here —
    # presumably getBilinearFromUv handles out-of-range uvs; confirm.
    jitter = tf.random.normal(uvs_random.shape, mean=0.0, stddev=0.3)
    uvs_random = uvs_random + jitter
    # Setup roughness: uniform over (1/255, 1 + 1/255], clipped back to [0, 1].
    roughness_random = tf.clip_by_value(
        tf.random.uniform(
            (num_random_roughness, 1), minval=1 / 255, maxval=1 + 1 / 255
        ),
        0,
        1,
    )
    r0_uvs = uvs_random[:num_roughness_0]
    rnd_uvs = uvs_random[num_roughness_0 : num_roughness_0 + num_random_roughness]
    # Get samples
    samples_random = random_uv_roughness_access(data_levels, rnd_uvs, roughness_random)
    # Always get r0 samples
    samples_r0 = random_uv_roughness_access(
        data_levels, r0_uvs, tf.zeros_like(r0_uvs[:, :1])
    )
    ret = (
        uv_to_direction(r0_uvs),
        samples_r0,
        uv_to_direction(rnd_uvs),
        roughness_random,
        samples_random,
    )
    return (
        data_levels,
        *ret,
    )
@tf.function
def full_map_samples(num_roughness_steps: int, data_levels: List[tf.Tensor]):
    """Densely samples the full map at evenly spaced roughness values.

    Args:
      num_roughness_steps: Number of roughness values linearly spaced in
        [0, 1] to evaluate.
      data_levels: Per-roughness-level [H, W, 3] maps; level 0 defines the
        uv grid that is sampled.

    Returns:
      (data_levels, directions, roughness_steps, samples) where samples are
      stacked along a leading roughness-step axis.
    """
    # Setup random roughnesses and get all values
    full_uvs = tf.reshape(shape_to_uv(*data_levels[0].shape[:-1]), (-1, 2))
    roughness_steps = np.linspace(0.0, 1.0, num_roughness_steps, dtype=np.float32)[
        :, None
    ]  # Add a dimension
    # Store the roughness steps; clear_after_read frees each slice once
    # stacked at the end.
    all_samples = tf.TensorArray(
        tf.float32, size=num_roughness_steps, clear_after_read=True
    )
    for i, r in enumerate(roughness_steps):  # The dimension is removed in the for loop
        r = math_utils.repeat(
            r[:, None], full_uvs.shape[0], 0
        )  # Add a batch dimension back
        samples = random_uv_roughness_access(data_levels, full_uvs, r)
        all_samples = all_samples.write(i, samples)  # Write the sample
    ret = (
        uv_to_direction(full_uvs),
        tf.convert_to_tensor(roughness_steps),
        all_samples.stack(),
    )
    return (
        data_levels,
        *ret,
    )
@tf.function
def random_uv_roughness_access(data_levels, uvs, roughness):
    """Bilinearly samples every mip level at `uvs`, then blends across levels.

    Args:
      data_levels: List of [H_i, W_i, 3] maps, one per roughness level.
      uvs: [S, 2] uv coordinates.
      roughness: [S, 1] per-sample roughness in [0, 1].

    Returns:
      [S, 3] values interpolated between the two adjacent roughness levels.
    """
    tf.debugging.assert_shapes(
        [
            (uvs, ("S", 2)),
            (roughness, ("S", 1)),
        ]
        + [(d, ("H%d" % i, "W%d" % i, 3)) for i, d in enumerate(data_levels)]
    )
    # data_levels: List[H, W, 3]
    # uvs: [S, 2]
    # Roughness: [S, 1]
    # Result: [S, 3]
    smpl_list = []
    for d in data_levels:
        # getBilinearFromUv expects a batch axis; add and strip it per level.
        samples_level = getBilinearFromUv(d[None, ...], uvs[None, ...])[0]
        smpl_list.append(samples_level)
    level_samples_batched = tf.stack(smpl_list, 0)  # M, S, 3
    return interpolate_roughness_levels(level_samples_batched, roughness)
@tf.function
def interpolate_roughness_levels(samples, roughness):
    """Linearly interpolates per-sample values between adjacent mip levels.

    Args:
      samples: [M, S, 3] tensor holding one value per mip level (M) and
        sample (S).
      roughness: [S, 1] roughness in [0, 1]; 0 maps to level 0 and 1 to
        level M - 1.

    Returns:
      [S, 3] tensor blended between the two levels bracketing each sample's
      fractional level index.
    """
    tf.debugging.assert_shapes(
        [
            (samples, ("M", "S", 3)),
            (roughness, ("S", 1)),
        ]
    )
    # Map roughness in [0, 1] to a fractional mip index in [0, M-1].
    roughness_mip_index = roughness[:, 0] * (samples.shape[0] - 1)
    # S
    lower_mip_index = tf.cast(tf.math.floor(roughness_mip_index), tf.int32)
    upper_mip_index = tf.cast(tf.math.ceil(roughness_mip_index), tf.int32)
    # Fetch the lower and upper roughness levels per sample.
    rgh_low = tf.gather(
        tf.transpose(samples, [1, 0, 2]), lower_mip_index[..., None], batch_dims=1
    )[:, 0]
    rgh_hgh = tf.gather(
        tf.transpose(samples, [1, 0, 2]), upper_mip_index[..., None], batch_dims=1
    )[:, 0]
    tf.debugging.assert_shapes(
        [
            (samples, ("M", "S", 3)),
            (roughness, ("S", 1)),
            (rgh_low, ("S", 3)),
            (rgh_hgh, ("S", 3)),
        ]
    )
    # fraction_index is the distance above the lower level, in [0, 1).
    fraction_index = roughness_mip_index - tf.cast(lower_mip_index, tf.float32)
    fraction_index = tf.reshape(fraction_index, roughness.shape)
    # BUG FIX: the lerp weights were swapped (low * f + high * (1 - f)),
    # which favored the *lower* level as the index approached the upper one.
    # The standard lerp weights the upper level by the fractional part. The
    # two forms only coincide at integer indices, where lower == upper.
    return rgh_low * (1 - fraction_index) + rgh_hgh * fraction_index
@tf.function
def blend_two_maps(*batch_2_data):
    """Alpha-blends each pair of stacked maps into one map.

    Every element of `batch_2_data` is a tensor whose leading axis holds two
    examples; a single random alpha in [0, 1) mixes each pair (a fresh alpha
    is drawn per element).

    Returns:
      A list with one blended tensor per input element.
    """
    blended = []
    for pair in batch_2_data:
        alpha = tf.random.uniform((1,))
        blended.append(alpha * pair[0] + (1 - alpha) * pair[1])
    return blended
@tf.function
def specify_mip_levels_to_fetch(
    dataset: List[Union[List[np.ndarray], np.ndarray]], idxs: List[int]
):
    """Selects mip levels `idxs` from the level stack and forwards the targets.

    Args:
      dataset: Tuple where element 0 is the stack of mip levels and the
        remaining elements are the sampled targets to pass through unchanged.
      idxs: Indices of the mip levels to keep as network inputs.

    Returns:
      A tuple of (selected levels..., remaining dataset elements...).
    """
    selected_levels = [dataset[0][idx] for idx in idxs]
    return (*selected_levels, *dataset[1:])
def random_sample_dataflow(
    dataset: List[np.ndarray],
    samples_roughness_0: int,
    samples_random_roughness: int,
    batch_size: int,
    with_blend: bool = False,
    full_l0: bool = False,
    shuffle: bool = True,
):
    """Builds the tf.data pipeline that turns mip-level maps into samples.

    Args:
      dataset: Per-example arrays; element 0 is the stack of mip levels
        (index 0 of that stack is forwarded as the network input).
      samples_roughness_0: Samples drawn at roughness 0 per example.
      samples_random_roughness: Samples drawn at random roughness per example.
      batch_size: Batch size; values <= 0 disable batching.
      with_blend: If True, consecutive example pairs are alpha-blended into
        one example (halving the epoch), compensated by `repeat(2)`.
      full_l0: If True, densely samples the full map at 5 roughness steps
        instead of drawing random samples.
      shuffle: If True, reshuffles examples every epoch.

    Returns:
      A prefetching `tf.data.Dataset` yielding
      (input level, directions/roughness/sample tensors...) tuples.
    """
    dataset_len = len(dataset[0])
    ds = tf.data.Dataset.from_tensor_slices((*dataset,))
    if shuffle:
        ds = ds.shuffle(dataset_len, reshuffle_each_iteration=True)
    if with_blend:
        # Pair examples, blend each pair, and repeat so the epoch size is
        # unchanged despite the 2-to-1 reduction.
        ds = ds.batch(2, drop_remainder=True)
        ds = ds.map(blend_two_maps)
        ds = ds.repeat(2)
    if full_l0:
        ds = ds.map(
            lambda *x: full_map_samples(5, x),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    else:
        ds = ds.map(
            lambda *x: map_levels_to_samples(
                samples_roughness_0, samples_random_roughness, x
            ),
            num_parallel_calls=tf.data.AUTOTUNE,
        )
    # Keep only mip level 0 as the input; targets pass through unchanged.
    ds = ds.map(lambda *x: specify_mip_levels_to_fetch(x, [0]))
    if batch_size > 0:
        ds = ds.batch(batch_size)
    ds = ds.prefetch(5)
    return ds
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for :module:`flocker.node._docker`.
"""
from __future__ import absolute_import
import time
from functools import partial
from docker.errors import APIError
from docker import Client
# Docker-py uses 1.16 API by default, which isn't supported by docker, so force
# the use of the 1.15 API until we upgrade docker in flocker-dev
Client = partial(Client, version="1.15")
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.internet.defer import succeed, gatherResults
from twisted.internet.error import ConnectionRefusedError
from twisted.web.client import ResponseNeverReceived
from treq import request, content
from pyrsistent import pvector
from ...testtools import (
loop_until, find_free_port, DockerImageBuilder, assertContainsAll,
random_name)
from ..test.test_docker import make_idockerclient_tests
from .._docker import (
DockerClient, PortMap, Environment, NamespacedDockerClient,
BASE_NAMESPACE, Volume)
from ...control._model import RestartNever, RestartAlways, RestartOnFailure
from ..testtools import if_docker_configured, wait_for_unit_state
def namespace_for_test(test_case):
    """Return a unique, test-scoped Docker namespace prefix (u"ns-...")."""
    return u"ns-" + random_name(test_case)
class IDockerClientTests(make_idockerclient_tests(
        lambda test_case: DockerClient(
            namespace=namespace_for_test(test_case)
        )
)):
    """
    ``IDockerClient`` tests for ``DockerClient``.
    """
    @if_docker_configured
    def setUp(self):
        # The decorator skips the whole suite when Docker is unavailable;
        # no additional per-test setup is needed.
        pass
class IDockerClientNamespacedTests(make_idockerclient_tests(
        lambda test_case: NamespacedDockerClient(
            namespace=namespace_for_test(test_case)
        )
)):
    """
    ``IDockerClient`` tests for ``NamespacedDockerClient``.
    """
    @if_docker_configured
    def setUp(self):
        # The decorator skips the whole suite when Docker is unavailable;
        # no additional per-test setup is needed.
        pass
class GenericDockerClientTests(TestCase):
"""
Functional tests for ``DockerClient`` and other clients that talk to
real Docker.
"""
clientException = APIError
@if_docker_configured
def setUp(self):
    # One namespace per test keeps container names unique across tests and
    # lets `make_client` scope operations to this test only.
    self.namespacing_prefix = namespace_for_test(self)
def make_client(self):
    """Return a ``DockerClient`` scoped to this test's namespace."""
    return DockerClient(namespace=self.namespacing_prefix)
def create_container(self, client, name, image):
    """
    Create (but don't start) a container via the supplied client.

    Uses the client's private docker-py connection directly so the
    container exists without going through ``IDockerClient.add`` (which
    would also start it).

    :param DockerClient client: The Docker API client.
    :param unicode name: The container name.
    :param unicode image: The image name.
    """
    container_name = client._to_container_name(name)
    client._client.create_container(
        name=container_name, image=image)
def start_container(self, unit_name,
                    image_name=u"openshift/busybox-http-app",
                    ports=None, expected_states=(u'active',),
                    environment=None, volumes=(),
                    mem_limit=None, cpu_shares=None,
                    restart_policy=RestartNever(),
                    command_line=None):
    """
    Start a unit and wait until it reaches the `active` state or the
    supplied `expected_state`.

    :param unicode unit_name: See ``IDockerClient.add``.
    :param unicode image_name: See ``IDockerClient.add``.
    :param list ports: See ``IDockerClient.add``.
    :param expected_states: A sequence of activation states to wait for.
    :param environment: See ``IDockerClient.add``.
    :param volumes: See ``IDockerClient.add``.
    :param mem_limit: See ``IDockerClient.add``.
    :param cpu_shares: See ``IDockerClient.add``.
    :param restart_policy: See ``IDockerClient.add``.
    :param command_line: See ``IDockerClient.add``.

    :return: ``Deferred`` that fires with the ``DockerClient`` when
        the unit reaches the expected state.
    """
    client = self.make_client()
    d = client.add(
        unit_name=unit_name,
        image_name=image_name,
        ports=ports,
        environment=environment,
        volumes=volumes,
        mem_limit=mem_limit,
        cpu_shares=cpu_shares,
        restart_policy=restart_policy,
        command_line=command_line,
    )
    # Ensure the unit is removed even when the test fails part-way.
    self.addCleanup(client.remove, unit_name)
    d.addCallback(lambda _: wait_for_unit_state(client, unit_name,
                                                expected_states))
    d.addCallback(lambda _: client)
    return d
def test_default_base_url(self):
    """
    ``DockerClient`` instantiated with a default base URL for a socket
    connection has a client HTTP url after the connection is made.
    """
    client = DockerClient()
    expected_url = u'http+unix://var/run/docker.sock'
    self.assertEqual(client._client.base_url, expected_url)
def test_custom_base_url_tcp_http(self):
    """
    ``DockerClient`` instantiated with a custom base URL for a TCP
    connection has a client HTTP url after the connection is made.
    """
    tcp_url = b"tcp://127.0.0.1:2375"
    client = DockerClient(base_url=tcp_url)
    self.assertEqual(client._client.base_url, b"http://127.0.0.1:2375")
def test_add_starts_container(self):
    """
    ``DockerClient.add`` starts the container.

    The helper waits for the ``active`` state, so simply returning its
    Deferred is the assertion.
    """
    name = random_name(self)
    return self.start_container(name)
def test_correct_image_used(self):
    """
    ``DockerClient.add`` creates a container with the specified image.
    """
    name = random_name(self)
    d = self.start_container(name)

    def started(_):
        # Inspect through raw docker-py so the check is independent of
        # the DockerClient implementation under test.
        docker = Client()
        data = docker.inspect_container(self.namespacing_prefix + name)
        self.assertEqual(data[u"Config"][u"Image"],
                         u"openshift/busybox-http-app")
    d.addCallback(started)
    return d
def test_add_error(self):
    """
    ``DockerClient.add`` returns a ``Deferred`` that errbacks with
    ``APIError`` if response code is not a success response code.
    """
    client = self.make_client()
    # add() calls exists(), and we don't want exists() to be the one
    # failing since that's not the code path we're testing, so bypass
    # it:
    client.exists = lambda _: succeed(False)
    # Illegal container name should make Docker complain when we try to
    # install the container:
    d = client.add(u"!!!###!!!", u"busybox:latest")
    return self.assertFailure(d, self.clientException)
def test_dead_is_listed(self):
    """
    ``DockerClient.list()`` includes dead units.

    We use a `busybox` image here, because it will exit immediately and
    reach an `inactive` substate of `dead`.

    There are no assertions in this test, because it will fail with a
    timeout if the unit with that expected state is never listed or if that
    unit never reaches that state.
    """
    name = random_name(self)
    d = self.start_container(unit_name=name, image_name="busybox:latest",
                             expected_states=(u'inactive',))
    return d
def test_list_with_missing_image(self):
    """
    ``DockerClient.list()`` can list containers whose image is missing.

    The resulting output may be inaccurate, but that's OK: this only
    happens for non-running containers, who at worst we're going to
    restart anyway.
    """
    # Build a throwaway image so it can be deleted once the container has
    # run to completion.
    path = FilePath(self.mktemp())
    path.makedirs()
    path.child(b"Dockerfile.in").setContent(
        b"FROM busybox\nCMD /bin/true\n")
    builder = DockerImageBuilder(test=self, source_dir=path, cleanup=False)
    d = builder.build()

    def image_built(image_name):
        name = random_name(self)
        d = self.start_container(
            unit_name=name, image_name=image_name,
            expected_states=(u'inactive',))
        return d.addCallback(lambda ignored: (name, image_name))
    d.addCallback(image_built)

    # NOTE: Python 2 tuple-parameter unpacking; receives the
    # (name, image_name) pair produced by image_built above.
    def stopped_container_exists((name, image_name)):
        # Remove the image:
        docker_client = Client()
        docker_client.remove_image(image_name, force=True)

        # Should be able to still list the container:
        client = self.make_client()
        listed = client.list()
        listed.addCallback(lambda results: self.assertIn(
            (name, "inactive"),
            [(unit.name, unit.activation_state) for unit in results]))
        return listed
    d.addCallback(stopped_container_exists)
    return d
def test_dead_is_removed(self):
    """
    ``DockerClient.remove()`` removes dead units without error.

    We use a `busybox` image here, because it will exit immediately and
    reach an `inactive` substate of `dead`.
    """
    name = random_name(self)
    d = self.start_container(unit_name=name, image_name="busybox:latest",
                             expected_states=(u'inactive',))

    def remove_container(client):
        # Any removal error would errback the Deferred and fail the test.
        client.remove(name)
    d.addCallback(remove_container)
    return d
def request_until_response(self, port):
"""
Resend a test HTTP request until a response is received.
The container may have started, but the webserver inside may take a
little while to start serving requests.
:param int port: The localhost port to which an HTTP request will be
sent.
:return: A ``Deferred`` which fires with the result of the first
successful HTTP request.
"""
def send_request():
"""
Send an HTTP request in a loop until the request is answered.
"""
response = request(
b"GET", b"http://127.0.0.1:%d" % (port,),
persistent=False)
def check_error(failure):
"""
Catch ConnectionRefused errors and response timeouts and return
False so that loop_until repeats the request.
Other error conditions will be passed down the errback chain.
"""
failure.trap(ConnectionRefusedError, ResponseNeverReceived)
return False
response.addErrback(check_error)
return response
return loop_until(send_request)
    def test_add_with_port(self):
        """
        ``DockerClient.add`` accepts a ports argument which is passed to
        Docker to expose those ports on the unit.

        Assert that the busybox-http-app returns the expected "Hello world!"
        response.

        XXX: We should use a stable internal container instead. See
        https://clusterhq.atlassian.net/browse/FLOC-120

        XXX: The busybox-http-app returns headers in the body of its response,
        hence this over complicated custom assertion. See
        https://github.com/openshift/geard/issues/213
        """
        expected_response = b'Hello world!\n'
        # Pick a host port that is currently unused for the mapping.
        external_port = find_free_port()[1]
        name = random_name(self)
        d = self.start_container(
            name, ports=[PortMap(internal_port=8080,
                                 external_port=external_port)])
        # The webserver inside the container may need a moment to come up,
        # so keep retrying the request until it answers.
        d.addCallback(
            lambda ignored: self.request_until_response(external_port))
        def started(response):
            d = content(response)
            # assertIn rather than assertEqual: see the second XXX above —
            # the app echoes headers into the body.
            d.addCallback(lambda body: self.assertIn(expected_response, body))
            return d
        d.addCallback(started)
        return d
    def test_add_with_environment(self):
        """
        ``DockerClient.add`` accepts an environment object whose ID and
        variables are used when starting a docker image.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        # Image that dumps its environment to its logs once per second,
        # followed by a "WOOT" marker we can wait for.
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM busybox\n'
            b'CMD ["/bin/sh", "-c", '
            b'"while true; do env && echo WOOT && sleep 1; done"]'
        )
        expected_variables = frozenset({
            'key1': 'value1',
            'key2': 'value2',
        }.items())
        unit_name = random_name(self)
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        d = image.build()
        def image_built(image_name):
            return self.start_container(
                unit_name=unit_name,
                image_name=image_name,
                environment=Environment(variables=expected_variables),
            )
        d.addCallback(image_built)
        def started(_):
            # NOTE(review): this poll blocks the calling thread and loops
            # forever if "WOOT" never appears in the logs — consider adding
            # a timeout.
            output = ""
            while True:
                output += Client().logs(self.namespacing_prefix + unit_name)
                if "WOOT" in output:
                    break
            # Seeing "WOOT" implies at least one full `env` dump is present.
            assertContainsAll(
                output, test_case=self,
                needles=['{}={}\n'.format(k, v)
                         for k, v in expected_variables],
            )
        d.addCallback(started)
        return d
def test_pull_image_if_necessary(self):
"""
The Docker image is pulled if it is unavailable locally.
"""
# Use an image that isn't likely to be in use by anything, since
# it's old, and isn't used by other tests:
image = u"busybox:ubuntu-12.04"
# Make sure image is gone:
docker = Client()
try:
docker.remove_image(image, force=True)
except APIError as e:
if e.response.status_code != 404:
raise
name = random_name(self)
client = self.make_client()
self.addCleanup(client.remove, name)
d = client.add(name, image)
d.addCallback(lambda _: self.assertTrue(docker.inspect_image(image)))
return d
def test_pull_timeout(self):
"""
Pulling an image times-out if it takes longer than a provided timeout.
"""
# Use an image that isn't likely to be in use by anything, since
# it's old, and isn't used by other tests:
image = u"ubuntu:12.04"
# Make sure image is gone:
docker = Client()
try:
docker.remove_image(image, force=True)
except APIError as e:
if e.response.status_code != 404:
raise
name = random_name(self)
client = DockerClient(
namespace=self.namespacing_prefix, long_timeout=1)
self.addCleanup(client.remove, name)
d = client.add(name, image)
# requests has a TimeoutError, but timeout raises a ConnectionError.
# Both are subclasses of IOError, so use that for now
# https://github.com/kennethreitz/requests/issues/2620
self.assertFailure(d, IOError)
return d
    def test_pull_timeout_pull(self):
        """
        Image pull timeout does not affect subsequent pulls.
        """
        # Use an image that isn't likely to be in use by anything, since
        # it's old, and isn't used by other tests. Note, this is the
        # same image as test_pull_image_if_necessary, but they run at
        # different times.
        image = u"busybox:ubuntu-12.04"
        # Make sure image is gone:
        docker = Client()
        try:
            docker.remove_image(image, force=True)
        except APIError as e:
            # "Not found" is fine; anything else is a real error.
            if e.response.status_code != 404:
                raise
        name = random_name(self)
        # A one second timeout is too short for any real pull, so the first
        # add is expected to fail with a timeout.
        client = DockerClient(
            namespace=self.namespacing_prefix, long_timeout=1)
        self.addCleanup(client.remove, name)
        d = client.add(name, image)
        def unexpected_success(_):
            self.fail('Image unexpectedly pulled within timeout limit')
        def expected_failure(failure):
            # Timeouts surface as IOError subclasses; see test_pull_timeout.
            self.assertIsNotNone(failure.check(IOError))
            # We got our failure, now try to successfully pull
            # with a generous timeout; this must not be affected by the
            # earlier timed-out attempt.
            client = DockerClient(
                namespace=self.namespacing_prefix, long_timeout=600)
            return client.add(name, image)
        d.addCallbacks(unexpected_success, expected_failure)
        return d
def test_namespacing(self):
"""
Containers are created with a namespace prefixed to their container
name.
"""
docker = Client()
name = random_name(self)
client = self.make_client()
self.addCleanup(client.remove, name)
d = client.add(name, u"busybox:latest")
def added(_):
self.assertTrue(
docker.inspect_container(self.namespacing_prefix + name))
d.addCallback(added)
return d
    def test_null_environment(self):
        """
        A container that does not include any environment variables contains
        an empty ``environment`` in the return ``Unit``.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        # FROM scratch gives an image with no inherited environment at all;
        # the command need not be runnable since the container is only
        # created, never started.
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM scratch\n'
            b'MAINTAINER info@clusterhq.com\n'
            b'CMD ["/bin/doesnotexist"]'
        )
        name = random_name(self)
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        d = image.build()
        def image_built(image_name):
            client = self.make_client()
            # Create (but do not start) the container, then list it.
            self.create_container(client, name, image_name)
            self.addCleanup(client.remove, name)
            return client.list()
        d.addCallback(image_built)
        def got_list(units):
            unit = [unit for unit in units if unit.name == name][0]
            self.assertIsNone(unit.environment)
        d.addCallback(got_list)
        return d
def test_container_name(self):
"""
The container name stored on returned ``Unit`` instances matches the
expected container name.
"""
client = self.make_client()
name = random_name(self)
self.addCleanup(client.remove, name)
d = client.add(name, u"busybox:latest")
d.addCallback(lambda _: client.list())
def got_list(units):
unit = [unit for unit in units if unit.name == name][0]
self.assertEqual(unit.container_name,
self.namespacing_prefix + name)
d.addCallback(got_list)
return d
def test_empty_environment(self):
"""
When a container with no custom environment variables is launched via
``DockerClient.add`` the environment in the resulting ``Unit`` returned
from ``DockerClient.list`` will ignore the default HOME and PATH
environment variables, leaving the ``Unit`` with an Environment of
None.
"""
name = random_name(self)
d = self.start_container(name)
def started(client):
deferred_units = client.list()
def check_units(units):
unit = [unit for unit in units if unit.name == name][0]
self.assertIsNone(unit.environment)
deferred_units.addCallback(check_units)
d.addCallback(started)
return d
def test_list_only_custom_environment(self):
"""
When a container containing custom environment variables is launched
and the image used also injects environment variables, only the custom
variables we injected are returned by ``DockerClient.list``, whereas
variables set by the image are discarded.
All Docker containers have a PATH environment variable. In addition,
the openshift/busybox-http-app image contains an STI_SCRIPTS_URL
environment variable. These are therefore disregarded the variables
disregarded in this test, whereas our custom environment is listed in
the returned Units.
https://registry.hub.docker.com/u/openshift/busybox-http/dockerfile/
"""
name = random_name(self)
environment = {
'my_variable': 'some value',
'another_variable': '12345'
}
environment = frozenset(environment.items())
d = self.start_container(
name,
environment=Environment(variables=environment)
)
def started(client):
deferred_units = client.list()
def check_units(units):
unit = [unit for unit in units if unit.name == name][0]
expected = Environment(variables=environment)
self.assertEqual(unit.environment, expected)
deferred_units.addCallback(check_units)
d.addCallback(started)
return d
    def test_add_with_volumes(self):
        """
        ``DockerClient.add`` accepts a list of ``Volume`` instances which are
        mounted within the container.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        # The container touches one marker file in each mounted volume and
        # then exits.
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM busybox\n'
            b'CMD ["/bin/sh", "-c", '
            b'"touch /mnt1/a; touch /mnt2/b"]'
        )
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        d = image.build()
        def image_built(image_name):
            unit_name = random_name(self)
            path1 = FilePath(self.mktemp())
            path1.makedirs()
            path2 = FilePath(self.mktemp())
            path2.makedirs()
            # The container exits as soon as the files are touched, hence
            # the expected 'inactive' state.
            d = self.start_container(
                unit_name=unit_name,
                image_name=image_name,
                volumes=[
                    Volume(node_path=path1, container_path=FilePath(b"/mnt1")),
                    Volume(
                        node_path=path2, container_path=FilePath(b"/mnt2"))],
                expected_states=(u'inactive',),
            )
            return d.addCallback(lambda _: (path1, path2))
        d.addCallback(image_built)
        # Python 2 tuple-unpacking parameter: receives (path1, path2).
        def started((path1, path2)):
            expected1 = path1.child(b"a")
            expected2 = path2.child(b"b")
            # Poll for up to ~10 seconds for the files to appear on the host
            # side of the volume mounts.
            for i in range(100):
                if expected1.exists() and expected2.exists():
                    return
                else:
                    time.sleep(0.1)
            self.fail("Files never created.")
        return d.addCallback(started)
def test_add_with_memory_limit(self):
"""
``DockerClient.add`` accepts an integer mem_limit parameter which is
passed to Docker when creating a container as the maximum amount of RAM
available to that container.
"""
MEMORY_100MB = 100000000
name = random_name(self)
d = self.start_container(name, mem_limit=MEMORY_100MB)
def started(_):
docker = Client()
data = docker.inspect_container(self.namespacing_prefix + name)
self.assertEqual(data[u"Config"][u"Memory"],
MEMORY_100MB)
d.addCallback(started)
return d
def test_add_with_cpu_shares(self):
"""
``DockerClient.add`` accepts an integer cpu_shares parameter which is
passed to Docker when creating a container as the CPU shares weight
for that container. This is a relative weight for CPU time versus other
containers and does not directly constrain CPU usage, i.e. a CPU share
constrained container can still use 100% CPU if other containers are
idle. Default shares when unspecified is 1024.
"""
name = random_name(self)
d = self.start_container(name, cpu_shares=512)
def started(_):
docker = Client()
data = docker.inspect_container(self.namespacing_prefix + name)
self.assertEqual(data[u"Config"][u"CpuShares"], 512)
d.addCallback(started)
return d
def test_add_without_cpu_or_mem_limits(self):
"""
``DockerClient.add`` when creating a container with no mem_limit or
cpu_shares specified will create a container without these resource
limits, returning integer 0 as the values for Memory and CpuShares from
its API when inspecting such a container.
"""
name = random_name(self)
d = self.start_container(name)
def started(_):
docker = Client()
data = docker.inspect_container(self.namespacing_prefix + name)
self.assertEqual(data[u"Config"][u"Memory"], 0)
self.assertEqual(data[u"Config"][u"CpuShares"], 0)
d.addCallback(started)
return d
    def start_restart_policy_container(self, mode, restart_policy):
        """
        Start a container for testing restart policies.

        :param unicode mode: Mode of container. One of
            - ``"failure"``: The container will always exit with a failure.
            - ``"success-then-sleep"``: The container will exit with success
              once, then sleep forever.
            - ``"failure-then-success"``: The container will exit with
              failure once, then with success.

        :param IRestartPolicy restart_policy: The restart policy to use for
            the container.

        :returns Deferred: A deferred that fires with the number of times the
            container was started.
        """
        docker_dir = FilePath(__file__).sibling('retry-docker')
        name = random_name(self)
        # Shared data directory: the container's run script increments the
        # "count" file on every start and creates "marker" when appropriate.
        data = FilePath(self.mktemp())
        data.makedirs()
        count = data.child('count')
        count.setContent("0")
        marker = data.child('marker')
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        d = image.build()
        def image_built(image_name):
            # In "success-then-sleep" mode the container ends up sleeping
            # (active); in every other mode it exits (inactive).
            if mode == u"success-then-sleep":
                expected_states = (u'active',)
            else:
                expected_states = (u'inactive',)
            return self.start_container(
                name, image_name=image_name,
                restart_policy=restart_policy,
                environment=Environment(variables={u'mode': mode}),
                volumes=[
                    Volume(node_path=data, container_path=FilePath(b"/data"))],
                expected_states=expected_states)
        d.addCallback(image_built)
        if mode == u"success-then-sleep":
            # TODO: if the `run` script fails for any reason,
            # then this will loop forever.
            # TODO: use the "wait for predicate" helper
            def wait_for_marker(_):
                while not marker.exists():
                    time.sleep(0.01)
            d.addCallback(wait_for_marker)
        # Fires with the start counter as a string, e.g. "2".
        d.addCallback(lambda ignored: count.getContent())
        return d
    def test_restart_policy_never(self):
        """
        A container with a restart policy of never isn't restarted
        after it exits.
        """
        d = self.start_restart_policy_container(
            mode=u"failure", restart_policy=RestartNever())
        # The counter file records how many times the container started;
        # exactly one start means no restart happened.
        d.addCallback(self.assertEqual, "1")
        return d
    def test_restart_policy_always(self):
        """
        A container with a restart policy of always is restarted
        after it exits.
        """
        d = self.start_restart_policy_container(
            mode=u"success-then-sleep", restart_policy=RestartAlways())
        # Two recorded starts: the initial run plus one restart after the
        # successful exit.
        d.addCallback(self.assertEqual, "2")
        return d
    def test_restart_policy_on_failure(self):
        """
        A container with a restart policy of on-failure is restarted
        after it exits with a non-zero result.
        """
        d = self.start_restart_policy_container(
            mode=u"failure-then-success", restart_policy=RestartOnFailure())
        # Two recorded starts: the failing run plus the restart that then
        # succeeds (after which on-failure does not restart again).
        d.addCallback(self.assertEqual, "2")
        return d
    def test_restart_policy_on_failure_maximum_count(self):
        """
        A container with a restart policy of on-failure and a maximum
        retry count is not restarted once it has failed as many times as the
        specified maximum.
        """
        d = self.start_restart_policy_container(
            mode=u"failure",
            restart_policy=RestartOnFailure(maximum_retry_count=5))
        # A Docker change e721ed9b5319e8e7c1daf87c34690f8a4e62c9e3 means that
        # this value depends on the version of Docker.
        d.addCallback(self.assertIn, ("5", "6"))
        return d
    def test_command_line(self):
        """
        A container with custom command line is run with those arguments.
        """
        external_port = find_free_port()[1]
        name = random_name(self)
        # The command writes a tiny CGI-style script and serves it with
        # netcat on port 8080; the embedded script must be left exactly
        # as-is, including its \r\n sequences.
        d = self.start_container(
            name, image_name=u"busybox",
            # Pass in pvector since this likely to be what caller actually
            # passes in:
            command_line=pvector([u"sh", u"-c", u"""\
echo -n '#!/bin/sh
echo -n "HTTP/1.1 200 OK\r\n\r\nhi"
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 8080 -e /tmp/script.sh
"""]),
            ports=[PortMap(internal_port=8080,
                           external_port=external_port)])
        # Retry until the netcat listener inside the container answers.
        d.addCallback(
            lambda ignored: self.request_until_response(external_port))
        def started(response):
            d = content(response)
            d.addCallback(lambda body: self.assertEqual(b"hi", body))
            return d
        d.addCallback(started)
        return d
class DockerClientTests(TestCase):
    """
    Tests for ``DockerClient`` specifically.
    """
    @if_docker_configured
    def setUp(self):
        # Nothing to prepare; the decorator only skips the test when Docker
        # is not configured on this host.
        pass
    def test_default_namespace(self):
        """
        The default namespace is `u"flocker--"`.
        """
        docker = Client()
        name = random_name(self)
        client = DockerClient()
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        # Looking the container up under the default prefix proves it.
        d.addCallback(lambda _: self.assertTrue(
            docker.inspect_container(u"flocker--" + name)))
        return d
    def test_list_removed_containers(self):
        """
        ``DockerClient.list`` does not list containers which are removed,
        during its operation, from another thread.
        """
        namespace = namespace_for_test(self)
        flocker_docker_client = DockerClient(namespace=namespace)
        name1 = random_name(self)
        adding_unit1 = flocker_docker_client.add(
            name1, u'openshift/busybox-http-app')
        self.addCleanup(flocker_docker_client.remove, name1)
        name2 = random_name(self)
        adding_unit2 = flocker_docker_client.add(
            name2, u'openshift/busybox-http-app')
        self.addCleanup(flocker_docker_client.remove, name2)
        docker_client = flocker_docker_client._client
        # Keep a reference to the real method before it is patched below.
        docker_client_containers = docker_client.containers
        def simulate_missing_containers(*args, **kwargs):
            """
            Remove a container before returning the original list, so the
            listed container disappears mid-``list()``.
            """
            containers = docker_client_containers(*args, **kwargs)
            container_name1 = flocker_docker_client._to_container_name(name1)
            docker_client.remove_container(
                container=container_name1, force=True)
            return containers
        adding_units = gatherResults([adding_unit1, adding_unit2])
        patches = []
        def get_list(ignored):
            patch = self.patch(
                docker_client,
                'containers',
                simulate_missing_containers
            )
            patches.append(patch)
            return flocker_docker_client.list()
        listing_units = adding_units.addCallback(get_list)
        def check_list(units):
            # Restore the real `containers` method before asserting so a
            # failure doesn't leave the client patched.
            for patch in patches:
                patch.restore()
            self.assertEqual(
                [name2], sorted([unit.name for unit in units])
            )
        running_assertions = listing_units.addCallback(check_list)
        return running_assertions
    def error_passthrough_test(self, method_name):
        """
        If the given method name on the underlying ``Docker`` client has a
        non-404 error, that gets passed through to ``Docker.list()``.

        :param str method_name: Method of a docker ``Client``.
        :return: ``Deferred`` firing on test success.
        """
        name = random_name(self)
        client = DockerClient()
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        # Minimal stand-in for a requests response with a 500 status.
        class Response(object):
            status_code = 500
            content = ""
        def error(name):
            raise APIError("", Response())
        def added(_):
            # Monkeypatch because triggering non-404 errors from
            # inspect_container is hard.
            self.patch(client._client, method_name, error)
            return client.list()
        d.addCallback(added)
        return self.assertFailure(d, APIError)
    def test_list_error_inspecting_container(self):
        """
        If an error occurs inspecting a container it is passed through.
        """
        return self.error_passthrough_test("inspect_container")
    def test_list_error_inspecting_image(self):
        """
        If an error occurs inspecting an image it is passed through.
        """
        return self.error_passthrough_test("inspect_image")
class NamespacedDockerClientTests(GenericDockerClientTests):
    """
    Functional tests for ``NamespacedDockerClient``.

    Inherits the whole generic suite and overrides client construction so
    every test runs against a namespaced client.
    """
    @if_docker_configured
    def setUp(self):
        # A per-test namespace keeps containers from different tests apart.
        self.namespace = namespace_for_test(self)
        self.namespacing_prefix = BASE_NAMESPACE + self.namespace + u"--"
    def make_client(self):
        # Overrides the generic suite's client factory.
        return NamespacedDockerClient(self.namespace)
    def create_container(self, client, name, image):
        """
        Create (but don't start) a container via the supplied client.

        :param DockerClient client: The Docker API client.
        :param unicode name: The container name.
        :param unicode image: The image name.
        """
        # Reach through the namespaced wrapper (client._client) to the
        # underlying DockerClient, and through that to the raw docker-py
        # client, applying the namespaced container name on the way.
        container_name = client._client._to_container_name(name)
        client._client._client.create_container(
            name=container_name, image=image)
    def test_isolated_namespaces(self):
        """
        Containers in one namespace are not visible in another namespace.
        """
        client = NamespacedDockerClient(namespace=namespace_for_test(self))
        client2 = NamespacedDockerClient(namespace=namespace_for_test(self))
        name = random_name(self)
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        # The second namespace must see no units at all.
        d.addCallback(lambda _: client2.list())
        d.addCallback(self.assertEqual, set())
        return d
|
|
#! /usr/bin/python3
import sys
import os
import glob
import shutil
from pydub import AudioSegment
import json
from watson_developer_cloud import SpeechToTextV1
import conf #contains username and password to access Watson API
from moviepy.tools import subprocess_call
from moviepy.config import get_setting
import threading
import time
######################################################################
# global variables
######################################################################
stt = SpeechToTextV1( #Watson Speech To Text API login info
    username = conf.username,
    password = conf.password,
    x_watson_learning_opt_out=True
)
# NOTE(review): run() assigns a *local* num_threads without a `global`
# declaration, so this module-level value is never actually updated —
# confirm whether anything relies on the global.
num_threads = 0 # will be set dynamically by detectCPUs()
special_chars = ['<', '>', '\\', '/', '*', ':', '?', '\"', '.'] # used later to detect special characters
# The following are filled in by run() from the command-line arguments and
# read by the worker threads and helper functions below.
basename = ""
person = ""
output_folder = ""
input_file = ""
######################################################################
# classes and functions
######################################################################
def detectCPUs():
    """
    Detect the number of logical CPUs on this system.

    Returns
    --------------------
    int -- the CPU count, or 1 when it cannot be determined.
    """
    # The previous hand-rolled platform sniffing called an undefined
    # ``capture()`` helper on macOS (NameError); os.cpu_count() covers
    # Linux, macOS and Windows portably on Python 3.
    ncpus = os.cpu_count()
    if ncpus is not None and ncpus > 0:
        return ncpus
    return 1  # conservative default when the count is unavailable
class myThread (threading.Thread): #used this as guide: https://www.tutorialspoint.com/python3/python_multithreading.htm
    """
    Worker thread: transcribes its share of the audio with Watson and cuts
    the confidently-recognized words into clips.
    """
    def __init__(self, threadID, audio, start_time):
        threading.Thread.__init__(self)
        self.threadID = threadID      # 0-based index of this worker
        self.audio = audio            # pydub AudioSegment slice for this worker
        self.start_time = start_time  # NOTE(review): stored but never read
    def run(self):
        audio_len = len(self.audio)   # length of this worker's slice, in ms
        increment_by = 60000          # process one minute of audio at a time
        good_timestamps = {}
        # Pass 1: export one-minute WAV chunks to the shared workspace.
        # NOTE(review): the range starts at self.threadID (not 0) and the
        # "global" offset self.threadID*audio_len assumes all slices are the
        # same length — both look slightly off; confirm intent.
        for i in range(self.threadID, audio_len, increment_by):
            end_time = audio_len if(i + 60000 > audio_len) else i+60000
            audio_chunk = self.audio[i:end_time]
            start = str("%d" % (float(self.threadID*audio_len + i) / 1000))
            end = str("%d" % (float(self.threadID*audio_len + end_time) / 1000))
            path = "workspace/" + basename + "_" + start + "-" + end + ".wav"
            audio_chunk.export(path, format="wav")
        # Pass 2: send each chunk to Watson and collect confident timestamps.
        for j in range(self.threadID, audio_len, increment_by):
            t2 = time.time()
            end_time = audio_len if(j + 60000 > audio_len) else j+60000
            start = str("%d" % (float(self.threadID*audio_len + j) / 1000))
            end = str("%d" % (float(self.threadID*audio_len + end_time) / 1000))
            path = "workspace/" + basename + "_" + start + "-" + end + ".wav"
            #Use Watson Speech API on audio chunk
            with open(path, 'rb') as audio:
                stt.models()
                stt.get_model('en-US_BroadbandModel')
                #these parameters above can be altered to effect the output of the api
                stt_result = stt.recognize( audio, content_type='audio/wav',
                                            timestamps=True,
                                            word_confidence=True,
                                            continuous=True,
                                            profanity_filter=False,
                                            word_alternatives_threshold=0.0 )
                #dump response to a json file if we want to check it later then open it
                with open('speech-snippets/' + basename + "_" + start + "-" + end + '.json', 'w') as data_file:
                    json.dump(stt_result, data_file, indent=1)
                with open('speech-snippets/' + basename + "_" + start + "-" + end + '.json') as data_file:
                    get_good_timestamps(good_timestamps, data_file, float(self.threadID*audio_len + j) / 1000)
            t3 = time.time()
            print("thread " + str(self.threadID) + ", j: " + str(j) + ", time: " + str(t3-t2))
        t4 = time.time()
        print("thread " + str(self.threadID) + " starting to extract_words")
        #clip audio into word clips
        extract_words(input_file, good_timestamps, self.threadID)
        t5 = time.time()
        print("thread " + str(self.threadID) + " finished extract_words. time: " + str(t5-t4))
def ffmpeg_extract_subclip(filename, start, end, targetname=None):
    """
    Creates a new video file playing video file "filename" between
    the times "start" and "end".

    Parameters
    --------------------
    filename -- path of the source video
    start -- time (seconds) to start cutting from
    end -- time (seconds) to finish cutting to
    targetname -- output file name; derived from filename if omitted

    Returns
    --------------------
    nothing

    Note: adapted from the moviepy library, whose version was buggy.
    """
    name, ext = os.path.splitext(filename)
    if not targetname:
        T1, T2 = [int(1000*t) for t in [start, end]]
        # Bug fix: the old expression ``name+ "%sSUB%d_%d.%s"(name, ...)``
        # *called* the string (TypeError — the % operator was missing) and
        # would have duplicated both the name and the dot (ext already
        # starts with one).
        targetname = "%sSUB%d_%d%s" % (name, T1, T2, ext)
    cmd = [get_setting("FFMPEG_BINARY"), "-y",
           "-ss", "%0.2f"%start,
           "-t", "%0.2f"%(end-start),
           "-i", filename,
           # Force every frame to be a keyframe so the cut is exact.
           "-c:v", "libx264", "-x264opts", "keyint=1:min-keyint=1:scenecut=-1",
           targetname]
    subprocess_call(cmd, False)
def is_new_clip_better(word, new_s, new_e, old_s, old_e):
    """
    Compares two clips of ``word``, seeing which one is better.
    For now, "better" simply means "longer"; ties favor the old clip.

    Parameters
    --------------------
    word -- word being compared (currently unused, kept for the interface)
    new_s -- start time of new word
    new_e -- end time of new word
    old_s -- start time of old word
    old_e -- end time of old word

    Returns
    --------------------
    bool -- True if the new clip is strictly longer than the old one.
    """
    # Comparison already yields a bool; no need for `True if ... else False`.
    return (new_e - new_s) > (old_e - old_s)
def get_good_timestamps(good_timestamps, data_file, offset):
    """
    Add confidently recognized words and their timestamps to
    ``good_timestamps``.

    Parameters
    --------------------
    good_timestamps -- dict of highly accurate timestamps:
                       'word': (start_time, end_time) --> string: (double, double)
    data_file -- open JSON file of Watson's speech-to-text response (word
                 guesses, start/end times, confidence levels, ...); it will
                 usually contain some incorrect translations.
    offset -- seconds added to every timestamp (position of this chunk
              within the whole recording)

    Returns
    --------------------
    nothing, but good_timestamps is modified in place.
    """
    response = json.load(data_file)
    for result in response.get('results', []):
        for word_alternatives in result['word_alternatives']:
            best_guess = word_alternatives['alternatives'][0]
            # Only trust words Watson is very sure about.
            if best_guess['confidence'] <= 0.95:
                continue
            word = best_guess['word']
            start = word_alternatives['start_time']
            end = word_alternatives['end_time']
            if word in good_timestamps:
                previous_start, previous_end = good_timestamps[word]
                # Keep the existing clip unless the new one is better.
                if not is_new_clip_better(word, start, end,
                                          previous_start, previous_end):
                    continue
            good_timestamps[word] = (start + offset, end + offset)
def assure_path_exists(path):
    """
    Ensure the directory portion of ``path`` exists, creating it if needed.

    Parameters
    --------------------
    path -- path whose directory component should exist

    Returns
    --------------------
    nothing
    """
    directory = os.path.dirname(path)  # avoid shadowing builtin `dir`
    if directory:
        # exist_ok removes the check-then-create race of the old
        # os.path.exists()/os.makedirs() pair (another thread could create
        # the directory between the two calls) and is a no-op if present.
        os.makedirs(directory, exist_ok=True)
def extract_words(orig_clip, good_timestamps, ID):
    """
    Extract one clip per recognized word from the input clip.

    Parameters
    --------------------
    orig_clip -- path of the input clip that we want to extract word clips from
    good_timestamps -- dict of 'word' -> (start_time, end_time) in seconds
    ID -- numeric id of the calling thread; embedded in the output file
          names so threads never collide

    Returns
    --------------------
    nothing
    """
    for word, (start, end) in good_timestamps.items():
        # Windows doesn't allow these characters in filenames, so skip any
        # word containing one (guard clause replaces the old manual flag
        # loop).
        if any(char in word for char in special_chars):
            continue
        path = (output_folder + "/" + person + "/" + word.lower() + "/" +
                "thread" + str(ID) + "_")
        assure_path_exists(path)
        # Count clips already in the folder to avoid overwriting one.
        num_clips = len(glob.glob(path + "*"))
        ffmpeg_extract_subclip(
            orig_clip, start, end,
            targetname=(path + str(num_clips + 1) + ".mp4"))
def remove_extra_clips():
    """
    Keep only the largest clip of every word and name it ``1.mp4``.

    The worker threads may have produced several clips for the same word
    (the work is embarrassingly parallel); for each word directory this
    keeps the single biggest file and deletes the rest.

    Returns
    --------------------
    nothing
    """
    path = output_folder + "/" + person + "/"
    assure_path_exists(path)
    for subdir in os.listdir(path):
        if '.' in subdir:
            continue  # skip plain files / hidden entries, as before
        clip_dir = path + subdir + "/"
        # Ignore hidden files such as .DS_Store.
        clips = [f for f in os.listdir(clip_dir) if not f.startswith('.')]
        if not clips:
            # The old code attempted os.rename("") on empty directories.
            continue
        # Bug fix: the old loop only deleted clips encountered *after* a
        # larger one, so earlier, smaller duplicates survived.  Find the
        # biggest clip first, then delete everything else.
        biggest = max(clips, key=lambda f: os.path.getsize(clip_dir + f))
        for clip in clips:
            if clip != biggest:
                os.remove(clip_dir + clip)
        if biggest != "1.mp4":
            os.rename(clip_dir + biggest, clip_dir + "1.mp4")
def run(argv):
    """
    Split the input recording across one worker thread per CPU, transcribe
    it with Watson, cut out confidently recognized words as clips, and keep
    one clip per word.

    argv -- [output_folder, person, input_file]
    """
    t0 = time.time()
    if(len(argv) != 3):
        print('Usage: speech2text.py output_folder person inputfile')
        sys.exit(2)
    # NOTE(review): this creates a *local* num_threads; the module-level
    # num_threads global is never updated (no `global` declaration) —
    # confirm nothing depends on the global.
    num_threads = detectCPUs()
    print("Using " + str(num_threads) + " threads...")
    #modifying global variables
    global basename
    global person
    global clip_len  # NOTE(review): not pre-declared at module level
    global output_folder
    global input_file
    output_folder = argv[0]
    input_file = argv[2]
    #open input file and convert to flac (assume test file in same directory for now)
    basename = os.path.splitext(os.path.basename(argv[2]))[0]
    file_ext = os.path.splitext(argv[2])[1][1:]
    audio_init = AudioSegment.from_file(argv[2], file_ext) #assuming input files are all supported by ffmpeg
    audio_chunk = AudioSegment.silent(duration=0)
    end_time = len(audio_init)
    person = argv[1].lower()
    if os.path.exists("workspace"):
        shutil.rmtree("workspace") #clear workspace and remake it
        os.makedirs("workspace")
    else:
        os.makedirs("workspace")
    # Each thread gets an equal slice of the recording (in milliseconds).
    clip_len = end_time / num_threads
    threads = []
    start_time = 0
    end = 0
    for i in range(0,num_threads):
        start_time = i * clip_len
        # Last thread takes whatever remains so no audio is dropped.
        end = end_time if (i == num_threads -1) else (i+1) * clip_len -1
        audio_chunk = audio_init[start_time: end]
        thread = myThread(i, audio_chunk, start_time)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
    remove_extra_clips() # threads may have created word duplicates because they have been embarassingly parallelized.
    tlast = time.time()
    print("Total elapsed time: " + str(tlast-t0))
######################################################################
# main
######################################################################
def main(argv) :
    """Entry point: forward the command-line arguments to run()."""
    run(argv)
# Strip the program name before dispatching.
if __name__ == "__main__" :
    main(sys.argv[1:])
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import tempfile
import six
import mock
from oslo_config import cfg
from st2tests import config
from st2tests.base import CleanFilesTestCase
import st2common.util.virtualenvs as virtualenvs
from st2common.util.virtualenvs import install_requirement
from st2common.util.virtualenvs import install_requirements
from st2common.util.virtualenvs import setup_pack_virtualenv
__all__ = ["VirtualenvUtilsTestCase"]
# Note: We set base requirements to an empty list to speed up the tests
@mock.patch("st2common.util.virtualenvs.BASE_PACK_REQUIREMENTS", [])
class VirtualenvUtilsTestCase(CleanFilesTestCase):
def setUp(self):
super(VirtualenvUtilsTestCase, self).setUp()
config.parse_args()
dir_path = tempfile.mkdtemp()
cfg.CONF.set_override(name="base_path", override=dir_path, group="system")
self.base_path = dir_path
self.virtualenvs_path = os.path.join(self.base_path, "virtualenvs/")
# Make sure dir is deleted on tearDown
self.to_delete_directories.append(self.base_path)
def test_setup_pack_virtualenv_doesnt_exist_yet(self):
# Test a fresh virtualenv creation
pack_name = "dummy_pack_1"
pack_virtualenv_dir = os.path.join(self.virtualenvs_path, pack_name)
# Verify virtualenv directory doesn't exist
self.assertFalse(os.path.exists(pack_virtualenv_dir))
# Create virtualenv
# Note: This pack has no requirements
setup_pack_virtualenv(
pack_name=pack_name,
update=False,
include_pip=False,
include_setuptools=False,
include_wheel=False,
)
# Verify that virtualenv has been created
self.assertVirtualenvExists(pack_virtualenv_dir)
def test_setup_pack_virtualenv_already_exists(self):
# Test a scenario where virtualenv already exists
pack_name = "dummy_pack_1"
pack_virtualenv_dir = os.path.join(self.virtualenvs_path, pack_name)
# Verify virtualenv directory doesn't exist
self.assertFalse(os.path.exists(pack_virtualenv_dir))
# Create virtualenv
setup_pack_virtualenv(
pack_name=pack_name,
update=False,
include_pip=False,
include_setuptools=False,
include_wheel=False,
)
# Verify that virtualenv has been created
self.assertVirtualenvExists(pack_virtualenv_dir)
# Re-create virtualenv
setup_pack_virtualenv(
pack_name=pack_name,
update=False,
include_pip=False,
include_setuptools=False,
include_wheel=False,
)
# Verify virtrualenv is still there
self.assertVirtualenvExists(pack_virtualenv_dir)
def test_setup_virtualenv_update(self):
# Test a virtualenv update with pack which has requirements.txt
pack_name = "dummy_pack_2"
pack_virtualenv_dir = os.path.join(self.virtualenvs_path, pack_name)
# Verify virtualenv directory doesn't exist
self.assertFalse(os.path.exists(pack_virtualenv_dir))
# Create virtualenv
setup_pack_virtualenv(
pack_name=pack_name,
update=False,
include_setuptools=False,
include_wheel=False,
)
# Verify that virtualenv has been created
self.assertVirtualenvExists(pack_virtualenv_dir)
# Update it
setup_pack_virtualenv(
pack_name=pack_name,
update=True,
include_setuptools=False,
include_wheel=False,
)
# Verify virtrualenv is still there
self.assertVirtualenvExists(pack_virtualenv_dir)
def test_setup_virtualenv_invalid_dependency_in_requirements_file(self):
pack_name = "pack_invalid_requirements"
pack_virtualenv_dir = os.path.join(self.virtualenvs_path, pack_name)
# Verify virtualenv directory doesn't exist
self.assertFalse(os.path.exists(pack_virtualenv_dir))
# Try to create virtualenv, assert that it fails
try:
setup_pack_virtualenv(
pack_name=pack_name,
update=False,
include_setuptools=False,
include_wheel=False,
)
except Exception as e:
self.assertIn("Failed to install requirements from", six.text_type(e))
self.assertTrue(
"No matching distribution found for someinvalidname" in six.text_type(e)
)
else:
self.fail("Exception not thrown")
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirement_without_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirement = "six>=1.9.0"
install_requirement(pack_virtualenv_dir, requirement, proxy_config=None)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"install",
"six>=1.9.0",
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirement_with_http_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirement = "six>=1.9.0"
proxy_config = {"http_proxy": "http://192.168.1.5:8080"}
install_requirement(pack_virtualenv_dir, requirement, proxy_config=proxy_config)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"http://192.168.1.5:8080",
"install",
"six>=1.9.0",
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirement_with_https_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirement = "six>=1.9.0"
proxy_config = {
"https_proxy": "https://192.168.1.5:8080",
"proxy_ca_bundle_path": "/etc/ssl/certs/mitmproxy-ca.pem",
}
install_requirement(pack_virtualenv_dir, requirement, proxy_config=proxy_config)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"https://192.168.1.5:8080",
"--cert",
"/etc/ssl/certs/mitmproxy-ca.pem",
"install",
"six>=1.9.0",
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirement_with_https_proxy_no_cert(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirement = "six>=1.9.0"
proxy_config = {
"https_proxy": "https://192.168.1.5:8080",
}
install_requirement(pack_virtualenv_dir, requirement, proxy_config=proxy_config)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"https://192.168.1.5:8080",
"install",
"six>=1.9.0",
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirements_without_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirements_file_path = (
"/opt/stackstorm/packs/dummy_pack_tests/requirements.txt"
)
install_requirements(
pack_virtualenv_dir, requirements_file_path, proxy_config=None
)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"install",
"-U",
"-r",
requirements_file_path,
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirements_with_http_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirements_file_path = (
"/opt/stackstorm/packs/dummy_pack_tests/requirements.txt"
)
proxy_config = {"http_proxy": "http://192.168.1.5:8080"}
install_requirements(
pack_virtualenv_dir, requirements_file_path, proxy_config=proxy_config
)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"http://192.168.1.5:8080",
"install",
"-U",
"-r",
requirements_file_path,
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirements_with_https_proxy(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirements_file_path = (
"/opt/stackstorm/packs/dummy_pack_tests/requirements.txt"
)
proxy_config = {
"https_proxy": "https://192.168.1.5:8080",
"proxy_ca_bundle_path": "/etc/ssl/certs/mitmproxy-ca.pem",
}
install_requirements(
pack_virtualenv_dir, requirements_file_path, proxy_config=proxy_config
)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"https://192.168.1.5:8080",
"--cert",
"/etc/ssl/certs/mitmproxy-ca.pem",
"install",
"-U",
"-r",
requirements_file_path,
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
@mock.patch.object(
virtualenvs, "run_command", mock.MagicMock(return_value=(0, "", ""))
)
@mock.patch.object(
virtualenvs, "get_env_for_subprocess_command", mock.MagicMock(return_value={})
)
def test_install_requirements_with_https_proxy_no_cert(self):
pack_virtualenv_dir = "/opt/stackstorm/virtualenvs/dummy_pack_tests/"
requirements_file_path = (
"/opt/stackstorm/packs/dummy_pack_tests/requirements.txt"
)
proxy_config = {
"https_proxy": "https://192.168.1.5:8080",
}
install_requirements(
pack_virtualenv_dir, requirements_file_path, proxy_config=proxy_config
)
expected_args = {
"cmd": [
"/opt/stackstorm/virtualenvs/dummy_pack_tests/bin/pip",
"--proxy",
"https://192.168.1.5:8080",
"install",
"-U",
"-r",
requirements_file_path,
],
"env": {},
}
virtualenvs.run_command.assert_called_once_with(**expected_args)
def assertVirtualenvExists(self, virtualenv_dir):
self.assertTrue(os.path.exists(virtualenv_dir))
self.assertTrue(os.path.isdir(virtualenv_dir))
self.assertTrue(os.path.isdir(os.path.join(virtualenv_dir, "bin/")))
return True
def test_setup_virtualenv_reserved_packname(self):
# Test a virtualenv update with pack which has global name
pack_name = "_global"
self.assertRaises(
ValueError,
setup_pack_virtualenv,
pack_name=pack_name,
update=False,
include_setuptools=False,
include_wheel=False,
)
|
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dm_env environment wrapper around Gym Atari configured to be like Xitari.
Gym Atari is built on the Arcade Learning Environment (ALE), whereas Xitari is
an old fork of the ALE.
"""
# pylint: disable=g-bad-import-order
from typing import Optional, Tuple
import atari_py # pylint: disable=unused-import for gym to load Atari games.
import dm_env
from dm_env import specs
import gym
import numpy as np
from dqn_zoo import atari_data
# Suffix appended to every Gym ID registered below so these registrations are
# distinct from the standard ALE ones (registered in _register_atari_environments).
_GYM_ID_SUFFIX = '-xitari-v1'
def _register_atari_environments():
  """Registers Atari environments in Gym to be as similar to Xitari as possible.

  Main difference from PongNoFrameSkip-v4, etc. is max_episode_steps is unset
  and only the usual 57 Atari games are registered.
  """
  register = gym.envs.registration.register
  # Arguments shared by every game; 'game' itself is filled in per iteration.
  common_kwargs = {
      'mode': None,  # Not necessarily the same as 0.
      'difficulty': None,  # Not necessarily the same as 0.
      'obs_type': 'image',
      'frameskip': 1,  # Get every frame.
      'repeat_action_probability': 0.0,  # No sticky actions.
      'full_action_space': False,
  }
  for game in atari_data.ATARI_GAMES:
    register(
        id=game + _GYM_ID_SUFFIX,  # Add suffix so ID has required format.
        entry_point='gym.envs.atari:AtariEnv',
        kwargs=dict(common_kwargs, game=game),
        max_episode_steps=None,  # No time limit, handled in training run loop.
        nondeterministic=False,  # Xitari is deterministic.
    )


_register_atari_environments()
class GymAtari(dm_env.Environment):
  """Gym Atari with a `dm_env.Environment` interface.

  Observations are `(rgb_frame, lives)` tuples; `lives` is read from the
  underlying ALE after every reset/step.
  """

  def __init__(self, game, seed):
    # `game` is the bare game name; the registration suffix is appended here.
    self._gym_env = gym.make(game + _GYM_ID_SUFFIX)
    self._gym_env.seed(seed)
    # When True, the next step() starts a new episode instead of stepping.
    self._start_of_episode = True

  def reset(self) -> dm_env.TimeStep:
    """Resets the environment and starts a new episode."""
    observation = self._gym_env.reset()
    lives = np.int32(self._gym_env.ale.lives())
    timestep = dm_env.restart((observation, lives))
    self._start_of_episode = False
    return timestep

  def step(self, action: np.int32) -> dm_env.TimeStep:
    """Updates the environment given an action and returns a timestep."""
    # If the previous timestep was LAST then we call reset() on the Gym
    # environment, otherwise step(). Although Gym environments allow you to step
    # through episode boundaries (similar to dm_env) they emit a warning.
    if self._start_of_episode:
      step_type = dm_env.StepType.FIRST
      observation = self._gym_env.reset()
      # FIRST timesteps carry no reward/discount, matching dm_env.restart().
      discount = None
      reward = None
      done = False
    else:
      observation, reward, done, info = self._gym_env.step(action)
      if done:
        assert 'TimeLimit.truncated' not in info, 'Should never truncate.'
        step_type = dm_env.StepType.LAST
        discount = 0.
      else:
        step_type = dm_env.StepType.MID
        discount = 1.

    lives = np.int32(self._gym_env.ale.lives())
    timestep = dm_env.TimeStep(
        step_type=step_type,
        observation=(observation, lives),
        reward=reward,
        discount=discount,
    )
    self._start_of_episode = done
    return timestep

  def observation_spec(self) -> Tuple[specs.Array, specs.Array]:
    space = self._gym_env.observation_space
    return (specs.Array(shape=space.shape, dtype=space.dtype, name='rgb'),
            specs.Array(shape=(), dtype=np.int32, name='lives'))

  def action_spec(self) -> specs.DiscreteArray:
    space = self._gym_env.action_space
    return specs.DiscreteArray(
        num_values=space.n, dtype=np.int32, name='action')

  def close(self):
    self._gym_env.close()
class RandomNoopsEnvironmentWrapper(dm_env.Environment):
  """Adds a random number of noop actions at the beginning of each episode."""

  def __init__(self,
               environment: dm_env.Environment,
               max_noop_steps: int,
               min_noop_steps: int = 0,
               noop_action: int = 0,
               seed: Optional[int] = None):
    """Initializes the random noops environment wrapper."""
    self._environment = environment
    if max_noop_steps < min_noop_steps:
      raise ValueError('max_noop_steps must be greater or equal min_noop_steps')
    self._min_noop_steps = min_noop_steps
    self._max_noop_steps = max_noop_steps
    self._noop_action = noop_action
    self._rng = np.random.RandomState(seed)

  def reset(self):
    """Begins a new episode.

    Resets the wrapped environment, then steps it with the noop action a
    randomly drawn number of times. Every intermediate timestep (including its
    reward and discount) is discarded; only the final observation survives.

    Returns:
      First episode timestep corresponding to the timestep after a random
      number of noop actions are applied to the inner environment.

    Raises:
      RuntimeError: if an episode end occurs while the inner environment
        is being stepped through with the noop action.
    """
    return self._apply_random_noops(initial_timestep=self._environment.reset())

  def step(self, action):
    """Steps the environment given an action.

    If the inner environment signals a new episode (FIRST timestep), random
    noops are applied exactly as in `reset()`.

    Args:
      action: action to pass to environment conforming to action spec.

    Returns:
      `Timestep` from the inner environment, unless a new episode began, in
      which case this is the timestep after the random noops.
    """
    inner_timestep = self._environment.step(action)
    if not inner_timestep.first():
      return inner_timestep
    return self._apply_random_noops(initial_timestep=inner_timestep)

  def _apply_random_noops(self, initial_timestep):
    # Draw the noop count uniformly from [min_noop_steps, max_noop_steps].
    assert initial_timestep.first()
    num_steps = self._rng.randint(self._min_noop_steps,
                                  self._max_noop_steps + 1)
    latest = initial_timestep
    for _ in range(num_steps):
      latest = self._environment.step(self._noop_action)
      if latest.last():
        raise RuntimeError('Episode ended while applying %s noop actions.' %
                           num_steps)

    # We make sure to return a FIRST timestep, i.e. discard rewards & discounts.
    return dm_env.restart(latest.observation)

  ## All methods except for reset and step redirect to the underlying env.

  def observation_spec(self):
    return self._environment.observation_spec()

  def action_spec(self):
    return self._environment.action_spec()

  def reward_spec(self):
    return self._environment.reward_spec()

  def discount_spec(self):
    return self._environment.discount_spec()

  def close(self):
    return self._environment.close()
|
|
import pytest
from . import normalize_ret
# Module-level marks applied to every test below: these states are safe to
# run on Windows.
pytestmark = [
    pytest.mark.windows_whitelisted,
]
def test_requisites_onfail_any(state, state_tree):
    """
    Call sls file containing several onfail_any requisites.

    Ensure that some of them are failing and that the order is right:
    ``d`` runs because one of its watched states (``b``) failed, while ``h``
    is skipped because none of ``e``/``f``/``g`` failed.
    """
    sls_contents = """
    a:
      cmd.run:
        - name: exit 0
    b:
      cmd.run:
        - name: exit 1
    c:
      cmd.run:
        - name: exit 0
    d:
      cmd.run:
        - name: echo itworked
        - onfail_any:
          - cmd: a
          - cmd: b
          - cmd: c
    e:
      cmd.run:
        - name: exit 0
    f:
      cmd.run:
        - name: exit 0
    g:
      cmd.run:
        - name: exit 0
    h:
      cmd.run:
        - name: echo itworked
        - onfail_any:
          - cmd: e
          - cmd: f
          - cmd: g
    """
    expected_result = {
        "cmd_|-a_|-exit 0_|-run": {
            "__run_num__": 0,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-b_|-exit 1_|-run": {
            "__run_num__": 1,
            "changes": True,
            "comment": 'Command "exit 1" run',
            "result": False,
        },
        "cmd_|-c_|-exit 0_|-run": {
            "__run_num__": 2,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-d_|-echo itworked_|-run": {
            "__run_num__": 3,
            "changes": True,
            "comment": 'Command "echo itworked" run',
            "result": True,
        },
        "cmd_|-e_|-exit 0_|-run": {
            "__run_num__": 4,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-f_|-exit 0_|-run": {
            "__run_num__": 5,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-g_|-exit 0_|-run": {
            "__run_num__": 6,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-h_|-echo itworked_|-run": {
            "__run_num__": 7,
            "changes": False,
            "comment": "State was not run because onfail req did not change",
            "result": True,
        },
    }
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        result = normalize_ret(ret.raw)
        assert result == expected_result
def test_requisites_onfail_all(state, state_tree):
    """
    Call sls file containing several onfail-all.

    Ensure that some of them are failing and that the order is right:
    onfail_all only fires when *every* listed requisite failed.
    """
    sls_contents = """
    a:
      cmd.run:
        - name: exit 0
    b:
      cmd.run:
        - name: exit 0
    c:
      cmd.run:
        - name: exit 0
    d:
      cmd.run:
        - name: exit 1
    e:
      cmd.run:
        - name: exit 1
    f:
      cmd.run:
        - name: exit 1
    reqs not met:
      cmd.run:
        - name: echo itdidntonfail
        - onfail_all:
          - cmd: a
          - cmd: e
    reqs also not met:
      cmd.run:
        - name: echo italsodidnonfail
        - onfail_all:
          - cmd: a
          - cmd: b
          - cmd: c
    reqs met:
      cmd.run:
        - name: echo itonfailed
        - onfail_all:
          - cmd: d
          - cmd: e
          - cmd: f
    reqs also met:
      cmd.run:
        - name: echo itonfailed
        - onfail_all:
          - cmd: d
        - require:
          - cmd: a
    """
    expected_result = {
        "cmd_|-a_|-exit 0_|-run": {
            "__run_num__": 0,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-b_|-exit 0_|-run": {
            "__run_num__": 1,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-c_|-exit 0_|-run": {
            "__run_num__": 2,
            "changes": True,
            "comment": 'Command "exit 0" run',
            "result": True,
        },
        "cmd_|-d_|-exit 1_|-run": {
            "__run_num__": 3,
            "changes": True,
            "comment": 'Command "exit 1" run',
            "result": False,
        },
        "cmd_|-e_|-exit 1_|-run": {
            "__run_num__": 4,
            "changes": True,
            "comment": 'Command "exit 1" run',
            "result": False,
        },
        "cmd_|-f_|-exit 1_|-run": {
            "__run_num__": 5,
            "changes": True,
            "comment": 'Command "exit 1" run',
            "result": False,
        },
        "cmd_|-reqs also met_|-echo itonfailed_|-run": {
            "__run_num__": 9,
            "changes": True,
            "comment": 'Command "echo itonfailed" run',
            "result": True,
        },
        "cmd_|-reqs also not met_|-echo italsodidnonfail_|-run": {
            "__run_num__": 7,
            "changes": False,
            "comment": "State was not run because onfail req did not change",
            "result": True,
        },
        "cmd_|-reqs met_|-echo itonfailed_|-run": {
            "__run_num__": 8,
            "changes": True,
            "comment": 'Command "echo itonfailed" run',
            "result": True,
        },
        "cmd_|-reqs not met_|-echo itdidntonfail_|-run": {
            "__run_num__": 6,
            "changes": False,
            "comment": "State was not run because onfail req did not change",
            "result": True,
        },
    }
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        result = normalize_ret(ret.raw)
        assert result == expected_result
def test_onfail_requisite(state, state_tree):
    """
    Tests a simple state using the onfail requisite:
    the dependent state runs only when its watched state failed.
    """
    sls_contents = """
    failing_state:
      cmd.run:
        - name: asdf
    non_failing_state:
      cmd.run:
        - name: echo "Non-failing state"
    test_failing_state:
      cmd.run:
        - name: echo "Success!"
        - onfail:
          - cmd: failing_state
    test_non_failing_state:
      cmd.run:
        - name: echo "Should not run"
        - onfail:
          - cmd: non_failing_state
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        assert (
            ret['cmd_|-test_failing_state_|-echo "Success!"_|-run'].comment
            == 'Command "echo "Success!"" run'
        )
        assert (
            ret['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'].comment
            == "State was not run because onfail req did not change"
        )
def test_multiple_onfail_requisite(state, state_tree):
    """
    test to ensure state is run even if only one
    of the onfails fails. This is a test for the issue:
    https://github.com/saltstack/salt/issues/22370
    """
    sls_contents = """
    a:
      cmd.run:
        - name: exit 0
    b:
      cmd.run:
        - name: exit 1
    c:
      cmd.run:
        - name: echo itworked
        - onfail:
          - cmd: a
          - cmd: b
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        # c must run because b (one of its onfail requisites) failed.
        assert ret["cmd_|-c_|-echo itworked_|-run"].changes["retcode"] == 0
        assert ret["cmd_|-c_|-echo itworked_|-run"].changes["stdout"] == "itworked"
def test_onfail_in_requisite(state, state_tree):
    """
    Tests a simple state using the onfail_in requisite
    (the inverse direction of onfail: declared on the watched state).
    """
    sls_contents = """
    failing_state:
      cmd.run:
        - name: asdf
        - onfail_in:
          - cmd: test_failing_state
    non_failing_state:
      cmd.run:
        - name: echo "Non-failing state"
        - onfail_in:
          - cmd: test_non_failing_state
    test_failing_state:
      cmd.run:
        - name: echo "Success!"
    test_non_failing_state:
      cmd.run:
        - name: echo "Should not run"
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        assert (
            ret['cmd_|-test_failing_state_|-echo "Success!"_|-run'].comment
            == 'Command "echo "Success!"" run'
        )
        assert (
            ret['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'].comment
            == "State was not run because onfail req did not change"
        )
def test_onfail_requisite_no_state_module(state, state_tree):
    """
    Tests a simple state using the onfail requisite,
    with the requisite target given by bare state id (no "cmd:" module prefix).
    """
    sls_contents = """
    failing_state:
      cmd.run:
        - name: asdf
    non_failing_state:
      cmd.run:
        - name: echo "Non-failing state"
    test_failing_state:
      cmd.run:
        - name: echo "Success!"
        - onfail:
          - failing_state
    test_non_failing_state:
      cmd.run:
        - name: echo "Should not run"
        - onfail:
          - non_failing_state
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        assert (
            ret['cmd_|-test_failing_state_|-echo "Success!"_|-run'].comment
            == 'Command "echo "Success!"" run'
        )
        assert (
            ret['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'].comment
            == "State was not run because onfail req did not change"
        )
def test_onfail_requisite_with_duration(state, state_tree):
    """
    Tests a simple state using the onfail requisite;
    a skipped (not-run) state must still report a "duration" field.
    """
    sls_contents = """
    failing_state:
      cmd.run:
        - name: asdf
    non_failing_state:
      cmd.run:
        - name: echo "Non-failing state"
    test_failing_state:
      cmd.run:
        - name: echo "Success!"
        - onfail:
          - cmd: failing_state
    test_non_failing_state:
      cmd.run:
        - name: echo "Should not run"
        - onfail:
          - cmd: non_failing_state
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        assert (
            "duration"
            in ret['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
        )
def test_multiple_onfail_requisite_with_required(state, state_tree):
    """
    test to ensure multiple states are run
    when specified as onfails for a single state.
    This is a test for the issue:
    https://github.com/saltstack/salt/issues/46552
    """
    sls_contents = """
    a:
      cmd.run:
        - name: exit 1
    pass:
      cmd.run:
        - name: exit 0
    b:
      cmd.run:
        - name: echo b
        - onfail:
          - cmd: a
    c:
      cmd.run:
        - name: echo c
        - onfail:
          - cmd: a
        - require:
          - cmd: b
    d:
      cmd.run:
        - name: echo d
        - onfail:
          - cmd: a
        - require:
          - cmd: c
    e:
      cmd.run:
        - name: echo e
        - onfail:
          - cmd: pass
        - require:
          - cmd: c
    f:
      cmd.run:
        - name: echo f
        - onfail:
          - cmd: pass
        - onchanges:
          - cmd: b
    """
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        # b, c, d chain off the failing state "a" and must all run in order.
        assert ret["cmd_|-b_|-echo b_|-run"].changes["retcode"] == 0
        assert ret["cmd_|-c_|-echo c_|-run"].changes["retcode"] == 0
        assert ret["cmd_|-d_|-echo d_|-run"].changes["retcode"] == 0
        assert ret["cmd_|-b_|-echo b_|-run"].changes["stdout"] == "b"
        assert ret["cmd_|-c_|-echo c_|-run"].changes["stdout"] == "c"
        assert ret["cmd_|-d_|-echo d_|-run"].changes["stdout"] == "d"
        # e and f watch "pass", which succeeded, so they are skipped.
        assert (
            ret["cmd_|-e_|-echo e_|-run"].comment
            == "State was not run because onfail req did not change"
        )
        assert (
            ret["cmd_|-f_|-echo f_|-run"].comment
            == "State was not run because onfail req did not change"
        )
def test_multiple_onfail_requisite_with_required_no_run(state, state_tree):
    """
    test to ensure multiple states are not run
    when specified as onfails for a single state
    which fails.
    This is a test for the issue:
    https://github.com/saltstack/salt/issues/46552
    """
    sls_contents = """
    a:
      cmd.run:
        - name: exit 0
    b:
      cmd.run:
        - name: echo b
        - onfail:
          - cmd: a
    c:
      cmd.run:
        - name: echo c
        - onfail:
          - cmd: a
        - require:
          - cmd: b
    d:
      cmd.run:
        - name: echo d
        - onfail:
          - cmd: a
        - require:
          - cmd: c
    """
    expected = "State was not run because onfail req did not change"
    with pytest.helpers.temp_file("requisite.sls", sls_contents, state_tree):
        ret = state.sls("requisite")
        # "a" succeeds, so the whole b -> c -> d chain is skipped.
        assert ret["cmd_|-b_|-echo b_|-run"].comment == expected
        assert ret["cmd_|-c_|-echo c_|-run"].comment == expected
        assert ret["cmd_|-d_|-echo d_|-run"].comment == expected
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
# Mapping from numpy scalar type to the function that appends a flat list of
# such values to the matching repeated field of a TensorProto.
#
# Fixes for modern NumPy: np.asscalar() was removed in NumPy 1.23 (use
# ndarray/scalar .item() instead, which is what asscalar always called), and
# the np.object / np.bool aliases for the builtins were removed in NumPy 1.24
# (use the builtins directly -- they are the exact same dict keys).
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
      object: fast_tensor_util.AppendObjectArrayToTensorProto,
      bool: fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:

  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([x.item() for x in proto_values])

  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([x.item() for x in proto_values])

  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([x.item() for x in proto_values])

  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([x.item() for x in proto_values])

  def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
    # DT_COMPLEX values are stored as interleaved (real, imag) pairs.
    tensor_proto.scomplex_val.extend([v.item()
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([x.item() for x in proto_values])

  _NP_TO_APPEND_FN = {
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplexArrayToTensorProto,
      np.complex128: SlowAppendComplexArrayToTensorProto,
      object: SlowAppendObjectArrayToTensorProto,
      bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Looks up `dtype` in `dtype_dict` by equality scan.

  NOTE: dtype_dict.get(dtype) always returns None (hashes differ even when
  the keys compare equal), so an explicit comparison over all items is used.

  Returns:
    The mapped value, or None when no key compares equal to `dtype`.
  """
  return next(
      (value for key, value in dtype_dict.items() if key == dtype), None)
def GetNumpyAppendFn(dtype):
  """Returns the append-to-TensorProto function for numpy dtype `dtype`."""
  # numpy dtype for strings are variable length. We can not compare
  # dtype with a single constant (np.string does not exist) to decide
  # dtype is a "string" type. We need to compare the dtype.type to be
  # sure it's a string type.
  # np.bytes_ / np.str_ are the canonical spellings of the np.string_ /
  # np.unicode_ aliases, which were removed in NumPy 2.0.
  if dtype.type == np.bytes_ or dtype.type == np.str_:
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    else:
      return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
# TODO(mrry,irving): Make this a method of `TensorShape`.
def make_tensor_shape_proto(shape):
  """Converts a list of integers to a `TensorShapeProto`.

  Args:
    shape: List of integers representing the dimensions of the tensor.

  Returns:
    A `TensorShapeProto`.
  """
  dims = [tensor_shape_pb2.TensorShapeProto.Dim(size=size) for size in shape]
  return tensor_shape_pb2.TensorShapeProto(dim=dims)
def TensorShapeProtoToList(shape):
  """Converts a `TensorShapeProto` to a plain list of dimension sizes.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  return [dim_proto.size for dim_proto in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, list):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
# Numeric dtypes whose values may be serialized into the compact raw-bytes
# "tensor_content" field instead of the typed repeated *_val fields.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
for x in l:
if x is not None:
if isinstance(x, ops.Tensor):
return _Message("list containing Tensors")
else:
return x
return None
def _NotNone(v):
if v is None:
return _Message("None")
else:
return v
def _FilterInt(v):
  """Returns None when v (recursively) holds only integral values, else the
  first offending value (wrapped via _NotNone)."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(element) for element in v])
  if isinstance(v, compat.integral_types):
    return None
  return _NotNone(v)
def _FilterFloat(v):
  """Returns None when v (recursively) holds only real values, else the
  first offending value (wrapped via _NotNone)."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(element) for element in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Returns None when v (recursively) holds only complex values, else the
  first offending value (wrapped via _NotNone)."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(element) for element in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Returns None when v (recursively) holds only bytes/text values, else the
  first offending value (wrapped via _NotNone)."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(element) for element in v])
  return None if isinstance(v, compat.bytes_or_text_types) else _NotNone(v)
def _FilterBool(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterBool(x) for x in v])
return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
  """Returns str(v) for the first Tensor found in v (recursively), else None.

  Inverse polarity of the other filters: a Tensor is the *rejected* value.
  """
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(element) for element in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps a TF dtype to the filter that validates Python values destined for it.
# Dtypes not listed here fall back to _FilterNotTensor in _AssertCompatible.
_TF_TO_IS_OK = {
    dtypes.float32: _FilterFloat,
    dtypes.float64: _FilterFloat,
    dtypes.int32: _FilterInt,
    dtypes.uint8: _FilterInt,
    dtypes.int16: _FilterInt,
    dtypes.int8: _FilterInt,
    dtypes.string: _FilterStr,
    dtypes.complex64: _FilterComplex,
    dtypes.int64: _FilterInt,
    dtypes.bool: _FilterBool,
    dtypes.qint32: _FilterInt,
    dtypes.quint8: _FilterInt,
    dtypes.qint8: _FilterInt,
}
def _AssertCompatible(values, dtype):
  """Raise TypeError if `values` is not compatible with `dtype`."""
  check = _TF_TO_IS_OK.get(dtype, _FilterNotTensor)
  mismatch = check(values)
  if mismatch is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(mismatch), type(mismatch).__name__))
def make_tensor_proto(values, dtype=None, shape=None):
  """Create a TensorProto.

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.

  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.

  Returns:
    A TensorProto. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python
    programs. To access the values you should convert the proto back to a
    numpy ndarray with tensor_util.MakeNdarray(proto).

  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values.
  """
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    np_dt = dtype.as_numpy_dtype if dtype else None
    # NOTE(review): when shape is None this relies on np.prod(None)
    # evaluating != 0 (NumPy object-array behavior) rather than raising --
    # confirm on the NumPy versions in use.
    if np.prod(shape) == 0:
      # Zero-element tensor: no values to validate or copy.
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # Reject ragged (non-rectangular) nested lists, which numpy would
      # otherwise silently coerce.
      if list(nparray.shape) != _GetDenseDimensions(values):
        raise ValueError("Argument must be a dense tensor: %s" % values)
  # python/numpy default float type is float64. We prefer float32 instead.
  if (nparray.dtype == np.float64) and dtype is None:
    nparray = nparray.astype(np.float32)
  # python/numpy default int type is int64. We prefer int32 instead.
  elif (nparray.dtype == np.int64) and dtype is None:
    nparray = nparray.astype(np.int32)
  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)
  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint32]:
    numpy_dtype = dtype
  if dtype is not None and not dtype.base_dtype == numpy_dtype.base_dtype:
    raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))
  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape)
    # Fewer elements than the requested shape is allowed (stored in the
    # repeated value fields below); more elements is an error.
    is_same_size = shape_size == nparray.size
    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))
  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=make_tensor_shape_proto(shape))
  # Fast path: pack the raw bytes into tensor_content for the simple
  # numeric dtypes when the array exactly fills the shape.
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto
  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
    return tensor_proto
  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()
  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError("Element type not supported in TensorProto: %s" %
                    numpy_dtype.name)
  append_fn(tensor_proto, proto_values)
  return tensor_proto
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  if tensor.tensor_content:
    # Packed binary representation: reinterpret the raw bytes directly.
    # NOTE(review): np.fromstring is deprecated in modern NumPy in favor
    # of np.frombuffer.
    return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float32:
    # In every branch below, a single stored value is broadcast (repeated)
    # to fill the whole shape.
    if len(tensor.float_val) == 1:
      return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.int16, dtypes.int8,
                        dtypes.qint32, dtypes.quint8, dtypes.qint8,
                        dtypes.bfloat16]:
    # All small integer-backed dtypes share the int_val field.
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([x for x in tensor.string_val],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # scomplex_val stores interleaved (real, imag) float pairs.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.scomplex_val[0],
                                        tensor.scomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # Compare ranks explicitly: zip() truncates to the shorter sequence, so
  # without this check shapes of different rank (e.g. [2, 3] vs [2, 3, 4])
  # would incorrectly compare equal on their common prefix.
  return (len(tensor_shape_list) == len(shape) and
          all(x == y for x, y in zip(tensor_shape_list, shape)))
def constant_value(tensor):
  """Returns the constant value of the given tensor, if efficiently calculable.

  This function attempts to partially evaluate the given tensor, and
  returns its value as a numpy ndarray if this succeeds.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  Args:
    tensor: The Tensor to be evaluated.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    # The value is stored directly in the node's "value" attribute.
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    # Shape is statically known when the input's shape is fully defined.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    # Size is the product of all statically known dimension sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    # Rank needs only the number of dimensions, not their sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return input_shape.ndims
    else:
      return None
  elif tensor.op.type == "Range":
    # Constant only if start, limit and delta are all themselves constant.
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # Constant only if the concat axis and every input are constant.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  else:
    return None
# Add some temporary backwards compatibility aliases until all downstream code
# is changed. TODO(irving): Remove these aliases.
# Deprecated CamelCase names; new code should call the snake_case functions.
ConstantValue = constant_value
MakeTensorShapeProto = make_tensor_shape_proto
|
|
"""
Calculators are used to execute calculations.
"""
from carousel.core import logging, UREG
import numpy as np
LOGGER = logging.getLogger(__name__)
def index_registry(args, reg, ts=None, idx=None):
    """
    Index into a :class:`~carousel.core.Registry` to return arguments
    from :class:`~carousel.core.data_sources.DataRegistry` and
    :class:`~carousel.core.outputs.OutputRegistry` based on the
    calculation parameter file.

    :param args: Arguments field from the calculation parameter file.
    :param reg: Registry in which to index to get the arguments.
    :type reg: :class:`~carousel.core.data_sources.DataRegistry`,
        :class:`~carousel.core.outputs.OutputRegistry`
    :param ts: Time step [units of time].
    :param idx: [None] Index of current time step for dynamic calculations.

    Required arguments for static and dynamic calculations are specified in the
    calculation parameter file by the "args" key. Arguments can be from
    either the data registry or the outputs registry, which is denoted by the
    "data" and "outputs" keys. Each argument is a dictionary whose key is the
    name of the argument in the formula specified and whose value can be one of
    the following:

    * The name of the argument in the registry ::

        {"args": {"outputs": {"T_bypass": "T_bypass_diode"}}}

      maps the formula argument "T_bypass" to the outputs registry item
      "T_bypass_diode".

    * A list with the name of the argument in the registry as the first element
      and a negative integer denoting the index relative to the current
      timestep as the second element ::

        {"args": {"data": {"T_cell": ["Tcell", -1]}}}

      indexes the previous timestep of "Tcell" from the data registry.

    * A list with the name of the argument in the registry as the first element
      and a list of positive integers denoting the index into the item from the
      registry as the second element ::

        {"args": {"data": {"cov": ["bypass_diode_covariance", [2]]}}}

      indexes the third element of "bypass_diode_covariance".

    * A list with the name of the argument in the registry as the first
      element, a negative real number denoting the time relative to the current
      timestep as the second element, and the units of the time as the third ::

        {"args": {"data": {"T_cell": ["Tcell", -1, 'day']}}}

      indexes the entire previous day of "Tcell".
    """
    # TODO: move this to new Registry method or __getitem__
    # TODO: replace idx with datetime object and use timeseries to interpolate
    # into data, not necessary for outputs since that will conform to idx
    rargs = dict.fromkeys(args)  # make dictionary from arguments
    # iterate over arguments
    for k, v in args.iteritems():
        # var ------------------ states ------------------
        # idx ===== not None ===== ======= None =======
        # isconstant True False None True False None
        # is_dynamic no yes yes no no no
        # NOTE(review): when v is a list, reg.isconstant.get(v) would raise
        # TypeError for a plain dict (unhashable key); this relies on idx
        # being falsy or on isconstant tolerating list keys -- confirm.
        is_dynamic = idx and not reg.isconstant.get(v)
        # switch based on string type instead of sequence
        if isinstance(v, basestring):
            # the default assumes the current index
            rargs[k] = reg[v][idx] if is_dynamic else reg[v]
        elif len(v) < 3:
            # two-element form: [name, index-spec]
            if reg.isconstant[v[0]]:
                # only get indices specified by v[1]
                # tuples interpreted as a list of indices, see
                # NumPy basic indexing: Dealing with variable
                # numbers of indices within programs
                rargs[k] = reg[v[0]][tuple(v[1])]
            elif v[1] < 0:
                # specified offset from current index
                rargs[k] = reg[v[0]][idx + v[1]]
            else:
                # get indices specified by v[1] at current index
                rargs[k] = reg[v[0]][idx][tuple(v[1])]
        else:
            # three-element form: [name, offset, units] -- slice a window of
            # timesteps ending at the current index.
            # specified timedelta from current index
            dt = 1 + (v[1] * UREG(str(v[2])) / ts).item()
            # TODO: deal with fractions of timestep
            rargs[k] = reg[v[0]][(idx + dt):(idx + 1)]
    return rargs
class Calculator(object):
    """
    Base class for calculators. Must implement ``calculate`` method.
    """
    # short name used to identify this calculator; empty by default
    shortname = ''

    @staticmethod
    def get_covariance(datargs, outargs, vargs, datvar, outvar):
        """
        Get covariance matrix.

        :param datargs: data arguments
        :param outargs: output arguments
        :param vargs: variable arguments
        :param datvar: variance of data arguments
        :param outvar: variance of output arguments
        :return: covariance
        """
        # number of formula arguments that are not constant
        argn = len(vargs)
        # number of observations must be the same for all vargs
        nobs = 1
        # First pass: find the largest observation count among all pairwise
        # variance entries (scalar entries leave nobs at 1).
        for m in xrange(argn):
            a = vargs[m]
            # map the formula argument to its registry name; try the data
            # registry first, fall back to the outputs registry
            try:
                a = datargs[a]
            except (KeyError, TypeError):
                a = outargs[a]
                avar = outvar[a]
            else:
                avar = datvar[a]
            for n in xrange(argn):
                b = vargs[n]
                try:
                    b = datargs[b]
                except (KeyError, TypeError):
                    b = outargs[b]
                # missing covariance entries are treated as zero
                c = avar.get(b, 0.0)
                try:
                    nobs = max(nobs, len(c))
                except (TypeError, ValueError):
                    # c is a scalar, not a sequence
                    LOGGER.debug('c of %s vs %s = %g', a, b, c)
        # covariance matrix is initially zeros
        cov = np.zeros((nobs, argn, argn))
        # loop over arguments in both directions, fill in covariance
        for m in xrange(argn):
            a = vargs[m]
            try:
                a = datargs[a]
            except (KeyError, TypeError):
                a = outargs[a]
                avar = outvar[a]
            else:
                avar = datvar[a]
            for n in xrange(argn):
                b = vargs[n]
                try:
                    b = datargs[b]
                except (KeyError, TypeError):
                    b = outargs[b]
                cov[:, m, n] = avar.get(b, 0.0)
        if nobs == 1:
            cov = cov.squeeze()  # squeeze out any extra dimensions
        LOGGER.debug('covariance:\n%r', cov)
        return cov

    @classmethod
    def calculate(cls, calc, formula_reg, data_reg, out_reg,
                  timestep=None, idx=None):
        """
        Execute calculation

        :param calc: calculation, with formula, args and return keys
        :type calc: dict
        :param formula_reg: Registry of formulas.
        :type formula_reg: :class:`~carousel.core.FormulaRegistry`
        :param data_reg: Data registry.
        :type data_reg: :class:`~carousel.core.data_sources.DataRegistry`
        :param out_reg: Outputs registry.
        :type out_reg: :class:`~carousel.core.outputs.OutputRegistry`
        :param timestep: simulation interval length [time], default is ``None``
        :param idx: interval index, default is ``None``
        :type idx: int
        """
        # get the formula-key from each static calc
        formula = calc['formula']  # name of formula in calculation
        func = formula_reg[formula]  # formula function object
        fargs = formula_reg.args.get(formula, [])  # formula arguments
        constants = formula_reg.isconstant.get(formula)  # constant args
        # formula arguments that are not constant
        vargs = [] if constants is None else [a for a in fargs if a not in constants]
        args = calc['args']  # calculation arguments
        # separate data and output arguments
        datargs, outargs = args.get('data', {}), args.get('outputs', {})
        data = index_registry(datargs, data_reg, timestep, idx)
        outputs = index_registry(outargs, out_reg, timestep, idx)
        kwargs = dict(data, **outputs)  # combined data and output args
        # positional args in formula-argument order; the rest stay keyword
        args = [kwargs.pop(a) for a in fargs if a in kwargs]
        returns = calc['returns']  # return arguments
        # if constants is None then the covariance should also be None
        # TODO: except other values, eg: "all" to indicate no covariance
        if constants is None:
            cov = None  # do not propagate uncertainty
        else:
            # get covariance matrix
            cov = cls.get_covariance(datargs, outargs, vargs,
                                     data_reg.variance, out_reg.variance)
            # update kwargs with covariance if it exists
            kwargs['__covariance__'] = cov
        retval = func(*args, **kwargs)  # calculate function
        # update output registry with covariance and jacobian
        if cov is not None:
            # the formula's return value ends with (covariance, jacobian)
            # when uncertainty is propagated; split them off
            # split uncertainty and jacobian from return values
            cov, jac = retval[-2:]
            retval = retval[:-2]
            # scale covariance
            scale = np.asarray(
                [1 / r.m if isinstance(r, UREG.Quantity) else 1 / r
                 for r in retval]
            )  # use magnitudes if quantities
            cov = (np.swapaxes((cov.T * scale), 0, 1) * scale).T
            nret = len(retval)  # number of return output
            for m in xrange(nret):
                a = returns[m]  # name in output registry
                out_reg.variance[a] = {}
                out_reg.uncertainty[a] = {}
                out_reg.jacobian[a] = {}
                for n in xrange(nret):
                    b = returns[n]
                    out_reg.variance[a][b] = cov[:, m, n]
                    if a == b:
                        # diagonal entries give the relative uncertainty in %
                        unc = np.sqrt(cov[:, m, n]) * 100 * UREG.percent
                        out_reg.uncertainty[a][b] = unc
                for n in xrange(len(vargs)):
                    b = vargs[n]
                    try:
                        b = datargs[b]
                    except (KeyError, TypeError):
                        b = outargs[b]
                    out_reg.jacobian[a][b] = jac[:, m, n]
                LOGGER.debug('%s cov:\n%r', a, out_reg.variance[a])
                LOGGER.debug('%s jac:\n%r', a, out_reg.jacobian[a])
                LOGGER.debug('%s unc:\n%r', a, out_reg.uncertainty[a])
        # if there's only one return value, squeeze out extra dimensions
        if len(retval) == 1:
            retval = retval[0]
        # put return values into output registry
        if len(returns) > 1:
            # more than one return, zip them up
            if idx is None:
                out_reg.update(zip(returns, retval))
            else:
                for k, v in zip(returns, retval):
                    out_reg[k][idx] = v
        else:
            # only one return, get it by index at 0
            if idx is None:
                out_reg[returns[0]] = retval
            else:
                out_reg[returns[0]][idx] = retval
|
|
# Copyright (c) 2013, OpenStack
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from novaclient import exceptions
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
from novaclient.v2 import flavors
class FlavorsTest(utils.TestCase):
    """Unit tests for the v2 flavors manager and the Flavor resource.

    The fake client and the expected resource class are produced by the
    ``_get_fake_client``/``_get_flavor_type`` hooks so subclasses can rerun
    the same assertions against a different client/resource pair.
    """

    def setUp(self):
        super(FlavorsTest, self).setUp()
        self.cs = self._get_fake_client()
        self.flavor_type = self._get_flavor_type()

    def _get_fake_client(self):
        # Hook: which fake client the assertions run against.
        return fakes.FakeClient()

    def _get_flavor_type(self):
        # Hook: resource class expected back from the manager.
        return flavors.Flavor

    def test_list_flavors(self):
        fl = self.cs.flavors.list()
        self.cs.assert_called('GET', '/flavors/detail')
        for flavor in fl:
            self.assertIsInstance(flavor, self.flavor_type)

    def test_list_flavors_undetailed(self):
        fl = self.cs.flavors.list(detailed=False)
        self.cs.assert_called('GET', '/flavors')
        for flavor in fl:
            self.assertIsInstance(flavor, self.flavor_type)

    def test_list_flavors_with_marker_limit(self):
        self.cs.flavors.list(marker=1234, limit=4)
        self.cs.assert_called('GET', '/flavors/detail?limit=4&marker=1234')

    def test_list_flavors_is_public_none(self):
        # is_public=None is forwarded explicitly as a query parameter.
        fl = self.cs.flavors.list(is_public=None)
        self.cs.assert_called('GET', '/flavors/detail?is_public=None')
        for flavor in fl:
            self.assertIsInstance(flavor, self.flavor_type)

    def test_list_flavors_is_public_false(self):
        fl = self.cs.flavors.list(is_public=False)
        self.cs.assert_called('GET', '/flavors/detail?is_public=False')
        for flavor in fl:
            self.assertIsInstance(flavor, self.flavor_type)

    def test_list_flavors_is_public_true(self):
        # True matches the server default, so no query parameter is sent.
        fl = self.cs.flavors.list(is_public=True)
        self.cs.assert_called('GET', '/flavors/detail')
        for flavor in fl:
            self.assertIsInstance(flavor, self.flavor_type)

    def test_get_flavor_details(self):
        f = self.cs.flavors.get(1)
        self.cs.assert_called('GET', '/flavors/1')
        self.assertIsInstance(f, self.flavor_type)
        self.assertEqual(256, f.ram)
        self.assertEqual(10, f.disk)
        self.assertEqual(10, f.ephemeral)
        self.assertTrue(f.is_public)

    def test_get_flavor_details_alphanum_id(self):
        # Flavor ids are not required to be integers.
        f = self.cs.flavors.get('aa1')
        self.cs.assert_called('GET', '/flavors/aa1')
        self.assertIsInstance(f, self.flavor_type)
        self.assertEqual(128, f.ram)
        self.assertEqual(0, f.disk)
        self.assertEqual(0, f.ephemeral)
        self.assertTrue(f.is_public)

    def test_get_flavor_details_diablo(self):
        # Diablo-era flavors lack the ephemeral/is_public extensions; the
        # client reports those fields as 'N/A'.
        f = self.cs.flavors.get(3)
        self.cs.assert_called('GET', '/flavors/3')
        self.assertIsInstance(f, self.flavor_type)
        self.assertEqual(256, f.ram)
        self.assertEqual(10, f.disk)
        self.assertEqual('N/A', f.ephemeral)
        self.assertEqual('N/A', f.is_public)

    def test_find(self):
        f = self.cs.flavors.find(ram=256)
        self.cs.assert_called('GET', '/flavors/detail')
        self.assertEqual('256 MB Server', f.name)
        f = self.cs.flavors.find(disk=0)
        self.assertEqual('128 MB Server', f.name)
        self.assertRaises(exceptions.NotFound, self.cs.flavors.find,
                          disk=12345)

    def _create_body(self, name, ram, vcpus, disk, ephemeral, id, swap,
                     rxtx_factor, is_public):
        # Expected request payload for POST /flavors.
        return {
            "flavor": {
                "name": name,
                "ram": ram,
                "vcpus": vcpus,
                "disk": disk,
                "OS-FLV-EXT-DATA:ephemeral": ephemeral,
                "id": id,
                "swap": swap,
                "rxtx_factor": rxtx_factor,
                "os-flavor-access:is_public": is_public,
            }
        }

    def test_create(self):
        f = self.cs.flavors.create("flavorcreate", 512, 1, 10, 1234,
                                   ephemeral=10, is_public=False)
        body = self._create_body("flavorcreate", 512, 1, 10, 10, 1234, 0, 1.0,
                                 False)
        self.cs.assert_called('POST', '/flavors', body)
        self.assertIsInstance(f, self.flavor_type)

    def test_create_with_id_as_string(self):
        flavor_id = 'foobar'
        f = self.cs.flavors.create("flavorcreate", 512,
                                   1, 10, flavor_id, ephemeral=10,
                                   is_public=False)
        body = self._create_body("flavorcreate", 512, 1, 10, 10, flavor_id, 0,
                                 1.0, False)
        self.cs.assert_called('POST', '/flavors', body)
        self.assertIsInstance(f, self.flavor_type)

    def test_create_ephemeral_ispublic_defaults(self):
        # Defaults: ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True.
        f = self.cs.flavors.create("flavorcreate", 512, 1, 10, 1234)
        body = self._create_body("flavorcreate", 512, 1, 10, 0, 1234, 0,
                                 1.0, True)
        self.cs.assert_called('POST', '/flavors', body)
        self.assertIsInstance(f, self.flavor_type)

    def test_invalid_parameters_create(self):
        # Each numeric/boolean creation parameter rejects non-coercible input.
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", "invalid", 1, 10, 1234, swap=0,
                          ephemeral=0, rxtx_factor=1.0, is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, "invalid", 10, 1234, swap=0,
                          ephemeral=0, rxtx_factor=1.0, is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, 1, "invalid", 1234, swap=0,
                          ephemeral=0, rxtx_factor=1.0, is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, 1, 10, 1234, swap="invalid",
                          ephemeral=0, rxtx_factor=1.0, is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, 1, 10, 1234, swap=0,
                          ephemeral="invalid", rxtx_factor=1.0, is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, 1, 10, 1234, swap=0,
                          ephemeral=0, rxtx_factor="invalid", is_public=True)
        self.assertRaises(exceptions.CommandError, self.cs.flavors.create,
                          "flavorcreate", 512, 1, 10, 1234, swap=0,
                          ephemeral=0, rxtx_factor=1.0, is_public='invalid')

    def test_delete(self):
        self.cs.flavors.delete("flavordelete")
        self.cs.assert_called('DELETE', '/flavors/flavordelete')

    def test_delete_with_flavor_instance(self):
        f = self.cs.flavors.get(2)
        self.cs.flavors.delete(f)
        self.cs.assert_called('DELETE', '/flavors/2')

    def test_delete_with_flavor_instance_method(self):
        f = self.cs.flavors.get(2)
        f.delete()
        self.cs.assert_called('DELETE', '/flavors/2')

    def test_set_keys(self):
        f = self.cs.flavors.get(1)
        f.set_keys({'k1': 'v1'})
        self.cs.assert_called('POST', '/flavors/1/os-extra_specs',
                              {"extra_specs": {'k1': 'v1'}})

    def test_set_with_valid_keys(self):
        valid_keys = ['key4', 'month.price', 'I-Am:AK-ey.44-',
                      'key with spaces and _']
        f = self.cs.flavors.get(4)
        for key in valid_keys:
            f.set_keys({key: 'v4'})
            self.cs.assert_called('POST', '/flavors/4/os-extra_specs',
                                  {"extra_specs": {key: 'v4'}})

    def test_set_with_invalid_keys(self):
        invalid_keys = ['/1', '?1', '%1', '<', '>']
        f = self.cs.flavors.get(1)
        for key in invalid_keys:
            self.assertRaises(exceptions.CommandError, f.set_keys, {key: 'v1'})

    @mock.patch.object(flavors.FlavorManager, '_delete')
    def test_unset_keys(self, mock_delete):
        # unset_keys issues one DELETE per extra-spec key.
        f = self.cs.flavors.get(1)
        keys = ['k1', 'k2']
        f.unset_keys(keys)
        mock_delete.assert_has_calls([
            mock.call("/flavors/1/os-extra_specs/k1"),
            mock.call("/flavors/1/os-extra_specs/k2")
        ])
|
|
"""Functional tests for pooling operations."""
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
from tensorflow.python.ops import gen_nn_ops
def GetInceptionMaxPoolShapes():
  """Iterator for some of the max pool ops in the Inception 2015 model.

  Yields:
    Tuple (name, input_size, filter_size, out_size, strides, padding)
  """
  configs = [
      ("maxpool2", [32, 71, 71, 192], [1, 3, 3, 1], [32, 35, 35, 192],
       [1, 2, 2, 1], "VALID"),
      ("maxpool3", [32, 35, 35, 288], [1, 3, 3, 1], [32, 17, 17, 288],
       [1, 2, 2, 1], "VALID"),
      ("maxpool4", [32, 17, 17, 1248], [1, 3, 3, 1], [32, 8, 8, 1248],
       [1, 2, 2, 1], "VALID"),
      ("maxpool5", [32, 8, 8, 2048], [1, 3, 3, 1], [32, 8, 8, 2048],
       [1, 1, 1, 1], "SAME"),
  ]
  for config in configs:
    yield config
class PoolingTest(tf.test.TestCase):
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected, use_gpu):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t = tf.constant(x, shape=input_sizes)
t = pool_func(t, ksize=ksize, strides=strides, padding=padding)
actual = t.eval()
self.assertAllClose(expected, actual.flatten())
self.assertShapeEqual(actual, t)
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0], use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0], use_gpu=use_gpu)
  def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
    # Same non-square 1x2 / 2x1 windows, but with batch=2 and channels=2 to
    # check pooling is applied independently per batch and channel.
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
                       ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[2.0, 3.0, 3.0, 4.0,
                                 6.0, 7.0, 7.0, 8.0,
                                 10.0, 11.0, 11.0, 12.0,
                                 14.0, 15.0, 15.0, 16.0],
                       use_gpu=use_gpu)
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[2, 2, 2, 2],
                       ksize=[1, 2, 1, 1], strides=[1, 1, 1, 1],
                       padding="SAME",
                       expected=[3.0, 4.0, 5.0, 6.0,
                                 5.0, 6.0, 7.0, 8.0,
                                 11.0, 12.0, 13.0, 14.0,
                                 13.0, 14.0, 15.0, 16.0],
                       use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
expected_output = [11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0,
44.0, 45.0, 46.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0,
45.0, 46.0, 47.0, 48.0, 51.0, 52.0, 53.0, 54.0]
self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
  def _testAvgPoolSamePaddingPacket8(self, use_gpu):
    # 8x8 spatial input with 8 channels, 3x3 window, stride 2, SAME padding.
    expected_output = [73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 89.0,
                       90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 105.0, 106.0,
                       107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 117.0, 118.0,
                       119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 201.0, 202.0,
                       203.0, 204.0, 205.0, 206.0, 207.0, 208.0, 217.0, 218.0,
                       219.0, 220.0, 221.0, 222.0, 223.0, 224.0, 233.0, 234.0,
                       235.0, 236.0, 237.0, 238.0, 239.0, 240.0, 245.0, 246.0,
                       247.0, 248.0, 249.0, 250.0, 251.0, 252.0, 329.0, 330.0,
                       331.0, 332.0, 333.0, 334.0, 335.0, 336.0, 345.0, 346.0,
                       347.0, 348.0, 349.0, 350.0, 351.0, 352.0, 361.0, 362.0,
                       363.0, 364.0, 365.0, 366.0, 367.0, 368.0, 373.0, 374.0,
                       375.0, 376.0, 377.0, 378.0, 379.0, 380.0, 425.0, 426.0,
                       427.0, 428.0, 429.0, 430.0, 431.0, 432.0, 441.0, 442.0,
                       443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 457.0, 458.0,
                       459.0, 460.0, 461.0, 462.0, 463.0, 464.0, 469.0, 470.0,
                       471.0, 472.0, 473.0, 474.0, 475.0, 476.0]
    self._VerifyValues(tf.nn.avg_pool, input_sizes=[1, 8, 8, 8],
                       ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)
def testAvgPooling(self):
for use_gpu in True, False:
self._testAvgPoolValidPadding(use_gpu)
self._testAvgPoolSamePadding(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
self._testAvgPoolValidPaddingUnevenStride(use_gpu)
self._testAvgPoolSamePadding4(use_gpu)
self._testAvgPoolSamePaddingPacket4(use_gpu)
self._testAvgPoolSamePaddingPacket8(use_gpu)
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1], strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0], use_gpu=use_gpu)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1], strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
expected_output = [21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0,
54.0, 55.0, 56.0, 61.0, 62.0, 63.0, 64.0]
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output, use_gpu=use_gpu)
  def _testMaxPoolSamePaddingPacket8(self, use_gpu):
    """Max pool, SAME padding, 3x3 window, stride 2, 8x8 input, depth 8."""
    # Expected values for the 4x4x8 output: each entry is the maximum of the
    # (possibly clipped) 3x3 input window at that output position/channel.
    expected_output = [145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0,
                       161.0, 162.0, 163.0, 164.0, 165.0, 166.0, 167.0, 168.0,
                       177.0, 178.0, 179.0, 180.0, 181.0, 182.0, 183.0, 184.0,
                       185.0, 186.0, 187.0, 188.0, 189.0, 190.0, 191.0, 192.0,
                       273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
                       289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0,
                       305.0, 306.0, 307.0, 308.0, 309.0, 310.0, 311.0, 312.0,
                       313.0, 314.0, 315.0, 316.0, 317.0, 318.0, 319.0, 320.0,
                       401.0, 402.0, 403.0, 404.0, 405.0, 406.0, 407.0, 408.0,
                       417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
                       433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0,
                       441.0, 442.0, 443.0, 444.0, 445.0, 446.0, 447.0, 448.0,
                       465.0, 466.0, 467.0, 468.0, 469.0, 470.0, 471.0, 472.0,
                       481.0, 482.0, 483.0, 484.0, 485.0, 486.0, 487.0, 488.0,
                       497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
                       505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0]
    self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 8, 8, 8],
                       ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output, use_gpu=use_gpu)
def testMaxPooling(self):
for use_gpu in True, False:
self._testMaxPoolValidPadding(use_gpu)
self._testMaxPoolSamePadding(use_gpu)
self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
self._testMaxPoolValidPaddingUnevenStride(use_gpu)
self._testMaxPoolSamePaddingPacket4(use_gpu)
self._testMaxPoolSamePaddingPacket8(use_gpu)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0], use_gpu=False)
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(tf.nn.max_pool, input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3], strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False)
def _testDepthwiseMaxPoolInvalidConfig(self, in_size, ksize, strides,
error_msg, use_gpu=False):
t = tf.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(ValueError, error_msg):
t = tf.nn.max_pool(t, ksize=ksize, strides=strides, padding="SAME")
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2],
[1, 1, 1, 2], "exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2],
[1, 1, 1, 1], "depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if tf.test.IsBuiltWithCuda():
with self.test_session(use_gpu=True):
t = tf.constant(1.0, shape=[1, 2, 2, 4])
with self.assertRaisesOpError("for CPU devices"):
tf.nn.max_pool(t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
  # The following are tests that verify that the CPU and GPU implementations
  # produce the same results.
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
tensor_input = np.random.rand(*input_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
out_op, _ = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3,
input_shape).astype(np.float32)
tensor_output = np.random.rand(*output_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = tf.constant(tensor_input, shape=input_shape)
_, argmax_op = tf.nn.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad_with_argmax(t, grad_in, argmax,
ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
t = tf.constant(tensor_input, shape=input_shape)
out_op = tf.nn.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = tf.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops._max_pool_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testMaxPoolingWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
t = tf.constant(tensor_input, shape=[1, 3, 3, 1])
out_op, argmax_op = tf.nn.max_pool_with_argmax(t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=tf.int64,
padding="VALID")
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
def testMaxPoolingGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on GPU.
if not tf.test.IsBuiltWithCuda():
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True) as sess:
orig_in = tf.constant(orig_input, shape=[1, 3, 3, 1])
t = tf.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = tf.constant(tensor_argmax, shape=[1, 2, 2, 1],
dtype=tf.int64)
out_op = gen_nn_ops._max_pool_grad_with_argmax(orig_in, t, argmax,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID")
out = out_op.eval().flatten()
self.assertAllClose(out, [11.0, 12.0, 0.0, 13.0, 0.0,
14.0, 0.0, 0.0, 0.0])
def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes,
window_rows, window_cols, row_stride,
col_stride, padding, use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(x, shape=input_sizes, name="input")
if pool_func == tf.nn.avg_pool:
func_name = "avg_pool"
err_margin = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_margin = 1e-3
t = pool_func(input_tensor, ksize=[1, window_rows, window_rows, 1],
strides=[1, row_stride, col_stride, 1],
padding=padding, name=func_name)
err = gc.ComputeGradientError(
input_tensor, input_sizes, t, output_sizes,
x_init_value=x_init_value, delta=1e-2)
print("%s gradient error = " % func_name, err)
self.assertLess(err, err_margin)
def _testMaxPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.max_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testMaxPoolGrad(self):
for use_gpu in True, False:
self._testMaxPoolGradValidPadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_6(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_1_7(use_gpu=use_gpu)
self._testMaxPoolGradValidPadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding1_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_1(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding2_2(use_gpu=use_gpu)
self._testMaxPoolGradSamePadding3_1(use_gpu=use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops._max_pool_grad(
orig_input, orig_output, grad,
[1, window_rows, window_cols, 1], [1, row_stride, col_stride, 1],
padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
input_tensor = tf.constant(input_data, shape=input_sizes)
output_tensor = tf.nn.max_pool(
input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = tf.constant(output_backprop,
shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor,
window_rows, window_cols, row_stride, col_stride, padding)
actual_input_backprop = input_backprop_tensor.eval()
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = output_tensor.eval().flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(expected_input_backprop, actual_input_backprop,
rtol=1e-6, atol=1e-6)
  def _testMaxPoolGradDirect1_1(self):
    """Direct gradient check: constant input, 2x2 window, stride 1, VALID."""
    # With all inputs equal, the first (top-left) element of each window is
    # the argmax, so every backprop value lands there and the rest get 0.
    input_data = [
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 1.0, 1.0]
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 12.0, 13.0, 0.0,
        15.0, 16.0, 17.0, 0.0,
        19.0, 20.0, 21.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
          window_rows=2, window_cols=2, row_stride=1, col_stride=1,
          padding="VALID", use_gpu=use_gpu)
  def _testMaxPoolGradDirect1_2(self):
    """Direct gradient check: checkerboard input, 2x2 window, stride 1."""
    # The 1.0 entries win their windows; overlapping windows share winners,
    # so several backprop values accumulate onto the same input element.
    input_data = [
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0]
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    expected_input_backprop = [
        11.0, 0.0, 25.0, 0.0,
        0.0, 31.0, 0.0, 17.0,
        19.0, 0.0, 41.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
          window_rows=2, window_cols=2, row_stride=1, col_stride=1,
          padding="VALID", use_gpu=use_gpu)
  def _testMaxPoolGradDirect1_3(self):
    """Direct gradient check: checkerboard input, 3x3 window, SAME padding."""
    # SAME padding keeps the output 4x4; 3x3 windows overlap heavily, so
    # gradients accumulate onto the winning 1.0 entries.
    input_data = [
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,
        1.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 1.0,]
    output_backprop = [
        11.0, 12.0, 13.0, 14.0,
        15.0, 16.0, 17.0, 18.0,
        19.0, 20.0, 21.0, 22.0,
        23.0, 24.0, 25.0, 26.0]
    expected_input_backprop = [
        54, 0.0, 62, 0.0,
        0.0, 60, 0.0, 22.0,
        47, 0.0, 51, 0.0,
        0.0, 0.0, 0.0, 0.0,]
    for use_gpu in True, False:
      self._testMaxPoolGradDirect(
          input_data, output_backprop, expected_input_backprop,
          input_sizes=[1, 4, 4, 1], output_sizes=[1, 4, 4, 1],
          window_rows=3, window_cols=3, row_stride=1, col_stride=1,
          padding="SAME", use_gpu=use_gpu)
  def _testMaxPoolGradDirectWithNans2_1(self):
    """Direct gradient check with an all-NaN input: CPU vs. cudnn behavior."""
    input_data = [float("nan")] * 16
    output_backprop = [
        11.0, 12.0, 13.0,
        15.0, 16.0, 17.0,
        19.0, 20.0, 21.0]
    # Test the CPU implementation, which propagates diffs in case of NaN
    expected_input_backprop_tf_cpu = [
        11.0, 12.0, 13.0, 0.0,
        15.0, 16.0, 17.0, 0.0,
        19.0, 20.0, 21.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_tf_cpu,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=False)
    if not tf.test.IsBuiltWithCuda():
      return
    # Test the GPU implementation that uses cudnn for now.
    # It does not propagate the diff in cases of NaNs
    expected_input_backprop_cudnn = [
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_cudnn,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=True)
  def _testMaxPoolGradDirectWithNans2_2(self):
    """Direct gradient check with NaNs in both input and backprop."""
    input_data = [float("nan")] * 16
    output_backprop = [
        float("nan"), 12.0, 13.0,
        15.0, float("nan"), 17.0,
        19.0, 20.0, float("nan")]
    # Test the CPU implementation, which propagates diffs in case of NaN
    expected_input_backprop_tf_cpu = [
        float("nan"), 12.0, 13.0, 0.0,
        15.0, float("nan"), 17.0, 0.0,
        19.0, 20.0, float("nan"), 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_tf_cpu,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=False)
    if not tf.test.IsBuiltWithCuda():
      return
    # Test the GPU implementation that uses cudnn for now.
    # It does not propagate the diff in cases of NaNs
    expected_input_backprop_cudnn = [
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0]
    self._testMaxPoolGradDirect(
        input_data, output_backprop, expected_input_backprop_cudnn,
        input_sizes=[1, 4, 4, 1], output_sizes=[1, 3, 3, 1],
        window_rows=2, window_cols=2, row_stride=1, col_stride=1,
        padding="VALID", use_gpu=True)
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
def testAvgPoolGrad(self):
for use_gpu in False, True:
self._testAvgPoolGradValidPadding1_1(use_gpu)
self._testAvgPoolGradValidPadding2_1(use_gpu)
self._testAvgPoolGradValidPadding2_2(use_gpu)
self._testAvgPoolGradSamePadding1_1(use_gpu)
self._testAvgPoolGradSamePadding2_1(use_gpu)
self._testAvgPoolGradSamePadding2_2(use_gpu)
self._testAvgPoolGradSamePadding3_1(use_gpu)
def _testAvgPoolGradValidPadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="VALID", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=1, window_cols=1, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3], window_rows=2, window_cols=2, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3], window_rows=2, window_cols=2, row_stride=2,
col_stride=2, padding="SAME", use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, use_gpu):
self._ConstructAndTestGradient(
tf.nn.avg_pool, input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1], window_rows=3, window_cols=3, row_stride=1,
col_stride=1, padding="SAME", use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool]:
p = tf.nn.max_pool(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = tf.nn.max_pool_with_argmax(
tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaises(ValueError):
pool_func(tf.placeholder(tf.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 1], padding="SAME")
# Illegal strides.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError, "strides in the batch"):
pool_func(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[2, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
tf.nn.avg_pool(tf.placeholder(tf.float32),
ksize=[1, 1, 1, 1], strides=[1, 1, 1, 2], padding="SAME")
# Filter larger than input.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1], strides=[1, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError,
"filter must not be larger than the input"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1], strides=[1, 1, 1, 1], padding="SAME")
# Stride larger than filter.
for pool_func in [tf.nn.max_pool, tf.nn.avg_pool,
tf.nn.max_pool_with_argmax]:
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 5, 3, 1], strides=[1, 5, 5, 1], padding="SAME")
with self.assertRaisesRegexp(
ValueError, "stride must be less than or equal to filter"):
pool_func(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
ksize=[1, 3, 5, 1], strides=[1, 5, 5, 1], padding="SAME")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
  """Builds a PoolingTest method running the forward CPU/GPU comparison.

  The generated method is a no-op unless TensorFlow was built with CUDA,
  since MaxPoolWithArgMax only has a GPU implementation.
  """
  def Test(self):
    if not tf.test.IsBuiltWithCuda():
      return  # MaxPoolWithArgMax is implemented only on GPU.
    self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
  return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
  """Builds a PoolingTest method running the backward CPU/GPU comparison.

  The generated method is a no-op unless TensorFlow was built with CUDA,
  since MaxPoolWithArgMax only has a GPU implementation.
  """
  def Test(self):
    if not tf.test.IsBuiltWithCuda():
      return  # MaxPoolWithArgMax is implemented only on GPU.
    self._CompareMaxPoolingBk(input_size, output_size,
                              filter_size, strides, padding)
  return Test
if __name__ == "__main__":
  # Dynamically register one forward and one backward CPU/GPU comparison
  # test per Inception-style max-pool shape, then hand off to the runner.
  # Trailing underscores avoid shadowing the factory parameter names.
  for (name_, input_size_, filter_size_, output_size_, stride_,
       padding_) in GetInceptionMaxPoolShapes():
    setattr(PoolingTest, "testMaxPoolFwd_" + name_,
            GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
    setattr(PoolingTest, "testMaxPoolGrad_" + name_,
            GetMaxPoolGradTest(input_size_, filter_size_, output_size_,
                               stride_, padding_))
  tf.test.main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the loss scaling optimizer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.keras import backend
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.experimental import mixed_precision
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class _UnwrapPreventer(object):
"""Wrapper that DistributionStrategy will not unwrap.
Typically, DistributionStrategy will unwrap values when going from a cross-
replica context to a replica context via `call_for_each_replica`. This class
is a wrapper that DistributionStrategy will not unwrap, so it can be used to
prevent it from unwrapping a value.
TODO(reedwm): Find/implement a better way of preventing values from being
unwrapped by DistributionStrategy
"""
def __init__(self, value):
self.value = value
class _DelegatingTrackableMixin(object):
  """A mixin that delegates all Trackable methods to another trackable object.

  This class must be used with multiple inheritance. A class that subclasses
  Trackable can also subclass this class, which causes all Trackable methods to
  be delegated to the trackable object passed in the constructor.

  A subclass can use this mixin to appear as if it were the trackable passed to
  the constructor, from a Checkpoint's perspective. LossScaleOptimizer uses this
  mixin, so that the checkpoint format for a LossScaleOptimizer is identical to
  the checkpoint format for a normal optimizer. This allows a model to be saved
  with a normal Optimizer and restored with a LossScaleOptimizer, or vice versa.
  The only difference in checkpoint format is that the loss scale is also saved
  with a LossScaleOptimizer.
  """

  def __init__(self, trackable_obj):
    # The delegate that every Trackable method/property below forwards to.
    self._trackable = trackable_obj

  # pylint: disable=protected-access
  @property
  def _setattr_tracking(self):
    return self._trackable._setattr_tracking

  @_setattr_tracking.setter
  def _setattr_tracking(self, value):
    self._trackable._setattr_tracking = value

  @property
  def _update_uid(self):
    return self._trackable._update_uid

  @_update_uid.setter
  def _update_uid(self, value):
    self._trackable._update_uid = value

  @property
  def _unconditional_checkpoint_dependencies(self):
    return self._trackable._unconditional_checkpoint_dependencies

  @property
  def _unconditional_dependency_names(self):
    return self._trackable._unconditional_dependency_names

  @property
  def _name_based_restores(self):
    return self._trackable._name_based_restores

  def _maybe_initialize_trackable(self):
    return self._trackable._maybe_initialize_trackable()

  @property
  def _object_identifier(self):
    return self._trackable._object_identifier

  @property
  def _tracking_metadata(self):
    return self._trackable._tracking_metadata

  def _no_dependency(self, value):
    return self._trackable._no_dependency(value)

  def _name_based_attribute_restore(self, checkpoint):
    return self._trackable._name_based_attribute_restore(checkpoint)

  @property
  def _checkpoint_dependencies(self):
    return self._trackable._checkpoint_dependencies

  @property
  def _deferred_dependencies(self):
    return self._trackable._deferred_dependencies

  def _lookup_dependency(self, name):
    # Bug fix: the delegate's result was previously discarded (no `return`),
    # so this always returned None and broke dependency lookup.
    return self._trackable._lookup_dependency(name)

  def _add_variable_with_custom_getter(self,
                                       name,
                                       shape=None,
                                       dtype=dtypes.float32,
                                       initializer=None,
                                       getter=None,
                                       overwrite=False,
                                       **kwargs_for_getter):
    return self._trackable._add_variable_with_custom_getter(
        name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)

  def _preload_simple_restoration(self, name, shape):
    return self._trackable._preload_simple_restoration(name, shape)

  def _track_trackable(self, trackable, name, overwrite=False):  # pylint: disable=redefined-outer-name
    return self._trackable._track_trackable(trackable, name, overwrite)

  def _handle_deferred_dependencies(self, name, trackable):  # pylint: disable=redefined-outer-name
    return self._trackable._handle_deferred_dependencies(name, trackable)

  def _restore_from_checkpoint_position(self, checkpoint_position):
    return self._trackable._restore_from_checkpoint_position(
        checkpoint_position)

  def _single_restoration_from_checkpoint_position(self, checkpoint_position,
                                                   visit_queue):
    return self._trackable._single_restoration_from_checkpoint_position(
        checkpoint_position, visit_queue)

  def _gather_saveables_for_checkpoint(self):
    return self._trackable._gather_saveables_for_checkpoint()

  def _list_extra_dependencies_for_serialization(self, serialization_cache):
    return self._trackable._list_extra_dependencies_for_serialization(
        serialization_cache)

  def _list_functions_for_serialization(self, serialization_cache):
    return self._trackable._list_functions_for_serialization(
        serialization_cache)
  # pylint: enable=protected-access
@keras_export('keras.mixed_precision.experimental.LossScaleOptimizer')
class LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling.
Loss scaling is a process that multiplies the loss by a multiplier called the
loss scale, and divides each gradient by the same multiplier. The pseudocode
for this process is:
```
loss = ...
loss *= loss_scale
grads = gradients(loss, vars)
grads /= loss_scale
```
Mathematically, loss scaling has no effect, but can help avoid numerical
underflow in intermediate gradients when float16 tensors are used. By
multiplying the loss, each intermediate gradient will have the same multiplier
applied.
The loss scale can either be a fixed constant, chosen by the user, or be
dynamically determined. Dynamically determining the loss scale is convenient
as a loss scale does not have to be explicitly chosen. However it reduces
performance.
This optimizer wraps another optimizer and applies loss scaling to it via a
`LossScale`. Loss scaling is applied whenever gradients are
computed, either through `minimize()` or `get_gradients()`. The loss scale is
updated via `LossScale.update()` whenever gradients are applied, either
through `minimize()` or `apply_gradients()`. For example:
>>> opt = tf.keras.optimizers.SGD(0.25)
>>> opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt,
... "dynamic")
>>> var = tf.Variable(1.)
>>> loss_fn = lambda: var ** 2
  >>> # 'minimize' applies loss scaling to the loss and updates the loss scale.
>>> opt.minimize(loss_fn, var_list=var)
>>> var.numpy()
0.5
If a `tf.GradientTape` is used to compute gradients instead of
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, the loss
and gradients must be scaled manually. This can be done by calling
`LossScaleOptimizer.get_scaled_loss` before passing the loss to
`tf.GradientTape`, and `LossScaleOptimizer.get_unscaled_gradients` after
computing the gradients with `tf.GradientTape`. For example:
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
... scaled_loss = opt.get_scaled_loss(loss)
>>> scaled_grad = tape.gradient(scaled_loss, var)
>>> (grad,) = opt.get_unscaled_gradients([scaled_grad])
>>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here
>>> var.numpy()
0.25
"""
  # Tells Keras that the wrapped optimizer supports the _aggregate_gradients
  # hook (checked via this class attribute).
  _HAS_AGGREGATE_GRAD = True
  def __init__(self, optimizer, loss_scale):
    """Initializes this loss scale optimizer.
    Args:
      optimizer: The Optimizer instance to wrap.
      loss_scale: The loss scale to scale the loss and gradients. This can
        either be an int/float to use a fixed loss scale, the string "dynamic"
        to use dynamic loss scaling, or an instance of a LossScale. The string
        "dynamic" is equivalent to passing `DynamicLossScale()`, and passing an
        int/float is equivalent to passing a FixedLossScale with the given loss
        scale.
    Raises:
      ValueError: If `optimizer` is not an OptimizerV2, if it uses `clipnorm`
        or `clipvalue`, or if the resolved loss scale is None.
    """
    # Wrapped optimizers with clipping cannot be supported: clipping would be
    # applied to still-scaled gradients.
    if not isinstance(optimizer, optimizer_v2.OptimizerV2):
      raise ValueError('"optimizer" must be an instance of OptimizerV2, but '
                       'got: %s' % optimizer)
    if optimizer.clipnorm is not None:
      raise ValueError('LossScaleOptimizer does not support wrapping '
                       'optimizers with a clipnorm. Optimizer %s has clipnorm '
                       '%s' % (optimizer, optimizer.clipnorm))
    if optimizer.clipvalue is not None:
      raise ValueError('LossScaleOptimizer does not support wrapping '
                       'optimizers with a clipvalue. Optimizer %s has '
                       'clipvalue %s' % (optimizer, optimizer.clipvalue))
    self._raise_if_strategy_unsupported()
    self.clipnorm = None
    self.clipvalue = None
    self._optimizer = optimizer
    # keras_loss_scale_module.get accepts int/float/"dynamic"/LossScale.
    self._loss_scale = keras_loss_scale_module.get(loss_scale)
    if self._loss_scale is None:
      raise ValueError('loss_scale cannot be None.')
    # We don't call super().__init__, since we do not want to call OptimizerV2's
    # constructor.
    _DelegatingTrackableMixin.__init__(self, self._optimizer)
    for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale):
      # We cannot call `track_variable` in the LossScale class itself, because a
      # file outside of Keras cannot depend on a Keras file. Calling it here
      # instead is OK, because a variable only needs to be tracked if used with
      # a Keras class, and the only way to use LossScale with a Keras class is
      # through the LossScaleOptimizer.
      backend.track_variable(weight)
    self._track_trackable(self._loss_scale, 'loss_scale')
    # Needed because the superclass's __getattribute__ checks this.
    self._hyper = {}
    # To support restoring TensorFlow 2.2 checkpoints.
    self._track_trackable(FakeOptimizerForRestoration(self._optimizer),
                          'base_optimizer')
  @property
  def loss_scale(self):
    """The `LossScale` instance associated with this optimizer.

    This is the object used to scale losses and unscale gradients. It is
    read-only; pass a different loss scale to the constructor to change it.
    """
    return self._loss_scale
def get_scaled_loss(self, loss):
"""Scales the loss by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to scale the loss before
passing the loss to `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_unscaled_gradients` should also be called.
See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for
an example.
Args:
loss: The loss, which will be multiplied by the loss scale. Can either be
a tensor or a callable returning a tensor.
Returns:
`loss` multiplied by `LossScaleOptimizer.loss_scale()`.
"""
loss_scale = self._loss_scale()
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * math_ops.cast(loss_scale, loss_val.dtype)
return new_loss
else:
return loss * math_ops.cast(loss_scale, loss.dtype)
def get_unscaled_gradients(self, grads):
"""Unscales the gradients by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to unscale the gradients
after computing them with `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_scaled_loss` should also be called. See
the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an
example.
Args:
grads: A list of tensors, each which will be divided by the loss scale.
Can have None values, which are ignored.
Returns:
A new list the same size as `grads`, where every non-None value in `grads`
is divided by `LossScaleOptimizer.loss_scale()`.
"""
loss_scale = self._loss_scale()
loss_scale_reciprocal = 1. / loss_scale
return [
_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None
for g in grads
]
def _compute_gradients(self, loss, var_list, grad_loss=None):
loss = self.get_scaled_loss(loss)
grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-access
grad_loss)
grads = [g for g, _ in grads_and_vars]
variables = [v for _, v in grads_and_vars]
unscaled_grads = self.get_unscaled_gradients(grads)
return list(zip(unscaled_grads, variables))
def get_gradients(self, loss, params):
loss = self.get_scaled_loss(loss)
grads = self._optimizer.get_gradients(loss, params)
return self.get_unscaled_gradients(grads)
  def _create_all_weights(self, var_list):
    # Delegate weight/slot creation so the checkpoint layout matches that of
    # the wrapped optimizer alone.
    self._optimizer._create_all_weights(var_list)  # pylint: disable=protected-access
  def apply_gradients(self,
                      grads_and_vars,
                      name=None,
                      experimental_aggregate_gradients=True):
    """Applies gradients and updates the loss scale.

    Must be called in a replica context; the loss-scale update and the actual
    application are performed together in a cross-replica `merge_call`.
    """
    if distribution_strategy_context.in_cross_replica_context():
      raise ValueError('apply_gradients() must be called in a replica context.')
    # We check for the strategy here despite already checking in the constructor
    # as frequently the optimizer is created outside the strategy's scope.
    self._raise_if_strategy_unsupported()
    # Materialize once; the tuple is reused in the cross-replica step.
    grads_and_vars = tuple(grads_and_vars)
    return distribution_strategy_context.get_replica_context().merge_call(
        self._apply_gradients_cross_replica,
        args=(grads_and_vars, name, experimental_aggregate_gradients))
  def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name,
                                     experimental_aggregate_gradients):
    # LossScale.update returns an op that updates the loss scale plus a
    # bool/tensor saying whether the gradients should be applied this step
    # (e.g. a dynamic loss scale skips steps with non-finite gradients).
    grads = [g for g, _ in grads_and_vars]
    loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads)
    def apply_fn():
      # We do not want DistributionStrategy to unwrap any MirroredVariables in
      # grads_and_vars, because even in a replica context, the wrapped optimizer
      # expects mirrored variables. So we wrap the variables with an
      # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the
      # MirroredVariables.
      wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
      return distribution.extended.call_for_each_replica(
          self._apply_gradients,
          args=(grads, wrapped_vars, name, experimental_aggregate_gradients))
    # Note: We must call this cond() in a cross-replica context.
    # DistributionStrategy does not support having a cond in a replica context
    # with a branch that calls `merge_call`, and self._optimizer.apply_gradients
    # calls `merge_call`.
    maybe_apply_op = smart_cond.smart_cond(should_apply_grads,
                                           apply_fn,
                                           control_flow_ops.no_op)
    return control_flow_ops.group(maybe_apply_op, loss_scale_update_op)
  def _apply_gradients(self, grads, wrapped_vars, name,
                       experimental_aggregate_gradients):
    # Runs per replica: unwrap the _UnwrapPreventer and delegate to the
    # wrapped optimizer with already-unscaled gradients.
    # TODO(reedwm): This will raise a fairly cryptic error message if
    # self._optimizer.apply_gradients does not take
    # experimental_aggregate_gradients.
    return self._optimizer.apply_gradients(
        list(zip(grads, wrapped_vars.value)), name,
        experimental_aggregate_gradients=experimental_aggregate_gradients)
def get_config(self):
serialized_optimizer = optimizers.serialize(self._optimizer)
serialized_loss_scale = keras_loss_scale_module.serialize(self._loss_scale)
return {
'optimizer': serialized_optimizer,
'loss_scale': serialized_loss_scale,
}
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
config['optimizer'] = optimizers.deserialize(
config['optimizer'], custom_objects=custom_objects)
config['loss_scale'] = keras_loss_scale_module.deserialize(
config['loss_scale'], custom_objects=custom_objects)
return cls(**config)
def _raise_if_strategy_unsupported(self):
if not strategy_supports_loss_scaling():
strategy = distribution_strategy_context.get_strategy()
if isinstance(strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
raise ValueError(
'Loss scaling is not supported with TPUStrategy. Loss scaling is '
'unnecessary with TPUs, since they support bfloat16 instead of '
'float16 and bfloat16 does not require loss scaling. You should '
'remove the use of the LossScaleOptimizer when TPUs are used.')
else:
raise ValueError('Loss scaling is not supported with the '
'tf.distribute.Strategy: %s. Try using a different '
'Strategy, e.g. a MirroredStrategy' %
strategy.__class__.__name__)
  # Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer
  # below.
  @property
  def iterations(self):
    return self._optimizer.iterations
  @iterations.setter
  def iterations(self, variable):
    self._optimizer.iterations = variable
  def get_slot_names(self):
    return self._optimizer.get_slot_names()
  def variables(self):
    return self._optimizer.variables()
  @property
  def weights(self):
    return self._optimizer.weights
  def get_weights(self):
    return self._optimizer.get_weights()
  def set_weights(self, weights):
    return self._optimizer.set_weights(weights)
  # Slot-variable handling is forwarded so checkpoints save/restore through
  # the wrapped optimizer unchanged.
  def _aggregate_gradients(self, grads_and_vars):
    return self._optimizer._aggregate_gradients(grads_and_vars)  # pylint: disable=protected-access
  def _restore_slot_variable(self, slot_name, variable, slot_variable):
    return self._optimizer._restore_slot_variable(slot_name, variable,  # pylint: disable=protected-access
                                                  slot_variable)
  def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
                                       variable):
    return self._optimizer._create_or_restore_slot_variable(  # pylint: disable=protected-access
        slot_variable_position, slot_name, variable)
  def get_slot(self, var, slot_name):
    return self._optimizer.get_slot(var, slot_name)
  def add_slot(self, var, slot_name, initializer='zeros'):
    return self._optimizer.add_slot(var, slot_name, initializer)
  # For the most part, we only expose methods in the base OptimizerV2, not
  # individual subclasses like Adam. However, although "learning_rate" and "lr"
  # properties are not part of the base OptimizerV2 class, they are part of most
  # subclasses, so we expose them here for convenience.
  @property
  def learning_rate(self):
    return self._optimizer.learning_rate
  @learning_rate.setter
  def learning_rate(self, lr):
    self._optimizer.learning_rate = lr
  @property
  def lr(self):
    return self._optimizer.lr
  @lr.setter
  def lr(self, lr):
    self._optimizer.lr = lr
# We do not override some OptimizerV2 methods. For each, we describe why we do
# not delegate them to self._optimizer:
# * get_updates: get_updates() calls get_gradients(). Since we override
# get_gradients(), we cannot delegate get_updates() to self._optimizer,
# otherwise the overridden get_gradients() method would not be called.
# Luckily, get_updates() does not access any OptimizerV2 fields, so
# inheriting the OptimizerV2 version works fine.
# * minimize: We don't delegate for a similar as get_updates(): it calls
# both self._compute_gradients() and self.apply_gradients(), and both need
# to have the LossScaleOptimizer version called.
# TODO(reedwm): Maybe merge this class's functionality into OptimizerV2.
# TODO(reedwm): Maybe throw an error if mixed precision is used without this
# optimizer being used.
# Trackable delegations: Delegate all Trackable methods to the wrapped
# optimizer. This is so the checkpoint format for a LossScaleOptimizer is
# identical to the checkpoint format for a normal optimizer, except the loss
# scale is stored in the checkpoint.
class FakeOptimizerForRestoration(trackable.Trackable):
  """A fake optimizer used to support restoring TensorFlow 2.2 checkpoints.
  The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class
  exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow.
  In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the
  following in LossScaleOptimizer.__init__
  ```
  self._track_trackable(self._optimizer, 'base_optimizer')
  ```
  This means a dependency from the LossScaleOptimizer to the wrapped optimizer
  would be stored in the checkpoint. However now, the checkpoint format with a
  LossScaleOptimizer is the same as the format without a LossScaleOptimizer,
  except the loss scale is also stored. This means there is no dependency from
  the LossScaleOptimizer to the wrapped optimizer. Instead, the
  LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's
  perspective, by overriding all Trackable methods and delegating them to the
  wrapped optimizer.
  To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency
  on this class instead of the inner optimizer. When restored, this class will
  instead restore the slot variables of the inner optimizer. Since this class
  has no variables, it does not affect the checkpoint when saved.
  """
  def __init__(self, optimizer):
    # Hold only a reference; this object creates no variables of its own.
    self._optimizer = optimizer
  def get_slot_names(self):
    return self._optimizer.get_slot_names()
  def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
                                       variable):
    # Forward slot-variable restoration to the real wrapped optimizer.
    return self._optimizer._create_or_restore_slot_variable(  # pylint: disable=protected-access
        slot_variable_position, slot_name, variable)
# pylint: disable=protected-access
# Register LossScaleOptimizer as the wrapper class that mixed precision uses
# for any OptimizerV2 instance.
mixed_precision._register_wrapper_optimizer_cls(optimizer_v2.OptimizerV2,
                                                LossScaleOptimizer)
def _multiply_gradient(gradient, scale):
  """Multiply a (possibly sparse) gradient by the given scale factor."""
  typed_scale = math_ops.cast(scale, gradient.dtype)
  if not isinstance(gradient, ops.IndexedSlices):
    return gradient * typed_scale
  # Sparse case: scale only the values; indices and shape are unchanged.
  return ops.IndexedSlices(
      gradient.values * typed_scale,
      gradient.indices,
      dense_shape=gradient.dense_shape)
def strategy_supports_loss_scaling():
  """Returns True if the current Strategy supports loss scaling."""
  if not distribution_strategy_context.has_strategy():
    return True
  # Strategies are supported if either there is only one replica or if
  # variables are replicated per device. Otherwise, the current model.fit()
  # implementation and most custom training loops incorrectly unscale the
  # gradients: they are unscaled once per compute replica rather than once per
  # variable replica.
  # TODO(reedwm): Support all strategies.
  supported_strategies = (
      collective_all_reduce_strategy.CollectiveAllReduceStrategy,
      collective_all_reduce_strategy.CollectiveAllReduceStrategyV1,
      one_device_strategy.OneDeviceStrategy,
      one_device_strategy.OneDeviceStrategyV1,
      mirrored_strategy.MirroredStrategy,
      mirrored_strategy.MirroredStrategyV1,
  )
  return isinstance(distribution_strategy_context.get_strategy(),
                    supported_strategies)
|
|
# encoding: utf8
import logging
from pyramid.httpexceptions import HTTPForbidden, HTTPNotFound, HTTPSeeOther
from pyramid.response import Response
from pyramid.security import effective_principals
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
import wtforms
from floof import model
from floof.forms import FloofForm, KeygenField
from floof.lib.authn import get_ca
from floof.lib.helpers import friendly_serial
log = logging.getLogger(__name__)
def get_cert(serial, user=None, check_validity=False):
    """Fetch a certificate by serial and run common authorization checks.

    :param serial: Certificate serial to look up.
    :param user: If given, require that the certificate belongs to this user.
    :param check_validity: If True, require the certificate to still be valid.
    :returns: The matching ``model.Certificate``.
    :raises HTTPNotFound: If no certificate has this serial, or if
        ``check_validity`` is set and the certificate is expired/revoked.
    :raises HTTPForbidden: If ``user`` is given and does not own the cert.
    """
    # XXX return a tuple or check result with isinstance()?
    try:
        cert = model.session.query(model.Certificate) \
            .filter_by(serial=serial) \
            .one()
    except NoResultFound:
        raise HTTPNotFound(detail="Certificate not found.")
    # NOTE: Query.one() raises instead of returning None, so the old
    # `if cert is None` guard was dead code and has been removed.
    if user and cert not in user.certificates:
        raise HTTPForbidden(detail="That does not appear to be your certificate.")
    if check_validity and not cert.valid:
        raise HTTPNotFound(detail="That certificate has already expired or "
                                  "been revoked.")
    return cert
class CertificateForm(FloofForm):
    """Base form for certificate generation: choice of validity period."""
    days = wtforms.fields.SelectField(u'Validity Period',
        coerce=int,
        choices=[(31, '31 days'), (366, '1 year'), (1096, '3 years')]
    )
class BrowserCertificateForm(CertificateForm):
    """Certificate form where the browser generates the keypair (keygen)."""
    pubkey = KeygenField(u'Public Key')
    generate_browser = wtforms.fields.SubmitField(u'Generate In Browser')
    def validate_pubkey(form, field):
        # An empty pubkey on submit means the browser lacks <keygen> support.
        if not field.data and form.generate_browser.data:
            raise wtforms.ValidationError('It looks like your browser '
                'doesn\'t support this method. Try "Generate '
                'Certificate on Server".')
class ServerCertificateForm(CertificateForm):
    """Certificate form where the server generates the key; the result is a
    passphrase-protected PKCS#12 bundle."""
    name = wtforms.fields.TextField(u'Cert Friendly Name', [
        wtforms.validators.Length(max=64),
    ])
    passphrase = wtforms.fields.PasswordField(u'Cert Passphrase', [
        wtforms.validators.Length(max=64),
    ])
    generate_server = wtforms.fields.SubmitField(u'Generate On Server')
class RevokeCertificateForm(FloofForm):
    """Confirmation form for revoking a certificate."""
    ok = wtforms.fields.SubmitField(u'Revoke Certificate')
    cancel = wtforms.fields.SubmitField(u'Cancel')
@view_config(
    route_name='controls.certs',
    permission='auth.certificates',
    request_method='GET',
    renderer='account/controls/certificates.mako')
def certificates(context, request):
    """Render the certificate overview; the template needs no extra data."""
    return {}
@view_config(
    route_name='controls.certs.add',
    permission='auth.certificates',
    request_method='GET',
    renderer='account/controls/certificates_add.mako')
def certificates_add(context, request):
    """Render the certificate-creation page with both generation forms."""
    return dict(
        browser_form=BrowserCertificateForm(request),
        server_form=ServerCertificateForm(request),
    )
@view_config(
    route_name='controls.certs.add',
    permission='auth.certificates',
    request_method='POST',
    renderer='account/controls/certificates_add.mako')
def certificates_generate_client(context, request):
    """Create a certificate from a browser-supplied SPKAC public key.

    On success the DER-encoded certificate is returned directly so the
    browser installs it; on validation failure the add page is re-rendered
    with both forms.
    """
    form = BrowserCertificateForm(request, request.POST)
    ret = dict(
        browser_form=form,
        server_form=ServerCertificateForm(request),
    )
    if not form.validate():
        return ret
    # Generate a new certificate from UA-supplied key.
    spkac = form.pubkey.data
    try:
        cert = model.Certificate(
            request.user,
            *get_ca(request.registry.settings),
            spkac=spkac,
            days=form.days.data
        )
    except model.Certificate.InvalidSPKACError:
        form.pubkey.errors.append("Invalid SPKAC; "
            "try using a server-generated certificate")
        return ret
    request.user.certificates.append(cert)
    request.session.flash(
        u'New certificate generated.  You may need to restart '
        'your browser to begin authenticating with it.',
        level=u'success')
    # Serve the raw DER so the browser imports it as a user certificate.
    return Response(
        body=cert.public_data_der,
        headerlist=[('Content-type', 'application/x-x509-user-cert')],
    )
@view_config(
    route_name='controls.certs.generate_server',
    permission='auth.certificates',
    request_method='POST',
    renderer='account/controls/certificates.mako')
def certificates_generate_server(context, request):
    """Generate a key and certificate server-side and return a PKCS#12 file.

    The user-supplied passphrase protects the downloaded PKCS#12 bundle.
    """
    form = ServerCertificateForm(request, request.POST)
    if not form.validate():
        return dict(form=form)
    # Generate a new certificate.
    ca = get_ca(request.registry.settings)
    cert = model.Certificate(
        request.user,
        *ca,
        days=form.days.data
    )
    request.user.certificates.append(cert)
    request.session.flash(
        u'New certificate generated.',
        level=u'success')
    return Response(
        body=cert.pkcs12(form.passphrase.data, form.name.data, *ca),
        headerlist=[('Content-type', 'application/x-pkcs12')],
    )
@view_config(
    route_name='controls.certs.details',
    permission='auth.certificates',
    request_method='GET',
    renderer='account/controls/certificates_details.mako')
def certificates_details(context, request):
    """Show details for one of the current user's certificates."""
    cert = get_cert(request.matchdict['serial'], request.user)
    return dict(cert=cert)
@view_config(
    route_name='controls.certs.download',
    permission='auth.certificates',
    request_method='GET')
def certificates_download(context, request):
    """Download the PEM-encoded public half of one of the user's certs."""
    cert = get_cert(request.matchdict['serial'], request.user)
    # TODO: Redirect to the cert overview page. Somehow.
    return Response(
        body=cert.public_data,
        headerlist=[('Content-type', 'application/x-pem-file')],
    )
@view_config(
    route_name='controls.certs.revoke',
    permission='auth.certificates',
    request_method='GET',
    renderer='account/controls/certificates_revoke.mako')
def certificates_revoke(context, request):
    """Show the revocation confirmation page for one certificate.

    The unused ``id=None`` parameter was removed: Pyramid invokes views with
    ``(context, request)`` only; the serial comes from the matchdict.
    """
    form = RevokeCertificateForm(request)
    cert = get_cert(request.matchdict['serial'], request.user)
    # Warn if revoking this last valid cert would lock the user out of
    # cert-required authentication.
    will_override_auth = (
        len(request.user.valid_certificates) == 1 and
        request.user.cert_auth in [u'required', u'sensitive_required'])
    return dict(
        form=form,
        cert=cert,
        will_override_auth=will_override_auth,
    )
@view_config(
    route_name='controls.certs.revoke',
    permission='auth.certificates',
    request_method='POST')
def certificates_revoke_commit(context, request):
    """Revoke a certificate after the confirmation form is submitted."""
    form = RevokeCertificateForm(request, request.POST)
    cert = get_cert(request.matchdict['serial'], request.user)
    # Cancel button or invalid form: back to the overview without revoking.
    if not form.validate() or not form.ok.data:
        return HTTPSeeOther(location=request.route_url('controls.certs'))
    cert.revoke()
    request.session.flash(
        u"Certificate {0} revoked successfully.".format(friendly_serial(cert.serial)),
        level=u'success')
    # If the session was authenticated solely by this certificate, revoking
    # it logs the user out, so send them to the login page instead.
    principals = effective_principals(request)
    trust = [p for p in principals if p.startswith('trusted:')]
    serial = request.auth.certificate_serial
    if trust == ['trusted:cert'] and cert.serial == serial:
        # The user will be logged out by this revocation
        return HTTPSeeOther(location=request.route_url('account.login'))
    return HTTPSeeOther(location=request.route_url('controls.certs'))
|
|
import functools
import os
import sys
import unittest
from contextlib import contextmanager
from cloudbridge.cloud.factory import CloudProviderFactory
from cloudbridge.cloud.interfaces import InstanceState
from cloudbridge.cloud.interfaces import TestMockHelperMixin
from six import reraise
def parse_bool(val):
    """Interpret ``val`` as a boolean flag.

    Falsy values are False; otherwise the string form must be (case
    insensitively) ``TRUE`` or ``YES``.
    """
    if not val:
        return False
    return str(val).upper() in ('TRUE', 'YES')
@contextmanager
def cleanup_action(cleanup_func):
    """
    Context manager that runs ``cleanup_func`` after the block finishes,
    or when an exception occurs inside the block.
    Any exceptions raised by ``cleanup_func`` itself are printed and
    ignored; if the block raised, the original traceback is re-raised
    unchanged.
    :param cleanup_func: Zero-argument callable invoked when an exception
        occurs or at the end of the context block.
    Usage:
        with cleanup_action(lambda: print("Oops!")):
            do_something()
    """
    try:
        yield
    except Exception:
        ex_class, ex_val, ex_traceback = sys.exc_info()
        try:
            cleanup_func()
        except Exception as e:
            # Cleanup errors must never mask the original failure.
            print("Error during exception cleanup: {0}".format(e))
        reraise(ex_class, ex_val, ex_traceback)
    # Success path: run the cleanup once, swallowing its own errors.
    try:
        cleanup_func()
    except Exception as e:
        print("Error during cleanup: {0}".format(e))
def skipIfNoService(services):
    """
    A decorator for skipping tests if the provider does not implement a
    given service.

    :param services: Iterable of service names that must all be available
        on ``self.provider`` for the test to run.
    """
    def wrap(func):
        """The actual wrapper."""
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            provider = getattr(self, 'provider')
            if provider:
                for service in services:
                    if not provider.has_service(service):
                        self.skipTest("Skipping test because '%s' service is"
                                      " not implemented" % (service,))
            # Propagate the wrapped function's return value (the original
            # wrapper silently dropped it).
            return func(self, *args, **kwargs)
        return wrapper
    return wrap
# Per-provider test fixtures (image id, instance type, placement zone),
# each overridable through the corresponding CB_* environment variable.
TEST_DATA_CONFIG = {
    "AWSCloudProvider": {
        "image": os.environ.get('CB_IMAGE_AWS', 'ami-5ac2cd4d'),
        "instance_type": os.environ.get('CB_INSTANCE_TYPE_AWS', 't2.nano'),
        "placement": os.environ.get('CB_PLACEMENT_AWS', 'us-east-1a'),
    },
    "OpenStackCloudProvider": {
        "image": os.environ.get('CB_IMAGE_OS',
                                '842b949c-ea76-48df-998d-8a41f2626243'),
        "instance_type": os.environ.get('CB_INSTANCE_TYPE_OS', 'm1.tiny'),
        "placement": os.environ.get('CB_PLACEMENT_OS', 'nova'),
    },
    "AzureCloudProvider": {
        "placement":
            os.environ.get('CB_PLACEMENT_AZURE', 'eastus'),
        "image":
            os.environ.get('CB_IMAGE_AZURE', 'CbTest-Img'),
        "instance_type":
            os.environ.get('CB_INSTANCE_TYPE_AZURE', 'Standard_DS1_v2'),
    }
}
def get_provider_test_data(provider, key):
    """Return the test datum ``key`` for whichever provider this is, or None."""
    for provider_cls in ("AWSCloudProvider", "OpenStackCloudProvider",
                         "AzureCloudProvider"):
        if provider_cls in provider.name:
            return TEST_DATA_CONFIG.get(provider_cls).get(key)
    return None
def create_test_network(provider, name):
    """
    Create a network with one subnet, returning the network and subnet objects.
    """
    net = provider.network.create(name=name)
    # Base the subnet on the network's own CIDR when available, falling back
    # to a default address; the subnet is carved out as a /28.
    cidr_block = (net.cidr_block).split('/')[0] or '10.0.0.1'
    sn = net.create_subnet(cidr_block='{0}/28'.format(cidr_block), name=name,
                           zone=get_provider_test_data(provider, 'placement'))
    return net, sn
def delete_test_network(network):
    """
    Delete the supplied network, first deleting any contained subnets.
    """
    # cleanup_action guarantees network.delete() runs even if a subnet
    # deletion raises.
    with cleanup_action(lambda: network.delete()):
        for sn in network.subnets():
            sn.delete()
def create_test_instance(
        provider, instance_name, subnet, zone=None, launch_config=None,
        key_pair=None, security_groups=None):
    """Launch an instance using the provider's configured test image/type."""
    return provider.compute.instances.create(
        instance_name,
        get_provider_test_data(provider, 'image'),
        get_provider_test_data(provider, 'instance_type'),
        subnet=subnet,
        zone=zone,
        key_pair=key_pair,
        security_groups=security_groups,
        launch_config=launch_config)
def get_test_instance(provider, name, key_pair=None, security_groups=None,
                      subnet=None):
    """Create a test instance and block until it is ready."""
    launch_config = None
    instance = create_test_instance(
        provider,
        name,
        subnet=subnet,
        key_pair=key_pair,
        security_groups=security_groups,
        launch_config=launch_config)
    instance.wait_till_ready()
    return instance
def get_test_fixtures_folder():
    """Return the path of the test fixtures directory (with trailing slash)."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, 'fixtures/')
def delete_test_instance(instance):
    """Terminate ``instance`` (if any) and wait for it to disappear."""
    if instance:
        instance.terminate()
        # UNKNOWN is accepted because some providers stop reporting the
        # instance once it is gone.
        instance.wait_for([InstanceState.TERMINATED, InstanceState.UNKNOWN],
                          terminal_states=[InstanceState.ERROR])
def cleanup_test_resources(instance=None, network=None, security_group=None,
                           key_pair=None):
    """Clean up any combination of supplied resources."""
    # Nested cleanup_action blocks guarantee each resource is attempted even
    # if an earlier deletion fails; teardown order is instance, security
    # group, key pair, then network.
    with cleanup_action(lambda:
                        delete_test_network(network) if network else None):
        with cleanup_action(lambda: key_pair.delete() if key_pair else None):
            with cleanup_action(
                    lambda:
                    security_group.delete() if security_group else None):
                delete_test_instance(instance)
class ProviderTestBase(unittest.TestCase):
    """Base test case that lazily creates a (possibly mocked) provider."""
    # Lazily-created provider instance; reset after each test.
    _provider = None
    def setUp(self):
        # Mock providers need an explicit setup hook before each test.
        if isinstance(self.provider, TestMockHelperMixin):
            self.provider.setUpMock()
    def tearDown(self):
        if isinstance(self.provider, TestMockHelperMixin):
            self.provider.tearDownMock()
        self._provider = None
    def get_provider_wait_interval(self, provider_class):
        # Mock providers respond instantly, so polling can be immediate.
        if issubclass(provider_class, TestMockHelperMixin):
            return 0
        else:
            return 1
    def create_provider_instance(self):
        """Instantiate the provider selected via CB_TEST_PROVIDER."""
        provider_name = os.environ.get("CB_TEST_PROVIDER", "azure")
        use_mock_drivers = parse_bool(
            os.environ.get("CB_USE_MOCK_PROVIDERS", "False"))
        factory = CloudProviderFactory()
        provider_class = factory.get_provider_class(provider_name,
                                                    get_mock=use_mock_drivers)
        # NOTE(review): these Azure credentials are hard-coded in source; they
        # should come from the environment, and any live secrets here must be
        # rotated/revoked.
        config = {'default_wait_interval':
                  self.get_provider_wait_interval(provider_class),
                  'azure_subscription_id':
                  '7904d702-e01c-4826-8519-f5a25c866a96',
                  'azure_client_id':
                  '69621fe1-f59f-43de-8799-269007c76b95',
                  'azure_secret':
                  'Orcw9U5Kd4cUDntDABg0dygN32RQ4FGBYyLRaJ/BlrM=',
                  'azure_tenant':
                  '75ec242e-054d-4b22-98a9-a4602ebb6027',
                  'azure_resource_group': 'CB-TEST-TEST-RG',
                  'azure_storage_account': 'cbtestsa134',
                  'azure_vm_default_user_name': 'cbtestuser'
                  }
        return provider_class(config)
    @property
    def provider(self):
        # Create on first access; cached until tearDown resets it.
        if not self._provider:
            self._provider = self.create_provider_instance()
        return self._provider
|
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
import nltk
import xgboost as xgb
from extract_feat_base import *
from multiprocessing import Pool, Lock
import pandas as pd
import numpy as np
import os
import glob
# NOTE: a `global` statement at module level is a no-op; `grid` becomes a
# module global through the assignment below.
global grid
### FUNC ########################################################################
def create_grid():
    """Build the XGBoost hyper-parameter search grid.

    Returns a DataFrame with one row per parameter combination
    (5*2*2*2*2*2 = 160 rows). The `nround`, `rmse_cv_mean` and
    `rmse_cv_std` columns start at -1 and are filled in later by
    `do_compute`.

    Fixes over the previous version: removed the duplicate 'nround' key in
    the DataFrame construction, the unused `xgb_pars` dict built in the
    inner loop, and the dead `i` counter.
    """
    rows = []
    for min_child_weight in [0, 0.5, 1, 15, 50]:
        for eta in [0.01, 0.005]:
            for colsample_bytree in [0.5, 0.7]:
                for max_depth in [6, 15]:
                    for subsample in [0.5, 0.7]:
                        for lambdaa in [0.5, 1]:
                            rows.append({
                                'min_child_weight': min_child_weight,
                                'eta': eta,
                                'colsample_bytree': colsample_bytree,
                                'max_depth': max_depth,
                                'subsample': subsample,
                                'lambda': lambdaa,
                                'nround': -1,
                                'rmse_cv_mean': -1,
                                'rmse_cv_std': -1,
                            })
    grid = pd.DataFrame(rows)
    grid.index = range(len(grid))
    print("Grid:", str(grid.shape))
    print(grid.head())
    #grid.to_csv('base_grid_xgb_40perc.csv',index=False)
    return grid
# Build the shared grid at import time so pool workers inherit it.
grid = create_grid()
def do_compute(x):
    """Cross-validate grid row `x` with xgboost and record its CV scores.

    Runs inside a pool worker: `grid` and `dtrain` are module globals
    inherited from the parent. The writes to `grid` below only affect this
    worker's own copy, which is why results are also dumped to a per-PID
    CSV and merged afterwards by the parent process.
    """
    row = grid.iloc[x,:]
    eta = row['eta']
    min_child_weight = row['min_child_weight']
    colsample_bytree = row['colsample_bytree']
    max_depth = row['max_depth']
    subsample = row['subsample']
    _lambda = row['lambda']
    nround = row['nround']
    ####
    xgb_pars = {'min_child_weight': min_child_weight,
            'eta': eta,
            'colsample_bytree': colsample_bytree,
            'max_depth': int(max_depth),
            'subsample': subsample,
            'lambda': _lambda,
            'nthread': -1,
            'booster' : 'gbtree',
            'silent': 1,
            'eval_metric': 'rmse',
            'objective': 'reg:linear'}
    #print(xgb_pars)
    # Cross-validate with early stopping; the effective round count is the
    # number of rows in the returned evaluation history.
    model = xgb.cv(xgb_pars, dtrain, 100000,nfold = 4, early_stopping_rounds=50,maximize=False, verbose_eval=10)
    nround = model.shape[0]
    rmse_cv_mean = model['test-rmse-mean'][model.shape[0]-1]
    rmse_cv_std = model['test-rmse-std'][model.shape[0]-1]
    # Record this row's scores (worker-local; persisted via the CSV below).
    grid.loc[x,'rmse_cv_mean'] = rmse_cv_mean
    grid.loc[x,'rmse_cv_std'] = rmse_cv_std
    grid.loc[x,'nround'] = nround
    grid.to_csv('base_grid_xgb_40perc__'+str(os.getpid())+'.csv',index=False)
    return rmse_cv_mean
#################################################################################
### FEATURE ENG. ################################################################
# Column-handling spec consumed by encode_dataset() (from extract_feat_base).
# The codes (REM/CAT/NUM/LEN/DATE) presumably select the per-column encoding
# (drop, categorical encode, numeric, text length, date expansion) — confirm
# against extract_feat_base.
meta = {'target': 'deal_probability',
        'test_id': 'item_id',
        'cols': {
            'item_id': 'REM',
            'user_id': 'CAT',
            'region': 'CAT',
            'city': 'CAT',
            'parent_category_name': 'CAT',
            'category_name': 'CAT',
            'param_1': 'CAT',
            'param_2': 'CAT',
            'param_3': 'CAT',
            'title': 'LEN',
            'description': 'LEN' ,
            'price': 'NUM',
            'item_seq_number': 'NUM',
            'activation_date': 'DATE',
            'user_type': 'CAT',
            'image': 'REM',
            'image_top_1': 'NUM'
        }}
# Load raw competition data; paths are relative to the working directory.
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
print('--------------> Basic Feature Engineering ... ')
# encode_dataset comes from extract_feat_base (star import); it applies the
# per-column treatments declared in `meta` — confirm details there.
all_data , y_train = encode_dataset(train=train,test=test,meta=meta)
print(all_data.head())
print(">>>>>>> shape:",all_data.shape)
#for f in ['activation_date_is_holiday']:
#    all_data = all_data.drop(f,axis=1)
print(all_data.head())
print(">>>>>>> shape:",all_data.shape)
#################################################################################
### MODELING ####################################################################
print('--------------> Modeling ... ')
train_obs = len(y_train)
# NOTE: test_size=0.6 keeps only 40% of the training rows in `dtrain`,
# matching the "40perc" suffix of the output CSV names.
Xtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.6, random_state=1973)
dtrain = xgb.DMatrix(Xtr, label=ytr)
dvalid = xgb.DMatrix(Xv, label=yv)
dtest = xgb.DMatrix(all_data[train_obs:].values)
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
#Try different parameters! My favorite is random search :)
#################################################################################
if __name__ == '__main__':
    #print("grid created")
    #print(grid.head())
    # Define the dataset: one work item per grid row index.
    dataset = range(len(grid))
    agents = 4
    chunksize = int(len(grid)/agents)
    # Output the dataset
    #print ('Dataset: ' , str(dataset) , "chunksize:",str(chunksize))
    # Fan the grid rows out over a pool of `agents` worker processes; each
    # worker persists its partial results to a per-PID CSV (see do_compute).
    with Pool(processes=agents) as pool:
        result = pool.map(do_compute, dataset, chunksize)
    # Output the result
    print ('Result: ' + str(result) , "---type:",type(result))
    #grid.to_csv('base_grid_xgb_40perc.csv',index=False)
    print(">>> merge ...")
    # Merge: rebuild a fresh grid and fill in the rows each worker completed
    # (rmse_cv_mean >= 0 marks a computed row), then persist the union.
    agrid = create_grid()
    listing = glob.glob('./base_grid_xgb_40perc__*')
    print(listing)
    for filename in listing:
        print(filename)
        gg = pd.read_csv(filename)
        gg = gg[gg.rmse_cv_mean >=0]
        print(gg.index)
        for i in (gg.index):
            row = gg.loc[i,:]
            rmse_cv_mean = row['rmse_cv_mean']
            rmse_cv_std = row['rmse_cv_std']
            nround = row['nround']
            agrid.loc[i,'rmse_cv_mean'] = rmse_cv_mean
            agrid.loc[i,'rmse_cv_std'] = rmse_cv_std
            agrid.loc[i,'nround'] = nround
    agrid.to_csv('base_grid_xgb_40perc.csv',index=False)
|
|
from luigi_bigquery.config import get_config
from luigi_bigquery.client import ResultProxy
from luigi_bigquery.job import Job
from luigi_bigquery.targets.result import ResultTarget
from luigi_bigquery.targets.bq import DatasetTarget
from luigi_bigquery.targets.bq import TableTarget
from luigi_bigquery.targets.gcs import BucketTarget
from luigi_bigquery.targets.gcs import FileTarget
import luigi
import jinja2
import time
import bigquery
import string
import random
import logging
logger = logging.getLogger('luigi-interface')
def _id_generator(size=16, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# Dataset
class DatasetTask(luigi.Task):
    """Luigi task that ensures a single BigQuery dataset exists."""
    config = get_config()
    dataset_id = luigi.Parameter()
    def output(self):
        # Target is "complete" once the dataset is present in BigQuery.
        return DatasetTarget(self.dataset_id)
    def run(self):
        client = self.config.get_client()
        logger.info('%s: creating dataset: %s', self, self.dataset_id)
        client.create_dataset(self.dataset_id)
# Table
class TableTask(luigi.Task):
    """Luigi task that creates a BigQuery table (and its dataset) if missing.

    ``schema`` is the table schema passed straight through to the client;
    ``empty`` only affects completeness checking via TableTarget.
    """
    config = get_config()
    dataset_id = luigi.Parameter()
    table_id = luigi.Parameter()
    schema = luigi.Parameter(is_list=True, default=[], significant=False)
    empty = luigi.BooleanParameter(default=False, significant=False)
    def requires(self):
        # The containing dataset must exist before the table can be created.
        return DatasetTask(self.dataset_id)
    def output(self):
        return TableTarget(self.dataset_id, self.table_id, self.schema, empty=self.empty)
    def run(self):
        client = self.config.get_client()
        # BUG FIX: was `self.datasset_id` (typo), which raised AttributeError
        # whenever the table actually had to be created.
        logger.info('%s: creating table: %s.%s', self, self.dataset_id, self.table_id)
        client.create_table(self.dataset_id, self.table_id, self.schema)
# Query
class QueryTimeout(Exception):
    """Raised when a BigQuery job does not finish within the task's timeout."""
class Query(luigi.Task):
    """Base task that renders and runs a BigQuery query.

    Subclasses either override :meth:`query` or set ``source`` to the name of
    a Jinja2 template shipped with their package; ``variables`` are merged
    into the template rendering context.
    """
    config = get_config()
    debug = False
    # Seconds to wait for the job; falsy means wait forever.
    timeout = 3600
    # Optional template file name rendered by load_query().
    source = None
    # NOTE(review): class-level mutable default, shared by all subclasses
    # unless they override it; kept as-is for compatibility.
    variables = {}
    def query(self):
        """Return the SQL to run; must be overridden unless ``source`` is set."""
        return NotImplemented()
    def load_query(self, source):
        """Render the Jinja2 template *source* with the task and ``variables``."""
        env = jinja2.Environment(loader=jinja2.PackageLoader(self.__module__, '.'))
        template = env.get_template(source)
        return template.render(task=self, **self.variables)
    def run_query(self, query):
        """Submit *query*, poll until completion or timeout, return a ResultProxy.

        Raises QueryTimeout if the job is still running after ``timeout``
        seconds.
        """
        client = self.config.get_client()
        logger.info("%s: query: %s", self, query)
        job_id, _ = client.query(query)
        logger.info("%s: bigquery.job.id: %s", self, job_id)
        complete, result_size = client.check_job(job_id)
        # Cleanup: the original wrapped this loop in a no-op `try/except: raise`.
        deadline = time.time() + self.timeout if self.timeout else None
        while not complete:
            if deadline and time.time() > deadline:
                raise QueryTimeout('{0} timed out'.format(self))
            time.sleep(5)
            complete, result_size = client.check_job(job_id)
        logger.info("%s: bigquery.job.result: job_id=%s result_size=%d", self, job_id, result_size)
        return ResultProxy(Job(client, job_id))
    def run(self):
        query = self.load_query(self.source) if self.source else self.query()
        result = self.run_query(query)
        target = self.output()
        if target and isinstance(target, ResultTarget):
            target.save_result_state(result)
        if self.debug:
            import pandas as pd
            TERMINAL_WIDTH = 120
            pd.options.display.width = TERMINAL_WIDTH
            # Parenthesized single-argument prints behave identically under
            # Python 2 (print statement) and Python 3 (print function).
            print('-' * TERMINAL_WIDTH)
            print('Query result:')
            print(result.to_dataframe())
            print('-' * TERMINAL_WIDTH)
class QueryTable(Query):
    """Query task that writes its result into a BigQuery table."""
    create_disposition = bigquery.JOB_CREATE_IF_NEEDED
    write_disposition = bigquery.JOB_WRITE_EMPTY
    def requires(self):
        # The destination dataset must exist before writing.
        return DatasetTask(self.dataset())
    def output(self):
        return TableTarget(self.dataset(), self.table(), append=self._append())
    def dataset(self):
        """Return the destination dataset id; must be overridden."""
        return NotImplemented()
    def table(self):
        """Return the destination table id; must be overridden."""
        return NotImplemented()
    def _append(self):
        # Appending is implied by the WRITE_APPEND disposition.
        return self.write_disposition == bigquery.JOB_WRITE_APPEND
    def save_as_table(self, query):
        """Run *query*, writing into dataset().table(); poll until the job is done.

        Raises QueryTimeout if the job is still running after ``timeout`` seconds.
        """
        client = self.config.get_client()
        logger.info("%s: query: %s", self, query)
        job = client.write_to_table(
            query,
            dataset=self.dataset(),
            table=self.table(),
            create_disposition=self.create_disposition,
            write_disposition=self.write_disposition,
            allow_large_results=True)
        job_id = job['jobReference'].get('jobId')
        logger.info("%s: bigquery.job.id: %s", self, job_id)
        complete, result_size = client.check_job(job_id)
        # Cleanup: removed the original's unused `result = self.output()` and
        # the no-op `try/except: raise` around this polling loop.
        deadline = time.time() + self.timeout if self.timeout else None
        while not complete:
            if deadline and time.time() > deadline:
                raise QueryTimeout('{0} timed out'.format(self))
            time.sleep(5)
            complete, result_size = client.check_job(job_id)
        logger.info("%s: bigquery.job.result: job_id=%s result_size=%d", self, job_id, result_size)
        return ResultProxy(Job(client, job_id))
    def run(self):
        query = self.load_query(self.source) if self.source else self.query()
        self.save_as_table(query)
class QueryToGCS(QueryTable):
    """Query task that exports its result to Google Cloud Storage.

    When ``use_temporary_table`` is set, the query result is staged in a
    randomly named dataset/table which is dropped again after the export.
    """
    compression = 'NONE'  # or GZIP
    format = 'CSV'  # or NEWLINE_DELIMITED_JSON
    print_header = True
    use_temporary_table = False
    # NOTE(review): evaluated once at class-definition time, so every instance
    # (and subclass) in this process shares the same temporary name.
    _random_id = 'tmp_{}'.format(_id_generator())
    def dataset(self):
        if self.use_temporary_table:
            return self._random_id
        else:
            return NotImplemented()
    def table(self):
        if self.use_temporary_table:
            return self._random_id
        else:
            return NotImplemented()
    def output(self):
        return FileTarget(self.bucket(), self.path())
    def bucket(self):
        """Return the destination GCS bucket; must be overridden."""
        return NotImplemented()
    def path(self):
        """Return the destination object path; must be overridden."""
        return NotImplemented()
    def export_to_gcs(self):
        """Export dataset().table() to the URI of the FileTarget output."""
        result = self.output()
        client = self.config.get_client()
        logger.info("%s: export %s.%s to %s", self, self.dataset(), self.table(), result.uri())
        job = client.export_data_to_uris(
            destination_uris=[result.uri()],
            dataset=self.dataset(),
            table=self.table(),
            compression=self.compression,
            destination_format=self.format,
            print_header=self.print_header)
        job_id = job['jobReference'].get('jobId')
        logger.info("%s: bigquery.job.id: %s", self, job_id)
        # Cleanup: the original bound the return value to an unused variable
        # inside a no-op `try/except: raise`; wait_for_job raises on its own.
        client.wait_for_job(job, timeout=3600)
    def _cleanup(self):
        # Drop the temporary staging dataset (and its table) if one was used.
        if self.use_temporary_table:
            client = self.config.get_client()
            client.delete_dataset(self.dataset(), delete_contents=True)
    def run(self):
        query = self.load_query(self.source) if self.source else self.query()
        try:
            self.save_as_table(query)
            self.export_to_gcs()
        finally:
            self._cleanup()
|
|
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import datetime
import time
from unittest import TestCase, skipUnless
from uuid import uuid1, uuid4
import uuid
from cassandra.cluster import Session
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.connection import NOT_SET
import mock
from cassandra.cqlengine import functions
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
from cassandra.cqlengine import query
from datetime import timedelta
from datetime import tzinfo
from cassandra.cqlengine import statements
from cassandra.cqlengine import operators
from cassandra.cqlengine.connection import get_session
from tests.integration import PROTOCOL_VERSION
class TzOffset(tzinfo):
    """Minimal implementation of a timezone offset to help testing with timezone
    aware datetimes.
    """
    def __init__(self, offset):
        # *offset* is the UTC offset in whole hours.
        self._offset = timedelta(hours=offset)
    def utcoffset(self, dt):
        return self._offset
    def tzname(self, dt):
        # BUG FIX: timedelta has no `.hours` attribute, so the original
        # `self._offset.hours` raised AttributeError whenever tzname() was
        # called (e.g. by str()/repr() of an aware datetime).  Format the
        # timedelta itself instead.
        return 'TzOffset: {}'.format(self._offset)
    def dst(self, dt):
        return timedelta(0)
class TestModel(Model):
    """Fixture model: composite primary key (test_id partition, attempt_id clustering)."""
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(primary_key=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer()
class IndexedTestModel(Model):
    """Fixture model: single-column primary key with secondary indexes on
    attempt_id and test_result, used by the index-query tests."""
    test_id = columns.Integer(primary_key=True)
    attempt_id = columns.Integer(index=True)
    description = columns.Text()
    expected_result = columns.Integer()
    test_result = columns.Integer(index=True)
class TestMultiClusteringModel(Model):
    """Fixture model with two clustering columns (two, three) after the
    partition key (one), for multi-column ordering tests."""
    one = columns.Integer(primary_key=True)
    two = columns.Integer(primary_key=True)
    three = columns.Integer(primary_key=True)
class TestQuerySetOperation(BaseCassEngTestCase):
    """Unit-level checks on queryset construction: filter parsing, query
    expressions, immutability.  These inspect the private ``_where``/``_limit``
    state rather than hitting the database."""
    def test_query_filter_parsing(self):
        """
        Tests that the queryset filter method parses its kwargs properly
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1
    def test_query_expression_parsing(self):
        """ Tests that query expressions are evaluated properly """
        query1 = TestModel.filter(TestModel.test_id == 5)
        assert len(query1._where) == 1
        op = query1._where[0]
        assert isinstance(op, statements.WhereClause)
        assert isinstance(op.operator, operators.EqualsOperator)
        assert op.value == 5
        query2 = query1.filter(TestModel.expected_result >= 1)
        assert len(query2._where) == 2
        op = query2._where[1]
        self.assertIsInstance(op, statements.WhereClause)
        self.assertIsInstance(op.operator, operators.GreaterThanOrEqualOperator)
        assert op.value == 1
    def test_using_invalid_column_names_in_filter_kwargs_raises_error(self):
        """
        Tests that using invalid or nonexistent column names for filter args raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(nonsense=5)
    def test_using_nonexistant_column_names_in_query_args_raises_error(self):
        """
        Tests that using invalid or nonexistent columns for query args raises an error
        """
        with self.assertRaises(AttributeError):
            TestModel.objects(TestModel.nonsense == 5)
    def test_using_non_query_operators_in_query_args_raises_error(self):
        """
        Tests that providing query args that are not query operator instances raises an error
        """
        with self.assertRaises(query.QueryException):
            TestModel.objects(5)
    def test_queryset_is_immutable(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        # filter() must not mutate query1's where clauses.
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        assert len(query1._where) == 1
    def test_queryset_limit_immutability(self):
        """
        Tests that calling a queryset function that changes its state returns a new queryset with the same limit
        """
        query1 = TestModel.objects(test_id=5).limit(1)
        assert query1._limit == 1
        query2 = query1.filter(expected_result__gte=1)
        assert query2._limit == 1
        query3 = query1.filter(expected_result__gte=1).limit(2)
        assert query1._limit == 1
        assert query3._limit == 2
    def test_the_all_method_duplicates_queryset(self):
        """
        Tests that calling all on a queryset with previously defined filters duplicates the queryset
        """
        query1 = TestModel.objects(test_id=5)
        assert len(query1._where) == 1
        query2 = query1.filter(expected_result__gte=1)
        assert len(query2._where) == 2
        query3 = query2.all()
        assert query3 == query2
    def test_defining_only_and_defer_fails(self):
        """
        NOTE(review): empty stub -- intended to test that adding fields to
        only/defer (or doing so more than once) fails, but no body was written.
        """
    def test_defining_only_or_defer_on_nonexistant_fields_fails(self):
        """
        NOTE(review): empty stub -- intended to test that only/defer on
        nonexistent fields raises, but no body was written.
        """
class BaseQuerySetUsage(BaseCassEngTestCase):
    """Shared fixture for the queryset integration tests below: recreates the
    test tables and seeds exactly 12 rows each into TestModel and
    IndexedTestModel.  The counts asserted by the subclasses depend on this
    exact seed data."""
    @classmethod
    def setUpClass(cls):
        super(BaseQuerySetUsage, cls).setUpClass()
        # Drop first so stale schema/data from a previous run can't leak in.
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        sync_table(TestModel)
        sync_table(IndexedTestModel)
        sync_table(TestMultiClusteringModel)
        TestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=1, description='try2', expected_result=10, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=2, description='try3', expected_result=15, test_result=30)
        TestModel.objects.create(test_id=0, attempt_id=3, description='try4', expected_result=20, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=0, description='try5', expected_result=5, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=1, description='try6', expected_result=10, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=2, description='try7', expected_result=15, test_result=25)
        TestModel.objects.create(test_id=1, attempt_id=3, description='try8', expected_result=20, test_result=20)
        TestModel.objects.create(test_id=2, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=2, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=2, attempt_id=3, description='try12', expected_result=75, test_result=45)
        IndexedTestModel.objects.create(test_id=0, attempt_id=0, description='try1', expected_result=5, test_result=30)
        IndexedTestModel.objects.create(test_id=1, attempt_id=1, description='try2', expected_result=10, test_result=30)
        IndexedTestModel.objects.create(test_id=2, attempt_id=2, description='try3', expected_result=15, test_result=30)
        IndexedTestModel.objects.create(test_id=3, attempt_id=3, description='try4', expected_result=20, test_result=25)
        IndexedTestModel.objects.create(test_id=4, attempt_id=0, description='try5', expected_result=5, test_result=25)
        IndexedTestModel.objects.create(test_id=5, attempt_id=1, description='try6', expected_result=10, test_result=25)
        IndexedTestModel.objects.create(test_id=6, attempt_id=2, description='try7', expected_result=15, test_result=25)
        IndexedTestModel.objects.create(test_id=7, attempt_id=3, description='try8', expected_result=20, test_result=20)
        IndexedTestModel.objects.create(test_id=8, attempt_id=0, description='try9', expected_result=50, test_result=40)
        IndexedTestModel.objects.create(test_id=9, attempt_id=1, description='try10', expected_result=60,
                                        test_result=40)
        IndexedTestModel.objects.create(test_id=10, attempt_id=2, description='try11', expected_result=70,
                                        test_result=45)
        IndexedTestModel.objects.create(test_id=11, attempt_id=3, description='try12', expected_result=75,
                                        test_result=45)
    @classmethod
    def tearDownClass(cls):
        super(BaseQuerySetUsage, cls).tearDownClass()
        drop_table(TestModel)
        drop_table(IndexedTestModel)
        drop_table(TestMultiClusteringModel)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
    """Integration tests for count(), iteration, and get() against the 12-row
    seed data created in BaseQuerySetUsage."""
    def test_count(self):
        """ Tests that adding filtering statements affects the count query as expected """
        assert TestModel.objects.count() == 12
        q = TestModel.objects(test_id=0)
        assert q.count() == 4
    def test_query_expression_count(self):
        """ Tests that adding query statements affects the count query as expected """
        assert TestModel.objects.count() == 12
        q = TestModel.objects(TestModel.test_id == 0)
        assert q.count() == 4
    def test_query_limit_count(self):
        """ Tests that adding query with a limit affects the count as expected """
        assert TestModel.objects.count() == 12
        q = TestModel.objects(TestModel.test_id == 0).limit(2)
        result = q.count()
        self.assertEqual(2, result)
    def test_iteration(self):
        """ Tests that iterating over a query set pulls back all of the expected results """
        q = TestModel.objects(test_id=0)
        #tuple of expected attempt_id, expected_result values
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
        # test with regular filtering
        q = TestModel.objects(attempt_id=3).allow_filtering()
        assert len(q) == 3
        #tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
        # test with query method
        q = TestModel.objects(TestModel.attempt_id == 3).allow_filtering()
        assert len(q) == 3
        #tuple of expected test_id, expected_result values
        compare_set = set([(0, 20), (1, 20), (2, 75)])
        for t in q:
            val = t.test_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
    def test_multiple_iterations_work_properly(self):
        """ Tests that iterating over a query set more than once works """
        # test with both the filtering method and the query method
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            #tuple of expected attempt_id, expected_result values
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0
            #try it again
            compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
            for t in q:
                val = t.attempt_id, t.expected_result
                assert val in compare_set
                compare_set.remove(val)
            assert len(compare_set) == 0
    def test_multiple_iterators_are_isolated(self):
        """
        tests that the use of one iterator does not affect the behavior of another
        """
        for q in (TestModel.objects(test_id=0), TestModel.objects(TestModel.test_id == 0)):
            q = q.order_by('attempt_id')
            expected_order = [0, 1, 2, 3]
            iter1 = iter(q)
            iter2 = iter(q)
            # Advance both iterators in lockstep; each must see the full order.
            for attempt_id in expected_order:
                assert next(iter1).attempt_id == attempt_id
                assert next(iter2).attempt_id == attempt_id
    def test_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.objects.get(test_id=0, attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(test_id=0, attempt_id=0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(test_id=0)
        m = q.get(attempt_id=0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
    def test_query_expression_get_success_case(self):
        """
        Tests that the .get() method works on new and existing querysets
        """
        m = TestModel.get(TestModel.test_id == 0, TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(TestModel.test_id == 0, TestModel.attempt_id == 0)
        m = q.get()
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
        q = TestModel.objects(TestModel.test_id == 0)
        m = q.get(TestModel.attempt_id == 0)
        assert isinstance(m, TestModel)
        assert m.test_id == 0
        assert m.attempt_id == 0
    def test_get_doesnotexist_exception(self):
        """
        Tests that get calls that don't return a result raises a DoesNotExist error
        """
        with self.assertRaises(TestModel.DoesNotExist):
            TestModel.objects.get(test_id=100)
    def test_get_multipleobjects_exception(self):
        """
        Tests that get calls that return multiple results raise a MultipleObjectsReturned error
        """
        with self.assertRaises(TestModel.MultipleObjectsReturned):
            TestModel.objects.get(test_id=1)
    def test_allow_filtering_flag(self):
        """
        NOTE(review): empty stub -- the allow_filtering flag itself is never
        asserted on; it is only exercised indirectly in test_iteration above.
        """
def test_non_quality_filtering():
    """Non-equality (>) filtering on a clustering key with allow_filtering
    returns only the matching rows.

    NOTE(review): module-level function with bare asserts (no TestCase) --
    presumably collected by the test runner directly; confirm it runs.
    """
    class NonEqualityFilteringModel(Model):
        example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
        sequence_id = columns.Integer(primary_key=True)  # sequence_id is a clustering key
        example_type = columns.Integer(index=True)
        created_at = columns.DateTime()
    drop_table(NonEqualityFilteringModel)
    sync_table(NonEqualityFilteringModel)
    # setup table, etc.
    NonEqualityFilteringModel.create(sequence_id=1, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=3, example_type=0, created_at=datetime.now())
    NonEqualityFilteringModel.create(sequence_id=5, example_type=1, created_at=datetime.now())
    # Only sequence_id=5 is strictly greater than 3.
    qA = NonEqualityFilteringModel.objects(NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
    num = qA.count()
    assert num == 1, num
class TestQuerySetOrdering(BaseQuerySetUsage):
    """Integration tests for order_by(): clustering-column ordering works,
    everything else (partition keys, regular/indexed columns) raises."""
    def test_order_by_success_case(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect
        # '-' prefix reverses the ordering.
        q = q.order_by('-attempt_id')
        expected_order.reverse()
        for model, expect in zip(q, expected_order):
            assert model.attempt_id == expect
    def test_ordering_by_non_second_primary_keys_fail(self):
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id=0).order_by('test_id')
        # kwarg filtering
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(TestModel.test_id == 0).order_by('test_id')
    def test_ordering_by_non_primary_keys_fails(self):
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id=0).order_by('description')
    def test_ordering_on_indexed_columns_fails(self):
        with self.assertRaises(query.QueryException):
            q = IndexedTestModel.objects(test_id=0).order_by('attempt_id')
    def test_ordering_on_multiple_clustering_columns(self):
        TestMultiClusteringModel.create(one=1, two=1, three=4)
        TestMultiClusteringModel.create(one=1, two=1, three=2)
        TestMultiClusteringModel.create(one=1, two=1, three=5)
        TestMultiClusteringModel.create(one=1, two=1, three=1)
        TestMultiClusteringModel.create(one=1, two=1, three=3)
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('-two', '-three')
        assert [r.three for r in results] == [5, 4, 3, 2, 1]
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two', 'three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
        # Chained order_by calls accumulate to the same clustering order.
        results = TestMultiClusteringModel.objects.filter(one=1, two=1).order_by('two').order_by('three')
        assert [r.three for r in results] == [1, 2, 3, 4, 5]
class TestQuerySetSlicing(BaseQuerySetUsage):
    """Integration tests for indexing and slicing evaluated querysets."""
    def test_out_of_range_index_raises_error(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        with self.assertRaises(IndexError):
            q[10]
    def test_array_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for i in range(len(q)):
            assert q[i].attempt_id == expected_order[i]
    def test_negative_indexing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        assert q[-1].attempt_id == expected_order[-1]
        assert q[-2].attempt_id == expected_order[-2]
    def test_slicing_works_properly(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[1:3], expected_order[1:3]):
            assert model.attempt_id == expect
    def test_negative_slicing(self):
        q = TestModel.objects(test_id=0).order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        for model, expect in zip(q[-3:], expected_order[-3:]):
            assert model.attempt_id == expect
        for model, expect in zip(q[:-1], expected_order[:-1]):
            assert model.attempt_id == expect
class TestQuerySetValidation(BaseQuerySetUsage):
    """Integration tests that invalid where-clauses are rejected at execution
    time (validation happens when the queryset is iterated, not constructed)."""
    def test_primary_key_or_index_must_be_specified(self):
        """
        Tests that queries that don't have an equals relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_result=25)
            list([i for i in q])
    def test_primary_key_or_index_must_have_equal_relation_filter(self):
        """
        Tests that queries that don't have non equal (>,<, etc) relation to a primary key or indexed field fail
        """
        with self.assertRaises(query.QueryException):
            q = TestModel.objects(test_id__gt=0)
            list([i for i in q])
    def test_indexed_field_can_be_queried(self):
        """
        Tests that queries on an indexed field will work without any primary key relations specified
        """
        q = IndexedTestModel.objects(test_result=25)
        assert q.count() == 4
class TestQuerySetDelete(BaseQuerySetUsage):
    """Integration tests for queryset delete()."""
    def test_delete(self):
        # Add a fourth partition, delete it, and verify the seed data survives.
        TestModel.objects.create(test_id=3, attempt_id=0, description='try9', expected_result=50, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=1, description='try10', expected_result=60, test_result=40)
        TestModel.objects.create(test_id=3, attempt_id=2, description='try11', expected_result=70, test_result=45)
        TestModel.objects.create(test_id=3, attempt_id=3, description='try12', expected_result=75, test_result=45)
        assert TestModel.objects.count() == 16
        assert TestModel.objects(test_id=3).count() == 4
        TestModel.objects(test_id=3).delete()
        assert TestModel.objects.count() == 12
        assert TestModel.objects(test_id=3).count() == 0
    def test_delete_without_partition_key(self):
        """ Tests that attempting to delete a model without defining a partition key fails """
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()
    def test_delete_without_any_where_args(self):
        """ Tests that attempting to delete a whole table without any arguments will fail """
        # NOTE(review): body is identical to test_delete_without_partition_key
        # above and does filter on attempt_id, contradicting the docstring --
        # presumably it should call delete() with no where args; confirm the
        # intended API before changing.
        with self.assertRaises(query.QueryException):
            TestModel.objects(attempt_id=0).delete()
class TimeUUIDQueryModel(Model):
    """Fixture model for the Min/MaxTimeUUID tests: UUID partition key with a
    TimeUUID clustering column."""
    partition = columns.UUID(primary_key=True)
    time = columns.TimeUUID(primary_key=True)
    data = columns.Text(required=False)
class TestMinMaxTimeUUIDFunctions(BaseCassEngTestCase):
    """Integration tests for the MinTimeUUID/MaxTimeUUID query functions,
    including timezone-aware datetime handling."""
    @classmethod
    def setUpClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).setUpClass()
        sync_table(TimeUUIDQueryModel)
    @classmethod
    def tearDownClass(cls):
        super(TestMinMaxTimeUUIDFunctions, cls).tearDownClass()
        drop_table(TimeUUIDQueryModel)
    def test_tzaware_datetime_support(self):
        """Test that using timezone aware datetime instances works with the
        MinTimeUUID/MaxTimeUUID functions.
        """
        pk = uuid4()
        # Same instant expressed in UTC and at UTC+3.
        midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))
        midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))
        # Assert pre-condition that we have the same logical point in time
        assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple()
        assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple()
        # Three rows straddling the midpoint: one minute before, at, and after.
        TimeUUIDQueryModel.create(
            partition=pk,
            time=columns.TimeUUID.from_datetime(midpoint_utc - timedelta(minutes=1)),
            data='1')
        TimeUUIDQueryModel.create(
            partition=pk,
            time=columns.TimeUUID.from_datetime(midpoint_utc),
            data='2')
        TimeUUIDQueryModel.create(
            partition=pk,
            time=columns.TimeUUID.from_datetime(midpoint_utc + timedelta(minutes=1)),
            data='3')
        # Both timezone representations must select the same rows.
        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))]
        assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))]
        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))]
        assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))]
    def test_success_case(self):
        """ Test that the min and max time uuid functions work as expected """
        pk = uuid4()
        # Sleeps keep the uuid1 timestamps strictly ordered around `midpoint`.
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='1')
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='2')
        time.sleep(0.2)
        midpoint = datetime.utcnow()
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='3')
        time.sleep(0.2)
        TimeUUIDQueryModel.create(partition=pk, time=uuid1(), data='4')
        time.sleep(0.2)
        # test kwarg filtering
        q = TimeUUIDQueryModel.filter(partition=pk, time__lte=functions.MaxTimeUUID(midpoint))
        q = [d for d in q]
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas
        q = TimeUUIDQueryModel.filter(partition=pk, time__gte=functions.MinTimeUUID(midpoint))
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas
        # test query expression filtering
        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint)
        )
        q = [d for d in q]
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '1' in datas
        assert '2' in datas
        q = TimeUUIDQueryModel.filter(
            TimeUUIDQueryModel.partition == pk,
            TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint)
        )
        assert len(q) == 2
        datas = [d.data for d in q]
        assert '3' in datas
        assert '4' in datas
class TestInOperator(BaseQuerySetUsage):
    """Integration tests for the IN operator (kwarg and expression forms).
    Partitions 0 and 1 hold 4 seed rows each, hence the expected count of 8."""
    def test_kwarg_success_case(self):
        """ Tests the in operator works with the kwarg query method """
        q = TestModel.filter(test_id__in=[0, 1])
        assert q.count() == 8
    def test_query_expression_success_case(self):
        """ Tests the in operator works with the query expression query method """
        q = TestModel.filter(TestModel.test_id.in_([0, 1]))
        assert q.count() == 8
class TestValuesList(BaseQuerySetUsage):
    """Integration tests for values_list(), including the flat=True form."""
    def test_values_list(self):
        q = TestModel.objects.filter(test_id=0, attempt_id=1)
        item = q.values_list('test_id', 'attempt_id', 'description', 'expected_result', 'test_result').first()
        assert item == [0, 1, 'try2', 10, 30]
        # flat=True with a single column unwraps the per-row list.
        item = q.values_list('expected_result', flat=True).first()
        assert item == 10
class TestObjectsProperty(BaseQuerySetUsage):
    """Checks that Model.objects yields a fresh queryset on every access."""
    def test_objects_property_returns_fresh_queryset(self):
        # Evaluating one access must not populate a cache seen by the next.
        assert TestModel.objects._result_cache is None
        len(TestModel.objects) # evaluate queryset
        assert TestModel.objects._result_cache is None
@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0")
def test_paged_result_handling():
    """Slicing a queryset must transparently fetch all pages when the session
    fetch size is smaller than the result set (regression test)."""
    # addresses #225
    class PagingTest(Model):
        id = columns.Integer(primary_key=True)
        val = columns.Integer()
    sync_table(PagingTest)
    PagingTest.create(id=1, val=1)
    PagingTest.create(id=2, val=2)
    session = get_session()
    # Force one row per page so the [:] slice needs a second fetch.
    with mock.patch.object(session, 'default_fetch_size', 1):
        results = PagingTest.objects()[:]
    assert len(results) == 2
class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
    """Verifies the timeout a queryset passes to Session.execute: the NOT_SET
    sentinel by default, or whatever timeout() was given (float or None)."""
    def test_default_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            list(TestModel.objects())
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET)
    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            list(TestModel.objects().timeout(0.5))
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5)
    def test_none_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            list(TestModel.objects().timeout(None))
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None)
class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
    """Verifies timeout propagation for DML (save), and that per-query
    timeouts cannot be combined with batch queries in either order."""
    def setUp(self):
        self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
        super(DMLQueryTimeoutTestCase, self).setUp()
    def test_default_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            self.model.save()
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=NOT_SET)
    def test_float_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            self.model.timeout(0.5).save()
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=0.5)
    def test_none_timeout(self):
        with mock.patch.object(Session, 'execute', autospec=True) as mock_execute:
            self.model.timeout(None).save()
            mock_execute.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, timeout=None)
    def test_timeout_then_batch(self):
        # A batch owns the timeout, so setting one first must be rejected.
        b = query.BatchQuery()
        m = self.model.timeout(None)
        with self.assertRaises(AssertionError):
            m.batch(b)
    def test_batch_then_timeout(self):
        b = query.BatchQuery()
        m = self.model.batch(b)
        with self.assertRaises(AssertionError):
            m.timeout(0.5)
|
|
#!/usr/bin/env python
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
'''Run two retrace instances in parallel, comparing generated snapshots.
'''
import math
import optparse
import os.path
import subprocess
import platform
import sys
from PIL import Image
from snapdiff import Comparer
from highlight import AutoHighlighter
import jsondiff
# Null file, to use when we're not interested in subprocesses output
NULL = open(os.path.devnull, 'wb')
class RetraceRun:
    """Wrapper around a running retrace subprocess that streams PNM
    snapshots on its stdout (see Retracer.snapshot)."""

    def __init__(self, process):
        # process: subprocess.Popen created with stdout=subprocess.PIPE
        self.process = process

    def nextSnapshot(self):
        """Read the next snapshot from the pipe.

        Returns (image, callNo) where callNo is parsed from the PNM
        comment line, or (None, None) once the stream is exhausted.
        """
        image, comment = read_pnm(self.process.stdout)
        if image is None:
            return None, None
        callNo = int(comment.strip())
        return image, callNo

    def terminate(self):
        """Kill the subprocess, swallowing the race where it already exited."""
        try:
            self.process.terminate()
        except OSError:
            # Avoid http://bugs.python.org/issue14252
            pass
class Retracer:
    """Builds and launches command lines for a retrace executable
    (e.g. glretrace), optionally under a custom environment."""

    def __init__(self, retraceExe, args, env=None):
        # retraceExe: path/name of the retrace binary
        # args: trailing arguments (trace file, driver options, ...)
        # env: optional environment dict; None inherits the parent env
        self.retraceExe = retraceExe
        self.args = args
        self.env = env

    def _retrace(self, args, stdout=subprocess.PIPE):
        """Spawn the retrace binary with `args` inserted before self.args.

        Echoes the effective environment overrides and command line to
        stderr for diagnostics; exits the whole program on spawn failure.
        """
        cmd = [
            self.retraceExe,
        ] + args + self.args
        if self.env:
            for name, value in self.env.iteritems():
                sys.stderr.write('%s=%s ' % (name, value))
        sys.stderr.write(' '.join(cmd) + '\n')
        try:
            # stderr is discarded (NULL) -- only snapshots/state on stdout matter.
            return subprocess.Popen(cmd, env=self.env, stdout=stdout, stderr=NULL)
        except OSError, ex:
            sys.stderr.write('error: failed to execute %s: %s\n' % (cmd[0], ex.strerror))
            sys.exit(1)

    def retrace(self, args):
        """Run a plain retrace to completion and return its exit code."""
        p = self._retrace([])
        p.wait()
        return p.returncode

    def snapshot(self, call_nos):
        """Start a retrace that writes snapshots for `call_nos` (a callset
        expression) to stdout; returns a RetraceRun wrapping the process."""
        process = self._retrace([
            '-s', '-',
            '-S', call_nos,
        ])
        return RetraceRun(process)

    def dump_state(self, call_no):
        '''Get the state dump at the specified call no.'''
        p = self._retrace([
            '-D', str(call_no),
        ])
        state = jsondiff.load(p.stdout)
        p.wait()
        # Only the 'parameters' section is compared by diff_state.
        return state.get('parameters', {})

    def diff_state(self, ref_call_no, src_call_no, stream):
        '''Compare the state between two calls.'''
        ref_state = self.dump_state(ref_call_no)
        src_state = self.dump_state(src_call_no)
        stream.flush()
        differ = jsondiff.Differ(stream)
        differ.visit(ref_state, src_state)
        stream.write('\n')
def read_pnm(stream):
    '''Read a PNM from the stream, and return the image object, and the comment.

    Returns (None, None) on EOF.  8-bit images (P5/P6) come back as PIL
    Image objects; floating-point images (Pf/PF/PX) come back as numpy
    arrays of shape (height, width, channels).
    '''
    magic = stream.readline()
    if not magic:
        # EOF: the retrace process produced no further snapshots.
        return None, None
    magic = magic.rstrip()
    if magic == 'P5':
        # 8-bit grayscale
        channels = 1
        bytesPerChannel = 1
        mode = 'L'
    elif magic == 'P6':
        # 8-bit RGB
        channels = 3
        bytesPerChannel = 1
        mode = 'RGB'
    elif magic == 'Pf':
        # 32-bit float grayscale
        channels = 1
        bytesPerChannel = 4
        mode = 'R'
    elif magic == 'PF':
        # 32-bit float RGB
        channels = 3
        bytesPerChannel = 4
        mode = 'RGB'
    elif magic == 'PX':
        # 4-channel 32-bit float; presumably an apitrace extension.
        # NOTE(review): mode 'RGB' is unused on the numpy path taken for
        # 4-byte channels below, so the 3/4 channel mismatch is harmless here.
        channels = 4
        bytesPerChannel = 4
        mode = 'RGB'
    else:
        raise Exception('Unsupported magic `%s`' % magic)
    # Accumulate '#' comment lines (used to carry the call number).
    comment = ''
    line = stream.readline()
    while line.startswith('#'):
        comment += line[1:]
        line = stream.readline()
    width, height = map(int, line.strip().split())
    maximum = int(stream.readline().strip())
    # Sanity-check the maxval declared by the header against the depth.
    if bytesPerChannel == 1:
        assert maximum == 255
    else:
        assert maximum == 1
    data = stream.read(height * width * channels * bytesPerChannel)
    if bytesPerChannel == 4:
        # Image magic only supports single channel floating point images, so
        # represent the image as numpy arrays
        import numpy
        pixels = numpy.fromstring(data, dtype=numpy.float32)
        pixels.resize((height, width, channels))
        return pixels, comment
    image = Image.frombuffer(mode, (width, height), data, 'raw', mode, 0, 1)
    return image, comment
def dumpNumpyImage(output, pixels, filename):
    """Save a (height, width, channels) float image array as a PNG file.

    `pixels` values are nominally in [0, 1]; they are scaled to 8-bit,
    padded up to 3 channels when necessary, and written to `filename`
    via PIL.  `output` is only used by the disabled debug dump below.
    """
    height, width, channels = pixels.shape
    import numpy
    # Scale to 8-bit and clamp out-of-range values.
    pixels = (pixels*255).clip(0, 255).astype('uint8')
    if 0:
        # XXX: Doesn't work somehow
        im = Image.fromarray(pixels)
    else:
        # http://code.activestate.com/recipes/577591-conversion-of-pil-image-and-numpy-array/
        pixels = pixels.reshape(height*width, channels)
        if channels == 4:
            mode = 'RGBA'
        else:
            if channels < 3:
                # Pad missing channels with 255 so PIL always receives a
                # 3-channel RGB buffer.
                # BUG FIX: the original referenced undefined names `arr` and
                # `heigth` (NameError), and never updated `channels`, so the
                # assert below could not pass; pad `pixels` and set channels.
                pixels = numpy.c_[pixels,
                                  255*numpy.ones((height*width, 3 - channels),
                                                 numpy.uint8)]
                channels = 3
            assert channels == 3
            mode = 'RGB'
        im = Image.frombuffer(mode, (width, height), pixels.tostring(), 'raw', mode, 0, 1)
    im.save(filename)
    if 0:
        # Dump to stdout (debug aid, normally disabled)
        for y in range(height):
            output.write(' ')
            for x in range(width):
                for c in range(channels):
                    output.write('%0.9g,' % pixels[y, x, c])
                output.write(' ')
            output.write('\n')
def parse_env(optparser, entries):
    '''Translate a list of NAME=VALUE entries into an environment dictionary.

    Returns None when no entries were given, so callers can hand the result
    straight to subprocess (None meaning "inherit the parent environment").
    Malformed entries abort via optparser.error().
    '''
    if not entries:
        return None
    env = os.environ.copy()
    for entry in entries:
        name, sep, value = entry.partition('=')
        if not sep:
            # No '=' present: not a valid NAME=VALUE pair.
            optparser.error('invalid environment entry %r' % entry)
        env[name] = value
    return env
def main():
    '''Main program.

    Runs two retrace processes (reference and source) in lockstep over the
    same trace, compares their snapshots call-by-call, and reports the
    per-call precision in bits.  Mismatching calls can optionally have
    their images dumped and their GL state diffed.
    '''

    global options

    # Parse command line options
    optparser = optparse.OptionParser(
        usage='\n\t%prog [options] -- [glretrace options] <trace>',
        version='%%prog')
    optparser.add_option(
        '-r', '--retrace', metavar='PROGRAM',
        type='string', dest='retrace', default='glretrace',
        help='retrace command [default: %default]')
    optparser.add_option(
        '--ref-driver', metavar='DRIVER',
        type='string', dest='ref_driver', default=None,
        help='force reference driver')
    optparser.add_option(
        '--src-driver', metavar='DRIVER',
        type='string', dest='src_driver', default=None,
        help='force source driver')
    optparser.add_option(
        '--ref-arg', metavar='OPTION',
        type='string', action='append', dest='ref_args', default=[],
        help='pass argument to reference retrace')
    optparser.add_option(
        '--src-arg', metavar='OPTION',
        type='string', action='append', dest='src_args', default=[],
        help='pass argument to source retrace')
    optparser.add_option(
        '--ref-env', metavar='NAME=VALUE',
        type='string', action='append', dest='ref_env', default=[],
        help='add variable to reference environment')
    optparser.add_option(
        '--src-env', metavar='NAME=VALUE',
        type='string', action='append', dest='src_env', default=[],
        help='add variable to source environment')
    optparser.add_option(
        '--diff-prefix', metavar='PATH',
        type='string', dest='diff_prefix', default='.',
        help='prefix for the difference images')
    optparser.add_option(
        '-t', '--threshold', metavar='BITS',
        type="float", dest="threshold", default=12.0,
        help="threshold precision [default: %default]")
    optparser.add_option(
        '-S', '--snapshot-frequency', metavar='CALLSET',
        type="string", dest="snapshot_frequency", default='draw',
        help="calls to compare [default: %default]")
    optparser.add_option(
        '--diff-state',
        action='store_true', dest='diff_state', default=False,
        help='diff state between failing calls')
    optparser.add_option(
        '-o', '--output', metavar='FILE',
        type="string", dest="output",
        help="output file [default: stdout]")
    (options, args) = optparser.parse_args(sys.argv[1:])
    ref_env = parse_env(optparser, options.ref_env)
    src_env = parse_env(optparser, options.src_env)
    if not args:
        optparser.error("incorrect number of arguments")

    if options.ref_driver:
        options.ref_args.insert(0, '--driver=' + options.ref_driver)
    if options.src_driver:
        options.src_args.insert(0, '--driver=' + options.src_driver)

    refRetracer = Retracer(options.retrace, options.ref_args + args, ref_env)
    srcRetracer = Retracer(options.retrace, options.src_args + args, src_env)

    if options.output:
        output = open(options.output, 'wt')
    else:
        output = sys.stdout

    # NOTE(review): 'highligher' [sic] is the original spelling; kept as-is.
    highligher = AutoHighlighter(output)

    highligher.write('call\tprecision\n')

    # Track the last mismatching / matching call numbers so state diffs
    # can compare the last known-good call against the first bad one.
    last_bad = -1
    last_good = 0
    refRun = refRetracer.snapshot(options.snapshot_frequency)
    try:
        srcRun = srcRetracer.snapshot(options.snapshot_frequency)
        try:
            while True:
                # Get the reference image
                refImage, refCallNo = refRun.nextSnapshot()
                if refImage is None:
                    break

                # Get the source image
                srcImage, srcCallNo = srcRun.nextSnapshot()
                if srcImage is None:
                    break

                # Both retraces snapshot the same callset, so they must
                # stay in lockstep call-for-call.
                assert refCallNo == srcCallNo
                callNo = refCallNo

                # Compare the two images
                if isinstance(refImage, Image.Image) and isinstance(srcImage, Image.Image):
                    # Using PIL
                    numpyImages = False
                    comparer = Comparer(refImage, srcImage)
                    precision = comparer.precision()
                else:
                    # Using numpy (for floating point images)
                    # TODO: drop PIL when numpy path becomes general enough
                    import numpy
                    assert not isinstance(refImage, Image.Image)
                    assert not isinstance(srcImage, Image.Image)
                    numpyImages = True
                    assert refImage.shape == srcImage.shape
                    diffImage = numpy.square(srcImage - refImage)
                    height, width, channels = diffImage.shape
                    square_error = numpy.sum(diffImage)
                    # Add epsilon to avoid log(0) for identical images.
                    square_error += numpy.finfo(numpy.float32).eps
                    rel_error = square_error / float(height*width*channels)
                    # Express precision as matching bits: -log2(MSE).
                    bits = -math.log(rel_error)/math.log(2.0)
                    precision = bits

                mismatch = precision < options.threshold

                if mismatch:
                    highligher.color(highligher.red)
                    highligher.bold()
                highligher.write('%u\t%f\n' % (callNo, precision))
                if mismatch:
                    highligher.normal()

                if mismatch:
                    if options.diff_prefix:
                        # Dump ref/src (and PIL diff) images under the prefix.
                        prefix = os.path.join(options.diff_prefix, '%010u' % callNo)
                        prefix_dir = os.path.dirname(prefix)
                        if not os.path.isdir(prefix_dir):
                            os.makedirs(prefix_dir)
                        if numpyImages:
                            dumpNumpyImage(output, refImage, prefix + '.ref.png')
                            dumpNumpyImage(output, srcImage, prefix + '.src.png')
                        else:
                            refImage.save(prefix + '.ref.png')
                            srcImage.save(prefix + '.src.png')
                            comparer.write_diff(prefix + '.diff.png')
                    # Only diff state on the first bad call after a good one.
                    if last_bad < last_good and options.diff_state:
                        srcRetracer.diff_state(last_good, callNo, output)
                    last_bad = callNo
                else:
                    last_good = callNo

                highligher.flush()
        finally:
            srcRun.terminate()
    finally:
        refRun.terminate()
if __name__ == '__main__':
main()
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from octavia.openstack.common import fileutils
from octavia.openstack.common.gettextutils import _, _LE, _LI
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path',
default=os.environ.get("OCTAVIA_LOCK_PATH"),
help='Directory to use for lock files.')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
    """Let consuming projects override the default lock directory
    without requiring lock_path in their configuration files."""
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _FileLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # name: full path of the lock file; the file is created on acquire().
        self.lockfile = None
        self.fname = name

    def acquire(self):
        """Block (by polling) until the file lock is held; returns True."""
        basedir = os.path.dirname(self.fname)
        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                 'exception': e})

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        """Drop the lock; failures are logged, not raised."""
        try:
            self.unlock()
            self.lockfile.close()
            LOG.debug('Released file lock "%s"', self.fname)
        except IOError:
            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                          self.fname)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        return os.path.exists(self.fname)

    def trylock(self):
        # Platform-specific non-blocking lock; see _WindowsLock/_FcntlLock.
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()
class _WindowsLock(_FileLock):
    """File lock based on msvcrt.locking(); Windows only (msvcrt is
    imported at module scope only when os.name == 'nt')."""

    def trylock(self):
        # Lock a single byte, non-blocking; raises IOError if already held.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _FcntlLock(_FileLock):
    """File lock based on fcntl.lockf(); POSIX platforms."""

    def trylock(self):
        # Exclusive, non-blocking; raises IOError(EACCES/EAGAIN) when held.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
class _PosixLock(object):
    """Inter-process lock backed by a named POSIX semaphore (posix_ipc).

    Unlike the file locks, no lock directory is needed; the semaphore name
    is derived from the lock name.
    """

    def __init__(self, name):
        # Hash the name because it's not valid to have POSIX semaphore
        # names with things like / in them. Then use base64 to encode
        # the digest() instead taking the hexdigest() because the
        # result is shorter and most systems can't have shm sempahore
        # names longer than 31 characters.
        h = hashlib.sha1()
        h.update(name.encode('ascii'))
        self.name = str((b'/' + base64.urlsafe_b64encode(
            h.digest())).decode('ascii'))

    def acquire(self, timeout=None):
        """Create the semaphore if needed and acquire it; returns self."""
        self.semaphore = posix_ipc.Semaphore(self.name,
                                             flags=posix_ipc.O_CREAT,
                                             initial_value=1)
        self.semaphore.acquire(timeout)
        return self

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        self.semaphore.release()
        self.semaphore.close()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        # Probe for the named semaphore without creating it.
        try:
            semaphore = posix_ipc.Semaphore(self.name)
        except posix_ipc.ExistentialError:
            return False
        else:
            semaphore.close()
        return True
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
FileLock = _WindowsLock
else:
import base64
import fcntl
import hashlib
import posix_ipc
InterProcessLock = _PosixLock
FileLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
    """Build the on-disk path for the named lock file."""
    # NOTE(mikal): the lock name cannot contain directory
    # separators
    safe_name = name.replace(os.sep, '_')
    if lock_file_prefix:
        if lock_file_prefix.endswith('-'):
            safe_name = lock_file_prefix + safe_name
        else:
            safe_name = lock_file_prefix + '-' + safe_name

    base = lock_path or CONF.lock_path
    if not base:
        if InterProcessLock is not _PosixLock:
            raise cfg.RequiredOptError('lock_path')
        # NOTE(bnemec): POSIX semaphore locks need no real directory, so a
        # fake path avoids raising RequiredOptError unnecessarily.
        base = 'posixlock:/'
    return os.path.join(base, safe_name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
    """Return an inter-process lock object for `name`."""
    LOG.debug('Attempting to grab external lock "%(lock)s"',
              {'lock': name})

    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)

    # NOTE(bnemec): an explicit lock_path means the caller relies on
    # file-based locking semantics, so posix locks must not be substituted.
    lock_cls = FileLock if lock_path else InterProcessLock
    return lock_cls(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
    """Delete the on-disk file behind an external lock that is no longer
    used; helpful when many lock files accumulate.  Missing files are
    logged, not raised."""
    with internal_lock(name):
        target = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(target)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': target})
def internal_lock(name):
    """Return the per-process semaphore for `name`, creating it lazily.

    _semaphores is a WeakValueDictionary, so unused semaphores are
    garbage-collected once no caller holds them.
    """
    with _semaphores_lock:
        sem = _semaphores.get(name)
        if sem is None:
            sem = threading.Semaphore()
            _semaphores[name] = sem
    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
    return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param name: name of the lock; also keys the per-process semaphore.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: optional directory override for external lock files.
    """
    # The internal (thread-level) lock is always taken first; the external
    # (process-level) lock, when requested, is nested inside it.
    int_lock = internal_lock(name)
    with int_lock:
        if external and not CONF.disable_process_locking:
            ext_lock = external_lock(name, lock_file_prefix, lock_path)
            with ext_lock:
                yield ext_lock
        else:
            yield int_lock
    LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                # Logged unconditionally -- even when f() raised.
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': f.__name__})
        return inner
    return wrap
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils
        synchronized = lockutils.synchronized_with_prefix('nova-')

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
    python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
    """
    lock_dir = tempfile.mkdtemp()
    os.environ["OCTAVIA_LOCK_PATH"] = lock_dir
    try:
        # argv[0] is this wrapper itself; everything after it is the command.
        return subprocess.call(argv[1:])
    finally:
        shutil.rmtree(lock_dir, ignore_errors=True)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
import numpy as np
import regreg.api as rr
from selection.randomized.glm import pairs_bootstrap_glm, bootstrap_cov
from selection.randomized.query import query
from selection.randomized.randomization import split
import functools
def pairs_bootstrap_glm(glm_loss,
                        active,
                        beta_full=None,
                        inactive=None,
                        scaling=1.,
                        solve_args={'min_its':50, 'tol':1.e-10}):
    """
    pairs bootstrap of (beta_hat_active, -grad_inactive(beta_hat_active))

    Returns a pair ``(boot_fn, observed)`` where ``boot_fn(indices)``
    evaluates the bootstrap statistic on the resampled rows ``indices``
    and ``observed`` is the statistic at the original data.  The first
    ``nactive`` coordinates are the active restricted M-estimate, the
    remainder (if ``inactive`` is given) the negative inactive gradient.
    """
    X, Y = glm_loss.data

    # Fit the restricted M-estimator if a full coefficient vector was not
    # supplied by the caller.
    if beta_full is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
        beta_full = np.zeros(glm_loss.shape)
        beta_full[active] = beta_active
    else:
        beta_active = beta_full[active]

    X_active = X[:,active]

    nactive = active.sum()
    ntotal = nactive

    if inactive is not None:
        X_inactive = X[:,inactive]
        ntotal += inactive.sum()

    # Weighted Gram pieces at the restricted fit (W = diag of loss Hessian).
    _bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _bootQ = X_active.T.dot(_bootW.dot(X_active))
    _bootQinv = np.linalg.inv(_bootQ)
    if inactive is not None:
        _bootC = X_inactive.T.dot(_bootW.dot(X_active))
        # _bootI maps active-score perturbations onto the inactive block.
        _bootI = _bootC.dot(_bootQinv)
    else:
        _bootI = None

    nactive = active.sum()
    if inactive is not None:
        X_full = np.hstack([X_active,X_inactive])
        beta_overall = np.zeros(X_full.shape[1])
        beta_overall[:nactive] = beta_active
    else:
        X_full = X_active
        beta_overall = beta_active

    _boot_mu = lambda X_full, beta_overall: glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))

    if ntotal > nactive:
        observed = np.hstack([beta_active, -glm_loss.smooth_objective(beta_full, 'grad')[inactive]])
    else:
        observed = beta_active

    # scaling is a lipschitz constant for a gradient squared
    _sqrt_scaling = np.sqrt(scaling)

    def _boot_score(X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall, indices):
        # Resample rows with the given bootstrap indices.
        X_star = X_full[indices]
        Y_star = Y[indices]
        score = X_star.T.dot(Y_star - _boot_mu(X_star, beta_overall))
        result = np.zeros(ntotal)
        # One-step approximation to the resampled restricted M-estimate.
        result[:nactive] = _bootQinv.dot(score[:nactive])
        if ntotal > nactive:
            result[nactive:] = score[nactive:] - _bootI.dot(score[:nactive])
        # Active block is multiplied, inactive block divided, mirroring the
        # scaling applied to `observed` below.
        result[:nactive] *= _sqrt_scaling
        result[nactive:] /= _sqrt_scaling
        return result

    # NOTE: `observed` is rescaled in place to match the bootstrap scaling.
    observed[:nactive] *= _sqrt_scaling
    observed[nactive:] /= _sqrt_scaling

    return functools.partial(_boot_score, X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall), observed
def pairs_bootstrap_score(glm_loss,
                          active,
                          beta_active=None,
                          solve_args={'min_its':50, 'tol':1.e-10}):
    """
    pairs bootstrap of (beta_hat_active, -grad_inactive(beta_hat_active))

    Returns ``boot_fn`` where ``boot_fn(indices)`` is the (negative) full
    score of the GLM at the restricted fit, evaluated on the resampled
    rows ``indices``.
    """
    X, Y = glm_loss.data

    # Fit the restricted M-estimator unless supplied by the caller.
    if beta_active is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)

    X_active = X[:,active]

    _bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _boot_mu = lambda X_active, beta_active: glm_loss.saturated_loss.mean_function(X_active.dot(beta_active))

    def _boot_score(X, Y, active, beta_active, indices):
        X_star = X[indices]
        Y_star = Y[indices]
        # Full-design score with the mean evaluated from active columns only.
        score = -X_star.T.dot(Y_star - _boot_mu(X_star[:,active], beta_active))
        return score

    return functools.partial(_boot_score, X, Y, active, beta_active)
def set_alpha_matrix(glm_loss,
                     active,
                     beta_full=None,
                     inactive=None,
                     scaling=1.,
                     solve_args={'min_its': 50, 'tol': 1.e-10}):
    """Return the (nactive, n) influence matrix Q^{-1} X_E^T diag(residuals).

    NOTE(review): `scaling` is accepted for signature symmetry with
    pairs_bootstrap_glm but is unused here.
    """
    X, Y = glm_loss.data

    # Fit the restricted M-estimator if not supplied.
    if beta_full is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
        beta_full = np.zeros(glm_loss.shape)
        beta_full[active] = beta_active
    else:
        beta_active = beta_full[active]

    X_active = X[:,active]

    nactive = active.sum()
    ntotal = nactive

    if inactive is not None:
        X_inactive = X[:,inactive]
        ntotal += inactive.sum()

    # Weighted Gram matrix of the active design at the restricted fit.
    _W = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _Q = X_active.T.dot(_W.dot(X_active))
    _Qinv = np.linalg.inv(_Q)

    nactive = active.sum()
    if inactive is not None:
        X_full = np.hstack([X_active, X_inactive])
        beta_overall = np.zeros(X_full.shape[1])
        beta_overall[:nactive] = beta_active
    else:
        X_full = X_active
        beta_overall = beta_active

    obs_residuals = Y - glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))

    return np.dot(np.dot(_Qinv, X_active.T), np.diag(obs_residuals))
class M_estimator(query):
def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its':50, 'tol':1.e-10}):
"""
Fits the logistic regression to a candidate active set, without penalty.
Calls the method bootstrap_covariance() to bootstrap the covariance matrix.
Computes $\bar{\beta}_E$ which is the restricted
M-estimator (i.e. subject to the constraint $\beta_{-E}=0$).
Parameters:
-----------
active: np.bool
The active set from fitting the logistic lasso
solve_args: dict
Arguments to be passed to regreg solver.
Returns:
--------
None
Notes:
------
Sets self._beta_unpenalized which will be used in the covariance matrix calculation.
Also computes Hessian of loss at restricted M-estimator as well as the bootstrap covariance.
"""
query.__init__(self, randomization)
(self.loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args) = (loss,
epsilon,
penalty,
randomization,
solve_args)
# Methods needed for subclassing a query
def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
self.randomize()
(loss,
randomized_loss,
epsilon,
penalty,
randomization,
solve_args) = (self.loss,
self.randomized_loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args)
# initial solution
problem = rr.simple_problem(randomized_loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
groups = np.unique(penalty.groups)
active_groups = np.zeros(len(groups), np.bool)
unpenalized_groups = np.zeros(len(groups), np.bool)
active_directions = []
active = np.zeros(loss.shape, np.bool)
unpenalized = np.zeros(loss.shape, np.bool)
initial_scalings = []
for i, g in enumerate(groups):
group = penalty.groups == g
active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
unpenalized_groups[i] = (penalty.weights[g] == 0)
if active_groups[i]:
active[group] = True
z = np.zeros(active.shape, np.float)
z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
active_directions.append(z)
initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
if unpenalized_groups[i]:
unpenalized[group] = True
# solve the restricted problem
self._overall = active + unpenalized
self._inactive = ~self._overall
self._unpenalized = unpenalized
self._active_directions = np.array(active_directions).T
self._active_groups = np.array(active_groups, np.bool)
self._unpenalized_groups = np.array(unpenalized_groups, np.bool)
self.selection_variable = {'groups':self._active_groups,
'variables':self._overall,
'directions':self._active_directions}
# initial state for opt variables
initial_subgrad = -(self.randomized_loss.smooth_objective(self.initial_soln, 'grad') +
self.randomized_loss.quadratic.objective(self.initial_soln, 'grad'))
# the quadratic of a smooth_atom is not included in computing the smooth_objective
initial_subgrad = initial_subgrad[self._inactive]
initial_unpenalized = self.initial_soln[self._unpenalized]
self.observed_opt_state = np.concatenate([initial_scalings,
initial_unpenalized,
initial_subgrad], axis=0)
# set the _solved bit
self._solved = True
# Now setup the pieces for linear decomposition
(loss,
epsilon,
penalty,
initial_soln,
overall,
inactive,
unpenalized,
active_groups,
active_directions) = (self.loss,
self.epsilon,
self.penalty,
self.initial_soln,
self._overall,
self._inactive,
self._unpenalized,
self._active_groups,
self._active_directions)
# scaling should be chosen to be Lipschitz constant for gradient of Gaussian part
# we are implicitly assuming that
# loss is a pairs model
_sqrt_scaling = np.sqrt(scaling)
_beta_unpenalized = restricted_Mest(loss, overall, solve_args=solve_args)
beta_full = np.zeros(overall.shape)
beta_full[overall] = _beta_unpenalized
_hessian = loss.hessian(beta_full)
self._beta_full = beta_full
# observed state for score
self.observed_score_state = np.hstack([_beta_unpenalized * _sqrt_scaling,
-loss.smooth_objective(beta_full, 'grad')[inactive] / _sqrt_scaling])
# form linear part
self.num_opt_var = p = loss.shape[0] # shorthand for p
# (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
# E for active
# U for unpenalized
# -E for inactive
_opt_linear_term = np.zeros((p, self._active_groups.sum() + unpenalized.sum() + inactive.sum()))
_score_linear_term = np.zeros((p, p))
# \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator
Mest_slice = slice(0, overall.sum())
_Mest_hessian = _hessian[:,overall]
_score_linear_term[:,Mest_slice] = -_Mest_hessian / _sqrt_scaling
# N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution
null_idx = range(overall.sum(), p)
inactive_idx = np.nonzero(inactive)[0]
for _i, _n in zip(inactive_idx, null_idx):
_score_linear_term[_i,_n] = -_sqrt_scaling
# c_E piece
scaling_slice = slice(0, active_groups.sum())
if len(active_directions)==0:
_opt_hessian=0
else:
_opt_hessian = (_hessian + epsilon * np.identity(p)).dot(active_directions)
_opt_linear_term[:,scaling_slice] = _opt_hessian / _sqrt_scaling
self.observed_opt_state[scaling_slice] *= _sqrt_scaling
# beta_U piece
unpenalized_slice = slice(active_groups.sum(), active_groups.sum() + unpenalized.sum())
unpenalized_directions = np.identity(p)[:,unpenalized]
if unpenalized.sum():
_opt_linear_term[:,unpenalized_slice] = (_hessian + epsilon * np.identity(p)).dot(unpenalized_directions) / _sqrt_scaling
self.observed_opt_state[unpenalized_slice] *= _sqrt_scaling
# subgrad piece
subgrad_idx = range(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
subgrad_slice = slice(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
for _i, _s in zip(inactive_idx, subgrad_idx):
_opt_linear_term[_i,_s] = _sqrt_scaling
self.observed_opt_state[subgrad_slice] /= _sqrt_scaling
# form affine part
_opt_affine_term = np.zeros(p)
idx = 0
groups = np.unique(penalty.groups)
for i, g in enumerate(groups):
if active_groups[i]:
group = penalty.groups == g
_opt_affine_term[group] = active_directions[:,idx][group] * penalty.weights[g]
idx += 1
# two transforms that encode score and optimization
# variable roles
self.opt_transform = (_opt_linear_term, _opt_affine_term)
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
# later, we will modify `score_transform`
# in `linear_decomposition`
# now store everything needed for the projections
# the projection acts only on the optimization
# variables
self.scaling_slice = scaling_slice
# weights are scaled here because the linear terms scales them by scaling
new_groups = penalty.groups[inactive]
new_weights = dict([(g, penalty.weights[g] / _sqrt_scaling) for g in penalty.weights.keys() if g in np.unique(new_groups)])
# we form a dual group lasso object
# to do the projection
self.group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)
self.subgrad_slice = subgrad_slice
self._setup = True
def setup_sampler(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
pass
def projection(self, opt_state):
"""
Full projection for Langevin.
The state here will be only the state of the optimization variables.
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
if ('subgradient' not in self.selection_variable and
'scaling' not in self.selection_variable): # have not conditioned on any thing else
new_state = opt_state.copy() # not really necessary to copy
new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
new_state[self.subgrad_slice] = self.group_lasso_dual.bound_prox(opt_state[self.subgrad_slice])
elif ('subgradient' not in self.selection_variable and
'scaling' in self.selection_variable): # conditioned on the initial scalings
# only the subgradient in opt_state
new_state = self.group_lasso_dual.bound_prox(opt_state)
elif ('subgradient' in self.selection_variable and
'scaling' not in self.selection_variable): # conditioned on the subgradient
# only the scaling in opt_state
new_state = np.maximum(opt_state, 0)
else:
new_state = opt_state
return new_state
# optional things to condition on
def condition_on_subgradient(self):
    """
    Condition on the observed subgradient: fold its (now fixed)
    contribution into the affine offset of `self.opt_transform`, leaving
    only the scaling variables as optimization variables.

    Maybe we should allow subgradients of only some variables...
    """
    if not self._setup:
        raise ValueError('setup_sampler should be called before using this function')

    opt_linear, opt_offset = self.opt_transform

    # absorb the fixed subgradient columns into the offset
    new_offset = opt_linear[:,self.subgrad_slice].dot(self.observed_opt_state[self.subgrad_slice]) + opt_offset
    new_linear = opt_linear[:,self.scaling_slice]

    self.opt_transform = (new_linear, new_offset)

    # for group LASSO this should not induce a bigger jacobian as
    # the subgradients are in the interior of a ball
    self.selection_variable['subgradient'] = self.observed_opt_state[self.subgrad_slice]

    # reset variables: only the scalings remain as optimization state
    self.observed_opt_state = self.observed_opt_state[self.scaling_slice]
    self.scaling_slice = slice(None, None, None)
    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the supported spelling of the boolean dtype
    self.subgrad_slice = np.zeros(new_linear.shape[1], bool)
    self.num_opt_var = new_linear.shape[1]
def condition_on_scalings(self):
    """
    Condition on the observed scalings: fold their (now fixed)
    contribution into the affine offset of `self.opt_transform`, leaving
    only the subgradient as optimization variables.

    Maybe we should allow subgradients of only some variables...
    """
    if not self._setup:
        raise ValueError('setup_sampler should be called before using this function')

    opt_linear, opt_offset = self.opt_transform

    # absorb the fixed scaling columns into the offset
    new_offset = opt_linear[:,self.scaling_slice].dot(self.observed_opt_state[self.scaling_slice]) + opt_offset
    new_linear = opt_linear[:,self.subgrad_slice]

    self.opt_transform = (new_linear, new_offset)

    # for group LASSO this will induce a bigger jacobian
    self.selection_variable['scalings'] = self.observed_opt_state[self.scaling_slice]

    # reset slices: only the subgradient remains as optimization state
    self.observed_opt_state = self.observed_opt_state[self.subgrad_slice]
    self.subgrad_slice = slice(None, None, None)
    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the supported spelling of the boolean dtype
    self.scaling_slice = np.zeros(new_linear.shape[1], bool)
    self.num_opt_var = new_linear.shape[1]
def restricted_Mest(Mest_loss, active, solve_args={'min_its':50, 'tol':1.e-10}):
    """
    Fit the (unpenalized) M-estimator restricted to the `active` columns
    of the design and return the fitted coefficients.

    Requires `Mest_loss` to expose its design as an indexable array
    (ndarray / scipy.sparse); general linear transforms are rejected.
    """
    design, _response = Mest_loss.data
    if Mest_loss._is_transform:
        raise NotImplementedError('to fit restricted model, X must be an ndarray or scipy.sparse; general transforms not implemented')
    # solve the smooth problem on the restricted design
    restricted_design = design[:,active]
    restricted_loss = rr.affine_smooth(Mest_loss.saturated_loss, restricted_design)
    return restricted_loss.solve(**solve_args)
class M_estimator_split(M_estimator):
    """
    M-estimator whose randomization comes from data splitting: the
    penalized problem is solved with a `split` randomization built from a
    subsample of size `subsample_size` out of `total_size` observations.
    """

    def __init__(self, loss, epsilon, subsample_size, penalty, solve_args={'min_its':50, 'tol':1.e-10}):
        # total sample size read off the saturated loss
        total_size = loss.saturated_loss.shape[0]
        # `split` is the data-splitting randomization (defined elsewhere in this module)
        self.randomization = split(loss.shape, subsample_size, total_size)

        M_estimator.__init__(self,loss, epsilon, penalty, self.randomization, solve_args=solve_args)

        total_size = loss.saturated_loss.shape[0]
        if subsample_size > total_size:
            raise ValueError('subsample size must be smaller than total sample size')
        self.total_size, self.subsample_size = total_size, subsample_size

    def setup_sampler(self, scaling=1., solve_args={'min_its': 50, 'tol': 1.e-10}, B=2000):
        """
        Run the base-class sampler setup, then estimate by bootstrap
        (B replications) the covariance of the data-splitting
        randomization and install it via `set_covariance`.

        Returns the pair (bootstrap_score, cov).
        """
        M_estimator.setup_sampler(self,
                                  scaling=scaling,
                                  solve_args=solve_args)

        # now we need to estimate covariance of
        # loss.grad(\beta_E^*) - 1/pi * randomized_loss.grad(\beta_E^*)

        m, n, p = self.subsample_size, self.total_size, self.loss.shape[0] # shorthand
        #from .glm import pairs_bootstrap_score
        bootstrap_score = pairs_bootstrap_score(self.loss,
                                                self._overall,
                                                beta_active=self._beta_full[self._overall],
                                                solve_args=solve_args)

        # find unpenalized MLE on subsample: temporarily strip the quadratic
        # term from the randomized loss, refit, then restore it below
        newq, oldq = rr.identity_quadratic(0, 0, 0, 0), self.randomized_loss.quadratic
        self.randomized_loss.quadratic = newq
        beta_active_subsample = restricted_Mest(self.randomized_loss,
                                                self._overall)

        bootstrap_score_split = pairs_bootstrap_score(self.loss,
                                                      self._overall,
                                                      beta_active=beta_active_subsample,
                                                      solve_args=solve_args)
        self.randomized_loss.quadratic = oldq

        # inverse sampling fraction 1/pi = n/m (true division)
        inv_frac = n / m

        def subsample_diff(m, n, indices):
            # one bootstrap replicate: full-data score minus the rescaled
            # score computed on a size-m subsample of `indices`
            subsample = np.random.choice(indices, size=m, replace=False)
            full_score = bootstrap_score(indices) # a sum of n terms
            randomized_score = bootstrap_score_split(subsample) # a sum of m terms
            return full_score - randomized_score * inv_frac

        first_moment = np.zeros(p)
        second_moment = np.zeros((p, p))

        _n = np.arange(n)
        for _ in range(B):
            indices = np.random.choice(_n, size=n, replace=True)
            randomized_score = subsample_diff(m, n, indices)
            first_moment += randomized_score
            second_moment += np.multiply.outer(randomized_score, randomized_score)

        first_moment /= B
        second_moment /= B

        # sample covariance of the B bootstrap replicates
        cov = second_moment - np.multiply.outer(first_moment,
                                                first_moment)

        self.randomization.set_covariance(cov)

        return bootstrap_score, cov
class M_estimator_approx(M_estimator):
    """
    M-estimator variant used by approximate selective inference: after
    solving, it reorders the opt/score affine transforms so the active
    coordinates come first and precomputes the score covariance and the
    partitioned A (score) / B (opt) matrices used downstream.

    Parameters
    ----------
    loss, epsilon, penalty, randomization : as in `M_estimator`
    randomizer : label of the randomization scheme (stored, not interpreted here)
    estimation : str
        Either 'parametric' or 'bootstrap' -- how `score_cov` is estimated.
    """

    def __init__(self, loss, epsilon, penalty, randomization, randomizer, estimation):
        M_estimator.__init__(self, loss, epsilon, penalty, randomization)
        self.randomizer = randomizer
        self.estimation = estimation

    def solve_approx(self):
        """
        Solve the randomized problem, then populate the attributes needed
        by the approximate sampler: reordered transforms, `feasible_point`,
        `inactive_lagrange`, `score_cov` / `target_cov` / `score_cov_inv`,
        and the partitioned blocks of A and B.
        """
        self.solve()

        # reorder rows of the transforms: active coordinates first, then inactive
        (_opt_linear_term, _opt_affine_term) = self.opt_transform
        self._opt_linear_term = np.concatenate((_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
        self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
        self.opt_transform = (self._opt_linear_term, self._opt_affine_term)

        (_score_linear_term, _) = self.score_transform
        self._score_linear_term = np.concatenate((_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
        self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))

        # feasible point: observed score followed by |active solution|
        self.feasible_point = np.append(self.observed_score_state, np.abs(self.initial_soln[self._overall]))
        lagrange = self.penalty._weight_array
        self.inactive_lagrange = lagrange[~self._overall]

        X, _ = self.loss.data
        n, p = X.shape
        self.p = p

        nactive = self._overall.sum()
        self.nactive = nactive
        self.target_observed = self.observed_score_state[:self.nactive]

        if self.estimation == 'parametric':
            score_cov = np.zeros((p, p))
            inv_X_active = np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))
            # PERF: reuse `inv_X_active` instead of inverting the same
            # active Gram matrix a second time (original recomputed it)
            projection_X_active = X[:, self._overall].dot(inv_X_active).dot(X[:, self._overall].T)
            score_cov[:self.nactive, :self.nactive] = inv_X_active
            score_cov[self.nactive:, self.nactive:] = X[:, ~self._overall].T.dot(np.identity(n) - projection_X_active).dot(X[:, ~self._overall])
        elif self.estimation == 'bootstrap':
            bootstrap_score = pairs_bootstrap_glm(self.loss,
                                                  self._overall,
                                                  beta_full=self._beta_full,
                                                  inactive=~self._overall)[0]
            score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), bootstrap_score)

        self.score_cov = score_cov
        self.target_cov = score_cov[:nactive, :nactive]
        self.score_cov_inv = np.linalg.inv(self.score_cov)

        # partitioned linear terms used by the approximate sampler
        self.B = self._opt_linear_term
        self.A = self._score_linear_term
        self.B_active = self.B[:nactive, :nactive]
        self.B_inactive = self.B[nactive:, :nactive]
        self.A_active = self._score_linear_term[:nactive, :]
        self.A_inactive = self._score_linear_term[nactive:, :]
        self.offset_active = self._opt_affine_term[:nactive]
class M_estimator_approx_carved(M_estimator_split):
    """
    Data-carving analogue of `M_estimator_approx`, built on top of the
    data-splitting randomization provided by `M_estimator_split`.

    Parameters
    ----------
    loss, epsilon, subsample_size, penalty : as in `M_estimator_split`
    estimation : str
        Either 'parametric' or 'bootstrap' -- how `score_cov` is estimated.
    """

    def __init__(self, loss, epsilon, subsample_size, penalty, estimation):
        M_estimator_split.__init__(self, loss, epsilon, subsample_size, penalty, solve_args={'min_its':50, 'tol':1.e-10})
        self.estimation = estimation

    def solve_approx(self):
        """
        Solve the randomized problem, reorder the opt/score transforms so
        the active block comes first, run the split sampler setup, and
        compute `score_cov` and `score_cov_inv`.
        """
        self.solve()

        self.nactive = self._overall.sum()
        X, _ = self.loss.data
        n, p = X.shape
        self.p = p
        self.target_observed = self.observed_score_state[:self.nactive]

        # feasible point: observed score, then |active opt variables|,
        # then the remaining (inactive) opt variables
        self.feasible_point = np.concatenate([self.observed_score_state,
                                              np.fabs(self.observed_opt_state[:self.nactive]),
                                              self.observed_opt_state[self.nactive:]], axis=0)

        # reorder rows of the transforms: active coordinates first, then inactive
        (_opt_linear_term, _opt_affine_term) = self.opt_transform
        self._opt_linear_term = np.concatenate(
            (_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
        self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
        self.opt_transform = (self._opt_linear_term, self._opt_affine_term)

        (_score_linear_term, _) = self.score_transform
        self._score_linear_term = np.concatenate(
            (_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
        self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))

        lagrange = self.penalty._weight_array
        self.inactive_lagrange = lagrange[~self._overall]

        # `M_estimator_split.setup_sampler` estimates the randomization covariance
        self.bootstrap_score, self.randomization_cov = self.setup_sampler()

        if self.estimation == 'parametric':
            score_cov = np.zeros((p, p))
            inv_X_active = np.linalg.inv(X[:, self._overall].T.dot(X[:, self._overall]))
            # PERF: reuse `inv_X_active` instead of inverting the same
            # active Gram matrix a second time (original recomputed it)
            projection_X_active = X[:, self._overall].dot(inv_X_active).dot(X[:, self._overall].T)
            score_cov[:self.nactive, :self.nactive] = inv_X_active
            score_cov[self.nactive:, self.nactive:] = X[:, ~self._overall].T.dot(np.identity(n) - projection_X_active).dot(X[:, ~self._overall])
        elif self.estimation == 'bootstrap':
            score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), self.bootstrap_score)

        self.score_cov = score_cov
        self.score_cov_inv = np.linalg.inv(self.score_cov)
class M_estimator_approx_logistic(M_estimator):
    """
    Analogue of `M_estimator_approx` for logistic regression: the same
    reordering of the opt/score transforms, but the 'parametric' score
    covariance is built from logistic-variance weights rather than X'X.
    """

    def __init__(self, loss, epsilon, penalty, randomization, randomizer, estimation):
        M_estimator.__init__(self, loss, epsilon, penalty, randomization)
        # label of the randomization scheme (stored, not interpreted here)
        self.randomizer = randomizer
        # 'parametric' or 'bootstrap': how the score covariance is estimated
        self.estimation = estimation

    def solve_approx(self):
        """
        Solve the randomized problem, reorder the transforms so active
        coordinates come first, and compute the score covariance and the
        partitioned A (score) / B (opt) matrices used downstream.
        """
        self.solve()

        # reorder rows of the transforms: active coordinates first, then inactive
        (_opt_linear_term, _opt_affine_term) = self.opt_transform
        self._opt_linear_term = np.concatenate((_opt_linear_term[self._overall, :], _opt_linear_term[~self._overall, :]), 0)
        self._opt_affine_term = np.concatenate((_opt_affine_term[self._overall], _opt_affine_term[~self._overall]), 0)
        self.opt_transform = (self._opt_linear_term, self._opt_affine_term)

        (_score_linear_term, _) = self.score_transform
        self._score_linear_term = np.concatenate((_score_linear_term[self._overall, :], _score_linear_term[~self._overall, :]), 0)
        self.score_transform = (self._score_linear_term, np.zeros(self._score_linear_term.shape[0]))

        # feasible point: observed score followed by |active solution|
        self.feasible_point = np.append(self.observed_score_state, np.abs(self.initial_soln[self._overall]))
        lagrange = self.penalty._weight_array
        self.inactive_lagrange = lagrange[~self._overall]

        X, _ = self.loss.data
        n, p = X.shape
        self.p = p

        nactive = self._overall.sum()
        self.nactive = nactive
        self.target_observed = self.observed_score_state[:self.nactive]

        if self.estimation == 'parametric':
            score_cov = np.zeros((p,p))
            vec = np.exp(X[:, self._overall].dot(self.target_observed))
            #vec = np.exp(np.zeros(n))
            # pi = exp(eta) / (1 + exp(eta))^2, i.e. the logistic variance
            # p * (1 - p) at linear predictor eta = X_active . target_observed
            pi = np.true_divide(vec, np.power(1. + vec, 2))
            # NOTE(review): np.diag(pi) materializes an n x n matrix;
            # fine for moderate n, costly for very large samples
            weights = np.diag(pi)
            # weighted Gram matrix of the active block (Fisher information)
            Q_active = X[:, self._overall].T.dot(weights).dot(X[:, self._overall])
            Q_active_inv = np.linalg.inv(Q_active)
            P_inactive = X[:, ~self._overall].T.dot(np.identity(n)-weights.dot(X[:, self._overall].dot(Q_active_inv)
                                                                              .dot(X[:, self._overall].T)))
            score_cov[:self.nactive, :self.nactive] = Q_active_inv
            score_cov[self.nactive:, self.nactive:] = P_inactive.dot(weights).dot(P_inactive.T)
        elif self.estimation == 'bootstrap':
            bootstrap_score = pairs_bootstrap_glm(self.loss,
                                                  self._overall,
                                                  beta_full=self._beta_full,
                                                  inactive=~self._overall)[0]
            score_cov = bootstrap_cov(lambda: np.random.choice(n, size=(n,), replace=True), bootstrap_score)

        self.score_cov = score_cov
        self.target_cov = score_cov[:nactive, :nactive]
        self.score_cov_inv = np.linalg.inv(self.score_cov)

        # partitioned linear terms used by the approximate sampler
        self.B = self._opt_linear_term
        self.A = self._score_linear_term
        self.B_active = self.B[:nactive, :nactive]
        self.B_inactive = self.B[nactive:, :nactive]
        self.A_active = self._score_linear_term[:nactive, :]
        self.A_inactive = self._score_linear_term[nactive:, :]
        self.offset_active = self._opt_affine_term[:nactive]
|
# NOTE(review): removed trailing "Subsets and Splits ..." text here -- it was
# dataset-viewer boilerplate accidentally appended to this Python source and
# was not valid code.